/*********************************************************************************************************
2* Software License Agreement (BSD License) *
3* Author: Sebastien Decugis <sdecugis@freediameter.net> *
4* *
5* Copyright (c) 2013, WIDE Project and NICT *
6* All rights reserved. *
7* *
8* Redistribution and use of this software in source and binary forms, with or without modification, are *
9* permitted provided that the following conditions are met: *
10* *
11* * Redistributions of source code must retain the above *
12* copyright notice, this list of conditions and the *
13* following disclaimer. *
14* *
15* * Redistributions in binary form must reproduce the above *
16* copyright notice, this list of conditions and the *
17* following disclaimer in the documentation and/or other *
18* materials provided with the distribution. *
19* *
20* * Neither the name of the WIDE Project or NICT nor the *
21* names of its contributors may be used to endorse or *
22* promote products derived from this software without *
23* specific prior written permission of WIDE Project and *
24* NICT. *
25* *
26* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED *
27* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A *
28* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR *
29* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT *
30* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS *
31* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR *
32* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF *
33* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *
34*********************************************************************************************************/
35
36#include "fdcore-internal.h"
37
/* Global list of all known peers, ordered by their Diameter Id; protected by fd_g_peers_rw */
struct fd_list fd_g_peers = FD_LIST_INITIALIZER(fd_g_peers);
pthread_rwlock_t fd_g_peers_rw = PTHREAD_RWLOCK_INITIALIZER;

/* List of active peers; protected by fd_g_activ_peers_rw */
struct fd_list fd_g_activ_peers = FD_LIST_INITIALIZER(fd_g_activ_peers); /* peers linked by their p_actives ordered by p_diamid */
pthread_rwlock_t fd_g_activ_peers_rw = PTHREAD_RWLOCK_INITIALIZER;

/* List of validation callbacks (registered with fd_peer_validate_register); protected by validators_rw */
static struct fd_list validators = FD_LIST_INITIALIZER(validators); /* list items are simple fd_list with "o" pointing to the callback */
static pthread_rwlock_t validators_rw = PTHREAD_RWLOCK_INITIALIZER;
49
50
51/* Alloc / reinit a peer structure. if *ptr is not NULL, it must already point to a valid struct fd_peer. */
52int fd_peer_alloc(struct fd_peer ** ptr)
53{
54 struct fd_peer *p;
55
56 TRACE_ENTRY("%p", ptr);
57 CHECK_PARAMS(ptr);
58
59 if (*ptr) {
60 p = *ptr;
61 } else {
62 CHECK_MALLOC( p = malloc(sizeof(struct fd_peer)) );
63 *ptr = p;
64 }
65
66 /* Now initialize the content */
67 memset(p, 0, sizeof(struct fd_peer));
68
69 fd_list_init(&p->p_hdr.chain, p);
70
71 fd_list_init(&p->p_hdr.info.pi_endpoints, p);
72 fd_list_init(&p->p_hdr.info.runtime.pir_apps, p);
73
74 p->p_eyec = EYEC_PEER;
75 CHECK_POSIX( pthread_mutex_init(&p->p_state_mtx, NULL) );
76
77 fd_list_init(&p->p_actives, p);
78 fd_list_init(&p->p_expiry, p);
79 CHECK_FCT( fd_fifo_new(&p->p_tosend, 5) );
80 CHECK_FCT( fd_fifo_new(&p->p_tofailover, 0) );
81 p->p_hbh = lrand48();
82
83 fd_list_init(&p->p_sr.srs, p);
84 fd_list_init(&p->p_sr.exp, p);
85 CHECK_POSIX( pthread_mutex_init(&p->p_sr.mtx, NULL) );
86 CHECK_POSIX( pthread_cond_init(&p->p_sr.cnd, NULL) );
87
88 fd_list_init(&p->p_connparams, p);
89
90 return 0;
91}
92
/* Add a new peer entry in the global peers list.
 * info: description of the peer; pi_diamid is mandatory; its endpoints list is moved into the new entry.
 * orig_dbg: free-form debug string describing where this entry comes from ("unspecified" when NULL; copied).
 * cb, cb_data: optional callback invoked later by the framework (stored as-is).
 * Returns 0 on success, EINVAL for an invalid realm, EEXIST if a peer with the same Diameter Id exists. */
int fd_peer_add ( struct peer_info * info, const char * orig_dbg, void (*cb)(struct peer_info *, void *), void * cb_data )
{
	struct fd_peer *p = NULL;
	struct fd_list * li, *li_inf;
	int ret = 0;

	TRACE_ENTRY("%p %p %p %p", info, orig_dbg, cb, cb_data);
	CHECK_PARAMS(info && info->pi_diamid);

	/* Reject a configured realm that is not a valid DiameterIdentity */
	if (info->config.pic_realm) {
		if (!fd_os_is_valid_DiameterIdentity((os0_t)info->config.pic_realm, strlen(info->config.pic_realm))) {
			TRACE_DEBUG(INFO, "'%s' is not a valid DiameterIdentity.", info->config.pic_realm);
			return EINVAL;
		}
	}

	/* Create a structure to contain the new peer information */
	CHECK_FCT( fd_peer_alloc(&p) );

	/* Copy the informations from the parameters received */
	p->p_hdr.info.pi_diamid = info->pi_diamid;
	/* validate and obtain our own copy of the identity -- it is released in fd_peer_free */
	CHECK_FCT( fd_os_validate_DiameterIdentity(&p->p_hdr.info.pi_diamid, &p->p_hdr.info.pi_diamidlen, 1) );

	memcpy( &p->p_hdr.info.config, &info->config, sizeof(p->p_hdr.info.config) );

	/* Duplicate the strings if provided */
	if (info->config.pic_realm) {
		CHECK_MALLOC( p->p_hdr.info.config.pic_realm = strdup(info->config.pic_realm) );
	}
	if (info->config.pic_priority) {
		CHECK_MALLOC( p->p_hdr.info.config.pic_priority = strdup(info->config.pic_priority) );
	}

	/* Move the list of endpoints into the peer */
	if (info->pi_endpoints.next)
		while (!FD_IS_LIST_EMPTY( &info->pi_endpoints ) ) {
			li = info->pi_endpoints.next;
			fd_list_unlink(li);
			fd_list_insert_before(&p->p_hdr.info.pi_endpoints, li);
		}

	/* The internal data */
	if (orig_dbg) {
		CHECK_MALLOC( p->p_dbgorig = strdup(orig_dbg) );
	} else {
		CHECK_MALLOC( p->p_dbgorig = strdup("unspecified") );
	}
	p->p_cb = cb;
	p->p_cb_data = cb_data;

	/* Ok, now check if we don't already have an entry with the same Diameter Id, and insert this one */
	CHECK_POSIX( pthread_rwlock_wrlock(&fd_g_peers_rw) );
	li_inf = &fd_g_peers;
	/* the list is kept ordered; li_inf tracks the element the new peer must be inserted after */
	for (li = fd_g_peers.next; li != &fd_g_peers; li = li->next) {
		struct fd_peer * next = (struct fd_peer *)li;
		int cont;
		int cmp = fd_os_almostcasesrch( p->p_hdr.info.pi_diamid, p->p_hdr.info.pi_diamidlen,
						next->p_hdr.info.pi_diamid, next->p_hdr.info.pi_diamidlen,
						&cont );
		if (cmp > 0)
			li_inf = li; /* it will come after this element, for sure */

		if (cmp == 0) {
			ret = EEXIST; /* we have a duplicate */
			break;
		}
		if (!cont)
			break;
	}

	/* We can insert the new peer object */
	if (! ret)
		do {
			/* Update expiry list */
			CHECK_FCT_DO( ret = fd_p_expi_update( p ), break );

			/* Insert the new element in the list */
			fd_list_insert_after( li_inf, &p->p_hdr.chain );
		} while (0);

	CHECK_POSIX( pthread_rwlock_unlock(&fd_g_peers_rw) );
	if (ret) {
		/* Error path: destroy the partially-built peer object */
		CHECK_FCT( fd_peer_free(&p) );
	} else {
		/* Start the peer state machine for the new entry */
		CHECK_FCT( fd_psm_begin(p) );
	}
	return ret;
}
182
183/* Search for a peer */
184int fd_peer_getbyid( DiamId_t diamid, size_t diamidlen, int igncase, struct peer_hdr ** peer )
185{
186 struct fd_list * li;
187 TRACE_ENTRY("%p %zd %d %p", diamid, diamidlen, igncase, peer);
188 CHECK_PARAMS( diamid && diamidlen && peer );
189
190 *peer = NULL;
191
192 /* Search in the list */
193 CHECK_POSIX( pthread_rwlock_rdlock(&fd_g_peers_rw) );
194 if (igncase) {
195 for (li = fd_g_peers.next; li != &fd_g_peers; li = li->next) {
196 struct fd_peer * next = (struct fd_peer *)li;
197 int cmp, cont;
198 cmp = fd_os_almostcasesrch( diamid, diamidlen, next->p_hdr.info.pi_diamid, next->p_hdr.info.pi_diamidlen, &cont );
199 if (cmp == 0) {
200 *peer = &next->p_hdr;
201 break;
202 }
203 if (!cont)
204 break;
205 }
206 } else {
207 for (li = fd_g_peers.next; li != &fd_g_peers; li = li->next) {
208 struct fd_peer * next = (struct fd_peer *)li;
209 int cmp = fd_os_cmp( diamid, diamidlen, next->p_hdr.info.pi_diamid, next->p_hdr.info.pi_diamidlen );
210 if (cmp > 0)
211 continue;
212 if (cmp == 0)
213 *peer = &next->p_hdr;
214 break;
215 }
216 }
217 CHECK_POSIX( pthread_rwlock_unlock(&fd_g_peers_rw) );
218
219 return 0;
220}
221
222
/* Release a pointer's memory and reset it to NULL so a stale reference cannot
 * be dereferenced or double-freed. The previous `if (_v)` guard was redundant
 * (free(NULL) is a no-op per ISO C); the do { } while (0) wrapper makes the
 * macro a single statement, safe in all contexts (e.g. unbraced if/else). */
#define free_null( _v ) 	\
	do {			\
		free(_v);	\
		(_v) = NULL;	\
	} while (0)

/* Unlink and free every item of the given fd_list head.
 * Wrapped in do { } while (0) for single-statement safety like free_null. */
#define free_list( _l ) 	\
	do {									\
		while (!FD_IS_LIST_EMPTY(_l)) {					\
			struct fd_list * __li = ((struct fd_list *)(_l))->next;	\
			fd_list_unlink(__li);					\
			free(__li);						\
		}								\
	} while (0)
235
/* Empty the lists of p_tosend, p_tofailover, and p_sentreq messages:
 * routable messages are re-queued into the global outgoing queue (so they may
 * be routed through another peer); non-routable ones are freed. */
void fd_peer_failover_msg(struct fd_peer * peer)
{
	struct msg *m;
	TRACE_ENTRY("%p", peer);
	CHECK_PARAMS_DO(CHECK_PEER(peer), return);

	/* Requeue all messages in the "out" queue */
	while ( fd_fifo_tryget(peer->p_tosend, &m) == 0 ) {
		/* but only if they are routable */
		if (fd_msg_is_routable(m)) {
			fd_hook_call(HOOK_MESSAGE_FAILOVER, m, peer, NULL, fd_msg_pmdl_get(m));
			CHECK_FCT_DO(fd_fifo_post_noblock(fd_g_outgoing, (void *)&m),
				{
					/* fallback: destroy the message */
					fd_hook_call(HOOK_MESSAGE_DROPPED, m, NULL, "Internal error: unable to requeue this message during failover process", fd_msg_pmdl_get(m));
					CHECK_FCT_DO(fd_msg_free(m), /* What can we do more? */)
				} );
		} else {
			/* Just free it */
			/* fd_hook_call(HOOK_MESSAGE_DROPPED, m, NULL, "Non-routable message freed during handover", fd_msg_pmdl_get(m)); */
			CHECK_FCT_DO(fd_msg_free(m), /* What can we do more? */)
		}
	}

	/* Requeue all messages in the "failover" queue -- these are requeued unconditionally */
	while ( fd_fifo_tryget(peer->p_tofailover, &m) == 0 ) {
		fd_hook_call(HOOK_MESSAGE_FAILOVER, m, peer, NULL, fd_msg_pmdl_get(m));
		CHECK_FCT_DO(fd_fifo_post_noblock(fd_g_outgoing, (void *)&m),
			{
				/* fallback: destroy the message */
				fd_hook_call(HOOK_MESSAGE_DROPPED, m, NULL, "Internal error: unable to requeue this message during failover process", fd_msg_pmdl_get(m));
				CHECK_FCT_DO(fd_msg_free(m), /* What can we do more? */)
			} );
	}

	/* Requeue all routable sent requests */
	fd_p_sr_failover(&peer->p_sr);

	/* Done */
	return;
}
278
279/* Describe the current connection */
280int fd_peer_cnx_proto_info(struct peer_hdr *peer, char * buf, size_t len)
281{
282 struct fd_peer * p = (struct fd_peer *)peer;
283 TRACE_ENTRY("%p %p %zd", peer, buf, len);
284 CHECK_PARAMS(CHECK_PEER(peer) && buf && len);
285
286 if (p->p_cnxctx) {
287 CHECK_FCT(fd_cnx_proto_info(p->p_cnxctx, buf, len));
288 } else if (p->p_receiver) {
289 CHECK_FCT(fd_cnx_proto_info(p->p_receiver, buf, len));
290 } else {
291 snprintf(buf, len, "Not Connected");
292 }
293
294 return 0;
295}
296
297/* Return the value of srlist->cnt */
298int fd_peer_get_load_pending(struct peer_hdr *peer, long * to_receive, long * to_send)
299{
300 struct fd_peer * p = (struct fd_peer *)peer;
301 TRACE_ENTRY("%p %p %p", peer, to_receive, to_send);
302 CHECK_PARAMS(CHECK_PEER(peer));
303
304 if (to_receive) {
305 CHECK_POSIX( pthread_mutex_lock(&p->p_sr.mtx) );
306 *to_receive = p->p_sr.cnt;
307 CHECK_POSIX( pthread_mutex_unlock(&p->p_sr.mtx) );
308 }
309 if (to_send) {
310 CHECK_POSIX( pthread_mutex_lock(&p->p_state_mtx) );
311 *to_send = p->p_reqin_count;
312 CHECK_POSIX( pthread_mutex_unlock(&p->p_state_mtx) );
313 }
314
315 return 0;
316}
317
318
/* Destroy a structure once cleanups have been performed (fd_psm_abord, ...).
 * The peer must already be unlinked from fd_g_peers (its chain must be empty).
 * *ptr is reset to NULL. If a callback was registered, it is notified with a
 * NULL info pointer before the structure is released. */
int fd_peer_free(struct fd_peer ** ptr)
{
	struct fd_peer *p;

	TRACE_ENTRY("%p", ptr);
	CHECK_PARAMS(ptr);
	p = *ptr;
	*ptr = NULL;
	CHECK_PARAMS(p);

	/* the peer must not be linked in the global list anymore */
	CHECK_PARAMS( FD_IS_LIST_EMPTY(&p->p_hdr.chain) );

	/* Release the duplicated strings and the contents of the internal lists */
	free_null(p->p_hdr.info.pi_diamid);

	free_null(p->p_hdr.info.config.pic_realm);
	free_null(p->p_hdr.info.config.pic_priority);

	free_null(p->p_hdr.info.runtime.pir_realm);
	free_null(p->p_hdr.info.runtime.pir_prodname);
	free_list( &p->p_hdr.info.runtime.pir_apps );

	free_list( &p->p_hdr.info.pi_endpoints );

	free_null(p->p_dbgorig);

	/* Detach from the expiry and active-peers lists */
	fd_list_unlink(&p->p_expiry);
	fd_list_unlink(&p->p_actives);

	/* Destroy the queues and synchronization objects; errors are logged but do not abort the cleanup */
	CHECK_FCT_DO( fd_fifo_del(&p->p_tosend), /* continue */ );
	CHECK_FCT_DO( fd_fifo_del(&p->p_tofailover), /* continue */ );
	CHECK_POSIX_DO( pthread_mutex_destroy(&p->p_state_mtx), /* continue */);
	CHECK_POSIX_DO( pthread_mutex_destroy(&p->p_sr.mtx), /* continue */);
	CHECK_POSIX_DO( pthread_cond_destroy(&p->p_sr.cnd), /* continue */);

	/* If the callback is still around... */
	if (p->p_cb)
		(*p->p_cb)(NULL, p->p_cb_data);

	/* Free the structure */
	free(p);
	return 0;
}
362
/* Terminate peer module (destroy all peers, first gently, then violently).
 * Phase 1: request graceful termination ("REBOOTING") from every PSM;
 * Phase 2: poll up to DPR_TIMEOUT + 1 seconds, collecting zombie peers;
 * Phase 3: forcibly abort whatever remains, then free all collected peers
 * and empty the validators list. */
int fd_peer_fini()
{
	struct fd_list * li;
	struct fd_list purge = FD_LIST_INITIALIZER(purge); /* Store zombie peers here */
	int list_empty;
	struct timespec wait_until, now;

	TRACE_ENTRY();

	/* Stop the peer expiry mechanism first */
	CHECK_FCT_DO(fd_p_expi_fini(), /* continue */);

	TRACE_DEBUG(INFO, "Sending terminate signal to all peer connections");

	/* Phase 1: ask each PSM to terminate; peers already ZOMBIE go straight to the purge list */
	CHECK_FCT_DO( pthread_rwlock_wrlock(&fd_g_peers_rw), /* continue */ );
	for (li = fd_g_peers.next; li != &fd_g_peers; li = li->next) {
		struct fd_peer * peer = (struct fd_peer *)li->o;

		if (fd_peer_getstate(peer) != STATE_ZOMBIE) {
			CHECK_FCT_DO( fd_psm_terminate(peer, "REBOOTING"), /* continue */ );
		} else {
			li = li->prev; /* to avoid breaking the loop */
			fd_list_unlink(&peer->p_hdr.chain);
			fd_list_insert_before(&purge, &peer->p_hdr.chain);
		}
	}
	list_empty = FD_IS_LIST_EMPTY(&fd_g_peers);
	CHECK_FCT_DO( pthread_rwlock_unlock(&fd_g_peers_rw), /* continue */ );

	/* Compute the deadline for the graceful shutdown (wait_until is only read when list_empty is false) */
	if (!list_empty) {
		CHECK_SYS( clock_gettime(CLOCK_REALTIME, &now) );
		fd_psm_start(); /* just in case */
		TRACE_DEBUG(INFO, "Waiting for connections shutdown... (%d sec max)", DPR_TIMEOUT + 1);
		wait_until.tv_sec = now.tv_sec + DPR_TIMEOUT + 1;
		wait_until.tv_nsec = now.tv_nsec;
	}

	/* Phase 2: poll until every peer became a zombie or the deadline expires */
	while ((!list_empty) && (TS_IS_INFERIOR(&now, &wait_until))) {

		/* Allow the PSM(s) to execute */
		usleep(100000);

		/* Remove zombie peers */
		CHECK_FCT_DO( pthread_rwlock_wrlock(&fd_g_peers_rw), /* continue */ );
		for (li = fd_g_peers.next; li != &fd_g_peers; li = li->next) {
			struct fd_peer * peer = (struct fd_peer *)li->o;
			if (fd_peer_getstate(peer) == STATE_ZOMBIE) {
				li = li->prev; /* to avoid breaking the loop */
				fd_list_unlink(&peer->p_hdr.chain);
				fd_list_insert_before(&purge, &peer->p_hdr.chain);
			}
		}
		list_empty = FD_IS_LIST_EMPTY(&fd_g_peers);
		CHECK_FCT_DO( pthread_rwlock_unlock(&fd_g_peers_rw), /* continue */ );
		CHECK_SYS( clock_gettime(CLOCK_REALTIME, &now) );
	}

	/* Phase 3: deadline passed, abort the remaining peers forcibly */
	if (!list_empty) {
		TRACE_DEBUG(INFO, "Forcing connections shutdown");
		CHECK_FCT_DO( pthread_rwlock_wrlock(&fd_g_peers_rw), /* continue */ );
		while (!FD_IS_LIST_EMPTY(&fd_g_peers)) {
			struct fd_peer * peer = (struct fd_peer *)(fd_g_peers.next->o);
			fd_psm_abord(peer);
			fd_list_unlink(&peer->p_hdr.chain);
			fd_list_insert_before(&purge, &peer->p_hdr.chain);
		}
		CHECK_FCT_DO( pthread_rwlock_unlock(&fd_g_peers_rw), /* continue */ );
	}

	/* Free memory objects of all peers */
	while (!FD_IS_LIST_EMPTY(&purge)) {
		struct fd_peer * peer = (struct fd_peer *)(purge.next->o);
		fd_list_unlink(&peer->p_hdr.chain);
		fd_peer_free(&peer);
	}

	/* Now empty the validators list */
	CHECK_FCT_DO( pthread_rwlock_wrlock(&validators_rw), /* continue */ );
	while (!FD_IS_LIST_EMPTY( &validators )) {
		struct fd_list * v = validators.next;
		fd_list_unlink(v);
		free(v);
	}
	CHECK_FCT_DO( pthread_rwlock_unlock(&validators_rw), /* continue */ );

	return 0;
}
450
/* Dump info of one peer into the dump buffer.
 * details == 0 : identity, state and pending counters only.
 * details > 0  : also dump realm and product information.
 * details > 1  : also dump the origin of the entry, configuration flags and lifetime. */
DECLARE_FD_DUMP_PROTOTYPE(fd_peer_dump, struct peer_hdr * p, int details)
{
	FD_DUMP_HANDLE_OFFSET();

	CHECK_MALLOC_DO( fd_dump_extend( FD_DUMP_STD_PARAMS, "{peer}(@%p): ", p), return NULL);

	if (!CHECK_PEER(p)) {
		CHECK_MALLOC_DO( fd_dump_extend( FD_DUMP_STD_PARAMS, "INVALID/NULL"), return NULL);
	} else {
		struct fd_peer * peer = (struct fd_peer *)p;

		/* identity, state and the p_sr.cnt / p_reqin_count counters */
		CHECK_MALLOC_DO( fd_dump_extend( FD_DUMP_STD_PARAMS, "%s [%s, cnt:%ldsr,%ldpa]", peer->p_hdr.info.pi_diamid, STATE_STR(fd_peer_getstate(peer)), peer->p_sr.cnt, peer->p_reqin_count), return NULL);
		if (details > 0) {
			CHECK_MALLOC_DO( fd_dump_extend( FD_DUMP_STD_PARAMS, " rlm:%s", peer->p_hdr.info.runtime.pir_realm ?: "<unknown>"), return NULL);
			if (peer->p_hdr.info.runtime.pir_prodname) {
				CHECK_MALLOC_DO( fd_dump_extend( FD_DUMP_STD_PARAMS, " ['%s' %u]", peer->p_hdr.info.runtime.pir_prodname, peer->p_hdr.info.runtime.pir_firmrev), return NULL);
			}
		}
		if (details > 1) {
			/* one character per configuration flag; '-' means default/unset */
			CHECK_MALLOC_DO( fd_dump_extend( FD_DUMP_STD_PARAMS, " [from:%s] flags:%s%s%s%s%s%s%s%s lft:%ds",
					peer->p_dbgorig ?: "unset",
					peer->p_hdr.info.config.pic_flags.pro3 == PI_P3_DEFAULT ? "-" :
						(peer->p_hdr.info.config.pic_flags.pro3 == PI_P3_IP ? "4" : "6"),
					peer->p_hdr.info.config.pic_flags.pro4 == PI_P4_DEFAULT ? "-" :
						(peer->p_hdr.info.config.pic_flags.pro4 == PI_P4_TCP ? "T" : "S"),
					peer->p_hdr.info.config.pic_flags.alg ? "P" : "-",
					peer->p_hdr.info.config.pic_flags.sec & PI_SEC_NONE ? "N" :"-",
					peer->p_hdr.info.config.pic_flags.sec & PI_SEC_TLS_OLD ? "O" :"-",
					peer->p_hdr.info.config.pic_flags.sctpsec & PI_SCTPSEC_3436 ? "3" :"-",
					peer->p_hdr.info.config.pic_flags.exp ? "E" : "-",
					peer->p_hdr.info.config.pic_flags.persist ? "P" : "-",
					peer->p_hdr.info.config.pic_lft), return NULL);
		}

	}

	return *buf;
}
490
491/* Dump the list of peers */
492DECLARE_FD_DUMP_PROTOTYPE(fd_peer_dump_list, int details)
493{
494 struct fd_list * li;
495 FD_DUMP_HANDLE_OFFSET();
496
497 CHECK_FCT_DO( pthread_rwlock_rdlock(&fd_g_peers_rw), /* continue */ );
498
499 for (li = fd_g_peers.next; li != &fd_g_peers; li = li->next) {
500 CHECK_MALLOC_DO( fd_peer_dump(FD_DUMP_STD_PARAMS, (struct peer_hdr *)li->o, details), break);
501 if (li->next != &fd_g_peers) {
502 CHECK_MALLOC_DO( fd_dump_extend( FD_DUMP_STD_PARAMS, "\n"), break);
503 }
504 }
505
506 CHECK_FCT_DO( pthread_rwlock_unlock(&fd_g_peers_rw), /* continue */ );
507 return *buf;
508}
509
/* Cached dictionary model for the Origin-Host AVP, resolved lazily in fd_peer_handle_newCER under cache_avp_lock */
static struct dict_object *avp_oh_model = NULL;
static pthread_mutex_t cache_avp_lock = PTHREAD_MUTEX_INITIALIZER;
512
/* Handle an incoming CER request on a new connection:
 * extract and validate the Origin-Host AVP, find or create the matching peer
 * entry in fd_g_peers, then forward the connection event to the peer's PSM.
 * On success (return 0), ownership of *cer and *cnx is transferred and both
 * pointers are reset to NULL; on error the caller keeps ownership. */
int fd_peer_handle_newCER( struct msg ** cer, struct cnxctx ** cnx )
{
	struct msg * msg;
	struct avp *avp_oh;
	struct avp_hdr * avp_hdr;
	struct fd_list * li, *li_inf;
	int found = 0;
	int ret = 0;
	struct fd_peer * peer;
	struct cnx_incoming * ev_data;

	TRACE_ENTRY("%p %p", cer, cnx);
	CHECK_PARAMS(cer && *cer && cnx && *cnx);

	msg = *cer;

	/* If needed, resolve the dictionary model for Origin-Host (done once, cached under cache_avp_lock) */
	CHECK_POSIX( pthread_mutex_lock(&cache_avp_lock) );
	if (!avp_oh_model) {
		avp_code_t code = AC_ORIGIN_HOST;
		CHECK_FCT_DO( fd_dict_search ( fd_g_config->cnf_dict, DICT_AVP, AVP_BY_CODE, &code, &avp_oh_model, ENOENT),
			{ LOG_E("Cannot find Origin-Host AVP definition in the dictionary!"); (void) pthread_mutex_unlock(&cache_avp_lock); return __ret__; } );
	}
	CHECK_POSIX( pthread_mutex_unlock(&cache_avp_lock) );

	/* Find the Diameter Identity of the remote peer in the message */
	CHECK_FCT( fd_msg_search_avp ( msg, avp_oh_model, &avp_oh ) );
	ASSERT(avp_oh); /* otherwise it should not have passed rules validation, right? */
	CHECK_FCT( fd_msg_avp_hdr ( avp_oh, &avp_hdr ) );

	/* First, check if the Origin-Host value is valid; if not, answer with an error and reject */
	if (!fd_os_is_valid_DiameterIdentity(avp_hdr->avp_value->os.data, avp_hdr->avp_value->os.len)) {
		CHECK_FCT( fd_msg_new_answer_from_req ( fd_g_config->cnf_dict, cer, MSGFL_ANSW_ERROR ) );
		CHECK_FCT( fd_msg_rescode_set(*cer, "DIAMETER_INVALID_AVP_VALUE",
						"Your Origin-Host contains invalid characters.", avp_oh, 1 ) );

		fd_hook_call(HOOK_PEER_CONNECT_FAILED, *cer, NULL, "Received CER with invalid Origin-Host AVP", NULL);

		CHECK_FCT( fd_out_send(cer, *cnx, NULL, 0) );
		return EINVAL;
	}

	/* Search if we already have this peer id in our list. We take directly the write lock so that we don't need to upgrade if it is a new peer.
	 * There is space for a small optimization here if needed.
	 */
	CHECK_POSIX( pthread_rwlock_wrlock(&fd_g_peers_rw) );

	li_inf = &fd_g_peers;
	/* the list is kept ordered; li_inf tracks the insertion point for a new entry */
	for (li = fd_g_peers.next; li != &fd_g_peers; li = li->next) {
		int cmp, cont;
		peer = (struct fd_peer *)li;
		cmp = fd_os_almostcasesrch( avp_hdr->avp_value->os.data, avp_hdr->avp_value->os.len, peer->p_hdr.info.pi_diamid, peer->p_hdr.info.pi_diamidlen, &cont );
		if (cmp > 0) {
			li_inf = li;
		}
		if (cmp == 0) {
			found = 1;
			break;
		}
		if (!cont)
			break;
	}

	if (!found) {
		/* Create a new peer entry for this new remote peer */
		peer = NULL;
		CHECK_FCT_DO( ret = fd_peer_alloc(&peer), goto out );

		/* Set the peer Diameter Id and the responder flag parameters */
		CHECK_MALLOC_DO( peer->p_hdr.info.pi_diamid = os0dup(avp_hdr->avp_value->os.data, avp_hdr->avp_value->os.len),
			{ ret = ENOMEM; goto out; } );
		peer->p_hdr.info.pi_diamidlen = avp_hdr->avp_value->os.len;
		CHECK_MALLOC_DO( peer->p_dbgorig = strdup(fd_cnx_getid(*cnx)), { ret = ENOMEM; goto out; } );
		peer->p_flags.pf_responder = 1;
		peer->p_flags.pf_delete = 1;

		LOG_D("Created new peer object for incoming CER: %s", peer->p_hdr.info.pi_diamid);

#ifndef DISABLE_PEER_EXPIRY
		/* Set this peer to expire on inactivity */
		peer->p_hdr.info.config.pic_flags.exp = PI_EXP_INACTIVE;
		peer->p_hdr.info.config.pic_lft = 3600;	/* 1 hour without any message
					-- RFC3539 states that this must not be inferior to BRINGDOWN_INTERVAL = 5 minutes */

		CHECK_FCT_DO( ret = fd_p_expi_update( peer ), goto out );
#endif /* DISABLE_PEER_EXPIRY */

		/* Insert the new peer in the list (the PSM will take care of setting the expiry after validation) */
		fd_list_insert_after( li_inf, &peer->p_hdr.chain );

		/* Start the PSM, which will receive the event below */
		CHECK_FCT_DO( ret = fd_psm_begin(peer), goto out );
	} else {
		/* Check if the peer is in zombie state */
		if (fd_peer_getstate(peer) == STATE_ZOMBIE) {
			/* Re-activate the peer */
			if (peer->p_hdr.info.config.pic_flags.exp)
				peer->p_flags.pf_responder = 1;
			CHECK_POSIX_DO( pthread_mutex_lock(&peer->p_state_mtx), );
			peer->p_state = STATE_NEW;
			CHECK_POSIX_DO( pthread_mutex_unlock(&peer->p_state_mtx), );
			peer->p_flags.pf_localterm = 0;
			CHECK_FCT_DO( ret = fd_psm_begin(peer), goto out );
		}
	}

	/* Send the new connection event to the PSM */
	CHECK_MALLOC_DO( ev_data = malloc(sizeof(struct cnx_incoming)), { ret = ENOMEM; goto out; } );
	memset(ev_data, 0, sizeof(*ev_data));

	ev_data->cer = msg;
	ev_data->cnx = *cnx;
	ev_data->validate = !found;	/* peers we did not know beforehand must go through validation */

	CHECK_FCT_DO( ret = fd_event_send(peer->p_events, FDEVP_CNX_INCOMING, sizeof(*ev_data), ev_data), goto out );

out:
	CHECK_POSIX( pthread_rwlock_unlock(&fd_g_peers_rw) );

	if (ret == 0) {
		/* Reset the "out" parameters, so that they are not cleanup on function return. */
		*cer = NULL;
		*cnx = NULL;
	} else {
		char buf[1024];
		snprintf(buf, sizeof(buf), "An error occurred while processing new incoming CER: %s", strerror(ret));
		fd_hook_call(HOOK_PEER_CONNECT_FAILED, *cer, NULL, buf, NULL);
	}

	return ret;
}
645
646/* Save a callback to accept / reject incoming unknown peers */
647int fd_peer_validate_register ( int (*peer_validate)(struct peer_info * /* info */, int * /* auth */, int (**cb2)(struct peer_info *)) )
648{
649 struct fd_list * v;
650
651 TRACE_ENTRY("%p", peer_validate);
652 CHECK_PARAMS(peer_validate);
653
654 /* Alloc a new entry */
655 CHECK_MALLOC( v = malloc(sizeof(struct fd_list)) );
656 fd_list_init( v, peer_validate );
657
658 /* Add at the beginning of the list */
659 CHECK_FCT( pthread_rwlock_wrlock(&validators_rw) );
660 fd_list_insert_after(&validators, v);
661 CHECK_FCT( pthread_rwlock_unlock(&validators_rw));
662
663 /* Done! */
664 return 0;
665}
666
/* Validate a peer by calling the callbacks in turn -- return 0 if the peer is validated, ! 0 in case of error (>0) or if the peer is rejected (-1) */
int fd_peer_validate( struct fd_peer * peer )
{
	int ret = 0;
	struct fd_list * v;

	CHECK_FCT( pthread_rwlock_rdlock(&validators_rw) );
	for (v = validators.next; v != &validators; v = v->next) {
		int auth = 0;
		/* ensure the rwlock is released if the thread is cancelled inside the callback */
		pthread_cleanup_push(fd_cleanup_rwlock, &validators_rw);
		/* "o" holds the registered callback; it may set auth (>0 accept, <0 reject) and optionally p_cb2 */
		CHECK_FCT_DO( ret = ((int(*)(struct peer_info *, int *, int (**)(struct peer_info *)))(v->o)) (&peer->p_hdr.info, &auth, &peer->p_cb2), );
		pthread_cleanup_pop(0);
		if (ret)
			goto out;
		if (auth) {
			ret = (auth > 0) ? 0 : -1;
			goto out;
		}
		/* this validator gave no verdict: discard any cb2 it may have set and try the next one */
		peer->p_cb2 = NULL;
	}

	/* No callback has given a firm result, the default is to reject */
	ret = -1;
out:
	CHECK_FCT( pthread_rwlock_unlock(&validators_rw));
	return ret;
}