Brian Waters | 13d9601 | 2017-12-08 16:53:31 -0600 | [diff] [blame] | 1 | /********************************************************************************************************* |
| 2 | * Software License Agreement (BSD License) * |
| 3 | * Author: Sebastien Decugis <sdecugis@freediameter.net> * |
| 4 | * * |
| 5 | * Copyright (c) 2013, WIDE Project and NICT * |
| 6 | * All rights reserved. * |
| 7 | * * |
| 8 | * Redistribution and use of this software in source and binary forms, with or without modification, are * |
| 9 | * permitted provided that the following conditions are met: * |
| 10 | * * |
| 11 | * * Redistributions of source code must retain the above * |
| 12 | * copyright notice, this list of conditions and the * |
| 13 | * following disclaimer. * |
| 14 | * * |
| 15 | * * Redistributions in binary form must reproduce the above * |
| 16 | * copyright notice, this list of conditions and the * |
| 17 | * following disclaimer in the documentation and/or other * |
| 18 | * materials provided with the distribution. * |
| 19 | * * |
| 20 | * * Neither the name of the WIDE Project or NICT nor the * |
| 21 | * names of its contributors may be used to endorse or * |
| 22 | * promote products derived from this software without * |
| 23 | * specific prior written permission of WIDE Project and * |
| 24 | * NICT. * |
| 25 | * * |
| 26 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED * |
| 27 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A * |
| 28 | * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR * |
| 29 | * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * |
| 30 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * |
| 31 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR * |
| 32 | * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * |
| 33 | * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * |
| 34 | *********************************************************************************************************/ |
| 35 | |
| 36 | /* Manage the list of RADIUS clients, along with their shared secrets. */ |
| 37 | |
| 38 | /* Probably some changes are needed to support RADIUS Proxies */ |
| 39 | |
| 40 | #include "rgw.h" |
| 41 | |
#define REVERSE_DNS_SIZE_MAX	512 /* length of our buffer for reverse DNS lookups */
#define DUPLICATE_CHECK_LIFETIME 60 /* number of seconds that the received RADIUS records are kept for duplicate checking . TODO: make it configurable if needed */

/* Ordered lists of clients, one per address family.
   The order relationship is a memcmp on the address zone.
   For same addresses, the port is compared.
   The same address cannot be added twice, once with a 0-port and once with another port value.
 */
static struct fd_list cli_ip = FD_LIST_INITIALIZER(cli_ip);
static struct fd_list cli_ip6 = FD_LIST_INITIALIZER(cli_ip6);

/* Lock to protect the previous lists. We use a rwlock because this list is mostly static, to allow parallel reading */
static pthread_rwlock_t cli_rwl = PTHREAD_RWLOCK_INITIALIZER;
| 54 | |
/* Structure describing one received RADIUS message, for duplicate checks purpose.
   Each entry is linked both in an id-ordered list (lookup) and a time-ordered list (expiry). */
struct req_info {
	uint16_t	port;	/* UDP source port of the request */
	uint8_t		id;	/* The identifier in the request header */
	uint8_t		auth[16]; /* Request authenticator, since some RADIUS clients do not implement the id mechanism properly. */
	struct radius_msg *ans; /* The replied answer if any, in case the previous answer got lost; re-sent on duplicate. */

	int		nbdup;	/* Number of times this request was received as a duplicate */
	struct fd_list	by_id;	  /* The list of requests ordered by their id, port, and auth */
	time_t		received; /* When was the last duplicate received? (refreshed on each duplicate) */
	struct fd_list	by_time;  /* The list of requests ordered by the 'received' value; oldest first, so expiry scans from the head. */
};

/* The thread that will remove old requests information from all clients (one thread for all clients) */
static pthread_t dbt_expire = (pthread_t)NULL;
| 69 | |
/* Structure describing one RADIUS client (or proxy) known to the gateway. */
struct rgw_client {
	/* Link information in global list (cli_ip or cli_ip6) */
	struct fd_list		chain;

	/* Reference count; the object is freed when it drops to 0 (see client_unlink) */
	int			refcount;

	/* The address and optional port (alloc'd during configuration file parsing).
	   Anonymous union: all three members alias the same pointer, interpreted per sa_family. */
	union {
		struct sockaddr		*sa;   /* generic pointer */
		struct sockaddr_in	*sin;
		struct sockaddr_in6	*sin6;
	};

	/* The FQDN, realm, and optional aliases */
	int			 is_local; /* true if the RADIUS client runs on the same host -- we use the local Diameter Identity in that case */
	enum rgw_cli_type 	 type;     /* is it a proxy ? */
	DiamId_t		 fqdn;     /* malloc'd here */
	size_t			 fqdn_len;
	DiamId_t		 realm;	   /* references another string (suffix of fqdn or global config), do not free */
	size_t			 realm_len;
	struct {
		os0_t		 name;
		size_t		 len;
	}			*aliases;  /* Received aliases (accepted NAS-Identifier values) */
	size_t			 aliases_nb;

	/* The shared secret key data, used to authenticate RADIUS messages. */
	struct {
		unsigned char	*data;
		size_t		 len;
	} 			 key;

	/* information of previous msg received, for duplicate checks. */
	struct {
		pthread_mutex_t	dupl_lock;    /* The mutex protecting the following lists */
		struct fd_list	dupl_by_id;   /* The list of req_info structures ordered by their id, port, and auth */
		struct fd_list	dupl_by_time; /* The list of req_info structures ordered by their time (approximative) */
	}			 dupl_info[2]; /* [0] for auth, [1] for acct. */
};
| 111 | |
| 112 | |
| 113 | /* Create a new req_info structure and initialize its data from a RADIUS request message */ |
| 114 | static struct req_info * dupl_new_req_info(struct rgw_radius_msg_meta *msg) { |
| 115 | struct req_info * ret = NULL; |
| 116 | CHECK_MALLOC_DO( ret = malloc(sizeof(struct req_info)), return NULL ); |
| 117 | memset(ret, 0, sizeof(struct req_info)); |
| 118 | ret->port = msg->port; |
| 119 | ret->id = msg->radius.hdr->identifier; |
| 120 | memcpy(&ret->auth[0], &msg->radius.hdr->authenticator[0], 16); |
| 121 | fd_list_init(&ret->by_id, ret); |
| 122 | fd_list_init(&ret->by_time, ret); |
| 123 | ret->received = time(NULL); |
| 124 | return ret; |
| 125 | } |
| 126 | |
| 127 | /* Destroy a req_info structure, after it has been unlinked */ |
| 128 | static void dupl_free_req_info(struct req_info * r) { |
| 129 | CHECK_PARAMS_DO( r && FD_IS_LIST_EMPTY(&r->by_id) && FD_IS_LIST_EMPTY(&r->by_time), return ); |
| 130 | if (r->ans) { |
| 131 | /* Free this RADIUS message */ |
| 132 | radius_msg_free(r->ans); |
| 133 | free(r->ans); |
| 134 | } |
| 135 | |
| 136 | /* Use r->nbdup for some purpose? */ |
| 137 | |
| 138 | free(r); |
| 139 | } |
| 140 | |
| 141 | /* The core of the purge thread */ |
| 142 | static int dupl_purge_list(struct fd_list * clients) { |
| 143 | |
| 144 | struct fd_list *li = NULL; |
| 145 | |
| 146 | for (li = clients->next; li != clients; li = li->next) { |
| 147 | struct rgw_client * client = (struct rgw_client *)li; |
| 148 | int p; |
| 149 | |
| 150 | for (p=0; p<=1; p++) { |
| 151 | |
| 152 | /* Lock this list */ |
| 153 | time_t now; |
| 154 | CHECK_POSIX( pthread_mutex_lock(&client->dupl_info[p].dupl_lock) ); |
| 155 | |
| 156 | now = time(NULL); |
| 157 | |
| 158 | while (!FD_IS_LIST_EMPTY(&client->dupl_info[p].dupl_by_time)) { |
| 159 | |
| 160 | /* Check the first item in the list */ |
| 161 | struct req_info * r = (struct req_info *)(client->dupl_info[p].dupl_by_time.next->o); |
| 162 | |
| 163 | if (now - r->received > DUPLICATE_CHECK_LIFETIME) { |
| 164 | |
| 165 | TRACE_DEBUG(ANNOYING + 1, "Purging RADIUS request (id: %02hhx, port: %hu, dup #%d, age %ld secs)", r->id, ntohs(r->port), r->nbdup, (long)(now - r->received)); |
| 166 | |
| 167 | /* Remove this record */ |
| 168 | fd_list_unlink(&r->by_time); |
| 169 | fd_list_unlink(&r->by_id); |
| 170 | dupl_free_req_info(r); |
| 171 | } else { |
| 172 | /* We are done for this list */ |
| 173 | break; |
| 174 | } |
| 175 | } |
| 176 | |
| 177 | CHECK_POSIX( pthread_mutex_unlock(&client->dupl_info[p].dupl_lock) ); |
| 178 | } |
| 179 | } |
| 180 | return 0; |
| 181 | } |
| 182 | |
| 183 | /* Thread that purges old RADIUS requests */ |
| 184 | static void * dupl_th(void * arg) { |
| 185 | /* Set the thread name */ |
| 186 | fd_log_threadname ( "app_radgw:duplicate_purge" ); |
| 187 | |
| 188 | /* The thread will be canceled */ |
| 189 | while (1) { |
| 190 | |
| 191 | /* We don't use a cond var, we simply wake up every 5 seconds. If the size of the duplicate cache is critical, it might be changed */ |
| 192 | sleep(5); |
| 193 | |
| 194 | /* When we wake up, we will check all clients duplicate lists one by one */ |
| 195 | CHECK_POSIX_DO( pthread_rwlock_rdlock(&cli_rwl), break ); |
| 196 | |
| 197 | CHECK_FCT_DO( dupl_purge_list(&cli_ip), break ); |
| 198 | CHECK_FCT_DO( dupl_purge_list(&cli_ip6), break ); |
| 199 | |
| 200 | CHECK_POSIX_DO( pthread_rwlock_unlock(&cli_rwl), break ); |
| 201 | |
| 202 | /* Loop */ |
| 203 | } |
| 204 | |
| 205 | /* If we reach this part, some fatal error was encountered */ |
| 206 | CHECK_FCT_DO(fd_core_shutdown(), ); |
| 207 | TRACE_DEBUG(FULL, "Thread terminated"); |
| 208 | return NULL; |
| 209 | } |
| 210 | |
| 211 | |
| 212 | /* create a new rgw_client. the arguments are MOVED into the structure (to limit malloc & free calls). */ |
| 213 | static int client_create(struct rgw_client ** res, struct sockaddr ** ip_port, unsigned char ** key, size_t keylen, enum rgw_cli_type type ) |
| 214 | { |
| 215 | struct rgw_client *tmp = NULL; |
| 216 | DiamId_t fqdn; |
| 217 | size_t fqdn_len = 0; |
| 218 | int ret, i; |
| 219 | int loc = 0; |
| 220 | |
| 221 | /* Check if the IP address is local */ |
| 222 | if ( ( ((*ip_port)->sa_family == AF_INET ) && ( IN_IS_ADDR_LOOPBACK( &((struct sockaddr_in *)(*ip_port))->sin_addr ) ) ) |
| 223 | ||( ((*ip_port)->sa_family == AF_INET6) && ( IN6_IS_ADDR_LOOPBACK( &((struct sockaddr_in6 *)(*ip_port))->sin6_addr) ) )) { |
| 224 | /* The client is local */ |
| 225 | loc = 1; |
| 226 | } else { |
| 227 | char buf[255]; |
| 228 | |
| 229 | /* Search FQDN for the client */ |
| 230 | ret = getnameinfo( *ip_port, sizeof(struct sockaddr_storage), &buf[0], sizeof(buf), NULL, 0, 0 ); |
| 231 | if (ret) { |
| 232 | TRACE_DEBUG(INFO, "Unable to resolve peer name: %s", gai_strerror(ret)); |
| 233 | return EINVAL; |
| 234 | } |
| 235 | fqdn = &buf[0]; |
| 236 | CHECK_FCT_DO( ret = fd_os_validate_DiameterIdentity(&fqdn, &fqdn_len, 1), |
| 237 | { |
| 238 | TRACE_DEBUG(INFO, "Unable to use resolved peer name '%s' as DiameterIdentity: %s", buf, strerror(ret)); |
| 239 | return ret; |
| 240 | } ); |
| 241 | } |
| 242 | |
| 243 | /* Create the new object */ |
| 244 | CHECK_MALLOC( tmp = malloc(sizeof (struct rgw_client)) ); |
| 245 | memset(tmp, 0, sizeof(struct rgw_client)); |
| 246 | fd_list_init(&tmp->chain, NULL); |
| 247 | |
| 248 | /* Initialize the duplicate list info */ |
| 249 | for (i=0; i<=1; i++) { |
| 250 | CHECK_POSIX( pthread_mutex_init(&tmp->dupl_info[i].dupl_lock, NULL) ); |
| 251 | fd_list_init(&tmp->dupl_info[i].dupl_by_id, NULL); |
| 252 | fd_list_init(&tmp->dupl_info[i].dupl_by_time, NULL); |
| 253 | } |
| 254 | tmp->type = type; |
| 255 | |
| 256 | if (loc) { |
| 257 | tmp->is_local = 1; |
| 258 | } else { |
| 259 | /* Copy the fqdn */ |
| 260 | tmp->fqdn = fqdn; |
| 261 | tmp->fqdn_len = fqdn_len; |
| 262 | |
| 263 | /* Find an appropriate realm */ |
| 264 | tmp->realm = strchr(fqdn, '.'); |
| 265 | if (tmp->realm) { |
| 266 | tmp->realm += 1; |
| 267 | tmp->realm_len = tmp->fqdn_len - (tmp->realm - fqdn); |
| 268 | } |
| 269 | if ((!tmp->realm) || (*tmp->realm == '\0')) { /* in case the fqdn was "localhost." for example, if it is possible... */ |
| 270 | tmp->realm = fd_g_config->cnf_diamrlm; |
| 271 | tmp->realm_len = fd_g_config->cnf_diamrlm_len; |
| 272 | } |
| 273 | } |
| 274 | |
| 275 | /* move the sa info reference */ |
| 276 | tmp->sa = *ip_port; |
| 277 | *ip_port = NULL; |
| 278 | |
| 279 | /* move the key material */ |
| 280 | tmp->key.data = *key; |
| 281 | tmp->key.len = keylen; |
| 282 | *key = NULL; |
| 283 | |
| 284 | /* Done! */ |
| 285 | *res = tmp; |
| 286 | return 0; |
| 287 | } |
| 288 | |
| 289 | /* Decrease refcount on a client; the lock must be held when this function is called. */ |
| 290 | static void client_unlink(struct rgw_client * client) |
| 291 | { |
| 292 | client->refcount -= 1; |
| 293 | |
| 294 | if (client->refcount <= 0) { |
| 295 | int idx; |
| 296 | /* to be sure: the refcount should be 0 only when client_fini is called */ |
| 297 | ASSERT( FD_IS_LIST_EMPTY(&client->chain) ); |
| 298 | |
| 299 | /* Free the data */ |
| 300 | for (idx = 0; idx < client->aliases_nb; idx++) |
| 301 | free(client->aliases[idx].name); |
| 302 | free(client->aliases); |
| 303 | free(client->fqdn); |
| 304 | free(client->sa); |
| 305 | free(client->key.data); |
| 306 | |
| 307 | /* Free the duplicate info */ |
| 308 | for (idx=0; idx <= 1; idx++){ |
| 309 | CHECK_POSIX_DO( pthread_mutex_lock( &client->dupl_info[idx].dupl_lock ), /* continue */ ); |
| 310 | |
| 311 | while (!FD_IS_LIST_EMPTY(&client->dupl_info[idx].dupl_by_id)) { |
| 312 | struct req_info * r = (struct req_info *)(client->dupl_info[idx].dupl_by_id.next->o); |
| 313 | fd_list_unlink( &r->by_id ); |
| 314 | fd_list_unlink( &r->by_time ); |
| 315 | dupl_free_req_info(r); |
| 316 | } |
| 317 | |
| 318 | CHECK_POSIX_DO( pthread_mutex_unlock( &client->dupl_info[idx].dupl_lock ), /* continue */ ); |
| 319 | |
| 320 | } |
| 321 | |
| 322 | free(client); |
| 323 | } |
| 324 | } |
| 325 | |
| 326 | |
/* Macro to avoid duplicating the code in the next function: expands to one 'case' of the
   switch on address family. '_family_' is either empty (IPv4) or '6' (IPv6); token pasting
   then selects AF_INET/AF_INET6, sockaddr_in/sockaddr_in6, cli_ip/cli_ip6, etc. */
#define client_search_family( _family_ )										\
		case AF_INET##_family_: {										\
			struct sockaddr_in##_family_ * sin##_family_ = (struct sockaddr_in##_family_ *)ip_port;	\
			for (ref = cli_ip##_family_.next; ref != &cli_ip##_family_; ref = ref->next) {		\
				cmp = memcmp(&sin##_family_->sin##_family_##_addr, 					\
					     &((struct rgw_client *)ref)->sin##_family_->sin##_family_##_addr, 	\
					     sizeof(struct in##_family_##_addr));					\
				if (cmp > 0) continue;	/* search further in the list */				\
				if (cmp < 0) break;	/* this IP is not in the list */				\
				/* Now compare the ports as follow: */							\
				/* If the ip_port we are searching does not contain a port, just return the first match result */	\
				if ( (sin##_family_->sin##_family_##_port == 0) 					\
				/* If the entry in the list does not contain a port, return it as a match */		\
				   || (((struct rgw_client *)ref)->sin##_family_->sin##_family_##_port == 0) 		\
				/* If both ports are equal, it is a match */						\
				   || (sin##_family_->sin##_family_##_port == 						\
				   	((struct rgw_client *)ref)->sin##_family_->sin##_family_##_port)) {		\
					*res = (struct rgw_client *)ref;						\
					return EEXIST;									\
				}											\
				/* Otherwise, the list is ordered by port value (byte order does not matter */		\
				if (sin##_family_->sin##_family_##_port 						\
					> ((struct rgw_client *)ref)->sin##_family_->sin##_family_##_port) continue;	\
				else break;										\
			}												\
			*res = (struct rgw_client *)(ref->prev);							\
			return ENOENT;											\
		}
/* Function to look for an existing rgw_client, or the previous element.
   The cli_rwl must be held for reading (at least) when calling this function.
   Returns ENOENT if the matching client does not exist, and res points to the previous element in the list
   (i.e. the insertion point for a new entry; may be the list head cast to rgw_client when the list is empty).
   Returns EEXIST if the matching client is found, and res points to this element.
   Returns other error code on other error. */
static int client_search(struct rgw_client ** res, struct sockaddr * ip_port )
{
	int cmp;
	struct fd_list *ref = NULL;

	CHECK_PARAMS(res && ip_port);

	/* Each macro expansion returns from within the switch on a hit or miss */
	switch (ip_port->sa_family) {
		client_search_family()
				break;

		client_search_family( 6 )
				break;
	}

	/* We're never supposed to reach this point: sa_family was neither AF_INET nor AF_INET6 */
	ASSERT(0);
	return EINVAL;
}
| 380 | |
| 381 | int rgw_clients_getkey(struct rgw_client * cli, unsigned char **key, size_t *key_len) |
| 382 | { |
| 383 | CHECK_PARAMS( cli && key && key_len ); |
| 384 | *key = cli->key.data; |
| 385 | *key_len = cli->key.len; |
| 386 | return 0; |
| 387 | } |
| 388 | |
| 389 | int rgw_clients_gettype(struct rgw_client * cli, enum rgw_cli_type *type) |
| 390 | { |
| 391 | CHECK_PARAMS( cli && type ); |
| 392 | *type = cli->type; |
| 393 | return 0; |
| 394 | } |
| 395 | |
| 396 | |
| 397 | int rgw_clients_search(struct sockaddr * ip_port, struct rgw_client ** ref) |
| 398 | { |
| 399 | int ret = 0; |
| 400 | |
| 401 | TRACE_ENTRY("%p %p", ip_port, ref); |
| 402 | |
| 403 | CHECK_PARAMS(ip_port && ref); |
| 404 | |
| 405 | CHECK_POSIX( pthread_rwlock_rdlock(&cli_rwl) ); |
| 406 | |
| 407 | ret = client_search(ref, ip_port); |
| 408 | if (ret == EEXIST) { |
| 409 | (*ref)->refcount ++; |
| 410 | ret = 0; |
| 411 | } else { |
| 412 | *ref = NULL; |
| 413 | } |
| 414 | |
| 415 | CHECK_POSIX( pthread_rwlock_unlock(&cli_rwl) ); |
| 416 | |
| 417 | return ret; |
| 418 | } |
| 419 | |
/* Check whether the received RADIUS request is a duplicate of one already in the client's cache.
   If it is a duplicate: re-send the cached answer (when one exists), refresh the cache entry's
   timestamp, and FREE the message (*msg is set to NULL by rgw_msg_free). Otherwise record the
   request in the cache so later retransmissions can be detected. Returns 0, or ENOMEM. */
int rgw_clients_check_dup(struct rgw_radius_msg_meta **msg, struct rgw_client *cli)
{
	int p, dup = 0;
	struct fd_list * li;
	struct req_info * r;
	
	TRACE_ENTRY("%p %p", msg, cli);
	
	CHECK_PARAMS( msg && cli );
	
	/* Select the duplicate cache: [0] for auth requests, [1] for acct */
	if ((*msg)->serv_type == RGW_PLG_TYPE_AUTH)
		p = 0;
	else
		p = 1;
	
	CHECK_POSIX( pthread_mutex_lock( &cli->dupl_info[p].dupl_lock ) );
	
	/* Search if we have this message in our list.
	   The list is ordered by id, then port, then authenticator, so we can stop
	   as soon as we pass the would-be position of the searched entry. */
	for (li = cli->dupl_info[p].dupl_by_id.next; li != &cli->dupl_info[p].dupl_by_id; li = li->next) {
		int cmp = 0;
		r = (struct req_info *)(li->o);
		if (r->id < (*msg)->radius.hdr->identifier)
			continue;
		if (r->id > (*msg)->radius.hdr->identifier)
			break;
		if (r->port < (*msg)->port)
			continue;
		if (r->port > (*msg)->port)
			break;
		cmp = memcmp(&r->auth[0], &(*msg)->radius.hdr->authenticator[0], 16);
		if (cmp < 0)
			continue;
		if (cmp > 0)
			break;
		dup = 1;
		break;
	}
	
	if (dup) {
		time_t now = time(NULL);
		r->nbdup += 1;
		TRACE_DEBUG(INFO, "Received duplicated RADIUS message (id: %02hhx, port: %hu, dup #%d, previously seen %ld secs ago).", 
				r->id, ntohs(r->port), r->nbdup, (long)(now - r->received));
		
		if (r->ans) {
			/* Resend the answer we already produced for this request */
			CHECK_FCT_DO( rgw_servers_send((*msg)->serv_type, r->ans->buf, r->ans->buf_used, cli->sa, r->port),  );
			
			/* Should we delete 'r' so that a further duplicate will again be converted to Diameter? */
		}
		
		/* Update the timestamp and move to the tail of the time-ordered list
		   (the list stays oldest-first, which the purge thread relies on) */
		r->received = now;
		fd_list_unlink(&r->by_time);
		fd_list_insert_before(&cli->dupl_info[p].dupl_by_time, &r->by_time); /* Move as last entry, since it is the most recent */
		
		/* Delete the request message -- the caller must not process it further */
		rgw_msg_free(msg);
		
	} else {
		/* The message was not a duplicate, we save it */
		/* li currently points to the next entry in the by_id list (or its head if we scanned
		   to the end), so inserting before li keeps the id/port/auth ordering intact */
		CHECK_MALLOC_DO( r= dupl_new_req_info(*msg), { CHECK_POSIX_DO(pthread_mutex_unlock( &cli->dupl_info[p].dupl_lock ), ); return ENOMEM; } );
		fd_list_insert_before(li, &r->by_id);
		fd_list_insert_before(&cli->dupl_info[p].dupl_by_time, &r->by_time); /* it is the most recent */
	}
	
	CHECK_POSIX( pthread_mutex_unlock( &cli->dupl_info[p].dupl_lock ) );
	
	return 0;
}
| 491 | |
| 492 | /* Check if the message has a valid authenticator, and update the meta-data accordingly */ |
| 493 | int rgw_clients_auth_check(struct rgw_radius_msg_meta * msg, struct rgw_client * cli, uint8_t * req_auth) |
| 494 | { |
| 495 | unsigned char * key; |
| 496 | size_t keylen; |
| 497 | int count; |
| 498 | |
| 499 | TRACE_ENTRY("%p %p %p", msg, cli, req_auth); |
| 500 | |
| 501 | CHECK_PARAMS(msg && cli); |
| 502 | |
| 503 | CHECK_FCT(rgw_clients_getkey(cli, &key, &keylen)); |
| 504 | |
| 505 | count = radius_msg_count_attr(&msg->radius, RADIUS_ATTR_MESSAGE_AUTHENTICATOR, 0); |
| 506 | if (count > 1) { |
| 507 | TRACE_DEBUG(INFO, "Too many Message-Authenticator attributes (%d), discarding message.", count); |
| 508 | return EINVAL; |
| 509 | } |
| 510 | if (count == 0) { |
| 511 | TRACE_DEBUG(FULL, "Message does not contain a Message-Authenticator attributes."); |
| 512 | msg->valid_mac = 0; |
| 513 | } else { |
| 514 | if (radius_msg_verify_msg_auth( &msg->radius, key, keylen, req_auth )) { |
| 515 | TRACE_DEBUG(INFO, "Invalid Message-Authenticator received, discarding message."); |
| 516 | return EINVAL; |
| 517 | } |
| 518 | msg->valid_mac = 1; |
| 519 | } |
| 520 | |
| 521 | return 0; |
| 522 | } |
| 523 | |
/* Dictionary objects resolved once at init time (see rgw_clients_init) and reused
   when building the Diameter messages, to avoid repeated dictionary searches. */
static struct dict_object * cache_orig_host = NULL;
static struct dict_object * cache_orig_realm = NULL;
static struct dict_object * cache_route_record = NULL;
| 527 | |
| 528 | int rgw_clients_init(void) |
| 529 | { |
| 530 | TRACE_ENTRY(); |
| 531 | CHECK_FCT( fd_dict_search(fd_g_config->cnf_dict, DICT_AVP, AVP_BY_NAME, "Origin-Host", &cache_orig_host, ENOENT) ); |
| 532 | CHECK_FCT( fd_dict_search(fd_g_config->cnf_dict, DICT_AVP, AVP_BY_NAME, "Origin-Realm", &cache_orig_realm, ENOENT) ); |
| 533 | CHECK_FCT( fd_dict_search(fd_g_config->cnf_dict, DICT_AVP, AVP_BY_NAME, "Route-Record", &cache_route_record, ENOENT) ); |
| 534 | |
| 535 | /* Create the thread that will purge old RADIUS duplicates */ |
| 536 | CHECK_POSIX( pthread_create( &dbt_expire, NULL, dupl_th, NULL) ); |
| 537 | |
| 538 | return 0; |
| 539 | } |
| 540 | |
| 541 | |
| 542 | /* The following function checks if a RADIUS message contains a valid NAS identifier, and initializes an empty Diameter |
| 543 | message with the appropriate routing information */ |
| 544 | /* Check that the NAS-IP-Adress or NAS-Identifier is coherent with the IP the packet was received from */ |
| 545 | /* Also update the client list of aliases if needed */ |
| 546 | int rgw_clients_create_origin(struct rgw_radius_msg_meta *msg, struct rgw_client * cli, struct msg ** diam) |
| 547 | { |
| 548 | int idx; |
| 549 | int valid_nas_info = 0; |
| 550 | struct radius_attr_hdr *nas_ip = NULL, *nas_ip6 = NULL, *nas_id = NULL; |
| 551 | size_t nas_id_len; |
| 552 | char * oh_str = NULL; size_t oh_strlen = 0; int oh_free = 0; |
| 553 | char * or_str = NULL; size_t or_strlen = 0; |
| 554 | char * rr_str = NULL; size_t rr_strlen = 0; |
| 555 | char buf[REVERSE_DNS_SIZE_MAX]; /* to store DNS lookups results */ |
| 556 | |
| 557 | struct avp *avp = NULL; |
| 558 | union avp_value avp_val; |
| 559 | |
| 560 | TRACE_ENTRY("%p %p %p", msg, cli, diam); |
| 561 | CHECK_PARAMS(msg && cli && diam && (*diam == NULL)); |
| 562 | |
| 563 | /* Find the relevant attributes, if any */ |
| 564 | for (idx = 0; idx < msg->radius.attr_used; idx++) { |
| 565 | struct radius_attr_hdr * attr = (struct radius_attr_hdr *)(msg->radius.buf + msg->radius.attr_pos[idx]); |
| 566 | size_t attr_len = attr->length - sizeof(struct radius_attr_hdr); |
| 567 | |
| 568 | if ((attr->type == RADIUS_ATTR_NAS_IP_ADDRESS) && (attr_len = 4)) { |
| 569 | nas_ip = attr; |
| 570 | continue; |
| 571 | } |
| 572 | |
| 573 | if ((attr->type == RADIUS_ATTR_NAS_IDENTIFIER) && (attr_len > 0)) { |
| 574 | nas_id = attr; |
| 575 | nas_id_len = attr_len; |
| 576 | continue; |
| 577 | } |
| 578 | |
| 579 | if ((attr->type == RADIUS_ATTR_NAS_IPV6_ADDRESS) && (attr_len = 16)) { |
| 580 | nas_ip6 = attr; |
| 581 | continue; |
| 582 | } |
| 583 | } |
| 584 | |
| 585 | if (!nas_ip && !nas_ip6 && !nas_id) { |
| 586 | TRACE_DEBUG(FULL, "The message does not contain any NAS identification attribute."); |
| 587 | |
| 588 | /* Get information on this peer */ |
| 589 | CHECK_FCT( rgw_clients_get_origin(cli, &oh_str, &oh_strlen, &or_str, &or_strlen) ); |
| 590 | |
| 591 | goto diameter; |
| 592 | } |
| 593 | |
| 594 | /* Check if the message was received from the IP in NAS-IP-Address attribute */ |
| 595 | if (nas_ip && (cli->sa->sa_family == AF_INET) && !memcmp(nas_ip+1, &cli->sin->sin_addr, sizeof(struct in_addr))) { |
| 596 | TRACE_DEBUG(FULL, "NAS-IP-Address contains the same address as the message was received from."); |
| 597 | valid_nas_info |= 1; |
| 598 | } |
| 599 | if (nas_ip6 && (cli->sa->sa_family == AF_INET6) && !memcmp(nas_ip6+1, &cli->sin6->sin6_addr, sizeof(struct in6_addr))) { |
| 600 | TRACE_DEBUG(FULL, "NAS-IPv6-Address contains the same address as the message was received from."); |
| 601 | valid_nas_info |= 2; |
| 602 | } |
| 603 | |
| 604 | |
| 605 | /* |
| 606 | In RADIUS it would be possible for a rogue NAS to forge the NAS-IP- |
| 607 | Address attribute value. Diameter/RADIUS translation agents MUST |
| 608 | check a received NAS-IP-Address or NAS-IPv6-Address attribute against |
| 609 | the source address of the RADIUS packet. If they do not match and |
| 610 | the Diameter/RADIUS translation agent does not know whether the |
| 611 | packet was sent by a RADIUS proxy or NAS (e.g., no Proxy-State |
| 612 | attribute), then by default it is assumed that the source address |
| 613 | corresponds to a RADIUS proxy, and that the NAS Address is behind |
| 614 | that proxy, potentially with some additional RADIUS proxies in |
| 615 | between. The Diameter/RADIUS translation agent MUST insert entries |
| 616 | in the Route-Record AVP corresponding to the apparent route. This |
| 617 | implies doing a reverse lookup on the source address and NAS-IP- |
| 618 | Address or NAS-IPv6-Address attributes to determine the corresponding |
| 619 | FQDNs. |
| 620 | |
| 621 | If the source address and the NAS-IP-Address or NAS-IPv6-Address do |
| 622 | not match, and the Diameter/RADIUS translation agent knows that it is |
| 623 | talking directly to the NAS (e.g., there are no RADIUS proxies |
| 624 | between it and the NAS), then the error should be logged, and the |
| 625 | packet MUST be discarded. |
| 626 | |
| 627 | Diameter agents and servers MUST check whether the NAS-IP-Address AVP |
| 628 | corresponds to an entry in the Route-Record AVP. This is done by |
| 629 | doing a reverse lookup (PTR RR) for the NAS-IP-Address to retrieve |
| 630 | the corresponding FQDN, and by checking for a match with the Route- |
| 631 | Record AVP. If no match is found, then an error is logged, but no |
| 632 | other action is taken. |
| 633 | */ |
| 634 | if (nas_ip || nas_ip6) { |
| 635 | if (!valid_nas_info) { |
| 636 | if ((!cli->is_local) && (cli->type == RGW_CLI_NAS)) { |
| 637 | TRACE_DEBUG(INFO, "Message received with a NAS-IP-Address or NAS-IPv6-Address different from the sender's. Please configure as Proxy if this is expected. Message discarded."); |
| 638 | return EINVAL; |
| 639 | } else { |
| 640 | int ret; |
| 641 | sSS ss; |
| 642 | /* the peer is configured as a proxy, or running on localhost, so accept the message */ |
| 643 | |
| 644 | /* In that case, the cli will be stored as Route-Record and the NAS-IP-Address as origin */ |
| 645 | if (!cli->is_local) { |
| 646 | rr_str = cli->fqdn; |
| 647 | rr_strlen = cli->fqdn_len; |
| 648 | } |
| 649 | |
| 650 | /* We must DNS-reverse the NAS-IP*-Address */ |
| 651 | memset(&ss, 0 , sizeof(sSS)); |
| 652 | if (nas_ip) { |
| 653 | sSA4 * sin = (sSA4 *)&ss; |
| 654 | sin->sin_family = AF_INET; |
| 655 | memcpy(&sin->sin_addr, nas_ip + 1, sizeof(struct in_addr)); |
| 656 | } else { |
| 657 | sSA6 * sin6 = (sSA6 *)&ss; |
| 658 | sin6->sin6_family = AF_INET6; |
| 659 | memcpy(&sin6->sin6_addr, nas_ip6 + 1, sizeof(struct in6_addr)); |
| 660 | } |
| 661 | CHECK_SYS_DO( getnameinfo( (sSA *)&ss, sSAlen(&ss), &buf[0], sizeof(buf), NULL, 0, NI_NAMEREQD), |
| 662 | { |
| 663 | if (cli->is_local) { |
| 664 | CHECK_FCT( rgw_clients_get_origin(cli, &oh_str, &oh_strlen, &or_str, &or_strlen) ); |
| 665 | goto diameter; |
| 666 | } |
| 667 | |
| 668 | TRACE_DEBUG(INFO, "The NAS-IP*-Address cannot be DNS reversed in order to create the Origin-Host AVP; rejecting the message (translation is impossible)."); |
| 669 | return EINVAL; |
| 670 | } ); |
| 671 | |
| 672 | oh_str = &buf[0]; |
| 673 | CHECK_FCT_DO( ret = fd_os_validate_DiameterIdentity(&oh_str, &oh_strlen, 1), |
| 674 | { |
| 675 | if (cli->is_local) { |
| 676 | CHECK_FCT( rgw_clients_get_origin(cli, &oh_str, &oh_strlen, &or_str, &or_strlen) ); |
| 677 | goto diameter; |
| 678 | } |
| 679 | |
| 680 | TRACE_DEBUG(INFO, "Unable to use resolved client name '%s' as DiameterIdentity: %s", buf, strerror(ret)); |
| 681 | return ret; |
| 682 | } ); |
| 683 | oh_free = 1; |
| 684 | |
| 685 | or_str = strchr(oh_str, '.'); |
| 686 | if (or_str) { |
| 687 | or_str ++; /* move after the first dot */ |
| 688 | if (*or_str == '\0') |
| 689 | or_str = NULL; /* Discard this realm, we will use the local realm later */ |
| 690 | else |
| 691 | or_strlen = oh_strlen - (or_str - oh_str); |
| 692 | } |
| 693 | } |
| 694 | } else { |
| 695 | /* The attribute matches the source address, just use this in origin-host */ |
| 696 | CHECK_FCT( rgw_clients_get_origin(cli, &oh_str, &oh_strlen, &or_str, &or_strlen) ); |
| 697 | } |
| 698 | |
| 699 | goto diameter; /* we ignore the nas_id in that case */ |
| 700 | } |
| 701 | |
| 702 | /* We don't have a NAS-IP*-Address attribute if we are here */ |
| 703 | if (cli->is_local) { |
| 704 | /* Simple: we use our own configuration */ |
| 705 | CHECK_FCT( rgw_clients_get_origin(cli, &oh_str, &oh_strlen, &or_str, &or_strlen) ); |
| 706 | goto diameter; |
| 707 | } |
| 708 | |
| 709 | /* At this point, we only have nas_id, and the client is not local */ |
| 710 | ASSERT(nas_id); |
| 711 | |
| 712 | { |
| 713 | int found, ret; |
| 714 | struct addrinfo hint, *res, *ptr; |
| 715 | |
| 716 | /* |
| 717 | In RADIUS it would be possible for a rogue NAS to forge the NAS- |
| 718 | Identifier attribute. Diameter/RADIUS translation agents SHOULD |
| 719 | attempt to check a received NAS-Identifier attribute against the |
| 720 | source address of the RADIUS packet, by doing an A/AAAA RR query. If |
| 721 | the NAS-Identifier attribute contains an FQDN, then such a query |
| 722 | would resolve to an IP address matching the source address. However, |
| 723 | the NAS-Identifier attribute is not required to contain an FQDN, so |
| 724 | such a query could fail. If it fails, an error should be logged, but |
| 725 | no action should be taken, other than a reverse lookup on the source |
| 726 | address and insert the resulting FQDN into the Route-Record AVP. |
| 727 | |
| 728 | Diameter agents and servers SHOULD check whether a NAS-Identifier AVP |
| 729 | corresponds to an entry in the Route-Record AVP. If no match is |
| 730 | found, then an error is logged, but no other action is taken. |
| 731 | */ |
| 732 | |
| 733 | /* first, check if the nas_id is the fqdn of the peer or a known alias */ |
| 734 | if (!fd_os_almostcasesrch(nas_id + 1, nas_id_len, |
| 735 | cli->fqdn, cli->fqdn_len, NULL)) { |
| 736 | TRACE_DEBUG(FULL, "NAS-Identifier contains the fqdn of the client"); |
| 737 | found = 1; |
| 738 | } else { |
| 739 | for (idx = 0; idx < cli->aliases_nb; idx++) { |
| 740 | if (!fd_os_cmp(nas_id + 1, nas_id_len, |
| 741 | cli->aliases[idx].name, cli->aliases[idx].len)) { |
| 742 | TRACE_DEBUG(FULL, "NAS-Identifier valid value found in the cache"); |
| 743 | found = 1; |
| 744 | break; |
| 745 | } |
| 746 | } |
| 747 | } |
| 748 | |
| 749 | if (found) { |
| 750 | /* The NAS-Identifier matches the source IP */ |
| 751 | CHECK_FCT( rgw_clients_get_origin(cli, &oh_str, &oh_strlen, &or_str, &or_strlen) ); |
| 752 | |
| 753 | goto diameter; |
| 754 | } |
| 755 | |
| 756 | /* Attempt DNS resolution of the identifier */ |
| 757 | ASSERT( nas_id_len < sizeof(buf) ); |
| 758 | memcpy(buf, nas_id + 1, nas_id_len); |
| 759 | buf[nas_id->length - sizeof(struct radius_attr_hdr)] = '\0'; |
| 760 | |
| 761 | /* Now check if this alias is valid for this peer */ |
| 762 | memset(&hint, 0, sizeof(hint)); |
| 763 | hint.ai_flags = AI_CANONNAME; |
| 764 | ret = getaddrinfo(buf, NULL, &hint, &res); |
| 765 | if (ret == 0) { |
| 766 | strncpy(buf, res->ai_canonname, sizeof(buf)); |
| 767 | /* The name was resolved correctly, does it match the IP of the client? */ |
| 768 | for (ptr = res; ptr != NULL; ptr = ptr->ai_next) { |
| 769 | if (cli->sa->sa_family != ptr->ai_family) |
| 770 | continue; |
| 771 | if (memcmp(cli->sa, ptr->ai_addr, sSAlen(cli->sa))) |
| 772 | continue; |
| 773 | |
| 774 | found = 1; |
| 775 | break; |
| 776 | } |
| 777 | freeaddrinfo(res); |
| 778 | |
| 779 | if (!found) { |
| 780 | if (cli->type == RGW_CLI_NAS) { |
| 781 | TRACE_DEBUG(INFO, "The NAS-Identifier value '%.*s' resolves to a different IP than the client's, discarding the message. Configure this client as a Proxy if this message should be valid.", |
| 782 | (int)nas_id_len, (char *)(nas_id + 1)); |
| 783 | return EINVAL; |
| 784 | } else { |
| 785 | /* This identifier matches a different IP, assume it is a proxied message */ |
| 786 | if (!cli->is_local) { |
| 787 | rr_str = cli->fqdn; |
| 788 | rr_strlen = cli->fqdn_len; |
| 789 | } |
| 790 | oh_str = &buf[0]; /* The canonname resolved */ |
| 791 | oh_strlen = 0; |
| 792 | CHECK_FCT_DO( ret = fd_os_validate_DiameterIdentity(&oh_str, &oh_strlen, 1), |
| 793 | { |
| 794 | TRACE_DEBUG(INFO, "Unable to use resolved client name '%s' as DiameterIdentity: %s", buf, strerror(ret)); |
| 795 | return ret; |
| 796 | } ); |
| 797 | oh_free = 1; |
| 798 | or_str = strchr(oh_str, '.'); |
| 799 | if (or_str) { |
| 800 | or_str ++; /* move after the first dot */ |
| 801 | if (*or_str == '\0') |
| 802 | or_str = NULL; /* Discard this realm, we will use the local realm later */ |
| 803 | else |
| 804 | or_strlen = oh_strlen - (or_str - oh_str); |
| 805 | } |
| 806 | } |
| 807 | } else { |
| 808 | /* It is a valid alias, save it */ |
| 809 | CHECK_MALLOC( cli->aliases = realloc(cli->aliases, (cli->aliases_nb + 1) * sizeof(cli->aliases[0])) ); |
| 810 | |
| 811 | CHECK_MALLOC( cli->aliases[cli->aliases_nb + 1].name = os0dup(nas_id + 1, nas_id_len ) ); |
| 812 | cli->aliases[cli->aliases_nb + 1].len = nas_id_len; |
| 813 | |
| 814 | cli->aliases_nb ++; |
| 815 | TRACE_DEBUG(FULL, "Saved valid alias for client: '%.*s' -> '%s'", (int)nas_id_len, (char *)(nas_id + 1), cli->fqdn); |
| 816 | CHECK_FCT( rgw_clients_get_origin(cli, &oh_str, &oh_strlen, &or_str, &or_strlen) ); |
| 817 | } |
| 818 | } else { |
| 819 | /* Error resolving the name */ |
| 820 | TRACE_DEBUG(INFO, "NAS-Identifier '%s' cannot be resolved: %s. Ignoring...", buf, gai_strerror(ret)); |
| 821 | /* Assume this is a valid identifier for the client */ |
| 822 | CHECK_FCT( rgw_clients_get_origin(cli, &oh_str, &oh_strlen, &or_str, &or_strlen) ); |
| 823 | } |
| 824 | } |
| 825 | |
| 826 | /* Now, let's create the empty Diameter message with Origin-Host, -Realm, and Route-Record if needed. */ |
| 827 | diameter: |
| 828 | ASSERT(oh_str); /* If it is not defined here, there is a bug... */ |
| 829 | if (!or_str) { |
| 830 | or_str = fd_g_config->cnf_diamrlm; /* Use local realm in that case */ |
| 831 | or_strlen = fd_g_config->cnf_diamrlm_len; |
| 832 | } |
| 833 | |
| 834 | /* Create an empty Diameter message so that extensions can store their AVPs */ |
| 835 | CHECK_FCT( fd_msg_new ( NULL, MSGFL_ALLOC_ETEID, diam ) ); |
| 836 | |
| 837 | /* Add the Origin-Host as next AVP */ |
| 838 | CHECK_FCT( fd_msg_avp_new ( cache_orig_host, 0, &avp ) ); |
| 839 | memset(&avp_val, 0, sizeof(avp_val)); |
| 840 | avp_val.os.data = (unsigned char *)oh_str; |
| 841 | avp_val.os.len = oh_strlen; |
| 842 | CHECK_FCT( fd_msg_avp_setvalue ( avp, &avp_val ) ); |
| 843 | CHECK_FCT( fd_msg_avp_add ( *diam, MSG_BRW_LAST_CHILD, avp) ); |
| 844 | |
| 845 | /* Add the Origin-Realm as next AVP */ |
| 846 | CHECK_FCT( fd_msg_avp_new ( cache_orig_realm, 0, &avp ) ); |
| 847 | memset(&avp_val, 0, sizeof(avp_val)); |
| 848 | avp_val.os.data = (unsigned char *)or_str; |
| 849 | avp_val.os.len = or_strlen; |
| 850 | CHECK_FCT( fd_msg_avp_setvalue ( avp, &avp_val ) ); |
| 851 | CHECK_FCT( fd_msg_avp_add ( *diam, MSG_BRW_LAST_CHILD, avp) ); |
| 852 | |
| 853 | if (rr_str) { |
| 854 | CHECK_FCT( fd_msg_avp_new ( cache_route_record, 0, &avp ) ); |
| 855 | memset(&avp_val, 0, sizeof(avp_val)); |
| 856 | avp_val.os.data = (unsigned char *)rr_str; |
| 857 | avp_val.os.len = rr_strlen; |
| 858 | CHECK_FCT( fd_msg_avp_setvalue ( avp, &avp_val ) ); |
| 859 | CHECK_FCT( fd_msg_avp_add ( *diam, MSG_BRW_LAST_CHILD, avp) ); |
| 860 | } |
| 861 | |
| 862 | if (oh_free) |
| 863 | free(oh_str); |
| 864 | |
| 865 | /* Done! */ |
| 866 | return 0; |
| 867 | } |
| 868 | |
| 869 | int rgw_clients_get_origin(struct rgw_client *cli, DiamId_t *fqdn, size_t *fqdnlen, DiamId_t *realm, size_t *realmlen) |
| 870 | { |
| 871 | TRACE_ENTRY("%p %p %p %p %p", cli, fqdn, fqdnlen, realm, realmlen); |
| 872 | CHECK_PARAMS(cli && fqdn && fqdnlen); |
| 873 | |
| 874 | if (cli->is_local) { |
| 875 | *fqdn = fd_g_config->cnf_diamid; |
| 876 | *fqdnlen = fd_g_config->cnf_diamid_len; |
| 877 | if (realm) |
| 878 | *realm= fd_g_config->cnf_diamrlm; |
| 879 | if (realmlen) |
| 880 | *realmlen= fd_g_config->cnf_diamrlm_len; |
| 881 | } else { |
| 882 | *fqdn = cli->fqdn; |
| 883 | *fqdnlen = cli->fqdn_len; |
| 884 | if (realm) |
| 885 | *realm= cli->realm; |
| 886 | if (realmlen) |
| 887 | *realmlen= cli->realm_len; |
| 888 | } |
| 889 | |
| 890 | return 0; |
| 891 | } |
| 892 | |
| 893 | char * rgw_clients_id(struct rgw_client *cli) |
| 894 | { |
| 895 | return cli->is_local ? "(local)" : cli->fqdn; |
| 896 | } |
| 897 | |
| 898 | |
| 899 | void rgw_clients_dispose(struct rgw_client ** ref) |
| 900 | { |
| 901 | TRACE_ENTRY("%p", ref); |
| 902 | CHECK_PARAMS_DO(ref, return); |
| 903 | |
| 904 | CHECK_POSIX_DO( pthread_rwlock_wrlock(&cli_rwl), ); |
| 905 | client_unlink(*ref); |
| 906 | *ref = NULL; |
| 907 | CHECK_POSIX_DO( pthread_rwlock_unlock(&cli_rwl), ); |
| 908 | } |
| 909 | |
/* Register a new RADIUS client (NAS or Proxy) identified by its IP address and shared secret.
    ip_port : source address of the client (AF_INET or AF_INET6); ownership may be taken by client_create.
    key     : pointer to the shared secret buffer; ownership may be taken by client_create.
    keylen  : length of the shared secret (must be > 0).
    type    : RGW_CLI_NAS or RGW_CLI_PXY.
    Returns 0 on success (including an exact duplicate of an existing entry), EEXIST on a
    conflicting description for the same address, or another error code on failure. */
int rgw_clients_add( struct sockaddr * ip_port, unsigned char ** key, size_t keylen, enum rgw_cli_type type )
{
	struct rgw_client * prev = NULL, *new = NULL;
	int ret;
	
	TRACE_ENTRY("%p %p %zu", ip_port, key, keylen);
	
	CHECK_PARAMS( ip_port && key && *key && keylen );
	CHECK_PARAMS( (ip_port->sa_family == AF_INET) || (ip_port->sa_family == AF_INET6) );
	CHECK_PARAMS( (type == RGW_CLI_NAS) || (type == RGW_CLI_PXY) );
	
	/* Dump the entry in debug mode */
	if (TRACE_BOOL(FULL + 1 )) {
		char sa_buf[sSA_DUMP_STRLEN];
		fd_sa_sdump_numeric(sa_buf, ip_port);
		TRACE_DEBUG(FULL, "Adding %s:", (type == RGW_CLI_NAS) ? "NAS" : "PROXY" );
		TRACE_DEBUG(FULL, "\tIP : %s", sa_buf );
		TRACE_BUFFER(FD_LOG_DEBUG, FULL, "\tKey: [", *key, keylen, "]" );
	}
	
	/* Lock the lists */
	CHECK_POSIX( pthread_rwlock_wrlock(&cli_rwl) );
	
	/* Check if the same entry does not already exist.
	   On ENOENT, prev is set to the insertion point in the sorted list. */
	ret = client_search(&prev, ip_port );
	if (ret == ENOENT) {
		/* No duplicate found, Ok to add */
		CHECK_FCT_DO( ret = client_create( &new, &ip_port, key, keylen, type ), goto end );
		fd_list_insert_after(&prev->chain, &new->chain);
		/* The list itself holds one reference on the new client */
		new->refcount++;
		ret = 0;
		goto end;
	}
	
	if (ret == EEXIST) {
		char sa_buf[sSA_DUMP_STRLEN];
		/* Check if the key is the same, then skip or return an error */
		if ((keylen == prev->key.len ) && ( ! memcmp(*key, prev->key.data, keylen) ) && (type == prev->type)) {
			TRACE_DEBUG(INFO, "Skipping duplicate client description");
			ret = 0;
			goto end;
		}
		
		/* Same address but different secret or type: report the conflict, keep ret = EEXIST */
		fd_log_error("ERROR: Conflicting RADIUS clients descriptions!");
		TRACE_ERROR("Previous entry: %s", (prev->type == RGW_CLI_NAS) ? "NAS" : "PROXY");
		fd_sa_sdump_numeric(sa_buf, prev->sa);
		TRACE_ERROR("\tIP : %s", sa_buf);
		TRACE_BUFFER(FD_LOG_ERROR, NONE, "\tKey: [", prev->key.data, prev->key.len, "]" );
		TRACE_ERROR("Conflicting entry: %s", (type == RGW_CLI_NAS) ? "NAS" : "PROXY");
		fd_sa_sdump_numeric(sa_buf, ip_port);
		TRACE_ERROR("\tIP : %s", sa_buf);
		TRACE_BUFFER(FD_LOG_ERROR, NONE, "\tKey: [", *key, keylen, "]" );
	}
end:
	/* release the lists */
	CHECK_POSIX( pthread_rwlock_unlock(&cli_rwl) );
	
	return ret;
}
| 969 | |
| 970 | static void dump_cli_list(struct fd_list *senti) |
| 971 | { |
| 972 | struct rgw_client * client = NULL; |
| 973 | struct fd_list *ref = NULL; |
| 974 | |
| 975 | for (ref = senti->next; ref != senti; ref = ref->next) { |
| 976 | char sa_buf[sSA_DUMP_STRLEN]; |
| 977 | client = (struct rgw_client *)ref; |
| 978 | fd_sa_sdump_numeric(sa_buf, client->sa); |
| 979 | LOG_D(" - %s%s", sa_buf, (client->type == RGW_CLI_NAS) ? "" : " [PROXY]" ); |
| 980 | } |
| 981 | } |
| 982 | |
| 983 | void rgw_clients_dump(void) |
| 984 | { |
| 985 | if ( ! TRACE_BOOL(FULL) ) |
| 986 | return; |
| 987 | |
| 988 | CHECK_POSIX_DO( pthread_rwlock_rdlock(&cli_rwl), /* ignore error */ ); |
| 989 | |
| 990 | if (!FD_IS_LIST_EMPTY(&cli_ip)) |
| 991 | fd_log_debug(" RADIUS IP clients list:"); |
| 992 | dump_cli_list(&cli_ip); |
| 993 | |
| 994 | if (!FD_IS_LIST_EMPTY(&cli_ip6)) |
| 995 | fd_log_debug(" RADIUS IPv6 clients list:"); |
| 996 | dump_cli_list(&cli_ip6); |
| 997 | |
| 998 | CHECK_POSIX_DO( pthread_rwlock_unlock(&cli_rwl), /* ignore error */ ); |
| 999 | } |
| 1000 | |
| 1001 | void rgw_clients_fini(void) |
| 1002 | { |
| 1003 | struct fd_list * client; |
| 1004 | |
| 1005 | TRACE_ENTRY(); |
| 1006 | |
| 1007 | CHECK_POSIX_DO( pthread_rwlock_wrlock(&cli_rwl), /* ignore error */ ); |
| 1008 | |
| 1009 | CHECK_FCT_DO( fd_thr_term(&dbt_expire), /* continue */ ); |
| 1010 | |
| 1011 | /* empty the lists */ |
| 1012 | while ( ! FD_IS_LIST_EMPTY(&cli_ip) ) { |
| 1013 | client = cli_ip.next; |
| 1014 | fd_list_unlink(client); |
| 1015 | client_unlink((struct rgw_client *)client); |
| 1016 | } |
| 1017 | while (! FD_IS_LIST_EMPTY(&cli_ip6)) { |
| 1018 | client = cli_ip6.next; |
| 1019 | fd_list_unlink(client); |
| 1020 | client_unlink((struct rgw_client *)client); |
| 1021 | } |
| 1022 | |
| 1023 | CHECK_POSIX_DO( pthread_rwlock_unlock(&cli_rwl), /* ignore error */ ); |
| 1024 | |
| 1025 | } |
| 1026 | |
/* Finalize and send a RADIUS answer to a client, then store it in the duplicate cache.
    msg : the answer to send; on success (or unrecoverable error) *msg is consumed and set to NULL.
    req : the original request this answers (required; the no-request case is not supported yet).
    cli : the client to send to.
    Returns 0 on success, ENOTSUP / ENOMEM / EINVAL on failure (the message is freed on error). */
int rgw_client_finish_send(struct radius_msg ** msg, struct rgw_radius_msg_meta * req, struct rgw_client * cli)
{
	int p;
	struct fd_list * li;
	
	TRACE_ENTRY("%p %p %p", msg, req, cli);
	CHECK_PARAMS( msg && *msg && cli );
	
	if (!req) {
		/* We don't support this case yet */
		ASSERT(0);
		return ENOTSUP;
	}
	
	/* Add all the Proxy-States back in the message, as required by RFC 2865 (answers echo
	   the request's Proxy-State attributes in order) */
	for (p = 0; p < req->ps_nb; p++) {
		struct radius_attr_hdr * attr = (struct radius_attr_hdr *)(req->radius.buf + req->radius.attr_pos[req->ps_first + p]);
		
		if (radius_msg_add_attr_to_array(*msg, attr)) {
			TRACE_DEBUG(INFO, "Error in radius_msg_add_attr_to_array, ENOMEM");
			radius_msg_free(*msg);
			free(*msg);
			*msg = NULL;
			return ENOMEM;
		}
	}
	
	/* Add the Message-Authenticator if needed, and other final tasks (signs the answer
	   with the client's shared secret against the request's authenticator) */
	if (radius_msg_finish_srv(*msg, cli->key.data, cli->key.len, req->radius.hdr->authenticator)) {
		TRACE_DEBUG(INFO, "An error occurred while preparing the RADIUS answer");
		radius_msg_free(*msg);
		free(*msg);
		*msg = NULL;
		return EINVAL;
	}
	
	/* Debug */
	TRACE_DEBUG(FULL, "RADIUS message ready for sending:");
	/* NOTE(review): casts struct radius_msg* to struct rgw_radius_msg_meta* — presumably
	   valid only if the radius member is the first field of the meta struct; confirm. */
	rgw_msg_dump((struct rgw_radius_msg_meta *)*msg, 0);
	
	/* Send the message */
	CHECK_FCT( rgw_servers_send(req->serv_type, (*msg)->buf, (*msg)->buf_used, cli->sa, req->port) );
	
	/* update the duplicate cache: p selects the auth (0) or acct (1) cache */
	if (req->serv_type == RGW_PLG_TYPE_AUTH)
		p = 0;
	else
		p = 1;
	
	CHECK_POSIX( pthread_mutex_lock( &cli->dupl_info[p].dupl_lock ) );
	
	/* Search this message in our list. The list is ordered by (id, port, authenticator),
	   so we 'continue' while smaller and 'break' past the possible position. */
	for (li = cli->dupl_info[p].dupl_by_id.next; li != &cli->dupl_info[p].dupl_by_id; li = li->next) {
		int cmp = 0;
		struct req_info * r = (struct req_info *)(li->o);
		if (r->id < req->radius.hdr->identifier)
			continue;
		if (r->id > req->radius.hdr->identifier)
			break;
		if (r->port < req->port)
			continue;
		if (r->port > req->port)
			break;
		cmp = memcmp(&r->auth[0], &req->radius.hdr->authenticator[0], 16);
		if (cmp < 0)
			continue;
		if (cmp > 0)
			break;
		
		/* We have the request in our duplicate cache */
		/* This should not happen, but just in case... discard any previously stored answer */
		if (r->ans) {
			radius_msg_free(r->ans);
			free(r->ans);
		}
		
		/* Now save the message: the cache takes ownership of *msg */
		r->ans = *msg;
		*msg = NULL;
		
		/* Update the timestamp */
		{
			time_t now = time(NULL);
			r->received = now;
			fd_list_unlink(&r->by_time); /* Move as last entry, since it is the most recent */
			fd_list_insert_before(&cli->dupl_info[p].dupl_by_time, &r->by_time);
		}
		break;
	}
	
	CHECK_POSIX( pthread_mutex_unlock( &cli->dupl_info[p].dupl_lock ) );
	
	/* If we have not found the request in our list, the purge time is probably too small */
	if (*msg) {
		TODO("Augment the purge time...");
		/* If we receive the duplicate request again, it will be converted to Diameter... */
		radius_msg_free(*msg);
		free(*msg);
		*msg = NULL;
	}
	
	/* Finished */
	return 0;
}
| 1131 | |
| 1132 | /* Call this function when a RADIUS request has explicitely no answer (mainly accounting) so |
| 1133 | that we purge the duplicate cache and allow further message to be translated again. |
| 1134 | This is useful for example when a temporary error occurred in Diameter (like UNABLE_TO_DELIVER) */ |
/* Purge a request from the duplicate cache when no RADIUS answer will be sent.
    req : the request to remove from the cache.
    cli : the client the request came from.
    Returns 0 (not finding the entry is not an error). */
int rgw_client_finish_nosend(struct rgw_radius_msg_meta * req, struct rgw_client * cli)
{
	int p;
	struct fd_list * li;
	
	TRACE_ENTRY("%p %p", req, cli);
	CHECK_PARAMS( req && cli );
	
	/* update the duplicate cache: p selects the auth (0) or acct (1) cache */
	if (req->serv_type == RGW_PLG_TYPE_AUTH)
		p = 0;
	else
		p = 1;
	
	CHECK_POSIX( pthread_mutex_lock( &cli->dupl_info[p].dupl_lock ) );
	
	/* Search this message in our list. The list is ordered by (id, port, authenticator),
	   so we 'continue' while smaller and 'break' past the possible position. */
	for (li = cli->dupl_info[p].dupl_by_id.next; li != &cli->dupl_info[p].dupl_by_id; li = li->next) {
		int cmp = 0;
		struct req_info * r = (struct req_info *)(li->o);
		if (r->id < req->radius.hdr->identifier)
			continue;
		if (r->id > req->radius.hdr->identifier)
			break;
		if (r->port < req->port)
			continue;
		if (r->port > req->port)
			break;
		cmp = memcmp(&r->auth[0], &req->radius.hdr->authenticator[0], 16);
		if (cmp < 0)
			continue;
		if (cmp > 0)
			break;
		
		/* We have the request in our duplicate cache, remove it (from both indexes) */
		fd_list_unlink(&r->by_id);
		fd_list_unlink(&r->by_time);
		dupl_free_req_info(r);
		break;
	}
	
	CHECK_POSIX( pthread_mutex_unlock( &cli->dupl_info[p].dupl_lock ) );
	
	/* Finished */
	return 0;
}
| 1181 | |