blob: e86b4325bb4d76fdffc70b2b51981be753786bf7 [file] [log] [blame]
Paul Jakma57345092011-12-25 17:52:09 +01001/*
2 * This file is free software: you may copy, redistribute and/or modify it
3 * under the terms of the GNU General Public License as published by the
4 * Free Software Foundation, either version 2 of the License, or (at your
5 * option) any later version.
6 *
7 * This file is distributed in the hope that it will be useful, but
8 * WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
10 * General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program. If not, see <http://www.gnu.org/licenses/>.
14 *
15 * This file incorporates work covered by the following copyright and
16 * permission notice:
17 *
18
19Copyright (c) 2007, 2008 by Juliusz Chroboczek
20
21Permission is hereby granted, free of charge, to any person obtaining a copy
22of this software and associated documentation files (the "Software"), to deal
23in the Software without restriction, including without limitation the rights
24to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
25copies of the Software, and to permit persons to whom the Software is
26furnished to do so, subject to the following conditions:
27
28The above copyright notice and this permission notice shall be included in
29all copies or substantial portions of the Software.
30
31THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
32IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
33FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
34AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
35LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
36OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
37THE SOFTWARE.
38*/
39
Paul Jakma57345092011-12-25 17:52:09 +010040#include <zebra.h>
41#include "if.h"
42
43#include "babeld.h"
44#include "util.h"
45#include "net.h"
46#include "babel_interface.h"
47#include "source.h"
48#include "neighbour.h"
49#include "route.h"
50#include "xroute.h"
51#include "resend.h"
52#include "message.h"
53#include "kernel.h"
54
/* Fixed Babel packet header: magic byte 42, protocol version 2.
   Bytes 2-3 carry the body length and are patched in just before each
   send (see flushbuf and flush_unicast). */
unsigned char packet_header[4] = {42, 2};

int parasitic = 0;      /* when non-zero, act as a passive listener */
int split_horizon = 1;  /* suppress re-announcing routes on their ingress interface */

unsigned short myseqno = 0;          /* our own update sequence number */
struct timeval seqno_time = {0, 0};  /* when myseqno last changed */

/* Single lazily-allocated buffer for unicast TLVs; it only ever targets
   one neighbour at a time (unicast_neighbour). */
#define UNICAST_BUFSIZE 1024
int unicast_buffered = 0;                      /* bytes currently queued */
unsigned char *unicast_buffer = NULL;          /* allocated on first use */
struct neighbour *unicast_neighbour = NULL;    /* destination of the buffer */
struct timeval unicast_flush_timeout = {0, 0}; /* tv_sec == 0: no flush pending */

/* The IPv4-mapped IPv6 prefix ::ffff:0:0/96. */
static const unsigned char v4prefix[16] =
    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF, 0xFF, 0, 0, 0, 0 };
71
Matthieu Boutierc35fafd2012-01-23 23:46:32 +010072/* Parse a network prefix, encoded in the somewhat baroque compressed
73 representation used by Babel. Return the number of bytes parsed. */
static int
network_prefix(int ae, int plen, unsigned int omitted,
               const unsigned char *p, const unsigned char *dp,
               unsigned int len, unsigned char *p_r)
{
    /* ae: address encoding (0 = wildcard, 1 = IPv4, 2 = IPv6,
       3 = link-local IPv6).  plen: prefix length in bits, or < 0 for a
       full address.  omitted: number of leading bytes to take from the
       default prefix dp.  p/len: available wire bytes.  p_r: 16-byte
       output, written as an IPv6(-mapped) prefix. */
    unsigned pb;
    unsigned char prefix[16];
    int ret = -1;

    /* pb: number of prefix bytes present (after the omitted ones). */
    if(plen >= 0)
        pb = (plen + 7) / 8;
    else if(ae == 1)
        pb = 4;
    else
        pb = 16;

    if(pb > 16)
        return -1;

    memset(prefix, 0, 16);

    switch(ae) {
    case 0:
        /* Wildcard: no address bytes consumed. */
        ret = 0;
        break;
    case 1:
        /* IPv4, stored as a v4-mapped IPv6 address. */
        if(omitted > 4 || pb > 4 || (pb > omitted && len < pb - omitted))
            return -1;
        memcpy(prefix, v4prefix, 12);
        if(omitted) {
            /* Compressed: borrow leading bytes from the default prefix,
               which must itself be v4-mapped. */
            if (dp == NULL || !v4mapped(dp)) return -1;
            memcpy(prefix, dp, 12 + omitted);
        }
        if(pb > omitted) memcpy(prefix + 12 + omitted, p, pb - omitted);
        ret = pb - omitted;
        break;
    case 2:
        /* Plain IPv6. */
        if(omitted > 16 || (pb > omitted && len < pb - omitted)) return -1;
        if(omitted) {
            if (dp == NULL || v4mapped(dp)) return -1;
            memcpy(prefix, dp, omitted);
        }
        if(pb > omitted) memcpy(prefix + omitted, p, pb - omitted);
        ret = pb - omitted;
        break;
    case 3:
        /* Link-local IPv6: fe80::/64 is implicit, only the low 8 bytes
           travel on the wire. */
        if(pb > 8 && len < pb - 8) return -1;
        prefix[0] = 0xfe;
        prefix[1] = 0x80;
        if(pb > 8) memcpy(prefix + 8, p, pb - 8);
        /* NOTE(review): if plen < 64 then pb < 8 and ret goes negative
           here, which callers treat as a parse failure — confirm this is
           the intended behaviour for short link-local prefixes. */
        ret = pb - 8;
        break;
    default:
        return -1;
    }

    /* Zero the bits beyond the prefix length; IPv4 lengths are shifted
       by 96 to account for the v4-mapped encoding. */
    mask_prefix(p_r, prefix, plen < 0 ? 128 : ae == 1 ? plen + 96 : plen);
    return ret;
}
133
134static void
135parse_route_attributes(const unsigned char *a, int alen,
136 unsigned char *channels)
137{
138 int type, len, i = 0;
139
140 while(i < alen) {
141 type = a[i];
142 if(type == 0) {
143 i++;
144 continue;
145 }
146
147 if(i + 1 > alen) {
148 fprintf(stderr, "Received truncated attributes.\n");
149 return;
150 }
151 len = a[i + 1];
152 if(i + len > alen) {
153 fprintf(stderr, "Received truncated attributes.\n");
154 return;
155 }
156
157 if(type == 1) {
158 /* Nothing. */
159 } else if(type == 2) {
160 if(len > DIVERSITY_HOPS) {
161 fprintf(stderr,
162 "Received overlong channel information (%d > %d).\n",
163 len, DIVERSITY_HOPS);
164 len = DIVERSITY_HOPS;
165 }
166 if(memchr(a + i + 2, 0, len) != NULL) {
167 /* 0 is reserved. */
168 fprintf(stderr, "Channel information contains 0!");
169 return;
170 }
171 memset(channels, 0, DIVERSITY_HOPS);
172 memcpy(channels, a + i + 2, len);
173 } else {
174 fprintf(stderr, "Received unknown route attribute %d.\n", type);
175 }
176
177 i += len + 2;
178 }
Paul Jakma57345092011-12-25 17:52:09 +0100179}
180
181static int
182network_address(int ae, const unsigned char *a, unsigned int len,
183 unsigned char *a_r)
184{
185 return network_prefix(ae, -1, 0, a, NULL, len, a_r);
186}
187
Matthieu Boutierc35fafd2012-01-23 23:46:32 +0100188static int
189channels_len(unsigned char *channels)
190{
191 unsigned char *p = memchr(channels, 0, DIVERSITY_HOPS);
192 return p ? (p - channels) : DIVERSITY_HOPS;
193}
194
Paul Jakma57345092011-12-25 17:52:09 +0100195void
196parse_packet(const unsigned char *from, struct interface *ifp,
197 const unsigned char *packet, int packetlen)
198{
199 int i;
200 const unsigned char *message;
201 unsigned char type, len;
202 int bodylen;
203 struct neighbour *neigh;
204 int have_router_id = 0, have_v4_prefix = 0, have_v6_prefix = 0,
205 have_v4_nh = 0, have_v6_nh = 0;
206 unsigned char router_id[8], v4_prefix[16], v6_prefix[16],
207 v4_nh[16], v6_nh[16];
208
209 if(!linklocal(from)) {
Matthieu Boutier4eedea52012-01-17 22:46:21 +0100210 zlog_err("Received packet from non-local address %s.",
211 format_address(from));
Paul Jakma57345092011-12-25 17:52:09 +0100212 return;
213 }
214
215 if(packet[0] != 42) {
Matthieu Boutier4eedea52012-01-17 22:46:21 +0100216 zlog_err("Received malformed packet on %s from %s.",
217 ifp->name, format_address(from));
Paul Jakma57345092011-12-25 17:52:09 +0100218 return;
219 }
220
221 if(packet[1] != 2) {
Matthieu Boutier4eedea52012-01-17 22:46:21 +0100222 zlog_err("Received packet with unknown version %d on %s from %s.",
223 packet[1], ifp->name, format_address(from));
Paul Jakma57345092011-12-25 17:52:09 +0100224 return;
225 }
226
227 neigh = find_neighbour(from, ifp);
228 if(neigh == NULL) {
Matthieu Boutier4eedea52012-01-17 22:46:21 +0100229 zlog_err("Couldn't allocate neighbour.");
Paul Jakma57345092011-12-25 17:52:09 +0100230 return;
231 }
232
233 DO_NTOHS(bodylen, packet + 2);
234
235 if(bodylen + 4 > packetlen) {
Matthieu Boutier4eedea52012-01-17 22:46:21 +0100236 zlog_err("Received truncated packet (%d + 4 > %d).",
237 bodylen, packetlen);
Paul Jakma57345092011-12-25 17:52:09 +0100238 bodylen = packetlen - 4;
239 }
240
241 i = 0;
242 while(i < bodylen) {
243 message = packet + 4 + i;
244 type = message[0];
245 if(type == MESSAGE_PAD1) {
246 debugf(BABEL_DEBUG_COMMON,"Received pad1 from %s on %s.",
247 format_address(from), ifp->name);
248 i++;
249 continue;
250 }
251 if(i + 1 > bodylen) {
Matthieu Boutier4eedea52012-01-17 22:46:21 +0100252 zlog_err("Received truncated message.");
Paul Jakma57345092011-12-25 17:52:09 +0100253 break;
254 }
255 len = message[1];
256 if(i + len > bodylen) {
Matthieu Boutier4eedea52012-01-17 22:46:21 +0100257 zlog_err("Received truncated message.");
Paul Jakma57345092011-12-25 17:52:09 +0100258 break;
259 }
260
261 if(type == MESSAGE_PADN) {
262 debugf(BABEL_DEBUG_COMMON,"Received pad%d from %s on %s.",
263 len, format_address(from), ifp->name);
264 } else if(type == MESSAGE_ACK_REQ) {
265 unsigned short nonce, interval;
266 if(len < 6) goto fail;
267 DO_NTOHS(nonce, message + 4);
268 DO_NTOHS(interval, message + 6);
269 debugf(BABEL_DEBUG_COMMON,"Received ack-req (%04X %d) from %s on %s.",
270 nonce, interval, format_address(from), ifp->name);
271 send_ack(neigh, nonce, interval);
272 } else if(type == MESSAGE_ACK) {
273 debugf(BABEL_DEBUG_COMMON,"Received ack from %s on %s.",
274 format_address(from), ifp->name);
275 /* Nothing right now */
276 } else if(type == MESSAGE_HELLO) {
277 unsigned short seqno, interval;
278 int changed;
279 if(len < 6) goto fail;
280 DO_NTOHS(seqno, message + 4);
281 DO_NTOHS(interval, message + 6);
282 debugf(BABEL_DEBUG_COMMON,"Received hello %d (%d) from %s on %s.",
283 seqno, interval,
284 format_address(from), ifp->name);
Paul Jakma57345092011-12-25 17:52:09 +0100285 changed = update_neighbour(neigh, seqno, interval);
286 update_neighbour_metric(neigh, changed);
287 if(interval > 0)
288 schedule_neighbours_check(interval * 10, 0);
289 } else if(type == MESSAGE_IHU) {
290 unsigned short txcost, interval;
291 unsigned char address[16];
292 int rc;
293 if(len < 6) goto fail;
294 DO_NTOHS(txcost, message + 4);
295 DO_NTOHS(interval, message + 6);
296 rc = network_address(message[2], message + 8, len - 6, address);
297 if(rc < 0) goto fail;
298 debugf(BABEL_DEBUG_COMMON,"Received ihu %d (%d) from %s on %s for %s.",
299 txcost, interval,
300 format_address(from), ifp->name,
301 format_address(address));
302 if(message[2] == 0 || is_interface_ll_address(ifp, address)) {
303 int changed = txcost != neigh->txcost;
304 neigh->txcost = txcost;
305 neigh->ihu_time = babel_now;
306 neigh->ihu_interval = interval;
307 update_neighbour_metric(neigh, changed);
308 if(interval > 0)
309 schedule_neighbours_check(interval * 10 * 3, 0);
310 }
311 } else if(type == MESSAGE_ROUTER_ID) {
312 if(len < 10) {
313 have_router_id = 0;
314 goto fail;
315 }
316 memcpy(router_id, message + 4, 8);
317 have_router_id = 1;
318 debugf(BABEL_DEBUG_COMMON,"Received router-id %s from %s on %s.",
319 format_eui64(router_id), format_address(from), ifp->name);
320 } else if(type == MESSAGE_NH) {
321 unsigned char nh[16];
322 int rc;
323 if(len < 2) {
324 have_v4_nh = 0;
325 have_v6_nh = 0;
326 goto fail;
327 }
328 rc = network_address(message[2], message + 4, len - 2,
329 nh);
330 if(rc < 0) {
331 have_v4_nh = 0;
332 have_v6_nh = 0;
333 goto fail;
334 }
335 debugf(BABEL_DEBUG_COMMON,"Received nh %s (%d) from %s on %s.",
336 format_address(nh), message[2],
337 format_address(from), ifp->name);
338 if(message[2] == 1) {
339 memcpy(v4_nh, nh, 16);
340 have_v4_nh = 1;
341 } else {
342 memcpy(v6_nh, nh, 16);
343 have_v6_nh = 1;
344 }
345 } else if(type == MESSAGE_UPDATE) {
346 unsigned char prefix[16], *nh;
347 unsigned char plen;
Matthieu Boutierc35fafd2012-01-23 23:46:32 +0100348 unsigned char channels[DIVERSITY_HOPS];
Paul Jakma57345092011-12-25 17:52:09 +0100349 unsigned short interval, seqno, metric;
Matthieu Boutierc35fafd2012-01-23 23:46:32 +0100350 int rc, parsed_len;
Paul Jakma57345092011-12-25 17:52:09 +0100351 if(len < 10) {
352 if(len < 2 || message[3] & 0x80)
353 have_v4_prefix = have_v6_prefix = 0;
354 goto fail;
355 }
356 DO_NTOHS(interval, message + 6);
357 DO_NTOHS(seqno, message + 8);
358 DO_NTOHS(metric, message + 10);
359 if(message[5] == 0 ||
360 (message[3] == 1 ? have_v4_prefix : have_v6_prefix))
361 rc = network_prefix(message[2], message[4], message[5],
362 message + 12,
363 message[2] == 1 ? v4_prefix : v6_prefix,
364 len - 10, prefix);
365 else
366 rc = -1;
367 if(rc < 0) {
368 if(message[3] & 0x80)
369 have_v4_prefix = have_v6_prefix = 0;
370 goto fail;
371 }
Matthieu Boutierc35fafd2012-01-23 23:46:32 +0100372 parsed_len = 10 + rc;
Paul Jakma57345092011-12-25 17:52:09 +0100373
374 plen = message[4] + (message[2] == 1 ? 96 : 0);
375
376 if(message[3] & 0x80) {
377 if(message[2] == 1) {
378 memcpy(v4_prefix, prefix, 16);
379 have_v4_prefix = 1;
380 } else {
381 memcpy(v6_prefix, prefix, 16);
382 have_v6_prefix = 1;
383 }
384 }
385 if(message[3] & 0x40) {
386 if(message[2] == 1) {
387 memset(router_id, 0, 4);
388 memcpy(router_id + 4, prefix + 12, 4);
389 } else {
390 memcpy(router_id, prefix + 8, 8);
391 }
392 have_router_id = 1;
393 }
394 if(!have_router_id && message[2] != 0) {
Matthieu Boutier4eedea52012-01-17 22:46:21 +0100395 zlog_err("Received prefix with no router id.");
Paul Jakma57345092011-12-25 17:52:09 +0100396 goto fail;
397 }
398 debugf(BABEL_DEBUG_COMMON,"Received update%s%s for %s from %s on %s.",
399 (message[3] & 0x80) ? "/prefix" : "",
400 (message[3] & 0x40) ? "/id" : "",
401 format_prefix(prefix, plen),
402 format_address(from), ifp->name);
403
404 if(message[2] == 0) {
405 if(metric < 0xFFFF) {
Matthieu Boutier4eedea52012-01-17 22:46:21 +0100406 zlog_err("Received wildcard update with finite metric.");
Paul Jakma57345092011-12-25 17:52:09 +0100407 goto done;
408 }
409 retract_neighbour_routes(neigh);
410 goto done;
411 } else if(message[2] == 1) {
412 if(!have_v4_nh)
413 goto fail;
414 nh = v4_nh;
415 } else if(have_v6_nh) {
416 nh = v6_nh;
417 } else {
418 nh = neigh->address;
419 }
420
421 if(message[2] == 1) {
422 if(!babel_get_if_nfo(ifp)->ipv4)
423 goto done;
424 }
425
Matthieu Boutierc35fafd2012-01-23 23:46:32 +0100426 if((ifp->flags & BABEL_IF_FARAWAY)) {
427 channels[0] = 0;
428 } else {
429 /* This will be overwritten by parse_route_attributes below. */
430 if(metric < 256) {
431 /* Assume non-interfering (wired) link. */
432 channels[0] = 0;
433 } else {
434 /* Assume interfering. */
435 channels[0] = BABEL_IF_CHANNEL_INTERFERING;
436 channels[1] = 0;
437 }
438
439 if(parsed_len < len)
440 parse_route_attributes(message + 2 + parsed_len,
441 len - parsed_len, channels);
442 }
443
Paul Jakma57345092011-12-25 17:52:09 +0100444 update_route(router_id, prefix, plen, seqno, metric, interval,
Matthieu Boutierc35fafd2012-01-23 23:46:32 +0100445 neigh, nh,
446 channels, channels_len(channels));
Paul Jakma57345092011-12-25 17:52:09 +0100447 } else if(type == MESSAGE_REQUEST) {
448 unsigned char prefix[16], plen;
449 int rc;
450 if(len < 2) goto fail;
451 rc = network_prefix(message[2], message[3], 0,
452 message + 4, NULL, len - 2, prefix);
453 if(rc < 0) goto fail;
454 plen = message[3] + (message[2] == 1 ? 96 : 0);
455 debugf(BABEL_DEBUG_COMMON,"Received request for %s from %s on %s.",
456 message[2] == 0 ? "any" : format_prefix(prefix, plen),
457 format_address(from), ifp->name);
458 if(message[2] == 0) {
Matthieu Boutierc35fafd2012-01-23 23:46:32 +0100459 struct babel_interface *babel_ifp =babel_get_if_nfo(neigh->ifp);
Paul Jakma57345092011-12-25 17:52:09 +0100460 /* If a neighbour is requesting a full route dump from us,
461 we might as well send it an IHU. */
462 send_ihu(neigh, NULL);
Matthieu Boutierc35fafd2012-01-23 23:46:32 +0100463 /* Since nodes send wildcard requests on boot, booting
464 a large number of nodes at the same time may cause an
465 update storm. Ignore a wildcard request that happens
466 shortly after we sent a full update. */
467 if(babel_ifp->last_update_time <
Juliusz Chroboczek52d54422012-02-11 13:08:00 +0100468 (time_t)(babel_now.tv_sec -
469 MAX(babel_ifp->hello_interval / 100, 1)))
Matthieu Boutierc35fafd2012-01-23 23:46:32 +0100470 send_update(neigh->ifp, 0, NULL, 0);
Paul Jakma57345092011-12-25 17:52:09 +0100471 } else {
472 send_update(neigh->ifp, 0, prefix, plen);
473 }
474 } else if(type == MESSAGE_MH_REQUEST) {
475 unsigned char prefix[16], plen;
476 unsigned short seqno;
477 int rc;
478 if(len < 14) goto fail;
479 DO_NTOHS(seqno, message + 4);
480 rc = network_prefix(message[2], message[3], 0,
481 message + 16, NULL, len - 14, prefix);
482 if(rc < 0) goto fail;
483 plen = message[3] + (message[2] == 1 ? 96 : 0);
484 debugf(BABEL_DEBUG_COMMON,"Received request (%d) for %s from %s on %s (%s, %d).",
485 message[6],
486 format_prefix(prefix, plen),
487 format_address(from), ifp->name,
488 format_eui64(message + 8), seqno);
489 handle_request(neigh, prefix, plen, message[6],
490 seqno, message + 8);
491 } else {
492 debugf(BABEL_DEBUG_COMMON,"Received unknown packet type %d from %s on %s.",
493 type, format_address(from), ifp->name);
494 }
495 done:
496 i += len + 2;
497 continue;
498
499 fail:
Matthieu Boutier4eedea52012-01-17 22:46:21 +0100500 zlog_err("Couldn't parse packet (%d, %d) from %s on %s.",
501 message[0], message[1], format_address(from), ifp->name);
Paul Jakma57345092011-12-25 17:52:09 +0100502 goto done;
503 }
504 return;
505}
506
507/* Under normal circumstances, there are enough moderation mechanisms
508 elsewhere in the protocol to make sure that this last-ditch check
509 should never trigger. But I'm superstitious. */
510
511static int
512check_bucket(struct interface *ifp)
513{
514 babel_interface_nfo *babel_ifp = babel_get_if_nfo(ifp);
515 if(babel_ifp->bucket <= 0) {
516 int seconds = babel_now.tv_sec - babel_ifp->bucket_time;
517 if(seconds > 0) {
518 babel_ifp->bucket = MIN(BUCKET_TOKENS_MAX,
519 seconds * BUCKET_TOKENS_PER_SEC);
520 }
521 /* Reset bucket time unconditionally, in case clock is stepped. */
522 babel_ifp->bucket_time = babel_now.tv_sec;
523 }
524
525 if(babel_ifp->bucket > 0) {
526 babel_ifp->bucket--;
527 return 1;
528 } else {
529 return 0;
530 }
531}
532
/* Send ifp's buffered multicast TLVs as one Babel packet (if any) and
   reset all per-buffer state.  Buffered updates are flushed into the
   buffer first so they travel in the same packet when possible. */
void
flushbuf(struct interface *ifp)
{
    int rc;
    struct sockaddr_in6 sin6;
    babel_interface_nfo *babel_ifp = babel_get_if_nfo(ifp);

    assert(babel_ifp->buffered <= babel_ifp->bufsize);

    flushupdates(ifp);

    if(babel_ifp->buffered > 0) {
        debugf(BABEL_DEBUG_COMMON," (flushing %d buffered bytes on %s)",
               babel_ifp->buffered, ifp->name);
        if(check_bucket(ifp)) {
            memset(&sin6, 0, sizeof(sin6));
            sin6.sin6_family = AF_INET6;
            /* Babel multicast group, scoped to this interface. */
            memcpy(&sin6.sin6_addr, protocol_group, 16);
            sin6.sin6_port = htons(protocol_port);
            sin6.sin6_scope_id = ifp->ifindex;
            /* Patch the body length into the shared packet header. */
            DO_HTONS(packet_header + 2, babel_ifp->buffered);
            rc = babel_send(protocol_socket,
                            packet_header, sizeof(packet_header),
                            babel_ifp->sendbuf, babel_ifp->buffered,
                            (struct sockaddr*)&sin6, sizeof(sin6));
            if(rc < 0)
                zlog_err("send: %s", safe_strerror(errno));
        } else {
            /* Rate limited: drop rather than burst. */
            zlog_err("Warning: bucket full, dropping packet to %s.",
                     ifp->name);
        }
    }
    /* Whether sent or dropped, forget everything that was coalesced
       into the buffer. */
    VALGRIND_MAKE_MEM_UNDEFINED(babel_ifp->sendbuf, babel_ifp->bufsize);
    babel_ifp->buffered = 0;
    babel_ifp->have_buffered_hello = 0;
    babel_ifp->have_buffered_id = 0;
    babel_ifp->have_buffered_nh = 0;
    babel_ifp->have_buffered_prefix = 0;
    babel_ifp->flush_timeout.tv_sec = 0;
    babel_ifp->flush_timeout.tv_usec = 0;
}
574
575static void
576schedule_flush(struct interface *ifp)
577{
578 babel_interface_nfo *babel_ifp = babel_get_if_nfo(ifp);
579 unsigned msecs = jitter(babel_ifp, 0);
580 if(babel_ifp->flush_timeout.tv_sec != 0 &&
581 timeval_minus_msec(&babel_ifp->flush_timeout, &babel_now) < msecs)
582 return;
583 set_timeout(&babel_ifp->flush_timeout, msecs);
584}
585
586static void
587schedule_flush_now(struct interface *ifp)
588{
589 babel_interface_nfo *babel_ifp = babel_get_if_nfo(ifp);
590 /* Almost now */
591 unsigned msecs = roughly(10);
592 if(babel_ifp->flush_timeout.tv_sec != 0 &&
593 timeval_minus_msec(&babel_ifp->flush_timeout, &babel_now) < msecs)
594 return;
595 set_timeout(&babel_ifp->flush_timeout, msecs);
596}
597
598static void
599schedule_unicast_flush(unsigned msecs)
600{
601 if(!unicast_neighbour)
602 return;
603 if(unicast_flush_timeout.tv_sec != 0 &&
604 timeval_minus_msec(&unicast_flush_timeout, &babel_now) < msecs)
605 return;
606 unicast_flush_timeout.tv_usec = (babel_now.tv_usec + msecs * 1000) %1000000;
607 unicast_flush_timeout.tv_sec =
608 babel_now.tv_sec + (babel_now.tv_usec / 1000 + msecs) / 1000;
609}
610
611static void
612ensure_space(struct interface *ifp, int space)
613{
614 babel_interface_nfo *babel_ifp = babel_get_if_nfo(ifp);
615 if(babel_ifp->bufsize - babel_ifp->buffered < space)
616 flushbuf(ifp);
617}
618
619static void
620start_message(struct interface *ifp, int type, int len)
621{
622 babel_interface_nfo *babel_ifp = babel_get_if_nfo(ifp);
623 if(babel_ifp->bufsize - babel_ifp->buffered < len + 2)
624 flushbuf(ifp);
625 babel_ifp->sendbuf[babel_ifp->buffered++] = type;
626 babel_ifp->sendbuf[babel_ifp->buffered++] = len;
627}
628
629static void
630end_message(struct interface *ifp, int type, int bytes)
631{
632 babel_interface_nfo *babel_ifp = babel_get_if_nfo(ifp);
633 assert(babel_ifp->buffered >= bytes + 2 &&
634 babel_ifp->sendbuf[babel_ifp->buffered - bytes - 2] == type &&
635 babel_ifp->sendbuf[babel_ifp->buffered - bytes - 1] == bytes);
636 schedule_flush(ifp);
637}
638
639static void
640accumulate_byte(struct interface *ifp, unsigned char value)
641{
642 babel_interface_nfo *babel_ifp = babel_get_if_nfo(ifp);
643 babel_ifp->sendbuf[babel_ifp->buffered++] = value;
644}
645
646static void
647accumulate_short(struct interface *ifp, unsigned short value)
648{
649 babel_interface_nfo *babel_ifp = babel_get_if_nfo(ifp);
650 DO_HTONS(babel_ifp->sendbuf + babel_ifp->buffered, value);
651 babel_ifp->buffered += 2;
652}
653
654static void
655accumulate_bytes(struct interface *ifp,
656 const unsigned char *value, unsigned len)
657{
658 babel_interface_nfo *babel_ifp = babel_get_if_nfo(ifp);
659 memcpy(babel_ifp->sendbuf + babel_ifp->buffered, value, len);
660 babel_ifp->buffered += len;
661}
662
663static int
664start_unicast_message(struct neighbour *neigh, int type, int len)
665{
666 if(unicast_neighbour) {
667 if(neigh != unicast_neighbour ||
668 unicast_buffered + len + 2 >=
669 MIN(UNICAST_BUFSIZE, babel_get_if_nfo(neigh->ifp)->bufsize))
670 flush_unicast(0);
671 }
672 if(!unicast_buffer)
673 unicast_buffer = malloc(UNICAST_BUFSIZE);
674 if(!unicast_buffer) {
675 zlog_err("malloc(unicast_buffer): %s", safe_strerror(errno));
676 return -1;
677 }
678
679 unicast_neighbour = neigh;
680
681 unicast_buffer[unicast_buffered++] = type;
682 unicast_buffer[unicast_buffered++] = len;
683 return 1;
684}
685
686static void
687end_unicast_message(struct neighbour *neigh, int type, int bytes)
688{
689 assert(unicast_neighbour == neigh && unicast_buffered >= bytes + 2 &&
690 unicast_buffer[unicast_buffered - bytes - 2] == type &&
691 unicast_buffer[unicast_buffered - bytes - 1] == bytes);
692 schedule_unicast_flush(jitter(babel_get_if_nfo(neigh->ifp), 0));
693}
694
695static void
696accumulate_unicast_byte(struct neighbour *neigh, unsigned char value)
697{
698 unicast_buffer[unicast_buffered++] = value;
699}
700
701static void
702accumulate_unicast_short(struct neighbour *neigh, unsigned short value)
703{
704 DO_HTONS(unicast_buffer + unicast_buffered, value);
705 unicast_buffered += 2;
706}
707
708static void
709accumulate_unicast_bytes(struct neighbour *neigh,
710 const unsigned char *value, unsigned len)
711{
712 memcpy(unicast_buffer + unicast_buffered, value, len);
713 unicast_buffered += len;
714}
715
716void
717send_ack(struct neighbour *neigh, unsigned short nonce, unsigned short interval)
718{
719 int rc;
720 debugf(BABEL_DEBUG_COMMON,"Sending ack (%04x) to %s on %s.",
721 nonce, format_address(neigh->address), neigh->ifp->name);
722 rc = start_unicast_message(neigh, MESSAGE_ACK, 2); if(rc < 0) return;
723 accumulate_unicast_short(neigh, nonce);
724 end_unicast_message(neigh, MESSAGE_ACK, 2);
725 /* Roughly yields a value no larger than 3/2, so this meets the deadline */
726 schedule_unicast_flush(roughly(interval * 6));
727}
728
729void
730send_hello_noupdate(struct interface *ifp, unsigned interval)
731{
732 babel_interface_nfo *babel_ifp = babel_get_if_nfo(ifp);
733 /* This avoids sending multiple hellos in a single packet, which breaks
734 link quality estimation. */
735 if(babel_ifp->have_buffered_hello)
736 flushbuf(ifp);
737
738 babel_ifp->hello_seqno = seqno_plus(babel_ifp->hello_seqno, 1);
739 set_timeout(&babel_ifp->hello_timeout, babel_ifp->hello_interval);
740
741 if(!if_up(ifp))
742 return;
743
744 debugf(BABEL_DEBUG_COMMON,"Sending hello %d (%d) to %s.",
745 babel_ifp->hello_seqno, interval, ifp->name);
746
747 start_message(ifp, MESSAGE_HELLO, 6);
748 accumulate_short(ifp, 0);
749 accumulate_short(ifp, babel_ifp->hello_seqno);
750 accumulate_short(ifp, interval > 0xFFFF ? 0xFFFF : interval);
751 end_message(ifp, MESSAGE_HELLO, 6);
752 babel_ifp->have_buffered_hello = 1;
753}
754
755void
756send_hello(struct interface *ifp)
757{
758 babel_interface_nfo *babel_ifp = babel_get_if_nfo(ifp);
759 send_hello_noupdate(ifp, (babel_ifp->hello_interval + 9) / 10);
760 /* Send full IHU every 3 hellos, and marginal IHU each time */
Juliusz Chroboczek52d54422012-02-11 13:08:00 +0100761 if(babel_ifp->hello_seqno % 3 == 0)
Paul Jakma57345092011-12-25 17:52:09 +0100762 send_ihu(NULL, ifp);
763 else
764 send_marginal_ihu(ifp);
765}
766
/* Send the unicast buffer to its neighbour (if non-empty and the
   interface is up) and reset all unicast buffering state.  When dofree
   is non-zero the buffer itself is released as well. */
void
flush_unicast(int dofree)
{
    struct sockaddr_in6 sin6;
    int rc;

    if(unicast_buffered == 0)
        goto done;

    /* unicast_neighbour is non-NULL whenever unicast_buffered > 0. */
    if(!if_up(unicast_neighbour->ifp))
        goto done;

    /* Preserve ordering of messages */
    flushbuf(unicast_neighbour->ifp);

    if(check_bucket(unicast_neighbour->ifp)) {
        memset(&sin6, 0, sizeof(sin6));
        sin6.sin6_family = AF_INET6;
        memcpy(&sin6.sin6_addr, unicast_neighbour->address, 16);
        sin6.sin6_port = htons(protocol_port);
        sin6.sin6_scope_id = unicast_neighbour->ifp->ifindex;
        /* Patch the body length into the shared packet header. */
        DO_HTONS(packet_header + 2, unicast_buffered);
        rc = babel_send(protocol_socket,
                        packet_header, sizeof(packet_header),
                        unicast_buffer, unicast_buffered,
                        (struct sockaddr*)&sin6, sizeof(sin6));
        if(rc < 0)
            zlog_err("send(unicast): %s", safe_strerror(errno));
    } else {
        /* Rate limited: drop rather than burst. */
        zlog_err("Warning: bucket full, dropping unicast packet to %s if %s.",
                 format_address(unicast_neighbour->address),
                 unicast_neighbour->ifp->name);
    }

 done:
    /* Whether sent or dropped, reset all unicast buffering state. */
    VALGRIND_MAKE_MEM_UNDEFINED(unicast_buffer, UNICAST_BUFSIZE);
    unicast_buffered = 0;
    if(dofree && unicast_buffer) {
        free(unicast_buffer);
        unicast_buffer = NULL;
    }
    unicast_neighbour = NULL;
    unicast_flush_timeout.tv_sec = 0;
    unicast_flush_timeout.tv_usec = 0;
}
812
813static void
814really_send_update(struct interface *ifp,
815 const unsigned char *id,
816 const unsigned char *prefix, unsigned char plen,
Matthieu Boutierc35fafd2012-01-23 23:46:32 +0100817 unsigned short seqno, unsigned short metric,
818 unsigned char *channels, int channels_len)
Paul Jakma57345092011-12-25 17:52:09 +0100819{
820 babel_interface_nfo *babel_ifp = babel_get_if_nfo(ifp);
821 int add_metric, v4, real_plen, omit = 0;
822 const unsigned char *real_prefix;
823 unsigned short flags = 0;
Matthieu Boutierc35fafd2012-01-23 23:46:32 +0100824 int channels_size;
825
826 if(diversity_kind != DIVERSITY_CHANNEL)
827 channels_len = -1;
828
829 channels_size = channels_len >= 0 ? channels_len + 2 : 0;
Paul Jakma57345092011-12-25 17:52:09 +0100830
831 if(!if_up(ifp))
832 return;
833
834 add_metric = output_filter(id, prefix, plen, ifp->ifindex);
835 if(add_metric >= INFINITY)
836 return;
837
838 metric = MIN(metric + add_metric, INFINITY);
839 /* Worst case */
840 ensure_space(ifp, 20 + 12 + 28);
841
842 v4 = plen >= 96 && v4mapped(prefix);
843
844 if(v4) {
845 if(!babel_ifp->ipv4)
846 return;
847 if(!babel_ifp->have_buffered_nh ||
848 memcmp(babel_ifp->buffered_nh, babel_ifp->ipv4, 4) != 0) {
849 start_message(ifp, MESSAGE_NH, 6);
850 accumulate_byte(ifp, 1);
851 accumulate_byte(ifp, 0);
852 accumulate_bytes(ifp, babel_ifp->ipv4, 4);
853 end_message(ifp, MESSAGE_NH, 6);
854 memcpy(babel_ifp->buffered_nh, babel_ifp->ipv4, 4);
855 babel_ifp->have_buffered_nh = 1;
856 }
857
858 real_prefix = prefix + 12;
859 real_plen = plen - 96;
860 } else {
861 if(babel_ifp->have_buffered_prefix) {
862 while(omit < plen / 8 &&
863 babel_ifp->buffered_prefix[omit] == prefix[omit])
864 omit++;
865 }
866 if(!babel_ifp->have_buffered_prefix || plen >= 48)
867 flags |= 0x80;
868 real_prefix = prefix;
869 real_plen = plen;
870 }
871
872 if(!babel_ifp->have_buffered_id
873 || memcmp(id, babel_ifp->buffered_id, 8) != 0) {
874 if(real_plen == 128 && memcmp(real_prefix + 8, id, 8) == 0) {
875 flags |= 0x40;
876 } else {
877 start_message(ifp, MESSAGE_ROUTER_ID, 10);
878 accumulate_short(ifp, 0);
879 accumulate_bytes(ifp, id, 8);
880 end_message(ifp, MESSAGE_ROUTER_ID, 10);
881 }
882 memcpy(babel_ifp->buffered_id, id, 16);
883 babel_ifp->have_buffered_id = 1;
884 }
885
Matthieu Boutierc35fafd2012-01-23 23:46:32 +0100886 start_message(ifp, MESSAGE_UPDATE, 10 + (real_plen + 7) / 8 - omit +
887 channels_size);
Paul Jakma57345092011-12-25 17:52:09 +0100888 accumulate_byte(ifp, v4 ? 1 : 2);
889 accumulate_byte(ifp, flags);
890 accumulate_byte(ifp, real_plen);
891 accumulate_byte(ifp, omit);
892 accumulate_short(ifp, (babel_ifp->update_interval + 5) / 10);
893 accumulate_short(ifp, seqno);
894 accumulate_short(ifp, metric);
895 accumulate_bytes(ifp, real_prefix + omit, (real_plen + 7) / 8 - omit);
Matthieu Boutierc35fafd2012-01-23 23:46:32 +0100896 /* Note that an empty channels TLV is different from no such TLV. */
897 if(channels_len >= 0) {
898 accumulate_byte(ifp, 2);
899 accumulate_byte(ifp, channels_len);
900 accumulate_bytes(ifp, channels, channels_len);
901 }
902 end_message(ifp, MESSAGE_UPDATE, 10 + (real_plen + 7) / 8 - omit +
903 channels_size);
Paul Jakma57345092011-12-25 17:52:09 +0100904
905 if(flags & 0x80) {
906 memcpy(babel_ifp->buffered_prefix, prefix, 16);
907 babel_ifp->have_buffered_prefix = 1;
908 }
909}
910
911static int
912compare_buffered_updates(const void *av, const void *bv)
913{
914 const struct buffered_update *a = av, *b = bv;
915 int rc, v4a, v4b, ma, mb;
916
917 rc = memcmp(a->id, b->id, 8);
918 if(rc != 0)
919 return rc;
920
921 v4a = (a->plen >= 96 && v4mapped(a->prefix));
922 v4b = (b->plen >= 96 && v4mapped(b->prefix));
923
924 if(v4a > v4b)
925 return 1;
926 else if(v4a < v4b)
927 return -1;
928
929 ma = (!v4a && a->plen == 128 && memcmp(a->prefix + 8, a->id, 8) == 0);
930 mb = (!v4b && b->plen == 128 && memcmp(b->prefix + 8, b->id, 8) == 0);
931
932 if(ma > mb)
933 return -1;
934 else if(mb > ma)
935 return 1;
936
937 if(a->plen < b->plen)
938 return 1;
939 else if(a->plen > b->plen)
940 return -1;
941
942 return memcmp(a->prefix, b->prefix, 16);
943}
944
/* Send out every update buffered on interface ifp (or on all interfaces
   when ifp is NULL).  Updates are sorted so that messages sharing a
   router-id can be aggregated, duplicates are suppressed, and each
   prefix is sent either from an exported route (xroute), from the
   installed route, or as a retraction.  Always clears the per-interface
   flush timer on exit. */
void
flushupdates(struct interface *ifp)
{
    babel_interface_nfo *babel_ifp = NULL;
    struct xroute *xroute;
    struct babel_route *route;
    const unsigned char *last_prefix = NULL;
    unsigned char last_plen = 0xFF;
    int i;

    /* Wildcard: recurse over every known interface. */
    if(ifp == NULL) {
        struct interface *ifp_aux;
        struct listnode *linklist_node = NULL;
        FOR_ALL_INTERFACES(ifp_aux, linklist_node)
            flushupdates(ifp_aux);
        return;
    }

    babel_ifp = babel_get_if_nfo(ifp);
    if(babel_ifp->num_buffered_updates > 0) {
        struct buffered_update *b = babel_ifp->buffered_updates;
        int n = babel_ifp->num_buffered_updates;

        /* Detach the buffer first, so that re-entrant calls (e.g. from
           really_send_update scheduling work) see an empty buffer. */
        babel_ifp->buffered_updates = NULL;
        babel_ifp->update_bufsize = 0;
        babel_ifp->num_buffered_updates = 0;

        if(!if_up(ifp))
            goto done;

        debugf(BABEL_DEBUG_COMMON," (flushing %d buffered updates on %s (%d))",
               n, ifp->name, ifp->ifindex);

        /* In order to send fewer update messages, we want to send updates
           with the same router-id together, with IPv6 going out before IPv4. */

        /* Stamp each entry with the router-id it will be sent under,
           so the comparator can group them. */
        for(i = 0; i < n; i++) {
            route = find_installed_route(b[i].prefix, b[i].plen);
            if(route)
                memcpy(b[i].id, route->src->id, 8);
            else
                memcpy(b[i].id, myid, 8);
        }

        qsort(b, n, sizeof(struct buffered_update), compare_buffered_updates);

        for(i = 0; i < n; i++) {
            /* The same update may be scheduled multiple times before it is
               sent out. Since our buffer is now sorted, it is enough to
               compare with the previous update. */

            if(last_prefix) {
                if(b[i].plen == last_plen &&
                   memcmp(b[i].prefix, last_prefix, 16) == 0)
                    continue;
            }

            xroute = find_xroute(b[i].prefix, b[i].plen);
            route = find_installed_route(b[i].prefix, b[i].plen);

            /* Prefer announcing our own exported route when it is at
               least as good as the installed one. */
            if(xroute && (!route || xroute->metric <= kernel_metric)) {
                really_send_update(ifp, myid,
                                   xroute->prefix, xroute->plen,
                                   myseqno, xroute->metric,
                                   NULL, 0);
                last_prefix = xroute->prefix;
                last_plen = xroute->plen;
            } else if(route) {
                unsigned char channels[DIVERSITY_HOPS];
                int chlen;
                struct interface *route_ifp = route->neigh->ifp;
                struct babel_interface *babel_route_ifp = NULL;
                unsigned short metric;
                unsigned short seqno;

                seqno = route->seqno;
                /* Charge the interference penalty only when this route
                   actually interferes with the outgoing interface. */
                metric =
                    route_interferes(route, ifp) ?
                    route_metric(route) :
                    route_metric_noninterfering(route);

                if(metric < INFINITY)
                    satisfy_request(route->src->prefix, route->src->plen,
                                    seqno, route->src->id, ifp);
                /* Split horizon: never re-announce a route on the
                   interface it was learnt from. */
                if((babel_ifp->flags & BABEL_IF_SPLIT_HORIZON) &&
                   route->neigh->ifp == ifp)
                    continue;

                /* Build the diversity channel list: prepend the incoming
                   interface's channel unless it is non-interfering. */
                babel_route_ifp = babel_get_if_nfo(route_ifp);
                if(babel_route_ifp->channel ==BABEL_IF_CHANNEL_NONINTERFERING) {
                    memcpy(channels, route->channels, DIVERSITY_HOPS);
                } else {
                    if(babel_route_ifp->channel == BABEL_IF_CHANNEL_UNKNOWN)
                        channels[0] = BABEL_IF_CHANNEL_INTERFERING;
                    else {
                        assert(babel_route_ifp->channel > 0 &&
                               babel_route_ifp->channel <= 255);
                        channels[0] = babel_route_ifp->channel;
                    }
                    memcpy(channels + 1, route->channels, DIVERSITY_HOPS - 1);
                }

                chlen = channels_len(channels);
                really_send_update(ifp, route->src->id,
                                   route->src->prefix,
                                   route->src->plen,
                                   seqno, metric,
                                   channels, chlen);
                update_source(route->src, seqno, metric);
                last_prefix = route->src->prefix;
                last_plen = route->src->plen;
            } else {
                /* There's no route for this prefix. This can happen shortly
                   after an xroute has been retracted, so send a retraction. */
                really_send_update(ifp, myid, b[i].prefix, b[i].plen,
                                   myseqno, INFINITY, NULL, -1);
            }
        }
        schedule_flush_now(ifp);
    done:
        free(b);
    }
    /* Disarm the flush timer; a zero tv_sec marks it as unset. */
    babel_ifp->update_flush_timeout.tv_sec = 0;
    babel_ifp->update_flush_timeout.tv_usec = 0;
}
1070
1071static void
1072schedule_update_flush(struct interface *ifp, int urgent)
1073{
1074 babel_interface_nfo *babel_ifp = babel_get_if_nfo(ifp);
1075 unsigned msecs;
1076 msecs = update_jitter(babel_ifp, urgent);
1077 if(babel_ifp->update_flush_timeout.tv_sec != 0 &&
1078 timeval_minus_msec(&babel_ifp->update_flush_timeout, &babel_now) < msecs)
1079 return;
1080 set_timeout(&babel_ifp->update_flush_timeout, msecs);
1081}
1082
1083static void
1084buffer_update(struct interface *ifp,
1085 const unsigned char *prefix, unsigned char plen)
1086{
1087 babel_interface_nfo *babel_ifp = babel_get_if_nfo(ifp);
1088 if(babel_ifp->num_buffered_updates > 0 &&
1089 babel_ifp->num_buffered_updates >= babel_ifp->update_bufsize)
1090 flushupdates(ifp);
1091
1092 if(babel_ifp->update_bufsize == 0) {
1093 int n;
1094 assert(babel_ifp->buffered_updates == NULL);
Matthieu Boutierc35fafd2012-01-23 23:46:32 +01001095 /* Allocate enough space to hold a full update. Since the
1096 number of installed routes will grow over time, make sure we
1097 have enough space to send a full-ish frame. */
1098 n = installed_routes_estimate() + xroutes_estimate() + 4;
1099 n = MAX(n, babel_ifp->bufsize / 16);
Paul Jakma57345092011-12-25 17:52:09 +01001100 again:
1101 babel_ifp->buffered_updates = malloc(n *sizeof(struct buffered_update));
1102 if(babel_ifp->buffered_updates == NULL) {
1103 zlog_err("malloc(buffered_updates): %s", safe_strerror(errno));
1104 if(n > 4) {
Matthieu Boutierc35fafd2012-01-23 23:46:32 +01001105 /* Try again with a tiny buffer. */
Paul Jakma57345092011-12-25 17:52:09 +01001106 n = 4;
1107 goto again;
1108 }
1109 return;
1110 }
1111 babel_ifp->update_bufsize = n;
1112 babel_ifp->num_buffered_updates = 0;
1113 }
1114
1115 memcpy(babel_ifp->buffered_updates[babel_ifp->num_buffered_updates].prefix,
1116 prefix, 16);
1117 babel_ifp->buffered_updates[babel_ifp->num_buffered_updates].plen = plen;
1118 babel_ifp->num_buffered_updates++;
1119}
1120
Matthieu Boutierc35fafd2012-01-23 23:46:32 +01001121static void
1122buffer_update_callback(struct babel_route *route, void *closure)
1123{
1124 buffer_update((struct interface*)closure,
1125 route->src->prefix, route->src->plen);
1126}
1127
Paul Jakma57345092011-12-25 17:52:09 +01001128void
1129send_update(struct interface *ifp, int urgent,
1130 const unsigned char *prefix, unsigned char plen)
1131{
1132 babel_interface_nfo *babel_ifp = NULL;
Paul Jakma57345092011-12-25 17:52:09 +01001133
1134 if(ifp == NULL) {
1135 struct interface *ifp_aux;
1136 struct listnode *linklist_node = NULL;
Denis Ovsienkoef4de4d2012-01-08 15:29:19 +04001137 struct babel_route *route;
Paul Jakma57345092011-12-25 17:52:09 +01001138 FOR_ALL_INTERFACES(ifp_aux, linklist_node)
1139 send_update(ifp_aux, urgent, prefix, plen);
1140 if(prefix) {
1141 /* Since flushupdates only deals with non-wildcard interfaces, we
1142 need to do this now. */
1143 route = find_installed_route(prefix, plen);
1144 if(route && route_metric(route) < INFINITY)
1145 satisfy_request(prefix, plen, route->src->seqno, route->src->id,
1146 NULL);
1147 }
1148 return;
1149 }
1150
1151 if(!if_up(ifp))
1152 return;
1153
1154 babel_ifp = babel_get_if_nfo(ifp);
1155 if(prefix) {
1156 if(!parasitic || find_xroute(prefix, plen)) {
1157 debugf(BABEL_DEBUG_COMMON,"Sending update to %s for %s.",
1158 ifp->name, format_prefix(prefix, plen));
1159 buffer_update(ifp, prefix, plen);
1160 }
1161 } else {
Juliusz Chroboczek52d54422012-02-11 13:08:00 +01001162 send_self_update(ifp);
1163 if(!parasitic) {
1164 debugf(BABEL_DEBUG_COMMON,"Sending update to %s for any.",
1165 ifp->name);
1166 for_all_installed_routes(buffer_update_callback, ifp);
Paul Jakma57345092011-12-25 17:52:09 +01001167 }
1168 set_timeout(&babel_ifp->update_timeout, babel_ifp->update_interval);
Matthieu Boutierc35fafd2012-01-23 23:46:32 +01001169 babel_ifp->last_update_time = babel_now.tv_sec;
Paul Jakma57345092011-12-25 17:52:09 +01001170 }
1171 schedule_update_flush(ifp, urgent);
1172}
1173
1174void
1175send_update_resend(struct interface *ifp,
1176 const unsigned char *prefix, unsigned char plen)
1177{
Paul Jakma57345092011-12-25 17:52:09 +01001178 assert(prefix != NULL);
1179
1180 send_update(ifp, 1, prefix, plen);
Juliusz Chroboczek52d54422012-02-11 13:08:00 +01001181 record_resend(RESEND_UPDATE, prefix, plen, 0, 0, NULL, resend_delay);
Paul Jakma57345092011-12-25 17:52:09 +01001182}
1183
1184void
1185send_wildcard_retraction(struct interface *ifp)
1186{
1187 babel_interface_nfo *babel_ifp = NULL;
1188 if(ifp == NULL) {
1189 struct interface *ifp_aux;
1190 struct listnode *linklist_node = NULL;
1191 FOR_ALL_INTERFACES(ifp_aux, linklist_node)
1192 send_wildcard_retraction(ifp_aux);
1193 return;
1194 }
1195
1196 if(!if_up(ifp))
1197 return;
1198
1199 babel_ifp = babel_get_if_nfo(ifp);
1200 start_message(ifp, MESSAGE_UPDATE, 10);
1201 accumulate_byte(ifp, 0);
1202 accumulate_byte(ifp, 0x40);
1203 accumulate_byte(ifp, 0);
1204 accumulate_byte(ifp, 0);
1205 accumulate_short(ifp, 0xFFFF);
1206 accumulate_short(ifp, myseqno);
1207 accumulate_short(ifp, 0xFFFF);
1208 end_message(ifp, MESSAGE_UPDATE, 10);
1209
1210 babel_ifp->have_buffered_id = 0;
1211}
1212
1213void
1214update_myseqno()
1215{
1216 myseqno = seqno_plus(myseqno, 1);
1217 seqno_time = babel_now;
1218}
1219
Matthieu Boutierc35fafd2012-01-23 23:46:32 +01001220static void
1221send_xroute_update_callback(struct xroute *xroute, void *closure)
1222{
1223 struct interface *ifp = (struct interface*)closure;
1224 send_update(ifp, 0, xroute->prefix, xroute->plen);
1225}
1226
Paul Jakma57345092011-12-25 17:52:09 +01001227void
1228send_self_update(struct interface *ifp)
1229{
Paul Jakma57345092011-12-25 17:52:09 +01001230 if(ifp == NULL) {
1231 struct interface *ifp_aux;
1232 struct listnode *linklist_node = NULL;
1233 FOR_ALL_INTERFACES(ifp_aux, linklist_node) {
1234 if(!if_up(ifp_aux))
1235 continue;
1236 send_self_update(ifp_aux);
1237 }
1238 return;
1239 }
1240
Juliusz Chroboczek52d54422012-02-11 13:08:00 +01001241 debugf(BABEL_DEBUG_COMMON,"Sending self update to %s.", ifp->name);
1242 for_all_xroutes(send_xroute_update_callback, ifp);
Paul Jakma57345092011-12-25 17:52:09 +01001243}
1244
1245void
1246send_ihu(struct neighbour *neigh, struct interface *ifp)
1247{
1248 babel_interface_nfo *babel_ifp = NULL;
1249 int rxcost, interval;
1250 int ll;
1251
1252 if(neigh == NULL && ifp == NULL) {
1253 struct interface *ifp_aux;
1254 struct listnode *linklist_node = NULL;
1255 FOR_ALL_INTERFACES(ifp_aux, linklist_node) {
1256 if(if_up(ifp_aux))
1257 continue;
1258 send_ihu(NULL, ifp_aux);
1259 }
1260 return;
1261 }
1262
1263 if(neigh == NULL) {
1264 struct neighbour *ngh;
1265 FOR_ALL_NEIGHBOURS(ngh) {
1266 if(ngh->ifp == ifp)
1267 send_ihu(ngh, ifp);
1268 }
1269 return;
1270 }
1271
1272
1273 if(ifp && neigh->ifp != ifp)
1274 return;
1275
1276 ifp = neigh->ifp;
1277 babel_ifp = babel_get_if_nfo(ifp);
1278 if(!if_up(ifp))
1279 return;
1280
1281 rxcost = neighbour_rxcost(neigh);
1282 interval = (babel_ifp->hello_interval * 3 + 9) / 10;
1283
1284 /* Conceptually, an IHU is a unicast message. We usually send them as
1285 multicast, since this allows aggregation into a single packet and
1286 avoids an ARP exchange. If we already have a unicast message queued
1287 for this neighbour, however, we might as well piggyback the IHU. */
1288 debugf(BABEL_DEBUG_COMMON,"Sending %sihu %d on %s to %s.",
1289 unicast_neighbour == neigh ? "unicast " : "",
1290 rxcost,
1291 neigh->ifp->name,
1292 format_address(neigh->address));
1293
1294 ll = linklocal(neigh->address);
1295
1296 if(unicast_neighbour != neigh) {
1297 start_message(ifp, MESSAGE_IHU, ll ? 14 : 22);
1298 accumulate_byte(ifp, ll ? 3 : 2);
1299 accumulate_byte(ifp, 0);
1300 accumulate_short(ifp, rxcost);
1301 accumulate_short(ifp, interval);
1302 if(ll)
1303 accumulate_bytes(ifp, neigh->address + 8, 8);
1304 else
1305 accumulate_bytes(ifp, neigh->address, 16);
1306 end_message(ifp, MESSAGE_IHU, ll ? 14 : 22);
1307 } else {
1308 int rc;
1309 rc = start_unicast_message(neigh, MESSAGE_IHU, ll ? 14 : 22);
1310 if(rc < 0) return;
1311 accumulate_unicast_byte(neigh, ll ? 3 : 2);
1312 accumulate_unicast_byte(neigh, 0);
1313 accumulate_unicast_short(neigh, rxcost);
1314 accumulate_unicast_short(neigh, interval);
1315 if(ll)
1316 accumulate_unicast_bytes(neigh, neigh->address + 8, 8);
1317 else
1318 accumulate_unicast_bytes(neigh, neigh->address, 16);
1319 end_unicast_message(neigh, MESSAGE_IHU, ll ? 14 : 22);
1320 }
1321}
1322
1323/* Send IHUs to all marginal neighbours */
1324void
1325send_marginal_ihu(struct interface *ifp)
1326{
1327 struct neighbour *neigh;
1328 FOR_ALL_NEIGHBOURS(neigh) {
1329 if(ifp && neigh->ifp != ifp)
1330 continue;
1331 if(neigh->txcost >= 384 || (neigh->reach & 0xF000) != 0xF000)
1332 send_ihu(neigh, ifp);
1333 }
1334}
1335
1336void
1337send_request(struct interface *ifp,
1338 const unsigned char *prefix, unsigned char plen)
1339{
Paul Jakma57345092011-12-25 17:52:09 +01001340 int v4, len;
1341
1342 if(ifp == NULL) {
1343 struct interface *ifp_aux;
1344 struct listnode *linklist_node = NULL;
1345 FOR_ALL_INTERFACES(ifp_aux, linklist_node) {
1346 if(if_up(ifp_aux))
1347 continue;
1348 send_request(ifp_aux, prefix, plen);
1349 }
1350 return;
1351 }
1352
1353 /* make sure any buffered updates go out before this request. */
1354 flushupdates(ifp);
1355
1356 if(!if_up(ifp))
1357 return;
1358
Paul Jakma57345092011-12-25 17:52:09 +01001359 debugf(BABEL_DEBUG_COMMON,"sending request to %s for %s.",
1360 ifp->name, prefix ? format_prefix(prefix, plen) : "any");
1361 v4 = plen >= 96 && v4mapped(prefix);
1362 len = !prefix ? 2 : v4 ? 6 : 18;
1363
1364 start_message(ifp, MESSAGE_REQUEST, len);
1365 accumulate_byte(ifp, !prefix ? 0 : v4 ? 1 : 2);
1366 accumulate_byte(ifp, !prefix ? 0 : v4 ? plen - 96 : plen);
1367 if(prefix) {
1368 if(v4)
1369 accumulate_bytes(ifp, prefix + 12, 4);
1370 else
1371 accumulate_bytes(ifp, prefix, 16);
1372 }
1373 end_message(ifp, MESSAGE_REQUEST, len);
1374}
1375
1376void
1377send_unicast_request(struct neighbour *neigh,
1378 const unsigned char *prefix, unsigned char plen)
1379{
1380 int rc, v4, len;
1381
1382 /* make sure any buffered updates go out before this request. */
1383 flushupdates(neigh->ifp);
1384
1385 debugf(BABEL_DEBUG_COMMON,"sending unicast request to %s for %s.",
1386 format_address(neigh->address),
1387 prefix ? format_prefix(prefix, plen) : "any");
1388 v4 = plen >= 96 && v4mapped(prefix);
1389 len = !prefix ? 2 : v4 ? 6 : 18;
1390
1391 rc = start_unicast_message(neigh, MESSAGE_REQUEST, len);
1392 if(rc < 0) return;
1393 accumulate_unicast_byte(neigh, !prefix ? 0 : v4 ? 1 : 2);
1394 accumulate_unicast_byte(neigh, !prefix ? 0 : v4 ? plen - 96 : plen);
1395 if(prefix) {
1396 if(v4)
1397 accumulate_unicast_bytes(neigh, prefix + 12, 4);
1398 else
1399 accumulate_unicast_bytes(neigh, prefix, 16);
1400 }
1401 end_unicast_message(neigh, MESSAGE_REQUEST, len);
1402}
1403
1404void
1405send_multihop_request(struct interface *ifp,
1406 const unsigned char *prefix, unsigned char plen,
1407 unsigned short seqno, const unsigned char *id,
1408 unsigned short hop_count)
1409{
Paul Jakma57345092011-12-25 17:52:09 +01001410 int v4, pb, len;
1411
1412 /* Make sure any buffered updates go out before this request. */
1413 flushupdates(ifp);
1414
1415 if(ifp == NULL) {
1416 struct interface *ifp_aux;
1417 struct listnode *linklist_node = NULL;
1418 FOR_ALL_INTERFACES(ifp_aux, linklist_node) {
1419 if(!if_up(ifp_aux))
1420 continue;
1421 send_multihop_request(ifp_aux, prefix, plen, seqno, id, hop_count);
1422 }
1423 return;
1424 }
1425
1426 if(!if_up(ifp))
1427 return;
1428
Paul Jakma57345092011-12-25 17:52:09 +01001429 debugf(BABEL_DEBUG_COMMON,"Sending request (%d) on %s for %s.",
1430 hop_count, ifp->name, format_prefix(prefix, plen));
1431 v4 = plen >= 96 && v4mapped(prefix);
1432 pb = v4 ? ((plen - 96) + 7) / 8 : (plen + 7) / 8;
1433 len = 6 + 8 + pb;
1434
1435 start_message(ifp, MESSAGE_MH_REQUEST, len);
1436 accumulate_byte(ifp, v4 ? 1 : 2);
1437 accumulate_byte(ifp, v4 ? plen - 96 : plen);
1438 accumulate_short(ifp, seqno);
1439 accumulate_byte(ifp, hop_count);
1440 accumulate_byte(ifp, 0);
1441 accumulate_bytes(ifp, id, 8);
1442 if(prefix) {
1443 if(v4)
1444 accumulate_bytes(ifp, prefix + 12, pb);
1445 else
1446 accumulate_bytes(ifp, prefix, pb);
1447 }
1448 end_message(ifp, MESSAGE_MH_REQUEST, len);
1449}
1450
1451void
1452send_unicast_multihop_request(struct neighbour *neigh,
1453 const unsigned char *prefix, unsigned char plen,
1454 unsigned short seqno, const unsigned char *id,
1455 unsigned short hop_count)
1456{
1457 int rc, v4, pb, len;
1458
1459 /* Make sure any buffered updates go out before this request. */
1460 flushupdates(neigh->ifp);
1461
1462 debugf(BABEL_DEBUG_COMMON,"Sending multi-hop request to %s for %s (%d hops).",
1463 format_address(neigh->address),
1464 format_prefix(prefix, plen), hop_count);
1465 v4 = plen >= 96 && v4mapped(prefix);
1466 pb = v4 ? ((plen - 96) + 7) / 8 : (plen + 7) / 8;
1467 len = 6 + 8 + pb;
1468
1469 rc = start_unicast_message(neigh, MESSAGE_MH_REQUEST, len);
1470 if(rc < 0) return;
1471 accumulate_unicast_byte(neigh, v4 ? 1 : 2);
1472 accumulate_unicast_byte(neigh, v4 ? plen - 96 : plen);
1473 accumulate_unicast_short(neigh, seqno);
1474 accumulate_unicast_byte(neigh, hop_count);
1475 accumulate_unicast_byte(neigh, 0);
1476 accumulate_unicast_bytes(neigh, id, 8);
1477 if(prefix) {
1478 if(v4)
1479 accumulate_unicast_bytes(neigh, prefix + 12, pb);
1480 else
1481 accumulate_unicast_bytes(neigh, prefix, pb);
1482 }
1483 end_unicast_message(neigh, MESSAGE_MH_REQUEST, len);
1484}
1485
1486void
1487send_request_resend(struct neighbour *neigh,
1488 const unsigned char *prefix, unsigned char plen,
1489 unsigned short seqno, unsigned char *id)
1490{
Paul Jakma57345092011-12-25 17:52:09 +01001491 if(neigh)
1492 send_unicast_multihop_request(neigh, prefix, plen, seqno, id, 127);
1493 else
1494 send_multihop_request(NULL, prefix, plen, seqno, id, 127);
1495
Paul Jakma57345092011-12-25 17:52:09 +01001496 record_resend(RESEND_REQUEST, prefix, plen, seqno, id,
Juliusz Chroboczek52d54422012-02-11 13:08:00 +01001497 neigh ? neigh->ifp : NULL, resend_delay);
Paul Jakma57345092011-12-25 17:52:09 +01001498}
1499
/* Handle an incoming (possibly multi-hop) request from neigh for
   (prefix, plen) with the given seqno, router-id and remaining hop
   count.  Either answer with an update, or forward the request towards
   a successor, or drop it. */
void
handle_request(struct neighbour *neigh, const unsigned char *prefix,
               unsigned char plen, unsigned char hop_count,
               unsigned short seqno, const unsigned char *id)
{
    struct xroute *xroute;
    struct babel_route *route;
    struct neighbour *successor = NULL;

    xroute = find_xroute(prefix, plen);
    route = find_installed_route(prefix, plen);

    /* The prefix is ours (exported route at least as good as any
       installed one): answer directly. */
    if(xroute && (!route || xroute->metric <= kernel_metric)) {
        if(hop_count > 0 && memcmp(id, myid, 8) == 0) {
            /* Seqno request addressed to our own router-id: bump our
               seqno if the requested one is newer than ours. */
            if(seqno_compare(seqno, myseqno) > 0) {
                if(seqno_minus(seqno, myseqno) > 100) {
                    /* Hopelessly out-of-date request */
                    return;
                }
                update_myseqno();
            }
        }
        send_update(neigh->ifp, 1, prefix, plen);
        return;
    }

    /* Our installed route already satisfies the request (different
       router-id, or a seqno at least as large): answer with it. */
    if(route &&
       (memcmp(id, route->src->id, 8) != 0 ||
        seqno_compare(seqno, route->seqno) <= 0)) {
        send_update(neigh->ifp, 1, prefix, plen);
        return;
    }

    /* No hops left to forward the request. */
    if(hop_count <= 1)
        return;

    if(route && memcmp(id, route->src->id, 8) == 0 &&
       seqno_minus(seqno, route->seqno) > 100) {
        /* Hopelessly out-of-date */
        return;
    }

    /* Don't forward a request we have recently forwarded already. */
    if(request_redundant(neigh->ifp, prefix, plen, seqno, id))
        return;

    /* Let's try to forward this request. */
    if(route && route_metric(route) < INFINITY)
        successor = route->neigh;

    if(!successor || successor == neigh) {
        /* We were about to forward a request to its requestor. Try to
           find a different neighbour to forward the request to. */
        struct babel_route *other_route;

        other_route = find_best_route(prefix, plen, 0, neigh);
        if(other_route && route_metric(other_route) < INFINITY)
            successor = other_route->neigh;
    }

    if(!successor || successor == neigh)
        /* Give up */
        return;

    /* Forward with a decremented hop count, and remember it so we can
       retransmit if no answer arrives. */
    send_unicast_multihop_request(successor, prefix, plen, seqno, id,
                                  hop_count - 1);
    record_resend(RESEND_REQUEST, prefix, plen, seqno, id,
                  neigh->ifp, 0);
}