blob: 9dcfc6771c63efa444951405fa0339979677eda0 [file] [log] [blame]
Paul Jakma57345092011-12-25 17:52:09 +01001/*
2 * This file is free software: you may copy, redistribute and/or modify it
3 * under the terms of the GNU General Public License as published by the
4 * Free Software Foundation, either version 2 of the License, or (at your
5 * option) any later version.
6 *
7 * This file is distributed in the hope that it will be useful, but
8 * WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
10 * General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program. If not, see <http://www.gnu.org/licenses/>.
14 *
15 * This file incorporates work covered by the following copyright and
16 * permission notice:
17 *
18
19Copyright (c) 2007, 2008 by Juliusz Chroboczek
20
21Permission is hereby granted, free of charge, to any person obtaining a copy
22of this software and associated documentation files (the "Software"), to deal
23in the Software without restriction, including without limitation the rights
24to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
25copies of the Software, and to permit persons to whom the Software is
26furnished to do so, subject to the following conditions:
27
28The above copyright notice and this permission notice shall be included in
29all copies or substantial portions of the Software.
30
31THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
32IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
33FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
34AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
35LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
36OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
37THE SOFTWARE.
38*/
39
Paul Jakma57345092011-12-25 17:52:09 +010040#include <zebra.h>
41#include "if.h"
42
43#include "babeld.h"
44#include "util.h"
45#include "net.h"
46#include "babel_interface.h"
47#include "source.h"
48#include "neighbour.h"
49#include "route.h"
50#include "xroute.h"
51#include "resend.h"
52#include "message.h"
53#include "kernel.h"
54
/* Shared on-wire packet header: magic 42, protocol version 2.  Bytes 2-3
   (the body length) are filled in with DO_HTONS just before each send. */
unsigned char packet_header[4] = {42, 2};

/* Non-zero: don't re-advertise routes on the interface they came from. */
int split_horizon = 1;

/* Our own sequence number, and the timeval associated with it
   (maintained by users of this file — not updated here). */
unsigned short myseqno = 0;
struct timeval seqno_time = {0, 0};

#define UNICAST_BUFSIZE 1024
/* Single shared buffer for unicast TLVs; it belongs to unicast_neighbour
   and is flushed whenever the target neighbour changes. */
int unicast_buffered = 0;
unsigned char *unicast_buffer = NULL;
struct neighbour *unicast_neighbour = NULL;
struct timeval unicast_flush_timeout = {0, 0};

/* The v4-mapped IPv6 prefix ::ffff:0:0/96, used to encode IPv4 routes. */
static const unsigned char v4prefix[16] =
    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF, 0xFF, 0, 0, 0, 0 };
70
/* Parse a network prefix, encoded in the somewhat baroque compressed
   representation used by Babel.  Return the number of bytes parsed.

   ae:      address encoding — 0 wildcard, 1 IPv4 (returned v4-mapped),
            2 IPv6, 3 link-local IPv6 (fe80::/64, only last 8 bytes sent).
   plen:    prefix length in bits, or < 0 for a full address.
   omitted: number of leading bytes the sender elided; they are taken
            from dp, the "default prefix" (the previous prefix of the
            same family seen in this packet).  dp may be NULL when
            omitted == 0.
   p:       the on-wire prefix bytes; len: how many bytes are valid.
   p_r:     out — the decoded prefix, 16 bytes, masked to plen bits. */
static int
network_prefix(int ae, int plen, unsigned int omitted,
               const unsigned char *p, const unsigned char *dp,
               unsigned int len, unsigned char *p_r)
{
    unsigned pb;
    unsigned char prefix[16];
    int ret = -1;

    /* pb: number of significant prefix bytes. */
    if(plen >= 0)
        pb = (plen + 7) / 8;
    else if(ae == 1)
        pb = 4;
    else
        pb = 16;

    if(pb > 16)
        return -1;

    memset(prefix, 0, 16);

    switch(ae) {
    case 0:
        /* Wildcard: empty prefix, nothing consumed. */
        ret = 0;
        break;
    case 1:
        /* IPv4: at most 4 bytes, compressed against a v4-mapped default. */
        if(omitted > 4 || pb > 4 || (pb > omitted && len < pb - omitted))
            return -1;
        memcpy(prefix, v4prefix, 12);
        if(omitted) {
            /* The default prefix must itself be v4-mapped. */
            if (dp == NULL || !v4mapped(dp)) return -1;
            /* Copy the mapping bytes plus the omitted address bytes. */
            memcpy(prefix, dp, 12 + omitted);
        }
        if(pb > omitted) memcpy(prefix + 12 + omitted, p, pb - omitted);
        ret = pb - omitted;
        break;
    case 2:
        /* IPv6: up to 16 bytes, compressed against a non-mapped default. */
        if(omitted > 16 || (pb > omitted && len < pb - omitted)) return -1;
        if(omitted) {
            if (dp == NULL || v4mapped(dp)) return -1;
            memcpy(prefix, dp, omitted);
        }
        if(pb > omitted) memcpy(prefix + omitted, p, pb - omitted);
        ret = pb - omitted;
        break;
    case 3:
        /* Link-local: implicit fe80::/64, only the interface id on the wire. */
        if(pb > 8 && len < pb - 8) return -1;
        prefix[0] = 0xfe;
        prefix[1] = 0x80;
        if(pb > 8) memcpy(prefix + 8, p, pb - 8);
        ret = pb - 8;
        break;
    default:
        return -1;
    }

    /* IPv4 prefixes live at bits 96..128 of the mapped address. */
    mask_prefix(p_r, prefix, plen < 0 ? 128 : ae == 1 ? plen + 96 : plen);
    return ret;
}
132
133static void
134parse_route_attributes(const unsigned char *a, int alen,
135 unsigned char *channels)
136{
137 int type, len, i = 0;
138
139 while(i < alen) {
140 type = a[i];
141 if(type == 0) {
142 i++;
143 continue;
144 }
145
146 if(i + 1 > alen) {
147 fprintf(stderr, "Received truncated attributes.\n");
148 return;
149 }
150 len = a[i + 1];
151 if(i + len > alen) {
152 fprintf(stderr, "Received truncated attributes.\n");
153 return;
154 }
155
156 if(type == 1) {
157 /* Nothing. */
158 } else if(type == 2) {
159 if(len > DIVERSITY_HOPS) {
160 fprintf(stderr,
161 "Received overlong channel information (%d > %d).\n",
162 len, DIVERSITY_HOPS);
163 len = DIVERSITY_HOPS;
164 }
165 if(memchr(a + i + 2, 0, len) != NULL) {
166 /* 0 is reserved. */
167 fprintf(stderr, "Channel information contains 0!");
168 return;
169 }
170 memset(channels, 0, DIVERSITY_HOPS);
171 memcpy(channels, a + i + 2, len);
172 } else {
173 fprintf(stderr, "Received unknown route attribute %d.\n", type);
174 }
175
176 i += len + 2;
177 }
Paul Jakma57345092011-12-25 17:52:09 +0100178}
179
/* Parse a full (non-compressed) network address: a prefix with an
   implicit natural length (plen < 0) and no omitted bytes.  Returns the
   number of bytes consumed, or -1 on error, like network_prefix. */
static int
network_address(int ae, const unsigned char *a, unsigned int len,
                unsigned char *a_r)
{
    return network_prefix(ae, -1, 0, a, NULL, len, a_r);
}
186
Matthieu Boutierc35fafd2012-01-23 23:46:32 +0100187static int
188channels_len(unsigned char *channels)
189{
190 unsigned char *p = memchr(channels, 0, DIVERSITY_HOPS);
191 return p ? (p - channels) : DIVERSITY_HOPS;
192}
193
Paul Jakma57345092011-12-25 17:52:09 +0100194void
195parse_packet(const unsigned char *from, struct interface *ifp,
196 const unsigned char *packet, int packetlen)
197{
198 int i;
199 const unsigned char *message;
200 unsigned char type, len;
201 int bodylen;
202 struct neighbour *neigh;
203 int have_router_id = 0, have_v4_prefix = 0, have_v6_prefix = 0,
204 have_v4_nh = 0, have_v6_nh = 0;
205 unsigned char router_id[8], v4_prefix[16], v6_prefix[16],
206 v4_nh[16], v6_nh[16];
207
208 if(!linklocal(from)) {
Matthieu Boutier4eedea52012-01-17 22:46:21 +0100209 zlog_err("Received packet from non-local address %s.",
210 format_address(from));
Paul Jakma57345092011-12-25 17:52:09 +0100211 return;
212 }
213
214 if(packet[0] != 42) {
Matthieu Boutier4eedea52012-01-17 22:46:21 +0100215 zlog_err("Received malformed packet on %s from %s.",
216 ifp->name, format_address(from));
Paul Jakma57345092011-12-25 17:52:09 +0100217 return;
218 }
219
220 if(packet[1] != 2) {
Matthieu Boutier4eedea52012-01-17 22:46:21 +0100221 zlog_err("Received packet with unknown version %d on %s from %s.",
222 packet[1], ifp->name, format_address(from));
Paul Jakma57345092011-12-25 17:52:09 +0100223 return;
224 }
225
226 neigh = find_neighbour(from, ifp);
227 if(neigh == NULL) {
Matthieu Boutier4eedea52012-01-17 22:46:21 +0100228 zlog_err("Couldn't allocate neighbour.");
Paul Jakma57345092011-12-25 17:52:09 +0100229 return;
230 }
231
232 DO_NTOHS(bodylen, packet + 2);
233
234 if(bodylen + 4 > packetlen) {
Matthieu Boutier4eedea52012-01-17 22:46:21 +0100235 zlog_err("Received truncated packet (%d + 4 > %d).",
236 bodylen, packetlen);
Paul Jakma57345092011-12-25 17:52:09 +0100237 bodylen = packetlen - 4;
238 }
239
240 i = 0;
241 while(i < bodylen) {
242 message = packet + 4 + i;
243 type = message[0];
244 if(type == MESSAGE_PAD1) {
245 debugf(BABEL_DEBUG_COMMON,"Received pad1 from %s on %s.",
246 format_address(from), ifp->name);
247 i++;
248 continue;
249 }
250 if(i + 1 > bodylen) {
Matthieu Boutier4eedea52012-01-17 22:46:21 +0100251 zlog_err("Received truncated message.");
Paul Jakma57345092011-12-25 17:52:09 +0100252 break;
253 }
254 len = message[1];
255 if(i + len > bodylen) {
Matthieu Boutier4eedea52012-01-17 22:46:21 +0100256 zlog_err("Received truncated message.");
Paul Jakma57345092011-12-25 17:52:09 +0100257 break;
258 }
259
260 if(type == MESSAGE_PADN) {
261 debugf(BABEL_DEBUG_COMMON,"Received pad%d from %s on %s.",
262 len, format_address(from), ifp->name);
263 } else if(type == MESSAGE_ACK_REQ) {
264 unsigned short nonce, interval;
265 if(len < 6) goto fail;
266 DO_NTOHS(nonce, message + 4);
267 DO_NTOHS(interval, message + 6);
268 debugf(BABEL_DEBUG_COMMON,"Received ack-req (%04X %d) from %s on %s.",
269 nonce, interval, format_address(from), ifp->name);
270 send_ack(neigh, nonce, interval);
271 } else if(type == MESSAGE_ACK) {
272 debugf(BABEL_DEBUG_COMMON,"Received ack from %s on %s.",
273 format_address(from), ifp->name);
274 /* Nothing right now */
275 } else if(type == MESSAGE_HELLO) {
276 unsigned short seqno, interval;
277 int changed;
278 if(len < 6) goto fail;
279 DO_NTOHS(seqno, message + 4);
280 DO_NTOHS(interval, message + 6);
281 debugf(BABEL_DEBUG_COMMON,"Received hello %d (%d) from %s on %s.",
282 seqno, interval,
283 format_address(from), ifp->name);
Paul Jakma57345092011-12-25 17:52:09 +0100284 changed = update_neighbour(neigh, seqno, interval);
285 update_neighbour_metric(neigh, changed);
286 if(interval > 0)
287 schedule_neighbours_check(interval * 10, 0);
288 } else if(type == MESSAGE_IHU) {
289 unsigned short txcost, interval;
290 unsigned char address[16];
291 int rc;
292 if(len < 6) goto fail;
293 DO_NTOHS(txcost, message + 4);
294 DO_NTOHS(interval, message + 6);
295 rc = network_address(message[2], message + 8, len - 6, address);
296 if(rc < 0) goto fail;
297 debugf(BABEL_DEBUG_COMMON,"Received ihu %d (%d) from %s on %s for %s.",
298 txcost, interval,
299 format_address(from), ifp->name,
300 format_address(address));
301 if(message[2] == 0 || is_interface_ll_address(ifp, address)) {
302 int changed = txcost != neigh->txcost;
303 neigh->txcost = txcost;
304 neigh->ihu_time = babel_now;
305 neigh->ihu_interval = interval;
306 update_neighbour_metric(neigh, changed);
307 if(interval > 0)
308 schedule_neighbours_check(interval * 10 * 3, 0);
309 }
310 } else if(type == MESSAGE_ROUTER_ID) {
311 if(len < 10) {
312 have_router_id = 0;
313 goto fail;
314 }
315 memcpy(router_id, message + 4, 8);
316 have_router_id = 1;
317 debugf(BABEL_DEBUG_COMMON,"Received router-id %s from %s on %s.",
318 format_eui64(router_id), format_address(from), ifp->name);
319 } else if(type == MESSAGE_NH) {
320 unsigned char nh[16];
321 int rc;
322 if(len < 2) {
323 have_v4_nh = 0;
324 have_v6_nh = 0;
325 goto fail;
326 }
327 rc = network_address(message[2], message + 4, len - 2,
328 nh);
329 if(rc < 0) {
330 have_v4_nh = 0;
331 have_v6_nh = 0;
332 goto fail;
333 }
334 debugf(BABEL_DEBUG_COMMON,"Received nh %s (%d) from %s on %s.",
335 format_address(nh), message[2],
336 format_address(from), ifp->name);
337 if(message[2] == 1) {
338 memcpy(v4_nh, nh, 16);
339 have_v4_nh = 1;
340 } else {
341 memcpy(v6_nh, nh, 16);
342 have_v6_nh = 1;
343 }
344 } else if(type == MESSAGE_UPDATE) {
345 unsigned char prefix[16], *nh;
346 unsigned char plen;
Matthieu Boutierc35fafd2012-01-23 23:46:32 +0100347 unsigned char channels[DIVERSITY_HOPS];
Paul Jakma57345092011-12-25 17:52:09 +0100348 unsigned short interval, seqno, metric;
Matthieu Boutierc35fafd2012-01-23 23:46:32 +0100349 int rc, parsed_len;
Paul Jakma57345092011-12-25 17:52:09 +0100350 if(len < 10) {
351 if(len < 2 || message[3] & 0x80)
352 have_v4_prefix = have_v6_prefix = 0;
353 goto fail;
354 }
355 DO_NTOHS(interval, message + 6);
356 DO_NTOHS(seqno, message + 8);
357 DO_NTOHS(metric, message + 10);
358 if(message[5] == 0 ||
359 (message[3] == 1 ? have_v4_prefix : have_v6_prefix))
360 rc = network_prefix(message[2], message[4], message[5],
361 message + 12,
362 message[2] == 1 ? v4_prefix : v6_prefix,
363 len - 10, prefix);
364 else
365 rc = -1;
366 if(rc < 0) {
367 if(message[3] & 0x80)
368 have_v4_prefix = have_v6_prefix = 0;
369 goto fail;
370 }
Matthieu Boutierc35fafd2012-01-23 23:46:32 +0100371 parsed_len = 10 + rc;
Paul Jakma57345092011-12-25 17:52:09 +0100372
373 plen = message[4] + (message[2] == 1 ? 96 : 0);
374
375 if(message[3] & 0x80) {
376 if(message[2] == 1) {
377 memcpy(v4_prefix, prefix, 16);
378 have_v4_prefix = 1;
379 } else {
380 memcpy(v6_prefix, prefix, 16);
381 have_v6_prefix = 1;
382 }
383 }
384 if(message[3] & 0x40) {
385 if(message[2] == 1) {
386 memset(router_id, 0, 4);
387 memcpy(router_id + 4, prefix + 12, 4);
388 } else {
389 memcpy(router_id, prefix + 8, 8);
390 }
391 have_router_id = 1;
392 }
393 if(!have_router_id && message[2] != 0) {
Matthieu Boutier4eedea52012-01-17 22:46:21 +0100394 zlog_err("Received prefix with no router id.");
Paul Jakma57345092011-12-25 17:52:09 +0100395 goto fail;
396 }
397 debugf(BABEL_DEBUG_COMMON,"Received update%s%s for %s from %s on %s.",
398 (message[3] & 0x80) ? "/prefix" : "",
399 (message[3] & 0x40) ? "/id" : "",
400 format_prefix(prefix, plen),
401 format_address(from), ifp->name);
402
403 if(message[2] == 0) {
404 if(metric < 0xFFFF) {
Matthieu Boutier4eedea52012-01-17 22:46:21 +0100405 zlog_err("Received wildcard update with finite metric.");
Paul Jakma57345092011-12-25 17:52:09 +0100406 goto done;
407 }
408 retract_neighbour_routes(neigh);
409 goto done;
410 } else if(message[2] == 1) {
411 if(!have_v4_nh)
412 goto fail;
413 nh = v4_nh;
414 } else if(have_v6_nh) {
415 nh = v6_nh;
416 } else {
417 nh = neigh->address;
418 }
419
420 if(message[2] == 1) {
421 if(!babel_get_if_nfo(ifp)->ipv4)
422 goto done;
423 }
424
Matthieu Boutierc35fafd2012-01-23 23:46:32 +0100425 if((ifp->flags & BABEL_IF_FARAWAY)) {
426 channels[0] = 0;
427 } else {
428 /* This will be overwritten by parse_route_attributes below. */
429 if(metric < 256) {
430 /* Assume non-interfering (wired) link. */
431 channels[0] = 0;
432 } else {
433 /* Assume interfering. */
434 channels[0] = BABEL_IF_CHANNEL_INTERFERING;
435 channels[1] = 0;
436 }
437
438 if(parsed_len < len)
439 parse_route_attributes(message + 2 + parsed_len,
440 len - parsed_len, channels);
441 }
442
Paul Jakma57345092011-12-25 17:52:09 +0100443 update_route(router_id, prefix, plen, seqno, metric, interval,
Matthieu Boutierc35fafd2012-01-23 23:46:32 +0100444 neigh, nh,
445 channels, channels_len(channels));
Paul Jakma57345092011-12-25 17:52:09 +0100446 } else if(type == MESSAGE_REQUEST) {
447 unsigned char prefix[16], plen;
448 int rc;
449 if(len < 2) goto fail;
450 rc = network_prefix(message[2], message[3], 0,
451 message + 4, NULL, len - 2, prefix);
452 if(rc < 0) goto fail;
453 plen = message[3] + (message[2] == 1 ? 96 : 0);
454 debugf(BABEL_DEBUG_COMMON,"Received request for %s from %s on %s.",
455 message[2] == 0 ? "any" : format_prefix(prefix, plen),
456 format_address(from), ifp->name);
457 if(message[2] == 0) {
Matthieu Boutierc35fafd2012-01-23 23:46:32 +0100458 struct babel_interface *babel_ifp =babel_get_if_nfo(neigh->ifp);
Paul Jakma57345092011-12-25 17:52:09 +0100459 /* If a neighbour is requesting a full route dump from us,
460 we might as well send it an IHU. */
461 send_ihu(neigh, NULL);
Matthieu Boutierc35fafd2012-01-23 23:46:32 +0100462 /* Since nodes send wildcard requests on boot, booting
463 a large number of nodes at the same time may cause an
464 update storm. Ignore a wildcard request that happens
465 shortly after we sent a full update. */
466 if(babel_ifp->last_update_time <
Juliusz Chroboczek52d54422012-02-11 13:08:00 +0100467 (time_t)(babel_now.tv_sec -
468 MAX(babel_ifp->hello_interval / 100, 1)))
Matthieu Boutierc35fafd2012-01-23 23:46:32 +0100469 send_update(neigh->ifp, 0, NULL, 0);
Paul Jakma57345092011-12-25 17:52:09 +0100470 } else {
471 send_update(neigh->ifp, 0, prefix, plen);
472 }
473 } else if(type == MESSAGE_MH_REQUEST) {
474 unsigned char prefix[16], plen;
475 unsigned short seqno;
476 int rc;
477 if(len < 14) goto fail;
478 DO_NTOHS(seqno, message + 4);
479 rc = network_prefix(message[2], message[3], 0,
480 message + 16, NULL, len - 14, prefix);
481 if(rc < 0) goto fail;
482 plen = message[3] + (message[2] == 1 ? 96 : 0);
483 debugf(BABEL_DEBUG_COMMON,"Received request (%d) for %s from %s on %s (%s, %d).",
484 message[6],
485 format_prefix(prefix, plen),
486 format_address(from), ifp->name,
487 format_eui64(message + 8), seqno);
488 handle_request(neigh, prefix, plen, message[6],
489 seqno, message + 8);
490 } else {
491 debugf(BABEL_DEBUG_COMMON,"Received unknown packet type %d from %s on %s.",
492 type, format_address(from), ifp->name);
493 }
494 done:
495 i += len + 2;
496 continue;
497
498 fail:
Matthieu Boutier4eedea52012-01-17 22:46:21 +0100499 zlog_err("Couldn't parse packet (%d, %d) from %s on %s.",
500 message[0], message[1], format_address(from), ifp->name);
Paul Jakma57345092011-12-25 17:52:09 +0100501 goto done;
502 }
503 return;
504}
505
506/* Under normal circumstances, there are enough moderation mechanisms
507 elsewhere in the protocol to make sure that this last-ditch check
508 should never trigger. But I'm superstitious. */
509
510static int
511check_bucket(struct interface *ifp)
512{
513 babel_interface_nfo *babel_ifp = babel_get_if_nfo(ifp);
514 if(babel_ifp->bucket <= 0) {
515 int seconds = babel_now.tv_sec - babel_ifp->bucket_time;
516 if(seconds > 0) {
517 babel_ifp->bucket = MIN(BUCKET_TOKENS_MAX,
518 seconds * BUCKET_TOKENS_PER_SEC);
519 }
520 /* Reset bucket time unconditionally, in case clock is stepped. */
521 babel_ifp->bucket_time = babel_now.tv_sec;
522 }
523
524 if(babel_ifp->bucket > 0) {
525 babel_ifp->bucket--;
526 return 1;
527 } else {
528 return 0;
529 }
530}
531
/* Send the interface's buffered TLVs as one multicast packet and reset
   all per-interface buffering state.  Scheduled updates are flushed
   first so message ordering within the packet is preserved. */
void
flushbuf(struct interface *ifp)
{
    int rc;
    struct sockaddr_in6 sin6;
    babel_interface_nfo *babel_ifp = babel_get_if_nfo(ifp);

    assert(babel_ifp->buffered <= babel_ifp->bufsize);

    flushupdates(ifp);

    if(babel_ifp->buffered > 0) {
        debugf(BABEL_DEBUG_COMMON," (flushing %d buffered bytes on %s)",
               babel_ifp->buffered, ifp->name);
        /* Rate-limit: drop the packet if the token bucket is empty. */
        if(check_bucket(ifp)) {
            memset(&sin6, 0, sizeof(sin6));
            sin6.sin6_family = AF_INET6;
            memcpy(&sin6.sin6_addr, protocol_group, 16);
            sin6.sin6_port = htons(protocol_port);
            sin6.sin6_scope_id = ifp->ifindex;
            /* Fill in the body length of the shared packet header. */
            DO_HTONS(packet_header + 2, babel_ifp->buffered);
            rc = babel_send(protocol_socket,
                            packet_header, sizeof(packet_header),
                            babel_ifp->sendbuf, babel_ifp->buffered,
                            (struct sockaddr*)&sin6, sizeof(sin6));
            if(rc < 0)
                zlog_err("send: %s", safe_strerror(errno));
        } else {
            zlog_err("Warning: bucket full, dropping packet to %s.",
                     ifp->name);
        }
    }
    /* Reset buffer and all TLV-compression state, sent or not. */
    VALGRIND_MAKE_MEM_UNDEFINED(babel_ifp->sendbuf, babel_ifp->bufsize);
    babel_ifp->buffered = 0;
    babel_ifp->have_buffered_hello = 0;
    babel_ifp->have_buffered_id = 0;
    babel_ifp->have_buffered_nh = 0;
    babel_ifp->have_buffered_prefix = 0;
    babel_ifp->flush_timeout.tv_sec = 0;
    babel_ifp->flush_timeout.tv_usec = 0;
}
573
574static void
575schedule_flush(struct interface *ifp)
576{
577 babel_interface_nfo *babel_ifp = babel_get_if_nfo(ifp);
578 unsigned msecs = jitter(babel_ifp, 0);
579 if(babel_ifp->flush_timeout.tv_sec != 0 &&
580 timeval_minus_msec(&babel_ifp->flush_timeout, &babel_now) < msecs)
581 return;
582 set_timeout(&babel_ifp->flush_timeout, msecs);
583}
584
585static void
586schedule_flush_now(struct interface *ifp)
587{
588 babel_interface_nfo *babel_ifp = babel_get_if_nfo(ifp);
589 /* Almost now */
590 unsigned msecs = roughly(10);
591 if(babel_ifp->flush_timeout.tv_sec != 0 &&
592 timeval_minus_msec(&babel_ifp->flush_timeout, &babel_now) < msecs)
593 return;
594 set_timeout(&babel_ifp->flush_timeout, msecs);
595}
596
597static void
598schedule_unicast_flush(unsigned msecs)
599{
600 if(!unicast_neighbour)
601 return;
602 if(unicast_flush_timeout.tv_sec != 0 &&
603 timeval_minus_msec(&unicast_flush_timeout, &babel_now) < msecs)
604 return;
605 unicast_flush_timeout.tv_usec = (babel_now.tv_usec + msecs * 1000) %1000000;
606 unicast_flush_timeout.tv_sec =
607 babel_now.tv_sec + (babel_now.tv_usec / 1000 + msecs) / 1000;
608}
609
610static void
611ensure_space(struct interface *ifp, int space)
612{
613 babel_interface_nfo *babel_ifp = babel_get_if_nfo(ifp);
614 if(babel_ifp->bufsize - babel_ifp->buffered < space)
615 flushbuf(ifp);
616}
617
618static void
619start_message(struct interface *ifp, int type, int len)
620{
621 babel_interface_nfo *babel_ifp = babel_get_if_nfo(ifp);
622 if(babel_ifp->bufsize - babel_ifp->buffered < len + 2)
623 flushbuf(ifp);
624 babel_ifp->sendbuf[babel_ifp->buffered++] = type;
625 babel_ifp->sendbuf[babel_ifp->buffered++] = len;
626}
627
628static void
629end_message(struct interface *ifp, int type, int bytes)
630{
631 babel_interface_nfo *babel_ifp = babel_get_if_nfo(ifp);
632 assert(babel_ifp->buffered >= bytes + 2 &&
633 babel_ifp->sendbuf[babel_ifp->buffered - bytes - 2] == type &&
634 babel_ifp->sendbuf[babel_ifp->buffered - bytes - 1] == bytes);
635 schedule_flush(ifp);
636}
637
638static void
639accumulate_byte(struct interface *ifp, unsigned char value)
640{
641 babel_interface_nfo *babel_ifp = babel_get_if_nfo(ifp);
642 babel_ifp->sendbuf[babel_ifp->buffered++] = value;
643}
644
645static void
646accumulate_short(struct interface *ifp, unsigned short value)
647{
648 babel_interface_nfo *babel_ifp = babel_get_if_nfo(ifp);
649 DO_HTONS(babel_ifp->sendbuf + babel_ifp->buffered, value);
650 babel_ifp->buffered += 2;
651}
652
653static void
654accumulate_bytes(struct interface *ifp,
655 const unsigned char *value, unsigned len)
656{
657 babel_interface_nfo *babel_ifp = babel_get_if_nfo(ifp);
658 memcpy(babel_ifp->sendbuf + babel_ifp->buffered, value, len);
659 babel_ifp->buffered += len;
660}
661
662static int
663start_unicast_message(struct neighbour *neigh, int type, int len)
664{
665 if(unicast_neighbour) {
666 if(neigh != unicast_neighbour ||
667 unicast_buffered + len + 2 >=
668 MIN(UNICAST_BUFSIZE, babel_get_if_nfo(neigh->ifp)->bufsize))
669 flush_unicast(0);
670 }
671 if(!unicast_buffer)
672 unicast_buffer = malloc(UNICAST_BUFSIZE);
673 if(!unicast_buffer) {
674 zlog_err("malloc(unicast_buffer): %s", safe_strerror(errno));
675 return -1;
676 }
677
678 unicast_neighbour = neigh;
679
680 unicast_buffer[unicast_buffered++] = type;
681 unicast_buffer[unicast_buffered++] = len;
682 return 1;
683}
684
685static void
686end_unicast_message(struct neighbour *neigh, int type, int bytes)
687{
688 assert(unicast_neighbour == neigh && unicast_buffered >= bytes + 2 &&
689 unicast_buffer[unicast_buffered - bytes - 2] == type &&
690 unicast_buffer[unicast_buffered - bytes - 1] == bytes);
691 schedule_unicast_flush(jitter(babel_get_if_nfo(neigh->ifp), 0));
692}
693
694static void
695accumulate_unicast_byte(struct neighbour *neigh, unsigned char value)
696{
697 unicast_buffer[unicast_buffered++] = value;
698}
699
700static void
701accumulate_unicast_short(struct neighbour *neigh, unsigned short value)
702{
703 DO_HTONS(unicast_buffer + unicast_buffered, value);
704 unicast_buffered += 2;
705}
706
707static void
708accumulate_unicast_bytes(struct neighbour *neigh,
709 const unsigned char *value, unsigned len)
710{
711 memcpy(unicast_buffer + unicast_buffered, value, len);
712 unicast_buffered += len;
713}
714
715void
716send_ack(struct neighbour *neigh, unsigned short nonce, unsigned short interval)
717{
718 int rc;
719 debugf(BABEL_DEBUG_COMMON,"Sending ack (%04x) to %s on %s.",
720 nonce, format_address(neigh->address), neigh->ifp->name);
721 rc = start_unicast_message(neigh, MESSAGE_ACK, 2); if(rc < 0) return;
722 accumulate_unicast_short(neigh, nonce);
723 end_unicast_message(neigh, MESSAGE_ACK, 2);
724 /* Roughly yields a value no larger than 3/2, so this meets the deadline */
725 schedule_unicast_flush(roughly(interval * 6));
726}
727
728void
729send_hello_noupdate(struct interface *ifp, unsigned interval)
730{
731 babel_interface_nfo *babel_ifp = babel_get_if_nfo(ifp);
732 /* This avoids sending multiple hellos in a single packet, which breaks
733 link quality estimation. */
734 if(babel_ifp->have_buffered_hello)
735 flushbuf(ifp);
736
737 babel_ifp->hello_seqno = seqno_plus(babel_ifp->hello_seqno, 1);
738 set_timeout(&babel_ifp->hello_timeout, babel_ifp->hello_interval);
739
740 if(!if_up(ifp))
741 return;
742
743 debugf(BABEL_DEBUG_COMMON,"Sending hello %d (%d) to %s.",
744 babel_ifp->hello_seqno, interval, ifp->name);
745
746 start_message(ifp, MESSAGE_HELLO, 6);
747 accumulate_short(ifp, 0);
748 accumulate_short(ifp, babel_ifp->hello_seqno);
749 accumulate_short(ifp, interval > 0xFFFF ? 0xFFFF : interval);
750 end_message(ifp, MESSAGE_HELLO, 6);
751 babel_ifp->have_buffered_hello = 1;
752}
753
754void
755send_hello(struct interface *ifp)
756{
757 babel_interface_nfo *babel_ifp = babel_get_if_nfo(ifp);
758 send_hello_noupdate(ifp, (babel_ifp->hello_interval + 9) / 10);
759 /* Send full IHU every 3 hellos, and marginal IHU each time */
Juliusz Chroboczek52d54422012-02-11 13:08:00 +0100760 if(babel_ifp->hello_seqno % 3 == 0)
Paul Jakma57345092011-12-25 17:52:09 +0100761 send_ihu(NULL, ifp);
762 else
763 send_marginal_ihu(ifp);
764}
765
/* Send the buffered unicast TLVs to unicast_neighbour and reset the
   unicast buffering state.  When dofree is non-zero, the shared buffer
   is also released.  Safe to call with nothing buffered. */
void
flush_unicast(int dofree)
{
    struct sockaddr_in6 sin6;
    int rc;

    if(unicast_buffered == 0)
        goto done;

    if(!if_up(unicast_neighbour->ifp))
        goto done;

    /* Preserve ordering of messages */
    flushbuf(unicast_neighbour->ifp);

    /* Rate-limit against the same per-interface token bucket as
       multicast traffic. */
    if(check_bucket(unicast_neighbour->ifp)) {
        memset(&sin6, 0, sizeof(sin6));
        sin6.sin6_family = AF_INET6;
        memcpy(&sin6.sin6_addr, unicast_neighbour->address, 16);
        sin6.sin6_port = htons(protocol_port);
        sin6.sin6_scope_id = unicast_neighbour->ifp->ifindex;
        /* Fill in the body length of the shared packet header. */
        DO_HTONS(packet_header + 2, unicast_buffered);
        rc = babel_send(protocol_socket,
                        packet_header, sizeof(packet_header),
                        unicast_buffer, unicast_buffered,
                        (struct sockaddr*)&sin6, sizeof(sin6));
        if(rc < 0)
            zlog_err("send(unicast): %s", safe_strerror(errno));
    } else {
        zlog_err("Warning: bucket full, dropping unicast packet to %s if %s.",
                 format_address(unicast_neighbour->address),
                 unicast_neighbour->ifp->name);
    }

 done:
    /* Reset state whether or not anything was sent. */
    VALGRIND_MAKE_MEM_UNDEFINED(unicast_buffer, UNICAST_BUFSIZE);
    unicast_buffered = 0;
    if(dofree && unicast_buffer) {
        free(unicast_buffer);
        unicast_buffer = NULL;
    }
    unicast_neighbour = NULL;
    unicast_flush_timeout.tv_sec = 0;
    unicast_flush_timeout.tv_usec = 0;
}
811
812static void
813really_send_update(struct interface *ifp,
814 const unsigned char *id,
815 const unsigned char *prefix, unsigned char plen,
Matthieu Boutierc35fafd2012-01-23 23:46:32 +0100816 unsigned short seqno, unsigned short metric,
817 unsigned char *channels, int channels_len)
Paul Jakma57345092011-12-25 17:52:09 +0100818{
819 babel_interface_nfo *babel_ifp = babel_get_if_nfo(ifp);
820 int add_metric, v4, real_plen, omit = 0;
821 const unsigned char *real_prefix;
822 unsigned short flags = 0;
Matthieu Boutierc35fafd2012-01-23 23:46:32 +0100823 int channels_size;
824
825 if(diversity_kind != DIVERSITY_CHANNEL)
826 channels_len = -1;
827
828 channels_size = channels_len >= 0 ? channels_len + 2 : 0;
Paul Jakma57345092011-12-25 17:52:09 +0100829
830 if(!if_up(ifp))
831 return;
832
833 add_metric = output_filter(id, prefix, plen, ifp->ifindex);
834 if(add_metric >= INFINITY)
835 return;
836
837 metric = MIN(metric + add_metric, INFINITY);
838 /* Worst case */
839 ensure_space(ifp, 20 + 12 + 28);
840
841 v4 = plen >= 96 && v4mapped(prefix);
842
843 if(v4) {
844 if(!babel_ifp->ipv4)
845 return;
846 if(!babel_ifp->have_buffered_nh ||
847 memcmp(babel_ifp->buffered_nh, babel_ifp->ipv4, 4) != 0) {
848 start_message(ifp, MESSAGE_NH, 6);
849 accumulate_byte(ifp, 1);
850 accumulate_byte(ifp, 0);
851 accumulate_bytes(ifp, babel_ifp->ipv4, 4);
852 end_message(ifp, MESSAGE_NH, 6);
853 memcpy(babel_ifp->buffered_nh, babel_ifp->ipv4, 4);
854 babel_ifp->have_buffered_nh = 1;
855 }
856
857 real_prefix = prefix + 12;
858 real_plen = plen - 96;
859 } else {
860 if(babel_ifp->have_buffered_prefix) {
861 while(omit < plen / 8 &&
862 babel_ifp->buffered_prefix[omit] == prefix[omit])
863 omit++;
864 }
865 if(!babel_ifp->have_buffered_prefix || plen >= 48)
866 flags |= 0x80;
867 real_prefix = prefix;
868 real_plen = plen;
869 }
870
871 if(!babel_ifp->have_buffered_id
872 || memcmp(id, babel_ifp->buffered_id, 8) != 0) {
873 if(real_plen == 128 && memcmp(real_prefix + 8, id, 8) == 0) {
874 flags |= 0x40;
875 } else {
876 start_message(ifp, MESSAGE_ROUTER_ID, 10);
877 accumulate_short(ifp, 0);
878 accumulate_bytes(ifp, id, 8);
879 end_message(ifp, MESSAGE_ROUTER_ID, 10);
880 }
881 memcpy(babel_ifp->buffered_id, id, 16);
882 babel_ifp->have_buffered_id = 1;
883 }
884
Matthieu Boutierc35fafd2012-01-23 23:46:32 +0100885 start_message(ifp, MESSAGE_UPDATE, 10 + (real_plen + 7) / 8 - omit +
886 channels_size);
Paul Jakma57345092011-12-25 17:52:09 +0100887 accumulate_byte(ifp, v4 ? 1 : 2);
888 accumulate_byte(ifp, flags);
889 accumulate_byte(ifp, real_plen);
890 accumulate_byte(ifp, omit);
891 accumulate_short(ifp, (babel_ifp->update_interval + 5) / 10);
892 accumulate_short(ifp, seqno);
893 accumulate_short(ifp, metric);
894 accumulate_bytes(ifp, real_prefix + omit, (real_plen + 7) / 8 - omit);
Matthieu Boutierc35fafd2012-01-23 23:46:32 +0100895 /* Note that an empty channels TLV is different from no such TLV. */
896 if(channels_len >= 0) {
897 accumulate_byte(ifp, 2);
898 accumulate_byte(ifp, channels_len);
899 accumulate_bytes(ifp, channels, channels_len);
900 }
901 end_message(ifp, MESSAGE_UPDATE, 10 + (real_plen + 7) / 8 - omit +
902 channels_size);
Paul Jakma57345092011-12-25 17:52:09 +0100903
904 if(flags & 0x80) {
905 memcpy(babel_ifp->buffered_prefix, prefix, 16);
906 babel_ifp->have_buffered_prefix = 1;
907 }
908}
909
910static int
911compare_buffered_updates(const void *av, const void *bv)
912{
913 const struct buffered_update *a = av, *b = bv;
914 int rc, v4a, v4b, ma, mb;
915
916 rc = memcmp(a->id, b->id, 8);
917 if(rc != 0)
918 return rc;
919
920 v4a = (a->plen >= 96 && v4mapped(a->prefix));
921 v4b = (b->plen >= 96 && v4mapped(b->prefix));
922
923 if(v4a > v4b)
924 return 1;
925 else if(v4a < v4b)
926 return -1;
927
928 ma = (!v4a && a->plen == 128 && memcmp(a->prefix + 8, a->id, 8) == 0);
929 mb = (!v4b && b->plen == 128 && memcmp(b->prefix + 8, b->id, 8) == 0);
930
931 if(ma > mb)
932 return -1;
933 else if(mb > ma)
934 return 1;
935
936 if(a->plen < b->plen)
937 return 1;
938 else if(a->plen > b->plen)
939 return -1;
940
941 return memcmp(a->prefix, b->prefix, 16);
942}
943
/* Send out every update currently buffered on ifp, or on all
   interfaces when ifp is NULL.  Buffered entries are deduplicated and
   sorted (see compare_buffered_updates); each surviving entry is sent
   either from the xroute we originate, from the installed route, or as
   a retraction when neither exists any more. */
void
flushupdates(struct interface *ifp)
{
    babel_interface_nfo *babel_ifp = NULL;
    struct xroute *xroute;
    struct babel_route *route;
    const unsigned char *last_prefix = NULL; /* last update actually handled */
    unsigned char last_plen = 0xFF;          /* 0xFF: no previous update yet */
    int i;

    /* Wildcard interface: recurse on every interface. */
    if(ifp == NULL) {
        struct interface *ifp_aux;
        struct listnode *linklist_node = NULL;
        FOR_ALL_INTERFACES(ifp_aux, linklist_node)
            flushupdates(ifp_aux);
        return;
    }

    babel_ifp = babel_get_if_nfo(ifp);
    if(babel_ifp->num_buffered_updates > 0) {
        struct buffered_update *b = babel_ifp->buffered_updates;
        int n = babel_ifp->num_buffered_updates;

        /* Detach the buffer first: the calls below may re-enter the
           update-buffering machinery. */
        babel_ifp->buffered_updates = NULL;
        babel_ifp->update_bufsize = 0;
        babel_ifp->num_buffered_updates = 0;

        if(!if_up(ifp))
            goto done;

        debugf(BABEL_DEBUG_COMMON," (flushing %d buffered updates on %s (%d))",
               n, ifp->name, ifp->ifindex);

        /* In order to send fewer update messages, we want to send updates
           with the same router-id together, with IPv6 going out before IPv4. */

        /* Tag each entry with the router-id it will be announced under,
           so that the sort can group them. */
        for(i = 0; i < n; i++) {
            route = find_installed_route(b[i].prefix, b[i].plen);
            if(route)
                memcpy(b[i].id, route->src->id, 8);
            else
                memcpy(b[i].id, myid, 8);
        }

        qsort(b, n, sizeof(struct buffered_update), compare_buffered_updates);

        for(i = 0; i < n; i++) {
            /* The same update may be scheduled multiple times before it is
               sent out.  Since our buffer is now sorted, it is enough to
               compare with the previous update. */

            if(last_prefix) {
                if(b[i].plen == last_plen &&
                   memcmp(b[i].prefix, last_prefix, 16) == 0)
                    continue;
            }

            xroute = find_xroute(b[i].prefix, b[i].plen);
            route = find_installed_route(b[i].prefix, b[i].plen);

            if(xroute && (!route || xroute->metric <= kernel_metric)) {
                /* We originate this prefix: announce it under our own
                   router-id and seqno, with no channel information. */
                really_send_update(ifp, myid,
                                   xroute->prefix, xroute->plen,
                                   myseqno, xroute->metric,
                                   NULL, 0);
                last_prefix = xroute->prefix;
                last_plen = xroute->plen;
            } else if(route) {
                unsigned char channels[DIVERSITY_HOPS];
                int chlen;
                struct interface *route_ifp = route->neigh->ifp;
                struct babel_interface *babel_route_ifp = NULL;
                unsigned short metric;
                unsigned short seqno;

                seqno = route->seqno;
                metric =
                    route_interferes(route, ifp) ?
                    route_metric(route) :
                    route_metric_noninterfering(route);

                if(metric < INFINITY)
                    satisfy_request(route->src->prefix, route->src->plen,
                                    seqno, route->src->id, ifp);
                /* Split horizon: don't re-announce a route on the
                   interface we learnt it from. */
                if((babel_ifp->flags & BABEL_IF_SPLIT_HORIZON) &&
                   route->neigh->ifp == ifp)
                    continue;

                /* Build the channel list for diversity routing: prepend
                   the incoming interface's channel unless that
                   interface is marked non-interfering. */
                babel_route_ifp = babel_get_if_nfo(route_ifp);
                if(babel_route_ifp->channel ==BABEL_IF_CHANNEL_NONINTERFERING) {
                    memcpy(channels, route->channels, DIVERSITY_HOPS);
                } else {
                    if(babel_route_ifp->channel == BABEL_IF_CHANNEL_UNKNOWN)
                        channels[0] = BABEL_IF_CHANNEL_INTERFERING;
                    else {
                        assert(babel_route_ifp->channel > 0 &&
                               babel_route_ifp->channel <= 255);
                        channels[0] = babel_route_ifp->channel;
                    }
                    memcpy(channels + 1, route->channels, DIVERSITY_HOPS - 1);
                }

                chlen = channels_len(channels);
                really_send_update(ifp, route->src->id,
                                   route->src->prefix,
                                   route->src->plen,
                                   seqno, metric,
                                   channels, chlen);
                update_source(route->src, seqno, metric);
                last_prefix = route->src->prefix;
                last_plen = route->src->plen;
            } else {
                /* There's no route for this prefix. This can happen shortly
                   after an xroute has been retracted, so send a retraction. */
                really_send_update(ifp, myid, b[i].prefix, b[i].plen,
                                   myseqno, INFINITY, NULL, -1);
            }
        }
        schedule_flush_now(ifp);
    done:
        free(b);
    }
    babel_ifp->update_flush_timeout.tv_sec = 0;
    babel_ifp->update_flush_timeout.tv_usec = 0;
}
1069
1070static void
1071schedule_update_flush(struct interface *ifp, int urgent)
1072{
1073 babel_interface_nfo *babel_ifp = babel_get_if_nfo(ifp);
1074 unsigned msecs;
1075 msecs = update_jitter(babel_ifp, urgent);
1076 if(babel_ifp->update_flush_timeout.tv_sec != 0 &&
1077 timeval_minus_msec(&babel_ifp->update_flush_timeout, &babel_now) < msecs)
1078 return;
1079 set_timeout(&babel_ifp->update_flush_timeout, msecs);
1080}
1081
1082static void
1083buffer_update(struct interface *ifp,
1084 const unsigned char *prefix, unsigned char plen)
1085{
1086 babel_interface_nfo *babel_ifp = babel_get_if_nfo(ifp);
1087 if(babel_ifp->num_buffered_updates > 0 &&
1088 babel_ifp->num_buffered_updates >= babel_ifp->update_bufsize)
1089 flushupdates(ifp);
1090
1091 if(babel_ifp->update_bufsize == 0) {
1092 int n;
1093 assert(babel_ifp->buffered_updates == NULL);
Matthieu Boutierc35fafd2012-01-23 23:46:32 +01001094 /* Allocate enough space to hold a full update. Since the
1095 number of installed routes will grow over time, make sure we
1096 have enough space to send a full-ish frame. */
1097 n = installed_routes_estimate() + xroutes_estimate() + 4;
1098 n = MAX(n, babel_ifp->bufsize / 16);
Paul Jakma57345092011-12-25 17:52:09 +01001099 again:
1100 babel_ifp->buffered_updates = malloc(n *sizeof(struct buffered_update));
1101 if(babel_ifp->buffered_updates == NULL) {
1102 zlog_err("malloc(buffered_updates): %s", safe_strerror(errno));
1103 if(n > 4) {
Matthieu Boutierc35fafd2012-01-23 23:46:32 +01001104 /* Try again with a tiny buffer. */
Paul Jakma57345092011-12-25 17:52:09 +01001105 n = 4;
1106 goto again;
1107 }
1108 return;
1109 }
1110 babel_ifp->update_bufsize = n;
1111 babel_ifp->num_buffered_updates = 0;
1112 }
1113
1114 memcpy(babel_ifp->buffered_updates[babel_ifp->num_buffered_updates].prefix,
1115 prefix, 16);
1116 babel_ifp->buffered_updates[babel_ifp->num_buffered_updates].plen = plen;
1117 babel_ifp->num_buffered_updates++;
1118}
1119
Matthieu Boutierc35fafd2012-01-23 23:46:32 +01001120static void
1121buffer_update_callback(struct babel_route *route, void *closure)
1122{
1123 buffer_update((struct interface*)closure,
1124 route->src->prefix, route->src->plen);
1125}
1126
/* Schedule an update for prefix/plen on ifp.  A NULL ifp means every
   interface; a NULL prefix means a full update (our own routes plus
   the whole table).  Updates are only buffered here; they are emitted
   by flushupdates() when the flush timer fires, and `urgent' merely
   shortens the jitter. */
void
send_update(struct interface *ifp, int urgent,
            const unsigned char *prefix, unsigned char plen)
{
    babel_interface_nfo *babel_ifp = NULL;

    if(ifp == NULL) {
        struct interface *ifp_aux;
        struct listnode *linklist_node = NULL;
        struct babel_route *route;
        FOR_ALL_INTERFACES(ifp_aux, linklist_node)
            send_update(ifp_aux, urgent, prefix, plen);
        if(prefix) {
            /* Since flushupdates only deals with non-wildcard interfaces, we
               need to do this now. */
            route = find_installed_route(prefix, plen);
            if(route && route_metric(route) < INFINITY)
                satisfy_request(prefix, plen, route->src->seqno, route->src->id,
                                NULL);
        }
        return;
    }

    if(!if_up(ifp))
        return;

    babel_ifp = babel_get_if_nfo(ifp);
    if(prefix) {
        debugf(BABEL_DEBUG_COMMON,"Sending update to %s for %s.",
               ifp->name, format_prefix(prefix, plen));
        buffer_update(ifp, prefix, plen);
    } else {
        /* Full update: our own routes first, then everything installed,
           and restart the periodic update timer. */
        send_self_update(ifp);
        debugf(BABEL_DEBUG_COMMON,"Sending update to %s for any.", ifp->name);
        for_all_installed_routes(buffer_update_callback, ifp);
        set_timeout(&babel_ifp->update_timeout, babel_ifp->update_interval);
        babel_ifp->last_update_time = babel_now.tv_sec;
    }
    schedule_update_flush(ifp, urgent);
}
1167
/* Send an urgent update for prefix/plen (ifp may be NULL for all
   interfaces) and record it in the resend list so the resend machinery
   retransmits it later (see record_resend()). */
void
send_update_resend(struct interface *ifp,
                   const unsigned char *prefix, unsigned char plen)
{
    assert(prefix != NULL);

    send_update(ifp, 1, prefix, plen);
    record_resend(RESEND_UPDATE, prefix, plen, 0, 0, NULL, resend_delay);
}
1177
/* Send a wildcard retraction on ifp (or on every interface when ifp is
   NULL): a 10-byte Update TLV with AE 0 and an infinite metric, which
   retracts all routes we previously announced. */
void
send_wildcard_retraction(struct interface *ifp)
{
    babel_interface_nfo *babel_ifp = NULL;
    if(ifp == NULL) {
        struct interface *ifp_aux;
        struct listnode *linklist_node = NULL;
        FOR_ALL_INTERFACES(ifp_aux, linklist_node)
            send_wildcard_retraction(ifp_aux);
        return;
    }

    if(!if_up(ifp))
        return;

    babel_ifp = babel_get_if_nfo(ifp);
    start_message(ifp, MESSAGE_UPDATE, 10);
    accumulate_byte(ifp, 0);        /* AE 0: wildcard address encoding */
    accumulate_byte(ifp, 0x40);     /* flags: 0x40 -- NOTE(review): presumably
                                       the router-id flag; confirm against the
                                       protocol spec */
    accumulate_byte(ifp, 0);        /* plen */
    accumulate_byte(ifp, 0);        /* omitted */
    accumulate_short(ifp, 0xFFFF);  /* interval */
    accumulate_short(ifp, myseqno);
    accumulate_short(ifp, 0xFFFF);  /* metric: infinity, i.e. retraction */
    end_message(ifp, MESSAGE_UPDATE, 10);

    /* Any router-id we were implicitly carrying is no longer implied. */
    babel_ifp->have_buffered_id = 0;
}
1206
1207void
1208update_myseqno()
1209{
1210 myseqno = seqno_plus(myseqno, 1);
1211 seqno_time = babel_now;
1212}
1213
Matthieu Boutierc35fafd2012-01-23 23:46:32 +01001214static void
1215send_xroute_update_callback(struct xroute *xroute, void *closure)
1216{
1217 struct interface *ifp = (struct interface*)closure;
1218 send_update(ifp, 0, xroute->prefix, xroute->plen);
1219}
1220
Paul Jakma57345092011-12-25 17:52:09 +01001221void
1222send_self_update(struct interface *ifp)
1223{
Paul Jakma57345092011-12-25 17:52:09 +01001224 if(ifp == NULL) {
1225 struct interface *ifp_aux;
1226 struct listnode *linklist_node = NULL;
1227 FOR_ALL_INTERFACES(ifp_aux, linklist_node) {
1228 if(!if_up(ifp_aux))
1229 continue;
1230 send_self_update(ifp_aux);
1231 }
1232 return;
1233 }
1234
Juliusz Chroboczek52d54422012-02-11 13:08:00 +01001235 debugf(BABEL_DEBUG_COMMON,"Sending self update to %s.", ifp->name);
1236 for_all_xroutes(send_xroute_update_callback, ifp);
Paul Jakma57345092011-12-25 17:52:09 +01001237}
1238
1239void
1240send_ihu(struct neighbour *neigh, struct interface *ifp)
1241{
1242 babel_interface_nfo *babel_ifp = NULL;
1243 int rxcost, interval;
1244 int ll;
1245
1246 if(neigh == NULL && ifp == NULL) {
1247 struct interface *ifp_aux;
1248 struct listnode *linklist_node = NULL;
1249 FOR_ALL_INTERFACES(ifp_aux, linklist_node) {
1250 if(if_up(ifp_aux))
1251 continue;
1252 send_ihu(NULL, ifp_aux);
1253 }
1254 return;
1255 }
1256
1257 if(neigh == NULL) {
1258 struct neighbour *ngh;
1259 FOR_ALL_NEIGHBOURS(ngh) {
1260 if(ngh->ifp == ifp)
1261 send_ihu(ngh, ifp);
1262 }
1263 return;
1264 }
1265
1266
1267 if(ifp && neigh->ifp != ifp)
1268 return;
1269
1270 ifp = neigh->ifp;
1271 babel_ifp = babel_get_if_nfo(ifp);
1272 if(!if_up(ifp))
1273 return;
1274
1275 rxcost = neighbour_rxcost(neigh);
1276 interval = (babel_ifp->hello_interval * 3 + 9) / 10;
1277
1278 /* Conceptually, an IHU is a unicast message. We usually send them as
1279 multicast, since this allows aggregation into a single packet and
1280 avoids an ARP exchange. If we already have a unicast message queued
1281 for this neighbour, however, we might as well piggyback the IHU. */
1282 debugf(BABEL_DEBUG_COMMON,"Sending %sihu %d on %s to %s.",
1283 unicast_neighbour == neigh ? "unicast " : "",
1284 rxcost,
1285 neigh->ifp->name,
1286 format_address(neigh->address));
1287
1288 ll = linklocal(neigh->address);
1289
1290 if(unicast_neighbour != neigh) {
1291 start_message(ifp, MESSAGE_IHU, ll ? 14 : 22);
1292 accumulate_byte(ifp, ll ? 3 : 2);
1293 accumulate_byte(ifp, 0);
1294 accumulate_short(ifp, rxcost);
1295 accumulate_short(ifp, interval);
1296 if(ll)
1297 accumulate_bytes(ifp, neigh->address + 8, 8);
1298 else
1299 accumulate_bytes(ifp, neigh->address, 16);
1300 end_message(ifp, MESSAGE_IHU, ll ? 14 : 22);
1301 } else {
1302 int rc;
1303 rc = start_unicast_message(neigh, MESSAGE_IHU, ll ? 14 : 22);
1304 if(rc < 0) return;
1305 accumulate_unicast_byte(neigh, ll ? 3 : 2);
1306 accumulate_unicast_byte(neigh, 0);
1307 accumulate_unicast_short(neigh, rxcost);
1308 accumulate_unicast_short(neigh, interval);
1309 if(ll)
1310 accumulate_unicast_bytes(neigh, neigh->address + 8, 8);
1311 else
1312 accumulate_unicast_bytes(neigh, neigh->address, 16);
1313 end_unicast_message(neigh, MESSAGE_IHU, ll ? 14 : 22);
1314 }
1315}
1316
1317/* Send IHUs to all marginal neighbours */
1318void
1319send_marginal_ihu(struct interface *ifp)
1320{
1321 struct neighbour *neigh;
1322 FOR_ALL_NEIGHBOURS(neigh) {
1323 if(ifp && neigh->ifp != ifp)
1324 continue;
1325 if(neigh->txcost >= 384 || (neigh->reach & 0xF000) != 0xF000)
1326 send_ihu(neigh, ifp);
1327 }
1328}
1329
1330void
1331send_request(struct interface *ifp,
1332 const unsigned char *prefix, unsigned char plen)
1333{
Paul Jakma57345092011-12-25 17:52:09 +01001334 int v4, len;
1335
1336 if(ifp == NULL) {
1337 struct interface *ifp_aux;
1338 struct listnode *linklist_node = NULL;
1339 FOR_ALL_INTERFACES(ifp_aux, linklist_node) {
1340 if(if_up(ifp_aux))
1341 continue;
1342 send_request(ifp_aux, prefix, plen);
1343 }
1344 return;
1345 }
1346
1347 /* make sure any buffered updates go out before this request. */
1348 flushupdates(ifp);
1349
1350 if(!if_up(ifp))
1351 return;
1352
Paul Jakma57345092011-12-25 17:52:09 +01001353 debugf(BABEL_DEBUG_COMMON,"sending request to %s for %s.",
1354 ifp->name, prefix ? format_prefix(prefix, plen) : "any");
1355 v4 = plen >= 96 && v4mapped(prefix);
1356 len = !prefix ? 2 : v4 ? 6 : 18;
1357
1358 start_message(ifp, MESSAGE_REQUEST, len);
1359 accumulate_byte(ifp, !prefix ? 0 : v4 ? 1 : 2);
1360 accumulate_byte(ifp, !prefix ? 0 : v4 ? plen - 96 : plen);
1361 if(prefix) {
1362 if(v4)
1363 accumulate_bytes(ifp, prefix + 12, 4);
1364 else
1365 accumulate_bytes(ifp, prefix, 16);
1366 }
1367 end_message(ifp, MESSAGE_REQUEST, len);
1368}
1369
/* Send a route request directly to one neighbour as a unicast message.
   A NULL prefix requests a full routing-table dump. */
void
send_unicast_request(struct neighbour *neigh,
                     const unsigned char *prefix, unsigned char plen)
{
    int rc, v4, len;

    /* make sure any buffered updates go out before this request. */
    flushupdates(neigh->ifp);

    debugf(BABEL_DEBUG_COMMON,"sending unicast request to %s for %s.",
           format_address(neigh->address),
           prefix ? format_prefix(prefix, plen) : "any");
    /* AE 1 carries 4 address bytes, AE 2 carries 16, AE 0 (wildcard)
       carries none.  NOTE(review): v4mapped(prefix) is reached with a
       NULL prefix when plen >= 96 -- callers presumably pass plen 0
       with a NULL prefix; confirm. */
    v4 = plen >= 96 && v4mapped(prefix);
    len = !prefix ? 2 : v4 ? 6 : 18;

    rc = start_unicast_message(neigh, MESSAGE_REQUEST, len);
    if(rc < 0) return;
    accumulate_unicast_byte(neigh, !prefix ? 0 : v4 ? 1 : 2);
    accumulate_unicast_byte(neigh, !prefix ? 0 : v4 ? plen - 96 : plen);
    if(prefix) {
        if(v4)
            accumulate_unicast_bytes(neigh, prefix + 12, 4);
        else
            accumulate_unicast_bytes(neigh, prefix, 16);
    }
    end_unicast_message(neigh, MESSAGE_REQUEST, len);
}
1397
/* Send a multi-hop (seqno) request for prefix/plen: ask for an
   announcement with router-id `id' and a seqno no older than `seqno',
   to be forwarded at most hop_count further hops.  A NULL ifp means
   every interface that is up. */
void
send_multihop_request(struct interface *ifp,
                      const unsigned char *prefix, unsigned char plen,
                      unsigned short seqno, const unsigned char *id,
                      unsigned short hop_count)
{
    int v4, pb, len;

    /* Make sure any buffered updates go out before this request. */
    flushupdates(ifp);

    if(ifp == NULL) {
        struct interface *ifp_aux;
        struct listnode *linklist_node = NULL;
        FOR_ALL_INTERFACES(ifp_aux, linklist_node) {
            if(!if_up(ifp_aux))
                continue;
            send_multihop_request(ifp_aux, prefix, plen, seqno, id, hop_count);
        }
        return;
    }

    if(!if_up(ifp))
        return;

    debugf(BABEL_DEBUG_COMMON,"Sending request (%d) on %s for %s.",
           hop_count, ifp->name, format_prefix(prefix, plen));
    v4 = plen >= 96 && v4mapped(prefix);
    pb = v4 ? ((plen - 96) + 7) / 8 : (plen + 7) / 8;  /* prefix bytes */
    len = 6 + 8 + pb;  /* fixed fields + router-id + prefix */

    start_message(ifp, MESSAGE_MH_REQUEST, len);
    accumulate_byte(ifp, v4 ? 1 : 2);              /* AE */
    accumulate_byte(ifp, v4 ? plen - 96 : plen);
    accumulate_short(ifp, seqno);
    accumulate_byte(ifp, hop_count);
    accumulate_byte(ifp, 0);                       /* reserved */
    accumulate_bytes(ifp, id, 8);                  /* router-id */
    if(prefix) {
        if(v4)
            accumulate_bytes(ifp, prefix + 12, pb);
        else
            accumulate_bytes(ifp, prefix, pb);
    }
    end_message(ifp, MESSAGE_MH_REQUEST, len);
}
1444
/* Unicast counterpart of send_multihop_request(): send the seqno
   request to one specific neighbour. */
void
send_unicast_multihop_request(struct neighbour *neigh,
                              const unsigned char *prefix, unsigned char plen,
                              unsigned short seqno, const unsigned char *id,
                              unsigned short hop_count)
{
    int rc, v4, pb, len;

    /* Make sure any buffered updates go out before this request. */
    flushupdates(neigh->ifp);

    debugf(BABEL_DEBUG_COMMON,"Sending multi-hop request to %s for %s (%d hops).",
           format_address(neigh->address),
           format_prefix(prefix, plen), hop_count);
    v4 = plen >= 96 && v4mapped(prefix);
    pb = v4 ? ((plen - 96) + 7) / 8 : (plen + 7) / 8;  /* prefix bytes */
    len = 6 + 8 + pb;  /* fixed fields + router-id + prefix */

    rc = start_unicast_message(neigh, MESSAGE_MH_REQUEST, len);
    if(rc < 0) return;
    accumulate_unicast_byte(neigh, v4 ? 1 : 2);    /* AE */
    accumulate_unicast_byte(neigh, v4 ? plen - 96 : plen);
    accumulate_unicast_short(neigh, seqno);
    accumulate_unicast_byte(neigh, hop_count);
    accumulate_unicast_byte(neigh, 0);             /* reserved */
    accumulate_unicast_bytes(neigh, id, 8);        /* router-id */
    if(prefix) {
        if(v4)
            accumulate_unicast_bytes(neigh, prefix + 12, pb);
        else
            accumulate_unicast_bytes(neigh, prefix, pb);
    }
    end_unicast_message(neigh, MESSAGE_MH_REQUEST, len);
}
1479
1480void
1481send_request_resend(struct neighbour *neigh,
1482 const unsigned char *prefix, unsigned char plen,
1483 unsigned short seqno, unsigned char *id)
1484{
Paul Jakma57345092011-12-25 17:52:09 +01001485 if(neigh)
1486 send_unicast_multihop_request(neigh, prefix, plen, seqno, id, 127);
1487 else
1488 send_multihop_request(NULL, prefix, plen, seqno, id, 127);
1489
Paul Jakma57345092011-12-25 17:52:09 +01001490 record_resend(RESEND_REQUEST, prefix, plen, seqno, id,
Juliusz Chroboczek52d54422012-02-11 13:08:00 +01001491 neigh ? neigh->ifp : NULL, resend_delay);
Paul Jakma57345092011-12-25 17:52:09 +01001492}
1493
/* Handle a request received from neigh for prefix/plen.  For a seqno
   request, `seqno' and `id' identify the announcement being asked for
   and hop_count bounds further forwarding; hop_count == 0 indicates a
   plain route request.  We either answer with an urgent update or
   forward the request towards a suitable successor. */
void
handle_request(struct neighbour *neigh, const unsigned char *prefix,
               unsigned char plen, unsigned char hop_count,
               unsigned short seqno, const unsigned char *id)
{
    struct xroute *xroute;
    struct babel_route *route;
    struct neighbour *successor = NULL;

    xroute = find_xroute(prefix, plen);
    route = find_installed_route(prefix, plen);

    if(xroute && (!route || xroute->metric <= kernel_metric)) {
        /* We originate this prefix.  If the request names our own
           router-id and asks for a newer seqno, bump ours first. */
        if(hop_count > 0 && memcmp(id, myid, 8) == 0) {
            if(seqno_compare(seqno, myseqno) > 0) {
                if(seqno_minus(seqno, myseqno) > 100) {
                    /* Hopelessly out-of-date request */
                    return;
                }
                update_myseqno();
            }
        }
        send_update(neigh->ifp, 1, prefix, plen);
        return;
    }

    /* An installed route under a different router-id, or one whose
       seqno already satisfies the request, answers it directly. */
    if(route &&
       (memcmp(id, route->src->id, 8) != 0 ||
        seqno_compare(seqno, route->seqno) <= 0)) {
        send_update(neigh->ifp, 1, prefix, plen);
        return;
    }

    if(hop_count <= 1)
        return;

    if(route && memcmp(id, route->src->id, 8) == 0 &&
       seqno_minus(seqno, route->seqno) > 100) {
        /* Hopelessly out-of-date */
        return;
    }

    if(request_redundant(neigh->ifp, prefix, plen, seqno, id))
        return;

    /* Let's try to forward this request. */
    if(route && route_metric(route) < INFINITY)
        successor = route->neigh;

    if(!successor || successor == neigh) {
        /* We were about to forward a request to its requestor.  Try to
           find a different neighbour to forward the request to. */
        struct babel_route *other_route;

        other_route = find_best_route(prefix, plen, 0, neigh);
        if(other_route && route_metric(other_route) < INFINITY)
            successor = other_route->neigh;
    }

    if(!successor || successor == neigh)
        /* Give up */
        return;

    /* Forward as a unicast request with a decremented hop count, and
       record it so we can suppress duplicates. */
    send_unicast_multihop_request(successor, prefix, plen, seqno, id,
                                  hop_count - 1);
    record_resend(RESEND_REQUEST, prefix, plen, seqno, id,
                  neigh->ifp, 0);
}