/*
 * This file is free software: you may copy, redistribute and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation, either version 2 of the License, or (at your
 * option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *

Copyright (c) 2007, 2008 by Juliusz Chroboczek

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/

#include <zebra.h>
#include "if.h"

#include "babeld.h"
#include "util.h"
#include "net.h"
#include "babel_interface.h"
#include "source.h"
#include "neighbour.h"
#include "route.h"
#include "xroute.h"
#include "resend.h"
#include "message.h"
#include "kernel.h"

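/* Every Babel packet starts with a fixed four-octet header: the magic
   octet 42, the protocol version (2), and a 16-bit body length in network
   byte order.  The length field of this template is filled in just before
   each send; see flushbuf() and flush_unicast() below. */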
unsigned char packet_header[4] = {42, 2};

int parasitic = 0;
int split_horizon = 1;

unsigned short myseqno = 0;
struct timeval seqno_time = {0, 0};

#define UNICAST_BUFSIZE 1024
int unicast_buffered = 0;
unsigned char *unicast_buffer = NULL;
struct neighbour *unicast_neighbour = NULL;
struct timeval unicast_flush_timeout = {0, 0};

static const unsigned char v4prefix[16] =
    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF, 0xFF, 0, 0, 0, 0 };

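/* Decode a prefix from the wire encoding used in Babel TLVs.  The address
   encoding "ae" is 0 for wildcard, 1 for IPv4 (kept v4-mapped internally),
   2 for IPv6 and 3 for link-local IPv6 (an implicit fe80::/64).  The first
   "omitted" octets are not carried on the wire and are copied from the
   default prefix dp instead, which is how consecutive updates within a
   packet are compressed.  Returns 1 on success, -1 on a malformed prefix. */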
static int
network_prefix(int ae, int plen, unsigned int omitted,
               const unsigned char *p, const unsigned char *dp,
               unsigned int len, unsigned char *p_r)
{
    unsigned pb;
    unsigned char prefix[16];

    if(plen >= 0)
        pb = (plen + 7) / 8;
    else if(ae == 1)
        pb = 4;
    else
        pb = 16;

    if(pb > 16)
        return -1;

    memset(prefix, 0, 16);

    switch(ae) {
    case 0: break;
    case 1:
        if(omitted > 4 || pb > 4 || (pb > omitted && len < pb - omitted))
            return -1;
        memcpy(prefix, v4prefix, 12);
        if(omitted) {
            if(dp == NULL || !v4mapped(dp)) return -1;
            memcpy(prefix, dp, 12 + omitted);
        }
        if(pb > omitted) memcpy(prefix + 12 + omitted, p, pb - omitted);
        break;
    case 2:
        if(omitted > 16 || (pb > omitted && len < pb - omitted)) return -1;
        if(omitted) {
            if(dp == NULL || v4mapped(dp)) return -1;
            memcpy(prefix, dp, omitted);
        }
        if(pb > omitted) memcpy(prefix + omitted, p, pb - omitted);
        break;
    case 3:
        if(pb > 8 && len < pb - 8) return -1;
        prefix[0] = 0xfe;
        prefix[1] = 0x80;
        if(pb > 8) memcpy(prefix + 8, p, pb - 8);
        break;
    default:
        return -1;
    }

    mask_prefix(p_r, prefix, plen < 0 ? 128 : ae == 1 ? plen + 96 : plen);
    return 1;
}

static int
network_address(int ae, const unsigned char *a, unsigned int len,
                unsigned char *a_r)
{
    return network_prefix(ae, -1, 0, a, NULL, len, a_r);
}

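/* Parse one received packet.  After the four-octet header, the body is a
   sequence of TLVs: one octet of type, one octet of length, then the body
   (Pad1 is the only TLV without a length octet).  Router-id, next-hop and
   default-prefix state established by one TLV applies to the TLVs that
   follow it in the same packet, which is why the have_* flags below are
   kept across the loop but reset for every packet. */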
void
parse_packet(const unsigned char *from, struct interface *ifp,
             const unsigned char *packet, int packetlen)
{
    int i;
    const unsigned char *message;
    unsigned char type, len;
    int bodylen;
    struct neighbour *neigh;
    int have_router_id = 0, have_v4_prefix = 0, have_v6_prefix = 0,
        have_v4_nh = 0, have_v6_nh = 0;
    unsigned char router_id[8], v4_prefix[16], v6_prefix[16],
        v4_nh[16], v6_nh[16];

    if(!linklocal(from)) {
        zlog_err("Received packet from non-local address %s.",
                 format_address(from));
        return;
    }

    if(packet[0] != 42) {
        zlog_err("Received malformed packet on %s from %s.",
                 ifp->name, format_address(from));
        return;
    }

    if(packet[1] != 2) {
        zlog_err("Received packet with unknown version %d on %s from %s.",
                 packet[1], ifp->name, format_address(from));
        return;
    }

    neigh = find_neighbour(from, ifp);
    if(neigh == NULL) {
        zlog_err("Couldn't allocate neighbour.");
        return;
    }

    DO_NTOHS(bodylen, packet + 2);

    if(bodylen + 4 > packetlen) {
        zlog_err("Received truncated packet (%d + 4 > %d).",
                 bodylen, packetlen);
        bodylen = packetlen - 4;
    }

    i = 0;
    while(i < bodylen) {
        message = packet + 4 + i;
        type = message[0];
        if(type == MESSAGE_PAD1) {
            debugf(BABEL_DEBUG_COMMON,"Received pad1 from %s on %s.",
                   format_address(from), ifp->name);
            i++;
            continue;
        }
        if(i + 1 > bodylen) {
            zlog_err("Received truncated message.");
            break;
        }
        len = message[1];
        if(i + len > bodylen) {
            zlog_err("Received truncated message.");
            break;
        }

        if(type == MESSAGE_PADN) {
            debugf(BABEL_DEBUG_COMMON,"Received pad%d from %s on %s.",
                   len, format_address(from), ifp->name);
        } else if(type == MESSAGE_ACK_REQ) {
            unsigned short nonce, interval;
            if(len < 6) goto fail;
            DO_NTOHS(nonce, message + 4);
            DO_NTOHS(interval, message + 6);
            debugf(BABEL_DEBUG_COMMON,"Received ack-req (%04X %d) from %s on %s.",
                   nonce, interval, format_address(from), ifp->name);
            send_ack(neigh, nonce, interval);
        } else if(type == MESSAGE_ACK) {
            debugf(BABEL_DEBUG_COMMON,"Received ack from %s on %s.",
                   format_address(from), ifp->name);
            /* Nothing right now */
        } else if(type == MESSAGE_HELLO) {
            unsigned short seqno, interval;
            int changed;
            if(len < 6) goto fail;
            DO_NTOHS(seqno, message + 4);
            DO_NTOHS(interval, message + 6);
            debugf(BABEL_DEBUG_COMMON,"Received hello %d (%d) from %s on %s.",
                   seqno, interval,
                   format_address(from), ifp->name);
            babel_get_if_nfo(ifp)->activity_time = babel_now.tv_sec;
            changed = update_neighbour(neigh, seqno, interval);
            update_neighbour_metric(neigh, changed);
            if(interval > 0)
                schedule_neighbours_check(interval * 10, 0);
        } else if(type == MESSAGE_IHU) {
            unsigned short txcost, interval;
            unsigned char address[16];
            int rc;
            if(len < 6) goto fail;
            DO_NTOHS(txcost, message + 4);
            DO_NTOHS(interval, message + 6);
            rc = network_address(message[2], message + 8, len - 6, address);
            if(rc < 0) goto fail;
            debugf(BABEL_DEBUG_COMMON,"Received ihu %d (%d) from %s on %s for %s.",
                   txcost, interval,
                   format_address(from), ifp->name,
                   format_address(address));
            if(message[2] == 0 || is_interface_ll_address(ifp, address)) {
                int changed = txcost != neigh->txcost;
                neigh->txcost = txcost;
                neigh->ihu_time = babel_now;
                neigh->ihu_interval = interval;
                update_neighbour_metric(neigh, changed);
                if(interval > 0)
                    schedule_neighbours_check(interval * 10 * 3, 0);
            }
        } else if(type == MESSAGE_ROUTER_ID) {
            if(len < 10) {
                have_router_id = 0;
                goto fail;
            }
            memcpy(router_id, message + 4, 8);
            have_router_id = 1;
            debugf(BABEL_DEBUG_COMMON,"Received router-id %s from %s on %s.",
                   format_eui64(router_id), format_address(from), ifp->name);
        } else if(type == MESSAGE_NH) {
            unsigned char nh[16];
            int rc;
            if(len < 2) {
                have_v4_nh = 0;
                have_v6_nh = 0;
                goto fail;
            }
            rc = network_address(message[2], message + 4, len - 2,
                                 nh);
            if(rc < 0) {
                have_v4_nh = 0;
                have_v6_nh = 0;
                goto fail;
            }
            debugf(BABEL_DEBUG_COMMON,"Received nh %s (%d) from %s on %s.",
                   format_address(nh), message[2],
                   format_address(from), ifp->name);
            if(message[2] == 1) {
                memcpy(v4_nh, nh, 16);
                have_v4_nh = 1;
            } else {
                memcpy(v6_nh, nh, 16);
                have_v6_nh = 1;
            }
        } else if(type == MESSAGE_UPDATE) {
            unsigned char prefix[16], *nh;
            unsigned char plen;
            unsigned short interval, seqno, metric;
            int rc;
            if(len < 10) {
                if(len < 2 || message[3] & 0x80)
                    have_v4_prefix = have_v6_prefix = 0;
                goto fail;
            }
            DO_NTOHS(interval, message + 6);
            DO_NTOHS(seqno, message + 8);
            DO_NTOHS(metric, message + 10);
            if(message[5] == 0 ||
               (message[2] == 1 ? have_v4_prefix : have_v6_prefix))
                rc = network_prefix(message[2], message[4], message[5],
                                    message + 12,
                                    message[2] == 1 ? v4_prefix : v6_prefix,
                                    len - 10, prefix);
            else
                rc = -1;
            if(rc < 0) {
                if(message[3] & 0x80)
                    have_v4_prefix = have_v6_prefix = 0;
                goto fail;
            }

            plen = message[4] + (message[2] == 1 ? 96 : 0);

            if(message[3] & 0x80) {
                if(message[2] == 1) {
                    memcpy(v4_prefix, prefix, 16);
                    have_v4_prefix = 1;
                } else {
                    memcpy(v6_prefix, prefix, 16);
                    have_v6_prefix = 1;
                }
            }
            if(message[3] & 0x40) {
                if(message[2] == 1) {
                    memset(router_id, 0, 4);
                    memcpy(router_id + 4, prefix + 12, 4);
                } else {
                    memcpy(router_id, prefix + 8, 8);
                }
                have_router_id = 1;
            }
            if(!have_router_id && message[2] != 0) {
                zlog_err("Received prefix with no router id.");
                goto fail;
            }
            debugf(BABEL_DEBUG_COMMON,"Received update%s%s for %s from %s on %s.",
                   (message[3] & 0x80) ? "/prefix" : "",
                   (message[3] & 0x40) ? "/id" : "",
                   format_prefix(prefix, plen),
                   format_address(from), ifp->name);

            if(message[2] == 0) {
                if(metric < 0xFFFF) {
                    zlog_err("Received wildcard update with finite metric.");
                    goto done;
                }
                retract_neighbour_routes(neigh);
                goto done;
            } else if(message[2] == 1) {
                if(!have_v4_nh)
                    goto fail;
                nh = v4_nh;
            } else if(have_v6_nh) {
                nh = v6_nh;
            } else {
                nh = neigh->address;
            }

            if(message[2] == 1) {
                if(!babel_get_if_nfo(ifp)->ipv4)
                    goto done;
            }

            update_route(router_id, prefix, plen, seqno, metric, interval,
                         neigh, nh);
        } else if(type == MESSAGE_REQUEST) {
            unsigned char prefix[16], plen;
            int rc;
            if(len < 2) goto fail;
            rc = network_prefix(message[2], message[3], 0,
                                message + 4, NULL, len - 2, prefix);
            if(rc < 0) goto fail;
            plen = message[3] + (message[2] == 1 ? 96 : 0);
            debugf(BABEL_DEBUG_COMMON,"Received request for %s from %s on %s.",
                   message[2] == 0 ? "any" : format_prefix(prefix, plen),
                   format_address(from), ifp->name);
            if(message[2] == 0) {
                /* If a neighbour is requesting a full route dump from us,
                   we might as well send it an IHU. */
                send_ihu(neigh, NULL);
                send_update(neigh->ifp, 0, NULL, 0);
            } else {
                send_update(neigh->ifp, 0, prefix, plen);
            }
        } else if(type == MESSAGE_MH_REQUEST) {
            unsigned char prefix[16], plen;
            unsigned short seqno;
            int rc;
            if(len < 14) goto fail;
            DO_NTOHS(seqno, message + 4);
            rc = network_prefix(message[2], message[3], 0,
                                message + 16, NULL, len - 14, prefix);
            if(rc < 0) goto fail;
            plen = message[3] + (message[2] == 1 ? 96 : 0);
            debugf(BABEL_DEBUG_COMMON,"Received request (%d) for %s from %s on %s (%s, %d).",
                   message[6],
                   format_prefix(prefix, plen),
                   format_address(from), ifp->name,
                   format_eui64(message + 8), seqno);
            handle_request(neigh, prefix, plen, message[6],
                           seqno, message + 8);
        } else {
            debugf(BABEL_DEBUG_COMMON,"Received unknown packet type %d from %s on %s.",
                   type, format_address(from), ifp->name);
        }
    done:
        i += len + 2;
        continue;

    fail:
        zlog_err("Couldn't parse packet (%d, %d) from %s on %s.",
                 message[0], message[1], format_address(from), ifp->name);
        goto done;
    }
    return;
}

/* Under normal circumstances, there are enough moderation mechanisms
   elsewhere in the protocol to make sure that this last-ditch check
   should never trigger.  But I'm superstitious. */

static int
check_bucket(struct interface *ifp)
{
    babel_interface_nfo *babel_ifp = babel_get_if_nfo(ifp);
    if(babel_ifp->bucket <= 0) {
        int seconds = babel_now.tv_sec - babel_ifp->bucket_time;
        if(seconds > 0) {
            babel_ifp->bucket = MIN(BUCKET_TOKENS_MAX,
                                    seconds * BUCKET_TOKENS_PER_SEC);
        }
        /* Reset bucket time unconditionally, in case clock is stepped. */
        babel_ifp->bucket_time = babel_now.tv_sec;
    }

    if(babel_ifp->bucket > 0) {
        babel_ifp->bucket--;
        return 1;
    } else {
        return 0;
    }
}

void
flushbuf(struct interface *ifp)
{
    int rc;
    struct sockaddr_in6 sin6;
    babel_interface_nfo *babel_ifp = babel_get_if_nfo(ifp);

    assert(babel_ifp->buffered <= babel_ifp->bufsize);

    flushupdates(ifp);

    if(babel_ifp->buffered > 0) {
        debugf(BABEL_DEBUG_COMMON,"  (flushing %d buffered bytes on %s)",
               babel_ifp->buffered, ifp->name);
        if(check_bucket(ifp)) {
            memset(&sin6, 0, sizeof(sin6));
            sin6.sin6_family = AF_INET6;
            memcpy(&sin6.sin6_addr, protocol_group, 16);
            sin6.sin6_port = htons(protocol_port);
            sin6.sin6_scope_id = ifp->ifindex;
            DO_HTONS(packet_header + 2, babel_ifp->buffered);
            rc = babel_send(protocol_socket,
                            packet_header, sizeof(packet_header),
                            babel_ifp->sendbuf, babel_ifp->buffered,
                            (struct sockaddr*)&sin6, sizeof(sin6));
            if(rc < 0)
                zlog_err("send: %s", safe_strerror(errno));
        } else {
            zlog_err("Warning: bucket full, dropping packet to %s.",
                     ifp->name);
        }
    }
    VALGRIND_MAKE_MEM_UNDEFINED(babel_ifp->sendbuf, babel_ifp->bufsize);
    babel_ifp->buffered = 0;
    babel_ifp->have_buffered_hello = 0;
    babel_ifp->have_buffered_id = 0;
    babel_ifp->have_buffered_nh = 0;
    babel_ifp->have_buffered_prefix = 0;
    babel_ifp->flush_timeout.tv_sec = 0;
    babel_ifp->flush_timeout.tv_usec = 0;
}

static void
schedule_flush(struct interface *ifp)
{
    babel_interface_nfo *babel_ifp = babel_get_if_nfo(ifp);
    unsigned msecs = jitter(babel_ifp, 0);
    if(babel_ifp->flush_timeout.tv_sec != 0 &&
       timeval_minus_msec(&babel_ifp->flush_timeout, &babel_now) < msecs)
        return;
    set_timeout(&babel_ifp->flush_timeout, msecs);
}

static void
schedule_flush_now(struct interface *ifp)
{
    babel_interface_nfo *babel_ifp = babel_get_if_nfo(ifp);
    /* Almost now */
    unsigned msecs = roughly(10);
    if(babel_ifp->flush_timeout.tv_sec != 0 &&
       timeval_minus_msec(&babel_ifp->flush_timeout, &babel_now) < msecs)
        return;
    set_timeout(&babel_ifp->flush_timeout, msecs);
}

static void
schedule_unicast_flush(unsigned msecs)
{
    if(!unicast_neighbour)
        return;
    if(unicast_flush_timeout.tv_sec != 0 &&
       timeval_minus_msec(&unicast_flush_timeout, &babel_now) < msecs)
        return;
    unicast_flush_timeout.tv_usec = (babel_now.tv_usec + msecs * 1000) % 1000000;
    unicast_flush_timeout.tv_sec =
        babel_now.tv_sec + (babel_now.tv_usec / 1000 + msecs) / 1000;
}

static void
ensure_space(struct interface *ifp, int space)
{
    babel_interface_nfo *babel_ifp = babel_get_if_nfo(ifp);
    if(babel_ifp->bufsize - babel_ifp->buffered < space)
        flushbuf(ifp);
}

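/* The helpers below accumulate TLVs into the per-interface send buffer (or
   into the single shared unicast buffer).  start_message() writes the type
   and length octets, the accumulate_* functions append the body, and
   end_message() sanity-checks the result and schedules a flush. */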
static void
start_message(struct interface *ifp, int type, int len)
{
    babel_interface_nfo *babel_ifp = babel_get_if_nfo(ifp);
    if(babel_ifp->bufsize - babel_ifp->buffered < len + 2)
        flushbuf(ifp);
    babel_ifp->sendbuf[babel_ifp->buffered++] = type;
    babel_ifp->sendbuf[babel_ifp->buffered++] = len;
}

static void
end_message(struct interface *ifp, int type, int bytes)
{
    babel_interface_nfo *babel_ifp = babel_get_if_nfo(ifp);
    assert(babel_ifp->buffered >= bytes + 2 &&
           babel_ifp->sendbuf[babel_ifp->buffered - bytes - 2] == type &&
           babel_ifp->sendbuf[babel_ifp->buffered - bytes - 1] == bytes);
    schedule_flush(ifp);
}

static void
accumulate_byte(struct interface *ifp, unsigned char value)
{
    babel_interface_nfo *babel_ifp = babel_get_if_nfo(ifp);
    babel_ifp->sendbuf[babel_ifp->buffered++] = value;
}

static void
accumulate_short(struct interface *ifp, unsigned short value)
{
    babel_interface_nfo *babel_ifp = babel_get_if_nfo(ifp);
    DO_HTONS(babel_ifp->sendbuf + babel_ifp->buffered, value);
    babel_ifp->buffered += 2;
}

static void
accumulate_bytes(struct interface *ifp,
                 const unsigned char *value, unsigned len)
{
    babel_interface_nfo *babel_ifp = babel_get_if_nfo(ifp);
    memcpy(babel_ifp->sendbuf + babel_ifp->buffered, value, len);
    babel_ifp->buffered += len;
}

static int
start_unicast_message(struct neighbour *neigh, int type, int len)
{
    if(unicast_neighbour) {
        if(neigh != unicast_neighbour ||
           unicast_buffered + len + 2 >=
           MIN(UNICAST_BUFSIZE, babel_get_if_nfo(neigh->ifp)->bufsize))
            flush_unicast(0);
    }
    if(!unicast_buffer)
        unicast_buffer = malloc(UNICAST_BUFSIZE);
    if(!unicast_buffer) {
        zlog_err("malloc(unicast_buffer): %s", safe_strerror(errno));
        return -1;
    }

    unicast_neighbour = neigh;

    unicast_buffer[unicast_buffered++] = type;
    unicast_buffer[unicast_buffered++] = len;
    return 1;
}

static void
end_unicast_message(struct neighbour *neigh, int type, int bytes)
{
    assert(unicast_neighbour == neigh && unicast_buffered >= bytes + 2 &&
           unicast_buffer[unicast_buffered - bytes - 2] == type &&
           unicast_buffer[unicast_buffered - bytes - 1] == bytes);
    schedule_unicast_flush(jitter(babel_get_if_nfo(neigh->ifp), 0));
}

static void
accumulate_unicast_byte(struct neighbour *neigh, unsigned char value)
{
    unicast_buffer[unicast_buffered++] = value;
}

static void
accumulate_unicast_short(struct neighbour *neigh, unsigned short value)
{
    DO_HTONS(unicast_buffer + unicast_buffered, value);
    unicast_buffered += 2;
}

static void
accumulate_unicast_bytes(struct neighbour *neigh,
                         const unsigned char *value, unsigned len)
{
    memcpy(unicast_buffer + unicast_buffered, value, len);
    unicast_buffered += len;
}

void
send_ack(struct neighbour *neigh, unsigned short nonce, unsigned short interval)
{
    int rc;
    debugf(BABEL_DEBUG_COMMON,"Sending ack (%04x) to %s on %s.",
           nonce, format_address(neigh->address), neigh->ifp->name);
    rc = start_unicast_message(neigh, MESSAGE_ACK, 2); if(rc < 0) return;
    accumulate_unicast_short(neigh, nonce);
    end_unicast_message(neigh, MESSAGE_ACK, 2);
    /* Roughly yields a value no larger than 3/2, so this meets the deadline */
    schedule_unicast_flush(roughly(interval * 6));
}

void
send_hello_noupdate(struct interface *ifp, unsigned interval)
{
    babel_interface_nfo *babel_ifp = babel_get_if_nfo(ifp);
    /* This avoids sending multiple hellos in a single packet, which breaks
       link quality estimation. */
    if(babel_ifp->have_buffered_hello)
        flushbuf(ifp);

    babel_ifp->hello_seqno = seqno_plus(babel_ifp->hello_seqno, 1);
    set_timeout(&babel_ifp->hello_timeout, babel_ifp->hello_interval);

    if(!if_up(ifp))
        return;

    debugf(BABEL_DEBUG_COMMON,"Sending hello %d (%d) to %s.",
           babel_ifp->hello_seqno, interval, ifp->name);

    start_message(ifp, MESSAGE_HELLO, 6);
    accumulate_short(ifp, 0);
    accumulate_short(ifp, babel_ifp->hello_seqno);
    accumulate_short(ifp, interval > 0xFFFF ? 0xFFFF : interval);
    end_message(ifp, MESSAGE_HELLO, 6);
    babel_ifp->have_buffered_hello = 1;
}

void
send_hello(struct interface *ifp)
{
    babel_interface_nfo *babel_ifp = babel_get_if_nfo(ifp);
    send_hello_noupdate(ifp, (babel_ifp->hello_interval + 9) / 10);
    /* Send full IHU every 3 hellos, and marginal IHU each time */
    if(babel_ifp->hello_seqno % 3 == 0)
        send_ihu(NULL, ifp);
    else
        send_marginal_ihu(ifp);
}

void
flush_unicast(int dofree)
{
    struct sockaddr_in6 sin6;
    int rc;

    if(unicast_buffered == 0)
        goto done;

    if(!if_up(unicast_neighbour->ifp))
        goto done;

    /* Preserve ordering of messages */
    flushbuf(unicast_neighbour->ifp);

    if(check_bucket(unicast_neighbour->ifp)) {
        memset(&sin6, 0, sizeof(sin6));
        sin6.sin6_family = AF_INET6;
        memcpy(&sin6.sin6_addr, unicast_neighbour->address, 16);
        sin6.sin6_port = htons(protocol_port);
        sin6.sin6_scope_id = unicast_neighbour->ifp->ifindex;
        DO_HTONS(packet_header + 2, unicast_buffered);
        rc = babel_send(protocol_socket,
                        packet_header, sizeof(packet_header),
                        unicast_buffer, unicast_buffered,
                        (struct sockaddr*)&sin6, sizeof(sin6));
        if(rc < 0)
            zlog_err("send(unicast): %s", safe_strerror(errno));
    } else {
        zlog_err("Warning: bucket full, dropping unicast packet to %s if %s.",
                 format_address(unicast_neighbour->address),
                 unicast_neighbour->ifp->name);
    }

 done:
    VALGRIND_MAKE_MEM_UNDEFINED(unicast_buffer, UNICAST_BUFSIZE);
    unicast_buffered = 0;
    if(dofree && unicast_buffer) {
        free(unicast_buffer);
        unicast_buffer = NULL;
    }
    unicast_neighbour = NULL;
    unicast_flush_timeout.tv_sec = 0;
    unicast_flush_timeout.tv_usec = 0;
}

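/* Append a single update TLV for the given prefix to the interface buffer.
   To keep updates small, the prefix may be sent relative to the previously
   buffered one (the "omit" count), flag 0x80 makes it the new default
   prefix, and flag 0x40 lets the receiver derive the router-id from the
   prefix itself instead of a separate router-id TLV. */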
static void
really_send_update(struct interface *ifp,
                   const unsigned char *id,
                   const unsigned char *prefix, unsigned char plen,
                   unsigned short seqno, unsigned short metric)
{
    babel_interface_nfo *babel_ifp = babel_get_if_nfo(ifp);
    int add_metric, v4, real_plen, omit = 0;
    const unsigned char *real_prefix;
    unsigned short flags = 0;

    if(!if_up(ifp))
        return;

    add_metric = output_filter(id, prefix, plen, ifp->ifindex);
    if(add_metric >= INFINITY)
        return;

    metric = MIN(metric + add_metric, INFINITY);
    /* Worst case */
    ensure_space(ifp, 20 + 12 + 28);

    v4 = plen >= 96 && v4mapped(prefix);

    if(v4) {
        if(!babel_ifp->ipv4)
            return;
        if(!babel_ifp->have_buffered_nh ||
           memcmp(babel_ifp->buffered_nh, babel_ifp->ipv4, 4) != 0) {
            start_message(ifp, MESSAGE_NH, 6);
            accumulate_byte(ifp, 1);
            accumulate_byte(ifp, 0);
            accumulate_bytes(ifp, babel_ifp->ipv4, 4);
            end_message(ifp, MESSAGE_NH, 6);
            memcpy(babel_ifp->buffered_nh, babel_ifp->ipv4, 4);
            babel_ifp->have_buffered_nh = 1;
        }

        real_prefix = prefix + 12;
        real_plen = plen - 96;
    } else {
        if(babel_ifp->have_buffered_prefix) {
            while(omit < plen / 8 &&
                  babel_ifp->buffered_prefix[omit] == prefix[omit])
                omit++;
        }
        if(!babel_ifp->have_buffered_prefix || plen >= 48)
            flags |= 0x80;
        real_prefix = prefix;
        real_plen = plen;
    }

    if(!babel_ifp->have_buffered_id
       || memcmp(id, babel_ifp->buffered_id, 8) != 0) {
        if(real_plen == 128 && memcmp(real_prefix + 8, id, 8) == 0) {
            flags |= 0x40;
        } else {
            start_message(ifp, MESSAGE_ROUTER_ID, 10);
            accumulate_short(ifp, 0);
            accumulate_bytes(ifp, id, 8);
            end_message(ifp, MESSAGE_ROUTER_ID, 10);
        }
        memcpy(babel_ifp->buffered_id, id, 8);
        babel_ifp->have_buffered_id = 1;
    }

    start_message(ifp, MESSAGE_UPDATE, 10 + (real_plen + 7) / 8 - omit);
    accumulate_byte(ifp, v4 ? 1 : 2);
    accumulate_byte(ifp, flags);
    accumulate_byte(ifp, real_plen);
    accumulate_byte(ifp, omit);
    accumulate_short(ifp, (babel_ifp->update_interval + 5) / 10);
    accumulate_short(ifp, seqno);
    accumulate_short(ifp, metric);
    accumulate_bytes(ifp, real_prefix + omit, (real_plen + 7) / 8 - omit);
    end_message(ifp, MESSAGE_UPDATE, 10 + (real_plen + 7) / 8 - omit);

    if(flags & 0x80) {
        memcpy(babel_ifp->buffered_prefix, prefix, 16);
        babel_ifp->have_buffered_prefix = 1;
    }
}

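/* Sort order for buffered updates: group by router-id, send IPv6 before
   IPv4 within a group, and put a router's own /128 first so that later
   updates can derive the router-id from it (flag 0x40 above); more
   specific prefixes then come before less specific ones. */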
static int
compare_buffered_updates(const void *av, const void *bv)
{
    const struct buffered_update *a = av, *b = bv;
    int rc, v4a, v4b, ma, mb;

    rc = memcmp(a->id, b->id, 8);
    if(rc != 0)
        return rc;

    v4a = (a->plen >= 96 && v4mapped(a->prefix));
    v4b = (b->plen >= 96 && v4mapped(b->prefix));

    if(v4a > v4b)
        return 1;
    else if(v4a < v4b)
        return -1;

    ma = (!v4a && a->plen == 128 && memcmp(a->prefix + 8, a->id, 8) == 0);
    mb = (!v4b && b->plen == 128 && memcmp(b->prefix + 8, b->id, 8) == 0);

    if(ma > mb)
        return -1;
    else if(mb > ma)
        return 1;

    if(a->plen < b->plen)
        return 1;
    else if(a->plen > b->plen)
        return -1;

    return memcmp(a->prefix, b->prefix, 16);
}

void
flushupdates(struct interface *ifp)
{
    babel_interface_nfo *babel_ifp = NULL;
    struct xroute *xroute;
    struct babel_route *route;
    const unsigned char *last_prefix = NULL;
    unsigned char last_plen = 0xFF;
    int i;

    if(ifp == NULL) {
        struct interface *ifp_aux;
        struct listnode *linklist_node = NULL;
        FOR_ALL_INTERFACES(ifp_aux, linklist_node)
            flushupdates(ifp_aux);
        return;
    }

    babel_ifp = babel_get_if_nfo(ifp);
    if(babel_ifp->num_buffered_updates > 0) {
        struct buffered_update *b = babel_ifp->buffered_updates;
        int n = babel_ifp->num_buffered_updates;

        babel_ifp->buffered_updates = NULL;
        babel_ifp->update_bufsize = 0;
        babel_ifp->num_buffered_updates = 0;

        if(!if_up(ifp))
            goto done;

        debugf(BABEL_DEBUG_COMMON,"  (flushing %d buffered updates on %s (%d))",
               n, ifp->name, ifp->ifindex);

        /* In order to send fewer update messages, we want to send updates
           with the same router-id together, with IPv6 going out before IPv4. */

        for(i = 0; i < n; i++) {
            route = find_installed_route(b[i].prefix, b[i].plen);
            if(route)
                memcpy(b[i].id, route->src->id, 8);
            else
                memcpy(b[i].id, myid, 8);
        }

        qsort(b, n, sizeof(struct buffered_update), compare_buffered_updates);

        for(i = 0; i < n; i++) {
            unsigned short seqno;
            unsigned short metric;

            /* The same update may be scheduled multiple times before it is
               sent out.  Since our buffer is now sorted, it is enough to
               compare with the previous update. */

            if(last_prefix) {
                if(b[i].plen == last_plen &&
                   memcmp(b[i].prefix, last_prefix, 16) == 0)
                    continue;
            }

            xroute = find_xroute(b[i].prefix, b[i].plen);
            route = find_installed_route(b[i].prefix, b[i].plen);

            if(xroute && (!route || xroute->metric <= kernel_metric)) {
                really_send_update(ifp, myid,
                                   xroute->prefix, xroute->plen,
                                   myseqno, xroute->metric);
                last_prefix = xroute->prefix;
                last_plen = xroute->plen;
            } else if(route) {
                seqno = route->seqno;
                metric = route_metric(route);
                if(metric < INFINITY)
                    satisfy_request(route->src->prefix, route->src->plen,
                                    seqno, route->src->id, ifp);
                if((babel_ifp->flags & BABEL_IF_SPLIT_HORIZON) &&
                   route->neigh->ifp == ifp)
                    continue;
                really_send_update(ifp, route->src->id,
                                   route->src->prefix,
                                   route->src->plen,
                                   seqno, metric);
                update_source(route->src, seqno, metric);
                last_prefix = route->src->prefix;
                last_plen = route->src->plen;
            } else {
                /* There's no route for this prefix.  This can happen shortly
                   after an xroute has been retracted, so send a retraction. */
                really_send_update(ifp, myid, b[i].prefix, b[i].plen,
                                   myseqno, INFINITY);
            }
        }
        schedule_flush_now(ifp);
    done:
        free(b);
    }
    babel_ifp->update_flush_timeout.tv_sec = 0;
    babel_ifp->update_flush_timeout.tv_usec = 0;
}

static void
schedule_update_flush(struct interface *ifp, int urgent)
{
    babel_interface_nfo *babel_ifp = babel_get_if_nfo(ifp);
    unsigned msecs;
    msecs = update_jitter(babel_ifp, urgent);
    if(babel_ifp->update_flush_timeout.tv_sec != 0 &&
       timeval_minus_msec(&babel_ifp->update_flush_timeout, &babel_now) < msecs)
        return;
    set_timeout(&babel_ifp->update_flush_timeout, msecs);
}

static void
buffer_update(struct interface *ifp,
              const unsigned char *prefix, unsigned char plen)
{
    babel_interface_nfo *babel_ifp = babel_get_if_nfo(ifp);
    if(babel_ifp->num_buffered_updates > 0 &&
       babel_ifp->num_buffered_updates >= babel_ifp->update_bufsize)
        flushupdates(ifp);

    if(babel_ifp->update_bufsize == 0) {
        int n;
        assert(babel_ifp->buffered_updates == NULL);
        n = MAX(babel_ifp->bufsize / 16, 4);
    again:
        babel_ifp->buffered_updates = malloc(n * sizeof(struct buffered_update));
        if(babel_ifp->buffered_updates == NULL) {
            zlog_err("malloc(buffered_updates): %s", safe_strerror(errno));
            if(n > 4) {
                n = 4;
                goto again;
            }
            return;
        }
        babel_ifp->update_bufsize = n;
        babel_ifp->num_buffered_updates = 0;
    }

    memcpy(babel_ifp->buffered_updates[babel_ifp->num_buffered_updates].prefix,
           prefix, 16);
    babel_ifp->buffered_updates[babel_ifp->num_buffered_updates].plen = plen;
    babel_ifp->num_buffered_updates++;
}

void
send_update(struct interface *ifp, int urgent,
            const unsigned char *prefix, unsigned char plen)
{
    babel_interface_nfo *babel_ifp = NULL;
    int i;

    if(ifp == NULL) {
        struct interface *ifp_aux;
        struct listnode *linklist_node = NULL;
        struct babel_route *route;
        FOR_ALL_INTERFACES(ifp_aux, linklist_node)
            send_update(ifp_aux, urgent, prefix, plen);
        if(prefix) {
            /* Since flushupdates only deals with non-wildcard interfaces, we
               need to do this now. */
            route = find_installed_route(prefix, plen);
            if(route && route_metric(route) < INFINITY)
                satisfy_request(prefix, plen, route->src->seqno, route->src->id,
                                NULL);
        }
        return;
    }

    if(!if_up(ifp))
        return;

    babel_ifp = babel_get_if_nfo(ifp);
    if(prefix) {
        if(!parasitic || find_xroute(prefix, plen)) {
            debugf(BABEL_DEBUG_COMMON,"Sending update to %s for %s.",
                   ifp->name, format_prefix(prefix, plen));
            buffer_update(ifp, prefix, plen);
        }
    } else {
        if(!interface_idle(babel_ifp)) {
            send_self_update(ifp);
            if(!parasitic) {
                debugf(BABEL_DEBUG_COMMON,"Sending update to %s for any.", ifp->name);
                for(i = 0; i < numroutes; i++)
                    if(routes[i].installed)
                        buffer_update(ifp,
                                      routes[i].src->prefix,
                                      routes[i].src->plen);
            }
        }
        set_timeout(&babel_ifp->update_timeout, babel_ifp->update_interval);
    }
    schedule_update_flush(ifp, urgent);
}

void
send_update_resend(struct interface *ifp,
                   const unsigned char *prefix, unsigned char plen)
{
    int delay;

    assert(prefix != NULL);

    send_update(ifp, 1, prefix, plen);

    delay = 2000;
    delay = MIN(delay, wireless_hello_interval / 2);
    delay = MIN(delay, wired_hello_interval / 2);
    delay = MAX(delay, 10);
    record_resend(RESEND_UPDATE, prefix, plen, 0, 0, NULL, delay);
}

void
send_wildcard_retraction(struct interface *ifp)
{
    babel_interface_nfo *babel_ifp = NULL;
    if(ifp == NULL) {
        struct interface *ifp_aux;
        struct listnode *linklist_node = NULL;
        FOR_ALL_INTERFACES(ifp_aux, linklist_node)
            send_wildcard_retraction(ifp_aux);
        return;
    }

    if(!if_up(ifp))
        return;

    babel_ifp = babel_get_if_nfo(ifp);
    start_message(ifp, MESSAGE_UPDATE, 10);
    accumulate_byte(ifp, 0);
    accumulate_byte(ifp, 0x40);
    accumulate_byte(ifp, 0);
    accumulate_byte(ifp, 0);
    accumulate_short(ifp, 0xFFFF);
    accumulate_short(ifp, myseqno);
    accumulate_short(ifp, 0xFFFF);
    end_message(ifp, MESSAGE_UPDATE, 10);

    babel_ifp->have_buffered_id = 0;
}

void
update_myseqno()
{
    myseqno = seqno_plus(myseqno, 1);
    seqno_time = babel_now;
}

void
send_self_update(struct interface *ifp)
{
    int i;

    if(ifp == NULL) {
        struct interface *ifp_aux;
        struct listnode *linklist_node = NULL;
        FOR_ALL_INTERFACES(ifp_aux, linklist_node) {
            if(!if_up(ifp_aux))
                continue;
            send_self_update(ifp_aux);
        }
        return;
    }

    if(!interface_idle(babel_get_if_nfo(ifp))) {
        debugf(BABEL_DEBUG_COMMON,"Sending self update to %s.", ifp->name);
        for(i = 0; i < numxroutes; i++)
            send_update(ifp, 0, xroutes[i].prefix, xroutes[i].plen);
    }
}

void
send_ihu(struct neighbour *neigh, struct interface *ifp)
{
    babel_interface_nfo *babel_ifp = NULL;
    int rxcost, interval;
    int ll;

    if(neigh == NULL && ifp == NULL) {
        struct interface *ifp_aux;
        struct listnode *linklist_node = NULL;
        FOR_ALL_INTERFACES(ifp_aux, linklist_node) {
            if(!if_up(ifp_aux))
                continue;
            send_ihu(NULL, ifp_aux);
        }
        return;
    }

    if(neigh == NULL) {
        struct neighbour *ngh;
        FOR_ALL_NEIGHBOURS(ngh) {
            if(ngh->ifp == ifp)
                send_ihu(ngh, ifp);
        }
        return;
    }


    if(ifp && neigh->ifp != ifp)
        return;

    ifp = neigh->ifp;
    babel_ifp = babel_get_if_nfo(ifp);
    if(!if_up(ifp))
        return;

    rxcost = neighbour_rxcost(neigh);
    interval = (babel_ifp->hello_interval * 3 + 9) / 10;

    /* Conceptually, an IHU is a unicast message.  We usually send them as
       multicast, since this allows aggregation into a single packet and
       avoids an ARP exchange.  If we already have a unicast message queued
       for this neighbour, however, we might as well piggyback the IHU. */
    debugf(BABEL_DEBUG_COMMON,"Sending %sihu %d on %s to %s.",
           unicast_neighbour == neigh ? "unicast " : "",
           rxcost,
           neigh->ifp->name,
           format_address(neigh->address));

    ll = linklocal(neigh->address);

    if(unicast_neighbour != neigh) {
        start_message(ifp, MESSAGE_IHU, ll ? 14 : 22);
        accumulate_byte(ifp, ll ? 3 : 2);
        accumulate_byte(ifp, 0);
        accumulate_short(ifp, rxcost);
        accumulate_short(ifp, interval);
        if(ll)
            accumulate_bytes(ifp, neigh->address + 8, 8);
        else
            accumulate_bytes(ifp, neigh->address, 16);
        end_message(ifp, MESSAGE_IHU, ll ? 14 : 22);
    } else {
        int rc;
        rc = start_unicast_message(neigh, MESSAGE_IHU, ll ? 14 : 22);
        if(rc < 0) return;
        accumulate_unicast_byte(neigh, ll ? 3 : 2);
        accumulate_unicast_byte(neigh, 0);
        accumulate_unicast_short(neigh, rxcost);
        accumulate_unicast_short(neigh, interval);
        if(ll)
            accumulate_unicast_bytes(neigh, neigh->address + 8, 8);
        else
            accumulate_unicast_bytes(neigh, neigh->address, 16);
        end_unicast_message(neigh, MESSAGE_IHU, ll ? 14 : 22);
    }
}

/* Send IHUs to all marginal neighbours */
void
send_marginal_ihu(struct interface *ifp)
{
    struct neighbour *neigh;
    FOR_ALL_NEIGHBOURS(neigh) {
        if(ifp && neigh->ifp != ifp)
            continue;
        if(neigh->txcost >= 384 || (neigh->reach & 0xF000) != 0xF000)
            send_ihu(neigh, ifp);
    }
}

void
send_request(struct interface *ifp,
             const unsigned char *prefix, unsigned char plen)
{
    int v4, len;

    if(ifp == NULL) {
        struct interface *ifp_aux;
        struct listnode *linklist_node = NULL;
        FOR_ALL_INTERFACES(ifp_aux, linklist_node) {
            if(!if_up(ifp_aux))
                continue;
            send_request(ifp_aux, prefix, plen);
        }
        return;
    }

    /* make sure any buffered updates go out before this request. */
    flushupdates(ifp);

    if(!if_up(ifp))
        return;

    debugf(BABEL_DEBUG_COMMON,"sending request to %s for %s.",
           ifp->name, prefix ? format_prefix(prefix, plen) : "any");
    v4 = plen >= 96 && v4mapped(prefix);
    len = !prefix ? 2 : v4 ? 6 : 18;

    start_message(ifp, MESSAGE_REQUEST, len);
    accumulate_byte(ifp, !prefix ? 0 : v4 ? 1 : 2);
    accumulate_byte(ifp, !prefix ? 0 : v4 ? plen - 96 : plen);
    if(prefix) {
        if(v4)
            accumulate_bytes(ifp, prefix + 12, 4);
        else
            accumulate_bytes(ifp, prefix, 16);
    }
    end_message(ifp, MESSAGE_REQUEST, len);
}

void
send_unicast_request(struct neighbour *neigh,
                     const unsigned char *prefix, unsigned char plen)
{
    int rc, v4, len;

    /* make sure any buffered updates go out before this request. */
    flushupdates(neigh->ifp);

    debugf(BABEL_DEBUG_COMMON,"sending unicast request to %s for %s.",
           format_address(neigh->address),
           prefix ? format_prefix(prefix, plen) : "any");
    v4 = plen >= 96 && v4mapped(prefix);
    len = !prefix ? 2 : v4 ? 6 : 18;

    rc = start_unicast_message(neigh, MESSAGE_REQUEST, len);
    if(rc < 0) return;
    accumulate_unicast_byte(neigh, !prefix ? 0 : v4 ? 1 : 2);
    accumulate_unicast_byte(neigh, !prefix ? 0 : v4 ? plen - 96 : plen);
    if(prefix) {
        if(v4)
            accumulate_unicast_bytes(neigh, prefix + 12, 4);
        else
            accumulate_unicast_bytes(neigh, prefix, 16);
    }
    end_unicast_message(neigh, MESSAGE_REQUEST, len);
}

void
send_multihop_request(struct interface *ifp,
                      const unsigned char *prefix, unsigned char plen,
                      unsigned short seqno, const unsigned char *id,
                      unsigned short hop_count)
{
    int v4, pb, len;

    /* Make sure any buffered updates go out before this request. */
    flushupdates(ifp);

    if(ifp == NULL) {
        struct interface *ifp_aux;
        struct listnode *linklist_node = NULL;
        FOR_ALL_INTERFACES(ifp_aux, linklist_node) {
            if(!if_up(ifp_aux))
                continue;
            send_multihop_request(ifp_aux, prefix, plen, seqno, id, hop_count);
        }
        return;
    }

    if(!if_up(ifp))
        return;

    debugf(BABEL_DEBUG_COMMON,"Sending request (%d) on %s for %s.",
           hop_count, ifp->name, format_prefix(prefix, plen));
    v4 = plen >= 96 && v4mapped(prefix);
    pb = v4 ? ((plen - 96) + 7) / 8 : (plen + 7) / 8;
    len = 6 + 8 + pb;

    start_message(ifp, MESSAGE_MH_REQUEST, len);
    accumulate_byte(ifp, v4 ? 1 : 2);
    accumulate_byte(ifp, v4 ? plen - 96 : plen);
    accumulate_short(ifp, seqno);
    accumulate_byte(ifp, hop_count);
    accumulate_byte(ifp, 0);
    accumulate_bytes(ifp, id, 8);
    if(prefix) {
        if(v4)
            accumulate_bytes(ifp, prefix + 12, pb);
        else
            accumulate_bytes(ifp, prefix, pb);
    }
    end_message(ifp, MESSAGE_MH_REQUEST, len);
}

void
send_unicast_multihop_request(struct neighbour *neigh,
                              const unsigned char *prefix, unsigned char plen,
                              unsigned short seqno, const unsigned char *id,
                              unsigned short hop_count)
{
    int rc, v4, pb, len;

    /* Make sure any buffered updates go out before this request. */
    flushupdates(neigh->ifp);

    debugf(BABEL_DEBUG_COMMON,"Sending multi-hop request to %s for %s (%d hops).",
           format_address(neigh->address),
           format_prefix(prefix, plen), hop_count);
    v4 = plen >= 96 && v4mapped(prefix);
    pb = v4 ? ((plen - 96) + 7) / 8 : (plen + 7) / 8;
    len = 6 + 8 + pb;

    rc = start_unicast_message(neigh, MESSAGE_MH_REQUEST, len);
    if(rc < 0) return;
    accumulate_unicast_byte(neigh, v4 ? 1 : 2);
    accumulate_unicast_byte(neigh, v4 ? plen - 96 : plen);
    accumulate_unicast_short(neigh, seqno);
    accumulate_unicast_byte(neigh, hop_count);
    accumulate_unicast_byte(neigh, 0);
    accumulate_unicast_bytes(neigh, id, 8);
    if(prefix) {
        if(v4)
            accumulate_unicast_bytes(neigh, prefix + 12, pb);
        else
            accumulate_unicast_bytes(neigh, prefix, pb);
    }
    end_unicast_message(neigh, MESSAGE_MH_REQUEST, len);
}

void
send_request_resend(struct neighbour *neigh,
                    const unsigned char *prefix, unsigned char plen,
                    unsigned short seqno, unsigned char *id)
{
    int delay;

    if(neigh)
        send_unicast_multihop_request(neigh, prefix, plen, seqno, id, 127);
    else
        send_multihop_request(NULL, prefix, plen, seqno, id, 127);

    delay = 2000;
    delay = MIN(delay, wireless_hello_interval / 2);
    delay = MIN(delay, wired_hello_interval / 2);
    delay = MAX(delay, 10);
    record_resend(RESEND_REQUEST, prefix, plen, seqno, id,
                  neigh ? neigh->ifp : NULL, delay);
}

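/* Handle a (possibly multi-hop) route or seqno request.  If we can satisfy
   it ourselves we answer with an update, bumping our own seqno when the
   request is for one of our redistributed routes; otherwise, if the hop
   count allows it, we forward the request towards the route's current
   successor (or another neighbour if the successor is the requestor). */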
void
handle_request(struct neighbour *neigh, const unsigned char *prefix,
               unsigned char plen, unsigned char hop_count,
               unsigned short seqno, const unsigned char *id)
{
    struct xroute *xroute;
    struct babel_route *route;
    struct neighbour *successor = NULL;

    xroute = find_xroute(prefix, plen);
    route = find_installed_route(prefix, plen);

    if(xroute && (!route || xroute->metric <= kernel_metric)) {
        if(hop_count > 0 && memcmp(id, myid, 8) == 0) {
            if(seqno_compare(seqno, myseqno) > 0) {
                if(seqno_minus(seqno, myseqno) > 100) {
                    /* Hopelessly out-of-date request */
                    return;
                }
                update_myseqno();
            }
        }
        send_update(neigh->ifp, 1, prefix, plen);
        return;
    }

    if(route &&
       (memcmp(id, route->src->id, 8) != 0 ||
        seqno_compare(seqno, route->seqno) <= 0)) {
        send_update(neigh->ifp, 1, prefix, plen);
        return;
    }

    if(hop_count <= 1)
        return;

    if(route && memcmp(id, route->src->id, 8) == 0 &&
       seqno_minus(seqno, route->seqno) > 100) {
        /* Hopelessly out-of-date */
        return;
    }

    if(request_redundant(neigh->ifp, prefix, plen, seqno, id))
        return;

    /* Let's try to forward this request. */
    if(route && route_metric(route) < INFINITY)
        successor = route->neigh;

    if(!successor || successor == neigh) {
        /* We were about to forward a request to its requestor.  Try to
           find a different neighbour to forward the request to. */
        struct babel_route *other_route;

        other_route = find_best_route(prefix, plen, 0, neigh);
        if(other_route && route_metric(other_route) < INFINITY)
            successor = other_route->neigh;
    }

    if(!successor || successor == neigh)
        /* Give up */
        return;

    send_unicast_multihop_request(successor, prefix, plen, seqno, id,
                                  hop_count - 1);
    record_resend(RESEND_REQUEST, prefix, plen, seqno, id,
                  neigh->ifp, 0);
}