/*
 * Main implementation file for interface to Forwarding Plane Manager.
 *
 * Copyright (C) 2012 by Open Source Routing.
 * Copyright (C) 2012 by Internet Systems Consortium, Inc. ("ISC")
 *
 * This file is part of GNU Zebra.
 *
 * GNU Zebra is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * GNU Zebra is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU Zebra; see the file COPYING.  If not, write to the Free
 * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
 * 02111-1307, USA.
 */

#include <zebra.h>

#include "log.h"
#include "stream.h"
#include "thread.h"
#include "network.h"
#include "command.h"

#include "zebra/rib.h"

#include "fpm/fpm.h"
#include "zebra_fpm.h"
#include "zebra_fpm_private.h"

/*
 * Interval at which we attempt to connect to the FPM.
 */
#define ZFPM_CONNECT_RETRY_IVL 5

/*
 * Sizes of outgoing and incoming stream buffers for writing/reading
 * FPM messages.
 */
#define ZFPM_OBUF_SIZE (2 * FPM_MAX_MSG_LEN)
#define ZFPM_IBUF_SIZE (FPM_MAX_MSG_LEN)

/*
 * The maximum number of times the FPM socket write callback can call
 * 'write' before it yields.
 */
#define ZFPM_MAX_WRITES_PER_RUN 10

/*
 * Interval over which we collect statistics.
 */
#define ZFPM_STATS_IVL_SECS 10

/*
 * Default keepalive interval.
 */
#define ZFPM_KEEPALIVE_IVL_SECS 2

#define FPM_STR "Forwarding Plane Manager configuration\n"

/*
 * Structure that holds state for iterating over all route_node
 * structures that are candidates for being communicated to the FPM.
 */
typedef struct zfpm_rnodes_iter_t_
{
  rib_tables_iter_t tables_iter;
  route_table_iter_t iter;
} zfpm_rnodes_iter_t;

/*
 * Statistics.
 */
typedef struct zfpm_stats_t_ {
  unsigned long connect_calls;
  unsigned long connect_no_sock;

  unsigned long read_cb_calls;

  unsigned long write_cb_calls;
  unsigned long write_calls;
  unsigned long partial_writes;
  unsigned long max_writes_hit;
  unsigned long t_write_yields;

  unsigned long nop_deletes_skipped;
  unsigned long route_adds;
  unsigned long route_dels;

  unsigned long updates_triggered;
  unsigned long redundant_triggers;
  unsigned long non_fpm_table_triggers;

  unsigned long dests_del_after_update;

  unsigned long keepalive_cb_calls;

  unsigned long t_conn_down_starts;
  unsigned long t_conn_down_dests_processed;
  unsigned long t_conn_down_yields;
  unsigned long t_conn_down_finishes;

  unsigned long t_conn_up_starts;
  unsigned long t_conn_up_dests_processed;
  unsigned long t_conn_up_yields;
  unsigned long t_conn_up_aborts;
  unsigned long t_conn_up_finishes;

} zfpm_stats_t;

/*
 * States for the FPM state machine.
 */
typedef enum {

  /*
   * In this state we are not yet ready to connect to the FPM. This
   * can happen when this module is disabled, or if we're cleaning up
   * after a connection has gone down.
   */
  ZFPM_STATE_IDLE,

  /*
   * Ready to talk to the FPM and periodically trying to connect to
   * it.
   */
  ZFPM_STATE_ACTIVE,

  /*
   * In the middle of bringing up a TCP connection. Specifically,
   * waiting for a connect() call to complete asynchronously.
   */
  ZFPM_STATE_CONNECTING,

  /*
   * TCP connection to the FPM is up.
   */
  ZFPM_STATE_ESTABLISHED

} zfpm_state_t;

/*
 * Globals.
 */
typedef struct zfpm_glob_t_
{

  /*
   * True if the FPM module has been enabled.
   */
  int enabled;

  struct thread_master *master;

  zfpm_state_t state;

  /*
   * Address of the FPM server, in network byte order.
   */
  in_addr_t fpm_server;

  /*
   * Port on which the FPM is running.
   */
  int fpm_port;

  /*
   * List of rib_dest_t structures to be processed.
   */
  TAILQ_HEAD (zfpm_dest_q, rib_dest_t_) dest_q;

  /*
   * Stream socket to the FPM.
   */
  int sock;

  /*
   * Buffers for messages to/from the FPM.
   */
  struct stream *obuf;
  struct stream *ibuf;

  /*
   * Threads for I/O.
   */
  struct thread *t_connect;
  struct thread *t_write;
  struct thread *t_read;

  /*
   * Thread to clean up after the TCP connection to the FPM goes down
   * and the state that belongs to it.
   */
  struct thread *t_conn_down;

  struct {
    zfpm_rnodes_iter_t iter;
  } t_conn_down_state;

  /*
   * Thread to take actions once the TCP conn to the FPM comes up, and
   * the state that belongs to it.
   */
  struct thread *t_conn_up;

  /*
   * Thread to send keepalive messages periodically.
   */
  struct thread *t_keepalive;

  /*
   * Keepalive interval, in seconds.
   */
  uint32_t keepalive_ivl;

  struct {
    zfpm_rnodes_iter_t iter;
  } t_conn_up_state;

  unsigned long connect_calls;
  time_t last_connect_call_time;

  /*
   * Stats from the start of the current statistics interval up to
   * now. These are the counters we typically update in the code.
   */
  zfpm_stats_t stats;

  /*
   * Statistics that were gathered in the last collection interval.
   */
  zfpm_stats_t last_ivl_stats;

  /*
   * Cumulative stats from the last clear to the start of the current
   * statistics interval.
   */
  zfpm_stats_t cumulative_stats;

  /*
   * Stats interval timer.
   */
  struct thread *t_stats;

  /*
   * If non-zero, the last time when statistics were cleared.
   */
  time_t last_stats_clear_time;

} zfpm_glob_t;

static zfpm_glob_t zfpm_glob_space;
static zfpm_glob_t *zfpm_g = &zfpm_glob_space;

static int zfpm_read_cb (struct thread *thread);
static int zfpm_write_cb (struct thread *thread);
static int zfpm_keepalive_cb (struct thread *thread);
static int zfpm_create_keepalive (struct thread *thread);

static void zfpm_set_state (zfpm_state_t state, const char *reason);
static void zfpm_start_connect_timer (const char *reason);
static void zfpm_start_stats_timer (void);

/*
 * zfpm_thread_should_yield
 */
static inline int
zfpm_thread_should_yield (struct thread *t)
{
  return thread_should_yield (t);
}

/*
 * zfpm_state_to_str
 */
static const char *
zfpm_state_to_str (zfpm_state_t state)
{
  switch (state)
    {

    case ZFPM_STATE_IDLE:
      return "idle";

    case ZFPM_STATE_ACTIVE:
      return "active";

    case ZFPM_STATE_CONNECTING:
      return "connecting";

    case ZFPM_STATE_ESTABLISHED:
      return "established";

    default:
      return "unknown";
    }
}

/*
 * zfpm_get_time
 */
static time_t
zfpm_get_time (void)
{
  struct timeval tv;

  if (quagga_gettime (QUAGGA_CLK_MONOTONIC, &tv) < 0)
    zlog_warn ("FPM: quagga_gettime failed!!");

  return tv.tv_sec;
}

/*
 * zfpm_get_elapsed_time
 *
 * Returns the time elapsed (in seconds) since the given time.
 */
static time_t
zfpm_get_elapsed_time (time_t reference)
{
  time_t now;

  now = zfpm_get_time ();

  if (now < reference)
    {
      assert (0);
      return 0;
    }

  return now - reference;
}

/*
 * zfpm_is_table_for_fpm
 *
 * Returns TRUE if the given table is to be communicated to the FPM.
 */
static inline int
zfpm_is_table_for_fpm (struct route_table *table)
{
  rib_table_info_t *info;

  info = rib_table_info (table);

  /*
   * We only send the unicast tables in the main instance to the FPM
   * at this point.
   */
  if (info->vrf->id != 0)
    return 0;

  if (info->safi != SAFI_UNICAST)
    return 0;

  return 1;
}

/*
 * zfpm_rnodes_iter_init
 */
static inline void
zfpm_rnodes_iter_init (zfpm_rnodes_iter_t *iter)
{
  memset (iter, 0, sizeof (*iter));
  rib_tables_iter_init (&iter->tables_iter);

  /*
   * This is a hack, but it makes implementing 'next' easier by
   * ensuring that route_table_iter_next() will return NULL the first
   * time we call it.
   */
  route_table_iter_init (&iter->iter, NULL);
  route_table_iter_cleanup (&iter->iter);
}

/*
 * zfpm_rnodes_iter_next
 */
static inline struct route_node *
zfpm_rnodes_iter_next (zfpm_rnodes_iter_t *iter)
{
  struct route_node *rn;
  struct route_table *table;

  while (1)
    {
      rn = route_table_iter_next (&iter->iter);
      if (rn)
        return rn;

      /*
       * We've made our way through this table, go to the next one.
       */
      route_table_iter_cleanup (&iter->iter);

      while ((table = rib_tables_iter_next (&iter->tables_iter)))
        {
          if (zfpm_is_table_for_fpm (table))
            break;
        }

      if (!table)
        return NULL;

      route_table_iter_init (&iter->iter, table);
    }

  return NULL;
}

/*
 * zfpm_rnodes_iter_pause
 */
static inline void
zfpm_rnodes_iter_pause (zfpm_rnodes_iter_t *iter)
{
  route_table_iter_pause (&iter->iter);
}

/*
 * zfpm_rnodes_iter_cleanup
 */
static inline void
zfpm_rnodes_iter_cleanup (zfpm_rnodes_iter_t *iter)
{
  route_table_iter_cleanup (&iter->iter);
  rib_tables_iter_cleanup (&iter->tables_iter);
}

/*
 * zfpm_stats_init
 *
 * Initialize a statistics block.
 */
static inline void
zfpm_stats_init (zfpm_stats_t *stats)
{
  memset (stats, 0, sizeof (*stats));
}

/*
 * zfpm_stats_reset
 */
static inline void
zfpm_stats_reset (zfpm_stats_t *stats)
{
  zfpm_stats_init (stats);
}

/*
 * zfpm_stats_copy
 */
static inline void
zfpm_stats_copy (const zfpm_stats_t *src, zfpm_stats_t *dest)
{
  memcpy (dest, src, sizeof (*dest));
}

/*
 * zfpm_stats_compose
 *
 * Total up the statistics in two stats structures ('s1' and 's2') and
 * return the result in the third argument, 'result'. Note that the
 * pointer 'result' may be the same as 's1' or 's2'.
 *
 * For simplicity, the implementation below assumes that the stats
 * structure is composed entirely of counters. This can easily be
 * changed when necessary.
 */
static void
zfpm_stats_compose (const zfpm_stats_t *s1, const zfpm_stats_t *s2,
                    zfpm_stats_t *result)
{
  const unsigned long *p1, *p2;
  unsigned long *result_p;
  int i, num_counters;

  p1 = (const unsigned long *) s1;
  p2 = (const unsigned long *) s2;
  result_p = (unsigned long *) result;

  num_counters = (sizeof (zfpm_stats_t) / sizeof (unsigned long));

  for (i = 0; i < num_counters; i++)
    {
      result_p[i] = p1[i] + p2[i];
    }
}

/*
 * zfpm_read_on
 */
static inline void
zfpm_read_on (void)
{
  assert (!zfpm_g->t_read);
  assert (zfpm_g->sock >= 0);

  THREAD_READ_ON (zfpm_g->master, zfpm_g->t_read, zfpm_read_cb, 0,
                  zfpm_g->sock);
}

/*
 * zfpm_write_on
 */
static inline void
zfpm_write_on (void)
{
  assert (!zfpm_g->t_write);
  assert (zfpm_g->sock >= 0);

  THREAD_WRITE_ON (zfpm_g->master, zfpm_g->t_write, zfpm_write_cb, 0,
                   zfpm_g->sock);
}

/*
 * zfpm_read_off
 */
static inline void
zfpm_read_off (void)
{
  THREAD_READ_OFF (zfpm_g->t_read);
}

/*
 * zfpm_write_off
 */
static inline void
zfpm_write_off (void)
{
  THREAD_WRITE_OFF (zfpm_g->t_write);
}

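/*
 * zfpm_keepalive_on
 *
 * Schedule the keepalive timer to fire after the keepalive interval.
 */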
static void
zfpm_keepalive_on (void)
{
  /*
   * Clear any stale handle first: THREAD_TIMER_ON only schedules a
   * timer when the handle is NULL, and this function is also invoked
   * from within the expired timer's own callback.
   */
  zfpm_g->t_keepalive = NULL;

  THREAD_TIMER_ON (zfpm_g->master, zfpm_g->t_keepalive, zfpm_keepalive_cb,
                   0, zfpm_g->keepalive_ivl);
}

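/*
 * zfpm_keepalive_off
 *
 * Stop the keepalive timer, if it is running.
 */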
static void
zfpm_keepalive_off (void)
{
  if (zfpm_g->t_keepalive) {
    THREAD_TIMER_OFF (zfpm_g->t_keepalive);
  }
}

/*
 * zfpm_conn_up_thread_cb
 *
 * Callback for actions to be taken when the connection to the FPM
 * comes up.
 */
static int
zfpm_conn_up_thread_cb (struct thread *thread)
{
  struct route_node *rnode;
  zfpm_rnodes_iter_t *iter;
  rib_dest_t *dest;

  assert (zfpm_g->t_conn_up);
  zfpm_g->t_conn_up = NULL;

  iter = &zfpm_g->t_conn_up_state.iter;

  if (zfpm_g->state != ZFPM_STATE_ESTABLISHED)
    {
      zfpm_debug ("Connection not up anymore, conn_up thread aborting");
      zfpm_g->stats.t_conn_up_aborts++;
      goto done;
    }

  while ((rnode = zfpm_rnodes_iter_next (iter)))
    {
      dest = rib_dest_from_rnode (rnode);

      if (dest)
        {
          zfpm_g->stats.t_conn_up_dests_processed++;
          zfpm_trigger_update (rnode, NULL);
        }

      /*
       * Yield if need be.
       */
      if (!zfpm_thread_should_yield (thread))
        continue;

      zfpm_g->stats.t_conn_up_yields++;
      zfpm_rnodes_iter_pause (iter);
      zfpm_g->t_conn_up = thread_add_background (zfpm_g->master,
                                                 zfpm_conn_up_thread_cb,
                                                 0, 0);
      return 0;
    }

  zfpm_g->stats.t_conn_up_finishes++;

 done:
  zfpm_rnodes_iter_cleanup (iter);
  return 0;
}

/*
 * zfpm_keepalive_cb
 *
 * Called when the keepalive timer expires.
 */
static int
zfpm_keepalive_cb (struct thread *thread)
{
  zfpm_g->stats.keepalive_cb_calls++;

  zfpm_create_keepalive (thread);
  zfpm_write_on ();

  zfpm_keepalive_on ();

  return 0;
}

/*
 * zfpm_create_keepalive
 *
 * Creates a keepalive message and writes it to the output buffer
 * ready for sending. A keepalive is just a bare FPM header with no
 * payload.
 */
static int
zfpm_create_keepalive (struct thread *thread)
{
  fpm_msg_hdr_t *hdr;
  struct stream *s;
  unsigned char *buf;
  unsigned int msg_len = FPM_MSG_HDR_LEN;

  s = zfpm_g->obuf;

  if (STREAM_WRITEABLE (s) < msg_len) {
    return 1;
  }

  buf = STREAM_DATA (s) + stream_get_endp (s);

  hdr = (fpm_msg_hdr_t *) buf;

  hdr->version = FPM_PROTO_VERSION;
  hdr->msg_type = FPM_MSG_TYPE_KEEPALIVE;
  hdr->msg_len = htons (msg_len);

  stream_forward_endp (s, msg_len);

  return 0;
}

/*
 * zfpm_connection_up
 *
 * Called when the connection to the FPM comes up.
 */
static void
zfpm_connection_up (const char *detail)
{
  assert (zfpm_g->sock >= 0);
  zfpm_read_on ();
  zfpm_write_on ();
  zfpm_set_state (ZFPM_STATE_ESTABLISHED, detail);

  /*
   * Start thread to push existing routes to the FPM.
   */
  assert (!zfpm_g->t_conn_up);

  zfpm_rnodes_iter_init (&zfpm_g->t_conn_up_state.iter);

  zfpm_debug ("Starting conn_up thread");
  zfpm_g->t_conn_up = thread_add_background (zfpm_g->master,
                                             zfpm_conn_up_thread_cb, 0, 0);
  zfpm_g->stats.t_conn_up_starts++;

  zfpm_debug ("Starting keepalive thread");
  zfpm_keepalive_on ();
}

/*
 * zfpm_connect_check
 *
 * Check if an asynchronous connect() to the FPM is complete.
 */
static void
zfpm_connect_check (void)
{
  int status;
  socklen_t slen;
  int ret;

  zfpm_read_off ();
  zfpm_write_off ();

  slen = sizeof (status);
  ret = getsockopt (zfpm_g->sock, SOL_SOCKET, SO_ERROR, (void *) &status,
                    &slen);

  if (ret >= 0 && status == 0)
    {
      zfpm_connection_up ("async connect complete");
      return;
    }

  /*
   * getsockopt() failed or indicated an error on the socket.
   */
  close (zfpm_g->sock);
  zfpm_g->sock = -1;

  zfpm_start_connect_timer ("getsockopt() after async connect failed");
  return;
}

/*
 * zfpm_conn_down_thread_cb
 *
 * Callback that is invoked to clean up state after the TCP connection
 * to the FPM goes down.
 */
static int
zfpm_conn_down_thread_cb (struct thread *thread)
{
  struct route_node *rnode;
  zfpm_rnodes_iter_t *iter;
  rib_dest_t *dest;

  assert (zfpm_g->state == ZFPM_STATE_IDLE);

  assert (zfpm_g->t_conn_down);
  zfpm_g->t_conn_down = NULL;

  iter = &zfpm_g->t_conn_down_state.iter;

  while ((rnode = zfpm_rnodes_iter_next (iter)))
    {
      dest = rib_dest_from_rnode (rnode);

      if (dest)
        {
          if (CHECK_FLAG (dest->flags, RIB_DEST_UPDATE_FPM))
            {
              TAILQ_REMOVE (&zfpm_g->dest_q, dest, fpm_q_entries);
            }

          UNSET_FLAG (dest->flags, RIB_DEST_UPDATE_FPM);
          UNSET_FLAG (dest->flags, RIB_DEST_SENT_TO_FPM);

          zfpm_g->stats.t_conn_down_dests_processed++;

          /*
           * Check if the dest should be deleted.
           */
          rib_gc_dest (rnode);
        }

      /*
       * Yield if need be.
       */
      if (!zfpm_thread_should_yield (thread))
        continue;

      zfpm_g->stats.t_conn_down_yields++;
      zfpm_rnodes_iter_pause (iter);
      zfpm_g->t_conn_down = thread_add_background (zfpm_g->master,
                                                   zfpm_conn_down_thread_cb,
                                                   0, 0);
      return 0;
    }

  zfpm_g->stats.t_conn_down_finishes++;
  zfpm_rnodes_iter_cleanup (iter);

  zfpm_keepalive_off ();

  /*
   * Start the process of connecting to the FPM again.
   */
  zfpm_start_connect_timer ("cleanup complete");
  return 0;
}

/*
 * zfpm_connection_down
 *
 * Called when the connection to the FPM has gone down.
 */
static void
zfpm_connection_down (const char *detail)
{
  if (!detail)
    detail = "unknown";

  assert (zfpm_g->state == ZFPM_STATE_ESTABLISHED);

  zlog_info ("connection to the FPM has gone down: %s", detail);

  zfpm_read_off ();
  zfpm_write_off ();

  stream_reset (zfpm_g->ibuf);
  stream_reset (zfpm_g->obuf);

  if (zfpm_g->sock >= 0) {
    close (zfpm_g->sock);
    zfpm_g->sock = -1;
  }

  /*
   * Start thread to clean up state after the connection goes down.
   */
  assert (!zfpm_g->t_conn_down);
  zfpm_debug ("Starting conn_down thread");
  zfpm_rnodes_iter_init (&zfpm_g->t_conn_down_state.iter);
  zfpm_g->t_conn_down = thread_add_background (zfpm_g->master,
                                               zfpm_conn_down_thread_cb, 0, 0);
  zfpm_g->stats.t_conn_down_starts++;

  zfpm_set_state (ZFPM_STATE_IDLE, detail);
}

/*
 * zfpm_read_cb
 */
static int
zfpm_read_cb (struct thread *thread)
{
  size_t already;
  struct stream *ibuf;
  uint16_t msg_len;
  fpm_msg_hdr_t *hdr;

  zfpm_g->stats.read_cb_calls++;
  assert (zfpm_g->t_read);
  zfpm_g->t_read = NULL;

  /*
   * Check if async connect is now done.
   */
  if (zfpm_g->state == ZFPM_STATE_CONNECTING)
    {
      zfpm_connect_check ();
      return 0;
    }

  assert (zfpm_g->state == ZFPM_STATE_ESTABLISHED);
  assert (zfpm_g->sock >= 0);

  ibuf = zfpm_g->ibuf;

  already = stream_get_endp (ibuf);
  if (already < FPM_MSG_HDR_LEN)
    {
      ssize_t nbyte;

      nbyte = stream_read_try (ibuf, zfpm_g->sock, FPM_MSG_HDR_LEN - already);
      if (nbyte == 0 || nbyte == -1)
        {
          zfpm_connection_down ("closed socket in read");
          return 0;
        }

      if (nbyte != (ssize_t) (FPM_MSG_HDR_LEN - already))
        goto done;

      already = FPM_MSG_HDR_LEN;
    }

  stream_set_getp (ibuf, 0);

  hdr = (fpm_msg_hdr_t *) stream_pnt (ibuf);

  if (!fpm_msg_hdr_ok (hdr))
    {
      zfpm_connection_down ("invalid message header");
      return 0;
    }

  msg_len = fpm_msg_len (hdr);

  /*
   * Read out the rest of the packet.
   */
  if (already < msg_len)
    {
      ssize_t nbyte;

      nbyte = stream_read_try (ibuf, zfpm_g->sock, msg_len - already);

      if (nbyte == 0 || nbyte == -1)
        {
          zfpm_connection_down ("failed to read message");
          return 0;
        }

      if (nbyte != (ssize_t) (msg_len - already))
        goto done;
    }

  zfpm_debug ("Read out a full fpm message");

  /*
   * Just throw it away for now.
   */
  stream_reset (ibuf);

 done:
  zfpm_read_on ();
  return 0;
}

/*
 * zfpm_writes_pending
 *
 * Returns TRUE if we may have something to write to the FPM.
 */
static int
zfpm_writes_pending (void)
{

  /*
   * Check if there is any data in the outbound buffer that has not
   * been written to the socket yet.
   */
  if (stream_get_endp (zfpm_g->obuf) - stream_get_getp (zfpm_g->obuf))
    return 1;

  /*
   * Check if there are any prefixes on the outbound queue.
   */
  if (!TAILQ_EMPTY (&zfpm_g->dest_q))
    return 1;

  return 0;
}

/*
 * zfpm_encode_route
 *
 * Encode a message to the FPM with information about the given route.
 *
 * Returns the number of bytes written to the buffer. 0 or a negative
 * value indicates an error.
 */
static inline int
zfpm_encode_route (rib_dest_t *dest, struct rib *rib, char *in_buf,
                   size_t in_buf_len)
{
#ifndef HAVE_NETLINK
  return 0;
#else

  int cmd;

  cmd = rib ? RTM_NEWROUTE : RTM_DELROUTE;

  return zfpm_netlink_encode_route (cmd, dest, rib, in_buf, in_buf_len);

#endif /* HAVE_NETLINK */
}

/*
 * zfpm_route_for_update
 *
 * Returns the rib that is to be sent to the FPM for a given dest.
 */
static struct rib *
zfpm_route_for_update (rib_dest_t *dest)
{
  struct rib *rib;

  RIB_DEST_FOREACH_ROUTE (dest, rib)
    {
      if (!CHECK_FLAG (rib->flags, ZEBRA_FLAG_SELECTED))
        continue;

      return rib;
    }

  /*
   * We have no route for this destination.
   */
  return NULL;
}

/*
 * zfpm_build_updates
 *
 * Process the outgoing queue and write messages to the outbound
 * buffer.
 */
static void
zfpm_build_updates (void)
{
  struct stream *s;
  rib_dest_t *dest;
  unsigned char *buf, *data, *buf_end;
  size_t msg_len;
  size_t data_len;
  fpm_msg_hdr_t *hdr;
  struct rib *rib;
  int is_add, write_msg;

  s = zfpm_g->obuf;

  assert (stream_empty (s));

  do {

    /*
     * Make sure there is enough space to write another message.
     */
    if (STREAM_WRITEABLE (s) < FPM_MAX_MSG_LEN)
      break;

    buf = STREAM_DATA (s) + stream_get_endp (s);
    buf_end = buf + STREAM_WRITEABLE (s);

    dest = TAILQ_FIRST (&zfpm_g->dest_q);
    if (!dest)
      break;

    assert (CHECK_FLAG (dest->flags, RIB_DEST_UPDATE_FPM));

    hdr = (fpm_msg_hdr_t *) buf;
    hdr->version = FPM_PROTO_VERSION;
    hdr->msg_type = FPM_MSG_TYPE_NETLINK;

    data = fpm_msg_data (hdr);

    rib = zfpm_route_for_update (dest);
    is_add = rib ? 1 : 0;

    write_msg = 1;

    /*
     * If this is a route deletion, and we have not sent the route to
     * the FPM previously, skip it.
     */
    if (!is_add && !CHECK_FLAG (dest->flags, RIB_DEST_SENT_TO_FPM))
      {
        write_msg = 0;
        zfpm_g->stats.nop_deletes_skipped++;
      }

    if (write_msg) {
      data_len = zfpm_encode_route (dest, rib, (char *) data, buf_end - data);

      assert (data_len);
      if (data_len)
        {
          msg_len = fpm_data_len_to_msg_len (data_len);
          hdr->msg_len = htons (msg_len);
          stream_forward_endp (s, msg_len);

          if (is_add)
            zfpm_g->stats.route_adds++;
          else
            zfpm_g->stats.route_dels++;
        }
    }

    /*
     * Remove the dest from the queue, and reset the flag.
     */
    UNSET_FLAG (dest->flags, RIB_DEST_UPDATE_FPM);
    TAILQ_REMOVE (&zfpm_g->dest_q, dest, fpm_q_entries);

    if (is_add)
      {
        SET_FLAG (dest->flags, RIB_DEST_SENT_TO_FPM);
      }
    else
      {
        UNSET_FLAG (dest->flags, RIB_DEST_SENT_TO_FPM);
      }

    /*
     * Delete the destination if necessary.
     */
    if (rib_gc_dest (dest->rnode))
      zfpm_g->stats.dests_del_after_update++;

  } while (1);

}

/*
 * zfpm_write_cb
 */
static int
zfpm_write_cb (struct thread *thread)
{
  struct stream *s;
  int num_writes;

  zfpm_g->stats.write_cb_calls++;
  assert (zfpm_g->t_write);
  zfpm_g->t_write = NULL;

  /*
   * Check if async connect is now done.
   */
  if (zfpm_g->state == ZFPM_STATE_CONNECTING)
    {
      zfpm_connect_check ();
      return 0;
    }

  assert (zfpm_g->state == ZFPM_STATE_ESTABLISHED);
  assert (zfpm_g->sock >= 0);

  num_writes = 0;

  do
    {
      int bytes_to_write, bytes_written;

      s = zfpm_g->obuf;

      /*
       * If the stream is empty, try to fill it up with data.
       */
      if (stream_empty (s))
        {
          zfpm_build_updates ();
        }

      bytes_to_write = stream_get_endp (s) - stream_get_getp (s);
      if (!bytes_to_write)
        break;

      bytes_written = write (zfpm_g->sock, STREAM_PNT (s), bytes_to_write);
      zfpm_g->stats.write_calls++;
      num_writes++;

      if (bytes_written < 0)
        {
          if (ERRNO_IO_RETRY (errno))
            break;

          zfpm_connection_down ("failed to write to socket");
          return 0;
        }

      if (bytes_written != bytes_to_write)
        {

          /*
           * Partial write.
           */
          stream_forward_getp (s, bytes_written);
          zfpm_g->stats.partial_writes++;
          break;
        }

      /*
       * We've written out the entire contents of the stream.
       */
      stream_reset (s);

      if (num_writes >= ZFPM_MAX_WRITES_PER_RUN)
        {
          zfpm_g->stats.max_writes_hit++;
          break;
        }

      if (zfpm_thread_should_yield (thread))
        {
          zfpm_g->stats.t_write_yields++;
          break;
        }
    } while (1);

  if (zfpm_writes_pending ())
    zfpm_write_on ();

  return 0;
}

/*
 * zfpm_connect_cb
 */
static int
zfpm_connect_cb (struct thread *t)
{
  int sock, ret;
  struct sockaddr_in serv;

  assert (zfpm_g->t_connect);
  zfpm_g->t_connect = NULL;
  assert (zfpm_g->state == ZFPM_STATE_ACTIVE);

  sock = socket (AF_INET, SOCK_STREAM, 0);
  if (sock < 0)
    {
      zfpm_debug ("Failed to create socket for connect(): %s", strerror (errno));
      zfpm_g->stats.connect_no_sock++;
      return 0;
    }

  set_nonblocking (sock);

  /* Build the server's address. */
  memset (&serv, 0, sizeof (serv));
  serv.sin_family = AF_INET;
  serv.sin_port = htons (zfpm_g->fpm_port);
#ifdef HAVE_STRUCT_SOCKADDR_IN_SIN_LEN
  serv.sin_len = sizeof (struct sockaddr_in);
#endif /* HAVE_STRUCT_SOCKADDR_IN_SIN_LEN */
  if (!zfpm_g->fpm_server)
    serv.sin_addr.s_addr = htonl (INADDR_LOOPBACK);
  else
    serv.sin_addr.s_addr = zfpm_g->fpm_server;

  /*
   * Connect to the FPM.
   */
  zfpm_g->connect_calls++;
  zfpm_g->stats.connect_calls++;
  zfpm_g->last_connect_call_time = zfpm_get_time ();

  ret = connect (sock, (struct sockaddr *) &serv, sizeof (serv));
  if (ret >= 0)
    {
      zfpm_g->sock = sock;
      zfpm_connection_up ("connect succeeded");
      return 1;
    }

  if (errno == EINPROGRESS)
    {
      zfpm_g->sock = sock;
      zfpm_read_on ();
      zfpm_write_on ();
      zfpm_set_state (ZFPM_STATE_CONNECTING, "async connect in progress");
      return 0;
    }

  zlog_info ("can't connect to FPM %d: %s", sock, safe_strerror (errno));
  close (sock);

  /*
   * Restart timer for retrying connection.
   */
  zfpm_start_connect_timer ("connect() failed");
  return 0;
}

/*
 * zfpm_set_state
 *
 * Move state machine into the given state.
 */
static void
zfpm_set_state (zfpm_state_t state, const char *reason)
{
  zfpm_state_t cur_state = zfpm_g->state;

  if (!reason)
    reason = "Unknown";

  if (state == cur_state)
    return;

  zfpm_debug ("beginning state transition %s -> %s. Reason: %s",
              zfpm_state_to_str (cur_state), zfpm_state_to_str (state),
              reason);

  switch (state) {

  case ZFPM_STATE_IDLE:
    assert (cur_state == ZFPM_STATE_ESTABLISHED);
    break;

  case ZFPM_STATE_ACTIVE:
    assert (cur_state == ZFPM_STATE_IDLE ||
            cur_state == ZFPM_STATE_CONNECTING);
    assert (zfpm_g->t_connect);
    break;

  case ZFPM_STATE_CONNECTING:
    assert (zfpm_g->sock);
    assert (cur_state == ZFPM_STATE_ACTIVE);
    assert (zfpm_g->t_read);
    assert (zfpm_g->t_write);
    break;

  case ZFPM_STATE_ESTABLISHED:
    assert (cur_state == ZFPM_STATE_ACTIVE ||
            cur_state == ZFPM_STATE_CONNECTING);
    assert (zfpm_g->sock);
    assert (zfpm_g->t_read);
    assert (zfpm_g->t_write);
    break;
  }

  zfpm_g->state = state;
}

/*
 * zfpm_calc_connect_delay
 *
 * Returns the number of seconds after which we should attempt to
 * reconnect to the FPM.
 */
static long
zfpm_calc_connect_delay (void)
{
  time_t elapsed;

  /*
   * Return 0 if this is our first attempt to connect.
   */
  if (zfpm_g->connect_calls == 0)
    {
      return 0;
    }

  elapsed = zfpm_get_elapsed_time (zfpm_g->last_connect_call_time);

  if (elapsed > ZFPM_CONNECT_RETRY_IVL) {
    return 0;
  }

  return ZFPM_CONNECT_RETRY_IVL - elapsed;
}

/*
 * zfpm_start_connect_timer
 */
static void
zfpm_start_connect_timer (const char *reason)
{
  long delay_secs;

  assert (!zfpm_g->t_connect);
  assert (zfpm_g->sock < 0);

  assert (zfpm_g->state == ZFPM_STATE_IDLE ||
          zfpm_g->state == ZFPM_STATE_ACTIVE ||
          zfpm_g->state == ZFPM_STATE_CONNECTING);

  delay_secs = zfpm_calc_connect_delay ();
  zfpm_debug ("scheduling connect in %ld seconds", delay_secs);

  THREAD_TIMER_ON (zfpm_g->master, zfpm_g->t_connect, zfpm_connect_cb, 0,
                   delay_secs);
  zfpm_set_state (ZFPM_STATE_ACTIVE, reason);
}

/*
 * zfpm_is_enabled
 *
 * Returns TRUE if the zebra FPM module has been enabled.
 */
static inline int
zfpm_is_enabled (void)
{
  return zfpm_g->enabled;
}

/*
 * zfpm_conn_is_up
 *
 * Returns TRUE if the connection to the FPM is up.
 */
static inline int
zfpm_conn_is_up (void)
{
  if (zfpm_g->state != ZFPM_STATE_ESTABLISHED)
    return 0;

  assert (zfpm_g->sock >= 0);

  return 1;
}

/*
 * zfpm_trigger_update
 *
 * The zebra code invokes this function to indicate that we should
 * send an update to the FPM about the given route_node.
 */
void
zfpm_trigger_update (struct route_node *rn, const char *reason)
{
  rib_dest_t *dest;
  char buf[INET6_ADDRSTRLEN];

  /*
   * Ignore if the connection is down. We will update the FPM about
   * all destinations once the connection comes up.
   */
  if (!zfpm_conn_is_up ())
    return;

  dest = rib_dest_from_rnode (rn);

  /*
   * Ignore the trigger if the dest is not in a table that we would
   * send to the FPM.
   */
  if (!zfpm_is_table_for_fpm (rib_dest_table (dest)))
    {
      zfpm_g->stats.non_fpm_table_triggers++;
      return;
    }

  if (CHECK_FLAG (dest->flags, RIB_DEST_UPDATE_FPM)) {
    zfpm_g->stats.redundant_triggers++;
    return;
  }

  if (reason)
    {
      zfpm_debug ("%s/%d triggering update to FPM - Reason: %s",
                  inet_ntop (rn->p.family, &rn->p.u.prefix, buf, sizeof (buf)),
                  rn->p.prefixlen, reason);
    }

  SET_FLAG (dest->flags, RIB_DEST_UPDATE_FPM);
  TAILQ_INSERT_TAIL (&zfpm_g->dest_q, dest, fpm_q_entries);
  zfpm_g->stats.updates_triggered++;

  /*
   * Make sure that writes are enabled.
   */
  if (zfpm_g->t_write)
    return;

  zfpm_write_on ();
}

/*
 * zfpm_stats_timer_cb
 */
static int
zfpm_stats_timer_cb (struct thread *t)
{
  assert (zfpm_g->t_stats);
  zfpm_g->t_stats = NULL;

  /*
   * Remember the stats collected in the last interval for display
   * purposes.
   */
  zfpm_stats_copy (&zfpm_g->stats, &zfpm_g->last_ivl_stats);

  /*
   * Add the current set of stats into the cumulative statistics.
   */
  zfpm_stats_compose (&zfpm_g->cumulative_stats, &zfpm_g->stats,
                      &zfpm_g->cumulative_stats);

  /*
   * Start collecting stats afresh over the next interval.
   */
  zfpm_stats_reset (&zfpm_g->stats);

  zfpm_start_stats_timer ();

  return 0;
}

/*
 * zfpm_stop_stats_timer
 */
static void
zfpm_stop_stats_timer (void)
{
  if (!zfpm_g->t_stats)
    return;

  zfpm_debug ("Stopping existing stats timer");
  THREAD_TIMER_OFF (zfpm_g->t_stats);
}

/*
 * zfpm_start_stats_timer
 */
static void
zfpm_start_stats_timer (void)
{
  assert (!zfpm_g->t_stats);

  THREAD_TIMER_ON (zfpm_g->master, zfpm_g->t_stats, zfpm_stats_timer_cb, 0,
                   ZFPM_STATS_IVL_SECS);
}

/*
 * Helper macro for zfpm_show_stats() below.
 */
#define ZFPM_SHOW_STAT(counter)                                         \
  do {                                                                  \
    vty_out (vty, "%-40s %10lu %16lu%s", #counter, total_stats.counter, \
             zfpm_g->last_ivl_stats.counter, VTY_NEWLINE);              \
  } while (0)

/*
 * zfpm_show_stats
 */
static void
zfpm_show_stats (struct vty *vty)
{
  zfpm_stats_t total_stats;
  time_t elapsed;

  vty_out (vty, "%s%-40s %10s Last %2d secs%s%s", VTY_NEWLINE, "Counter",
           "Total", ZFPM_STATS_IVL_SECS, VTY_NEWLINE, VTY_NEWLINE);

  /*
   * Compute the total stats up to this instant.
   */
  zfpm_stats_compose (&zfpm_g->cumulative_stats, &zfpm_g->stats,
                      &total_stats);

  ZFPM_SHOW_STAT (connect_calls);
  ZFPM_SHOW_STAT (connect_no_sock);
  ZFPM_SHOW_STAT (read_cb_calls);
  ZFPM_SHOW_STAT (write_cb_calls);
  ZFPM_SHOW_STAT (write_calls);
  ZFPM_SHOW_STAT (partial_writes);
  ZFPM_SHOW_STAT (max_writes_hit);
  ZFPM_SHOW_STAT (t_write_yields);
  ZFPM_SHOW_STAT (nop_deletes_skipped);
  ZFPM_SHOW_STAT (route_adds);
  ZFPM_SHOW_STAT (route_dels);
  ZFPM_SHOW_STAT (updates_triggered);
  ZFPM_SHOW_STAT (non_fpm_table_triggers);
  ZFPM_SHOW_STAT (redundant_triggers);
  ZFPM_SHOW_STAT (dests_del_after_update);
  ZFPM_SHOW_STAT (keepalive_cb_calls);
  ZFPM_SHOW_STAT (t_conn_down_starts);
  ZFPM_SHOW_STAT (t_conn_down_dests_processed);
  ZFPM_SHOW_STAT (t_conn_down_yields);
  ZFPM_SHOW_STAT (t_conn_down_finishes);
  ZFPM_SHOW_STAT (t_conn_up_starts);
  ZFPM_SHOW_STAT (t_conn_up_dests_processed);
  ZFPM_SHOW_STAT (t_conn_up_yields);
  ZFPM_SHOW_STAT (t_conn_up_aborts);
  ZFPM_SHOW_STAT (t_conn_up_finishes);

  if (!zfpm_g->last_stats_clear_time)
    return;

  elapsed = zfpm_get_elapsed_time (zfpm_g->last_stats_clear_time);

  vty_out (vty, "%sStats were cleared %lu seconds ago%s", VTY_NEWLINE,
           (unsigned long) elapsed, VTY_NEWLINE);
}

/*
 * zfpm_clear_stats
 */
static void
zfpm_clear_stats (struct vty *vty)
{
  if (!zfpm_is_enabled ())
    {
      vty_out (vty, "The FPM module is not enabled...%s", VTY_NEWLINE);
      return;
    }

  zfpm_stats_reset (&zfpm_g->stats);
  zfpm_stats_reset (&zfpm_g->last_ivl_stats);
  zfpm_stats_reset (&zfpm_g->cumulative_stats);

  zfpm_stop_stats_timer ();
  zfpm_start_stats_timer ();

  zfpm_g->last_stats_clear_time = zfpm_get_time ();

  vty_out (vty, "Cleared FPM stats%s", VTY_NEWLINE);
}

/*
 * show_zebra_fpm_stats
 */
DEFUN (show_zebra_fpm_stats,
       show_zebra_fpm_stats_cmd,
       "show zebra fpm stats",
       SHOW_STR
       "Zebra information\n"
       "Forwarding Path Manager information\n"
       "Statistics\n")
{
  zfpm_show_stats (vty);
  return CMD_SUCCESS;
}

/*
 * clear_zebra_fpm_stats
 */
DEFUN (clear_zebra_fpm_stats,
       clear_zebra_fpm_stats_cmd,
       "clear zebra fpm stats",
       CLEAR_STR
       "Zebra information\n"
       "Clear Forwarding Path Manager information\n"
       "Statistics\n")
{
  zfpm_clear_stats (vty);
  return CMD_SUCCESS;
}

/*
 * fpm_remote_ip
 *
 * Update the FPM connection information.
 */
DEFUN (fpm_remote_ip,
       fpm_remote_ip_cmd,
       "fpm connection ip A.B.C.D port <1-65535>",
       FPM_STR
       "Connection configuration\n"
       "Remote FPM server IP\n"
       "IP address A.B.C.D\n"
       "Remote FPM server port number\n"
       "Port number")
{

  in_addr_t fpm_server;
  uint32_t port_no;

  fpm_server = inet_addr (argv[0]);
  if (fpm_server == INADDR_NONE)
    return CMD_ERR_INCOMPLETE;

  port_no = atoi (argv[1]);
  if (port_no < TCP_MIN_PORT || port_no > TCP_MAX_PORT)
    return CMD_ERR_INCOMPLETE;

  if (zfpm_g->fpm_server == fpm_server &&
      zfpm_g->fpm_port == port_no)
    goto cmd_success;

  zfpm_g->fpm_server = fpm_server;
  zfpm_g->fpm_port = port_no;

  if (zfpm_conn_is_up ())
    zfpm_connection_down ("Restarting to new connection");

cmd_success:
  return CMD_SUCCESS;
}

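/*
 * no_fpm_remote_ip
 *
 * Revert to the default FPM connection information.
 */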
DEFUN (no_fpm_remote_ip,
       no_fpm_remote_ip_cmd,
       "no fpm connection ip A.B.C.D port <1-65535>",
       NO_STR
       FPM_STR
       "Connection configuration\n"
       "Remote FPM server IP\n"
       "IP address A.B.C.D\n"
       "Remote FPM server port number\n"
       "Port number")
{
  if (zfpm_g->fpm_server != inet_addr (argv[0]) ||
      zfpm_g->fpm_port != atoi (argv[1]))
    return CMD_ERR_NO_MATCH;

  zfpm_g->fpm_server = FPM_DEFAULT_IP;
  zfpm_g->fpm_port = FPM_DEFAULT_PORT;

  if (zfpm_conn_is_up ())
    zfpm_connection_down ("Reverting back to default FPM connection");

  return CMD_SUCCESS;
}

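/*
 * fpm_keepalive_timer
 *
 * Set the keepalive interval.
 */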
DEFUN (fpm_keepalive_timer,
       fpm_keepalive_timer_cmd,
       "fpm keepalive timer <1-65535>",
       FPM_STR
       "Keepalive configuration\n"
       "Keepalive timer\n"
       "Keepalive timer value in seconds")
{
  uint32_t timer;

  timer = atoi (argv[0]);

  zfpm_g->keepalive_ivl = timer;

  return CMD_SUCCESS;
}

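/*
 * no_fpm_keepalive_timer
 *
 * Restore the default keepalive interval.
 */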
DEFUN (no_fpm_keepalive_timer,
       no_fpm_keepalive_timer_cmd,
       "no fpm keepalive timer <1-65535>",
       NO_STR
       FPM_STR
       "Keepalive configuration\n"
       "Keepalive timer\n"
       "Keepalive timer value in seconds")
{
  uint32_t timer;

  timer = atoi (argv[0]);

  if (zfpm_g->keepalive_ivl != timer) {
    return CMD_ERR_NO_MATCH;
  }

  zfpm_g->keepalive_ivl = ZFPM_KEEPALIVE_IVL_SECS;

  return CMD_SUCCESS;
}

/**
 * zfpm_connection_config_write
 *
 * Writes connection-related configuration to vty.
 */
static int
zfpm_connection_config_write (struct vty *vty)
{
  struct in_addr in;

  in.s_addr = zfpm_g->fpm_server;

  if (zfpm_g->fpm_server != FPM_DEFAULT_IP ||
      zfpm_g->fpm_port != FPM_DEFAULT_PORT)
    vty_out (vty, "fpm connection ip %s port %d%s", inet_ntoa (in),
             zfpm_g->fpm_port, VTY_NEWLINE);

  return 0;
}

/*
 * zfpm_ka_config_write
 *
 * Writes keepalive-related configuration to vty.
 */
static int
zfpm_ka_config_write (struct vty *vty)
{
  if (zfpm_g->keepalive_ivl != ZFPM_KEEPALIVE_IVL_SECS) {
    vty_out (vty, "fpm keepalive timer %d%s", zfpm_g->keepalive_ivl,
             VTY_NEWLINE);
  }

  return 0;
}

/*
 * zfpm_vty_config_write
 *
 * Writes the FPM config info to vty.
 */
static int
zfpm_vty_config_write (struct vty *vty)
{
  zfpm_connection_config_write (vty);
  zfpm_ka_config_write (vty);
  return 0;
}

/*
 * Zebra node.
 * TODO probably shouldn't use this, should define an FPM node
 */
static struct cmd_node zebra_node =
{
  ZEBRA_NODE,
  "",
  1
};

/**
 * zfpm_init
 *
 * One-time initialization of the Zebra FPM module.
 *
 * @param[in] master thread master to use for the module's threads.
 * @param[in] enable TRUE if the zebra FPM module should be enabled.
 * @param[in] port   port at which the FPM is running.
 *
 * Returns TRUE on success.
 */
int
zfpm_init (struct thread_master *master, int enable, uint16_t port)
{
  static int initialized = 0;

  if (initialized) {
    return 1;
  }

  initialized = 1;

  memset (zfpm_g, 0, sizeof (*zfpm_g));
  zfpm_g->master = master;
  TAILQ_INIT (&zfpm_g->dest_q);
  zfpm_g->sock = -1;
  zfpm_g->state = ZFPM_STATE_IDLE;

  /*
   * Netlink must currently be available for the Zebra-FPM interface
   * to be enabled.
   */
#ifndef HAVE_NETLINK
  enable = 0;
#endif

  zfpm_g->enabled = enable;

  zfpm_stats_init (&zfpm_g->stats);
  zfpm_stats_init (&zfpm_g->last_ivl_stats);
  zfpm_stats_init (&zfpm_g->cumulative_stats);

  install_element (ENABLE_NODE, &show_zebra_fpm_stats_cmd);
  install_element (ENABLE_NODE, &clear_zebra_fpm_stats_cmd);
  install_element (CONFIG_NODE, &fpm_keepalive_timer_cmd);
  install_element (CONFIG_NODE, &no_fpm_keepalive_timer_cmd);
  install_element (CONFIG_NODE, &fpm_remote_ip_cmd);
  install_element (CONFIG_NODE, &no_fpm_remote_ip_cmd);

  install_node (&zebra_node, zfpm_vty_config_write);

  if (!enable) {
    return 1;
  }

  if (!zfpm_g->fpm_server)
    zfpm_g->fpm_server = FPM_DEFAULT_IP;

  if (!port)
    port = FPM_DEFAULT_PORT;

  zfpm_g->fpm_port = port;

  if (!zfpm_g->keepalive_ivl)
    zfpm_g->keepalive_ivl = ZFPM_KEEPALIVE_IVL_SECS;

  zfpm_g->obuf = stream_new (ZFPM_OBUF_SIZE);
  zfpm_g->ibuf = stream_new (ZFPM_IBUF_SIZE);

  zfpm_start_stats_timer ();
  zfpm_start_connect_timer ("initialized");

  return 1;
}