bgpd: improve cleanup in bgp_delete()

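Drain the BGP work queues directly when bgpd is exiting and the thread
scheduler is no longer running: cancel any scheduled work queue thread
and run the queued items to completion.  The process queues are global,
but their items refer to BGP instances, so they are drained before each
instance is deleted.  Also rework bgp_cleanup_routes() to withdraw the
selected routes for every AFI and to walk (and free) the two-level,
RD-keyed MPLS VPN tables.
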
Signed-off-by: Lou Berger <lberger@labn.net>
diff --git a/bgpd/bgp_route.c b/bgpd/bgp_route.c
index ff541e8..868fbe5 100644
--- a/bgpd/bgp_route.c
+++ b/bgpd/bgp_route.c
@@ -3051,6 +3051,57 @@
       bgp_clear_route (peer, afi, safi, BGP_CLEAR_ROUTE_NORMAL);
 }
 
+/*
+ * Drain a work queue to completion immediately, while bgpd is exiting.
+ */
+static void
+bgp_drain_workqueue_immediate (struct work_queue *wq)
+{
+  if (!wq)
+    return;
+
+  if (!wq->thread)
+    {
+      /*
+       * no thread implies no queued items
+       */
+      assert(!wq->items->count);
+      return;
+    }
+
+  while (wq->items->count)
+    {
+      if (wq->thread)
+        thread_cancel(wq->thread);
+      work_queue_run(wq->thread);
+    }
+}
+
+/*
+ * Special function to process clear node queue when bgpd is exiting
+ * and the thread scheduler is no longer running.
+ */
+void
+bgp_peer_clear_node_queue_drain_immediate(struct peer *peer)
+{
+  if (!peer)
+    return;
+
+  bgp_drain_workqueue_immediate(peer->clear_node_queue);
+}
+
+/*
+ * The work queues are not specific to a BGP instance, but the
+ * items in them refer to BGP instances, so this should be called
+ * before each BGP instance is deleted.
+ */
+void
+bgp_process_queues_drain_immediate(void)
+{
+  bgp_drain_workqueue_immediate(bm->process_main_queue);
+  bgp_drain_workqueue_immediate(bm->process_rsclient_queue);
+}
+
 void
 bgp_clear_adj_in (struct peer *peer, afi_t afi, safi_t safi)
 {
@@ -3091,35 +3142,53 @@
     }
 }
 
+static void
+bgp_cleanup_table(struct bgp_table *table, safi_t safi)
+{
+  struct bgp_node *rn;
+  struct bgp_info *ri;
+  struct bgp_info *next;
+
+  for (rn = bgp_table_top (table); rn; rn = bgp_route_next (rn))
+    for (ri = rn->info; ri; ri = next)
+      {
+        next = ri->next;
+        if (CHECK_FLAG (ri->flags, BGP_INFO_SELECTED)
+            && ri->type == ZEBRA_ROUTE_BGP
+            && ri->sub_type == BGP_ROUTE_NORMAL)
+          bgp_zebra_withdraw (&rn->p, ri, safi);
+      }
+}
+
 /* Delete all kernel routes. */
 void
 bgp_cleanup_routes (void)
 {
   struct bgp *bgp;
   struct listnode *node, *nnode;
-  struct bgp_node *rn;
-  struct bgp_table *table;
-  struct bgp_info *ri;
+  afi_t afi;
 
   for (ALL_LIST_ELEMENTS (bm->bgp, node, nnode, bgp))
     {
-      table = bgp->rib[AFI_IP][SAFI_UNICAST];
+      for (afi = AFI_IP; afi < AFI_MAX; ++afi)
+	{
+	  struct bgp_node *rn;
 
-      for (rn = bgp_table_top (table); rn; rn = bgp_route_next (rn))
-	for (ri = rn->info; ri; ri = ri->next)
-	  if (CHECK_FLAG (ri->flags, BGP_INFO_SELECTED)
-	      && ri->type == ZEBRA_ROUTE_BGP 
-	      && ri->sub_type == BGP_ROUTE_NORMAL)
-	    bgp_zebra_withdraw (&rn->p, ri,SAFI_UNICAST);
+	  bgp_cleanup_table(bgp->rib[afi][SAFI_UNICAST], SAFI_UNICAST);
 
-      table = bgp->rib[AFI_IP6][SAFI_UNICAST];
-
-      for (rn = bgp_table_top (table); rn; rn = bgp_route_next (rn))
-	for (ri = rn->info; ri; ri = ri->next)
-	  if (CHECK_FLAG (ri->flags, BGP_INFO_SELECTED)
-	      && ri->type == ZEBRA_ROUTE_BGP 
-	      && ri->sub_type == BGP_ROUTE_NORMAL)
-	    bgp_zebra_withdraw (&rn->p, ri,SAFI_UNICAST);
+	  /*
+	   * VPN and ENCAP tables are two-level (RD is top level)
+	   */
+	  for (rn = bgp_table_top(bgp->rib[afi][SAFI_MPLS_VPN]); rn;
+	       rn = bgp_route_next (rn))
+	    if (rn->info)
+	      {
+		bgp_cleanup_table((struct bgp_table *)(rn->info), SAFI_MPLS_VPN);
+		bgp_table_finish ((struct bgp_table **)&(rn->info));
+		rn->info = NULL;
+		bgp_unlock_node(rn);
+	      }
+	}
     }
 }
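
For context, a minimal sketch of how these helpers are meant to be used
on the shutdown path.  The function below (bgp_exit_sketch) and the peer
walk are illustrative assumptions, not part of this patch; only the
ordering matters: withdraw routes, drain the per-peer clear-node queues
and the global process queues, then delete each instance.

    /*
     * Illustrative only: the surrounding shutdown code is assumed,
     * not taken from this patch.
     */
    static void
    bgp_exit_sketch (void)
    {
      struct bgp *bgp;
      struct listnode *node, *nnode;

      /* withdraw everything previously announced to zebra */
      bgp_cleanup_routes ();

      for (ALL_LIST_ELEMENTS (bm->bgp, node, nnode, bgp))
        {
          struct peer *peer;
          struct listnode *pnode, *pnnode;

          /* run each peer's pending clear-node work to completion,
           * since the scheduler will no longer do it for us */
          for (ALL_LIST_ELEMENTS (bgp->peer, pnode, pnnode, peer))
            bgp_peer_clear_node_queue_drain_immediate (peer);

          /* the global process queues hold items that reference this
           * instance, so drain them before deleting it */
          bgp_process_queues_drain_immediate ();

          bgp_delete (bgp);
        }
    }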