/* Routing Information Base.
 * Copyright (C) 1997, 98, 99, 2001 Kunihiro Ishiguro
 *
 * This file is part of GNU Zebra.
 *
 * GNU Zebra is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * GNU Zebra is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; see the file COPYING; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <zebra.h>

#include "if.h"
#include "prefix.h"
#include "table.h"
#include "memory.h"
#include "zebra_memory.h"
#include "command.h"
#include "log.h"
#include "log_int.h"
#include "sockunion.h"
#include "linklist.h"
#include "thread.h"
#include "workqueue.h"
#include "routemap.h"
#include "nexthop.h"
#include "vrf.h"
#include "mpls.h"
#include "srcdest_table.h"

#include "zebra/rib.h"
#include "zebra/rt.h"
#include "zebra/zebra_ns.h"
#include "zebra/zserv.h"
#include "zebra/zebra_vrf.h"
#include "zebra/redistribute.h"
#include "zebra/zebra_routemap.h"
#include "zebra/debug.h"
#include "zebra/zebra_rnh.h"
#include "zebra/interface.h"
#include "zebra/connected.h"

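/* Hook invoked whenever the kernel/FIB view of a route node changes.  The
 * reason string identifies the call site; see the hook_call() invocations
 * in rib_install_kernel(), rib_uninstall_kernel() and rib_uninstall() below.
 */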
DEFINE_HOOK(rib_update, (struct route_node *rn, const char *reason), (rn, reason))

/* Should we allow non-Quagga processes to delete our routes? */
extern int allow_delete;

/* Hold time for RIB process, should be very minimal.
 * It is useful to be able to set it otherwise for testing, hence exported
 * as a global here for test-rig code.
 */
int rib_process_hold_time = 10;

/* Each route type's string and default distance value. */
static const struct
{
  int key;
  int distance;
} route_info[ZEBRA_ROUTE_MAX] =
{
  [ZEBRA_ROUTE_SYSTEM] = {ZEBRA_ROUTE_SYSTEM, 0},
  [ZEBRA_ROUTE_KERNEL] = {ZEBRA_ROUTE_KERNEL, 0},
  [ZEBRA_ROUTE_CONNECT] = {ZEBRA_ROUTE_CONNECT, 0},
  [ZEBRA_ROUTE_STATIC] = {ZEBRA_ROUTE_STATIC, 1},
  [ZEBRA_ROUTE_RIP] = {ZEBRA_ROUTE_RIP, 120},
  [ZEBRA_ROUTE_RIPNG] = {ZEBRA_ROUTE_RIPNG, 120},
  [ZEBRA_ROUTE_OSPF] = {ZEBRA_ROUTE_OSPF, 110},
  [ZEBRA_ROUTE_OSPF6] = {ZEBRA_ROUTE_OSPF6, 110},
  [ZEBRA_ROUTE_ISIS] = {ZEBRA_ROUTE_ISIS, 115},
  [ZEBRA_ROUTE_BGP] = {ZEBRA_ROUTE_BGP, 20 /* IBGP is 200. */},
  [ZEBRA_ROUTE_NHRP] = {ZEBRA_ROUTE_NHRP, 10},
  /* no entry/default: 150 */
};

/* RPF lookup behaviour */
static enum multicast_mode ipv4_multicast_mode = MCAST_NO_CONFIG;

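/* Log helper: prefixes the message with the calling function, the VRF id
 * and the string form of the route node's prefix (tagged " (MRIB)" for the
 * multicast SAFI).  Used via the rnode_debug()/rnode_info() macros below.
 */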
static void __attribute__((format (printf, 5, 6)))
_rnode_zlog(const char *_func, vrf_id_t vrf_id, struct route_node *rn, int priority,
            const char *msgfmt, ...)
{
  char buf[SRCDEST2STR_BUFFER + sizeof(" (MRIB)")];
  char msgbuf[512];
  va_list ap;

  va_start(ap, msgfmt);
  vsnprintf(msgbuf, sizeof(msgbuf), msgfmt, ap);
  va_end(ap);

  if (rn)
    {
      rib_table_info_t *info = srcdest_rnode_table_info (rn);

      srcdest_rnode2str(rn, buf, sizeof(buf));
      if (info->safi == SAFI_MULTICAST)
        strcat(buf, " (MRIB)");
    }
  else
    {
      snprintf(buf, sizeof(buf), "{(route_node *) NULL}");
    }

  zlog (priority, "%s: %d:%s: %s", _func, vrf_id, buf, msgbuf);
}

#define rnode_debug(node, vrf_id, ...) \
  _rnode_zlog(__func__, vrf_id, node, LOG_DEBUG, __VA_ARGS__)
#define rnode_info(node, ...) \
  _rnode_zlog(__func__, vrf_id, node, LOG_INFO, __VA_ARGS__)

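/* Return the default administrative distance for a route type; types
 * without a route_info entry (and out-of-range types) fall back to 150.
 */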
u_char
route_distance (int type)
{
  u_char distance;

  if ((unsigned)type >= array_size(route_info))
    distance = 150;
  else
    distance = route_info[type].distance;

  return distance;
}

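/* A table id is usable for zebra routes unless it exceeds
 * ZEBRA_KERNEL_TABLE_MAX or (on Linux) names one of the kernel's
 * reserved tables.
 */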
int
is_zebra_valid_kernel_table(u_int32_t table_id)
{
  if (table_id > ZEBRA_KERNEL_TABLE_MAX)
    return 0;

#ifdef linux
  if ((table_id == RT_TABLE_UNSPEC) ||
      (table_id == RT_TABLE_LOCAL) ||
      (table_id == RT_TABLE_COMPAT))
    return 0;
#endif

  return 1;
}

int
is_zebra_main_routing_table(u_int32_t table_id)
{
  if ((table_id == RT_TABLE_MAIN) || (table_id == zebrad.rtm_table_default))
    return 1;
  return 0;
}

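/* Sanity check an address before using it as a route destination:
 * reject 127/8, class D and link-local for IPv4, and loopback and
 * link-local for IPv6.
 */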
int
zebra_check_addr (struct prefix *p)
{
  if (p->family == AF_INET)
    {
      u_int32_t addr;

      addr = p->u.prefix4.s_addr;
      addr = ntohl (addr);

      if (IPV4_NET127 (addr)
          || IN_CLASSD (addr)
          || IPV4_LINKLOCAL(addr))
        return 0;
    }
  if (p->family == AF_INET6)
    {
      if (IN6_IS_ADDR_LOOPBACK (&p->u.prefix6))
        return 0;
      if (IN6_IS_ADDR_LINKLOCAL(&p->u.prefix6))
        return 0;
    }
  return 1;
}

/* Add nexthop to the end of a rib node's nexthop list */
void
route_entry_nexthop_add (struct route_entry *re, struct nexthop *nexthop)
{
  nexthop_add(&re->nexthop, nexthop);
  re->nexthop_num++;
}

/**
 * route_entry_copy_nexthops - copy a nexthop (and any resolved nexthops)
 * to the route entry.
 */
void
route_entry_copy_nexthops (struct route_entry *re, struct nexthop *nh)
{
  struct nexthop *nexthop;

  nexthop = nexthop_new();
  nexthop->flags = nh->flags;
  nexthop->type = nh->type;
  nexthop->ifindex = nh->ifindex;
  memcpy(&(nexthop->gate), &(nh->gate), sizeof(union g_addr));
  memcpy(&(nexthop->src), &(nh->src), sizeof(union g_addr));
  if (nh->nh_label)
    nexthop_add_labels (nexthop, nh->nh_label_type, nh->nh_label->num_labels,
                        &nh->nh_label->label[0]);
  route_entry_nexthop_add(re, nexthop);
  if (CHECK_FLAG(nh->flags, NEXTHOP_FLAG_RECURSIVE))
    copy_nexthops(&nexthop->resolved, nh->resolved);
}

/* Delete specified nexthop from the list. */
void
route_entry_nexthop_delete (struct route_entry *re, struct nexthop *nexthop)
{
  if (nexthop->next)
    nexthop->next->prev = nexthop->prev;
  if (nexthop->prev)
    nexthop->prev->next = nexthop->next;
  else
    re->nexthop = nexthop->next;
  re->nexthop_num--;
}

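/* The route_entry_nexthop_*_add() constructors below allocate a nexthop of
 * the given type, fill in its gateway/interface fields and append it to the
 * route entry's nexthop list.
 */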
struct nexthop *
route_entry_nexthop_ifindex_add (struct route_entry *re, ifindex_t ifindex)
{
  struct nexthop *nexthop;

  nexthop = nexthop_new();
  nexthop->type = NEXTHOP_TYPE_IFINDEX;
  nexthop->ifindex = ifindex;

  route_entry_nexthop_add (re, nexthop);

  return nexthop;
}

struct nexthop *
route_entry_nexthop_ipv4_add (struct route_entry *re, struct in_addr *ipv4, struct in_addr *src)
{
  struct nexthop *nexthop;

  nexthop = nexthop_new();
  nexthop->type = NEXTHOP_TYPE_IPV4;
  nexthop->gate.ipv4 = *ipv4;
  if (src)
    nexthop->src.ipv4 = *src;

  route_entry_nexthop_add (re, nexthop);

  return nexthop;
}

struct nexthop *
route_entry_nexthop_ipv4_ifindex_add (struct route_entry *re, struct in_addr *ipv4,
                                      struct in_addr *src, ifindex_t ifindex)
{
  struct nexthop *nexthop;
  struct interface *ifp;

  nexthop = nexthop_new();
  nexthop->type = NEXTHOP_TYPE_IPV4_IFINDEX;
  nexthop->gate.ipv4 = *ipv4;
  if (src)
    nexthop->src.ipv4 = *src;
  nexthop->ifindex = ifindex;
  ifp = if_lookup_by_index (nexthop->ifindex, VRF_DEFAULT);
  /* Pending: is a NULL ifp acceptable here during bootup?
     There was a crash because ifp turned out to be NULL. */
  if (ifp)
    if (connected_is_unnumbered(ifp)) {
      SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK);
    }

  route_entry_nexthop_add (re, nexthop);

  return nexthop;
}

struct nexthop *
route_entry_nexthop_ipv6_add (struct route_entry *re, struct in6_addr *ipv6)
{
  struct nexthop *nexthop;

  nexthop = nexthop_new();
  nexthop->type = NEXTHOP_TYPE_IPV6;
  nexthop->gate.ipv6 = *ipv6;

  route_entry_nexthop_add (re, nexthop);

  return nexthop;
}

struct nexthop *
route_entry_nexthop_ipv6_ifindex_add (struct route_entry *re, struct in6_addr *ipv6,
                                      ifindex_t ifindex)
{
  struct nexthop *nexthop;

  nexthop = nexthop_new();
  nexthop->type = NEXTHOP_TYPE_IPV6_IFINDEX;
  nexthop->gate.ipv6 = *ipv6;
  nexthop->ifindex = ifindex;

  route_entry_nexthop_add (re, nexthop);

  return nexthop;
}

struct nexthop *
route_entry_nexthop_blackhole_add (struct route_entry *re)
{
  struct nexthop *nexthop;

  nexthop = nexthop_new();
  nexthop->type = NEXTHOP_TYPE_BLACKHOLE;
  SET_FLAG (re->flags, ZEBRA_FLAG_BLACKHOLE);

  route_entry_nexthop_add (re, nexthop);

  return nexthop;
}

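/* Build the nexthop that a recursive nexthop resolves to, based on the
 * matching route's nexthop, and append it to nexthop->resolved.
 */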
static void
nexthop_set_resolved (afi_t afi, struct nexthop *newhop, struct nexthop *nexthop)
{
  struct nexthop *resolved_hop;

  resolved_hop = nexthop_new();
  SET_FLAG (resolved_hop->flags, NEXTHOP_FLAG_ACTIVE);
  /* If the resolving route specifies a gateway, use it */
  if (newhop->type == NEXTHOP_TYPE_IPV4
      || newhop->type == NEXTHOP_TYPE_IPV4_IFINDEX)
    {
      resolved_hop->type = newhop->type;
      resolved_hop->gate.ipv4 = newhop->gate.ipv4;

      if (newhop->ifindex)
        {
          resolved_hop->type = NEXTHOP_TYPE_IPV4_IFINDEX;
          resolved_hop->ifindex = newhop->ifindex;
          if (newhop->flags & NEXTHOP_FLAG_ONLINK)
            resolved_hop->flags |= NEXTHOP_FLAG_ONLINK;
        }
    }
  if (newhop->type == NEXTHOP_TYPE_IPV6
      || newhop->type == NEXTHOP_TYPE_IPV6_IFINDEX)
    {
      resolved_hop->type = newhop->type;
      resolved_hop->gate.ipv6 = newhop->gate.ipv6;

      if (newhop->ifindex)
        {
          resolved_hop->type = NEXTHOP_TYPE_IPV6_IFINDEX;
          resolved_hop->ifindex = newhop->ifindex;
        }
    }

  /* If the resolving route is an interface route,
   * it means the gateway we are looking up is connected
   * to that interface. (The actual network is _not_ onlink).
   * Therefore, the resolved route should have the original
   * gateway as nexthop as it is directly connected.
   *
   * On Linux, we have to set the onlink netlink flag because
   * otherwise, the kernel won't accept the route.
   */
  if (newhop->type == NEXTHOP_TYPE_IFINDEX)
    {
      resolved_hop->flags |= NEXTHOP_FLAG_ONLINK;
      if (afi == AFI_IP)
        {
          resolved_hop->type = NEXTHOP_TYPE_IPV4_IFINDEX;
          resolved_hop->gate.ipv4 = nexthop->gate.ipv4;
        }
      else if (afi == AFI_IP6)
        {
          resolved_hop->type = NEXTHOP_TYPE_IPV6_IFINDEX;
          resolved_hop->gate.ipv6 = nexthop->gate.ipv6;
        }
      resolved_hop->ifindex = newhop->ifindex;
    }

  nexthop_add(&nexthop->resolved, resolved_hop);
}

/* If the force flag is not set, do not modify flags at all when
   uninstalling the route from the FIB. */
static int
nexthop_active (afi_t afi, struct route_entry *re, struct nexthop *nexthop, int set,
                struct route_node *top)
{
  struct prefix p;
  struct route_table *table;
  struct route_node *rn;
  struct route_entry *match;
  int resolved;
  struct nexthop *newhop, *tnewhop;
  int recursing = 0;
  struct interface *ifp;

  if (nexthop->type == NEXTHOP_TYPE_IPV4 || nexthop->type == NEXTHOP_TYPE_IPV6)
    nexthop->ifindex = 0;

  if (set)
    {
      UNSET_FLAG (nexthop->flags, NEXTHOP_FLAG_RECURSIVE);
      zebra_deregister_rnh_static_nexthops(re->vrf_id, nexthop->resolved, top);
      nexthops_free(nexthop->resolved);
      nexthop->resolved = NULL;
      re->nexthop_mtu = 0;
    }

  /* Skip nexthops that have been filtered out due to route-map */
  /* The nexthops are specific to this route and so the same */
  /* nexthop for a different route may not have this flag set */
  if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_FILTERED))
    return 0;

  /*
   * Check to see if we should trust the passed in information
   * for UNNUMBERED interfaces, as we won't find the GW
   * address in the routing table.
   */
  if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK))
    {
      ifp = if_lookup_by_index (nexthop->ifindex, VRF_DEFAULT);
      if (ifp && connected_is_unnumbered(ifp))
        {
          if (if_is_operative(ifp))
            return 1;
          else
            return 0;
        }
      else
        return 0;
    }

  /* Make lookup prefix. */
  memset (&p, 0, sizeof (struct prefix));
  switch (afi)
    {
    case AFI_IP:
      p.family = AF_INET;
      p.prefixlen = IPV4_MAX_PREFIXLEN;
      p.u.prefix4 = nexthop->gate.ipv4;
      break;
    case AFI_IP6:
      p.family = AF_INET6;
      p.prefixlen = IPV6_MAX_PREFIXLEN;
      p.u.prefix6 = nexthop->gate.ipv6;
      break;
    default:
      assert (afi != AFI_IP && afi != AFI_IP6);
      break;
    }
  /* Lookup table. */
  table = zebra_vrf_table (afi, SAFI_UNICAST, re->vrf_id);
  if (! table)
    return 0;

  rn = route_node_match (table, (struct prefix *) &p);
  while (rn)
    {
      route_unlock_node (rn);

      /* If lookup self prefix return immediately. */
      if (rn == top)
        return 0;

      /* Pick up selected route. */
      /* However, do not resolve over default route unless explicitly allowed. */
      if (is_default_prefix (&rn->p) &&
          !nh_resolve_via_default (p.family))
        return 0;

      RNODE_FOREACH_RE (rn, match)
        {
          if (CHECK_FLAG (match->status, ROUTE_ENTRY_REMOVED))
            continue;

          /* if the next hop is imported from another table, skip it */
          if (match->type == ZEBRA_ROUTE_TABLE)
            continue;
          if (CHECK_FLAG (match->status, ROUTE_ENTRY_SELECTED_FIB))
            break;
        }

      /* If there is no selected route or matched route is EGP, go up
         tree. */
      if (! match)
        {
          do {
            rn = rn->parent;
          } while (rn && rn->info == NULL);
          if (rn)
            route_lock_node (rn);

          continue;
        }

      /* If the longest prefix match for the nexthop yields
       * a blackhole, mark it as inactive. */
      if (CHECK_FLAG (match->flags, ZEBRA_FLAG_BLACKHOLE)
          || CHECK_FLAG (match->flags, ZEBRA_FLAG_REJECT))
        return 0;

      if (match->type == ZEBRA_ROUTE_CONNECT)
        {
          /* Directly point connected route. */
          newhop = match->nexthop;
          if (newhop)
            {
              if (nexthop->type == NEXTHOP_TYPE_IPV4 ||
                  nexthop->type == NEXTHOP_TYPE_IPV6)
                nexthop->ifindex = newhop->ifindex;
            }
          return 1;
        }
      else if (CHECK_FLAG (re->flags, ZEBRA_FLAG_INTERNAL))
        {
          resolved = 0;
          for (newhop = match->nexthop; newhop; newhop = newhop->next)
            if (CHECK_FLAG (newhop->flags, NEXTHOP_FLAG_FIB)
                && ! CHECK_FLAG (newhop->flags, NEXTHOP_FLAG_RECURSIVE))
              {
                if (set)
                  {
                    SET_FLAG (nexthop->flags, NEXTHOP_FLAG_RECURSIVE);
                    SET_FLAG(re->status, ROUTE_ENTRY_NEXTHOPS_CHANGED);

                    nexthop_set_resolved(afi, newhop, nexthop);
                  }
                resolved = 1;
              }
          return resolved;
        }
      else if (re->type == ZEBRA_ROUTE_STATIC)
        {
          resolved = 0;
          for (ALL_NEXTHOPS_RO(match->nexthop, newhop, tnewhop, recursing))
            if (CHECK_FLAG (newhop->flags, NEXTHOP_FLAG_FIB))
              {
                if (set)
                  {
                    SET_FLAG (nexthop->flags, NEXTHOP_FLAG_RECURSIVE);

                    nexthop_set_resolved(afi, newhop, nexthop);
                  }
                resolved = 1;
              }
          if (resolved && set)
            re->nexthop_mtu = match->mtu;
          return resolved;
        }
      else
        {
          return 0;
        }
    }
  return 0;
}

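/* Longest-prefix match for an address in the given table, returning the
 * selected route entry (and optionally its route node), or NULL.  For
 * non-connected matches at least one nexthop must be installed in the FIB.
 */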
struct route_entry *
rib_match (afi_t afi, safi_t safi, vrf_id_t vrf_id,
           union g_addr *addr, struct route_node **rn_out)
{
  struct prefix p;
  struct route_table *table;
  struct route_node *rn;
  struct route_entry *match;
  struct nexthop *newhop, *tnewhop;
  int recursing;

  /* Lookup table. */
  table = zebra_vrf_table (afi, safi, vrf_id);
  if (! table)
    return 0;

  memset (&p, 0, sizeof (struct prefix));
  p.family = afi;
  if (afi == AFI_IP)
    {
      p.u.prefix4 = addr->ipv4;
      p.prefixlen = IPV4_MAX_PREFIXLEN;
    }
  else
    {
      p.u.prefix6 = addr->ipv6;
      p.prefixlen = IPV6_MAX_PREFIXLEN;
    }

  rn = route_node_match (table, (struct prefix *) &p);

  while (rn)
    {
      route_unlock_node (rn);

      /* Pick up selected route. */
      RNODE_FOREACH_RE (rn, match)
        {
          if (CHECK_FLAG (match->status, ROUTE_ENTRY_REMOVED))
            continue;
          if (CHECK_FLAG (match->status, ROUTE_ENTRY_SELECTED_FIB))
            break;
        }

      /* If there is no selected route or matched route is EGP, go up
         tree. */
      if (! match)
        {
          do {
            rn = rn->parent;
          } while (rn && rn->info == NULL);
          if (rn)
            route_lock_node (rn);
        }
      else
        {
          if (match->type != ZEBRA_ROUTE_CONNECT)
            {
              int found = 0;
              for (ALL_NEXTHOPS_RO(match->nexthop, newhop, tnewhop, recursing))
                if (CHECK_FLAG (newhop->flags, NEXTHOP_FLAG_FIB))
                  {
                    found = 1;
                    break;
                  }
              if (!found)
                return NULL;
            }

          if (rn_out)
            *rn_out = rn;
          return match;
        }
    }
  return NULL;
}

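/* RPF lookup: resolve an IPv4 address according to the configured multicast
 * mode, consulting the MRIB and/or URIB and, for the mixed modes, picking
 * the winner by administrative distance or prefix length.
 */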
struct route_entry *
rib_match_ipv4_multicast (vrf_id_t vrf_id, struct in_addr addr, struct route_node **rn_out)
{
  struct route_entry *re = NULL, *mre = NULL, *ure = NULL;
  struct route_node *m_rn = NULL, *u_rn = NULL;
  union g_addr gaddr = { .ipv4 = addr };

  switch (ipv4_multicast_mode)
    {
    case MCAST_MRIB_ONLY:
      return rib_match (AFI_IP, SAFI_MULTICAST, vrf_id, &gaddr, rn_out);
    case MCAST_URIB_ONLY:
      return rib_match (AFI_IP, SAFI_UNICAST, vrf_id, &gaddr, rn_out);
    case MCAST_NO_CONFIG:
    case MCAST_MIX_MRIB_FIRST:
      re = mre = rib_match (AFI_IP, SAFI_MULTICAST, vrf_id, &gaddr, &m_rn);
      if (!mre)
        re = ure = rib_match (AFI_IP, SAFI_UNICAST, vrf_id, &gaddr, &u_rn);
      break;
    case MCAST_MIX_DISTANCE:
      mre = rib_match (AFI_IP, SAFI_MULTICAST, vrf_id, &gaddr, &m_rn);
      ure = rib_match (AFI_IP, SAFI_UNICAST, vrf_id, &gaddr, &u_rn);
      if (mre && ure)
        re = ure->distance < mre->distance ? ure : mre;
      else if (mre)
        re = mre;
      else if (ure)
        re = ure;
      break;
    case MCAST_MIX_PFXLEN:
      mre = rib_match (AFI_IP, SAFI_MULTICAST, vrf_id, &gaddr, &m_rn);
      ure = rib_match (AFI_IP, SAFI_UNICAST, vrf_id, &gaddr, &u_rn);
      if (mre && ure)
        re = u_rn->p.prefixlen > m_rn->p.prefixlen ? ure : mre;
      else if (mre)
        re = mre;
      else if (ure)
        re = ure;
      break;
    }

  if (rn_out)
    *rn_out = (re == mre) ? m_rn : u_rn;

  if (IS_ZEBRA_DEBUG_RIB)
    {
      char buf[BUFSIZ];
      inet_ntop (AF_INET, &addr, buf, BUFSIZ);

      zlog_debug("%s: %s: found %s, using %s",
                 __func__, buf,
                 mre ? (ure ? "MRIB+URIB" : "MRIB") :
                 ure ? "URIB" : "nothing",
                 re == ure ? "URIB" : re == mre ? "MRIB" : "none");
    }
  return re;
}

void
multicast_mode_ipv4_set (enum multicast_mode mode)
{
  if (IS_ZEBRA_DEBUG_RIB)
    zlog_debug("%s: multicast lookup mode set (%d)", __func__, mode);
  ipv4_multicast_mode = mode;
}

enum multicast_mode
multicast_mode_ipv4_get (void)
{
  return ipv4_multicast_mode;
}

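/* Exact-prefix lookup; returns the selected route entry only if it is
 * usable, i.e. connected or with at least one FIB-installed nexthop.
 */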
struct route_entry *
rib_lookup_ipv4 (struct prefix_ipv4 *p, vrf_id_t vrf_id)
{
  struct route_table *table;
  struct route_node *rn;
  struct route_entry *match;
  struct nexthop *nexthop, *tnexthop;
  int recursing;

  /* Lookup table. */
  table = zebra_vrf_table (AFI_IP, SAFI_UNICAST, vrf_id);
  if (! table)
    return 0;

  rn = route_node_lookup (table, (struct prefix *) p);

  /* No route for this prefix. */
  if (! rn)
    return NULL;

  /* Unlock node. */
  route_unlock_node (rn);

  RNODE_FOREACH_RE (rn, match)
    {
      if (CHECK_FLAG (match->status, ROUTE_ENTRY_REMOVED))
        continue;
      if (CHECK_FLAG (match->status, ROUTE_ENTRY_SELECTED_FIB))
        break;
    }

  if (! match)
    return NULL;

  if (match->type == ZEBRA_ROUTE_CONNECT)
    return match;

  for (ALL_NEXTHOPS_RO(match->nexthop, nexthop, tnexthop, recursing))
    if (CHECK_FLAG (nexthop->flags, NEXTHOP_FLAG_FIB))
      return match;

  return NULL;
}

/*
 * This clone function, unlike its original rib_lookup_ipv4(), checks
 * if the specified IPv4 route record (prefix/mask -> gate) exists in
 * the whole RIB and has ROUTE_ENTRY_SELECTED_FIB set.
 *
 * Return values:
 * -1: error
 * 0: exact match found
 * 1: a match was found with a different gate
 * 2: connected route found
 * 3: no matches found
 */
int
rib_lookup_ipv4_route (struct prefix_ipv4 *p, union sockunion * qgate,
                       vrf_id_t vrf_id)
{
  struct route_table *table;
  struct route_node *rn;
  struct route_entry *match;
  struct nexthop *nexthop, *tnexthop;
  int recursing;
  int nexthops_active;

  /* Lookup table. */
  table = zebra_vrf_table (AFI_IP, SAFI_UNICAST, vrf_id);
  if (! table)
    return ZEBRA_RIB_LOOKUP_ERROR;

  /* Scan the RIB table for exactly matching RIB entry. */
  rn = route_node_lookup (table, (struct prefix *) p);

  /* No route for this prefix. */
  if (! rn)
    return ZEBRA_RIB_NOTFOUND;

  /* Unlock node. */
  route_unlock_node (rn);

  /* Find out whether a "selected" route entry exists for this node at all. */
  RNODE_FOREACH_RE (rn, match)
    {
      if (CHECK_FLAG (match->status, ROUTE_ENTRY_REMOVED))
        continue;
      if (CHECK_FLAG (match->status, ROUTE_ENTRY_SELECTED_FIB))
        break;
    }

  /* None such found :( */
  if (!match)
    return ZEBRA_RIB_NOTFOUND;

  if (match->type == ZEBRA_ROUTE_CONNECT)
    return ZEBRA_RIB_FOUND_CONNECTED;

  /* Ok, we have a good candidate, let's check its nexthop list... */
  nexthops_active = 0;
  for (ALL_NEXTHOPS_RO(match->nexthop, nexthop, tnexthop, recursing))
    if (CHECK_FLAG (nexthop->flags, NEXTHOP_FLAG_FIB))
      {
        nexthops_active = 1;
        if (nexthop->gate.ipv4.s_addr == sockunion2ip (qgate))
          return ZEBRA_RIB_FOUND_EXACT;
        if (IS_ZEBRA_DEBUG_RIB)
          {
            char gate_buf[INET_ADDRSTRLEN], qgate_buf[INET_ADDRSTRLEN];
            inet_ntop (AF_INET, &nexthop->gate.ipv4.s_addr, gate_buf, INET_ADDRSTRLEN);
            inet_ntop (AF_INET, &sockunion2ip(qgate), qgate_buf, INET_ADDRSTRLEN);
            zlog_debug ("%s: qgate == %s, %s == %s", __func__,
                        qgate_buf, recursing ? "rgate" : "gate", gate_buf);
          }
      }

  if (nexthops_active)
    return ZEBRA_RIB_FOUND_NOGATE;

  return ZEBRA_RIB_NOTFOUND;
}

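/* "System" routes (kernel and connected) are not installed into or removed
 * from the kernel by zebra itself.
 */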
#define RIB_SYSTEM_ROUTE(R) \
  ((R)->type == ZEBRA_ROUTE_KERNEL || (R)->type == ZEBRA_ROUTE_CONNECT)

/* This function verifies reachability of one given nexthop, which can be
 * numbered or unnumbered, IPv4 or IPv6. The result is unconditionally stored
 * in the nexthop->flags field. If the 4th parameter, 'set', is non-zero,
 * nexthop->ifindex will be updated appropriately as well.
 * An existing route map can turn an (otherwise active) nexthop into inactive,
 * but not vice versa.
 *
 * The return value is the final value of the 'ACTIVE' flag.
 */
static unsigned
nexthop_active_check (struct route_node *rn, struct route_entry *re,
                      struct nexthop *nexthop, int set)
{
  struct interface *ifp;
  route_map_result_t ret = RMAP_MATCH;
  int family;
  char buf[SRCDEST2STR_BUFFER];
  struct prefix *p, *src_p;
  srcdest_rnode_prefixes (rn, &p, &src_p);

  if (rn->p.family == AF_INET)
    family = AFI_IP;
  else if (rn->p.family == AF_INET6)
    family = AFI_IP6;
  else
    family = 0;
  switch (nexthop->type)
    {
    case NEXTHOP_TYPE_IFINDEX:
      ifp = if_lookup_by_index (nexthop->ifindex, re->vrf_id);
      if (ifp && if_is_operative(ifp))
        SET_FLAG (nexthop->flags, NEXTHOP_FLAG_ACTIVE);
      else
        UNSET_FLAG (nexthop->flags, NEXTHOP_FLAG_ACTIVE);
      break;
    case NEXTHOP_TYPE_IPV4:
    case NEXTHOP_TYPE_IPV4_IFINDEX:
      family = AFI_IP;
      if (nexthop_active (AFI_IP, re, nexthop, set, rn))
        SET_FLAG (nexthop->flags, NEXTHOP_FLAG_ACTIVE);
      else
        UNSET_FLAG (nexthop->flags, NEXTHOP_FLAG_ACTIVE);
      break;
    case NEXTHOP_TYPE_IPV6:
      family = AFI_IP6;
      if (nexthop_active (AFI_IP6, re, nexthop, set, rn))
        SET_FLAG (nexthop->flags, NEXTHOP_FLAG_ACTIVE);
      else
        UNSET_FLAG (nexthop->flags, NEXTHOP_FLAG_ACTIVE);
      break;
    case NEXTHOP_TYPE_IPV6_IFINDEX:
      /* RFC 5549, v4 prefix with v6 NH */
      if (rn->p.family != AF_INET)
        family = AFI_IP6;
      if (IN6_IS_ADDR_LINKLOCAL (&nexthop->gate.ipv6))
        {
          ifp = if_lookup_by_index (nexthop->ifindex, re->vrf_id);
          if (ifp && if_is_operative(ifp))
            SET_FLAG (nexthop->flags, NEXTHOP_FLAG_ACTIVE);
          else
            UNSET_FLAG (nexthop->flags, NEXTHOP_FLAG_ACTIVE);
        }
      else
        {
          if (nexthop_active (AFI_IP6, re, nexthop, set, rn))
            SET_FLAG (nexthop->flags, NEXTHOP_FLAG_ACTIVE);
          else
            UNSET_FLAG (nexthop->flags, NEXTHOP_FLAG_ACTIVE);
        }
      break;
    case NEXTHOP_TYPE_BLACKHOLE:
      SET_FLAG (nexthop->flags, NEXTHOP_FLAG_ACTIVE);
      break;
    default:
      break;
    }
  if (! CHECK_FLAG (nexthop->flags, NEXTHOP_FLAG_ACTIVE))
    return 0;

  /* XXX: What exactly do those checks do? Do we support
   * e.g. IPv4 routes with IPv6 nexthops or vice versa? */
  if (RIB_SYSTEM_ROUTE(re) ||
      (family == AFI_IP && p->family != AF_INET) ||
      (family == AFI_IP6 && p->family != AF_INET6))
    return CHECK_FLAG (nexthop->flags, NEXTHOP_FLAG_ACTIVE);

  /* The original code didn't determine the family correctly
   * e.g. for NEXTHOP_TYPE_IFINDEX. Retrieve the correct afi
   * from the rib_table_info in those cases.
   * Possibly it may be better to use only the rib_table_info
   * in every case.
   */
  if (!family)
    {
      rib_table_info_t *info;

      info = srcdest_rnode_table_info(rn);
      family = info->afi;
    }

  memset(&nexthop->rmap_src.ipv6, 0, sizeof(union g_addr));

  /* It'll get set if required inside */
  ret = zebra_route_map_check(family, re->type, p, nexthop, re->vrf_id,
                              re->tag);
  if (ret == RMAP_DENYMATCH)
    {
      if (IS_ZEBRA_DEBUG_RIB)
        {
          srcdest_rnode2str(rn, buf, sizeof(buf));
          zlog_debug("%u:%s: Filtering out with NH out %s due to route map",
                     re->vrf_id, buf,
                     ifindex2ifname (nexthop->ifindex, re->vrf_id));
        }
      UNSET_FLAG (nexthop->flags, NEXTHOP_FLAG_ACTIVE);
    }
  return CHECK_FLAG (nexthop->flags, NEXTHOP_FLAG_ACTIVE);
}

/* Iterate over all nexthops of the given RIB entry and refresh their
 * ACTIVE flag. re->nexthop_active_num is updated accordingly. If any
 * nexthop is found to toggle the ACTIVE flag, the whole re structure
 * is flagged with ROUTE_ENTRY_CHANGED. The 4th 'set' argument is
 * transparently passed to nexthop_active_check().
 *
 * Return value is the new number of active nexthops.
 */
static int
nexthop_active_update (struct route_node *rn, struct route_entry *re, int set)
{
  struct nexthop *nexthop;
  union g_addr prev_src;
  unsigned int prev_active, new_active, old_num_nh;
  ifindex_t prev_index;
  old_num_nh = re->nexthop_active_num;

  re->nexthop_active_num = 0;
  UNSET_FLAG (re->status, ROUTE_ENTRY_CHANGED);

  for (nexthop = re->nexthop; nexthop; nexthop = nexthop->next)
    {
      /* No protocol daemon provides src and so we're skipping tracking it */
      prev_src = nexthop->rmap_src;
      prev_active = CHECK_FLAG (nexthop->flags, NEXTHOP_FLAG_ACTIVE);
      prev_index = nexthop->ifindex;
      if ((new_active = nexthop_active_check (rn, re, nexthop, set)))
        re->nexthop_active_num++;
      /* Don't allow src setting on IPv6 addr for now */
      if (prev_active != new_active ||
          prev_index != nexthop->ifindex ||
          ((nexthop->type >= NEXTHOP_TYPE_IFINDEX &&
            nexthop->type < NEXTHOP_TYPE_IPV6) &&
           prev_src.ipv4.s_addr != nexthop->rmap_src.ipv4.s_addr) ||
          ((nexthop->type >= NEXTHOP_TYPE_IPV6 &&
            nexthop->type < NEXTHOP_TYPE_BLACKHOLE) &&
           !(IPV6_ADDR_SAME (&prev_src.ipv6, &nexthop->rmap_src.ipv6))))
        {
          SET_FLAG (re->status, ROUTE_ENTRY_CHANGED);
          SET_FLAG (re->status, ROUTE_ENTRY_NEXTHOPS_CHANGED);
        }
    }

  if (old_num_nh != re->nexthop_active_num)
    SET_FLAG (re->status, ROUTE_ENTRY_CHANGED);

  if (CHECK_FLAG (re->status, ROUTE_ENTRY_CHANGED))
    {
      SET_FLAG (re->status, ROUTE_ENTRY_NEXTHOPS_CHANGED);
    }

  return re->nexthop_active_num;
}

/*
 * Is this RIB labeled-unicast? It must be of type BGP and all paths
 * (nexthops) must have a label.
 */
int
zebra_rib_labeled_unicast (struct route_entry *re)
{
  struct nexthop *nexthop = NULL, *tnexthop;
  int recursing;

  if (re->type != ZEBRA_ROUTE_BGP)
    return 0;

  for (ALL_NEXTHOPS_RO(re->nexthop, nexthop, tnexthop, recursing))
    if (!nexthop->nh_label || !nexthop->nh_label->num_labels)
      return 0;

  return 1;
}

/* Update flag indicates whether this is a "replace" or not. Currently, this
 * is only used for IPv4.
 */
int
rib_install_kernel (struct route_node *rn, struct route_entry *re, struct route_entry *old)
{
  int ret = 0;
  struct nexthop *nexthop, *tnexthop;
  rib_table_info_t *info = srcdest_rnode_table_info(rn);
  int recursing;
  struct prefix *p, *src_p;
  struct zebra_vrf *zvrf = vrf_info_lookup (re->vrf_id);

  srcdest_rnode_prefixes (rn, &p, &src_p);

  if (info->safi != SAFI_UNICAST)
    {
      for (ALL_NEXTHOPS_RO(re->nexthop, nexthop, tnexthop, recursing))
        SET_FLAG (nexthop->flags, NEXTHOP_FLAG_FIB);
      return ret;
    }

  /*
   * Make sure we update the FPM any time we send new information to
   * the kernel.
   */
  hook_call(rib_update, rn, "installing in kernel");
  ret = kernel_route_rib (p, src_p, old, re);
  zvrf->installs++;

  /* If install succeeds, update FIB flag for nexthops. */
  if (!ret)
    {
      for (ALL_NEXTHOPS_RO(re->nexthop, nexthop, tnexthop, recursing))
        {
          if (CHECK_FLAG (nexthop->flags, NEXTHOP_FLAG_RECURSIVE))
            continue;

          if (CHECK_FLAG (nexthop->flags, NEXTHOP_FLAG_ACTIVE))
            SET_FLAG (nexthop->flags, NEXTHOP_FLAG_FIB);
          else
            UNSET_FLAG (nexthop->flags, NEXTHOP_FLAG_FIB);
        }
    }

  return ret;
}

/* Uninstall the route from kernel. */
int
rib_uninstall_kernel (struct route_node *rn, struct route_entry *re)
{
  int ret = 0;
  struct nexthop *nexthop, *tnexthop;
  rib_table_info_t *info = srcdest_rnode_table_info(rn);
  int recursing;
  struct prefix *p, *src_p;
  struct zebra_vrf *zvrf = vrf_info_lookup (re->vrf_id);

  srcdest_rnode_prefixes (rn, &p, &src_p);

  if (info->safi != SAFI_UNICAST)
    {
      for (ALL_NEXTHOPS_RO(re->nexthop, nexthop, tnexthop, recursing))
        SET_FLAG (nexthop->flags, NEXTHOP_FLAG_FIB);
      return ret;
    }

  /*
   * Make sure we update the FPM any time we send new information to
   * the kernel.
   */
  hook_call(rib_update, rn, "uninstalling from kernel");
  ret = kernel_route_rib (p, src_p, re, NULL);
  zvrf->removals++;

  for (ALL_NEXTHOPS_RO(re->nexthop, nexthop, tnexthop, recursing))
    UNSET_FLAG (nexthop->flags, NEXTHOP_FLAG_FIB);

  return ret;
}

/* Uninstall the route from the kernel and withdraw it from redistribution. */
static void
rib_uninstall (struct route_node *rn, struct route_entry *re)
{
  rib_table_info_t *info = srcdest_rnode_table_info(rn);

  if (CHECK_FLAG (re->status, ROUTE_ENTRY_SELECTED_FIB))
    {
      if (info->safi == SAFI_UNICAST)
        hook_call(rib_update, rn, "rib_uninstall");

      if (! RIB_SYSTEM_ROUTE (re))
        rib_uninstall_kernel (rn, re);

      /* If labeled-unicast route, uninstall transit LSP. */
      if (zebra_rib_labeled_unicast (re))
        zebra_mpls_lsp_uninstall (info->zvrf, rn, re);

      UNSET_FLAG (re->status, ROUTE_ENTRY_SELECTED_FIB);
    }

  if (CHECK_FLAG (re->flags, ZEBRA_FLAG_SELECTED))
    {
      struct prefix *p, *src_p;
      srcdest_rnode_prefixes (rn, &p, &src_p);

      redistribute_delete (p, src_p, re);
      UNSET_FLAG (re->flags, ZEBRA_FLAG_SELECTED);
    }
}

/*
 * rib_can_delete_dest
 *
 * Returns TRUE if the given dest can be deleted from the table.
 */
static int
rib_can_delete_dest (rib_dest_t *dest)
{
  if (dest->routes)
    {
      return 0;
    }

  /*
   * Don't delete the dest if we have to update the FPM about this
   * prefix.
   */
  if (CHECK_FLAG (dest->flags, RIB_DEST_UPDATE_FPM) ||
      CHECK_FLAG (dest->flags, RIB_DEST_SENT_TO_FPM))
    return 0;

  return 1;
}

/*
|
|
|
|
* rib_gc_dest
|
|
|
|
*
|
|
|
|
* Garbage collect the rib dest corresponding to the given route node
|
|
|
|
* if appropriate.
|
|
|
|
*
|
|
|
|
* Returns TRUE if the dest was deleted, FALSE otherwise.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
rib_gc_dest (struct route_node *rn)
|
|
|
|
{
|
|
|
|
rib_dest_t *dest;
|
2015-11-20 17:48:32 +01:00
|
|
|
struct zebra_vrf *zvrf;
|
2012-11-13 23:48:53 +01:00
|
|
|
|
|
|
|
dest = rib_dest_from_rnode (rn);
|
|
|
|
if (!dest)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (!rib_can_delete_dest (dest))
|
|
|
|
return 0;
|
|
|
|
|
2015-11-20 17:48:32 +01:00
|
|
|
zvrf = rib_dest_vrf (dest);
|
2014-04-24 20:22:53 +02:00
|
|
|
if (IS_ZEBRA_DEBUG_RIB)
|
2016-10-30 22:50:26 +01:00
|
|
|
rnode_debug (rn, zvrf_id (zvrf), "removing dest from table");
|
2012-11-13 23:48:53 +01:00
|
|
|
|
|
|
|
dest->rnode = NULL;
|
|
|
|
XFREE (MTYPE_RIB_DEST, dest);
|
|
|
|
rn->info = NULL;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Release the one reference that we keep on the route node.
|
|
|
|
*/
|
|
|
|
route_unlock_node (rn);
|
|
|
|
return 1;
|
|
|
|
}
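
/*
 * The rib_process_*_fib () helpers below carry out the FIB side of route
 * selection (presumably driven from rib_process ()): _add installs a
 * newly selected entry, _del removes the previously selected one, and
 * _update handles the replace case, including recovery when the kernel
 * install fails.
 */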

static void
rib_process_add_fib(struct zebra_vrf *zvrf, struct route_node *rn,
                    struct route_entry *new)
{
  hook_call(rib_update, rn, "new route selected");

  /* Update the real nexthop. This may determine whether the nexthop is active. */
  if (!nexthop_active_update (rn, new, 1))
    {
      UNSET_FLAG(new->status, ROUTE_ENTRY_CHANGED);
      return;
    }

  SET_FLAG (new->status, ROUTE_ENTRY_SELECTED_FIB);
  if (IS_ZEBRA_DEBUG_RIB)
    {
      char buf[SRCDEST2STR_BUFFER];
      srcdest_rnode2str(rn, buf, sizeof(buf));
      zlog_debug ("%u:%s: Adding route rn %p, re %p (type %d)",
                  zvrf_id (zvrf), buf, rn, new, new->type);
    }

  /* If labeled-unicast route, install transit LSP. */
  if (zebra_rib_labeled_unicast (new))
    zebra_mpls_lsp_install (zvrf, rn, new);

  if (!RIB_SYSTEM_ROUTE (new))
    {
      if (rib_install_kernel (rn, new, NULL))
        {
          char buf[SRCDEST2STR_BUFFER];
          srcdest_rnode2str(rn, buf, sizeof(buf));
          zlog_warn ("%u:%s: Route install failed",
                     zvrf_id (zvrf), buf);
        }
    }

  UNSET_FLAG(new->status, ROUTE_ENTRY_CHANGED);
}
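
/*
 * Note the ordering in rib_process_del_fib () below: the transit LSP is
 * torn down before the kernel route, and the nexthops are re-evaluated
 * afterwards so the entry is left unselected but internally consistent.
 */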

static void
rib_process_del_fib(struct zebra_vrf *zvrf, struct route_node *rn,
                    struct route_entry *old)
{
  hook_call(rib_update, rn, "removing existing route");

  /* Uninstall from kernel. */
  if (IS_ZEBRA_DEBUG_RIB)
    {
      char buf[SRCDEST2STR_BUFFER];
      srcdest_rnode2str(rn, buf, sizeof(buf));
      zlog_debug ("%u:%s: Deleting route rn %p, re %p (type %d)",
                  zvrf_id (zvrf), buf, rn, old, old->type);
    }

  /* If labeled-unicast route, uninstall transit LSP. */
  if (zebra_rib_labeled_unicast (old))
    zebra_mpls_lsp_uninstall (zvrf, rn, old);

  if (!RIB_SYSTEM_ROUTE (old))
    rib_uninstall_kernel (rn, old);

  UNSET_FLAG (old->status, ROUTE_ENTRY_SELECTED_FIB);

  /* Update nexthop for route, reset changed flag. */
  nexthop_active_update (rn, old, 1);
  UNSET_FLAG(old->status, ROUTE_ENTRY_CHANGED);
}
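
/*
 * Outcome sketch for rib_process_update_fib () below (new may equal old
 * when only the nexthops changed):
 *
 *   nexthop active, install ok     -> new marked ROUTE_ENTRY_SELECTED_FIB
 *   nexthop active, install failed -> old uninstalled, flag cleared on new
 *   nexthop inactive               -> old uninstalled, flag cleared on new
 */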
|
|
|
|
|
|
|
|
static void
|
2016-01-15 16:36:31 +01:00
|
|
|
rib_process_update_fib (struct zebra_vrf *zvrf, struct route_node *rn,
|
2017-06-01 13:26:25 +02:00
|
|
|
struct route_entry *old, struct route_entry *new)
|
zebra: Implement recovery for route install failure
Quagga does not have proper recovery for route install failure (in
the kernel). The lack of this may not be a significant issue if the
failure is only an exception. However, the introduction of route
replace presents a new failure scenario which was not there earlier.
Before replace, the update operation involved a delete followed by
add; the failure of add would not leave hanging route entries in the
kernel as they would've got deleted first. With route replace, if
the replace fails, recovery action to delete the route is needed, else
the route remains hanging in the kernel.
In particular, with VRFs and in the presence of ECMP/multipath, a
failure mode exists where Quagga thinks that routes have been cleaned
up and deleted from the kernel but the kernel continues to retain them.
This happens when multiple VRF interfaces are moved from one VRF to
another.
This patch addresses this scenario by implementing proper recovery for
route install failure.
Signed-off-by: Vivek Venkatraman <vivek@cumulusnetworks.com>
Reviewed-by: Dinesh Dutt <ddutt@cumulusnetworks.com>
Reviewed-by: Donald Sharp <sharpd@cumulusnetworks.com>
Ticket: CM-10361
Reviewed By: CCR-4566
Testing Done: bgp-min, ospf-min, bgp-smoke, ospf-smoke and manual
Note: There are some test failures and results aren't consistent across
runs; Daniel has resolved many of these through other fixes.
2016-04-29 07:09:17 +02:00
|
|
|
{
|
|
|
|
struct nexthop *nexthop = NULL, *tnexthop;
|
|
|
|
int recursing;
|
|
|
|
int nh_active = 0;
|
|
|
|
int installed = 1;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We have to install or update if a new route has been selected or
|
|
|
|
* something has changed.
|
|
|
|
*/
|
2016-01-15 16:36:31 +01:00
|
|
|
if (new != old ||
|
2017-06-01 13:26:25 +02:00
|
|
|
CHECK_FLAG (new->status, ROUTE_ENTRY_CHANGED))
|
zebra: Implement recovery for route install failure
Quagga does not have proper recovery for route install failure (in
the kernel). The lack of this may not be a significant issue if the
failure is only an exception. However, the introduction of route
replace presents a new failure scenario which was not there earlier.
Before replace, the update operation involved a delete followed by
add; the failure of add would not leave hanging route entries in the
kernel as they would've got deleted first. With route replace, if
the replace fails, recovery action to delete the route is needed, else
the route remains hanging in the kernel.
In particular, with VRFs and in the presence of ECMP/multipath, a
failure mode exists where Quagga thinks that routes have been cleaned
up and deleted from the kernel but the kernel continues to retain them.
This happens when multiple VRF interfaces are moved from one VRF to
another.
This patch addresses this scenario by implementing proper recovery for
route install failure.
Signed-off-by: Vivek Venkatraman <vivek@cumulusnetworks.com>
Reviewed-by: Dinesh Dutt <ddutt@cumulusnetworks.com>
Reviewed-by: Donald Sharp <sharpd@cumulusnetworks.com>
Ticket: CM-10361
Reviewed By: CCR-4566
Testing Done: bgp-min, ospf-min, bgp-smoke, ospf-smoke and manual
Note: There are some test failures and results aren't consistent across
runs; Daniel has resolved many of these through other fixes.
2016-04-29 07:09:17 +02:00
|
|
|
{
|
2017-02-13 00:29:37 +01:00
|
|
|
hook_call(rib_update, rn, "updating existing route");
|
zebra: Implement recovery for route install failure
Quagga does not have proper recovery for route install failure (in
the kernel). The lack of this may not be a significant issue if the
failure is only an exception. However, the introduction of route
replace presents a new failure scenario which was not there earlier.
Before replace, the update operation involved a delete followed by
add; the failure of add would not leave hanging route entries in the
kernel as they would've got deleted first. With route replace, if
the replace fails, recovery action to delete the route is needed, else
the route remains hanging in the kernel.
In particular, with VRFs and in the presence of ECMP/multipath, a
failure mode exists where Quagga thinks that routes have been cleaned
up and deleted from the kernel but the kernel continues to retain them.
This happens when multiple VRF interfaces are moved from one VRF to
another.
This patch addresses this scenario by implementing proper recovery for
route install failure.
Signed-off-by: Vivek Venkatraman <vivek@cumulusnetworks.com>
Reviewed-by: Dinesh Dutt <ddutt@cumulusnetworks.com>
Reviewed-by: Donald Sharp <sharpd@cumulusnetworks.com>
Ticket: CM-10361
Reviewed By: CCR-4566
Testing Done: bgp-min, ospf-min, bgp-smoke, ospf-smoke and manual
Note: There are some test failures and results aren't consistent across
runs; Daniel has resolved many of these through other fixes.
2016-04-29 07:09:17 +02:00
|
|
|
|
|
|
|
/* Update the nexthop; we could determine here that nexthop is inactive. */
|
2016-01-15 16:36:31 +01:00
|
|
|
if (nexthop_active_update (rn, new, 1))
|
zebra: Implement recovery for route install failure
Quagga does not have proper recovery for route install failure (in
the kernel). The lack of this may not be a significant issue if the
failure is only an exception. However, the introduction of route
replace presents a new failure scenario which was not there earlier.
Before replace, the update operation involved a delete followed by
add; the failure of add would not leave hanging route entries in the
kernel as they would've got deleted first. With route replace, if
the replace fails, recovery action to delete the route is needed, else
the route remains hanging in the kernel.
In particular, with VRFs and in the presence of ECMP/multipath, a
failure mode exists where Quagga thinks that routes have been cleaned
up and deleted from the kernel but the kernel continues to retain them.
This happens when multiple VRF interfaces are moved from one VRF to
another.
This patch addresses this scenario by implementing proper recovery for
route install failure.
Signed-off-by: Vivek Venkatraman <vivek@cumulusnetworks.com>
Reviewed-by: Dinesh Dutt <ddutt@cumulusnetworks.com>
Reviewed-by: Donald Sharp <sharpd@cumulusnetworks.com>
Ticket: CM-10361
Reviewed By: CCR-4566
Testing Done: bgp-min, ospf-min, bgp-smoke, ospf-smoke and manual
Note: There are some test failures and results aren't consistent across
runs; Daniel has resolved many of these through other fixes.
2016-04-29 07:09:17 +02:00
|
|
|
nh_active = 1;
|
|
|
|
|
|
|
|
/* If nexthop is active, install the selected route, if appropriate. If
|
|
|
|
* the install succeeds, cleanup flags for prior route, if different from
|
|
|
|
* newly selected.
|
|
|
|
*/
|
|
|
|
if (nh_active)
|
|
|
|
{
|
|
|
|
if (IS_ZEBRA_DEBUG_RIB)
|
|
|
|
{
|
2016-12-05 20:05:30 +01:00
|
|
|
char buf[SRCDEST2STR_BUFFER];
|
|
|
|
srcdest_rnode2str(rn, buf, sizeof(buf));
|
2016-01-15 16:36:31 +01:00
|
|
|
if (new != old)
|
2017-06-01 13:26:25 +02:00
|
|
|
zlog_debug ("%u:%s: Updating route rn %p, re %p (type %d) "
|
2016-12-05 20:05:30 +01:00
|
|
|
"old %p (type %d)", zvrf_id (zvrf), buf,
|
2016-01-15 16:36:31 +01:00
|
|
|
rn, new, new->type, old, old->type);
|
zebra: Implement recovery for route install failure
Quagga does not have proper recovery for route install failure (in
the kernel). The lack of this may not be a significant issue if the
failure is only an exception. However, the introduction of route
replace presents a new failure scenario which was not there earlier.
Before replace, the update operation involved a delete followed by
add; the failure of add would not leave hanging route entries in the
kernel as they would've got deleted first. With route replace, if
the replace fails, recovery action to delete the route is needed, else
the route remains hanging in the kernel.
In particular, with VRFs and in the presence of ECMP/multipath, a
failure mode exists where Quagga thinks that routes have been cleaned
up and deleted from the kernel but the kernel continues to retain them.
This happens when multiple VRF interfaces are moved from one VRF to
another.
This patch addresses this scenario by implementing proper recovery for
route install failure.
Signed-off-by: Vivek Venkatraman <vivek@cumulusnetworks.com>
Reviewed-by: Dinesh Dutt <ddutt@cumulusnetworks.com>
Reviewed-by: Donald Sharp <sharpd@cumulusnetworks.com>
Ticket: CM-10361
Reviewed By: CCR-4566
Testing Done: bgp-min, ospf-min, bgp-smoke, ospf-smoke and manual
Note: There are some test failures and results aren't consistent across
runs; Daniel has resolved many of these through other fixes.
2016-04-29 07:09:17 +02:00
|
|
|
else
|
2017-06-01 13:26:25 +02:00
|
|
|
zlog_debug ("%u:%s: Updating route rn %p, re %p (type %d)",
|
2016-12-05 20:05:30 +01:00
|
|
|
zvrf_id (zvrf), buf, rn, new, new->type);
|
zebra: Implement recovery for route install failure
Quagga does not have proper recovery for route install failure (in
the kernel). The lack of this may not be a significant issue if the
failure is only an exception. However, the introduction of route
replace presents a new failure scenario which was not there earlier.
Before replace, the update operation involved a delete followed by
add; the failure of add would not leave hanging route entries in the
kernel as they would've got deleted first. With route replace, if
the replace fails, recovery action to delete the route is needed, else
the route remains hanging in the kernel.
In particular, with VRFs and in the presence of ECMP/multipath, a
failure mode exists where Quagga thinks that routes have been cleaned
up and deleted from the kernel but the kernel continues to retain them.
This happens when multiple VRF interfaces are moved from one VRF to
another.
This patch addresses this scenario by implementing proper recovery for
route install failure.
Signed-off-by: Vivek Venkatraman <vivek@cumulusnetworks.com>
Reviewed-by: Dinesh Dutt <ddutt@cumulusnetworks.com>
Reviewed-by: Donald Sharp <sharpd@cumulusnetworks.com>
Ticket: CM-10361
Reviewed By: CCR-4566
Testing Done: bgp-min, ospf-min, bgp-smoke, ospf-smoke and manual
Note: There are some test failures and results aren't consistent across
runs; Daniel has resolved many of these through other fixes.
2016-04-29 07:09:17 +02:00
|
|
|
}
|
2017-05-12 15:34:51 +02:00
|
|
|
|
|
|
|
/* If labeled-unicast route, uninstall transit LSP. */
|
|
|
|
if (zebra_rib_labeled_unicast (old))
|
|
|
|
zebra_mpls_lsp_uninstall (zvrf, rn, old);
|
|
|
|
|
zebra: Implement recovery for route install failure
Quagga does not have proper recovery for route install failure (in
the kernel). The lack of this may not be a significant issue if the
failure is only an exception. However, the introduction of route
replace presents a new failure scenario which was not there earlier.
Before replace, the update operation involved a delete followed by
add; the failure of add would not leave hanging route entries in the
kernel as they would've got deleted first. With route replace, if
the replace fails, recovery action to delete the route is needed, else
the route remains hanging in the kernel.
In particular, with VRFs and in the presence of ECMP/multipath, a
failure mode exists where Quagga thinks that routes have been cleaned
up and deleted from the kernel but the kernel continues to retain them.
This happens when multiple VRF interfaces are moved from one VRF to
another.
This patch addresses this scenario by implementing proper recovery for
route install failure.
Signed-off-by: Vivek Venkatraman <vivek@cumulusnetworks.com>
Reviewed-by: Dinesh Dutt <ddutt@cumulusnetworks.com>
Reviewed-by: Donald Sharp <sharpd@cumulusnetworks.com>
Ticket: CM-10361
Reviewed By: CCR-4566
Testing Done: bgp-min, ospf-min, bgp-smoke, ospf-smoke and manual
Note: There are some test failures and results aren't consistent across
runs; Daniel has resolved many of these through other fixes.
2016-04-29 07:09:17 +02:00
|
|
|
/* Non-system route should be installed. */
|
2016-01-15 16:36:31 +01:00
|
|
|
if (!RIB_SYSTEM_ROUTE (new))
|
zebra: Implement recovery for route install failure
Quagga does not have proper recovery for route install failure (in
the kernel). The lack of this may not be a significant issue if the
failure is only an exception. However, the introduction of route
replace presents a new failure scenario which was not there earlier.
Before replace, the update operation involved a delete followed by
add; the failure of add would not leave hanging route entries in the
kernel as they would've got deleted first. With route replace, if
the replace fails, recovery action to delete the route is needed, else
the route remains hanging in the kernel.
In particular, with VRFs and in the presence of ECMP/multipath, a
failure mode exists where Quagga thinks that routes have been cleaned
up and deleted from the kernel but the kernel continues to retain them.
This happens when multiple VRF interfaces are moved from one VRF to
another.
This patch addresses this scenario by implementing proper recovery for
route install failure.
Signed-off-by: Vivek Venkatraman <vivek@cumulusnetworks.com>
Reviewed-by: Dinesh Dutt <ddutt@cumulusnetworks.com>
Reviewed-by: Donald Sharp <sharpd@cumulusnetworks.com>
Ticket: CM-10361
Reviewed By: CCR-4566
Testing Done: bgp-min, ospf-min, bgp-smoke, ospf-smoke and manual
Note: There are some test failures and results aren't consistent across
runs; Daniel has resolved many of these through other fixes.
2016-04-29 07:09:17 +02:00
|
|
|
{
|
2017-02-02 18:58:33 +01:00
|
|
|
/* If labeled-unicast route, install transit LSP. */
|
|
|
|
if (zebra_rib_labeled_unicast (new))
|
|
|
|
zebra_mpls_lsp_install (zvrf, rn, new);
|
|
|
|
|
2016-12-16 13:48:37 +01:00
|
|
|
if (rib_install_kernel (rn, new, old))
|
zebra: Implement recovery for route install failure
Quagga does not have proper recovery for route install failure (in
the kernel). The lack of this may not be a significant issue if the
failure is only an exception. However, the introduction of route
replace presents a new failure scenario which was not there earlier.
Before replace, the update operation involved a delete followed by
add; the failure of add would not leave hanging route entries in the
kernel as they would've got deleted first. With route replace, if
the replace fails, recovery action to delete the route is needed, else
the route remains hanging in the kernel.
In particular, with VRFs and in the presence of ECMP/multipath, a
failure mode exists where Quagga thinks that routes have been cleaned
up and deleted from the kernel but the kernel continues to retain them.
This happens when multiple VRF interfaces are moved from one VRF to
another.
This patch addresses this scenario by implementing proper recovery for
route install failure.
Signed-off-by: Vivek Venkatraman <vivek@cumulusnetworks.com>
Reviewed-by: Dinesh Dutt <ddutt@cumulusnetworks.com>
Reviewed-by: Donald Sharp <sharpd@cumulusnetworks.com>
Ticket: CM-10361
Reviewed By: CCR-4566
Testing Done: bgp-min, ospf-min, bgp-smoke, ospf-smoke and manual
Note: There are some test failures and results aren't consistent across
runs; Daniel has resolved many of these through other fixes.
2016-04-29 07:09:17 +02:00
|
|
|
{
|
2016-12-05 20:05:30 +01:00
|
|
|
char buf[SRCDEST2STR_BUFFER];
|
|
|
|
srcdest_rnode2str(rn, buf, sizeof(buf));
|
zebra: Implement recovery for route install failure
Quagga does not have proper recovery for route install failure (in
the kernel). The lack of this may not be a significant issue if the
failure is only an exception. However, the introduction of route
replace presents a new failure scenario which was not there earlier.
Before replace, the update operation involved a delete followed by
add; the failure of add would not leave hanging route entries in the
kernel as they would've got deleted first. With route replace, if
the replace fails, recovery action to delete the route is needed, else
the route remains hanging in the kernel.
In particular, with VRFs and in the presence of ECMP/multipath, a
failure mode exists where Quagga thinks that routes have been cleaned
up and deleted from the kernel but the kernel continues to retain them.
This happens when multiple VRF interfaces are moved from one VRF to
another.
This patch addresses this scenario by implementing proper recovery for
route install failure.
Signed-off-by: Vivek Venkatraman <vivek@cumulusnetworks.com>
Reviewed-by: Dinesh Dutt <ddutt@cumulusnetworks.com>
Reviewed-by: Donald Sharp <sharpd@cumulusnetworks.com>
Ticket: CM-10361
Reviewed By: CCR-4566
Testing Done: bgp-min, ospf-min, bgp-smoke, ospf-smoke and manual
Note: There are some test failures and results aren't consistent across
runs; Daniel has resolved many of these through other fixes.
2016-04-29 07:09:17 +02:00
|
|
|
installed = 0;
|
2016-12-05 20:05:30 +01:00
|
|
|
zlog_warn ("%u:%s: Route install failed", zvrf_id (zvrf), buf);
|
zebra: Implement recovery for route install failure
Quagga does not have proper recovery for route install failure (in
the kernel). The lack of this may not be a significant issue if the
failure is only an exception. However, the introduction of route
replace presents a new failure scenario which was not there earlier.
Before replace, the update operation involved a delete followed by
add; the failure of add would not leave hanging route entries in the
kernel as they would've got deleted first. With route replace, if
the replace fails, recovery action to delete the route is needed, else
the route remains hanging in the kernel.
In particular, with VRFs and in the presence of ECMP/multipath, a
failure mode exists where Quagga thinks that routes have been cleaned
up and deleted from the kernel but the kernel continues to retain them.
This happens when multiple VRF interfaces are moved from one VRF to
another.
This patch addresses this scenario by implementing proper recovery for
route install failure.
Signed-off-by: Vivek Venkatraman <vivek@cumulusnetworks.com>
Reviewed-by: Dinesh Dutt <ddutt@cumulusnetworks.com>
Reviewed-by: Donald Sharp <sharpd@cumulusnetworks.com>
Ticket: CM-10361
Reviewed By: CCR-4566
Testing Done: bgp-min, ospf-min, bgp-smoke, ospf-smoke and manual
Note: There are some test failures and results aren't consistent across
runs; Daniel has resolved many of these through other fixes.
2016-04-29 07:09:17 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* If install succeeded or system route, cleanup flags for prior route. */
|
2016-01-15 16:36:31 +01:00
|
|
|
if (installed && new != old)
|
zebra: Implement recovery for route install failure
Quagga does not have proper recovery for route install failure (in
the kernel). The lack of this may not be a significant issue if the
failure is only an exception. However, the introduction of route
replace presents a new failure scenario which was not there earlier.
Before replace, the update operation involved a delete followed by
add; the failure of add would not leave hanging route entries in the
kernel as they would've got deleted first. With route replace, if
the replace fails, recovery action to delete the route is needed, else
the route remains hanging in the kernel.
In particular, with VRFs and in the presence of ECMP/multipath, a
failure mode exists where Quagga thinks that routes have been cleaned
up and deleted from the kernel but the kernel continues to retain them.
This happens when multiple VRF interfaces are moved from one VRF to
another.
This patch addresses this scenario by implementing proper recovery for
route install failure.
Signed-off-by: Vivek Venkatraman <vivek@cumulusnetworks.com>
Reviewed-by: Dinesh Dutt <ddutt@cumulusnetworks.com>
Reviewed-by: Donald Sharp <sharpd@cumulusnetworks.com>
Ticket: CM-10361
Reviewed By: CCR-4566
Testing Done: bgp-min, ospf-min, bgp-smoke, ospf-smoke and manual
Note: There are some test failures and results aren't consistent across
runs; Daniel has resolved many of these through other fixes.
2016-04-29 07:09:17 +02:00
|
|
|
{
|
2016-01-15 16:36:31 +01:00
|
|
|
if (RIB_SYSTEM_ROUTE(new))
|
zebra: Implement recovery for route install failure
Quagga does not have proper recovery for route install failure (in
the kernel). The lack of this may not be a significant issue if the
failure is only an exception. However, the introduction of route
replace presents a new failure scenario which was not there earlier.
Before replace, the update operation involved a delete followed by
add; the failure of add would not leave hanging route entries in the
kernel as they would've got deleted first. With route replace, if
the replace fails, recovery action to delete the route is needed, else
the route remains hanging in the kernel.
In particular, with VRFs and in the presence of ECMP/multipath, a
failure mode exists where Quagga thinks that routes have been cleaned
up and deleted from the kernel but the kernel continues to retain them.
This happens when multiple VRF interfaces are moved from one VRF to
another.
This patch addresses this scenario by implementing proper recovery for
route install failure.
Signed-off-by: Vivek Venkatraman <vivek@cumulusnetworks.com>
Reviewed-by: Dinesh Dutt <ddutt@cumulusnetworks.com>
Reviewed-by: Donald Sharp <sharpd@cumulusnetworks.com>
Ticket: CM-10361
Reviewed By: CCR-4566
Testing Done: bgp-min, ospf-min, bgp-smoke, ospf-smoke and manual
Note: There are some test failures and results aren't consistent across
runs; Daniel has resolved many of these through other fixes.
2016-04-29 07:09:17 +02:00
|
|
|
{
|
2016-01-15 16:36:31 +01:00
|
|
|
if (!RIB_SYSTEM_ROUTE (old))
|
|
|
|
rib_uninstall_kernel (rn, old);
|
zebra: Implement recovery for route install failure
Quagga does not have proper recovery for route install failure (in
the kernel). The lack of this may not be a significant issue if the
failure is only an exception. However, the introduction of route
replace presents a new failure scenario which was not there earlier.
Before replace, the update operation involved a delete followed by
add; the failure of add would not leave hanging route entries in the
kernel as they would've got deleted first. With route replace, if
the replace fails, recovery action to delete the route is needed, else
the route remains hanging in the kernel.
In particular, with VRFs and in the presence of ECMP/multipath, a
failure mode exists where Quagga thinks that routes have been cleaned
up and deleted from the kernel but the kernel continues to retain them.
This happens when multiple VRF interfaces are moved from one VRF to
another.
This patch addresses this scenario by implementing proper recovery for
route install failure.
Signed-off-by: Vivek Venkatraman <vivek@cumulusnetworks.com>
Reviewed-by: Dinesh Dutt <ddutt@cumulusnetworks.com>
Reviewed-by: Donald Sharp <sharpd@cumulusnetworks.com>
Ticket: CM-10361
Reviewed By: CCR-4566
Testing Done: bgp-min, ospf-min, bgp-smoke, ospf-smoke and manual
Note: There are some test failures and results aren't consistent across
runs; Daniel has resolved many of these through other fixes.
2016-04-29 07:09:17 +02:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2016-01-15 16:36:31 +01:00
|
|
|
for (nexthop = old->nexthop; nexthop; nexthop = nexthop->next)
|
zebra: Implement recovery for route install failure
Quagga does not have proper recovery for route install failure (in
the kernel). The lack of this may not be a significant issue if the
failure is only an exception. However, the introduction of route
replace presents a new failure scenario which was not there earlier.
Before replace, the update operation involved a delete followed by
add; the failure of add would not leave hanging route entries in the
kernel as they would've got deleted first. With route replace, if
the replace fails, recovery action to delete the route is needed, else
the route remains hanging in the kernel.
In particular, with VRFs and in the presence of ECMP/multipath, a
failure mode exists where Quagga thinks that routes have been cleaned
up and deleted from the kernel but the kernel continues to retain them.
This happens when multiple VRF interfaces are moved from one VRF to
another.
This patch addresses this scenario by implementing proper recovery for
route install failure.
Signed-off-by: Vivek Venkatraman <vivek@cumulusnetworks.com>
Reviewed-by: Dinesh Dutt <ddutt@cumulusnetworks.com>
Reviewed-by: Donald Sharp <sharpd@cumulusnetworks.com>
Ticket: CM-10361
Reviewed By: CCR-4566
Testing Done: bgp-min, ospf-min, bgp-smoke, ospf-smoke and manual
Note: There are some test failures and results aren't consistent across
runs; Daniel has resolved many of these through other fixes.
2016-04-29 07:09:17 +02:00
|
|
|
UNSET_FLAG (nexthop->flags, NEXTHOP_FLAG_FIB);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Update for redistribution. */
|
|
|
|
if (installed)
|
2017-06-01 13:26:25 +02:00
|
|
|
SET_FLAG (new->status, ROUTE_ENTRY_SELECTED_FIB);
|
zebra: Implement recovery for route install failure
Quagga does not have proper recovery for route install failure (in
the kernel). The lack of this may not be a significant issue if the
failure is only an exception. However, the introduction of route
replace presents a new failure scenario which was not there earlier.
Before replace, the update operation involved a delete followed by
add; the failure of add would not leave hanging route entries in the
kernel as they would've got deleted first. With route replace, if
the replace fails, recovery action to delete the route is needed, else
the route remains hanging in the kernel.
In particular, with VRFs and in the presence of ECMP/multipath, a
failure mode exists where Quagga thinks that routes have been cleaned
up and deleted from the kernel but the kernel continues to retain them.
This happens when multiple VRF interfaces are moved from one VRF to
another.
This patch addresses this scenario by implementing proper recovery for
route install failure.
Signed-off-by: Vivek Venkatraman <vivek@cumulusnetworks.com>
Reviewed-by: Dinesh Dutt <ddutt@cumulusnetworks.com>
Reviewed-by: Donald Sharp <sharpd@cumulusnetworks.com>
Ticket: CM-10361
Reviewed By: CCR-4566
Testing Done: bgp-min, ospf-min, bgp-smoke, ospf-smoke and manual
Note: There are some test failures and results aren't consistent across
runs; Daniel has resolved many of these through other fixes.
2016-04-29 07:09:17 +02:00
|
|
|
}

      /*
       * If nexthop for selected route is not active or install failed, we
       * may need to uninstall and delete for redistribution.
       */
      if (!nh_active || !installed)
        {
          if (IS_ZEBRA_DEBUG_RIB)
            {
              char buf[SRCDEST2STR_BUFFER];
              srcdest_rnode2str (rn, buf, sizeof (buf));
              if (new != old)
                zlog_debug ("%u:%s: Deleting route rn %p, re %p (type %d) "
                            "old %p (type %d) - %s", zvrf_id (zvrf), buf,
                            rn, new, new->type, old, old->type,
                            nh_active ? "install failed" : "nexthop inactive");
              else
                zlog_debug ("%u:%s: Deleting route rn %p, re %p (type %d) - %s",
                            zvrf_id (zvrf), buf, rn, new, new->type,
                            nh_active ? "install failed" : "nexthop inactive");
            }

          /* If labeled-unicast route, uninstall transit LSP. */
          if (zebra_rib_labeled_unicast (old))
            zebra_mpls_lsp_uninstall (zvrf, rn, old);

          if (!RIB_SYSTEM_ROUTE (old))
            rib_uninstall_kernel (rn, old);
          UNSET_FLAG (new->status, ROUTE_ENTRY_SELECTED_FIB);
        }
    }
  else
    {
      /*
       * Same route selected; check if in the FIB and if not, re-install.
       * This is housekeeping to deal with a race in the kernel, where Linux
       * netlink reports an interface as up before the IPv4 or IPv6 protocol
       * is ready to accept routes.
       */
      if (!RIB_SYSTEM_ROUTE (new))
        {
          int in_fib = 0;

          for (ALL_NEXTHOPS_RO (new->nexthop, nexthop, tnexthop, recursing))
            if (CHECK_FLAG (nexthop->flags, NEXTHOP_FLAG_FIB))
              {
                in_fib = 1;
                break;
              }
          if (!in_fib)
            rib_install_kernel (rn, new, NULL);
        }
    }

  /* Update prior route. */
  if (new != old)
    {
      UNSET_FLAG (old->status, ROUTE_ENTRY_SELECTED_FIB);

      /* Set real nexthop. */
      nexthop_active_update (rn, old, 1);
      UNSET_FLAG (old->status, ROUTE_ENTRY_CHANGED);
    }

  /* Clear changed flag. */
  UNSET_FLAG (new->status, ROUTE_ENTRY_CHANGED);
}
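
/*
 * In short, the update path above ends in one of four ways:
 *
 *   nh_active  installed  outcome
 *   ---------  ---------  --------------------------------------------
 *   yes        yes        new route in the kernel, SELECTED_FIB set
 *   yes        no         replace failed: old route uninstalled and
 *                         SELECTED_FIB cleared, so nothing is left
 *                         hanging in the kernel
 *   no         (any)      nexthop inactive: old route uninstalled,
 *                         SELECTED_FIB cleared
 *   new == old, unchanged: re-installed only if no nexthop carries
 *                          NEXTHOP_FLAG_FIB (netlink race housekeeping)
 */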

/* Check if 'alternate' RIB entry is better than 'current'. */
static struct route_entry *
rib_choose_best (struct route_entry *current, struct route_entry *alternate)
{
  if (current == NULL)
    return alternate;

  /* filter route selection in following order:
   * - connected beats other types
   * - lower distance beats higher
   * - lower metric beats higher for equal distance
   * - last, hence oldest, route wins tie break.
   */

  /* Connected routes. Pick the last connected
   * route of the set of lowest metric connected routes.
   */
  if (alternate->type == ZEBRA_ROUTE_CONNECT)
    {
      if (current->type != ZEBRA_ROUTE_CONNECT
          || alternate->metric <= current->metric)
        return alternate;

      return current;
    }

  if (current->type == ZEBRA_ROUTE_CONNECT)
    return current;

  /* higher distance loses */
  if (alternate->distance < current->distance)
    return alternate;
  if (current->distance < alternate->distance)
    return current;

  /* metric tie-breaks equal distance */
  if (alternate->metric <= current->metric)
    return alternate;

  return current;
}
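
/* A minimal sketch, deliberately kept out of the build, of the selection
 * order implemented above.  Only the fields rib_choose_best() actually
 * consults (type, distance, metric) are filled in; the admin distances
 * used are the conventional ones (static 1, OSPF 110).
 */
#if 0
static void
rib_choose_best_example (void)
{
  struct route_entry ospf, stc, conn;

  memset (&ospf, 0, sizeof (ospf));
  memset (&stc, 0, sizeof (stc));
  memset (&conn, 0, sizeof (conn));

  ospf.type = ZEBRA_ROUTE_OSPF;    ospf.distance = 110; ospf.metric = 20;
  stc.type  = ZEBRA_ROUTE_STATIC;  stc.distance  = 1;   stc.metric  = 0;
  conn.type = ZEBRA_ROUTE_CONNECT; conn.distance = 0;   conn.metric = 0;

  /* Lower distance beats higher: the static route wins over OSPF. */
  assert (rib_choose_best (&ospf, &stc) == &stc);

  /* Connected beats any other type, regardless of distance or metric. */
  assert (rib_choose_best (&stc, &conn) == &conn);
}
#endif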

/* Core function for processing routing information base. */
static void
rib_process (struct route_node *rn)
{
  struct route_entry *re;
  struct route_entry *next;
  struct route_entry *old_selected = NULL;
  struct route_entry *new_selected = NULL;
  struct route_entry *old_fib = NULL;
  struct route_entry *new_fib = NULL;
  struct route_entry *best = NULL;
  char buf[SRCDEST2STR_BUFFER];
  rib_dest_t *dest;
  struct zebra_vrf *zvrf = NULL;
  struct prefix *p, *src_p;
  vrf_id_t vrf_id = VRF_UNKNOWN;

  assert (rn);

  srcdest_rnode_prefixes (rn, &p, &src_p);

  dest = rib_dest_from_rnode (rn);
  if (dest)
    {
      zvrf = rib_dest_vrf (dest);
      vrf_id = zvrf_id (zvrf);
    }

  if (IS_ZEBRA_DEBUG_RIB)
    srcdest_rnode2str (rn, buf, sizeof (buf));

  if (IS_ZEBRA_DEBUG_RIB_DETAILED)
    zlog_debug ("%u:%s: Processing rn %p", vrf_id, buf, rn);

  RNODE_FOREACH_RE_SAFE (rn, re, next)
    {
      if (IS_ZEBRA_DEBUG_RIB_DETAILED)
        zlog_debug ("%u:%s: Examine re %p (type %d) status %x flags %x "
                    "dist %d metric %d",
                    vrf_id, buf, re, re->type, re->status,
                    re->flags, re->distance, re->metric);

      UNSET_FLAG (re->status, ROUTE_ENTRY_NEXTHOPS_CHANGED);

      /* Currently selected re. */
      if (CHECK_FLAG (re->flags, ZEBRA_FLAG_SELECTED))
        {
          assert (old_selected == NULL);
          old_selected = re;
        }
      /* Currently in fib */
      if (CHECK_FLAG (re->status, ROUTE_ENTRY_SELECTED_FIB))
        {
          assert (old_fib == NULL);
          old_fib = re;
        }

      /* Skip deleted entries from selection */
      if (CHECK_FLAG (re->status, ROUTE_ENTRY_REMOVED))
        continue;

      /* Skip unreachable nexthop. */
      /* This first call to nexthop_active_update is merely to determine if
       * there's any change to nexthops associated with this RIB entry. Now,
       * rib_process() can be invoked due to an external event such as link
       * down or due to next-hop-tracking evaluation. In the latter case,
       * a decision has already been made that the NHs have changed, so there
       * is no need to invoke a potentially expensive call again. Note that
       * the change might be in a recursive NH which is not caught by
       * nexthop_active_update(); thus, we might miss changes to recursive NHs.
       */
      if (!CHECK_FLAG (re->status, ROUTE_ENTRY_CHANGED) &&
          ! nexthop_active_update (rn, re, 0))
        {
          if (re->type == ZEBRA_ROUTE_TABLE)
            {
              /* XXX: HERE BE DRAGONS!!!!!
               * In all honesty, I have not yet figured out what this part
               * does or why the ROUTE_ENTRY_CHANGED test above is correct
               * or why we need to delete a route here, and also not whether
               * this concerns both selected and fib route, or only selected
               * or only fib */
              /* This entry was denied by the 'ip protocol table' route-map;
               * we need to delete it */
              if (re != old_selected)
                {
                  if (IS_ZEBRA_DEBUG_RIB)
                    zlog_debug ("%s: %s: imported via import-table but denied "
                                "by the ip protocol table route-map",
                                __func__, buf);
                  rib_unlink (rn, re);
                }
              else
                SET_FLAG (re->status, ROUTE_ENTRY_REMOVED);
            }

          continue;
        }

      /* Infinite distance. */
      if (re->distance == DISTANCE_INFINITY)
        {
          UNSET_FLAG (re->status, ROUTE_ENTRY_CHANGED);
          continue;
        }

      if (CHECK_FLAG (re->flags, ZEBRA_FLAG_FIB_OVERRIDE))
        {
          best = rib_choose_best (new_fib, re);
          if (new_fib && best != new_fib)
            UNSET_FLAG (new_fib->status, ROUTE_ENTRY_CHANGED);
          new_fib = best;
        }
      else
        {
          best = rib_choose_best (new_selected, re);
          if (new_selected && best != new_selected)
            UNSET_FLAG (new_selected->status, ROUTE_ENTRY_CHANGED);
          new_selected = best;
        }
      if (best != re)
        UNSET_FLAG (re->status, ROUTE_ENTRY_CHANGED);
    } /* RNODE_FOREACH_RE */

  /* If no FIB override route, use the selected route also for FIB */
  if (new_fib == NULL)
    new_fib = new_selected;

  /* After the cycle is finished, the following pointers will be set:
   * old_selected --- RE entry currently having SELECTED
   * new_selected --- RE entry that is newly SELECTED
   * old_fib --- RE entry currently in kernel FIB
   * new_fib --- RE entry that is newly to be in kernel FIB
   *
   * new_selected will get the SELECTED flag, and is going to be redistributed
   * to the zclients. new_fib (which can be new_selected) will be installed
   * in the kernel.
   */

  if (IS_ZEBRA_DEBUG_RIB_DETAILED)
    {
      zlog_debug ("%u:%s: After processing: old_selected %p new_selected %p old_fib %p new_fib %p",
                  vrf_id, buf,
                  (void *)old_selected,
                  (void *)new_selected,
                  (void *)old_fib,
                  (void *)new_fib);
    }

  /* Buffer ROUTE_ENTRY_CHANGED here, because it will get cleared if
   * fib == selected */
  bool selected_changed = new_selected && CHECK_FLAG (new_selected->status,
                                                      ROUTE_ENTRY_CHANGED);

  /* Update fib according to selection results */
  if (new_fib && old_fib)
    rib_process_update_fib (zvrf, rn, old_fib, new_fib);
  else if (new_fib)
    rib_process_add_fib (zvrf, rn, new_fib);
  else if (old_fib)
    rib_process_del_fib (zvrf, rn, old_fib);

  /* Redistribute SELECTED entry */
  if (old_selected != new_selected || selected_changed)
    {
      struct nexthop *nexthop, *tnexthop;
      int recursing;

      /* Check if we have a FIB route for the destination; otherwise,
       * don't redistribute it */
      for (ALL_NEXTHOPS_RO (new_fib ? new_fib->nexthop : NULL, nexthop,
                            tnexthop, recursing))
        {
          if (CHECK_FLAG (nexthop->flags, NEXTHOP_FLAG_FIB))
            {
              break;
            }
        }
      if (!nexthop)
        new_selected = NULL;

      if (new_selected && new_selected != new_fib)
        {
          nexthop_active_update (rn, new_selected, 1);
          UNSET_FLAG (new_selected->status, ROUTE_ENTRY_CHANGED);
        }

      if (old_selected)
        {
          if (!new_selected)
            redistribute_delete (p, src_p, old_selected);
          if (old_selected != new_selected)
            UNSET_FLAG (old_selected->flags, ZEBRA_FLAG_SELECTED);
        }

      if (new_selected)
        {
          /* Install new or replace existing redistributed entry */
          SET_FLAG (new_selected->flags, ZEBRA_FLAG_SELECTED);
          redistribute_update (p, src_p, new_selected, old_selected);
        }
    }

  /* Remove all RE entries queued for removal */
  RNODE_FOREACH_RE_SAFE (rn, re, next)
    {
      if (CHECK_FLAG (re->status, ROUTE_ENTRY_REMOVED))
        {
          if (IS_ZEBRA_DEBUG_RIB)
            {
              rnode_debug (rn, vrf_id, "rn %p, removing re %p",
                           (void *)rn, (void *)re);
            }
          rib_unlink (rn, re);
        }
    }

  /*
   * Check if the dest can be deleted now.
   */
  rib_gc_dest (rn);
}
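
/*
 * A worked example of the selection pass above: suppose a node holds a
 * kernel route (distance 0) and a BGP route (distance 20), neither with
 * ZEBRA_FLAG_FIB_OVERRIDE set.  rib_choose_best() prefers the lower
 * distance, so the kernel entry becomes new_selected and, with no FIB
 * override entry present, also new_fib; the BGP entry merely has its
 * CHANGED flag cleared.
 */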

/* Take a list of route_node structs and return 1 if a record was picked
 * from it and processed by rib_process(). Don't process more than one
 * RN record; operate only on the specified sub-queue.
 */
static unsigned int
process_subq (struct list *subq, u_char qindex)
{
  struct listnode *lnode = listhead (subq);
  struct route_node *rnode;
  rib_dest_t *dest;
  struct zebra_vrf *zvrf = NULL;

  if (!lnode)
    return 0;

  rnode = listgetdata (lnode);
  dest = rib_dest_from_rnode (rnode);
  if (dest)
    zvrf = rib_dest_vrf (dest);

  rib_process (rnode);

  if (IS_ZEBRA_DEBUG_RIB_DETAILED)
    {
      char buf[SRCDEST2STR_BUFFER];
      srcdest_rnode2str (rnode, buf, sizeof (buf));
      zlog_debug ("%u:%s: rn %p dequeued from sub-queue %u",
                  zvrf ? zvrf_id (zvrf) : 0, buf, rnode, qindex);
    }

  if (rnode->info)
    UNSET_FLAG (rib_dest_from_rnode (rnode)->flags, RIB_ROUTE_QUEUED (qindex));

#if 0
  else
    {
      zlog_debug ("%s: called for route_node (%p, %d) with no ribs",
                  __func__, rnode, rnode->lock);
      zlog_backtrace (LOG_DEBUG);
    }
#endif

  route_unlock_node (rnode);
  list_delete_node (subq, lnode);
  return 1;
}

/*
 * All meta queues have been processed. Trigger next-hop evaluation.
 */
static void
meta_queue_process_complete (struct work_queue *dummy)
{
  struct vrf *vrf;
  struct zebra_vrf *zvrf;

  /* Evaluate nexthops for those VRFs which underwent route processing. This
   * should limit the evaluation to the necessary VRFs in most common
   * situations.
   */
  RB_FOREACH (vrf, vrf_id_head, &vrfs_by_id)
    {
      zvrf = vrf->info;
      if (zvrf == NULL || !(zvrf->flags & ZEBRA_VRF_RIB_SCHEDULED))
        continue;

      zvrf->flags &= ~ZEBRA_VRF_RIB_SCHEDULED;
      zebra_evaluate_rnh (zvrf_id (zvrf), AF_INET, 0, RNH_NEXTHOP_TYPE, NULL);
      zebra_evaluate_rnh (zvrf_id (zvrf), AF_INET, 0, RNH_IMPORT_CHECK_TYPE, NULL);
      zebra_evaluate_rnh (zvrf_id (zvrf), AF_INET6, 0, RNH_NEXTHOP_TYPE, NULL);
      zebra_evaluate_rnh (zvrf_id (zvrf), AF_INET6, 0, RNH_IMPORT_CHECK_TYPE, NULL);
    }

  /* Schedule LSPs for processing, if needed. */
  zvrf = vrf_info_lookup (VRF_DEFAULT);
  if (mpls_should_lsps_be_processed (zvrf))
    {
      if (IS_ZEBRA_DEBUG_MPLS)
        zlog_debug ("%u: Scheduling all LSPs upon RIB completion", zvrf_id (zvrf));
      zebra_mpls_lsp_schedule (zvrf);
      mpls_unmark_lsps_for_processing (zvrf);
    }
}

/* Dispatch the meta queue by picking, processing and unlocking the next RN
 * from the lowest-numbered non-empty sub-queue. wq is equal to zebra->ribq
 * and data points to the meta queue structure.
 */
static wq_item_status
meta_queue_process (struct work_queue *dummy, void *data)
{
  struct meta_queue *mq = data;
  unsigned i;

  for (i = 0; i < MQ_SIZE; i++)
    if (process_subq (mq->subq[i], i))
      {
        mq->size--;
        break;
      }
  return mq->size ? WQ_REQUEUE : WQ_SUCCESS;
}
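
/*
 * Note that meta_queue_process() drains at most one route_node per
 * invocation and returns WQ_REQUEUE while entries remain, so the work
 * queue keeps re-running it: e.g. with three nodes queued it takes three
 * passes, each time serving the lowest-numbered non-empty sub-queue.
 */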

/*
 * Map from rib types to queue type (priority) in meta queue
 */
static const u_char meta_queue_map[ZEBRA_ROUTE_MAX] = {
  [ZEBRA_ROUTE_SYSTEM]  = 4,
  [ZEBRA_ROUTE_KERNEL]  = 0,
  [ZEBRA_ROUTE_CONNECT] = 0,
  [ZEBRA_ROUTE_STATIC]  = 1,
  [ZEBRA_ROUTE_RIP]     = 2,
  [ZEBRA_ROUTE_RIPNG]   = 2,
  [ZEBRA_ROUTE_OSPF]    = 2,
  [ZEBRA_ROUTE_OSPF6]   = 2,
  [ZEBRA_ROUTE_ISIS]    = 2,
  [ZEBRA_ROUTE_BGP]     = 3,
  [ZEBRA_ROUTE_PIM]     = 4, // Shouldn't happen but for safety
  [ZEBRA_ROUTE_EIGRP]   = 2,
  [ZEBRA_ROUTE_NHRP]    = 2,
  [ZEBRA_ROUTE_HSLS]    = 4,
  [ZEBRA_ROUTE_OLSR]    = 4,
  [ZEBRA_ROUTE_TABLE]   = 1,
  [ZEBRA_ROUTE_LDP]     = 4,
  [ZEBRA_ROUTE_VNC]     = 3,
  [ZEBRA_ROUTE_VNC_DIRECT]     = 3,
  [ZEBRA_ROUTE_VNC_DIRECT_RH]  = 3,
  [ZEBRA_ROUTE_BGP_DIRECT]     = 3,
  [ZEBRA_ROUTE_BGP_DIRECT_EXT] = 3,
  [ZEBRA_ROUTE_BABEL]   = 2,
  [ZEBRA_ROUTE_ALL]     = 4, // Shouldn't happen but for safety
};
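
/*
 * Since process_subq() is driven from sub-queue 0 upwards, a lower value
 * in this map means earlier processing: kernel and connected routes
 * settle first, then static and table routes, then the IGPs, then BGP,
 * whose nexthops may recursively resolve through any of the former.
 */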

/* Look into the RN and queue it into one or more priority queues,
 * increasing the size for each data push done.
 */
static void
rib_meta_queue_add (struct meta_queue *mq, struct route_node *rn)
{
  struct route_entry *re;

  RNODE_FOREACH_RE (rn, re)
    {
      u_char qindex = meta_queue_map[re->type];
      struct zebra_vrf *zvrf;

      /* Invariant: at this point we always have rn->info set. */
      if (CHECK_FLAG (rib_dest_from_rnode (rn)->flags,
                      RIB_ROUTE_QUEUED (qindex)))
        {
          if (IS_ZEBRA_DEBUG_RIB_DETAILED)
            rnode_debug (rn, re->vrf_id, "rn %p is already queued in sub-queue %u",
                         (void *)rn, qindex);
          continue;
        }

      SET_FLAG (rib_dest_from_rnode (rn)->flags, RIB_ROUTE_QUEUED (qindex));
      listnode_add (mq->subq[qindex], rn);
      route_lock_node (rn);
      mq->size++;

      if (IS_ZEBRA_DEBUG_RIB_DETAILED)
        rnode_debug (rn, re->vrf_id, "queued rn %p into sub-queue %u",
                     (void *)rn, qindex);

      zvrf = zebra_vrf_lookup_by_id (re->vrf_id);
      if (zvrf)
        zvrf->flags |= ZEBRA_VRF_RIB_SCHEDULED;
    }
}
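
/*
 * For example, a route_node holding both a connected entry and a BGP
 * entry is placed on sub-queue 0 and sub-queue 3: two list nodes, two
 * node locks and mq->size bumped twice.  rib_process() then runs on the
 * node once per dequeue, the connected pass coming first.
 */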
[zebra] Bug #268, Fix race between add/delete of routes, sanitise rib queueing
2006-07-27 Paul Jakma <paul.jakma@sun.com>
* rib.h: (struct rib) Add a route_node rn_status flag field,
this has to be copied every time head RIB of a route_node
changes.
Remove the rib lock field, not needed - see below.
Add a status field for RIB-private flags.
* zebra_rib.c: Add a global for the workqueue hold time, useful
for testing.
(general) Fix for bug #268. Problem originally
detailed by Simon Bryden in [quagga-dev 4001].
Essentially, add/delete of a RIB must happen /before/ the
queue. Best-path selection (ie rib_process) and reaping of
freed RIBs can then be done after queueing. Only the route_node
is queued - no important RIB state (i.e. whether a RIB is to be
deleted) is queued.
(struct zebra_queue_node_t) Disappears, no longer need to
track multiple things on the queue, only the route_node.
(rib_{lock,unlock}) removed, RIBs no longer need to be
refcounted, no longer queued.
(rib_queue_qnode_del) Removed, deleted RIBs no longer deleted
via the queue.
(rib_queue_add_qnode) deleted
(rib_queue_add) Only the route_node is queued for best-path
selection, we can check whether it is already queued or
not and avoid queueing same node twice - struct rib * argument
is not needed.
(rib_link/unlink) (un)link RIB from route_node.
(rib_{add,del}node) Front-end to updates of a RIB.
(rib_process) Reap any deleted RIBs via rib_unlink.
Unset the route_node 'QUEUED' flag.
(General) Remove calls to rib_queue_add where add/del node was
called - not needed, update calls where not.
Ignore RIB_ENTRY_REMOVEd ribs in loops through route_nodes
2006-07-27 23:49:00 +02:00
|
|
|
/* Add route_node to work queue and schedule processing */
|
2015-05-20 02:47:22 +02:00
|
|
|
void
|
2016-04-13 18:21:47 +02:00
|
|
|
rib_queue_add (struct route_node *rn)
|
2005-04-28 Paul Jakma <paul.jakma@sun.com>
* rib.h: (struct rib) Add lock field for refcounting.
* zserv.h: (struct zebra_t) Add a ribq workqueue to the zebra
'master' struct.
* zserv.c: (zread_ipv4_add) XMALLOC then memset should be XCALLOC.
* zebra_rib.c: Clean up refcounting of route_node, make struct rib
refcounted and convert rib_process to work-queue. In general,
rib's should be rib_addnode'd and delnode'd to route_nodes, and
these symmetrical functions will manage the locking of referenced
route_node and freeing of struct rib - rather than having users
manage each seperately - with much scope for bugs..
(newrib_free) removed and replaced with rib_lock
(rib_lock) new function, check state of lock and increment.
(rib_unlock) new function, check lock state and decrement. Free
struct rib if refcount hits 0, freeing struct nexthop's, as
newrib_free did.
(rib_addnode) Add RIB to route_node, locking both.
(rib_delnode) Delete RIB from route_node, unlocking each.
(rib_process) Converted to a work-queue work function.
Functional changes are minimal, just arguments, comments and
whitespace.
(rib_queue_add_qnode) Helper function to setup a ribq item.
(rib_queue_add) Helper function, same arguments as old
rib_process, to replace in callers of rib_process.
(rib_queue_qnode_del) ribq deconstructor.
(rib_queue_init) Create the ribq.
(rib_init) call rib_queue_init.
(remainder) Sanitise refcounting of route_node's. Convert to
rib_queue_add, rib_addnode and rib_delnode. Change XMALLOC/memset
to XCALLOC. Remove calls to nexthop_delete and nexthop_free.
2005-04-28 19:35:14 +02:00
|
|
|
{
|
2016-04-13 18:21:47 +02:00
|
|
|
assert (rn);
|
2005-04-28 Paul Jakma <paul.jakma@sun.com>
* rib.h: (struct rib) Add lock field for refcounting.
* zserv.h: (struct zebra_t) Add a ribq workqueue to the zebra
'master' struct.
* zserv.c: (zread_ipv4_add) XMALLOC then memset should be XCALLOC.
* zebra_rib.c: Clean up refcounting of route_node, make struct rib
refcounted and convert rib_process to work-queue. In general,
rib's should be rib_addnode'd and delnode'd to route_nodes, and
these symmetrical functions will manage the locking of referenced
route_node and freeing of struct rib - rather than having users
manage each seperately - with much scope for bugs..
(newrib_free) removed and replaced with rib_lock
(rib_lock) new function, check state of lock and increment.
(rib_unlock) new function, check lock state and decrement. Free
struct rib if refcount hits 0, freeing struct nexthop's, as
newrib_free did.
(rib_addnode) Add RIB to route_node, locking both.
(rib_delnode) Delete RIB from route_node, unlocking each.
(rib_process) Converted to a work-queue work function.
Functional changes are minimal, just arguments, comments and
whitespace.
(rib_queue_add_qnode) Helper function to setup a ribq item.
(rib_queue_add) Helper function, same arguments as old
rib_process, to replace in callers of rib_process.
(rib_queue_qnode_del) ribq deconstructor.
(rib_queue_init) Create the ribq.
(rib_init) call rib_queue_init.
(remainder) Sanitise refcounting of route_node's. Convert to
rib_queue_add, rib_addnode and rib_delnode. Change XMALLOC/memset
to XCALLOC. Remove calls to nexthop_delete and nexthop_free.
2005-04-28 19:35:14 +02:00
|
|
|
|
2012-03-28 01:35:22 +02:00
|
|
|
/* Pointless to queue a route_node with no RIB entries to add or remove */
|
2012-11-13 23:48:53 +01:00
|
|
|
if (!rnode_to_ribs (rn))
|
[zebra] Bug #268, Fix race between add/delete of routes, sanitise rib queueing
2006-07-27 Paul Jakma <paul.jakma@sun.com>
* rib.h: (struct rib) Add a route_node rn_status flag field,
this has to be copied every time head RIB of a route_node
changes.
Remove the rib lock field, not needed - see below.
Add a status field for RIB-private flags.
* zebra_rib.c: Add a global for the workqueue hold time, useful
for testing.
(general) Fix for bug #268. Problem originally
detailed by Simon Bryden in [quagga-dev 4001].
Essentially, add/delete of a RIB must happen /before/ the
queue. Best-path selection (ie rib_process) and reaping of
freed RIBs can then be done after queueing. Only the route_node
is queued - no important RIB state (i.e. whether a RIB is to be
deleted) is queued.
(struct zebra_queue_node_t) Disappears, no longer need to
track multiple things on the queue, only the route_node.
(rib_{lock,unlock}) removed, RIBs no longer need to be
refcounted, no longer queued.
(rib_queue_qnode_del) Removed, deleted RIBs no longer deleted
via the queue.
(rib_queue_add_qnode) deleted
(rib_queue_add) Only the route_node is queued for best-path
selection, we can check whether it is already queued or
not and avoid queueing same node twice - struct rib * argument
is not needed.
(rib_link/unlink) (un)link RIB from route_node.
(rib_{add,del}node) Front-end to updates of a RIB.
(rib_process) Reap any deleted RIBs via rib_unlink.
Unset the route_node 'QUEUED' flag.
(General) Remove calls to rib_queue_add where add/del node was
called - not needed, update calls where not.
Ignore RIB_ENTRY_REMOVEd ribs in loops through route_nodes
2006-07-27 23:49:00 +02:00
|
|
|
{
|
2012-03-28 01:35:22 +02:00
|
|
|
zlog_debug ("%s: called for route_node (%p, %d) with no ribs",
|
2015-03-03 08:51:53 +01:00
|
|
|
__func__, (void *)rn, rn->lock);
|
2012-03-28 01:35:22 +02:00
|
|
|
zlog_backtrace(LOG_DEBUG);
|
|
|
|
return;
|
|
|
|
}

  if (zebrad.ribq == NULL)
    {
      zlog_err ("%s: work_queue does not exist!", __func__);
      return;
    }

  /*
   * The RIB queue should normally be either empty or holding the only
   * work_queue_item element. In the latter case this element would
   * hold a pointer to the meta queue structure, which must be used to
   * actually queue the route nodes to process. So create the MQ
   * holder, if necessary, then push the work into it in any case.
   * These semantics were introduced after the 0.99.9 release.
   */
  if (!zebrad.ribq->items->count)
    work_queue_add (zebrad.ribq, zebrad.mq);

  rib_meta_queue_add (zebrad.mq, rn);

  return;
}
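
/* Illustrative usage sketch -- not part of the original source.  The two
 * levels above fit together as follows: zebrad.ribq (the work queue) holds
 * at most one item, a pointer to zebrad.mq (the meta queue), which in turn
 * fans queued route_nodes out into its sub-queues.  A caller only ever says:
 *
 *   rib_queue_add (rn);   // ensures the MQ holder is on the work queue,
 *                         // then files rn into the meta queue
 *
 * Repeated calls for the same route_node stay cheap, because a node that is
 * already queued is not requeued (see the block comment further below).
 */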

/* Create new meta queue.
   A destructor function doesn't seem to be necessary here.
 */
static struct meta_queue *
meta_queue_new (void)
{
  struct meta_queue *new;
  unsigned i;

  new = XCALLOC (MTYPE_WORK_QUEUE, sizeof (struct meta_queue));
  assert(new);

  for (i = 0; i < MQ_SIZE; i++)
    {
      new->subq[i] = list_new ();
      assert(new->subq[i]);
    }

  return new;
}
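
/* Sketch of the structure being built -- an assumption, not original
 * source: rib.h is taken to declare the meta queue roughly as
 *
 *   struct meta_queue
 *   {
 *     struct list *subq[MQ_SIZE];  // one sub-queue (list) per priority
 *     u_int32_t size;              // total number of queued route_nodes
 *   };
 *
 * Under that assumption, XCALLOC above zeroes 'size' and the pointer
 * array, so only the per-priority lists need explicit construction here.
 */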

void
meta_queue_free (struct meta_queue *mq)
{
  unsigned i;

  for (i = 0; i < MQ_SIZE; i++)
    list_delete (mq->subq[i]);

  XFREE (MTYPE_WORK_QUEUE, mq);
}
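
/* Illustrative pairing of the two lifecycle functions above -- not part of
 * the original source.  A minimal sketch, assuming no deleter callback is
 * installed on the sub-queue lists (none is set in meta_queue_new):
 *
 *   struct meta_queue *mq = meta_queue_new ();
 *   ...
 *   meta_queue_free (mq);   // list_delete() each sub-queue, then the MQ
 *
 * With no del callback, list_delete() frees the list and its listnodes but
 * not the route_nodes they point at, so the queues must be drained first.
 */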

/* initialise zebra rib work queue */
static void
rib_queue_init (struct zebra_t *zebra)
{
  assert (zebra);

  if (! (zebra->ribq = work_queue_new (zebra->master,
                                       "route_node processing")))
    {
      zlog_err ("%s: could not initialise work queue!", __func__);
      return;
    }

  /* fill in the work queue spec */
  zebra->ribq->spec.workfunc = &meta_queue_process;
  zebra->ribq->spec.errorfunc = NULL;
  zebra->ribq->spec.completion_func = &meta_queue_process_complete;
  /* XXX: TODO: These should be runtime configurable via vty */
  zebra->ribq->spec.max_retries = 3;
  zebra->ribq->spec.hold = rib_process_hold_time;

  if (!(zebra->mq = meta_queue_new ()))
    {
      zlog_err ("%s: could not initialise meta queue!", __func__);
      return;
    }
  return;
}
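
/* Illustrative note on the spec fields above -- not part of the original
 * source, and assuming lib/workqueue semantics.  The work queue run calls
 * spec.workfunc (meta_queue_process) on each queued item; spec.hold delays
 * the first run after work_queue_add(), so bursts of route churn collapse
 * into one best-path pass:
 *
 *   rib_queue_init (&zebrad);   // once, at startup
 *   rib_queue_add (rn);         // cheap; processing happens only after
 *                               // rib_process_hold_time has expired
 *
 * spec.max_retries bounds how often a work function may ask for the same
 * item to be retried before it is dropped.
 */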

/* RIB updates are processed via a queue of pointers to route_nodes.
 *
 * The queue length is bounded by the maximal size of the routing table,
 * as a route_node will not be requeued if it is already queued.
 *
 * REs are submitted via rib_addnode or rib_delnode, which set minimal
 * state, or via static_install_route (when an existing RE is updated),
 * and then submit the route_node to the queue for best-path selection
 * later. The order of add/delete state changes is preserved for any
 * given RE.
 *
 * Deleted REs are reaped during best-path selection.
 *
 * rib_addnode
 * |-> rib_link or unset ROUTE_ENTRY_REMOVE      |->Update kernel with
 *       |-------->|                             |  best RE, if required
 *                 |                             |
 * static_install->|->rib_addqueue...... -> rib_process
 *                 |                             |
 *       |-------->|                             |-> rib_unlink
 * |-> set ROUTE_ENTRY_REMOVE                    |
 * rib_delnode                                  (RE freed)
 *
 * The 'info' pointer of a route_node points to a rib_dest_t
 * ('dest'). Queueing state for a route_node is kept on the dest. The
 * dest is created on-demand by rib_link() and is kept around at least
 * as long as there are ribs hanging off it (@see rib_gc_dest()).
 *
 * Refcounting (aka "locking" throughout the GNU Zebra and Quagga code):
 *
 * - route_nodes: refcounted by:
 *   - dest attached to route_node:
 *     - managed by: rib_link/rib_gc_dest
 *   - route_node processing queue
 *     - managed by: rib_addqueue, rib_process.
 *
 */
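
/* Illustrative round trip through the machinery described above -- not
 * part of the original source; error handling elided:
 *
 *   rib_addnode (rn, re, 1);   // link the RE, queue rn for best-path run
 *   ...                        // rib_process picks best RE, updates kernel
 *   rib_delnode (rn, re);      // set ROUTE_ENTRY_REMOVED, requeue rn
 *   ...                        // rib_process reaps the RE via rib_unlink
 *
 * Since only the route_node is ever queued, an add closely followed by a
 * delete of the same RE is safe: the RE's status flags, not queue entries,
 * carry the intended end state.
 */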

/* Add RE to head of the route node. */
static void
rib_link (struct route_node *rn, struct route_entry *re, int process)
{
  struct route_entry *head;
  rib_dest_t *dest;
  afi_t afi;
  const char *rmap_name;

  assert (re && rn);

  dest = rib_dest_from_rnode (rn);
  if (!dest)
    {
      if (IS_ZEBRA_DEBUG_RIB_DETAILED)
        rnode_debug (rn, re->vrf_id, "rn %p adding dest", (void *)rn);

      dest = XCALLOC (MTYPE_RIB_DEST, sizeof (rib_dest_t));
      route_lock_node (rn); /* rn route table reference */
      rn->info = dest;
      dest->rnode = rn;
    }

  head = dest->routes;
  if (head)
    head->prev = re;
  re->next = head;
  dest->routes = re;

  afi = (rn->p.family == AF_INET) ? AFI_IP :
        (rn->p.family == AF_INET6) ? AFI_IP6 : AFI_MAX;
  if (is_zebra_import_table_enabled (afi, re->table))
    {
      rmap_name = zebra_get_import_table_route_map (afi, re->table);
      zebra_add_import_table_entry (rn, re, rmap_name);
    }
  else if (process)
    rib_queue_add (rn);
}
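
/* Sketch of the list shape rib_link maintains -- not part of the original
 * source.  dest->routes heads an intrusive doubly linked list threaded
 * through each RE's next/prev fields:
 *
 *   dest->routes -> re_new <-> re_old <-> ... -> NULL
 *
 * Head insertion is O(1), and rib_unlink below splices an RE out without
 * walking the list.  Assumption: REs arrive with prev == NULL (callers
 * XCALLOC them), which is what lets rib_unlink treat prev == NULL as
 * "this RE is the head".
 */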

void
rib_addnode (struct route_node *rn, struct route_entry *re, int process)
{
  /* The RE has been un-removed before the route_node was processed; the
   * route_node must hence already be on the queue for processing.
   */
  if (CHECK_FLAG (re->status, ROUTE_ENTRY_REMOVED))
    {
      if (IS_ZEBRA_DEBUG_RIB)
        rnode_debug (rn, re->vrf_id, "rn %p, un-removed re %p",
                     (void *)rn, (void *)re);

      UNSET_FLAG (re->status, ROUTE_ENTRY_REMOVED);
      return;
    }
  rib_link (rn, re, process);
}
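
/* Illustrative sequence for the un-remove shortcut above -- not part of
 * the original source.  If a client deletes and re-adds the same RE before
 * the queued route_node has been processed:
 *
 *   rib_delnode (rn, re);     // sets ROUTE_ENTRY_REMOVED; rn is queued
 *   rib_addnode (rn, re, 1);  // flag still set: just clear it and return
 *
 * The RE was never unlinked and rn is already queued, so clearing the flag
 * restores the pre-delete state without relinking or requeueing.
 */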

/*
 * rib_unlink
 *
 * Detach a rib structure from a route_node.
 *
 * Note that a call to rib_unlink() should be followed by a call to
 * rib_gc_dest() at some point. This allows a rib_dest_t that is no
 * longer required to be deleted.
 */
void
rib_unlink (struct route_node *rn, struct route_entry *re)
{
  rib_dest_t *dest;

  assert (rn && re);

  if (IS_ZEBRA_DEBUG_RIB)
    rnode_debug (rn, re->vrf_id, "rn %p, re %p", (void *)rn, (void *)re);

  dest = rib_dest_from_rnode (rn);

  if (re->next)
    re->next->prev = re->prev;

  if (re->prev)
    re->prev->next = re->next;
  else
    dest->routes = re->next;

  /* free RE and nexthops */
  zebra_deregister_rnh_static_nexthops (re->vrf_id, re->nexthop, rn);
  nexthops_free(re->nexthop);
  XFREE (MTYPE_RE, re);
}
|
|
|
|
|
2016-09-01 13:20:02 +02:00
|
|
|
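/*
 * Illustrative sketch only (not compiled): the unlink above is a plain
 * doubly-linked-list removal on the per-destination list of REs.  With
 * dest->routes == A <-> B <-> C, unlinking B amounts to:
 *
 *   if (B->next) B->next->prev = B->prev;   // C->prev = A
 *   if (B->prev) B->prev->next = B->next;   // A->next = C
 *   else         dest->routes  = B->next;   // B was the list head
 *
 * after which B's nexthops and B itself are freed, as done above.
 */
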
void
rib_delnode (struct route_node *rn, struct route_entry *re)
{
  afi_t afi;

  if (IS_ZEBRA_DEBUG_RIB)
    rnode_debug (rn, re->vrf_id, "rn %p, re %p, removing", (void *)rn, (void *)re);
  SET_FLAG (re->status, ROUTE_ENTRY_REMOVED);

  afi = (rn->p.family == AF_INET) ? AFI_IP :
        (rn->p.family == AF_INET6) ? AFI_IP6 : AFI_MAX;
  if (is_zebra_import_table_enabled (afi, re->table))
    {
      zebra_del_import_table_entry(rn, re);
      /* Just clean up if non main table */
      if (IS_ZEBRA_DEBUG_RIB)
        {
          char buf[SRCDEST2STR_BUFFER];
          srcdest_rnode2str(rn, buf, sizeof(buf));
          zlog_debug ("%u:%s: Freeing route rn %p, re %p (type %d)",
                      re->vrf_id, buf, rn, re, re->type);
        }

      rib_unlink(rn, re);
    }
  else
    {
      rib_queue_add (rn);
    }
}

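/*
 * Lifecycle sketch (illustration only), following the flow above: for
 * main-table routes, deletion is deferred rather than immediate:
 *
 *   rib_delnode (rn, re);   // sets ROUTE_ENTRY_REMOVED, queues rn
 *   ...
 *   // later, from the ribq work queue, rib_process() skips REMOVED
 *   // entries during best-path selection and reaps them via rib_unlink()
 */
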
/* This function dumps the contents of a given RE entry into
 * standard debug log. Calling function name and IP prefix in
 * question are passed as 1st and 2nd arguments.
 */
void _route_entry_dump (const char * func,
                        union prefixconstptr pp,
                        union prefixconstptr src_pp,
                        const struct route_entry * re)
{
  const struct prefix *p = pp.p;
  const struct prefix *src_p = src_pp.p;
  bool is_srcdst = src_p && src_p->prefixlen;
  char straddr[PREFIX_STRLEN];
  char srcaddr[PREFIX_STRLEN];
  struct nexthop *nexthop, *tnexthop;
  int recursing;

  zlog_debug ("%s: dumping RE entry %p for %s%s%s vrf %u", func, (const void *)re,
              prefix2str(pp, straddr, sizeof(straddr)),
              is_srcdst ? " from " : "",
              is_srcdst ? prefix2str(src_pp, srcaddr, sizeof(srcaddr)) : "",
              re->vrf_id);
  zlog_debug
    (
     "%s: refcnt == %lu, uptime == %lu, type == %u, instance == %d, table == %d",
     func,
     re->refcnt,
     (unsigned long) re->uptime,
     re->type,
     re->instance,
     re->table
    );
  zlog_debug
    (
     "%s: metric == %u, mtu == %u, distance == %u, flags == %u, status == %u",
     func,
     re->metric,
     re->mtu,
     re->distance,
     re->flags,
     re->status
    );
  zlog_debug
    (
     "%s: nexthop_num == %u, nexthop_active_num == %u",
     func,
     re->nexthop_num,
     re->nexthop_active_num
    );

  for (ALL_NEXTHOPS_RO(re->nexthop, nexthop, tnexthop, recursing))
    {
      inet_ntop (p->family, &nexthop->gate, straddr, INET6_ADDRSTRLEN);
      zlog_debug
        (
         "%s: %s %s with flags %s%s%s",
         func,
         (recursing ? " NH" : "NH"),
         straddr,
         (CHECK_FLAG (nexthop->flags, NEXTHOP_FLAG_ACTIVE) ? "ACTIVE " : ""),
         (CHECK_FLAG (nexthop->flags, NEXTHOP_FLAG_FIB) ? "FIB " : ""),
         (CHECK_FLAG (nexthop->flags, NEXTHOP_FLAG_RECURSIVE) ? "RECURSIVE" : "")
        );
    }
  zlog_debug ("%s: dump complete", func);
}

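/*
 * Usage sketch: callers in this file go through the route_entry_dump()
 * wrapper (the rib.h macro that supplies __func__ as the first argument),
 * e.g.
 *
 *   if (IS_ZEBRA_DEBUG_RIB_DETAILED)
 *     route_entry_dump (p, src_p, re);     // dst prefix, src prefix, entry
 *   route_entry_dump (&rn->p, NULL, re);   // no source prefix
 */
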
/* This is an exported helper to rtm_read() to dump the strange
 * RE entry found by rib_lookup_ipv4_route()
 */
void rib_lookup_and_dump (struct prefix_ipv4 * p, vrf_id_t vrf_id)
{
  struct route_table *table;
  struct route_node *rn;
  struct route_entry *re;
  char prefix_buf[PREFIX_STRLEN];

  /* Lookup table. */
  table = zebra_vrf_table (AFI_IP, SAFI_UNICAST, vrf_id);
  if (! table)
    {
      zlog_err ("%s: zebra_vrf_table() returned NULL", __func__);
      return;
    }

  /* Scan the RIB table for an exactly matching RE entry. */
  rn = route_node_lookup (table, (struct prefix *) p);

  /* No route for this prefix. */
  if (! rn)
    {
      zlog_debug ("%s: lookup failed for %s", __func__,
                  prefix2str((struct prefix*) p, prefix_buf, sizeof(prefix_buf)));
      return;
    }

  /* Unlock node. */
  route_unlock_node (rn);

  /* let's go */
  RNODE_FOREACH_RE (rn, re)
    {
      zlog_debug
        (
         "%s: rn %p, re %p: %s, %s",
         __func__,
         (void *)rn,
         (void *)re,
         (CHECK_FLAG (re->status, ROUTE_ENTRY_REMOVED) ? "removed" : "NOT removed"),
         (CHECK_FLAG (re->flags, ZEBRA_FLAG_SELECTED) ? "selected" : "NOT selected")
        );
      route_entry_dump (p, NULL, re);
    }
}

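/*
 * Caller sketch (hypothetical values, illustration only):
 *
 *   struct prefix_ipv4 p;
 *   memset (&p, 0, sizeof (p));
 *   p.family = AF_INET;
 *   p.prefixlen = IPV4_MAX_PREFIXLEN;
 *   inet_pton (AF_INET, "192.0.2.1", &p.prefix);
 *   rib_lookup_and_dump (&p, VRF_DEFAULT);
 */
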
/* Check if a requested address assignment will fail due to another
 * route being installed by zebra in the FIB already. Take the necessary
 * actions, if needed: remove such a route from the FIB and deSELECT the
 * corresponding RE entry. Then put the affected RN at the RIBQ head.
 */
void rib_lookup_and_pushup (struct prefix_ipv4 * p, vrf_id_t vrf_id)
{
  struct route_table *table;
  struct route_node *rn;
  struct route_entry *re;
  unsigned changed = 0;

  if (NULL == (table = zebra_vrf_table (AFI_IP, SAFI_UNICAST, vrf_id)))
    {
      zlog_err ("%s: zebra_vrf_table() returned NULL", __func__);
      return;
    }

  /* No matches would be the simplest case. */
  if (NULL == (rn = route_node_lookup (table, (struct prefix *) p)))
    return;

  /* Unlock node. */
  route_unlock_node (rn);

  /* Check all RE entries. If any changes have to be made, requeue the RN
   * at the head of the RIBQ. If the routing message about the new connected
   * route (generated by the IP address we are about to assign) arrives
   * before the RIBQ is processed, the new RE entry will join the RIBQ
   * record already at the head. This is necessary for proper revalidation
   * of the remaining REs.
   */
  RNODE_FOREACH_RE (rn, re)
    {
      if (CHECK_FLAG (re->status, ROUTE_ENTRY_SELECTED_FIB) &&
          ! RIB_SYSTEM_ROUTE (re))
        {
          changed = 1;
          if (IS_ZEBRA_DEBUG_RIB)
            {
              char buf[PREFIX_STRLEN];
              zlog_debug ("%u:%s: freeing way for connected prefix",
                          re->vrf_id, prefix2str(&rn->p, buf, sizeof(buf)));
              route_entry_dump (&rn->p, NULL, re);
            }
          rib_uninstall (rn, re);
        }
    }
  if (changed)
    rib_queue_add (rn);
}

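/*
 * Caller sketch (hypothetical, illustration only): run before assigning
 * an interface address, so that any conflicting FIB route is withdrawn
 * first and the node is queued for revalidation:
 *
 *   struct prefix_ipv4 p;              // the soon-to-be-connected prefix
 *   memset (&p, 0, sizeof (p));
 *   p.family = AF_INET;
 *   p.prefixlen = 24;
 *   inet_pton (AF_INET, "192.0.2.0", &p.prefix);
 *   rib_lookup_and_pushup (&p, VRF_DEFAULT);
 */
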
int
rib_add_multipath (afi_t afi, safi_t safi, struct prefix *p,
                   struct prefix_ipv6 *src_p, struct route_entry *re)
{
  struct route_table *table;
  struct route_node *rn;
  struct route_entry *same;
  struct nexthop *nexthop;
  int ret = 0;
  int family;

  if (!re)
    return 0;

  if (p->family == AF_INET)
    family = AFI_IP;
  else
    family = AFI_IP6;

  assert(!src_p || family == AFI_IP6);

  /* Lookup table. */
  table = zebra_vrf_table_with_table_id (family, safi, re->vrf_id, re->table);
  if (! table)
    return 0;

  /* Make sure prefixlen is applied to the prefix. */
  apply_mask (p);
  if (src_p)
    apply_mask_ipv6 (src_p);

  /* Set default distance by route type. */
  if (re->distance == 0)
    {
      re->distance = route_info[re->type].distance;

      /* iBGP distance is 200. */
      if (re->type == ZEBRA_ROUTE_BGP
          && CHECK_FLAG (re->flags, ZEBRA_FLAG_IBGP))
        re->distance = 200;
    }

  /* Lookup route node.*/
  rn = srcdest_rnode_get (table, p, src_p);

  /* If a route of the same type is already installed, treat it as an
     implicit withdraw. */
  RNODE_FOREACH_RE (rn, same)
    {
      if (CHECK_FLAG (same->status, ROUTE_ENTRY_REMOVED))
        continue;

      if (same->type == re->type && same->instance == re->instance
          && same->table == re->table
          && same->type != ZEBRA_ROUTE_CONNECT)
        break;
    }

  /* If this route is a kernel route, set the FIB flag on the route. */
  if (re->type == ZEBRA_ROUTE_KERNEL || re->type == ZEBRA_ROUTE_CONNECT)
    for (nexthop = re->nexthop; nexthop; nexthop = nexthop->next)
      SET_FLAG (nexthop->flags, NEXTHOP_FLAG_FIB);

  /* Link new re to node.*/
  if (IS_ZEBRA_DEBUG_RIB)
    {
      rnode_debug(rn, re->vrf_id, "Inserting route rn %p, re %p (type %d) existing %p",
                  (void *)rn, (void *)re, re->type, (void *)same);

      if (IS_ZEBRA_DEBUG_RIB_DETAILED)
        route_entry_dump (p, src_p, re);
    }
  rib_addnode (rn, re, 1);
  ret = 1;

  /* Free implicit route.*/
  if (same)
    {
      rib_delnode (rn, same);
      ret = -1;
    }

  route_unlock_node (rn);
  return ret;
}

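/*
 * Usage sketch (hypothetical values, not a real caller): build a
 * route_entry with several nexthops, then hand ownership to the RIB in
 * a single call.
 *
 *   struct in_addr gate1, gate2;       // filled in by the caller
 *   struct route_entry *re = XCALLOC (MTYPE_RE, sizeof (struct route_entry));
 *   re->type = ZEBRA_ROUTE_STATIC;
 *   re->vrf_id = VRF_DEFAULT;
 *   re->table = RT_TABLE_MAIN;         // assumed main table
 *   re->uptime = time (NULL);
 *   route_entry_nexthop_ipv4_add (re, &gate1, NULL);
 *   route_entry_nexthop_ipv4_add (re, &gate2, NULL);
 *   rib_add_multipath (AFI_IP, SAFI_UNICAST, &p, NULL, re);
 *
 * Return value: 1 = linked, -1 = linked and implicitly withdrew an
 * existing same-type entry, 0 = no-op (NULL re or unknown table).
 */
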
void
rib_delete (afi_t afi, safi_t safi, vrf_id_t vrf_id, int type, u_short instance,
            int flags, struct prefix *p, struct prefix_ipv6 *src_p,
            union g_addr *gate, ifindex_t ifindex, u_int32_t table_id)
{
  struct route_table *table;
  struct route_node *rn;
  struct route_entry *re;
  struct route_entry *fib = NULL;
  struct route_entry *same = NULL;
  struct nexthop *nexthop, *tnexthop;
  int recursing;
  char buf2[INET6_ADDRSTRLEN];

  assert(!src_p || afi == AFI_IP6);

  /* Lookup table. */
  table = zebra_vrf_table_with_table_id (afi, safi, vrf_id, table_id);
  if (! table)
    return;

  /* Apply mask. */
  apply_mask (p);
  if (src_p)
    apply_mask_ipv6 (src_p);

  /* Lookup route node. */
  rn = srcdest_rnode_lookup (table, p, src_p);
  if (! rn)
    {
      char dst_buf[PREFIX_STRLEN], src_buf[PREFIX_STRLEN];

      prefix2str(p, dst_buf, sizeof(dst_buf));
      if (src_p && src_p->prefixlen)
        prefix2str(src_p, src_buf, sizeof(src_buf));
      else
        src_buf[0] = '\0';

      if (IS_ZEBRA_DEBUG_RIB)
        zlog_debug ("%u:%s%s%s doesn't exist in rib",
                    vrf_id, dst_buf,
                    (src_buf[0] != '\0') ? " from " : "",
                    src_buf);
      return;
    }

  /* Lookup same type route. */
  RNODE_FOREACH_RE (rn, re)
    {
      if (CHECK_FLAG (re->status, ROUTE_ENTRY_REMOVED))
        continue;

      if (CHECK_FLAG (re->status, ROUTE_ENTRY_SELECTED_FIB))
        fib = re;

      if (re->type != type)
        continue;
      if (re->instance != instance)
        continue;
      if (re->type == ZEBRA_ROUTE_CONNECT && (nexthop = re->nexthop) &&
          nexthop->type == NEXTHOP_TYPE_IFINDEX)
        {
          if (nexthop->ifindex != ifindex)
            continue;
          if (re->refcnt)
            {
              re->refcnt--;
              route_unlock_node (rn);
              route_unlock_node (rn);
              return;
            }
          same = re;
          break;
        }
      /* Make sure that the route found has the same gateway. */
      else
        {
          if (gate == NULL)
            {
              same = re;
              break;
            }
          for (ALL_NEXTHOPS_RO(re->nexthop, nexthop, tnexthop, recursing))
            if (IPV4_ADDR_SAME (&nexthop->gate.ipv4, gate) ||
                IPV6_ADDR_SAME (&nexthop->gate.ipv6, gate))
              {
                same = re;
                break;
              }
          if (same)
            break;
        }
    }
  /* If a route of the same type can't be found and this message is from
     the kernel. */
  if (! same)
    {
      if (fib && type == ZEBRA_ROUTE_KERNEL &&
          CHECK_FLAG(flags, ZEBRA_FLAG_SELFROUTE))
        {
          if (IS_ZEBRA_DEBUG_RIB)
            {
              rnode_debug (rn, vrf_id, "rn %p, re %p (type %d) was deleted from kernel, adding",
                           rn, fib, fib->type);
            }
          if (allow_delete)
            {
              /* Unset flags. */
              for (nexthop = fib->nexthop; nexthop; nexthop = nexthop->next)
                UNSET_FLAG (nexthop->flags, NEXTHOP_FLAG_FIB);

              UNSET_FLAG (fib->status, ROUTE_ENTRY_SELECTED_FIB);
            }
          else
            {
              /* This means someone else, other than Zebra, has deleted
               * a Zebra route from the kernel. We will add it back */
              rib_install_kernel(rn, fib, NULL);
            }
        }
      else
        {
          if (IS_ZEBRA_DEBUG_RIB)
            {
              if (gate)
                rnode_debug(rn, vrf_id, "via %s ifindex %d type %d "
                            "doesn't exist in rib",
                            inet_ntop (afi2family(afi), gate, buf2, INET_ADDRSTRLEN), /* FIXME */
                            ifindex,
                            type);
              else
                rnode_debug (rn, vrf_id, "ifindex %d type %d doesn't exist in rib",
                             ifindex,
                             type);
            }
          route_unlock_node (rn);
          return;
        }
    }

  if (same)
    rib_delnode (rn, same);

  route_unlock_node (rn);
  return;
}

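/*
 * Caller sketch (hypothetical values, illustration only): withdraw a
 * static IPv4 route via a specific gateway.  A NULL gate matches the
 * first non-connected entry of the given type and instance.
 *
 *   union g_addr gate;
 *   inet_pton (AF_INET, "203.0.113.1", &gate.ipv4);
 *   rib_delete (AFI_IP, SAFI_UNICAST, VRF_DEFAULT, ZEBRA_ROUTE_STATIC,
 *               0, 0, &p, NULL, &gate, 0, RT_TABLE_MAIN);
 *
 * (arguments after the type: instance 0, flags 0, prefix, no source
 *  prefix, gateway, ifindex 0, table id)
 */
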
int
rib_add (afi_t afi, safi_t safi, vrf_id_t vrf_id, int type,
         u_short instance, int flags, struct prefix *p,
         struct prefix_ipv6 *src_p, union g_addr *gate,
         union g_addr *src, ifindex_t ifindex,
         u_int32_t table_id, u_int32_t metric, u_int32_t mtu,
         u_char distance)
{
  struct route_entry *re;
  struct route_entry *same = NULL;
  struct route_table *table;
  struct route_node *rn;
  struct nexthop *nexthop;

  assert(!src_p || afi == AFI_IP6);

  /* Lookup table. */
  table = zebra_vrf_table_with_table_id (afi, safi, vrf_id, table_id);
  if (! table)
    return 0;

  /* Make sure mask is applied. */
  apply_mask (p);
  if (src_p)
    apply_mask_ipv6 (src_p);

  /* Set default distance by route type. */
  if (distance == 0)
    {
      if ((unsigned)type >= array_size(route_info))
        distance = 150;
      else
        distance = route_info[type].distance;

      /* iBGP distance is 200. */
      if (type == ZEBRA_ROUTE_BGP && CHECK_FLAG (flags, ZEBRA_FLAG_IBGP))
        distance = 200;
    }

  /* Lookup route node.*/
  rn = srcdest_rnode_get (table, p, src_p);

  /* If a route of the same type is already installed, treat it as an
     implicit withdraw. */
  RNODE_FOREACH_RE (rn, re)
    {
      if (CHECK_FLAG (re->status, ROUTE_ENTRY_REMOVED))
        continue;

      if (re->type != type)
        continue;
      if (re->instance != instance)
        continue;
      if (re->type != ZEBRA_ROUTE_CONNECT)
        {
          same = re;
          break;
        }
      /* Duplicate connected route comes in. */
      else if ((nexthop = re->nexthop) &&
               nexthop->type == NEXTHOP_TYPE_IFINDEX &&
               nexthop->ifindex == ifindex &&
               !CHECK_FLAG (re->status, ROUTE_ENTRY_REMOVED))
        {
          re->refcnt++;
          return 0;
        }
    }

  /* Allocate new re structure. */
  re = XCALLOC (MTYPE_RE, sizeof (struct route_entry));

  re->type = type;
  re->instance = instance;
  re->distance = distance;
  re->flags = flags;
  re->metric = metric;
  re->mtu = mtu;
  re->table = table_id;
  re->vrf_id = vrf_id;
  re->nexthop_num = 0;
  re->uptime = time (NULL);

  /* Nexthop settings. */
  if (gate)
    {
      if (afi == AFI_IP6)
        {
          if (ifindex)
            route_entry_nexthop_ipv6_ifindex_add (re, &gate->ipv6, ifindex);
          else
            route_entry_nexthop_ipv6_add (re, &gate->ipv6);
        }
      else
        {
          if (ifindex)
            route_entry_nexthop_ipv4_ifindex_add (re, &gate->ipv4, &src->ipv4, ifindex);
          else
            route_entry_nexthop_ipv4_add (re, &gate->ipv4, &src->ipv4);
        }
    }
  else
    route_entry_nexthop_ifindex_add (re, ifindex);

  /* If this route is a kernel route, set the FIB flag on the route. */
  if (type == ZEBRA_ROUTE_KERNEL || type == ZEBRA_ROUTE_CONNECT)
    for (nexthop = re->nexthop; nexthop; nexthop = nexthop->next)
      SET_FLAG (nexthop->flags, NEXTHOP_FLAG_FIB);

  /* Link new rib to node.*/
  if (IS_ZEBRA_DEBUG_RIB)
    {
      rnode_debug (rn, vrf_id, "Inserting route rn %p, re %p (type %d) existing %p",
                   (void *)rn, (void *)re, re->type, (void *)same);

      if (IS_ZEBRA_DEBUG_RIB_DETAILED)
        route_entry_dump (p, src_p, re);
    }
  rib_addnode (rn, re, 1);

  /* Free implicit route.*/
  if (same)
    rib_delnode (rn, same);

  route_unlock_node (rn);
  return 0;
}

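/*
 * Caller sketch (hypothetical values, illustration only): add a kernel
 * route learned from a routing-socket/netlink message; ifindex and
 * metric come from that message.
 *
 *   union g_addr gate, src;
 *   memset (&src, 0, sizeof (src));
 *   inet_pton (AF_INET, "198.51.100.1", &gate.ipv4);
 *   rib_add (AFI_IP, SAFI_UNICAST, VRF_DEFAULT, ZEBRA_ROUTE_KERNEL,
 *            0, 0, &p, NULL, &gate, &src, ifindex,
 *            RT_TABLE_MAIN, metric, 0, 0);
 *
 * (instance 0, flags 0, no source prefix, mtu 0, distance 0; distance 0
 *  lets the route_info[] default apply)
 */
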
Zebra: Schedule RIB processing based on trigger event
Currently, when RIB processing is initiated (i.e., by calling rib_update()),
all routes are queued for processing. This is not desirable in all situations
because, sometimes the protocol may have an alternate path. In addition,
with NHT tracking nexthops, there are situations when NHT should be kicked
off first and that can trigger subsequent RIB processing.
This patch addresses this by introducing the notion of a trigger event. This
is only for the situation when the entire RIB is walked. The current triggers
- based on when rib_update() is invoked - are "interface change" and "route-
map change". In the former case, only the relevant routes are walked and
scheduled, in the latter case, currently all routes are scheduled for
processing.
Signed-off-by: Vivek Venkatraman <vivek@cumulusnetworks.com>
Reviewed-by: Donald Sharp <sharpd@cumulusnetworks.com>
Ticket: CM-7662
Reviewed By: CCR-3905
Note: The initial defect in this area was CM-7420. This was addressed in
2.5.4 with an interim change that only walked static routes upon interface
down. The change was considered a bit risky to do for interface up etc. Also,
this did not address scenarios like CM-7662. The current fix addresses CM-7662.
2015-12-09 01:55:43 +01:00
|
|
|
/* Schedule routes of a particular table (address-family) based on event. */
static void
rib_update_table (struct route_table *table, rib_update_event_t event)
{
  struct route_node *rn;
  struct route_entry *re, *next;

  /* Walk all routes and queue for processing, if appropriate for
   * the trigger event.
   */
  for (rn = route_top (table); rn; rn = srcdest_route_next (rn))
    {
      switch (event)
        {
        case RIB_UPDATE_IF_CHANGE:
          /* Examine all routes that won't get processed by the protocol or
           * triggered by nexthop evaluation (NHT). This would be system,
           * kernel and certain static routes. Note that NHT will get
           * triggered upon an interface event as connected routes always
           * get queued for processing.
           */
          RNODE_FOREACH_RE_SAFE (rn, re, next)
            {
              if (re->type == ZEBRA_ROUTE_OSPF ||
                  re->type == ZEBRA_ROUTE_OSPF6 ||
                  re->type == ZEBRA_ROUTE_BGP)
                continue; /* protocol will handle. */
              else if (re->type == ZEBRA_ROUTE_STATIC)
                {
                  struct nexthop *nh;

                  for (nh = re->nexthop; nh; nh = nh->next)
                    if (!(nh->type == NEXTHOP_TYPE_IPV4 ||
                          nh->type == NEXTHOP_TYPE_IPV6))
                      break;

                  /* If we only have nexthops to a gateway, NHT will
                   * take care.
                   */
                  if (nh)
                    rib_queue_add (rn);
                }
              else
                rib_queue_add (rn);
            }
          break;

        case RIB_UPDATE_RMAP_CHANGE:
        case RIB_UPDATE_OTHER:
          /* Right now, examine all routes. Can restrict to a protocol in
           * some cases (TODO).
           */
          if (rnode_to_ribs (rn))
            rib_queue_add (rn);
          break;

        default:
          break;
        }
    }
}
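
/* Note: RIB_UPDATE_RMAP_CHANGE and RIB_UPDATE_OTHER currently queue every
 * node that still carries entries; restricting the route-map case to the
 * protocols an updated route-map actually touches is the TODO noted in
 * the case body above.
 */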
/* RIB update function. */
void
rib_update (vrf_id_t vrf_id, rib_update_event_t event)
{
  struct route_table *table;

  /* Process routes of interested address-families. */
  table = zebra_vrf_table (AFI_IP, SAFI_UNICAST, vrf_id);
  if (table)
    rib_update_table (table, event);

  table = zebra_vrf_table (AFI_IP6, SAFI_UNICAST, vrf_id);
  if (table)
    rib_update_table (table, event);
}
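
/* Usage sketch (illustrative only, compiled out): a caller reacting to an
 * interface event would request the cheaper filtered walk. The handler
 * name below is hypothetical; only rib_update () and the trigger values
 * are taken from this file.
 */
#if 0
static void
example_ifp_event_handler (struct interface *ifp)
{
  /* Re-evaluate system, kernel and interface-dependent static routes in
   * the interface's VRF; protocol routes are rescheduled by their owning
   * daemons or by NHT.
   */
  rib_update (ifp->vrf_id, RIB_UPDATE_IF_CHANGE);
}
#endif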
/* Remove all routes which come from non-main tables. */
static void
rib_weed_table (struct route_table *table)
{
  struct route_node *rn;
  struct route_entry *re;
  struct route_entry *next;

  if (table)
    for (rn = route_top (table); rn; rn = srcdest_route_next (rn))
      RNODE_FOREACH_RE_SAFE (rn, re, next)
        {
          if (CHECK_FLAG (re->status, ROUTE_ENTRY_REMOVED))
            continue;

          if (re->table != zebrad.rtm_table_default &&
              re->table != RT_TABLE_MAIN)
            rib_delnode (rn, re);
        }
}
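
/* An entry whose kernel table id is neither zebrad.rtm_table_default nor
 * RT_TABLE_MAIN was learned from an auxiliary kernel table; weeding
 * removes such entries from the RIB wholesale.
 */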
/* Delete all routes from non-main tables. */
void
rib_weed_tables (void)
{
  struct vrf *vrf;
  struct zebra_vrf *zvrf;

  RB_FOREACH (vrf, vrf_id_head, &vrfs_by_id)
    if ((zvrf = vrf->info) != NULL)
      {
        rib_weed_table (zvrf->table[AFI_IP][SAFI_UNICAST]);
        rib_weed_table (zvrf->table[AFI_IP6][SAFI_UNICAST]);
      }
}
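/* ZEBRA_FLAG_SELFROUTE marks kernel routes that were read back from the
 * FIB but were originally installed by a previous zebra instance; the
 * sweep below uninstalls them so that no stale, unowned state survives
 * a restart.
 */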
/* Delete self-installed routes after zebra is relaunched. */
static void
rib_sweep_table (struct route_table *table)
{
  struct route_node *rn;
  struct route_entry *re;
  struct route_entry *next;
  int ret = 0;

  if (table)
    for (rn = route_top (table); rn; rn = srcdest_route_next (rn))
      RNODE_FOREACH_RE_SAFE (rn, re, next)
        {
          if (CHECK_FLAG (re->status, ROUTE_ENTRY_REMOVED))
            continue;

          if (re->type == ZEBRA_ROUTE_KERNEL &&
              CHECK_FLAG (re->flags, ZEBRA_FLAG_SELFROUTE))
            {
              ret = rib_uninstall_kernel (rn, re);
              if (! ret)
                rib_delnode (rn, re);
            }
        }
}
/* Sweep all RIB tables. */
void
rib_sweep_route (void)
{
  struct vrf *vrf;
  struct zebra_vrf *zvrf;

  RB_FOREACH (vrf, vrf_id_head, &vrfs_by_id)
    if ((zvrf = vrf->info) != NULL)
      {
        rib_sweep_table (zvrf->table[AFI_IP][SAFI_UNICAST]);
        rib_sweep_table (zvrf->table[AFI_IP6][SAFI_UNICAST]);
      }
}
/* Remove routes of a specific protocol and instance from 'table'. */
static unsigned long
rib_score_proto_table (u_char proto, u_short instance, struct route_table *table)
{
  struct route_node *rn;
  struct route_entry *re;
  struct route_entry *next;
  unsigned long n = 0;

  if (table)
    for (rn = route_top (table); rn; rn = srcdest_route_next (rn))
      RNODE_FOREACH_RE_SAFE (rn, re, next)
        {
          if (CHECK_FLAG (re->status, ROUTE_ENTRY_REMOVED))
            continue;
          if (re->type == proto && re->instance == instance)
            {
              rib_delnode (rn, re);
              n++;
            }
        }

  return n;
}
/* Remove routes of a specific protocol and instance from all VRFs. */
unsigned long
rib_score_proto (u_char proto, u_short instance)
{
  struct vrf *vrf;
  struct zebra_vrf *zvrf;
  unsigned long cnt = 0;

  RB_FOREACH (vrf, vrf_id_head, &vrfs_by_id)
    if ((zvrf = vrf->info) != NULL)
      cnt += rib_score_proto_table (proto, instance, zvrf->table[AFI_IP][SAFI_UNICAST])
             + rib_score_proto_table (proto, instance, zvrf->table[AFI_IP6][SAFI_UNICAST]);

  return cnt;
}
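
/* Usage sketch (illustrative only, compiled out): purging the routes of a
 * disconnected client. The 'client' fields are hypothetical; only
 * rib_score_proto () and zlog_notice () exist in this tree.
 */
#if 0
  unsigned long removed;

  removed = rib_score_proto (client->proto, client->instance);
  zlog_notice ("client disconnected: %lu routes removed", removed);
#endif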
/* Close RIB and clean up kernel routes. */
void
rib_close_table (struct route_table *table)
{
  struct route_node *rn;
  rib_table_info_t *info;
  struct route_entry *re;

  /* Check the table before dereferencing it for its info pointer. */
  if (!table)
    return;

  info = table->info;
  for (rn = route_top (table); rn; rn = srcdest_route_next (rn))
    RNODE_FOREACH_RE (rn, re)
      {
        if (!CHECK_FLAG (re->status, ROUTE_ENTRY_SELECTED_FIB))
          continue;

        if (info->safi == SAFI_UNICAST)
          hook_call(rib_update, rn, NULL);

        if (! RIB_SYSTEM_ROUTE (re))
          rib_uninstall_kernel (rn, re);
      }
}
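
/* The rib_update hook fired above lets registered consumers (the FPM
 * module, for instance) learn that a selected unicast route is going
 * away before the kernel entry is removed; system routes are skipped
 * because zebra never installed them itself.
 */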
/* Routing information base initialize. */
void
rib_init (void)
{
  rib_queue_init (&zebrad);
}
/*
 * vrf_id_get_next
 *
 * Get the first vrf id that is greater than the given vrf id if any.
 *
 * Returns TRUE if a vrf id was found, FALSE otherwise.
 */
static inline int
vrf_id_get_next (vrf_id_t vrf_id, vrf_id_t *next_id_p)
{
  struct vrf *vrf;

  vrf = vrf_lookup_by_id (vrf_id);
  if (vrf)
    {
      vrf = RB_NEXT (vrf_id_head, &vrfs_by_id, vrf);
      if (vrf)
        {
          *next_id_p = vrf->vrf_id;
          return 1;
        }
    }

  return 0;
}
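
/* vrfs_by_id is a red-black tree keyed on the VRF id, so RB_NEXT returns
 * the in-order successor; successive calls to vrf_id_get_next () thus
 * enumerate VRF ids in strictly ascending order.
 */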
/*
 * rib_tables_iter_next
 *
 * Returns the next table in the iteration.
 */
struct route_table *
rib_tables_iter_next (rib_tables_iter_t *iter)
{
  struct route_table *table;

  /*
   * Array that helps us go over all AFI/SAFI combinations via one
   * index.
   */
  static struct {
    afi_t afi;
    safi_t safi;
  } afi_safis[] = {
    { AFI_IP, SAFI_UNICAST },
    { AFI_IP, SAFI_MULTICAST },
    { AFI_IP, SAFI_LABELED_UNICAST },
    { AFI_IP6, SAFI_UNICAST },
    { AFI_IP6, SAFI_MULTICAST },
    { AFI_IP6, SAFI_LABELED_UNICAST },
  };

  table = NULL;

  switch (iter->state)
    {
    case RIB_TABLES_ITER_S_INIT:
      iter->vrf_id = VRF_DEFAULT;
      iter->afi_safi_ix = -1;

      /* Fall through */

    case RIB_TABLES_ITER_S_ITERATING:
      iter->afi_safi_ix++;
      while (1)
        {
          while (iter->afi_safi_ix < (int) ZEBRA_NUM_OF (afi_safis))
            {
              table = zebra_vrf_table (afi_safis[iter->afi_safi_ix].afi,
                                       afi_safis[iter->afi_safi_ix].safi,
                                       iter->vrf_id);
              if (table)
                break;

              iter->afi_safi_ix++;
            }

          /*
           * Found another table in this vrf.
           */
          if (table)
            break;

          /*
           * Done with all tables in the current vrf, go to the next
           * one.
           */
          if (!vrf_id_get_next (iter->vrf_id, &iter->vrf_id))
            break;

          iter->afi_safi_ix = 0;
        }

      break;

    case RIB_TABLES_ITER_S_DONE:
      return NULL;
    }

  if (table)
    iter->state = RIB_TABLES_ITER_S_ITERATING;
  else
    iter->state = RIB_TABLES_ITER_S_DONE;

  return table;
}
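
/* Usage sketch (illustrative only, compiled out): draining the iterator
 * visits each existing AFI/SAFI table of every VRF exactly once.
 * 'process_table' is a hypothetical per-table action; the iterator just
 * needs to start in RIB_TABLES_ITER_S_INIT.
 */
#if 0
  rib_tables_iter_t iter;
  struct route_table *table;

  memset (&iter, 0, sizeof (iter));
  iter.state = RIB_TABLES_ITER_S_INIT;
  while ((table = rib_tables_iter_next (&iter)) != NULL)
    process_table (table);
#endif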