/*
 * Routing Information Base header
 * Copyright (C) 1997 Kunihiro Ishiguro
 *
 * This file is part of GNU Zebra.
 *
 * GNU Zebra is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * GNU Zebra is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU Zebra; see the file COPYING.  If not, write to the Free
 * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
 * 02111-1307, USA.
 */
#ifndef _ZEBRA_RIB_H
|
|
|
|
#define _ZEBRA_RIB_H
|
|
|
|
|
#include "linklist.h"
#include "prefix.h"
#include "table.h"
#include "queue.h"
#include "nexthop.h"
|
/* Administrative distance meaning "never usable"; worse than any real route. */
#define DISTANCE_INFINITY  255

#define ZEBRA_KERNEL_TABLE_MAX 252 /* support for no more than this rt tables */
|
struct rib
|
|
|
|
{
|
|
|
|
/* Link list. */
|
|
|
|
struct rib *next;
|
|
|
|
struct rib *prev;
|
2006-03-30 15:32:09 +02:00
|
|
|
|
|
|
|
/* Nexthop structure */
|
|
|
|
struct nexthop *nexthop;
|
|
|
|
|
|
|
|
/* Refrence count. */
|
|
|
|
unsigned long refcnt;
|
|
|
|
|
|
|
|
/* Uptime. */
|
|
|
|
time_t uptime;
|
2002-12-13 21:15:29 +01:00
|
|
|
|
|
|
|
/* Type fo this route. */
|
|
|
|
int type;
|
|
|
|
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
/* Source protocol instance */
|
|
|
|
u_short instance;
|
|
|
|
|
2015-05-22 11:40:02 +02:00
|
|
|
/* VRF identifier. */
|
|
|
|
vrf_id_t vrf_id;
|
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
/* Which routing table */
|
2015-05-20 03:04:26 +02:00
|
|
|
uint32_t table;
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2006-03-30 15:32:09 +02:00
|
|
|
/* Metric */
|
|
|
|
u_int32_t metric;
|
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
/* Distance. */
|
|
|
|
u_char distance;
|
|
|
|
|
2015-05-20 02:46:33 +02:00
|
|
|
/* Tag */
|
|
|
|
u_short tag;
|
|
|
|
|
[zebra] Bug #268, Fix race between add/delete of routes, sanitise rib queueing
2006-07-27 Paul Jakma <paul.jakma@sun.com>
* rib.h: (struct rib) Add a route_node rn_status flag field,
this has to be copied every time head RIB of a route_node
changes.
Remove the rib lock field, not needed - see below.
Add a status field for RIB-private flags.
* zebra_rib.c: Add a global for the workqueue hold time, useful
for testing.
(general) Fix for bug #268. Problem originally
detailed by Simon Bryden in [quagga-dev 4001].
Essentially, add/delete of a RIB must happen /before/ the
queue. Best-path selection (ie rib_process) and reaping of
freed RIBs can then be done after queueing. Only the route_node
is queued - no important RIB state (i.e. whether a RIB is to be
deleted) is queued.
(struct zebra_queue_node_t) Disappears, no longer need to
track multiple things on the queue, only the route_node.
(rib_{lock,unlock}) removed, RIBs no longer need to be
refcounted, no longer queued.
(rib_queue_qnode_del) Removed, deleted RIBs no longer deleted
via the queue.
(rib_queue_add_qnode) deleted
(rib_queue_add) Only the route_node is queued for best-path
selection, we can check whether it is already queued or
not and avoid queueing same node twice - struct rib * argument
is not needed.
(rib_link/unlink) (un)link RIB from route_node.
(rib_{add,del}node) Front-end to updates of a RIB.
(rib_process) Reap any deleted RIBs via rib_unlink.
Unset the route_node 'QUEUED' flag.
(General) Remove calls to rib_queue_add where add/del node was
called - not needed, update calls where not.
Ignore RIB_ENTRY_REMOVEd ribs in loops through route_nodes
2006-07-27 23:49:00 +02:00
|
|
|
/* Flags of this route.
|
|
|
|
* This flag's definition is in lib/zebra.h ZEBRA_FLAG_* and is exposed
|
|
|
|
* to clients via Zserv
|
|
|
|
*/
|
2002-12-13 21:15:29 +01:00
|
|
|
u_char flags;
|
|
|
|
|
[zebra] Bug #268, Fix race between add/delete of routes, sanitise rib queueing
2006-07-27 Paul Jakma <paul.jakma@sun.com>
* rib.h: (struct rib) Add a route_node rn_status flag field,
this has to be copied every time head RIB of a route_node
changes.
Remove the rib lock field, not needed - see below.
Add a status field for RIB-private flags.
* zebra_rib.c: Add a global for the workqueue hold time, useful
for testing.
(general) Fix for bug #268. Problem originally
detailed by Simon Bryden in [quagga-dev 4001].
Essentially, add/delete of a RIB must happen /before/ the
queue. Best-path selection (ie rib_process) and reaping of
freed RIBs can then be done after queueing. Only the route_node
is queued - no important RIB state (i.e. whether a RIB is to be
deleted) is queued.
(struct zebra_queue_node_t) Disappears, no longer need to
track multiple things on the queue, only the route_node.
(rib_{lock,unlock}) removed, RIBs no longer need to be
refcounted, no longer queued.
(rib_queue_qnode_del) Removed, deleted RIBs no longer deleted
via the queue.
(rib_queue_add_qnode) deleted
(rib_queue_add) Only the route_node is queued for best-path
selection, we can check whether it is already queued or
not and avoid queueing same node twice - struct rib * argument
is not needed.
(rib_link/unlink) (un)link RIB from route_node.
(rib_{add,del}node) Front-end to updates of a RIB.
(rib_process) Reap any deleted RIBs via rib_unlink.
Unset the route_node 'QUEUED' flag.
(General) Remove calls to rib_queue_add where add/del node was
called - not needed, update calls where not.
Ignore RIB_ENTRY_REMOVEd ribs in loops through route_nodes
2006-07-27 23:49:00 +02:00
|
|
|
/* RIB internal status */
|
|
|
|
u_char status;
|
2015-05-20 02:47:22 +02:00
|
|
|
#define RIB_ENTRY_REMOVED 0x1
|
|
|
|
/* to simplify NHT logic when NHs change, instead of doing a NH by NH cmp */
|
|
|
|
#define RIB_ENTRY_NEXTHOPS_CHANGED 0x2
|
[zebra] Bug #268, Fix race between add/delete of routes, sanitise rib queueing
2006-07-27 Paul Jakma <paul.jakma@sun.com>
* rib.h: (struct rib) Add a route_node rn_status flag field,
this has to be copied every time head RIB of a route_node
changes.
Remove the rib lock field, not needed - see below.
Add a status field for RIB-private flags.
* zebra_rib.c: Add a global for the workqueue hold time, useful
for testing.
(general) Fix for bug #268. Problem originally
detailed by Simon Bryden in [quagga-dev 4001].
Essentially, add/delete of a RIB must happen /before/ the
queue. Best-path selection (ie rib_process) and reaping of
freed RIBs can then be done after queueing. Only the route_node
is queued - no important RIB state (i.e. whether a RIB is to be
deleted) is queued.
(struct zebra_queue_node_t) Disappears, no longer need to
track multiple things on the queue, only the route_node.
(rib_{lock,unlock}) removed, RIBs no longer need to be
refcounted, no longer queued.
(rib_queue_qnode_del) Removed, deleted RIBs no longer deleted
via the queue.
(rib_queue_add_qnode) deleted
(rib_queue_add) Only the route_node is queued for best-path
selection, we can check whether it is already queued or
not and avoid queueing same node twice - struct rib * argument
is not needed.
(rib_link/unlink) (un)link RIB from route_node.
(rib_{add,del}node) Front-end to updates of a RIB.
(rib_process) Reap any deleted RIBs via rib_unlink.
Unset the route_node 'QUEUED' flag.
(General) Remove calls to rib_queue_add where add/del node was
called - not needed, update calls where not.
Ignore RIB_ENTRY_REMOVEd ribs in loops through route_nodes
2006-07-27 23:49:00 +02:00
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
/* Nexthop information. */
|
|
|
|
u_char nexthop_num;
|
|
|
|
u_char nexthop_active_num;
|
|
|
|
u_char nexthop_fib_num;
|
|
|
|
};
|
|
|
|
|
/* meta-queue structure:
 * sub-queue 0: connected, kernel
 * sub-queue 1: static
 * sub-queue 2: RIP, RIPng, OSPF, OSPF6, IS-IS
 * sub-queue 3: iBGP, eBGP
 * sub-queue 4: any other origin (if any)
 */
#define MQ_SIZE 5
struct meta_queue
{
  struct list *subq[MQ_SIZE];
  u_int32_t size; /* sum of lengths of all subqueues */
};
|
/*
 * Structure that represents a single destination (prefix).
 */
typedef struct rib_dest_t_
{

  /*
   * Back pointer to the route node for this destination. This helps
   * us get to the prefix that this structure is for.
   */
  struct route_node *rnode;

  /*
   * Doubly-linked list of routes for this prefix.
   */
  struct rib *routes;

  /*
   * Flags, see below.
   */
  u_int32_t flags;

  /*
   * Linkage to put dest on the FPM processing queue.
   */
  TAILQ_ENTRY(rib_dest_t_) fpm_q_entries;

} rib_dest_t;
/* Per-qindex flag: route_node is queued on meta-queue `x' for processing. */
#define RIB_ROUTE_QUEUED(x)	(1 << (x))

/*
 * The maximum qindex that can be used.
 */
#define ZEBRA_MAX_QINDEX        (MQ_SIZE - 1)

/*
 * This flag indicates that a given prefix has been 'advertised' to
 * the FPM to be installed in the forwarding plane.
 */
#define RIB_DEST_SENT_TO_FPM   (1 << (ZEBRA_MAX_QINDEX + 1))

/*
 * This flag is set when we need to send an update to the FPM about a
 * dest.
 */
#define RIB_DEST_UPDATE_FPM    (1 << (ZEBRA_MAX_QINDEX + 2))

/*
 * Macro to iterate over each route for a destination (prefix).
 */
#define RIB_DEST_FOREACH_ROUTE(dest, rib)				\
  for ((rib) = (dest) ? (dest)->routes : NULL; (rib); (rib) = (rib)->next)

/*
 * Same as above, but allows the current node to be unlinked.
 */
#define RIB_DEST_FOREACH_ROUTE_SAFE(dest, rib, next)	\
  for ((rib) = (dest) ? (dest)->routes : NULL;		\
       (rib) && ((next) = (rib)->next, 1);		\
       (rib) = (next))

#define RNODE_FOREACH_RIB(rn, rib)				\
  RIB_DEST_FOREACH_ROUTE (rib_dest_from_rnode (rn), rib)

#define RNODE_FOREACH_RIB_SAFE(rn, rib, next)				\
  RIB_DEST_FOREACH_ROUTE_SAFE (rib_dest_from_rnode (rn), rib, next)
2002-12-13 21:15:29 +01:00
|
|
|
/* Static route information. */
|
2015-10-22 03:34:08 +02:00
|
|
|
struct static_route
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
|
|
|
/* For linked list. */
|
2015-10-22 03:34:08 +02:00
|
|
|
struct static_route *prev;
|
|
|
|
struct static_route *next;
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2015-05-22 11:40:06 +02:00
|
|
|
/* VRF identifier. */
|
|
|
|
vrf_id_t vrf_id;
|
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
/* Administrative distance. */
|
|
|
|
u_char distance;
|
|
|
|
|
2015-05-20 02:46:33 +02:00
|
|
|
/* Tag */
|
|
|
|
u_short tag;
|
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
/* Flag for this static route's type. */
|
|
|
|
u_char type;
|
2015-10-22 03:34:08 +02:00
|
|
|
#define STATIC_IPV4_GATEWAY 1
|
|
|
|
#define STATIC_IPV4_IFNAME 2
|
|
|
|
#define STATIC_IPV4_BLACKHOLE 3
|
|
|
|
#define STATIC_IPV6_GATEWAY 4
|
|
|
|
#define STATIC_IPV6_GATEWAY_IFNAME 5
|
|
|
|
#define STATIC_IPV6_IFNAME 6
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2015-10-22 03:34:08 +02:00
|
|
|
/*
|
2015-10-30 13:52:29 +01:00
|
|
|
* Nexthop value.
|
2015-10-22 03:34:08 +02:00
|
|
|
*
|
|
|
|
* Under IPv4 addr and ifname are
|
|
|
|
* used independentyly.
|
|
|
|
* STATIC_IPV4_GATEWAY uses addr
|
|
|
|
* STATIC_IPV4_IFNAME uses ifname
|
|
|
|
*/
|
|
|
|
union g_addr addr;
|
2002-12-13 21:15:29 +01:00
|
|
|
char *ifname;
|
2003-05-25 21:21:25 +02:00
|
|
|
|
|
|
|
/* bit flags */
|
|
|
|
u_char flags;
|
|
|
|
/*
|
|
|
|
see ZEBRA_FLAG_REJECT
|
|
|
|
ZEBRA_FLAG_BLACKHOLE
|
|
|
|
*/
|
2002-12-13 21:15:29 +01:00
|
|
|
};
|
|
|
|
|
|
|
|
|
/* The following for loop allows to iterate over the nexthop
 * structure of routes.
 *
 * We have to maintain quite a bit of state:
 *
 * nexthop:   The pointer to the current nexthop, either in the
 *            top-level chain or in the resolved chain of ni.
 * tnexthop:  The pointer to the current nexthop in the top-level
 *            nexthop chain.
 * recursing: Information if nh currently is in the top-level chain
 *            (0) or in a resolved chain (1).
 *
 * Initialization: Set `nexthop' and `tnexthop' to the head of the
 * top-level chain. As nexthop is in the top level chain, set recursing
 * to 0.
 *
 * Iteration check: Check that the `nexthop' pointer is not NULL.
 *
 * Iteration step: This is the tricky part. Check if `nexthop' has
 * NEXTHOP_FLAG_RECURSIVE set. If yes, this implies that `nexthop' is in
 * the top level chain and has at least one nexthop attached to
 * `nexthop->resolved'. As we want to descend into `nexthop->resolved',
 * set `recursing' to 1 and set `nexthop' to `nexthop->resolved'.
 * `tnexthop' is left alone in that case so we can remember which nexthop
 * in the top level chain we are currently handling.
 *
 * If NEXTHOP_FLAG_RECURSIVE is not set, `nexthop' will progress in its
 * current chain. If we are recursing, `nexthop' will be set to
 * `nexthop->next' and `tnexthop' will be left alone. If we are not
 * recursing, both `tnexthop' and `nexthop' will be set to `nexthop->next'
 * as we are progressing in the top level chain.
 * If we encounter `nexthop->next == NULL', we will clear the `recursing'
 * flag as we arrived either at the end of the resolved chain or at the end
 * of the top level chain. In both cases, we set `tnexthop' and `nexthop'
 * to `tnexthop->next', progressing to the next position in the top-level
 * chain and possibly to its end marked by NULL.
 */
#define ALL_NEXTHOPS_RO(head, nexthop, tnexthop, recursing) \
  (tnexthop) = (nexthop) = (head), (recursing) = 0; \
  (nexthop); \
  (nexthop) = CHECK_FLAG((nexthop)->flags, NEXTHOP_FLAG_RECURSIVE) \
    ? (((recursing) = 1), (nexthop)->resolved) \
    : ((nexthop)->next ? ((recursing) ? (nexthop)->next \
                                      : ((tnexthop) = (nexthop)->next)) \
                       : (((recursing) = 0),((tnexthop) = (tnexthop)->next)))
|
#if defined (HAVE_RTADV)
/* Structure which hold status of router advertisement. */
struct rtadv
{
  int sock;                     /* raw socket used to send/receive RAs */

  int adv_if_count;             /* interfaces advertising at second granularity */
  int adv_msec_if_count;        /* interfaces advertising at msec granularity */

  struct thread *ra_read;       /* pending read event */
  struct thread *ra_timer;      /* periodic advertisement timer */
};
#endif /* HAVE_RTADV */
|
#ifdef HAVE_NETLINK
/* Socket interface to kernel */
struct nlsock
{
  int sock;                     /* netlink socket fd */
  int seq;                      /* running message sequence number */
  struct sockaddr_nl snl;       /* local netlink address */
  const char *name;             /* human-readable label for logging */
};
#endif
2002-12-13 21:15:29 +01:00
|
|
|
/* Routing table instance. */
|
2015-05-22 11:39:56 +02:00
|
|
|
struct zebra_vrf
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
2015-05-22 11:39:56 +02:00
|
|
|
/* Identifier. */
|
|
|
|
vrf_id_t vrf_id;
|
2002-12-13 21:15:29 +01:00
|
|
|
|
|
|
|
/* Routing table name. */
|
|
|
|
char *name;
|
|
|
|
|
|
|
|
/* Description. */
|
|
|
|
char *desc;
|
|
|
|
|
|
|
|
/* FIB identifier. */
|
|
|
|
u_char fib_id;
|
|
|
|
|
|
|
|
/* Routing table. */
|
|
|
|
struct route_table *table[AFI_MAX][SAFI_MAX];
|
|
|
|
|
|
|
|
/* Static route configuration. */
|
|
|
|
struct route_table *stable[AFI_MAX][SAFI_MAX];
|
2015-05-20 02:40:34 +02:00
|
|
|
|
|
|
|
/* Recursive Nexthop table */
|
|
|
|
struct route_table *rnh_table[AFI_MAX];
|
2015-05-20 03:03:42 +02:00
|
|
|
|
2015-05-20 03:04:20 +02:00
|
|
|
/* Import check table (used mostly by BGP */
|
|
|
|
struct route_table *import_check_table[AFI_MAX];
|
|
|
|
|
2015-05-20 03:03:42 +02:00
|
|
|
/* Routing tables off of main table for redistribute table */
|
|
|
|
struct route_table *other_table[AFI_MAX][ZEBRA_KERNEL_TABLE_MAX];
|
2015-05-22 11:40:07 +02:00
|
|
|
|
2014-07-03 12:23:09 +02:00
|
|
|
#ifdef HAVE_NETLINK
|
|
|
|
struct nlsock netlink; /* kernel messages */
|
|
|
|
struct nlsock netlink_cmd; /* command channel */
|
|
|
|
struct thread *t_netlink;
|
|
|
|
#endif
|
|
|
|
|
2015-05-22 11:40:07 +02:00
|
|
|
/* 2nd pointer type used primarily to quell a warning on
|
|
|
|
* ALL_LIST_ELEMENTS_RO
|
|
|
|
*/
|
|
|
|
struct list _rid_all_sorted_list;
|
|
|
|
struct list _rid_lo_sorted_list;
|
|
|
|
struct list *rid_all_sorted_list;
|
|
|
|
struct list *rid_lo_sorted_list;
|
|
|
|
struct prefix rid_user_assigned;
|
2015-05-22 11:40:10 +02:00
|
|
|
|
2015-11-20 14:33:30 +01:00
|
|
|
#if defined (HAVE_RTADV)
|
2015-05-22 11:40:10 +02:00
|
|
|
struct rtadv rtadv;
|
2015-11-20 14:33:30 +01:00
|
|
|
#endif /* HAVE_RTADV */
|
2002-12-13 21:15:29 +01:00
|
|
|
};
|
|
|
|
|
2012-11-13 23:48:54 +01:00
|
|
|
/*
|
|
|
|
* rib_table_info_t
|
|
|
|
*
|
|
|
|
* Structure that is hung off of a route_table that holds information about
|
|
|
|
* the table.
|
|
|
|
*/
|
|
|
|
typedef struct rib_table_info_t_
|
|
|
|
{
|
|
|
|
|
|
|
|
/*
|
2015-05-22 11:39:56 +02:00
|
|
|
* Back pointer to zebra_vrf.
|
2012-11-13 23:48:54 +01:00
|
|
|
*/
|
2015-05-22 11:39:56 +02:00
|
|
|
struct zebra_vrf *zvrf;
|
2012-11-13 23:48:54 +01:00
|
|
|
afi_t afi;
|
|
|
|
safi_t safi;
|
|
|
|
|
|
|
|
} rib_table_info_t;
|
|
|
|
|
/* State of a table iteration, see rib_tables_iter_t below. */
typedef enum
{
  RIB_TABLES_ITER_S_INIT,       /* iteration not started yet */
  RIB_TABLES_ITER_S_ITERATING,  /* in the middle of walking tables */
  RIB_TABLES_ITER_S_DONE        /* all tables visited */
} rib_tables_iter_state_t;
|
|
|
|
/*
|
|
|
|
* Structure that holds state for iterating over all tables in the
|
|
|
|
* Routing Information Base.
|
|
|
|
*/
|
|
|
|
typedef struct rib_tables_iter_t_
|
|
|
|
{
|
2015-05-22 11:39:56 +02:00
|
|
|
vrf_id_t vrf_id;
|
2012-11-13 23:48:55 +01:00
|
|
|
int afi_safi_ix;
|
|
|
|
|
|
|
|
rib_tables_iter_state_t state;
|
|
|
|
} rib_tables_iter_t;
|
|
|
|
|
/* Constructors that allocate a nexthop of the given kind and append it to
 * the rib's nexthop list, returning the new nexthop. */
extern struct nexthop *rib_nexthop_ifindex_add (struct rib *, unsigned int);
extern struct nexthop *rib_nexthop_ifname_add (struct rib *, char *);
extern struct nexthop *rib_nexthop_blackhole_add (struct rib *);
extern struct nexthop *rib_nexthop_ipv4_add (struct rib *, struct in_addr *,
					     struct in_addr *);
extern struct nexthop *rib_nexthop_ipv4_ifindex_add (struct rib *,
						     struct in_addr *,
						     struct in_addr *,
						     unsigned int);
extern void rib_nexthop_add (struct rib *rib, struct nexthop *nexthop);
extern void rib_copy_nexthops (struct rib *rib, struct nexthop *nh);
|
|
2013-07-05 17:35:37 +02:00
|
|
|
extern int nexthop_has_fib_child(struct nexthop *);
|
2007-08-13 18:03:06 +02:00
|
|
|
extern void rib_lookup_and_dump (struct prefix_ipv4 *);
|
2008-02-26 15:02:24 +01:00
|
|
|
extern void rib_lookup_and_pushup (struct prefix_ipv4 *);
|
2013-10-22 19:10:21 +02:00
|
|
|
#define rib_dump(prefix ,rib) _rib_dump(__func__, prefix, rib)
|
|
|
|
extern void _rib_dump (const char *,
|
|
|
|
union prefix46constptr, const struct rib *);
|
2015-05-22 11:40:02 +02:00
|
|
|
extern int rib_lookup_ipv4_route (struct prefix_ipv4 *, union sockunion *,
|
|
|
|
vrf_id_t);
|
/* Return codes for rib_lookup_ipv4_route(). */
#define ZEBRA_RIB_LOOKUP_ERROR -1
#define ZEBRA_RIB_FOUND_EXACT 0
#define ZEBRA_RIB_FOUND_NOGATE 1
#define ZEBRA_RIB_FOUND_CONNECTED 2
#define ZEBRA_RIB_NOTFOUND 3
/* IPv6 counterparts of the nexthop constructors above. */
extern struct nexthop *rib_nexthop_ipv6_add (struct rib *, struct in6_addr *);
extern struct nexthop *rib_nexthop_ipv6_ifindex_add (struct rib *rib,
						     struct in6_addr *ipv6,
						     unsigned int ifindex);
extern struct nexthop *rib_nexthop_ipv6_ifname_add (struct rib *rib,
						    struct in6_addr *ipv6,
						    char *ifname);
|
2015-05-22 11:39:56 +02:00
|
|
|
extern struct zebra_vrf *zebra_vrf_lookup (vrf_id_t vrf_id);
|
|
|
|
extern struct zebra_vrf *zebra_vrf_alloc (vrf_id_t);
|
|
|
|
extern struct route_table *zebra_vrf_table (afi_t, safi_t, vrf_id_t);
|
|
|
|
extern struct route_table *zebra_vrf_static_table (afi_t, safi_t, vrf_id_t);
|
|
|
|
extern struct route_table *zebra_vrf_other_route_table (afi_t afi, u_int32_t table_id,
|
|
|
|
vrf_id_t vrf_id);
|
2015-05-20 03:03:42 +02:00
|
|
|
extern int is_zebra_valid_kernel_table(u_int32_t table_id);
|
|
|
|
extern int is_zebra_main_routing_table(u_int32_t table_id);
|
2015-09-16 08:48:00 +02:00
|
|
|
extern int zebra_check_addr (struct prefix *p);
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2005-09-24 16:00:26 +02:00
|
|
|
/* NOTE:
|
|
|
|
* All rib_add_ipv[46]* functions will not just add prefix into RIB, but
|
|
|
|
* also implicitly withdraw equal prefix of same type. */
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
extern int rib_add_ipv4 (int type, u_short instance, int flags, struct prefix_ipv4 *p,
|
2007-05-02 18:05:35 +02:00
|
|
|
struct in_addr *gate, struct in_addr *src,
|
2015-05-22 11:40:02 +02:00
|
|
|
unsigned int ifindex, vrf_id_t vrf_id, u_int32_t table_id,
|
2011-11-26 18:59:32 +01:00
|
|
|
u_int32_t, u_char, safi_t);
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2011-11-26 18:59:32 +01:00
|
|
|
extern int rib_add_ipv4_multipath (struct prefix_ipv4 *, struct rib *, safi_t);
|
2002-12-13 21:15:29 +01:00
|
|
|
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
extern int rib_delete_ipv4 (int type, u_short instance, int flags, struct prefix_ipv4 *p,
|
2005-06-28 19:17:12 +02:00
|
|
|
struct in_addr *gate, unsigned int ifindex,
|
2015-05-22 11:40:02 +02:00
|
|
|
vrf_id_t, u_int32_t, safi_t safi);
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2015-05-22 11:40:02 +02:00
|
|
|
extern struct rib *rib_match_ipv4 (struct in_addr, vrf_id_t);
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2015-05-22 11:40:02 +02:00
|
|
|
extern struct rib *rib_lookup_ipv4 (struct prefix_ipv4 *, vrf_id_t);
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2015-05-22 11:40:02 +02:00
|
|
|
extern void rib_update (vrf_id_t);
|
|
|
|
extern void rib_update_static (vrf_id_t);
|
2005-06-28 19:17:12 +02:00
|
|
|
extern void rib_weed_tables (void);
|
|
|
|
extern void rib_sweep_route (void);
|
2015-05-22 11:40:09 +02:00
|
|
|
extern void rib_close_table (struct route_table *);
|
2005-06-28 19:17:12 +02:00
|
|
|
extern void rib_close (void);
|
|
|
|
extern void rib_init (void);
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
extern unsigned long rib_score_proto (u_char proto, u_short instance);
|
2015-05-20 02:47:22 +02:00
|
|
|
struct zebra_t;
|
|
|
|
extern void rib_queue_add (struct zebra_t *zebra, struct route_node *rn);
|
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2005-06-28 19:17:12 +02:00
|
|
|
extern int
|
2004-10-12 22:50:58 +02:00
|
|
|
static_add_ipv4 (struct prefix *p, struct in_addr *gate, const char *ifname,
|
2015-05-22 11:40:02 +02:00
|
|
|
u_char flags, u_short tag, u_char distance, vrf_id_t vrf_id);
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2005-06-28 19:17:12 +02:00
|
|
|
extern int
|
2004-10-12 22:50:58 +02:00
|
|
|
static_delete_ipv4 (struct prefix *p, struct in_addr *gate, const char *ifname,
|
2015-05-22 11:40:02 +02:00
|
|
|
u_short tag, u_char distance, vrf_id_t vrf_id);
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2005-06-28 19:17:12 +02:00
|
|
|
extern int
|
Multi-Instance OSPF Summary
---------------------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to be able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that it's invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless it's a non-instance-specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are due to the needed integration with
some of the modified APIs/routines. (Didn't prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e. any previous way of running a single
ospf daemon (router ospf <cr>) will continue to work as is, including all
the show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd="yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the daemons are started as expected. You should see
ospfd started with the -n <instance-id> option.
ps -ef | grep quagga
With that, /var/run/quagga/ should have an ospfd-<instance-id>.pid and
an ospfd-<instance-id>/vty for each instance.
6. Use vtysh to work with instances as you would with any other daemons.
7. Overall, most quagga semantics are the same when working with an instance
daemon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on the number of instance-ids is in place; currently it's 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>' (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
rib_add_ipv6 (int type, u_short instance, int flags, struct prefix_ipv6 *p,
|
2015-05-22 11:40:02 +02:00
|
|
|
struct in6_addr *gate, unsigned int ifindex, vrf_id_t vrf_id,
|
|
|
|
u_int32_t table_id, u_int32_t metric, u_char distance, safi_t safi);
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2005-06-28 19:17:12 +02:00
|
|
|
extern int
|
Multi-Instance OSPF Summary
---------------------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to be able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that it's invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless it's a non-instance-specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are due to the needed integration with
some of the modified APIs/routines. (Didn't prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e. any previous way of running a single
ospf daemon (router ospf <cr>) will continue to work as is, including all
the show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd="yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the daemons are started as expected. You should see
ospfd started with the -n <instance-id> option.
ps -ef | grep quagga
With that, /var/run/quagga/ should have an ospfd-<instance-id>.pid and
an ospfd-<instance-id>/vty for each instance.
6. Use vtysh to work with instances as you would with any other daemons.
7. Overall, most quagga semantics are the same when working with an instance
daemon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on the number of instance-ids is in place; currently it's 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>' (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
rib_delete_ipv6 (int type, u_short instance, int flags, struct prefix_ipv6 *p,
|
2015-05-22 11:40:02 +02:00
|
|
|
struct in6_addr *gate, unsigned int ifindex, vrf_id_t vrf_id,
|
|
|
|
u_int32_t table_id, safi_t safi);
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2015-05-22 11:40:02 +02:00
|
|
|
extern struct rib *rib_lookup_ipv6 (struct in6_addr *, vrf_id_t);
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2015-05-22 11:40:02 +02:00
|
|
|
extern struct rib *rib_match_ipv6 (struct in6_addr *, vrf_id_t);
|
2002-12-13 21:15:29 +01:00
|
|
|
|
|
|
|
extern struct route_table *rib_table_ipv6;
|
|
|
|
|
2005-06-28 19:17:12 +02:00
|
|
|
extern int
|
2002-12-13 21:15:29 +01:00
|
|
|
static_add_ipv6 (struct prefix *p, u_char type, struct in6_addr *gate,
|
2015-05-20 02:46:33 +02:00
|
|
|
const char *ifname, u_char flags, u_short tag,
|
2015-05-22 11:40:02 +02:00
|
|
|
u_char distance, vrf_id_t vrf_id);
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2015-05-20 02:24:43 +02:00
|
|
|
extern int
|
2015-06-11 18:19:12 +02:00
|
|
|
rib_add_ipv6_multipath (struct prefix *, struct rib *, safi_t,
|
2015-05-20 02:24:43 +02:00
|
|
|
unsigned long);
|
|
|
|
|
2005-06-28 19:17:12 +02:00
|
|
|
extern int
|
2002-12-13 21:15:29 +01:00
|
|
|
static_delete_ipv6 (struct prefix *p, u_char type, struct in6_addr *gate,
|
2015-05-20 02:46:33 +02:00
|
|
|
const char *ifname, u_short tag, u_char distance,
|
2015-05-22 11:40:02 +02:00
|
|
|
vrf_id_t vrf_id);
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2012-11-13 23:48:53 +01:00
|
|
|
extern int rib_gc_dest (struct route_node *rn);
|
2012-11-13 23:48:55 +01:00
|
|
|
extern struct route_table *rib_tables_iter_next (rib_tables_iter_t *iter);
|
2012-11-13 23:48:53 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Inline functions.
|
|
|
|
*/
|
|
|
|
|
2012-11-13 23:48:54 +01:00
|
|
|
/*
|
|
|
|
* rib_table_info
|
|
|
|
*/
|
|
|
|
static inline rib_table_info_t *
|
|
|
|
rib_table_info (struct route_table *table)
|
|
|
|
{
|
|
|
|
return (rib_table_info_t *) table->info;
|
|
|
|
}
|
|
|
|
|
2012-11-13 23:48:53 +01:00
|
|
|
/*
|
|
|
|
* rib_dest_from_rnode
|
|
|
|
*/
|
|
|
|
static inline rib_dest_t *
|
|
|
|
rib_dest_from_rnode (struct route_node *rn)
|
|
|
|
{
|
|
|
|
return (rib_dest_t *) rn->info;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* rnode_to_ribs
|
|
|
|
*
|
|
|
|
* Returns a pointer to the list of routes corresponding to the given
|
|
|
|
* route_node.
|
|
|
|
*/
|
|
|
|
static inline struct rib *
|
|
|
|
rnode_to_ribs (struct route_node *rn)
|
|
|
|
{
|
|
|
|
rib_dest_t *dest;
|
|
|
|
|
|
|
|
dest = rib_dest_from_rnode (rn);
|
|
|
|
if (!dest)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
return dest->routes;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* rib_dest_prefix
|
|
|
|
*/
|
|
|
|
static inline struct prefix *
|
|
|
|
rib_dest_prefix (rib_dest_t *dest)
|
|
|
|
{
|
|
|
|
return &dest->rnode->p;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* rib_dest_af
|
|
|
|
*
|
|
|
|
* Returns the address family that the destination is for.
|
|
|
|
*/
|
|
|
|
static inline u_char
|
|
|
|
rib_dest_af (rib_dest_t *dest)
|
|
|
|
{
|
|
|
|
return dest->rnode->p.family;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* rib_dest_table
|
|
|
|
*/
|
|
|
|
static inline struct route_table *
|
|
|
|
rib_dest_table (rib_dest_t *dest)
|
|
|
|
{
|
|
|
|
return dest->rnode->table;
|
|
|
|
}
|
|
|
|
|
2012-11-13 23:48:54 +01:00
|
|
|
/*
|
|
|
|
* rib_dest_vrf
|
|
|
|
*/
|
2015-05-22 11:39:56 +02:00
|
|
|
static inline struct zebra_vrf *
|
2012-11-13 23:48:54 +01:00
|
|
|
rib_dest_vrf (rib_dest_t *dest)
|
|
|
|
{
|
2015-05-22 11:39:56 +02:00
|
|
|
return rib_table_info (rib_dest_table (dest))->zvrf;
|
2012-11-13 23:48:54 +01:00
|
|
|
}
|
|
|
|
|
2012-11-13 23:48:55 +01:00
|
|
|
/*
|
|
|
|
* rib_tables_iter_init
|
|
|
|
*/
|
|
|
|
static inline void
|
|
|
|
rib_tables_iter_init (rib_tables_iter_t *iter)
|
|
|
|
|
|
|
|
{
|
|
|
|
memset (iter, 0, sizeof (*iter));
|
|
|
|
iter->state = RIB_TABLES_ITER_S_INIT;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* rib_tables_iter_started
|
|
|
|
*
|
|
|
|
* Returns TRUE if this iterator has started iterating over the set of
|
|
|
|
* tables.
|
|
|
|
*/
|
|
|
|
static inline int
|
|
|
|
rib_tables_iter_started (rib_tables_iter_t *iter)
|
|
|
|
{
|
|
|
|
return iter->state != RIB_TABLES_ITER_S_INIT;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* rib_tables_iter_cleanup
|
|
|
|
*/
|
|
|
|
static inline void
|
|
|
|
rib_tables_iter_cleanup (rib_tables_iter_t *iter)
|
|
|
|
{
|
|
|
|
iter->state = RIB_TABLES_ITER_S_DONE;
|
|
|
|
}
|
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
#endif /*_ZEBRA_RIB_H */
|