2002-12-13 21:15:29 +01:00
|
|
|
/*
|
|
|
|
* Zebra connect library for OSPFd
|
|
|
|
* Copyright (C) 1997, 98, 99, 2000 Kunihiro Ishiguro, Toshiaki Takada
|
|
|
|
*
|
|
|
|
* This file is part of GNU Zebra.
|
|
|
|
*
|
|
|
|
* GNU Zebra is free software; you can redistribute it and/or modify it
|
|
|
|
* under the terms of the GNU General Public License as published by the
|
|
|
|
* Free Software Foundation; either version 2, or (at your option) any
|
|
|
|
* later version.
|
|
|
|
*
|
|
|
|
* GNU Zebra is distributed in the hope that it will be useful, but
|
|
|
|
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
|
|
* General Public License for more details.
|
|
|
|
*
|
2017-05-13 10:25:29 +02:00
|
|
|
* You should have received a copy of the GNU General Public License along
|
|
|
|
* with this program; see the file COPYING; if not, write to the Free Software
|
|
|
|
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
|
2002-12-13 21:15:29 +01:00
|
|
|
*/
|
|
|
|
|
|
|
|
#include <zebra.h>
|
|
|
|
|
|
|
|
#include "thread.h"
|
|
|
|
#include "command.h"
|
|
|
|
#include "network.h"
|
|
|
|
#include "prefix.h"
|
|
|
|
#include "routemap.h"
|
|
|
|
#include "table.h"
|
|
|
|
#include "stream.h"
|
|
|
|
#include "memory.h"
|
|
|
|
#include "zclient.h"
|
|
|
|
#include "filter.h"
|
2004-05-10 09:43:59 +02:00
|
|
|
#include "plist.h"
|
2002-12-13 21:15:29 +01:00
|
|
|
#include "log.h"
|
2016-06-21 12:39:58 +02:00
|
|
|
#include "lib/bfd.h"
|
2016-09-02 16:32:14 +02:00
|
|
|
#include "nexthop.h"
|
2002-12-13 21:15:29 +01:00
|
|
|
|
|
|
|
#include "ospfd/ospfd.h"
|
|
|
|
#include "ospfd/ospf_interface.h"
|
|
|
|
#include "ospfd/ospf_ism.h"
|
|
|
|
#include "ospfd/ospf_asbr.h"
|
|
|
|
#include "ospfd/ospf_asbr.h"
|
|
|
|
#include "ospfd/ospf_abr.h"
|
|
|
|
#include "ospfd/ospf_lsa.h"
|
|
|
|
#include "ospfd/ospf_dump.h"
|
|
|
|
#include "ospfd/ospf_route.h"
|
2015-05-20 02:47:23 +02:00
|
|
|
#include "ospfd/ospf_lsdb.h"
|
|
|
|
#include "ospfd/ospf_neighbor.h"
|
|
|
|
#include "ospfd/ospf_nsm.h"
|
2002-12-13 21:15:29 +01:00
|
|
|
#include "ospfd/ospf_zebra.h"
|
Update Traffic Engineering Support for OSPFD
NOTE: I am squashing several commits together because they
do not independently compile and we need this ability to
do any type of sane testing on the patches. Since this
series builds together I am doing this. -DBS
This new structure is the basis to get new link parameters for
Traffic Engineering from Zebra/interface layer to OSPFD and ISISD
for the support of Traffic Engineering
* lib/if.[c,h]: link parameters struture and get/set functions
* lib/command.[c,h]: creation of a new link-node
* lib/zclient.[c,h]: modification to the ZBUS message to convey the
link parameters structure
* lib/zebra.h: New ZBUS message
Signed-off-by: Olivier Dugeon <olivier.dugeon@orange.com>
Add support for IEEE 754 format
* lib/stream.[c,h]: Add stream_get{f,d} and stream_put{f,d}) demux and muxers to
safely convert between big-endian IEEE-754 single and double binary
format, as used in IETF RFCs, and C99. Implementation depends on host
using __STDC_IEC_559__, which should be everything we care about. Should
correctly error out otherwise.
* lib/network.[c,h]: Add ntohf and htonf converter
* lib/memtypes.c: Add new memeory type for Traffic Engineering support
Signed-off-by: Olivier Dugeon <olivier.dugeon@orange.com>
Add link parameters support to Zebra
* zebra/interface.c:
- Add new link-params CLI commands
- Add new functions to set/get link parameters for interface
* zebra/redistribute.[c,h]: Add new function to propagate link parameters
to routing daemon (essentially OSPFD and ISISD) for Traffic Engineering.
* zebra/redistribute_null.c: Add new function
zebra_interface_parameters_update()
* zebra/zserv.[c,h]: Add new functions to send link parameters
Signed-off-by: Olivier Dugeon <olivier.dugeon@orange.com>
Add support of new link-params CLI to vtysh
In vtysh_config.c/vtysh_config_parse_line(), it is not possible to continue
to use the ordered version for adding line i.e. config_add_line_uniq() to print
Interface CLI commands as it completely break the new LINK_PARAMS_NODE.
Signed-off-by: Olivier Dugeon <olivier.dugeon@orange.com>
Update Traffic Engineering support for OSPFD
These patches update original code to RFC3630 (OSPF-TE) and add support of
RFC5392 (Inter-AS v2) & RFC7471 (TE metric extensions) and partial support
of RFC6827 (ASON - GMPLS).
* ospfd/ospf_dump.[c,h]: Add new dump functions for Traffic Engineering
* ospfd/ospf_opaque.[c,h]: Add new TLV code points for RFC5392
* ospfd/ospf_packet.c: Update checking of OSPF_OPTION
* ospfd/ospf_vty.[c,h]: Update ospf_str2area_id
* ospfd/ospf_zebra.c: Add new function ospf_interface_link_params() to get
Link Parameters information from the interface to populate Traffic Engineering
metrics
* ospfd/ospfd.[c,h]: Update OSPF_OPTION flags (T -> MT and new DN)
* ospfd/ospf_te.[c,h]: Major modifications to update the code to new
link parameters structure and new RFCs
Signed-off-by: Olivier Dugeon <olivier.dugeon@orange.com>
tmp
2016-04-19 16:21:46 +02:00
|
|
|
#include "ospfd/ospf_te.h"
|
2020-06-02 19:24:46 +02:00
|
|
|
#include "ospfd/ospf_sr.h"
|
2020-07-22 19:31:14 +02:00
|
|
|
#include "ospfd/ospf_ldp_sync.h"
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2017-07-31 22:12:33 +02:00
|
|
|
DEFINE_MTYPE_STATIC(OSPFD, OSPF_EXTERNAL, "OSPF External route table");
|
|
|
|
/* NOTE(review): fixed typo in the displayed memory-type name ("Redistriute"). */
DEFINE_MTYPE_STATIC(OSPFD, OSPF_REDISTRIBUTE, "OSPF Redistribute");
|
|
|
|
|
2020-05-03 11:25:55 +02:00
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
/* Zebra structure to hold current status. */
|
|
|
|
struct zclient *zclient = NULL;
|
2020-06-16 16:49:38 +02:00
|
|
|
/* and for the Synchronous connection to the Label Manager */
|
|
|
|
static struct zclient *zclient_sync;
|
2002-12-13 21:15:29 +01:00
|
|
|
|
|
|
|
/* For registering threads. */
|
|
|
|
extern struct thread_master *master;
|
2004-10-03 20:18:34 +02:00
|
|
|
|
|
|
|
/* Router-id update message from zebra. */
|
2019-05-03 21:42:59 +02:00
|
|
|
static int ospf_router_id_update_zebra(ZAPI_CALLBACK_ARGS)
|
2004-10-03 20:18:34 +02:00
|
|
|
{
|
2017-08-25 22:51:12 +02:00
|
|
|
struct ospf *ospf = NULL;
|
2004-10-03 20:18:34 +02:00
|
|
|
struct prefix router_id;
|
|
|
|
zebra_router_id_update_read(zclient->ibuf, &router_id);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2020-10-18 13:33:54 +02:00
|
|
|
if (IS_DEBUG_OSPF(zebra, ZEBRA_INTERFACE))
|
|
|
|
zlog_debug("Zebra rcvd: router id update %pFX vrf %s id %u",
|
|
|
|
&router_id, ospf_vrf_id_to_name(vrf_id), vrf_id);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-08-25 22:51:12 +02:00
|
|
|
ospf = ospf_lookup_by_vrf_id(vrf_id);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-11-02 15:54:45 +01:00
|
|
|
if (ospf != NULL) {
|
|
|
|
ospf->router_id_zebra = router_id.u.prefix4;
|
2005-11-20 15:50:45 +01:00
|
|
|
ospf_router_id_update(ospf);
|
2017-11-02 15:54:45 +01:00
|
|
|
} else {
|
2020-10-18 13:33:54 +02:00
|
|
|
if (IS_DEBUG_OSPF_EVENT)
|
2017-08-25 22:51:12 +02:00
|
|
|
zlog_debug(
|
2020-10-18 13:33:54 +02:00
|
|
|
"%s: ospf instance not found for vrf %s id %u router_id %pFX",
|
2020-03-05 19:17:54 +01:00
|
|
|
__func__, ospf_vrf_id_to_name(vrf_id), vrf_id,
|
2020-10-18 13:33:54 +02:00
|
|
|
&router_id);
|
2017-08-25 22:51:12 +02:00
|
|
|
}
|
2004-10-03 20:18:34 +02:00
|
|
|
return 0;
|
|
|
|
}
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2019-05-03 21:42:59 +02:00
|
|
|
static int ospf_interface_address_add(ZAPI_CALLBACK_ARGS)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
|
|
|
struct connected *c;
|
2017-08-25 22:51:12 +02:00
|
|
|
struct ospf *ospf = NULL;
|
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2019-05-03 21:42:59 +02:00
|
|
|
c = zebra_interface_address_read(cmd, zclient->ibuf, vrf_id);
|
2002-12-13 21:15:29 +01:00
|
|
|
|
|
|
|
if (c == NULL)
|
|
|
|
return 0;
|
|
|
|
|
2020-10-18 13:33:54 +02:00
|
|
|
if (IS_DEBUG_OSPF(zebra, ZEBRA_INTERFACE))
|
|
|
|
zlog_debug("Zebra: interface %s address add %pFX vrf %s id %u",
|
|
|
|
c->ifp->name, c->address,
|
|
|
|
ospf_vrf_id_to_name(vrf_id), vrf_id);
|
2006-11-30 17:17:02 +01:00
|
|
|
|
2017-08-25 22:51:12 +02:00
|
|
|
ospf = ospf_lookup_by_vrf_id(vrf_id);
|
2017-09-07 17:08:09 +02:00
|
|
|
if (!ospf)
|
|
|
|
return 0;
|
2017-08-25 22:51:12 +02:00
|
|
|
|
|
|
|
ospf_if_update(ospf, c->ifp);
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2019-09-19 04:26:55 +02:00
|
|
|
ospf_if_interface(c->ifp);
|
2002-12-13 21:15:29 +01:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-05-03 21:42:59 +02:00
|
|
|
static int ospf_interface_address_delete(ZAPI_CALLBACK_ARGS)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
|
|
|
struct connected *c;
|
|
|
|
struct interface *ifp;
|
|
|
|
struct ospf_interface *oi;
|
|
|
|
struct route_node *rn;
|
|
|
|
struct prefix p;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2019-05-03 21:42:59 +02:00
|
|
|
c = zebra_interface_address_read(cmd, zclient->ibuf, vrf_id);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
if (c == NULL)
|
|
|
|
return 0;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2020-10-18 13:33:54 +02:00
|
|
|
if (IS_DEBUG_OSPF(zebra, ZEBRA_INTERFACE))
|
|
|
|
zlog_debug("Zebra: interface %s address delete %pFX",
|
|
|
|
c->ifp->name, c->address);
|
2006-11-30 17:17:02 +01:00
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
ifp = c->ifp;
|
|
|
|
p = *c->address;
|
2021-07-01 16:42:03 +02:00
|
|
|
p.prefixlen = IPV4_MAX_BITLEN;
|
2002-12-13 21:15:29 +01:00
|
|
|
|
|
|
|
rn = route_node_lookup(IF_OIFS(ifp), &p);
|
2003-06-19 04:13:25 +02:00
|
|
|
if (!rn) {
|
2019-10-30 01:16:28 +01:00
|
|
|
connected_free(&c);
|
2006-01-10 23:11:54 +01:00
|
|
|
return 0;
|
|
|
|
}
|
2002-12-13 21:15:29 +01:00
|
|
|
|
|
|
|
assert(rn->info);
|
|
|
|
oi = rn->info;
|
2010-03-08 13:58:09 +01:00
|
|
|
route_unlock_node(rn);
|
2003-06-19 04:13:25 +02:00
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
/* Call interface hook functions to clean up */
|
|
|
|
ospf_if_free(oi);
|
2003-06-19 04:13:25 +02:00
|
|
|
|
2019-09-19 04:26:55 +02:00
|
|
|
ospf_if_interface(c->ifp);
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2019-10-30 01:16:28 +01:00
|
|
|
connected_free(&c);
|
2002-12-13 21:15:29 +01:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
2003-06-19 04:11:23 +02:00
|
|
|
|
2019-05-03 21:42:59 +02:00
|
|
|
static int ospf_interface_link_params(ZAPI_CALLBACK_ARGS)
|
Update Traffic Engineering Support for OSPFD
NOTE: I am squashing several commits together because they
do not independently compile and we need this ability to
do any type of sane testing on the patches. Since this
series builds together I am doing this. -DBS
This new structure is the basis to get new link parameters for
Traffic Engineering from Zebra/interface layer to OSPFD and ISISD
for the support of Traffic Engineering
* lib/if.[c,h]: link parameters struture and get/set functions
* lib/command.[c,h]: creation of a new link-node
* lib/zclient.[c,h]: modification to the ZBUS message to convey the
link parameters structure
* lib/zebra.h: New ZBUS message
Signed-off-by: Olivier Dugeon <olivier.dugeon@orange.com>
Add support for IEEE 754 format
* lib/stream.[c,h]: Add stream_get{f,d} and stream_put{f,d}) demux and muxers to
safely convert between big-endian IEEE-754 single and double binary
format, as used in IETF RFCs, and C99. Implementation depends on host
using __STDC_IEC_559__, which should be everything we care about. Should
correctly error out otherwise.
* lib/network.[c,h]: Add ntohf and htonf converter
* lib/memtypes.c: Add new memeory type for Traffic Engineering support
Signed-off-by: Olivier Dugeon <olivier.dugeon@orange.com>
Add link parameters support to Zebra
* zebra/interface.c:
- Add new link-params CLI commands
- Add new functions to set/get link parameters for interface
* zebra/redistribute.[c,h]: Add new function to propagate link parameters
to routing daemon (essentially OSPFD and ISISD) for Traffic Engineering.
* zebra/redistribute_null.c: Add new function
zebra_interface_parameters_update()
* zebra/zserv.[c,h]: Add new functions to send link parameters
Signed-off-by: Olivier Dugeon <olivier.dugeon@orange.com>
Add support of new link-params CLI to vtysh
In vtysh_config.c/vtysh_config_parse_line(), it is not possible to continue
to use the ordered version for adding line i.e. config_add_line_uniq() to print
Interface CLI commands as it completely break the new LINK_PARAMS_NODE.
Signed-off-by: Olivier Dugeon <olivier.dugeon@orange.com>
Update Traffic Engineering support for OSPFD
These patches update original code to RFC3630 (OSPF-TE) and add support of
RFC5392 (Inter-AS v2) & RFC7471 (TE metric extensions) and partial support
of RFC6827 (ASON - GMPLS).
* ospfd/ospf_dump.[c,h]: Add new dump functions for Traffic Engineering
* ospfd/ospf_opaque.[c,h]: Add new TLV code points for RFC5392
* ospfd/ospf_packet.c: Update checking of OSPF_OPTION
* ospfd/ospf_vty.[c,h]: Update ospf_str2area_id
* ospfd/ospf_zebra.c: Add new function ospf_interface_link_params() to get
Link Parameters information from the interface to populate Traffic Engineering
metrics
* ospfd/ospfd.[c,h]: Update OSPF_OPTION flags (T -> MT and new DN)
* ospfd/ospf_te.[c,h]: Major modifications to update the code to new
link parameters structure and new RFCs
Signed-off-by: Olivier Dugeon <olivier.dugeon@orange.com>
tmp
2016-04-19 16:21:46 +02:00
|
|
|
{
|
|
|
|
struct interface *ifp;
|
2021-06-17 15:26:36 +02:00
|
|
|
bool changed = false;
|
Update Traffic Engineering Support for OSPFD
NOTE: I am squashing several commits together because they
do not independently compile and we need this ability to
do any type of sane testing on the patches. Since this
series builds together I am doing this. -DBS
This new structure is the basis to get new link parameters for
Traffic Engineering from Zebra/interface layer to OSPFD and ISISD
for the support of Traffic Engineering
* lib/if.[c,h]: link parameters struture and get/set functions
* lib/command.[c,h]: creation of a new link-node
* lib/zclient.[c,h]: modification to the ZBUS message to convey the
link parameters structure
* lib/zebra.h: New ZBUS message
Signed-off-by: Olivier Dugeon <olivier.dugeon@orange.com>
Add support for IEEE 754 format
* lib/stream.[c,h]: Add stream_get{f,d} and stream_put{f,d}) demux and muxers to
safely convert between big-endian IEEE-754 single and double binary
format, as used in IETF RFCs, and C99. Implementation depends on host
using __STDC_IEC_559__, which should be everything we care about. Should
correctly error out otherwise.
* lib/network.[c,h]: Add ntohf and htonf converter
* lib/memtypes.c: Add new memeory type for Traffic Engineering support
Signed-off-by: Olivier Dugeon <olivier.dugeon@orange.com>
Add link parameters support to Zebra
* zebra/interface.c:
- Add new link-params CLI commands
- Add new functions to set/get link parameters for interface
* zebra/redistribute.[c,h]: Add new function to propagate link parameters
to routing daemon (essentially OSPFD and ISISD) for Traffic Engineering.
* zebra/redistribute_null.c: Add new function
zebra_interface_parameters_update()
* zebra/zserv.[c,h]: Add new functions to send link parameters
Signed-off-by: Olivier Dugeon <olivier.dugeon@orange.com>
Add support of new link-params CLI to vtysh
In vtysh_config.c/vtysh_config_parse_line(), it is not possible to continue
to use the ordered version for adding line i.e. config_add_line_uniq() to print
Interface CLI commands as it completely break the new LINK_PARAMS_NODE.
Signed-off-by: Olivier Dugeon <olivier.dugeon@orange.com>
Update Traffic Engineering support for OSPFD
These patches update original code to RFC3630 (OSPF-TE) and add support of
RFC5392 (Inter-AS v2) & RFC7471 (TE metric extensions) and partial support
of RFC6827 (ASON - GMPLS).
* ospfd/ospf_dump.[c,h]: Add new dump functions for Traffic Engineering
* ospfd/ospf_opaque.[c,h]: Add new TLV code points for RFC5392
* ospfd/ospf_packet.c: Update checking of OSPF_OPTION
* ospfd/ospf_vty.[c,h]: Update ospf_str2area_id
* ospfd/ospf_zebra.c: Add new function ospf_interface_link_params() to get
Link Parameters information from the interface to populate Traffic Engineering
metrics
* ospfd/ospfd.[c,h]: Update OSPF_OPTION flags (T -> MT and new DN)
* ospfd/ospf_te.[c,h]: Major modifications to update the code to new
link parameters structure and new RFCs
Signed-off-by: Olivier Dugeon <olivier.dugeon@orange.com>
tmp
2016-04-19 16:21:46 +02:00
|
|
|
|
2021-06-17 15:26:36 +02:00
|
|
|
ifp = zebra_interface_link_params_read(zclient->ibuf, vrf_id, &changed);
|
Update Traffic Engineering Support for OSPFD
NOTE: I am squashing several commits together because they
do not independently compile and we need this ability to
do any type of sane testing on the patches. Since this
series builds together I am doing this. -DBS
This new structure is the basis to get new link parameters for
Traffic Engineering from Zebra/interface layer to OSPFD and ISISD
for the support of Traffic Engineering
* lib/if.[c,h]: link parameters struture and get/set functions
* lib/command.[c,h]: creation of a new link-node
* lib/zclient.[c,h]: modification to the ZBUS message to convey the
link parameters structure
* lib/zebra.h: New ZBUS message
Signed-off-by: Olivier Dugeon <olivier.dugeon@orange.com>
Add support for IEEE 754 format
* lib/stream.[c,h]: Add stream_get{f,d} and stream_put{f,d}) demux and muxers to
safely convert between big-endian IEEE-754 single and double binary
format, as used in IETF RFCs, and C99. Implementation depends on host
using __STDC_IEC_559__, which should be everything we care about. Should
correctly error out otherwise.
* lib/network.[c,h]: Add ntohf and htonf converter
* lib/memtypes.c: Add new memeory type for Traffic Engineering support
Signed-off-by: Olivier Dugeon <olivier.dugeon@orange.com>
Add link parameters support to Zebra
* zebra/interface.c:
- Add new link-params CLI commands
- Add new functions to set/get link parameters for interface
* zebra/redistribute.[c,h]: Add new function to propagate link parameters
to routing daemon (essentially OSPFD and ISISD) for Traffic Engineering.
* zebra/redistribute_null.c: Add new function
zebra_interface_parameters_update()
* zebra/zserv.[c,h]: Add new functions to send link parameters
Signed-off-by: Olivier Dugeon <olivier.dugeon@orange.com>
Add support of new link-params CLI to vtysh
In vtysh_config.c/vtysh_config_parse_line(), it is not possible to continue
to use the ordered version for adding line i.e. config_add_line_uniq() to print
Interface CLI commands as it completely break the new LINK_PARAMS_NODE.
Signed-off-by: Olivier Dugeon <olivier.dugeon@orange.com>
Update Traffic Engineering support for OSPFD
These patches update original code to RFC3630 (OSPF-TE) and add support of
RFC5392 (Inter-AS v2) & RFC7471 (TE metric extensions) and partial support
of RFC6827 (ASON - GMPLS).
* ospfd/ospf_dump.[c,h]: Add new dump functions for Traffic Engineering
* ospfd/ospf_opaque.[c,h]: Add new TLV code points for RFC5392
* ospfd/ospf_packet.c: Update checking of OSPF_OPTION
* ospfd/ospf_vty.[c,h]: Update ospf_str2area_id
* ospfd/ospf_zebra.c: Add new function ospf_interface_link_params() to get
Link Parameters information from the interface to populate Traffic Engineering
metrics
* ospfd/ospfd.[c,h]: Update OSPF_OPTION flags (T -> MT and new DN)
* ospfd/ospf_te.[c,h]: Major modifications to update the code to new
link parameters structure and new RFCs
Signed-off-by: Olivier Dugeon <olivier.dugeon@orange.com>
tmp
2016-04-19 16:21:46 +02:00
|
|
|
|
2021-06-17 15:26:36 +02:00
|
|
|
if (ifp == NULL || !changed)
|
Update Traffic Engineering Support for OSPFD
NOTE: I am squashing several commits together because they
do not independently compile and we need this ability to
do any type of sane testing on the patches. Since this
series builds together I am doing this. -DBS
This new structure is the basis to get new link parameters for
Traffic Engineering from Zebra/interface layer to OSPFD and ISISD
for the support of Traffic Engineering
* lib/if.[c,h]: link parameters struture and get/set functions
* lib/command.[c,h]: creation of a new link-node
* lib/zclient.[c,h]: modification to the ZBUS message to convey the
link parameters structure
* lib/zebra.h: New ZBUS message
Signed-off-by: Olivier Dugeon <olivier.dugeon@orange.com>
Add support for IEEE 754 format
* lib/stream.[c,h]: Add stream_get{f,d} and stream_put{f,d}) demux and muxers to
safely convert between big-endian IEEE-754 single and double binary
format, as used in IETF RFCs, and C99. Implementation depends on host
using __STDC_IEC_559__, which should be everything we care about. Should
correctly error out otherwise.
* lib/network.[c,h]: Add ntohf and htonf converter
* lib/memtypes.c: Add new memeory type for Traffic Engineering support
Signed-off-by: Olivier Dugeon <olivier.dugeon@orange.com>
Add link parameters support to Zebra
* zebra/interface.c:
- Add new link-params CLI commands
- Add new functions to set/get link parameters for interface
* zebra/redistribute.[c,h]: Add new function to propagate link parameters
to routing daemon (essentially OSPFD and ISISD) for Traffic Engineering.
* zebra/redistribute_null.c: Add new function
zebra_interface_parameters_update()
* zebra/zserv.[c,h]: Add new functions to send link parameters
Signed-off-by: Olivier Dugeon <olivier.dugeon@orange.com>
Add support of new link-params CLI to vtysh
In vtysh_config.c/vtysh_config_parse_line(), it is not possible to continue
to use the ordered version for adding line i.e. config_add_line_uniq() to print
Interface CLI commands as it completely break the new LINK_PARAMS_NODE.
Signed-off-by: Olivier Dugeon <olivier.dugeon@orange.com>
Update Traffic Engineering support for OSPFD
These patches update original code to RFC3630 (OSPF-TE) and add support of
RFC5392 (Inter-AS v2) & RFC7471 (TE metric extensions) and partial support
of RFC6827 (ASON - GMPLS).
* ospfd/ospf_dump.[c,h]: Add new dump functions for Traffic Engineering
* ospfd/ospf_opaque.[c,h]: Add new TLV code points for RFC5392
* ospfd/ospf_packet.c: Update checking of OSPF_OPTION
* ospfd/ospf_vty.[c,h]: Update ospf_str2area_id
* ospfd/ospf_zebra.c: Add new function ospf_interface_link_params() to get
Link Parameters information from the interface to populate Traffic Engineering
metrics
* ospfd/ospfd.[c,h]: Update OSPF_OPTION flags (T -> MT and new DN)
* ospfd/ospf_te.[c,h]: Major modifications to update the code to new
link parameters structure and new RFCs
Signed-off-by: Olivier Dugeon <olivier.dugeon@orange.com>
tmp
2016-04-19 16:21:46 +02:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* Update TE TLV */
|
|
|
|
ospf_mpls_te_update_if(ifp);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-08-25 22:51:12 +02:00
|
|
|
/* VRF update for an interface. */
|
2019-05-03 21:42:59 +02:00
|
|
|
static int ospf_interface_vrf_update(ZAPI_CALLBACK_ARGS)
|
2017-08-25 22:51:12 +02:00
|
|
|
{
|
|
|
|
struct interface *ifp = NULL;
|
|
|
|
vrf_id_t new_vrf_id;
|
Update Traffic Engineering Support for OSPFD
NOTE: I am squashing several commits together because they
do not independently compile and we need this ability to
do any type of sane testing on the patches. Since this
series builds together I am doing this. -DBS
This new structure is the basis to get new link parameters for
Traffic Engineering from Zebra/interface layer to OSPFD and ISISD
for the support of Traffic Engineering
* lib/if.[c,h]: link parameters struture and get/set functions
* lib/command.[c,h]: creation of a new link-node
* lib/zclient.[c,h]: modification to the ZBUS message to convey the
link parameters structure
* lib/zebra.h: New ZBUS message
Signed-off-by: Olivier Dugeon <olivier.dugeon@orange.com>
Add support for IEEE 754 format
* lib/stream.[c,h]: Add stream_get{f,d} and stream_put{f,d}) demux and muxers to
safely convert between big-endian IEEE-754 single and double binary
format, as used in IETF RFCs, and C99. Implementation depends on host
using __STDC_IEC_559__, which should be everything we care about. Should
correctly error out otherwise.
* lib/network.[c,h]: Add ntohf and htonf converter
* lib/memtypes.c: Add new memeory type for Traffic Engineering support
Signed-off-by: Olivier Dugeon <olivier.dugeon@orange.com>
Add link parameters support to Zebra
* zebra/interface.c:
- Add new link-params CLI commands
- Add new functions to set/get link parameters for interface
* zebra/redistribute.[c,h]: Add new function to propagate link parameters
to routing daemon (essentially OSPFD and ISISD) for Traffic Engineering.
* zebra/redistribute_null.c: Add new function
zebra_interface_parameters_update()
* zebra/zserv.[c,h]: Add new functions to send link parameters
Signed-off-by: Olivier Dugeon <olivier.dugeon@orange.com>
Add support of new link-params CLI to vtysh
In vtysh_config.c/vtysh_config_parse_line(), it is not possible to continue
to use the ordered version for adding line i.e. config_add_line_uniq() to print
Interface CLI commands as it completely break the new LINK_PARAMS_NODE.
Signed-off-by: Olivier Dugeon <olivier.dugeon@orange.com>
Update Traffic Engineering support for OSPFD
These patches update original code to RFC3630 (OSPF-TE) and add support of
RFC5392 (Inter-AS v2) & RFC7471 (TE metric extensions) and partial support
of RFC6827 (ASON - GMPLS).
* ospfd/ospf_dump.[c,h]: Add new dump functions for Traffic Engineering
* ospfd/ospf_opaque.[c,h]: Add new TLV code points for RFC5392
* ospfd/ospf_packet.c: Update checking of OSPF_OPTION
* ospfd/ospf_vty.[c,h]: Update ospf_str2area_id
* ospfd/ospf_zebra.c: Add new function ospf_interface_link_params() to get
Link Parameters information from the interface to populate Traffic Engineering
metrics
* ospfd/ospfd.[c,h]: Update OSPF_OPTION flags (T -> MT and new DN)
* ospfd/ospf_te.[c,h]: Major modifications to update the code to new
link parameters structure and new RFCs
Signed-off-by: Olivier Dugeon <olivier.dugeon@orange.com>
tmp
2016-04-19 16:21:46 +02:00
|
|
|
|
2017-08-25 22:51:12 +02:00
|
|
|
ifp = zebra_interface_vrf_update_read(zclient->ibuf, vrf_id,
|
|
|
|
&new_vrf_id);
|
|
|
|
if (!ifp)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (IS_DEBUG_OSPF_EVENT)
|
|
|
|
zlog_debug(
|
|
|
|
"%s: Rx Interface %s VRF change vrf_id %u New vrf %s id %u",
|
2020-03-05 19:17:54 +01:00
|
|
|
__func__, ifp->name, vrf_id,
|
2017-08-25 22:51:12 +02:00
|
|
|
ospf_vrf_id_to_name(new_vrf_id), new_vrf_id);
|
|
|
|
|
|
|
|
/*if_update(ifp, ifp->name, strlen(ifp->name), new_vrf_id);*/
|
2019-06-24 01:46:39 +02:00
|
|
|
if_update_to_new_vrf(ifp, new_vrf_id);
|
2017-08-25 22:51:12 +02:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-08-05 10:44:21 +02:00
|
|
|
/* Nexthop, ifindex, distance and metric information.
 *
 * Append one nexthop taken from PATH to the zapi route message API,
 * including a single TI-LFA backup nexthop when PATH carries a backup
 * label stack. Increments api->nexthop_num (and backup_nexthop_num when
 * a backup is added).
 */
static void ospf_zebra_add_nexthop(struct ospf *ospf, struct ospf_path *path,
				   struct zapi_route *api)
{
	struct zapi_nexthop *api_nh;
	struct zapi_nexthop *api_nh_backup;

	/* TI-LFA backup path label stack comes first, if present */
	if (path->srni.backup_label_stack) {
		api_nh_backup = &api->backup_nexthops[api->backup_nexthop_num];
		api_nh_backup->vrf_id = ospf->vrf_id;

		api_nh_backup->type = NEXTHOP_TYPE_IPV4;
		api_nh_backup->gate.ipv4 = path->srni.backup_nexthop;

		/* Copy the whole backup label stack into the message. */
		api_nh_backup->label_num =
			path->srni.backup_label_stack->num_labels;
		memcpy(api_nh_backup->labels,
		       path->srni.backup_label_stack->label,
		       sizeof(mpls_label_t) * api_nh_backup->label_num);

		api->backup_nexthop_num++;
	}

	/* And here comes the primary nexthop */
	api_nh = &api->nexthops[api->nexthop_num];
	/* With netlink, an unnumbered path is also sent as gateway+ifindex;
	 * note the `if` condition differs between the two preprocessor
	 * branches while the body below is shared. */
#ifdef HAVE_NETLINK
	if (path->unnumbered
	    || (path->nexthop.s_addr != INADDR_ANY && path->ifindex != 0)) {
#else  /* HAVE_NETLINK */
	if (path->nexthop.s_addr != INADDR_ANY && path->ifindex != 0) {
#endif /* HAVE_NETLINK */
		api_nh->gate.ipv4 = path->nexthop;
		api_nh->ifindex = path->ifindex;
		api_nh->type = NEXTHOP_TYPE_IPV4_IFINDEX;
	} else if (path->nexthop.s_addr != INADDR_ANY) {
		/* Gateway only, no usable ifindex. */
		api_nh->gate.ipv4 = path->nexthop;
		api_nh->type = NEXTHOP_TYPE_IPV4;
	} else {
		/* No gateway: interface (connected) nexthop. */
		api_nh->ifindex = path->ifindex;
		api_nh->type = NEXTHOP_TYPE_IFINDEX;
	}
	api_nh->vrf_id = ospf->vrf_id;

	/* Set TI-LFA backup nexthop info if present */
	if (path->srni.backup_label_stack) {
		SET_FLAG(api->message, ZAPI_MESSAGE_BACKUP_NEXTHOPS);
		SET_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_HAS_BACKUP);

		/* Just care about a single TI-LFA backup path for now */
		api_nh->backup_num = 1;
		api_nh->backup_idx[0] = api->backup_nexthop_num - 1;
	}

	api->nexthop_num++;
}
|
|
|
|
|
2017-08-25 22:51:12 +02:00
|
|
|
/* Install (or update) an OSPF route for prefix P in zebra's RIB.
 * Builds a zapi_route message from OR (metric, tag, distance, nexthops)
 * and sends it with ZEBRA_ROUTE_ADD. Skipped entirely while a graceful
 * restart is in progress, so the forwarding plane is left untouched.
 */
void ospf_zebra_add(struct ospf *ospf, struct prefix_ipv4 *p,
		    struct ospf_route * or)
{
	struct zapi_route api;
	uint8_t distance;
	struct ospf_path *path;
	struct listnode *node;

	/* During graceful restart, routes must not be (re)installed. */
	if (ospf->gr_info.restart_in_progress) {
		if (IS_DEBUG_OSPF_GR)
			zlog_debug(
				"Zebra: Graceful Restart in progress -- not installing %pFX",
				p);
		return;
	}

	memset(&api, 0, sizeof(api));
	api.vrf_id = ospf->vrf_id;
	api.type = ZEBRA_ROUTE_OSPF;
	api.instance = ospf->instance;
	api.safi = SAFI_UNICAST;

	memcpy(&api.prefix, p, sizeof(*p));
	SET_FLAG(api.message, ZAPI_MESSAGE_NEXTHOP);

	/* Metric value. */
	SET_FLAG(api.message, ZAPI_MESSAGE_METRIC);
	if (or->path_type == OSPF_PATH_TYPE1_EXTERNAL)
		api.metric = or->cost + or->u.ext.type2_cost;
	else if (or->path_type == OSPF_PATH_TYPE2_EXTERNAL)
		api.metric = or->u.ext.type2_cost;
	else
		api.metric = or->cost;

	/* Check if path type is ASE */
	if (((or->path_type == OSPF_PATH_TYPE1_EXTERNAL)
	     || (or->path_type == OSPF_PATH_TYPE2_EXTERNAL))
	    && (or->u.ext.tag > 0) && (or->u.ext.tag <= ROUTE_TAG_MAX)) {
		/* Propagate the external route tag only when it is valid. */
		SET_FLAG(api.message, ZAPI_MESSAGE_TAG);
		api.tag = or->u.ext.tag;
	}

	/* Distance value. */
	distance = ospf_distance_apply(ospf, p, or);
	if (distance) {
		SET_FLAG(api.message, ZAPI_MESSAGE_DISTANCE);
		api.distance = distance;
	}

	/* Add nexthops, capped at the configured maximum-paths limit. */
	for (ALL_LIST_ELEMENTS_RO(or->paths, node, path)) {
		if (api.nexthop_num >= ospf->max_multipath)
			break;

		ospf_zebra_add_nexthop(ospf, path, &api);

		if (IS_DEBUG_OSPF(zebra, ZEBRA_REDISTRIBUTE)) {
			struct interface *ifp;

			ifp = if_lookup_by_index(path->ifindex, ospf->vrf_id);

			zlog_debug(
				"Zebra: Route add %pFX nexthop %pI4, ifindex=%d %s",
				p, &path->nexthop, path->ifindex,
				ifp ? ifp->name : " ");
		}
	}

	zclient_route_send(ZEBRA_ROUTE_ADD, zclient, &api);
}
|
|
|
|
|
2017-08-25 22:51:12 +02:00
|
|
|
void ospf_zebra_delete(struct ospf *ospf, struct prefix_ipv4 *p,
|
|
|
|
struct ospf_route * or)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
2017-08-21 02:18:58 +02:00
|
|
|
struct zapi_route api;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2021-05-31 15:27:51 +02:00
|
|
|
if (ospf->gr_info.restart_in_progress) {
|
|
|
|
if (IS_DEBUG_OSPF_GR)
|
|
|
|
zlog_debug(
|
|
|
|
"Zebra: Graceful Restart in progress -- not uninstalling %pFX",
|
|
|
|
p);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2017-08-21 02:18:58 +02:00
|
|
|
memset(&api, 0, sizeof(api));
|
2017-08-25 22:51:12 +02:00
|
|
|
api.vrf_id = ospf->vrf_id;
|
2017-08-21 02:18:58 +02:00
|
|
|
api.type = ZEBRA_ROUTE_OSPF;
|
|
|
|
api.instance = ospf->instance;
|
|
|
|
api.safi = SAFI_UNICAST;
|
|
|
|
memcpy(&api.prefix, p, sizeof(*p));
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2020-10-18 13:33:54 +02:00
|
|
|
if (IS_DEBUG_OSPF(zebra, ZEBRA_REDISTRIBUTE))
|
|
|
|
zlog_debug("Zebra: Route delete %pFX", p);
|
2017-08-12 18:03:29 +02:00
|
|
|
|
2017-08-21 02:18:58 +02:00
|
|
|
zclient_route_send(ZEBRA_ROUTE_DELETE, zclient, &api);
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
|
|
|
|
2017-08-25 22:51:12 +02:00
|
|
|
void ospf_zebra_add_discard(struct ospf *ospf, struct prefix_ipv4 *p)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
2017-08-21 02:18:58 +02:00
|
|
|
struct zapi_route api;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2021-05-31 15:27:51 +02:00
|
|
|
if (ospf->gr_info.restart_in_progress) {
|
|
|
|
if (IS_DEBUG_OSPF_GR)
|
|
|
|
zlog_debug(
|
|
|
|
"Zebra: Graceful Restart in progress -- not installing %pFX",
|
|
|
|
p);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2017-08-21 02:18:58 +02:00
|
|
|
memset(&api, 0, sizeof(api));
|
2017-08-25 22:51:12 +02:00
|
|
|
api.vrf_id = ospf->vrf_id;
|
2017-08-12 18:03:29 +02:00
|
|
|
api.type = ZEBRA_ROUTE_OSPF;
|
|
|
|
api.instance = ospf->instance;
|
|
|
|
api.safi = SAFI_UNICAST;
|
2017-08-21 02:18:58 +02:00
|
|
|
memcpy(&api.prefix, p, sizeof(*p));
|
2012-04-11 23:52:46 +02:00
|
|
|
zapi_route_set_blackhole(&api, BLACKHOLE_NULL);
|
2017-08-12 18:03:29 +02:00
|
|
|
|
2017-08-21 02:18:58 +02:00
|
|
|
zclient_route_send(ZEBRA_ROUTE_ADD, zclient, &api);
|
2017-08-12 18:03:29 +02:00
|
|
|
|
2020-10-18 13:33:54 +02:00
|
|
|
if (IS_DEBUG_OSPF(zebra, ZEBRA_REDISTRIBUTE))
|
|
|
|
zlog_debug("Zebra: Route add discard %pFX", p);
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
|
|
|
|
2017-08-25 22:51:12 +02:00
|
|
|
void ospf_zebra_delete_discard(struct ospf *ospf, struct prefix_ipv4 *p)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
2017-08-21 02:18:58 +02:00
|
|
|
struct zapi_route api;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2021-05-31 15:27:51 +02:00
|
|
|
if (ospf->gr_info.restart_in_progress) {
|
|
|
|
if (IS_DEBUG_OSPF_GR)
|
|
|
|
zlog_debug(
|
|
|
|
"Zebra: Graceful Restart in progress -- not uninstalling %pFX",
|
|
|
|
p);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2017-08-21 02:18:58 +02:00
|
|
|
memset(&api, 0, sizeof(api));
|
2017-08-25 22:51:12 +02:00
|
|
|
api.vrf_id = ospf->vrf_id;
|
2017-08-12 18:03:29 +02:00
|
|
|
api.type = ZEBRA_ROUTE_OSPF;
|
|
|
|
api.instance = ospf->instance;
|
|
|
|
api.safi = SAFI_UNICAST;
|
2017-08-21 02:18:58 +02:00
|
|
|
memcpy(&api.prefix, p, sizeof(*p));
|
2012-04-11 23:52:46 +02:00
|
|
|
zapi_route_set_blackhole(&api, BLACKHOLE_NULL);
|
2017-08-12 18:03:29 +02:00
|
|
|
|
2017-08-21 02:18:58 +02:00
|
|
|
zclient_route_send(ZEBRA_ROUTE_DELETE, zclient, &api);
|
2017-08-12 18:03:29 +02:00
|
|
|
|
2020-10-18 13:33:54 +02:00
|
|
|
if (IS_DEBUG_OSPF(zebra, ZEBRA_REDISTRIBUTE))
|
|
|
|
zlog_debug("Zebra: Route delete discard %pFX", p);
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
|
|
|
|
2018-03-27 21:13:34 +02:00
|
|
|
struct ospf_external *ospf_external_lookup(struct ospf *ospf, uint8_t type,
|
|
|
|
unsigned short instance)
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
{
|
|
|
|
struct list *ext_list;
|
|
|
|
struct listnode *node;
|
|
|
|
struct ospf_external *ext;
|
|
|
|
|
2017-11-21 02:21:03 +01:00
|
|
|
ext_list = ospf->external[type];
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
if (!ext_list)
|
|
|
|
return (NULL);
|
|
|
|
|
|
|
|
for (ALL_LIST_ELEMENTS_RO(ext_list, node, ext))
|
|
|
|
if (ext->instance == instance)
|
|
|
|
return ext;
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2018-03-27 21:13:34 +02:00
|
|
|
struct ospf_external *ospf_external_add(struct ospf *ospf, uint8_t type,
|
|
|
|
unsigned short instance)
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
{
|
|
|
|
struct list *ext_list;
|
|
|
|
struct ospf_external *ext;
|
|
|
|
|
2017-11-21 02:21:03 +01:00
|
|
|
ext = ospf_external_lookup(ospf, type, instance);
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
if (ext)
|
|
|
|
return ext;
|
|
|
|
|
2017-11-21 02:21:03 +01:00
|
|
|
if (!ospf->external[type])
|
|
|
|
ospf->external[type] = list_new();
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
|
2017-11-21 02:21:03 +01:00
|
|
|
ext_list = ospf->external[type];
|
2019-02-25 21:30:31 +01:00
|
|
|
ext = XCALLOC(MTYPE_OSPF_EXTERNAL, sizeof(struct ospf_external));
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
ext->instance = instance;
|
|
|
|
EXTERNAL_INFO(ext) = route_table_init();
|
|
|
|
|
|
|
|
listnode_add(ext_list, ext);
|
|
|
|
|
|
|
|
return ext;
|
|
|
|
}
|
|
|
|
|
2020-05-03 11:25:55 +02:00
|
|
|
/*
|
|
|
|
* Walk all the ei received from zebra for a route type and apply
|
|
|
|
* default route-map.
|
|
|
|
*/
|
|
|
|
bool ospf_external_default_routemap_apply_walk(struct ospf *ospf,
|
|
|
|
struct list *ext_list,
|
|
|
|
struct external_info *default_ei)
|
|
|
|
{
|
|
|
|
struct listnode *node;
|
|
|
|
struct ospf_external *ext;
|
|
|
|
struct route_node *rn;
|
|
|
|
struct external_info *ei = NULL;
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
for (ALL_LIST_ELEMENTS_RO(ext_list, node, ext)) {
|
|
|
|
if (!ext->external_info)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
for (rn = route_top(ext->external_info); rn;
|
|
|
|
rn = route_next(rn)) {
|
|
|
|
ei = rn->info;
|
|
|
|
if (!ei)
|
|
|
|
continue;
|
|
|
|
ret = ospf_external_info_apply_default_routemap(
|
|
|
|
ospf, ei, default_ei);
|
|
|
|
if (ret)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (ret && ei) {
|
|
|
|
if (IS_DEBUG_OSPF_DEFAULT_INFO)
|
2020-10-21 19:56:26 +02:00
|
|
|
zlog_debug("Default originate routemap permit ei: %pI4",
|
|
|
|
&ei->p.prefix);
|
2020-05-03 11:25:55 +02:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Function to originate or flush default after applying
|
|
|
|
* route-map on all ei.
|
|
|
|
*/
|
|
|
|
static int ospf_external_lsa_default_routemap_timer(struct thread *thread)
{
	struct list *ext_list;
	struct ospf *ospf = THREAD_ARG(thread);
	struct prefix_ipv4 p;
	int type;
	int ret = 0;
	struct ospf_lsa *lsa;
	struct external_info *default_ei;

	/* The default route key: 0.0.0.0/0. */
	p.family = AF_INET;
	p.prefixlen = 0;
	p.prefix.s_addr = INADDR_ANY;

	/* Get the default external info. */
	default_ei = ospf_external_info_lookup(ospf, DEFAULT_ROUTE,
					       ospf->instance, &p);
	if (!default_ei) {
		/* Nothing to be done here. */
		if (IS_DEBUG_OSPF_DEFAULT_INFO)
			zlog_debug("Default originate info not present");
		return 0;
	}

	/* For all the ei apply route-map.  DEFAULT_ROUTE itself lives at
	 * index ZEBRA_ROUTE_MAX, hence the inclusive upper bound; OSPF's
	 * own routes are never matched against the default route-map. */
	for (type = 0; type <= ZEBRA_ROUTE_MAX; type++) {
		ext_list = ospf->external[type];
		if (!ext_list || type == ZEBRA_ROUTE_OSPF)
			continue;

		ret = ospf_external_default_routemap_apply_walk(ospf, ext_list,
								default_ei);
		if (ret)
			break;
	}

	/* Get the default LSA. */
	lsa = ospf_external_info_find_lsa(ospf, &p);

	/* If permit then originate default; refresh instead when a stale
	 * (max-aged) copy is still present; flush on deny if one exists. */
	if (ret && !lsa)
		ospf_external_lsa_originate(ospf, default_ei);
	else if (ret && lsa && IS_LSA_MAXAGE(lsa))
		ospf_external_lsa_refresh(ospf, lsa, default_ei, true, false);
	else if (!ret && lsa)
		ospf_external_lsa_flush(ospf, DEFAULT_ROUTE, &default_ei->p, 0);

	return 1;
}
|
|
|
|
|
|
|
|
|
2018-03-27 21:13:34 +02:00
|
|
|
void ospf_external_del(struct ospf *ospf, uint8_t type, unsigned short instance)
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
{
|
|
|
|
struct ospf_external *ext;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-11-21 02:21:03 +01:00
|
|
|
ext = ospf_external_lookup(ospf, type, instance);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
if (ext) {
|
|
|
|
if (EXTERNAL_INFO(ext))
|
|
|
|
route_table_finish(EXTERNAL_INFO(ext));
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-11-21 02:21:03 +01:00
|
|
|
listnode_delete(ospf->external[type], ext);
|
|
|
|
|
|
|
|
if (!ospf->external[type]->count)
|
2018-10-02 11:39:51 +02:00
|
|
|
list_delete(&ospf->external[type]);
|
2017-11-21 02:21:03 +01:00
|
|
|
|
2017-07-13 22:33:29 +02:00
|
|
|
XFREE(MTYPE_OSPF_EXTERNAL, ext);
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
}
|
2020-05-03 11:25:55 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Check if default needs to be flushed too.
|
|
|
|
*/
|
|
|
|
thread_add_event(master, ospf_external_lsa_default_routemap_timer, ospf,
|
|
|
|
0, &ospf->t_default_routemap_timer);
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
}
|
|
|
|
|
2020-06-02 19:24:46 +02:00
|
|
|
/* Update NHLFE for Prefix SID */
void ospf_zebra_update_prefix_sid(const struct sr_prefix *srp)
{
	struct zapi_labels zl;
	struct zapi_nexthop *znh;
	struct zapi_nexthop *znh_backup;
	struct listnode *node;
	struct ospf_path *path;

	/* Prepare message. */
	memset(&zl, 0, sizeof(zl));
	zl.type = ZEBRA_LSP_OSPF_SR;
	zl.local_label = srp->label_in;

	switch (srp->type) {
	case LOCAL_SID:
		/* Set Label for local Prefix */
		/* A locally-attached prefix needs a single interface nexthop
		 * carrying the outgoing label from the NHLFE. */
		znh = &zl.nexthops[zl.nexthop_num++];
		znh->type = NEXTHOP_TYPE_IFINDEX;
		znh->ifindex = srp->nhlfe.ifindex;
		znh->label_num = 1;
		znh->labels[0] = srp->nhlfe.label_out;

		osr_debug("SR (%s): Configure Prefix %pFX with labels %u/%u",
			  __func__, (struct prefix *)&srp->prefv4,
			  srp->label_in, srp->nhlfe.label_out);

		break;

	case PREF_SID:
		/* Update route in the RIB too. */
		SET_FLAG(zl.message, ZAPI_LABELS_FTN);
		zl.route.prefix.u.prefix4 = srp->prefv4.prefix;
		zl.route.prefix.prefixlen = srp->prefv4.prefixlen;
		zl.route.prefix.family = srp->prefv4.family;
		zl.route.type = ZEBRA_ROUTE_OSPF;
		zl.route.instance = 0;

		/* Check that SRP contains at least one valid path */
		if (srp->route == NULL) {
			return;
		}

		osr_debug("SR (%s): Configure Prefix %pFX with",
			  __func__, (struct prefix *)&srp->prefv4);

		/* One zapi nexthop per usable ECMP path, capped at
		 * MULTIPATH_NUM. Paths without a valid outgoing label are
		 * skipped. */
		for (ALL_LIST_ELEMENTS_RO(srp->route->paths, node, path)) {
			if (path->srni.label_out == MPLS_INVALID_LABEL)
				continue;

			if (zl.nexthop_num >= MULTIPATH_NUM)
				break;

			/*
			 * TI-LFA backup path label stack comes first, if
			 * present.
			 */
			if (path->srni.backup_label_stack) {
				/* Append the backup nexthop; its index is
				 * referenced below via backup_idx[0]. */
				znh_backup = &zl.backup_nexthops
						     [zl.backup_nexthop_num++];
				znh_backup->type = NEXTHOP_TYPE_IPV4;
				znh_backup->gate.ipv4 =
					path->srni.backup_nexthop;

				/* Copy the whole repair label stack ... */
				memcpy(znh_backup->labels,
				       path->srni.backup_label_stack->label,
				       sizeof(mpls_label_t)
					       * path->srni.backup_label_stack
							 ->num_labels);

				znh_backup->label_num =
					path->srni.backup_label_stack
						->num_labels;
				/* ... then append the primary outgoing label,
				 * unless it is explicit/implicit NULL (those
				 * must not appear inside a stack). */
				if (path->srni.label_out
					    != MPLS_LABEL_IPV4_EXPLICIT_NULL
				    && path->srni.label_out
						!= MPLS_LABEL_IMPLICIT_NULL)
					znh_backup->labels
						[znh_backup->label_num++] =
						path->srni.label_out;
			}

			/* Primary nexthop for this path. */
			znh = &zl.nexthops[zl.nexthop_num++];
			znh->type = NEXTHOP_TYPE_IPV4_IFINDEX;
			znh->gate.ipv4 = path->nexthop;
			znh->ifindex = path->ifindex;
			znh->label_num = 1;
			znh->labels[0] = path->srni.label_out;

			osr_debug(" |- labels %u/%u", srp->label_in,
				  path->srni.label_out);

			/* Set TI-LFA backup nexthop info if present */
			if (path->srni.backup_label_stack) {
				SET_FLAG(zl.message, ZAPI_LABELS_HAS_BACKUPS);
				SET_FLAG(znh->flags,
					 ZAPI_NEXTHOP_FLAG_HAS_BACKUP);

				/* Just care about a single TI-LFA backup path
				 * for now */
				znh->backup_num = 1;
				znh->backup_idx[0] = zl.backup_nexthop_num - 1;
			}
		}
		break;
	case ADJ_SID:
	case LAN_ADJ_SID:
		/* Adjacency SIDs are handled by
		 * ospf_zebra_send_adjacency_sid(); nothing to do here. */
		return;
	}

	/* Finally, send message to zebra. */
	(void)zebra_send_mpls_labels(zclient, ZEBRA_MPLS_LABELS_REPLACE, &zl);
}
|
|
|
|
|
|
|
|
/* Remove NHLFE for Prefix-SID */
|
|
|
|
void ospf_zebra_delete_prefix_sid(const struct sr_prefix *srp)
|
|
|
|
{
|
|
|
|
struct zapi_labels zl;
|
|
|
|
|
|
|
|
osr_debug("SR (%s): Delete Labels %u for Prefix %pFX", __func__,
|
|
|
|
srp->label_in, (struct prefix *)&srp->prefv4);
|
|
|
|
|
|
|
|
/* Prepare message. */
|
|
|
|
memset(&zl, 0, sizeof(zl));
|
|
|
|
zl.type = ZEBRA_LSP_OSPF_SR;
|
|
|
|
zl.local_label = srp->label_in;
|
|
|
|
|
|
|
|
if (srp->type == PREF_SID) {
|
|
|
|
/* Update route in the RIB too */
|
|
|
|
SET_FLAG(zl.message, ZAPI_LABELS_FTN);
|
|
|
|
zl.route.prefix.u.prefix4 = srp->prefv4.prefix;
|
|
|
|
zl.route.prefix.prefixlen = srp->prefv4.prefixlen;
|
|
|
|
zl.route.prefix.family = srp->prefv4.family;
|
|
|
|
zl.route.type = ZEBRA_ROUTE_OSPF;
|
|
|
|
zl.route.instance = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Send message to zebra. */
|
|
|
|
(void)zebra_send_mpls_labels(zclient, ZEBRA_MPLS_LABELS_DELETE, &zl);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Send MPLS Label entry to Zebra for installation or deletion */
|
|
|
|
void ospf_zebra_send_adjacency_sid(int cmd, struct sr_nhlfe nhlfe)
|
|
|
|
{
|
|
|
|
struct zapi_labels zl;
|
|
|
|
struct zapi_nexthop *znh;
|
|
|
|
|
|
|
|
osr_debug("SR (%s): %s Labels %u/%u for Adjacency via %u", __func__,
|
|
|
|
cmd == ZEBRA_MPLS_LABELS_ADD ? "Add" : "Delete",
|
|
|
|
nhlfe.label_in, nhlfe.label_out, nhlfe.ifindex);
|
|
|
|
|
|
|
|
memset(&zl, 0, sizeof(zl));
|
|
|
|
zl.type = ZEBRA_LSP_OSPF_SR;
|
|
|
|
zl.local_label = nhlfe.label_in;
|
|
|
|
zl.nexthop_num = 1;
|
|
|
|
znh = &zl.nexthops[0];
|
|
|
|
znh->type = NEXTHOP_TYPE_IPV4_IFINDEX;
|
|
|
|
znh->gate.ipv4 = nhlfe.nexthop;
|
|
|
|
znh->ifindex = nhlfe.ifindex;
|
|
|
|
znh->label_num = 1;
|
|
|
|
znh->labels[0] = nhlfe.label_out;
|
|
|
|
|
|
|
|
(void)zebra_send_mpls_labels(zclient, cmd, &zl);
|
|
|
|
}
|
|
|
|
|
2018-03-27 21:13:34 +02:00
|
|
|
/*
 * Look up the redistribution entry for (type, instance) on this OSPF
 * process.  Returns NULL when redistribution of that protocol
 * type/instance pair has not been configured.
 */
struct ospf_redist *ospf_redist_lookup(struct ospf *ospf, uint8_t type,
				       unsigned short instance)
{
	struct listnode *ln;
	struct ospf_redist *entry;
	struct list *redists = ospf->redist[type];

	/* No list means nothing of this type was ever configured. */
	if (redists == NULL)
		return NULL;

	for (ALL_LIST_ELEMENTS_RO(redists, ln, entry)) {
		if (entry->instance == instance)
			return entry;
	}

	return NULL;
}
|
|
|
|
|
2018-03-27 21:13:34 +02:00
|
|
|
/*
 * Get-or-create the redistribution entry for (type, instance).
 * If an entry already exists it is returned unchanged; otherwise a new
 * one is allocated, initialized, added to the per-type list and
 * returned.  Never returns NULL (allocation aborts on failure).
 */
struct ospf_redist *ospf_redist_add(struct ospf *ospf, uint8_t type,
				    unsigned short instance)
{
	struct ospf_redist *entry;

	/* Reuse an already-configured entry if present. */
	entry = ospf_redist_lookup(ospf, type, instance);
	if (entry != NULL)
		return entry;

	/* Lazily create the per-type list on first use. */
	if (ospf->redist[type] == NULL)
		ospf->redist[type] = list_new();

	entry = XCALLOC(MTYPE_OSPF_REDISTRIBUTE, sizeof(struct ospf_redist));
	entry->instance = instance;
	/* Metric defaults: -1 in both fields (presumably "unset" to the
	 * metric consumers — NOTE(review): confirm against callers). */
	entry->dmetric.type = -1;
	entry->dmetric.value = -1;
	/* No route-map attached initially (XCALLOC already zeroed these;
	 * kept explicit to match the original behavior). */
	ROUTEMAP_NAME(entry) = NULL;
	ROUTEMAP(entry) = NULL;

	listnode_add(ospf->redist[type], entry);

	return entry;
}
|
|
|
|
|
2018-03-27 21:13:34 +02:00
|
|
|
void ospf_redist_del(struct ospf *ospf, uint8_t type, unsigned short instance)
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
{
|
|
|
|
struct ospf_redist *red;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
red = ospf_redist_lookup(ospf, type, instance);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
if (red) {
|
|
|
|
listnode_delete(ospf->redist[type], red);
|
|
|
|
if (!ospf->redist[type]->count) {
|
2018-10-02 11:39:51 +02:00
|
|
|
list_delete(&ospf->redist[type]);
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
}
|
2017-06-23 02:19:00 +02:00
|
|
|
ospf_routemap_unset(red);
|
2017-07-13 22:33:29 +02:00
|
|
|
XFREE(MTYPE_OSPF_REDISTRIBUTE, red);
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-03-27 21:13:34 +02:00
|
|
|
int ospf_is_type_redistributed(struct ospf *ospf, int type,
|
|
|
|
unsigned short instance)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
return (DEFAULT_ROUTE_TYPE(type)
|
2019-01-11 22:20:13 +01:00
|
|
|
? vrf_bitmap_check(zclient->default_information[AFI_IP],
|
2017-08-25 22:51:12 +02:00
|
|
|
ospf->vrf_id)
|
*: add VRF ID in the API message header
The API messages are used by zebra to exchange the interfaces, addresses,
routes and router-id information with its clients. To distinguish which
VRF the information belongs to, a new field "VRF ID" is added in the
message header. And hence the message version is increased to 3.
* The new field "VRF ID" in the message header:
Length (2 bytes)
Marker (1 byte)
Version (1 byte)
VRF ID (2 bytes, newly added)
Command (2 bytes)
- Client side:
- zclient_create_header() adds the VRF ID in the message header.
- zclient_read() extracts and validates the VRF ID from the header,
and passes the VRF ID to the callback functions registered to
the API messages.
- All relative functions are appended with a new parameter "vrf_id",
including all the callback functions.
- "vrf_id" is also added to "struct zapi_ipv4" and "struct zapi_ipv6".
Clients need to correctly set the VRF ID when using the API
functions zapi_ipv4_route() and zapi_ipv6_route().
- Till now all messages sent from a client have the default VRF ID
"0" in the header.
- The HELLO message is special, which is used as the heart-beat of
a client, and has no relation with VRF. The VRF ID in the HELLO
message header will always be 0 and ignored by zebra.
- Zebra side:
- zserv_create_header() adds the VRF ID in the message header.
- zebra_client_read() extracts and validates the VRF ID from the
header, and passes the VRF ID to the functions which process
the received messages.
- All relative functions are appended with a new parameter "vrf_id".
* Suppress the messages in a VRF which a client does not care:
Some clients may not care about the information in the VRF X, and
zebra should not send the messages in the VRF X to those clients.
Extra flags are used to indicate which VRF is registered by a client,
and a new message ZEBRA_VRF_UNREGISTER is introduced to let a client
can unregister a VRF when it does not need any information in that
VRF.
A client sends any message other than ZEBRA_VRF_UNREGISTER in a VRF
will automatically register to that VRF.
- lib/vrf:
A new utility "VRF bit-map" is provided to manage the flags for
VRFs, one bit per VRF ID.
- Use vrf_bitmap_init()/vrf_bitmap_free() to initialize/free a
bit-map;
- Use vrf_bitmap_set()/vrf_bitmap_unset() to set/unset a flag
in the given bit-map, corresponding to the given VRF ID;
- Use vrf_bitmap_check() to test whether the flag, in the given
bit-map and for the given VRF ID, is set.
- Client side:
- In "struct zclient", the following flags are changed from
"u_char" to "vrf_bitmap_t":
redist[ZEBRA_ROUTE_MAX]
default_information
These flags are extended for each VRF, and controlled by the
clients themselves (or with the help of zclient_redistribute()
and zclient_redistribute_default()).
- Zebra side:
- In "struct zserv", the following flags are changed from
"u_char" to "vrf_bitmap_t":
redist[ZEBRA_ROUTE_MAX]
redist_default
ifinfo
ridinfo
These flags are extended for each VRF, as the VRF registration
flags. They are maintained on receiving a ZEBRA_XXX_ADD or
ZEBRA_XXX_DELETE message.
When sending an interface/address/route/router-id message in
a VRF to a client, if the corresponding VRF registration flag
is not set, this message will not be dropped by zebra.
- A new function zread_vrf_unregister() is introduced to process
the new command ZEBRA_VRF_UNREGISTER. All the VRF registration
flags are cleared for the requested VRF.
Those clients, who support only the default VRF, will never receive
a message in a non-default VRF, thanks to the filter in zebra.
* New callback for the event of successful connection to zebra:
- zclient_start() is splitted, keeping only the code of connecting
to zebra.
- Now zclient_init()=>zclient_connect()=>zclient_start() operations
are purely dealing with the connection to zbera.
- Once zebra is successfully connected, at the end of zclient_start(),
a new callback is used to inform the client about connection.
- Till now, in the callback of connect-to-zebra event, all clients
send messages to zebra to request the router-id/interface/routes
information in the default VRF.
Of corse in future the client can do anything it wants in this
callback. For example, it may send requests for both default VRF
and some non-default VRFs.
Signed-off-by: Feng Lu <lu.feng@6wind.com>
Reviewed-by: Alain Ritoux <alain.ritoux@6wind.com>
Signed-off-by: Nicolas Dichtel <nicolas.dichtel@6wind.com>
Acked-by: Donald Sharp <sharpd@cumulusnetworks.com>
Conflicts:
lib/zclient.h
lib/zebra.h
zebra/zserv.c
zebra/zserv.h
Conflicts:
bgpd/bgp_nexthop.c
bgpd/bgp_nht.c
bgpd/bgp_zebra.c
isisd/isis_zebra.c
lib/zclient.c
lib/zclient.h
lib/zebra.h
nhrpd/nhrp_interface.c
nhrpd/nhrp_route.c
nhrpd/nhrpd.h
ospf6d/ospf6_zebra.c
ospf6d/ospf6_zebra.h
ospfd/ospf_vty.c
ospfd/ospf_zebra.c
pimd/pim_zebra.c
pimd/pim_zlookup.c
ripd/rip_zebra.c
ripngd/ripng_zebra.c
zebra/redistribute.c
zebra/rt_netlink.c
zebra/zebra_rnh.c
zebra/zebra_rnh.h
zebra/zserv.c
zebra/zserv.h
2014-10-16 03:52:36 +02:00
|
|
|
: ((instance
|
|
|
|
&& redist_check_instance(
|
2021-04-18 00:03:46 +02:00
|
|
|
&zclient->mi_redist[AFI_IP][type],
|
|
|
|
instance))
|
2016-03-01 16:03:19 +01:00
|
|
|
|| (!instance
|
|
|
|
&& vrf_bitmap_check(
|
2021-04-18 00:03:46 +02:00
|
|
|
zclient->redist[AFI_IP][type],
|
|
|
|
ospf->vrf_id))));
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
|
|
|
|
2021-05-05 18:26:19 +02:00
|
|
|
int ospf_redistribute_update(struct ospf *ospf, struct ospf_redist *red,
|
|
|
|
int type, unsigned short instance, int mtype,
|
|
|
|
int mvalue)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
|
|
|
int force = 0;
|
2018-06-25 17:18:17 +02:00
|
|
|
|
2021-05-05 18:26:19 +02:00
|
|
|
if (mtype != red->dmetric.type) {
|
|
|
|
red->dmetric.type = mtype;
|
|
|
|
force = LSA_REFRESH_FORCE;
|
|
|
|
}
|
|
|
|
if (mvalue != red->dmetric.value) {
|
|
|
|
red->dmetric.value = mvalue;
|
|
|
|
force = LSA_REFRESH_FORCE;
|
2018-06-25 17:18:17 +02:00
|
|
|
}
|
|
|
|
|
2021-05-05 18:26:19 +02:00
|
|
|
ospf_external_lsa_refresh_type(ospf, type, instance, force);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2021-05-05 18:26:19 +02:00
|
|
|
if (IS_DEBUG_OSPF(zebra, ZEBRA_REDISTRIBUTE))
|
|
|
|
zlog_debug(
|
|
|
|
"Redistribute[%s][%d]: Refresh Type[%d], Metric[%d]",
|
|
|
|
ospf_redist_string(type), instance,
|
|
|
|
metric_type(ospf, type, instance),
|
|
|
|
metric_value(ospf, type, instance));
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2021-05-05 18:26:19 +02:00
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2021-05-05 18:26:19 +02:00
|
|
|
/*
 * Start redistributing routes of source protocol 'type' (and optional
 * source 'instance') into this OSPF process, advertising them with
 * metric type 'mtype' and metric value 'mvalue'.
 *
 * Side effects, in order: records the metric on 'red', creates the
 * external-route bookkeeping for [type, instance], registers the
 * redistribution with zebra for this VRF, and bumps the process's
 * redistribute count so ASBR status can be re-evaluated.
 * Always returns CMD_SUCCESS.
 */
int ospf_redistribute_set(struct ospf *ospf, struct ospf_redist *red, int type,
			  unsigned short instance, int mtype, int mvalue)
{
	/* Metric the AS-external LSAs for this source will carry. */
	red->dmetric.type = mtype;
	red->dmetric.value = mvalue;

	/* Create the external route table/state for this [type, instance]. */
	ospf_external_add(ospf, type, instance);

	/* Ask zebra to start feeding us this protocol's routes
	 * (instance-tagged) for our VRF. */
	zclient_redistribute(ZEBRA_REDISTRIBUTE_ADD, zclient, AFI_IP, type,
			     instance, ospf->vrf_id);

	if (IS_DEBUG_OSPF(zebra, ZEBRA_REDISTRIBUTE))
		zlog_debug(
			"Redistribute[%s][%d] vrf id %u: Start Type[%d], Metric[%d]",
			ospf_redist_string(type), instance, ospf->vrf_id,
			metric_type(ospf, type, instance),
			metric_value(ospf, type, instance));

	/* One more redistributed source: re-evaluate ASBR status with the
	 * incremented count. */
	ospf_asbr_status_update(ospf, ++ospf->redistribute);

	return CMD_SUCCESS;
}
|
|
|
|
|
2018-03-27 21:13:34 +02:00
|
|
|
int ospf_redistribute_unset(struct ospf *ospf, int type,
|
|
|
|
unsigned short instance)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
if (type == zclient->redist_default && instance == zclient->instance)
|
2002-12-13 21:15:29 +01:00
|
|
|
return CMD_SUCCESS;
|
|
|
|
|
*: add VRF ID in the API message header
The API messages are used by zebra to exchange the interfaces, addresses,
routes and router-id information with its clients. To distinguish which
VRF the information belongs to, a new field "VRF ID" is added in the
message header. And hence the message version is increased to 3.
* The new field "VRF ID" in the message header:
Length (2 bytes)
Marker (1 byte)
Version (1 byte)
VRF ID (2 bytes, newly added)
Command (2 bytes)
- Client side:
- zclient_create_header() adds the VRF ID in the message header.
- zclient_read() extracts and validates the VRF ID from the header,
and passes the VRF ID to the callback functions registered to
the API messages.
- All relative functions are appended with a new parameter "vrf_id",
including all the callback functions.
- "vrf_id" is also added to "struct zapi_ipv4" and "struct zapi_ipv6".
Clients need to correctly set the VRF ID when using the API
functions zapi_ipv4_route() and zapi_ipv6_route().
- Till now all messages sent from a client have the default VRF ID
"0" in the header.
- The HELLO message is special, which is used as the heart-beat of
a client, and has no relation with VRF. The VRF ID in the HELLO
message header will always be 0 and ignored by zebra.
- Zebra side:
- zserv_create_header() adds the VRF ID in the message header.
- zebra_client_read() extracts and validates the VRF ID from the
header, and passes the VRF ID to the functions which process
the received messages.
- All relative functions are appended with a new parameter "vrf_id".
* Suppress the messages in a VRF which a client does not care:
Some clients may not care about the information in the VRF X, and
zebra should not send the messages in the VRF X to those clients.
Extra flags are used to indicate which VRF is registered by a client,
and a new message ZEBRA_VRF_UNREGISTER is introduced to let a client
can unregister a VRF when it does not need any information in that
VRF.
A client sends any message other than ZEBRA_VRF_UNREGISTER in a VRF
will automatically register to that VRF.
- lib/vrf:
A new utility "VRF bit-map" is provided to manage the flags for
VRFs, one bit per VRF ID.
- Use vrf_bitmap_init()/vrf_bitmap_free() to initialize/free a
bit-map;
- Use vrf_bitmap_set()/vrf_bitmap_unset() to set/unset a flag
in the given bit-map, corresponding to the given VRF ID;
- Use vrf_bitmap_check() to test whether the flag, in the given
bit-map and for the given VRF ID, is set.
- Client side:
- In "struct zclient", the following flags are changed from
"u_char" to "vrf_bitmap_t":
redist[ZEBRA_ROUTE_MAX]
default_information
These flags are extended for each VRF, and controlled by the
clients themselves (or with the help of zclient_redistribute()
and zclient_redistribute_default()).
- Zebra side:
- In "struct zserv", the following flags are changed from
"u_char" to "vrf_bitmap_t":
redist[ZEBRA_ROUTE_MAX]
redist_default
ifinfo
ridinfo
These flags are extended for each VRF, as the VRF registration
flags. They are maintained on receiving a ZEBRA_XXX_ADD or
ZEBRA_XXX_DELETE message.
When sending an interface/address/route/router-id message in
a VRF to a client, if the corresponding VRF registration flag
is not set, this message will not be dropped by zebra.
- A new function zread_vrf_unregister() is introduced to process
the new command ZEBRA_VRF_UNREGISTER. All the VRF registration
flags are cleared for the requested VRF.
Those clients, who support only the default VRF, will never receive
a message in a non-default VRF, thanks to the filter in zebra.
* New callback for the event of successful connection to zebra:
- zclient_start() is splitted, keeping only the code of connecting
to zebra.
- Now zclient_init()=>zclient_connect()=>zclient_start() operations
are purely dealing with the connection to zbera.
- Once zebra is successfully connected, at the end of zclient_start(),
a new callback is used to inform the client about connection.
- Till now, in the callback of connect-to-zebra event, all clients
send messages to zebra to request the router-id/interface/routes
information in the default VRF.
Of corse in future the client can do anything it wants in this
callback. For example, it may send requests for both default VRF
and some non-default VRFs.
Signed-off-by: Feng Lu <lu.feng@6wind.com>
Reviewed-by: Alain Ritoux <alain.ritoux@6wind.com>
Signed-off-by: Nicolas Dichtel <nicolas.dichtel@6wind.com>
Acked-by: Donald Sharp <sharpd@cumulusnetworks.com>
Conflicts:
lib/zclient.h
lib/zebra.h
zebra/zserv.c
zebra/zserv.h
Conflicts:
bgpd/bgp_nexthop.c
bgpd/bgp_nht.c
bgpd/bgp_zebra.c
isisd/isis_zebra.c
lib/zclient.c
lib/zclient.h
lib/zebra.h
nhrpd/nhrp_interface.c
nhrpd/nhrp_route.c
nhrpd/nhrpd.h
ospf6d/ospf6_zebra.c
ospf6d/ospf6_zebra.h
ospfd/ospf_vty.c
ospfd/ospf_zebra.c
pimd/pim_zebra.c
pimd/pim_zlookup.c
ripd/rip_zebra.c
ripngd/ripng_zebra.c
zebra/redistribute.c
zebra/rt_netlink.c
zebra/zebra_rnh.c
zebra/zebra_rnh.h
zebra/zserv.c
zebra/zserv.h
2014-10-16 03:52:36 +02:00
|
|
|
zclient_redistribute(ZEBRA_REDISTRIBUTE_DELETE, zclient, AFI_IP, type,
|
2017-08-25 22:51:12 +02:00
|
|
|
instance, ospf->vrf_id);
|
2003-06-19 04:13:25 +02:00
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
if (IS_DEBUG_OSPF(zebra, ZEBRA_REDISTRIBUTE))
|
2017-08-25 22:51:12 +02:00
|
|
|
zlog_debug("Redistribute[%s][%d] vrf id %u: Stop",
|
|
|
|
ospf_redist_string(type), instance, ospf->vrf_id);
|
2002-12-13 21:15:29 +01:00
|
|
|
|
|
|
|
/* Remove the routes from OSPF table. */
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
ospf_redistribute_withdraw(ospf, type, instance);
|
|
|
|
|
2017-11-21 02:21:03 +01:00
|
|
|
ospf_external_del(ospf, type, instance);
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2003-03-25 06:07:42 +01:00
|
|
|
ospf_asbr_status_update(ospf, --ospf->redistribute);
|
2002-12-13 21:15:29 +01:00
|
|
|
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2003-04-04 04:44:16 +02:00
|
|
|
int ospf_redistribute_default_set(struct ospf *ospf, int originate, int mtype,
|
2003-06-19 04:13:25 +02:00
|
|
|
int mvalue)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
2018-10-01 13:38:01 +02:00
|
|
|
struct prefix_ipv4 p;
|
|
|
|
struct in_addr nexthop;
|
|
|
|
int cur_originate = ospf->default_originate;
|
2019-07-29 14:46:05 +02:00
|
|
|
const char *type_str = NULL;
|
2018-10-01 13:38:01 +02:00
|
|
|
|
2020-02-06 07:49:02 +01:00
|
|
|
nexthop.s_addr = INADDR_ANY;
|
2018-10-01 13:38:01 +02:00
|
|
|
p.family = AF_INET;
|
2020-02-06 07:49:02 +01:00
|
|
|
p.prefix.s_addr = INADDR_ANY;
|
2018-10-01 13:38:01 +02:00
|
|
|
p.prefixlen = 0;
|
|
|
|
|
2006-10-24 21:04:26 +02:00
|
|
|
ospf->default_originate = originate;
|
2003-04-04 04:44:16 +02:00
|
|
|
|
2019-07-29 14:46:05 +02:00
|
|
|
if (cur_originate == originate) {
|
2018-10-01 13:38:01 +02:00
|
|
|
/* Refresh the lsa since metric might different */
|
2002-12-13 21:15:29 +01:00
|
|
|
if (IS_DEBUG_OSPF(zebra, ZEBRA_REDISTRIBUTE))
|
2004-12-08 20:06:51 +01:00
|
|
|
zlog_debug(
|
|
|
|
"Redistribute[%s]: Refresh Type[%d], Metric[%d]",
|
2005-10-01 Andrew J. Schorr <ajschorr@alumni.princeton.edu>
* zebra.h: Declare new functions zebra_route_string() and
zebra_route_char().
* log.c: (zroute_lookup,zebra_route_string,zebra_route_char) New
functions to map zebra route numbers to strings.
* zebra_vty.c: (route_type_str) Remove obsolete function: use new
library function zebra_route_string() instead. Note that there
are a few differences: for IPv6 routes, we now get "ripng" and
"ospf6" instead of the old behavior ("rip" and "ospf").
(route_type_char) Remove obsolete function: ues new library function
zebra_route_char() instead. Note that there is one difference:
the old function returned 'S' for a ZEBRA_ROUTE_SYSTEM route,
whereas the new one returns 'X'.
(vty_show_ip_route_detail,vty_show_ipv6_route_detail) Replace
route_type_str() with zebra_route_string().
(vty_show_ip_route,vty_show_ipv6_route) Replace route_type_char()
with zebra_route_char().
* bgp_vty.c: (bgp_config_write_redistribute) Use new library function
zebra_route_string instead of a local hard-coded table.
* ospf6_asbr.c: Remove local hard-coded tables zroute_name and
zroute_abname. Change the ZROUTE_NAME macro to use new library
function zebra_route_string(). Remove the ZROUTE_ABNAME macro.
(ospf6_asbr_external_route_show): Replace ZROUTE_ABNAME() with
a call to zebra_route_char(), and be sure to fix the format string,
since we now have a char instead of a char *.
* ospf6_zebra.c: Remove local hard-coded tables zebra_route_name and
zebra_route_abname. Note that the zebra_route_name[] table
contained mixed-case strings, whereas the zebra_route_string()
function returns lower-case strings.
(ospf6_zebra_read_ipv6): Change debug message to use new library
function zebra_route_string() instead of zebra_route_name[].
(show_zebra): Use new library function zebra_route_string() instead
of zebra_route_name[].
* ospf_dump.c: Remove local hard-coded table ospf_redistributed_proto.
(ospf_redist_string) New function implemented using new library
function zebra_route_string(). Note that there are a few differences
in the output that will result: the new function returns strings
that are lower-case, whereas the old table was mixed case. Also,
the old table mapped ZEBRA_ROUTE_OSPF6 to "OSPFv3", whereas the
new function returns "ospf6".
* ospfd.h: Remove extern struct message ospf_redistributed_proto[],
and add extern const char *ospf_redist_string(u_int route_type)
instead.
* ospf_asbr.c: (ospf_external_info_add) In two messages, use
ospf_redist_string instead of LOOKUP(ospf_redistributed_proto).
* ospf_vty.c: Remove local hard-coded table distribute_str.
(config_write_ospf_redistribute,config_write_ospf_distribute): Use
new library function zebra_route_string() instead of distribute_str[].
* ospf_zebra.c: (ospf_redistribute_set,ospf_redistribute_unset,
ospf_redistribute_default_set,ospf_redistribute_check)
In debug messages, use ospf_redist_string() instead of
LOOKUP(ospf_redistributed_proto).
* rip_zebra.c: (config_write_rip_redistribute): Remove local hard-coded
table str[]. Replace str[] with calls to new library function
zebra_route_string().
* ripd.c: Remove local hard-coded table route_info[].
(show_ip_rip) Replace uses of str[] with calls to new library
functions zebra_route_char and zebra_route_string.
* ripng_zebra.c: (ripng_redistribute_write) Remove local hard-coded
table str[]. Replace str[i] with new library function
zebra_route_string(i).
* ripngd.c: Remove local hard-coded table route_info[].
(show_ipv6_ripng) Use new library function zebra_route_char() instead
of table route_info[].
2005-10-01 19:38:06 +02:00
|
|
|
ospf_redist_string(DEFAULT_ROUTE),
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
metric_type(ospf, DEFAULT_ROUTE, 0),
|
|
|
|
metric_value(ospf, DEFAULT_ROUTE, 0));
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2018-10-01 13:38:01 +02:00
|
|
|
ospf_external_lsa_refresh_default(ospf);
|
2019-07-29 14:46:05 +02:00
|
|
|
return CMD_SUCCESS;
|
2018-10-01 13:38:01 +02:00
|
|
|
}
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2019-07-29 14:46:05 +02:00
|
|
|
switch (cur_originate) {
|
|
|
|
case DEFAULT_ORIGINATE_NONE:
|
|
|
|
break;
|
|
|
|
case DEFAULT_ORIGINATE_ZEBRA:
|
2018-10-01 13:38:01 +02:00
|
|
|
zclient_redistribute_default(ZEBRA_REDISTRIBUTE_DEFAULT_DELETE,
|
2019-01-11 22:20:13 +01:00
|
|
|
zclient, AFI_IP, ospf->vrf_id);
|
2019-07-29 14:46:05 +02:00
|
|
|
ospf->redistribute--;
|
|
|
|
break;
|
|
|
|
case DEFAULT_ORIGINATE_ALWAYS:
|
|
|
|
ospf_external_info_delete(ospf, DEFAULT_ROUTE, 0, p);
|
|
|
|
ospf_external_del(ospf, DEFAULT_ROUTE, 0);
|
|
|
|
ospf->redistribute--;
|
|
|
|
break;
|
2018-10-01 13:38:01 +02:00
|
|
|
}
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2019-07-29 14:46:05 +02:00
|
|
|
switch (originate) {
|
|
|
|
case DEFAULT_ORIGINATE_NONE:
|
|
|
|
type_str = "none";
|
|
|
|
break;
|
|
|
|
case DEFAULT_ORIGINATE_ZEBRA:
|
|
|
|
type_str = "normal";
|
|
|
|
ospf->redistribute++;
|
|
|
|
zclient_redistribute_default(ZEBRA_REDISTRIBUTE_DEFAULT_ADD,
|
|
|
|
zclient, AFI_IP, ospf->vrf_id);
|
|
|
|
break;
|
|
|
|
case DEFAULT_ORIGINATE_ALWAYS:
|
|
|
|
type_str = "always";
|
|
|
|
ospf->redistribute++;
|
|
|
|
ospf_external_add(ospf, DEFAULT_ROUTE, 0);
|
|
|
|
ospf_external_info_add(ospf, DEFAULT_ROUTE, 0, p, 0, nexthop,
|
|
|
|
0);
|
|
|
|
break;
|
|
|
|
}
|
2002-12-13 21:15:29 +01:00
|
|
|
|
|
|
|
if (IS_DEBUG_OSPF(zebra, ZEBRA_REDISTRIBUTE))
|
2019-07-29 14:46:05 +02:00
|
|
|
zlog_debug("Redistribute[DEFAULT]: %s Type[%d], Metric[%d]",
|
|
|
|
type_str,
|
|
|
|
metric_type(ospf, DEFAULT_ROUTE, 0),
|
|
|
|
metric_value(ospf, DEFAULT_ROUTE, 0));
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2019-07-29 14:46:05 +02:00
|
|
|
ospf_external_lsa_refresh_default(ospf);
|
|
|
|
ospf_asbr_status_update(ospf, ospf->redistribute);
|
2002-12-13 21:15:29 +01:00
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2003-04-04 04:44:16 +02:00
|
|
|
static int ospf_external_lsa_originate_check(struct ospf *ospf,
|
2003-06-19 04:13:25 +02:00
|
|
|
struct external_info *ei)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
|
|
|
/* If prefix is multicast, then do not originate LSA. */
|
|
|
|
if (IN_MULTICAST(htonl(ei->p.prefix.s_addr))) {
|
|
|
|
zlog_info(
|
2020-10-21 19:56:26 +02:00
|
|
|
"LSA[Type5:%pI4]: Not originate AS-external-LSA, Prefix belongs multicast",
|
|
|
|
&ei->p.prefix);
|
2002-12-13 21:15:29 +01:00
|
|
|
return 0;
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
/* Take care of default-originate. */
|
2021-07-08 19:09:20 +02:00
|
|
|
if (is_default_prefix4(&ei->p))
|
2003-03-25 06:07:42 +01:00
|
|
|
if (ospf->default_originate == DEFAULT_ORIGINATE_NONE) {
|
2011-09-10 21:29:19 +02:00
|
|
|
zlog_info(
|
2020-03-27 12:35:23 +01:00
|
|
|
"LSA[Type5:0.0.0.0]: Not originate AS-external-LSA for default");
|
2003-06-19 04:13:25 +02:00
|
|
|
return 0;
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* If connected prefix is OSPF enable interface, then do not announce. */
|
2003-06-19 04:13:25 +02:00
|
|
|
int ospf_distribute_check_connected(struct ospf *ospf, struct external_info *ei)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
2009-08-07 13:48:15 +02:00
|
|
|
struct listnode *node;
|
|
|
|
struct ospf_interface *oi;
|
2002-12-13 21:15:29 +01:00
|
|
|
|
|
|
|
|
2009-08-07 13:48:15 +02:00
|
|
|
for (ALL_LIST_ELEMENTS_RO(ospf->oiflist, node, oi))
|
|
|
|
if (prefix_match(oi->address, (struct prefix *)&ei->p))
|
|
|
|
return 0;
|
2002-12-13 21:15:29 +01:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2020-05-03 11:25:55 +02:00
|
|
|
|
|
|
|
/* Apply default route-map on ei received. */
|
|
|
|
int ospf_external_info_apply_default_routemap(struct ospf *ospf,
|
|
|
|
struct external_info *ei,
|
|
|
|
struct external_info *default_ei)
|
|
|
|
{
|
|
|
|
struct ospf_redist *red;
|
|
|
|
int type = default_ei->type;
|
|
|
|
struct prefix_ipv4 *p = &ei->p;
|
|
|
|
struct route_map_set_values save_values;
|
|
|
|
|
|
|
|
|
|
|
|
if (!ospf_external_lsa_originate_check(ospf, default_ei))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
save_values = default_ei->route_map_set;
|
|
|
|
ospf_reset_route_map_set_values(&default_ei->route_map_set);
|
|
|
|
|
|
|
|
/* apply route-map if needed */
|
|
|
|
red = ospf_redist_lookup(ospf, type, ospf->instance);
|
|
|
|
if (red && ROUTEMAP_NAME(red)) {
|
|
|
|
route_map_result_t ret;
|
|
|
|
|
2020-11-14 01:35:20 +01:00
|
|
|
ret = route_map_apply(ROUTEMAP(red), (struct prefix *)p, ei);
|
2020-05-03 11:25:55 +02:00
|
|
|
|
|
|
|
if (ret == RMAP_DENYMATCH) {
|
|
|
|
ei->route_map_set = save_values;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Default originated is based on route-map condition then
|
|
|
|
* apply route-map on received external info. Originate or
|
|
|
|
* flush based on route-map condition.
|
|
|
|
*/
|
|
|
|
static bool ospf_external_lsa_default_routemap_apply(struct ospf *ospf,
|
|
|
|
struct external_info *ei,
|
|
|
|
int cmd)
|
|
|
|
{
|
|
|
|
struct external_info *default_ei;
|
|
|
|
struct prefix_ipv4 p;
|
|
|
|
struct ospf_lsa *lsa;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
p.family = AF_INET;
|
|
|
|
p.prefixlen = 0;
|
|
|
|
p.prefix.s_addr = INADDR_ANY;
|
|
|
|
|
|
|
|
|
|
|
|
/* Get the default extenal info. */
|
|
|
|
default_ei = ospf_external_info_lookup(ospf, DEFAULT_ROUTE,
|
|
|
|
ospf->instance, &p);
|
|
|
|
if (!default_ei) {
|
|
|
|
/* Nothing to be done here. */
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (IS_DEBUG_OSPF_DEFAULT_INFO)
|
2020-10-21 19:56:26 +02:00
|
|
|
zlog_debug("Apply default originate routemap on ei: %pI4 cmd: %d",
|
|
|
|
&ei->p.prefix, cmd);
|
2020-05-03 11:25:55 +02:00
|
|
|
|
|
|
|
ret = ospf_external_info_apply_default_routemap(ospf, ei, default_ei);
|
|
|
|
|
|
|
|
/* If deny then nothing to be done both in add and del case. */
|
|
|
|
if (!ret) {
|
|
|
|
if (IS_DEBUG_OSPF_DEFAULT_INFO)
|
2020-10-21 19:56:26 +02:00
|
|
|
zlog_debug("Default originte routemap deny for ei: %pI4",
|
|
|
|
&ei->p.prefix);
|
2020-05-03 11:25:55 +02:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Get the default LSA. */
|
|
|
|
lsa = ospf_external_info_find_lsa(ospf, &p);
|
|
|
|
|
|
|
|
/* If this is add route and permit then ooriginate default. */
|
|
|
|
if (cmd == ZEBRA_REDISTRIBUTE_ROUTE_ADD) {
|
|
|
|
/* If permit and default already advertise then return. */
|
|
|
|
if (lsa && !IS_LSA_MAXAGE(lsa)) {
|
|
|
|
if (IS_DEBUG_OSPF_DEFAULT_INFO)
|
2020-10-29 21:06:13 +01:00
|
|
|
zlog_debug("Default lsa already originated");
|
2020-05-03 11:25:55 +02:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (IS_DEBUG_OSPF_DEFAULT_INFO)
|
|
|
|
zlog_debug("Originating/Refreshing default lsa");
|
|
|
|
|
|
|
|
if (lsa && IS_LSA_MAXAGE(lsa))
|
|
|
|
/* Refresh lsa.*/
|
2020-08-15 11:55:40 +02:00
|
|
|
ospf_external_lsa_refresh(ospf, lsa, default_ei, true,
|
|
|
|
false);
|
2020-05-03 11:25:55 +02:00
|
|
|
else
|
|
|
|
/* If permit and default not advertised then advertise.
|
|
|
|
*/
|
|
|
|
ospf_external_lsa_originate(ospf, default_ei);
|
|
|
|
|
|
|
|
} else if (cmd == ZEBRA_REDISTRIBUTE_ROUTE_DEL) {
|
|
|
|
/* If deny and lsa is not originated then nothing to be done.*/
|
|
|
|
if (!lsa) {
|
|
|
|
if (IS_DEBUG_OSPF_DEFAULT_INFO)
|
|
|
|
zlog_debug(
|
|
|
|
"Default lsa not originated, not flushing");
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (IS_DEBUG_OSPF_DEFAULT_INFO)
|
|
|
|
zlog_debug(
|
2020-10-21 19:56:26 +02:00
|
|
|
"Running default route-map again as ei: %pI4 deleted",
|
|
|
|
&ei->p.prefix);
|
2020-05-03 11:25:55 +02:00
|
|
|
/*
|
|
|
|
* if this route delete was permitted then we need to check
|
|
|
|
* there are any other external info which can still trigger
|
|
|
|
* default route origination else flush it.
|
|
|
|
*/
|
|
|
|
thread_add_event(master,
|
|
|
|
ospf_external_lsa_default_routemap_timer, ospf,
|
|
|
|
0, &ospf->t_default_routemap_timer);
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
/* return 1 if external LSA must be originated, 0 otherwise */
|
2003-03-25 06:07:42 +01:00
|
|
|
int ospf_redistribute_check(struct ospf *ospf, struct external_info *ei,
|
2003-06-19 04:13:25 +02:00
|
|
|
int *changed)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
|
|
|
struct route_map_set_values save_values;
|
|
|
|
struct prefix_ipv4 *p = &ei->p;
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
struct ospf_redist *red;
|
2021-07-08 19:09:20 +02:00
|
|
|
uint8_t type = is_default_prefix4(&ei->p) ? DEFAULT_ROUTE : ei->type;
|
|
|
|
unsigned short instance = is_default_prefix4(&ei->p) ? 0 : ei->instance;
|
2020-05-18 06:40:48 +02:00
|
|
|
route_tag_t saved_tag = 0;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2020-05-03 11:25:55 +02:00
|
|
|
/* Default is handled differently. */
|
|
|
|
if (type == DEFAULT_ROUTE)
|
|
|
|
return 1;
|
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
if (changed)
|
|
|
|
*changed = 0;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2003-04-04 04:44:16 +02:00
|
|
|
if (!ospf_external_lsa_originate_check(ospf, ei))
|
2002-12-13 21:15:29 +01:00
|
|
|
return 0;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
/* Take care connected route. */
|
2003-03-25 06:07:42 +01:00
|
|
|
if (type == ZEBRA_ROUTE_CONNECT
|
|
|
|
&& !ospf_distribute_check_connected(ospf, ei))
|
2002-12-13 21:15:29 +01:00
|
|
|
return 0;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2003-04-04 04:44:16 +02:00
|
|
|
if (!DEFAULT_ROUTE_TYPE(type) && DISTRIBUTE_NAME(ospf, type))
|
2002-12-13 21:15:29 +01:00
|
|
|
/* distirbute-list exists, but access-list may not? */
|
2003-04-04 04:44:16 +02:00
|
|
|
if (DISTRIBUTE_LIST(ospf, type))
|
|
|
|
if (access_list_apply(DISTRIBUTE_LIST(ospf, type), p)
|
|
|
|
== FILTER_DENY) {
|
2020-10-18 13:33:54 +02:00
|
|
|
if (IS_DEBUG_OSPF(zebra, ZEBRA_REDISTRIBUTE))
|
2004-12-08 20:06:51 +01:00
|
|
|
zlog_debug(
|
2020-10-18 13:33:54 +02:00
|
|
|
"Redistribute[%s]: %pFX filtered by distribute-list.",
|
|
|
|
ospf_redist_string(type), p);
|
2003-06-19 04:13:25 +02:00
|
|
|
return 0;
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
save_values = ei->route_map_set;
|
|
|
|
ospf_reset_route_map_set_values(&ei->route_map_set);
|
2020-05-18 07:02:34 +02:00
|
|
|
|
2020-05-18 06:40:48 +02:00
|
|
|
saved_tag = ei->tag;
|
2020-05-18 07:02:34 +02:00
|
|
|
/* Resetting with original route tag */
|
|
|
|
ei->tag = ei->orig_tag;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
/* apply route-map if needed */
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
red = ospf_redist_lookup(ospf, type, instance);
|
|
|
|
if (red && ROUTEMAP_NAME(red)) {
|
lib: Introducing a 3rd state for route-map match cmd: RMAP_NOOP
Introducing a 3rd state for route_map_apply library function: RMAP_NOOP
Traditionally route map MATCH rule apis were designed to return
a binary response, consisting of either RMAP_MATCH or RMAP_NOMATCH.
(Route-map SET rule apis return RMAP_OKAY or RMAP_ERROR).
Depending on this response, the following statemachine decided the
course of action:
State1:
If match cmd returns RMAP_MATCH then, keep existing behaviour.
If routemap type is PERMIT, execute set cmds or call cmds if applicable,
otherwise PERMIT!
Else If routemap type is DENY, we DENYMATCH right away
State2:
If match cmd returns RMAP_NOMATCH, continue on to next route-map. If there
are no other rules or if all the rules return RMAP_NOMATCH, return DENYMATCH
We require a 3rd state because of the following situation:
The issue - what if, the rule api needs to abort or ignore a rule?:
"match evpn vni xx" route-map filter can be applied to incoming routes
regardless of whether the tunnel type is vxlan or mpls.
This rule should be N/A for mpls based evpn route, but applicable to only
vxlan based evpn route.
Also, this rule should be applicable for routes with VNI label only, and
not for routes without labels. For example, type 3 and type 4 EVPN routes
do not have labels, so, this match cmd should let them through.
Today, the filter produces either a match or nomatch response regardless of
whether it is mpls/vxlan, resulting in either permitting or denying the
route.. So an mpls evpn route may get filtered out incorrectly.
Eg: "route-map RM1 permit 10 ; match evpn vni 20" or
"route-map RM2 deny 20 ; match vni 20"
With the introduction of the 3rd state, we can abort this rule check safely.
How? The rules api can now return RMAP_NOOP to indicate
that it encountered an invalid check, and needs to abort just that rule,
but continue with other rules.
As a result we have a 3rd state:
State3:
If match cmd returned RMAP_NOOP
Then, proceed to other route-map, otherwise if there are no more
rules or if all the rules return RMAP_NOOP, then, return RMAP_PERMITMATCH.
Signed-off-by: Lakshman Krishnamoorthy <lkrishnamoor@vmware.com>
2019-06-19 23:04:36 +02:00
|
|
|
route_map_result_t ret;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2020-11-14 01:35:20 +01:00
|
|
|
ret = route_map_apply(ROUTEMAP(red), (struct prefix *)p, ei);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
if (ret == RMAP_DENYMATCH) {
|
2003-06-19 04:13:25 +02:00
|
|
|
ei->route_map_set = save_values;
|
2020-10-18 13:33:54 +02:00
|
|
|
if (IS_DEBUG_OSPF(zebra, ZEBRA_REDISTRIBUTE))
|
2004-12-08 20:06:51 +01:00
|
|
|
zlog_debug(
|
2020-10-18 13:33:54 +02:00
|
|
|
"Redistribute[%s]: %pFX filtered by route-map.",
|
|
|
|
ospf_redist_string(type), p);
|
2003-06-19 04:13:25 +02:00
|
|
|
return 0;
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
/* check if 'route-map set' changed something */
|
2020-05-18 06:40:48 +02:00
|
|
|
if (changed) {
|
2003-06-19 04:13:25 +02:00
|
|
|
*changed = !ospf_route_map_set_compare(
|
|
|
|
&ei->route_map_set, &save_values);
|
2020-05-18 06:40:48 +02:00
|
|
|
|
|
|
|
/* check if tag is modified */
|
|
|
|
*changed |= (saved_tag != ei->tag);
|
|
|
|
}
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* OSPF route-map set for redistribution */
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e. any previous way of running a single ospf
daemon (router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
void ospf_routemap_set(struct ospf_redist *red, const char *name)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
2019-02-04 14:22:39 +01:00
|
|
|
if (ROUTEMAP_NAME(red)) {
|
|
|
|
route_map_counter_decrement(ROUTEMAP(red));
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
free(ROUTEMAP_NAME(red));
|
2019-02-04 14:22:39 +01:00
|
|
|
}
|
2002-12-13 21:15:29 +01:00
|
|
|
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
ROUTEMAP_NAME(red) = strdup(name);
|
|
|
|
ROUTEMAP(red) = route_map_lookup_by_name(name);
|
2019-02-04 14:22:39 +01:00
|
|
|
route_map_counter_increment(ROUTEMAP(red));
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
|
|
|
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
void ospf_routemap_unset(struct ospf_redist *red)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
2019-02-04 14:22:39 +01:00
|
|
|
if (ROUTEMAP_NAME(red)) {
|
|
|
|
route_map_counter_decrement(ROUTEMAP(red));
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
free(ROUTEMAP_NAME(red));
|
2019-02-04 14:22:39 +01:00
|
|
|
}
|
2002-12-13 21:15:29 +01:00
|
|
|
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
ROUTEMAP_NAME(red) = NULL;
|
|
|
|
ROUTEMAP(red) = NULL;
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
|
|
|
|
2021-05-31 15:27:51 +02:00
|
|
|
static int ospf_zebra_gr_update(struct ospf *ospf, int command,
|
|
|
|
uint32_t stale_time)
|
|
|
|
{
|
|
|
|
struct zapi_cap api;
|
|
|
|
|
|
|
|
if (!zclient || zclient->sock < 0 || !ospf)
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
memset(&api, 0, sizeof(struct zapi_cap));
|
|
|
|
api.cap = command;
|
|
|
|
api.stale_removal_time = stale_time;
|
|
|
|
api.vrf_id = ospf->vrf_id;
|
|
|
|
|
|
|
|
(void)zclient_capabilities_send(ZEBRA_CLIENT_CAPABILITIES, zclient,
|
|
|
|
&api);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int ospf_zebra_gr_enable(struct ospf *ospf, uint32_t stale_time)
|
|
|
|
{
|
|
|
|
return ospf_zebra_gr_update(ospf, ZEBRA_CLIENT_GR_CAPABILITIES,
|
|
|
|
stale_time);
|
|
|
|
}
|
|
|
|
|
|
|
|
int ospf_zebra_gr_disable(struct ospf *ospf)
|
|
|
|
{
|
|
|
|
return ospf_zebra_gr_update(ospf, ZEBRA_CLIENT_GR_DISABLE, 0);
|
|
|
|
}
|
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
/* Zebra route add and delete treatment. */
|
2019-05-03 21:42:59 +02:00
|
|
|
static int ospf_zebra_read_route(ZAPI_CALLBACK_ARGS)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
2017-08-21 03:10:50 +02:00
|
|
|
struct zapi_route api;
|
|
|
|
struct prefix_ipv4 p;
|
2002-12-13 21:15:29 +01:00
|
|
|
unsigned long ifindex;
|
|
|
|
struct in_addr nexthop;
|
|
|
|
struct external_info *ei;
|
2003-04-04 04:44:16 +02:00
|
|
|
struct ospf *ospf;
|
2015-10-21 00:17:07 +02:00
|
|
|
int i;
|
2018-10-01 13:38:01 +02:00
|
|
|
uint8_t rt_type;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-08-25 22:51:12 +02:00
|
|
|
ospf = ospf_lookup_by_vrf_id(vrf_id);
|
2017-08-21 03:10:50 +02:00
|
|
|
if (ospf == NULL)
|
|
|
|
return 0;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-08-21 03:10:50 +02:00
|
|
|
if (zapi_route_decode(zclient->ibuf, &api) < 0)
|
|
|
|
return -1;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-08-21 03:10:50 +02:00
|
|
|
ifindex = api.nexthops[0].ifindex;
|
|
|
|
nexthop = api.nexthops[0].gate.ipv4;
|
2018-10-01 13:38:01 +02:00
|
|
|
rt_type = api.type;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-08-21 03:10:50 +02:00
|
|
|
memcpy(&p, &api.prefix, sizeof(p));
|
2004-04-20 19:25:12 +02:00
|
|
|
if (IPV4_NET127(ntohl(p.prefix.s_addr)))
|
|
|
|
return 0;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2018-10-01 13:38:01 +02:00
|
|
|
/* Re-destributed route is default route.
|
|
|
|
* Here, route type is used as 'ZEBRA_ROUTE_KERNEL' for
|
|
|
|
* updating ex-info. But in resetting (no default-info
|
|
|
|
* originate)ZEBRA_ROUTE_MAX is used to delete the ex-info.
|
|
|
|
* Resolved this inconsistency by maintaining same route type.
|
|
|
|
*/
|
2021-07-08 19:09:20 +02:00
|
|
|
if (is_default_prefix4(&p))
|
2018-10-01 13:38:01 +02:00
|
|
|
rt_type = DEFAULT_ROUTE;
|
|
|
|
|
2020-10-18 13:33:54 +02:00
|
|
|
if (IS_DEBUG_OSPF(zebra, ZEBRA_REDISTRIBUTE))
|
|
|
|
zlog_debug("%s: cmd %s from client %s: vrf_id %d, p %pFX",
|
2019-07-24 21:57:34 +02:00
|
|
|
__func__, zserv_command_string(cmd),
|
2020-10-18 13:33:54 +02:00
|
|
|
zebra_route_string(api.type), vrf_id, &api.prefix);
|
2018-02-28 08:59:42 +01:00
|
|
|
|
2019-05-03 21:42:59 +02:00
|
|
|
if (cmd == ZEBRA_REDISTRIBUTE_ROUTE_ADD) {
|
2004-02-13 18:40:51 +01:00
|
|
|
/* XXX|HACK|TODO|FIXME:
|
2012-04-11 23:52:46 +02:00
|
|
|
* Maybe we should ignore reject/blackhole routes? Testing
|
|
|
|
* shows that there is no problems though and this is only way
|
|
|
|
* to "summarize" routes in ASBR at the moment. Maybe we need
|
|
|
|
* just a better generalised solution for these types?
|
2017-07-17 14:03:14 +02:00
|
|
|
*/
|
|
|
|
|
2017-08-21 03:10:50 +02:00
|
|
|
/* Protocol tag overwrites all other tag value sent by zebra */
|
2018-10-01 13:38:01 +02:00
|
|
|
if (ospf->dtag[rt_type] > 0)
|
|
|
|
api.tag = ospf->dtag[rt_type];
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2003-07-15 14:52:22 +02:00
|
|
|
/*
|
2004-04-23 10:51:10 +02:00
|
|
|
* Given zebra sends update for a prefix via ADD message, it
|
|
|
|
* should
|
|
|
|
* be considered as an implicit DEL for that prefix with other
|
|
|
|
* source
|
|
|
|
* types.
|
|
|
|
*/
|
2018-10-01 13:38:01 +02:00
|
|
|
for (i = 0; i <= ZEBRA_ROUTE_MAX; i++)
|
|
|
|
if (i != rt_type)
|
2017-11-21 02:21:03 +01:00
|
|
|
ospf_external_info_delete(ospf, i, api.instance,
|
|
|
|
p);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2018-10-01 13:38:01 +02:00
|
|
|
ei = ospf_external_info_add(ospf, rt_type, api.instance, p,
|
2017-11-21 02:21:03 +01:00
|
|
|
ifindex, nexthop, api.tag);
|
2004-04-23 10:51:10 +02:00
|
|
|
if (ei == NULL) {
|
|
|
|
/* Nothing has changed, so nothing to do; return */
|
|
|
|
return 0;
|
2017-07-17 14:03:14 +02:00
|
|
|
}
|
2020-02-06 07:49:02 +01:00
|
|
|
if (ospf->router_id.s_addr != INADDR_ANY) {
|
2021-07-08 19:09:20 +02:00
|
|
|
if (is_default_prefix4(&p))
|
2021-04-17 23:57:25 +02:00
|
|
|
ospf_external_lsa_refresh_default(ospf);
|
|
|
|
else {
|
|
|
|
struct ospf_external_aggr_rt *aggr;
|
|
|
|
struct as_external_lsa *al;
|
|
|
|
struct ospf_lsa *lsa = NULL;
|
|
|
|
struct in_addr mask;
|
|
|
|
|
|
|
|
aggr = ospf_external_aggr_match(ospf, &ei->p);
|
|
|
|
|
|
|
|
if (aggr) {
|
|
|
|
/* Check the AS-external-LSA
|
|
|
|
* should be originated.
|
|
|
|
*/
|
|
|
|
if (!ospf_redistribute_check(ospf, ei,
|
|
|
|
NULL))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (IS_DEBUG_OSPF(lsa, EXTNL_LSA_AGGR))
|
|
|
|
zlog_debug(
|
|
|
|
"%s: Send Aggreate LSA (%pI4/%d)",
|
|
|
|
__func__,
|
|
|
|
&aggr->p.prefix,
|
|
|
|
aggr->p.prefixlen);
|
|
|
|
|
|
|
|
ospf_originate_summary_lsa(ospf, aggr,
|
|
|
|
ei);
|
|
|
|
|
|
|
|
/* Handling the case where the
|
|
|
|
* external route prefix
|
|
|
|
* and aggegate prefix is same
|
|
|
|
* If same dont flush the
|
|
|
|
* originated
|
|
|
|
* external LSA.
|
|
|
|
*/
|
|
|
|
if (prefix_same(
|
|
|
|
(struct prefix *)&aggr->p,
|
|
|
|
(struct prefix *)&ei->p))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
lsa = ospf_external_info_find_lsa(
|
|
|
|
ospf, &ei->p);
|
|
|
|
|
|
|
|
if (lsa) {
|
|
|
|
al = (struct as_external_lsa *)
|
|
|
|
lsa->data;
|
|
|
|
masklen2ip(ei->p.prefixlen,
|
|
|
|
&mask);
|
|
|
|
|
|
|
|
if (mask.s_addr
|
|
|
|
!= al->mask.s_addr)
|
|
|
|
return 0;
|
2020-09-05 17:15:51 +02:00
|
|
|
|
2021-04-17 23:57:25 +02:00
|
|
|
ospf_external_lsa_flush(
|
|
|
|
ospf, ei->type, &ei->p,
|
|
|
|
0);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
struct ospf_lsa *current;
|
|
|
|
|
|
|
|
current = ospf_external_info_find_lsa(
|
|
|
|
ospf, &ei->p);
|
|
|
|
if (!current) {
|
|
|
|
/* Check the
|
|
|
|
* AS-external-LSA
|
|
|
|
* should be
|
|
|
|
* originated.
|
2020-09-05 17:15:51 +02:00
|
|
|
*/
|
|
|
|
if (!ospf_redistribute_check(
|
|
|
|
ospf, ei, NULL))
|
|
|
|
return 0;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2021-04-17 23:57:25 +02:00
|
|
|
ospf_external_lsa_originate(
|
|
|
|
ospf, ei);
|
|
|
|
} else {
|
2015-10-21 06:38:38 +02:00
|
|
|
if (IS_DEBUG_OSPF(
|
2021-04-17 23:57:25 +02:00
|
|
|
zebra,
|
|
|
|
ZEBRA_REDISTRIBUTE))
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
zlog_debug(
|
2021-04-17 23:57:25 +02:00
|
|
|
"%s: %pI4 refreshing LSA",
|
2020-09-05 17:15:51 +02:00
|
|
|
__func__,
|
2021-04-17 23:57:25 +02:00
|
|
|
&p.prefix);
|
|
|
|
ospf_external_lsa_refresh(
|
|
|
|
ospf, current, ei,
|
|
|
|
LSA_REFRESH_FORCE,
|
|
|
|
false);
|
2017-07-17 14:03:14 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2020-05-03 11:25:55 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Check if default-information originate is
|
|
|
|
* with some routemap prefix/access list match.
|
|
|
|
*/
|
|
|
|
ospf_external_lsa_default_routemap_apply(ospf, ei, cmd);
|
|
|
|
|
2021-04-17 23:57:25 +02:00
|
|
|
} else { /* if (cmd == ZEBRA_REDISTRIBUTE_ROUTE_DEL) */
|
2020-09-05 17:15:51 +02:00
|
|
|
struct ospf_external_aggr_rt *aggr;
|
|
|
|
|
2020-05-03 11:25:55 +02:00
|
|
|
ei = ospf_external_info_lookup(ospf, rt_type, api.instance, &p);
|
2020-09-05 17:15:51 +02:00
|
|
|
if (ei == NULL)
|
|
|
|
return 0;
|
2021-04-17 23:57:25 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Check if default-information originate i
|
|
|
|
* with some routemap prefix/access list match.
|
|
|
|
* Apply before ei is deleted.
|
|
|
|
*/
|
|
|
|
ospf_external_lsa_default_routemap_apply(ospf, ei, cmd);
|
2020-05-03 11:25:55 +02:00
|
|
|
|
2020-09-05 17:15:51 +02:00
|
|
|
aggr = ospf_external_aggr_match(ospf, &ei->p);
|
|
|
|
|
|
|
|
if (aggr && (ei->aggr_route == aggr)) {
|
|
|
|
ospf_unlink_ei_from_aggr(ospf, aggr, ei);
|
|
|
|
|
|
|
|
ospf_external_info_delete(ospf, rt_type, api.instance,
|
|
|
|
p);
|
|
|
|
} else {
|
|
|
|
ospf_external_info_delete(ospf, rt_type, api.instance,
|
|
|
|
p);
|
|
|
|
|
2021-07-08 19:09:20 +02:00
|
|
|
if (is_default_prefix4(&p))
|
2020-09-05 17:15:51 +02:00
|
|
|
ospf_external_lsa_refresh_default(ospf);
|
|
|
|
else
|
|
|
|
ospf_external_lsa_flush(ospf, rt_type, &p,
|
|
|
|
ifindex /*, nexthop */);
|
|
|
|
}
|
2015-10-21 06:38:38 +02:00
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
return 0;
|
|
|
|
}
|
2014-06-04 06:53:35 +02:00
|
|
|
|
2003-06-19 04:13:25 +02:00
|
|
|
|
2004-10-11 13:00:30 +02:00
|
|
|
int ospf_distribute_list_out_set(struct ospf *ospf, int type, const char *name)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
|
|
|
/* Lookup access-list for distribute-list. */
|
2003-04-04 04:44:16 +02:00
|
|
|
DISTRIBUTE_LIST(ospf, type) = access_list_lookup(AFI_IP, name);
|
2002-12-13 21:15:29 +01:00
|
|
|
|
|
|
|
/* Clear previous distribute-name. */
|
2003-04-04 04:44:16 +02:00
|
|
|
if (DISTRIBUTE_NAME(ospf, type))
|
|
|
|
free(DISTRIBUTE_NAME(ospf, type));
|
2002-12-13 21:15:29 +01:00
|
|
|
|
|
|
|
/* Set distribute-name. */
|
2003-04-04 04:44:16 +02:00
|
|
|
DISTRIBUTE_NAME(ospf, type) = strdup(name);
|
2002-12-13 21:15:29 +01:00
|
|
|
|
|
|
|
/* If access-list have been set, schedule update timer. */
|
2003-04-04 04:44:16 +02:00
|
|
|
if (DISTRIBUTE_LIST(ospf, type))
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
ospf_distribute_list_update(ospf, type, 0);
|
2002-12-13 21:15:29 +01:00
|
|
|
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2004-10-11 13:00:30 +02:00
|
|
|
int ospf_distribute_list_out_unset(struct ospf *ospf, int type,
|
|
|
|
const char *name)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
|
|
|
/* Schedule update timer. */
|
2003-04-04 04:44:16 +02:00
|
|
|
if (DISTRIBUTE_LIST(ospf, type))
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
ospf_distribute_list_update(ospf, type, 0);
|
2002-12-13 21:15:29 +01:00
|
|
|
|
|
|
|
/* Unset distribute-list. */
|
2003-04-04 04:44:16 +02:00
|
|
|
DISTRIBUTE_LIST(ospf, type) = NULL;
|
2002-12-13 21:15:29 +01:00
|
|
|
|
|
|
|
/* Clear distribute-name. */
|
2003-04-04 04:44:16 +02:00
|
|
|
if (DISTRIBUTE_NAME(ospf, type))
|
|
|
|
free(DISTRIBUTE_NAME(ospf, type));
|
2003-06-19 04:13:25 +02:00
|
|
|
|
2003-04-04 04:44:16 +02:00
|
|
|
DISTRIBUTE_NAME(ospf, type) = NULL;
|
2002-12-13 21:15:29 +01:00
|
|
|
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* distribute-list update timer. */
|
|
|
|
/*
 * Timer callback: walk every redistributed external route and bring its
 * AS-external-LSA in line with the current distribute-list and
 * aggregation configuration (originate, refresh, aggregate, or unlink).
 *
 * thread: the firing thread event; its argument is the struct ospf
 *         instance (set when the timer was scheduled).
 *
 * Always returns 0 (thread callback convention).
 */
static int ospf_distribute_list_update_timer(struct thread *thread)
{
	struct route_node *rn;
	struct external_info *ei;
	struct route_table *rt;
	struct ospf_lsa *lsa;
	int type, default_refresh = 0;
	struct ospf *ospf = THREAD_ARG(thread);

	if (ospf == NULL)
		return 0;

	/* The timer fired, so the handle is stale; clear it so a new
	 * update can be scheduled. */
	ospf->t_distribute_update = NULL;

	zlog_info("Zebra[Redistribute]: distribute-list update timer fired!");

	if (IS_DEBUG_OSPF_EVENT) {
		zlog_debug("%s: ospf distribute-list update vrf %s id %d",
			   __func__, ospf_vrf_id_to_name(ospf->vrf_id),
			   ospf->vrf_id);
	}

	/* foreach all external info. */
	for (type = 0; type <= ZEBRA_ROUTE_MAX; type++) {
		struct list *ext_list;
		struct listnode *node;
		struct ospf_external *ext;

		/* Skip route types with no redistribution configured. */
		ext_list = ospf->external[type];
		if (!ext_list)
			continue;

		for (ALL_LIST_ELEMENTS_RO(ext_list, node, ext)) {
			rt = ext->external_info;
			if (!rt)
				continue;
			for (rn = route_top(rt); rn; rn = route_next(rn)) {
				ei = rn->info;
				if (!ei)
					continue;

				/* Default routes are handled once, at the
				 * bottom, via
				 * ospf_external_lsa_refresh_default(). */
				if (is_default_prefix4(&ei->p))
					default_refresh = 1;
				else {
					struct ospf_external_aggr_rt *aggr;

					aggr = ospf_external_aggr_match(ospf,
									&ei->p);
					if (aggr) {
						/* Check whether the
						 * AS-external-LSA should
						 * (still) be originated; if
						 * the filters now reject it,
						 * detach it from the
						 * aggregate.
						 */
						if (!ospf_redistribute_check(
							    ospf, ei, NULL)) {

							ospf_unlink_ei_from_aggr(
								ospf, aggr, ei);
							continue;
						}

						if (IS_DEBUG_OSPF(
							    lsa,
							    EXTNL_LSA_AGGR))
							zlog_debug(
								"%s: Send Aggregate LSA (%pI4/%d)",
								__func__,
								&aggr->p.prefix,
								aggr->p.prefixlen);

						/* Originate Aggregate
						 * LSA
						 */
						ospf_originate_summary_lsa(
							ospf, aggr, ei);
					} else if (
						(lsa = ospf_external_info_find_lsa(
							 ospf, &ei->p))) {
						int force =
							LSA_REFRESH_IF_CHANGED;
						/* If this is a MaxAge LSA, we
						 * need to force refresh it
						 * because distribute settings
						 * might have changed and now,
						 * this LSA needs to be
						 * originated, not be removed.
						 * If we don't force refresh
						 * it, it will remain a MaxAge
						 * LSA because it will look
						 * like it hasn't changed.
						 * Neighbors will not receive
						 * updates for this LSA.
						 */
						if (IS_LSA_MAXAGE(lsa))
							force = LSA_REFRESH_FORCE;

						ospf_external_lsa_refresh(
							ospf, lsa, ei, force,
							false);
					} else {
						/* No existing LSA and no
						 * aggregate: originate a
						 * fresh LSA if the filters
						 * now permit it. */
						if (!ospf_redistribute_check(
							    ospf, ei, NULL))
							continue;
						ospf_external_lsa_originate(
							ospf, ei);
					}
				}
			}
		}
	}
	/* Refresh the default-route LSA once if any default prefix was
	 * seen above. */
	if (default_refresh)
		ospf_external_lsa_refresh_default(ospf);

	return 0;
}
|
|
|
|
|
|
|
|
/* Update distribute-list and set timer to apply access-list. */
|
2018-03-27 21:13:34 +02:00
|
|
|
void ospf_distribute_list_update(struct ospf *ospf, int type,
|
|
|
|
unsigned short instance)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
struct ospf_external *ext;
|
2017-08-25 22:51:12 +02:00
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
/* External info does not exist. */
|
2017-11-21 02:21:03 +01:00
|
|
|
ext = ospf_external_lookup(ospf, type, instance);
|
2021-07-16 20:01:01 +02:00
|
|
|
if (!ext || !EXTERNAL_INFO(ext))
|
2002-12-13 21:15:29 +01:00
|
|
|
return;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2021-07-16 20:01:01 +02:00
|
|
|
/* Set timer. If timer is already started, this call does nothing. */
|
|
|
|
thread_add_timer_msec(master, ospf_distribute_list_update_timer, ospf,
|
2020-04-08 07:57:15 +02:00
|
|
|
ospf->min_ls_interval,
|
2017-05-05 23:22:25 +02:00
|
|
|
&ospf->t_distribute_update);
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* If access-list is updated, apply some check. */
|
|
|
|
static void ospf_filter_update(struct access_list *access)
|
|
|
|
{
|
2003-04-04 04:44:16 +02:00
|
|
|
struct ospf *ospf;
|
2002-12-13 21:15:29 +01:00
|
|
|
int type;
|
|
|
|
int abr_inv = 0;
|
|
|
|
struct ospf_area *area;
|
2017-08-25 22:51:12 +02:00
|
|
|
struct listnode *node, *n1;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
/* If OSPF instance does not exist, return right now. */
|
2017-08-25 22:51:12 +02:00
|
|
|
if (listcount(om->ospf) == 0)
|
2002-12-13 21:15:29 +01:00
|
|
|
return;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-08-25 22:51:12 +02:00
|
|
|
/* Iterate all ospf [VRF] instances */
|
2017-09-07 17:08:09 +02:00
|
|
|
for (ALL_LIST_ELEMENTS_RO(om->ospf, n1, ospf)) {
|
2017-08-25 22:51:12 +02:00
|
|
|
/* Update distribute-list, and apply filter. */
|
|
|
|
for (type = 0; type <= ZEBRA_ROUTE_MAX; type++) {
|
|
|
|
struct list *red_list;
|
|
|
|
struct ospf_redist *red;
|
|
|
|
|
|
|
|
red_list = ospf->redist[type];
|
|
|
|
if (red_list)
|
|
|
|
for (ALL_LIST_ELEMENTS_RO(red_list, node,
|
|
|
|
red)) {
|
|
|
|
if (ROUTEMAP(red)) {
|
|
|
|
/* if route-map is not NULL it
|
|
|
|
* may be
|
|
|
|
* using this access list */
|
|
|
|
ospf_distribute_list_update(
|
|
|
|
ospf, type,
|
|
|
|
red->instance);
|
|
|
|
}
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-08-25 22:51:12 +02:00
|
|
|
/* There is place for route-map for default-information
|
|
|
|
* (ZEBRA_ROUTE_MAX),
|
|
|
|
* but no distribute list. */
|
|
|
|
if (type == ZEBRA_ROUTE_MAX)
|
|
|
|
break;
|
|
|
|
|
|
|
|
if (DISTRIBUTE_NAME(ospf, type)) {
|
|
|
|
/* Keep old access-list for distribute-list. */
|
|
|
|
struct access_list *old =
|
|
|
|
DISTRIBUTE_LIST(ospf, type);
|
|
|
|
|
|
|
|
/* Update access-list for distribute-list. */
|
|
|
|
DISTRIBUTE_LIST(ospf, type) =
|
|
|
|
access_list_lookup(
|
|
|
|
AFI_IP,
|
|
|
|
DISTRIBUTE_NAME(ospf, type));
|
|
|
|
|
|
|
|
/* No update for this distribute type. */
|
|
|
|
if (old == NULL
|
|
|
|
&& DISTRIBUTE_LIST(ospf, type) == NULL)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
/* Schedule distribute-list update timer. */
|
|
|
|
if (DISTRIBUTE_LIST(ospf, type) == NULL
|
|
|
|
|| strcmp(DISTRIBUTE_NAME(ospf, type),
|
|
|
|
access->name)
|
|
|
|
== 0)
|
|
|
|
ospf_distribute_list_update(ospf, type,
|
|
|
|
0);
|
|
|
|
}
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-08-25 22:51:12 +02:00
|
|
|
/* Update Area access-list. */
|
|
|
|
for (ALL_LIST_ELEMENTS_RO(ospf->areas, node, area)) {
|
|
|
|
if (EXPORT_NAME(area)) {
|
|
|
|
EXPORT_LIST(area) = NULL;
|
|
|
|
abr_inv++;
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-08-25 22:51:12 +02:00
|
|
|
if (IMPORT_NAME(area)) {
|
|
|
|
IMPORT_LIST(area) = NULL;
|
|
|
|
abr_inv++;
|
|
|
|
}
|
2005-04-07 Paul Jakma <paul.jakma@sun.com>
* (global): Fix up list loops to match changes in lib/linklist,
and some basic auditing of usage.
* configure.ac: define QUAGGA_NO_DEPRECATED_INTERFACES
* HACKING: Add notes about deprecating interfaces and commands.
* lib/linklist.h: Add usage comments.
Rename getdata macro to listgetdata.
Rename nextnode to listnextnode and fix its odd behaviour to be
less dangerous.
Make listgetdata macro assert node is not null, NULL list entries
should be bug condition.
ALL_LIST_ELEMENTS, new macro, forward-referencing macro for use
with for loop, Suggested by Jim Carlson of Sun.
Add ALL_LIST_ELEMENTS_RO for cases which obviously do not need the
"safety" of previous macro.
LISTNODE_ADD and DELETE macros renamed to ATTACH, DETACH, to
distinguish from the similarly named functions, and reflect their
effect better.
Add a QUAGGA_NO_DEPRECATED_INTERFACES define guarded section
with the old defines which were modified above,
for backwards compatibility - guarded to prevent Quagga using it..
* lib/linklist.c: fix up for linklist.h changes.
* ospf6d/ospf6_abr.c: (ospf6_abr_examin_brouter) change to a single
scan of the area list, rather than scanning all areas first for
INTER_ROUTER and then again for INTER_NETWORK. According to
16.2, the scan should be area specific anyway, and further
ospf6d does not seem to implement 16.3 anyway.
2005-04-07 09:30:20 +02:00
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-08-25 22:51:12 +02:00
|
|
|
/* Schedule ABR tasks -- this will be changed -- takada. */
|
|
|
|
if (IS_OSPF_ABR(ospf) && abr_inv)
|
|
|
|
ospf_schedule_abr_task(ospf);
|
|
|
|
}
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
2004-05-10 09:43:59 +02:00
|
|
|
|
|
|
|
/* If prefix-list is updated, do some updates. */
|
|
|
|
void ospf_prefix_list_update(struct prefix_list *plist)
|
|
|
|
{
|
2017-08-25 22:51:12 +02:00
|
|
|
struct ospf *ospf = NULL;
|
2004-05-10 09:43:59 +02:00
|
|
|
int type;
|
|
|
|
int abr_inv = 0;
|
|
|
|
struct ospf_area *area;
|
2017-08-25 22:51:12 +02:00
|
|
|
struct listnode *node, *n1;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2004-05-10 09:43:59 +02:00
|
|
|
/* If OSPF instatnce does not exist, return right now. */
|
2017-08-25 22:51:12 +02:00
|
|
|
if (listcount(om->ospf) == 0)
|
2004-05-10 09:43:59 +02:00
|
|
|
return;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-08-25 22:51:12 +02:00
|
|
|
/* Iterate all ospf [VRF] instances */
|
|
|
|
for (ALL_LIST_ELEMENTS_RO(om->ospf, n1, ospf)) {
|
|
|
|
|
|
|
|
/* Update all route-maps which are used
|
|
|
|
* as redistribution filters.
|
|
|
|
* They might use prefix-list.
|
|
|
|
*/
|
|
|
|
for (type = 0; type <= ZEBRA_ROUTE_MAX; type++) {
|
|
|
|
struct list *red_list;
|
|
|
|
struct ospf_redist *red;
|
|
|
|
|
|
|
|
red_list = ospf->redist[type];
|
2021-04-17 23:57:25 +02:00
|
|
|
if (!red_list)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
for (ALL_LIST_ELEMENTS_RO(red_list, node, red)) {
|
|
|
|
if (ROUTEMAP(red)) {
|
|
|
|
/* if route-map is not NULL
|
|
|
|
* it may be using
|
|
|
|
* this prefix list */
|
|
|
|
ospf_distribute_list_update(
|
|
|
|
ospf, type, red->instance);
|
2017-07-17 14:03:14 +02:00
|
|
|
}
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
}
|
2017-08-25 22:51:12 +02:00
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-08-25 22:51:12 +02:00
|
|
|
/* Update area filter-lists. */
|
|
|
|
for (ALL_LIST_ELEMENTS_RO(ospf->areas, node, area)) {
|
|
|
|
/* Update filter-list in. */
|
2021-04-17 23:57:25 +02:00
|
|
|
if (PREFIX_NAME_IN(area)
|
|
|
|
&& strcmp(PREFIX_NAME_IN(area),
|
|
|
|
prefix_list_name(plist))
|
|
|
|
== 0) {
|
|
|
|
PREFIX_LIST_IN(area) = prefix_list_lookup(
|
|
|
|
AFI_IP, PREFIX_NAME_IN(area));
|
|
|
|
abr_inv++;
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-08-25 22:51:12 +02:00
|
|
|
/* Update filter-list out. */
|
2021-04-17 23:57:25 +02:00
|
|
|
if (PREFIX_NAME_OUT(area)
|
|
|
|
&& strcmp(PREFIX_NAME_OUT(area),
|
|
|
|
prefix_list_name(plist))
|
|
|
|
== 0) {
|
|
|
|
PREFIX_LIST_IN(area) = prefix_list_lookup(
|
|
|
|
AFI_IP, PREFIX_NAME_OUT(area));
|
|
|
|
abr_inv++;
|
|
|
|
}
|
2017-08-25 22:51:12 +02:00
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-08-25 22:51:12 +02:00
|
|
|
/* Schedule ABR task. */
|
|
|
|
if (IS_OSPF_ABR(ospf) && abr_inv)
|
|
|
|
ospf_schedule_abr_task(ospf);
|
|
|
|
}
|
2004-05-10 09:43:59 +02:00
|
|
|
}
|
2003-06-19 04:13:25 +02:00
|
|
|
|
2005-05-06 23:37:42 +02:00
|
|
|
static struct ospf_distance *ospf_distance_new(void)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
2008-08-18 23:13:29 +02:00
|
|
|
return XCALLOC(MTYPE_OSPF_DISTANCE, sizeof(struct ospf_distance));
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
static void ospf_distance_free(struct ospf_distance *odistance)
|
|
|
|
{
|
|
|
|
XFREE(MTYPE_OSPF_DISTANCE, odistance);
|
|
|
|
}
|
|
|
|
|
2004-10-11 13:00:30 +02:00
|
|
|
int ospf_distance_set(struct vty *vty, struct ospf *ospf,
|
|
|
|
const char *distance_str, const char *ip_str,
|
|
|
|
const char *access_list_str)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
struct prefix_ipv4 p;
|
2018-03-27 21:13:34 +02:00
|
|
|
uint8_t distance;
|
2002-12-13 21:15:29 +01:00
|
|
|
struct route_node *rn;
|
|
|
|
struct ospf_distance *odistance;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
ret = str2prefix_ipv4(ip_str, &p);
|
|
|
|
if (ret == 0) {
|
2017-07-13 17:49:13 +02:00
|
|
|
vty_out(vty, "Malformed prefix\n");
|
2017-07-13 21:56:08 +02:00
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
distance = atoi(distance_str);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
/* Get OSPF distance node. */
|
2003-03-25 06:07:42 +01:00
|
|
|
rn = route_node_get(ospf->distance_table, (struct prefix *)&p);
|
2002-12-13 21:15:29 +01:00
|
|
|
if (rn->info) {
|
|
|
|
odistance = rn->info;
|
|
|
|
route_unlock_node(rn);
|
|
|
|
} else {
|
|
|
|
odistance = ospf_distance_new();
|
|
|
|
rn->info = odistance;
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
/* Set distance value. */
|
|
|
|
odistance->distance = distance;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
/* Reset access-list configuration. */
|
|
|
|
if (odistance->access_list) {
|
|
|
|
free(odistance->access_list);
|
|
|
|
odistance->access_list = NULL;
|
|
|
|
}
|
|
|
|
if (access_list_str)
|
|
|
|
odistance->access_list = strdup(access_list_str);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2004-10-11 13:00:30 +02:00
|
|
|
int ospf_distance_unset(struct vty *vty, struct ospf *ospf,
|
|
|
|
const char *distance_str, const char *ip_str,
|
|
|
|
char const *access_list_str)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
struct prefix_ipv4 p;
|
|
|
|
struct route_node *rn;
|
|
|
|
struct ospf_distance *odistance;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
ret = str2prefix_ipv4(ip_str, &p);
|
|
|
|
if (ret == 0) {
|
2017-07-13 17:49:13 +02:00
|
|
|
vty_out(vty, "Malformed prefix\n");
|
2017-07-13 21:56:08 +02:00
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2003-06-19 04:13:25 +02:00
|
|
|
rn = route_node_lookup(ospf->distance_table, (struct prefix *)&p);
|
|
|
|
if (!rn) {
|
2017-07-13 17:49:13 +02:00
|
|
|
vty_out(vty, "Can't find specified prefix\n");
|
2017-07-13 21:56:08 +02:00
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
odistance = rn->info;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
if (odistance->access_list)
|
|
|
|
free(odistance->access_list);
|
|
|
|
ospf_distance_free(odistance);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
rn->info = NULL;
|
|
|
|
route_unlock_node(rn);
|
|
|
|
route_unlock_node(rn);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2003-03-25 06:07:42 +01:00
|
|
|
void ospf_distance_reset(struct ospf *ospf)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
|
|
|
struct route_node *rn;
|
|
|
|
struct ospf_distance *odistance;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2021-04-17 23:57:25 +02:00
|
|
|
for (rn = route_top(ospf->distance_table); rn; rn = route_next(rn)) {
|
|
|
|
odistance = rn->info;
|
|
|
|
if (!odistance)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (odistance->access_list)
|
|
|
|
free(odistance->access_list);
|
|
|
|
ospf_distance_free(odistance);
|
|
|
|
rn->info = NULL;
|
|
|
|
route_unlock_node(rn);
|
|
|
|
}
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
|
|
|
|
2018-03-27 21:13:34 +02:00
|
|
|
uint8_t ospf_distance_apply(struct ospf *ospf, struct prefix_ipv4 *p,
|
|
|
|
struct ospf_route * or)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
|
|
|
|
2003-03-25 06:07:42 +01:00
|
|
|
if (ospf == NULL)
|
2002-12-13 21:15:29 +01:00
|
|
|
return 0;
|
|
|
|
|
2021-04-17 23:57:25 +02:00
|
|
|
if (ospf->distance_intra && or->path_type == OSPF_PATH_INTRA_AREA)
|
|
|
|
return ospf->distance_intra;
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2021-04-17 23:57:25 +02:00
|
|
|
if (ospf->distance_inter && or->path_type == OSPF_PATH_INTER_AREA)
|
|
|
|
return ospf->distance_inter;
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2021-04-17 23:57:25 +02:00
|
|
|
if (ospf->distance_external
|
|
|
|
&& (or->path_type == OSPF_PATH_TYPE1_EXTERNAL ||
|
|
|
|
or->path_type == OSPF_PATH_TYPE2_EXTERNAL))
|
|
|
|
return ospf->distance_external;
|
2003-06-19 04:13:25 +02:00
|
|
|
|
2003-03-25 06:07:42 +01:00
|
|
|
if (ospf->distance_all)
|
|
|
|
return ospf->distance_all;
|
2002-12-13 21:15:29 +01:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-08-25 22:51:12 +02:00
|
|
|
void ospf_zebra_vrf_register(struct ospf *ospf)
|
|
|
|
{
|
|
|
|
if (!zclient || zclient->sock < 0 || !ospf)
|
|
|
|
return;
|
|
|
|
|
2017-11-02 15:54:45 +01:00
|
|
|
if (ospf->vrf_id != VRF_UNKNOWN) {
|
2017-08-25 22:51:12 +02:00
|
|
|
if (IS_DEBUG_OSPF_EVENT)
|
2020-03-05 19:17:54 +01:00
|
|
|
zlog_debug("%s: Register VRF %s id %u", __func__,
|
2017-08-25 22:51:12 +02:00
|
|
|
ospf_vrf_id_to_name(ospf->vrf_id),
|
|
|
|
ospf->vrf_id);
|
|
|
|
zclient_send_reg_requests(zclient, ospf->vrf_id);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void ospf_zebra_vrf_deregister(struct ospf *ospf)
|
|
|
|
{
|
|
|
|
if (!zclient || zclient->sock < 0 || !ospf)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (ospf->vrf_id != VRF_DEFAULT && ospf->vrf_id != VRF_UNKNOWN) {
|
|
|
|
if (IS_DEBUG_OSPF_EVENT)
|
2017-11-02 15:54:45 +01:00
|
|
|
zlog_debug("%s: De-Register VRF %s id %u to Zebra.",
|
2020-03-05 19:17:54 +01:00
|
|
|
__func__, ospf_vrf_id_to_name(ospf->vrf_id),
|
2017-08-25 22:51:12 +02:00
|
|
|
ospf->vrf_id);
|
|
|
|
/* Deregister for router-id, interfaces,
|
|
|
|
* redistributed routes. */
|
|
|
|
zclient_send_dereg_requests(zclient, ospf->vrf_id);
|
|
|
|
}
|
|
|
|
}
|
2020-04-07 19:36:12 +02:00
|
|
|
|
2020-06-16 16:49:38 +02:00
|
|
|
/* Label Manager Functions */
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Check if Label Manager is Ready or not.
|
|
|
|
*
|
|
|
|
* @return True if Label Manager is ready, False otherwise
|
|
|
|
*/
|
|
|
|
bool ospf_zebra_label_manager_ready(void)
|
|
|
|
{
|
|
|
|
return (zclient_sync->sock > 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Request Label Range to the Label Manager.
|
|
|
|
*
|
|
|
|
* @param base base label of the label range to request
|
|
|
|
* @param chunk_size size of the label range to request
|
|
|
|
*
|
|
|
|
* @return 0 on success, -1 on failure
|
|
|
|
*/
|
|
|
|
int ospf_zebra_request_label_range(uint32_t base, uint32_t chunk_size)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
uint32_t start, end;
|
|
|
|
|
|
|
|
if (zclient_sync->sock < 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
ret = lm_get_label_chunk(zclient_sync, 0, base, chunk_size, &start,
|
|
|
|
&end);
|
|
|
|
if (ret < 0) {
|
|
|
|
zlog_warn("%s: error getting label range!", __func__);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Release Label Range to the Label Manager.
|
|
|
|
*
|
|
|
|
* @param start start of label range to release
|
|
|
|
* @param end end of label range to release
|
|
|
|
*
|
|
|
|
* @return 0 on success, -1 otherwise
|
|
|
|
*/
|
|
|
|
int ospf_zebra_release_label_range(uint32_t start, uint32_t end)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (zclient_sync->sock < 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
ret = lm_release_label_chunk(zclient_sync, start, end);
|
|
|
|
if (ret < 0) {
|
|
|
|
zlog_warn("%s: error releasing label range!", __func__);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Connect to the Label Manager.
|
|
|
|
*
|
|
|
|
* @return 0 on success, -1 otherwise
|
|
|
|
*/
|
|
|
|
int ospf_zebra_label_manager_connect(void)
|
|
|
|
{
|
|
|
|
/* Connect to label manager. */
|
|
|
|
if (zclient_socket_connect(zclient_sync) < 0) {
|
|
|
|
zlog_warn("%s: failed connecting synchronous zclient!",
|
|
|
|
__func__);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
/* make socket non-blocking */
|
|
|
|
set_nonblocking(zclient_sync->sock);
|
|
|
|
|
|
|
|
/* Send hello to notify zebra this is a synchronous client */
|
2020-11-11 20:14:37 +01:00
|
|
|
if (zclient_send_hello(zclient_sync) == ZCLIENT_SEND_FAILURE) {
|
2020-06-16 16:49:38 +02:00
|
|
|
zlog_warn("%s: failed sending hello for synchronous zclient!",
|
|
|
|
__func__);
|
|
|
|
close(zclient_sync->sock);
|
|
|
|
zclient_sync->sock = -1;
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Connect to label manager */
|
|
|
|
if (lm_label_manager_connect(zclient_sync, 0) != 0) {
|
|
|
|
zlog_warn("%s: failed connecting to label manager!", __func__);
|
|
|
|
if (zclient_sync->sock > 0) {
|
|
|
|
close(zclient_sync->sock);
|
|
|
|
zclient_sync->sock = -1;
|
|
|
|
}
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
osr_debug("SR (%s): Successfully connected to the Label Manager",
|
|
|
|
__func__);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
*: add VRF ID in the API message header
The API messages are used by zebra to exchange the interfaces, addresses,
routes and router-id information with its clients. To distinguish which
VRF the information belongs to, a new field "VRF ID" is added in the
message header. And hence the message version is increased to 3.
* The new field "VRF ID" in the message header:
Length (2 bytes)
Marker (1 byte)
Version (1 byte)
VRF ID (2 bytes, newly added)
Command (2 bytes)
- Client side:
- zclient_create_header() adds the VRF ID in the message header.
- zclient_read() extracts and validates the VRF ID from the header,
and passes the VRF ID to the callback functions registered to
the API messages.
- All relative functions are appended with a new parameter "vrf_id",
including all the callback functions.
- "vrf_id" is also added to "struct zapi_ipv4" and "struct zapi_ipv6".
Clients need to correctly set the VRF ID when using the API
functions zapi_ipv4_route() and zapi_ipv6_route().
- Till now all messages sent from a client have the default VRF ID
"0" in the header.
- The HELLO message is special, which is used as the heart-beat of
a client, and has no relation with VRF. The VRF ID in the HELLO
message header will always be 0 and ignored by zebra.
- Zebra side:
- zserv_create_header() adds the VRF ID in the message header.
- zebra_client_read() extracts and validates the VRF ID from the
header, and passes the VRF ID to the functions which process
the received messages.
- All relative functions are appended with a new parameter "vrf_id".
* Suppress the messages in a VRF which a client does not care:
Some clients may not care about the information in the VRF X, and
zebra should not send the messages in the VRF X to those clients.
Extra flags are used to indicate which VRF is registered by a client,
and a new message ZEBRA_VRF_UNREGISTER is introduced to let a client
can unregister a VRF when it does not need any information in that
VRF.
A client sends any message other than ZEBRA_VRF_UNREGISTER in a VRF
will automatically register to that VRF.
- lib/vrf:
A new utility "VRF bit-map" is provided to manage the flags for
VRFs, one bit per VRF ID.
- Use vrf_bitmap_init()/vrf_bitmap_free() to initialize/free a
bit-map;
- Use vrf_bitmap_set()/vrf_bitmap_unset() to set/unset a flag
in the given bit-map, corresponding to the given VRF ID;
- Use vrf_bitmap_check() to test whether the flag, in the given
bit-map and for the given VRF ID, is set.
- Client side:
- In "struct zclient", the following flags are changed from
"u_char" to "vrf_bitmap_t":
redist[ZEBRA_ROUTE_MAX]
default_information
These flags are extended for each VRF, and controlled by the
clients themselves (or with the help of zclient_redistribute()
and zclient_redistribute_default()).
- Zebra side:
- In "struct zserv", the following flags are changed from
"u_char" to "vrf_bitmap_t":
redist[ZEBRA_ROUTE_MAX]
redist_default
ifinfo
ridinfo
These flags are extended for each VRF, as the VRF registration
flags. They are maintained on receiving a ZEBRA_XXX_ADD or
ZEBRA_XXX_DELETE message.
When sending an interface/address/route/router-id message in
a VRF to a client, if the corresponding VRF registration flag
is not set, this message will not be dropped by zebra.
- A new function zread_vrf_unregister() is introduced to process
the new command ZEBRA_VRF_UNREGISTER. All the VRF registration
flags are cleared for the requested VRF.
Those clients, who support only the default VRF, will never receive
a message in a non-default VRF, thanks to the filter in zebra.
* New callback for the event of successful connection to zebra:
- zclient_start() is splitted, keeping only the code of connecting
to zebra.
- Now zclient_init()=>zclient_connect()=>zclient_start() operations
are purely dealing with the connection to zbera.
- Once zebra is successfully connected, at the end of zclient_start(),
a new callback is used to inform the client about connection.
- Till now, in the callback of connect-to-zebra event, all clients
send messages to zebra to request the router-id/interface/routes
information in the default VRF.
Of corse in future the client can do anything it wants in this
callback. For example, it may send requests for both default VRF
and some non-default VRFs.
Signed-off-by: Feng Lu <lu.feng@6wind.com>
Reviewed-by: Alain Ritoux <alain.ritoux@6wind.com>
Signed-off-by: Nicolas Dichtel <nicolas.dichtel@6wind.com>
Acked-by: Donald Sharp <sharpd@cumulusnetworks.com>
Conflicts:
lib/zclient.h
lib/zebra.h
zebra/zserv.c
zebra/zserv.h
Conflicts:
bgpd/bgp_nexthop.c
bgpd/bgp_nht.c
bgpd/bgp_zebra.c
isisd/isis_zebra.c
lib/zclient.c
lib/zclient.h
lib/zebra.h
nhrpd/nhrp_interface.c
nhrpd/nhrp_route.c
nhrpd/nhrpd.h
ospf6d/ospf6_zebra.c
ospf6d/ospf6_zebra.h
ospfd/ospf_vty.c
ospfd/ospf_zebra.c
pimd/pim_zebra.c
pimd/pim_zlookup.c
ripd/rip_zebra.c
ripngd/ripng_zebra.c
zebra/redistribute.c
zebra/rt_netlink.c
zebra/zebra_rnh.c
zebra/zebra_rnh.h
zebra/zserv.c
zebra/zserv.h
2014-10-16 03:52:36 +02:00
|
|
|
/*
 * Callback invoked by the zclient library when the session to zebra has
 * been (re-)established (installed as zclient->zebra_connected in
 * ospf_zebra_init()).  Registers this client with zebra's BFD subsystem
 * and requests router-id/interface information for the default VRF.
 */
static void ospf_zebra_connected(struct zclient *zclient)
{
	/* Send the client registration */
	bfd_client_sendmsg(zclient, ZEBRA_BFD_CLIENT_REGISTER, VRF_DEFAULT);

	/* Ask zebra for the default-VRF state this daemon needs. */
	zclient_send_reg_requests(zclient, VRF_DEFAULT);
}
|
|
|
|
|
2020-07-22 19:31:14 +02:00
|
|
|
/*
|
|
|
|
* opaque messages between processes
|
|
|
|
*/
|
|
|
|
static int ospf_opaque_msg_handler(ZAPI_CALLBACK_ARGS)
|
|
|
|
{
|
|
|
|
struct stream *s;
|
|
|
|
struct zapi_opaque_msg info;
|
|
|
|
struct ldp_igp_sync_if_state state;
|
|
|
|
struct ldp_igp_sync_announce announce;
|
2021-01-05 16:05:09 +01:00
|
|
|
struct zapi_opaque_reg_info dst;
|
2020-07-22 19:31:14 +02:00
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
s = zclient->ibuf;
|
|
|
|
|
|
|
|
if (zclient_opaque_decode(s, &info) != 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
switch (info.type) {
|
2021-01-05 16:05:09 +01:00
|
|
|
case LINK_STATE_SYNC:
|
|
|
|
STREAM_GETC(s, dst.proto);
|
|
|
|
STREAM_GETW(s, dst.instance);
|
|
|
|
STREAM_GETL(s, dst.session_id);
|
|
|
|
dst.type = LINK_STATE_SYNC;
|
|
|
|
ret = ospf_te_sync_ted(dst);
|
|
|
|
break;
|
2020-07-22 19:31:14 +02:00
|
|
|
case LDP_IGP_SYNC_IF_STATE_UPDATE:
|
|
|
|
STREAM_GET(&state, s, sizeof(state));
|
|
|
|
ret = ospf_ldp_sync_state_update(state);
|
|
|
|
break;
|
|
|
|
case LDP_IGP_SYNC_ANNOUNCE_UPDATE:
|
|
|
|
STREAM_GET(&announce, s, sizeof(announce));
|
|
|
|
ret = ospf_ldp_sync_announce_update(announce);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
stream_failure:
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2020-12-08 15:44:27 +01:00
|
|
|
static int ospf_zebra_client_close_notify(ZAPI_CALLBACK_ARGS)
|
|
|
|
{
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
struct zapi_client_close_info info;
|
|
|
|
|
|
|
|
if (zapi_client_close_notify_decode(zclient->ibuf, &info) < 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
ospf_ldp_sync_handle_client_close(&info);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2021-10-20 13:07:47 +02:00
|
|
|
/* Dispatch table mapping zebra API message types to their handlers;
 * passed to zclient_new() in ospf_zebra_init(). */
static zclient_handler *const ospf_handlers[] = {
	[ZEBRA_ROUTER_ID_UPDATE] = ospf_router_id_update_zebra,
	[ZEBRA_INTERFACE_ADDRESS_ADD] = ospf_interface_address_add,
	[ZEBRA_INTERFACE_ADDRESS_DELETE] = ospf_interface_address_delete,
	[ZEBRA_INTERFACE_LINK_PARAMS] = ospf_interface_link_params,
	[ZEBRA_INTERFACE_VRF_UPDATE] = ospf_interface_vrf_update,

	/* Route add and delete share one handler. */
	[ZEBRA_REDISTRIBUTE_ROUTE_ADD] = ospf_zebra_read_route,
	[ZEBRA_REDISTRIBUTE_ROUTE_DEL] = ospf_zebra_read_route,

	[ZEBRA_OPAQUE_MESSAGE] = ospf_opaque_msg_handler,

	[ZEBRA_CLIENT_CLOSE_NOTIFY] = ospf_zebra_client_close_notify,
};
|
|
|
|
|
2018-03-27 21:13:34 +02:00
|
|
|
void ospf_zebra_init(struct thread_master *master, unsigned short instance)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
|
|
|
/* Allocate zebra structure. */
|
2021-10-20 13:07:47 +02:00
|
|
|
zclient = zclient_new(master, &zclient_options_default, ospf_handlers,
|
|
|
|
array_size(ospf_handlers));
|
2017-10-11 16:37:20 +02:00
|
|
|
zclient_init(zclient, ZEBRA_ROUTE_OSPF, instance, &ospfd_privs);
|
*: add VRF ID in the API message header
The API messages are used by zebra to exchange the interfaces, addresses,
routes and router-id information with its clients. To distinguish which
VRF the information belongs to, a new field "VRF ID" is added in the
message header. And hence the message version is increased to 3.
* The new field "VRF ID" in the message header:
Length (2 bytes)
Marker (1 byte)
Version (1 byte)
VRF ID (2 bytes, newly added)
Command (2 bytes)
- Client side:
- zclient_create_header() adds the VRF ID in the message header.
- zclient_read() extracts and validates the VRF ID from the header,
and passes the VRF ID to the callback functions registered to
the API messages.
- All relative functions are appended with a new parameter "vrf_id",
including all the callback functions.
- "vrf_id" is also added to "struct zapi_ipv4" and "struct zapi_ipv6".
Clients need to correctly set the VRF ID when using the API
functions zapi_ipv4_route() and zapi_ipv6_route().
- Till now all messages sent from a client have the default VRF ID
"0" in the header.
- The HELLO message is special, which is used as the heart-beat of
a client, and has no relation with VRF. The VRF ID in the HELLO
message header will always be 0 and ignored by zebra.
- Zebra side:
- zserv_create_header() adds the VRF ID in the message header.
- zebra_client_read() extracts and validates the VRF ID from the
header, and passes the VRF ID to the functions which process
the received messages.
- All relative functions are appended with a new parameter "vrf_id".
* Suppress the messages in a VRF which a client does not care:
Some clients may not care about the information in the VRF X, and
zebra should not send the messages in the VRF X to those clients.
Extra flags are used to indicate which VRF is registered by a client,
and a new message ZEBRA_VRF_UNREGISTER is introduced to let a client
can unregister a VRF when it does not need any information in that
VRF.
A client sends any message other than ZEBRA_VRF_UNREGISTER in a VRF
will automatically register to that VRF.
- lib/vrf:
A new utility "VRF bit-map" is provided to manage the flags for
VRFs, one bit per VRF ID.
- Use vrf_bitmap_init()/vrf_bitmap_free() to initialize/free a
bit-map;
- Use vrf_bitmap_set()/vrf_bitmap_unset() to set/unset a flag
in the given bit-map, corresponding to the given VRF ID;
- Use vrf_bitmap_check() to test whether the flag, in the given
bit-map and for the given VRF ID, is set.
- Client side:
- In "struct zclient", the following flags are changed from
"u_char" to "vrf_bitmap_t":
redist[ZEBRA_ROUTE_MAX]
default_information
These flags are extended for each VRF, and controlled by the
clients themselves (or with the help of zclient_redistribute()
and zclient_redistribute_default()).
- Zebra side:
- In "struct zserv", the following flags are changed from
"u_char" to "vrf_bitmap_t":
redist[ZEBRA_ROUTE_MAX]
redist_default
ifinfo
ridinfo
These flags are extended for each VRF, as the VRF registration
flags. They are maintained on receiving a ZEBRA_XXX_ADD or
ZEBRA_XXX_DELETE message.
When sending an interface/address/route/router-id message in
a VRF to a client, if the corresponding VRF registration flag
is not set, this message will not be dropped by zebra.
- A new function zread_vrf_unregister() is introduced to process
the new command ZEBRA_VRF_UNREGISTER. All the VRF registration
flags are cleared for the requested VRF.
Those clients, who support only the default VRF, will never receive
a message in a non-default VRF, thanks to the filter in zebra.
* New callback for the event of successful connection to zebra:
- zclient_start() is splitted, keeping only the code of connecting
to zebra.
- Now zclient_init()=>zclient_connect()=>zclient_start() operations
are purely dealing with the connection to zbera.
- Once zebra is successfully connected, at the end of zclient_start(),
a new callback is used to inform the client about connection.
- Till now, in the callback of connect-to-zebra event, all clients
send messages to zebra to request the router-id/interface/routes
information in the default VRF.
Of corse in future the client can do anything it wants in this
callback. For example, it may send requests for both default VRF
and some non-default VRFs.
Signed-off-by: Feng Lu <lu.feng@6wind.com>
Reviewed-by: Alain Ritoux <alain.ritoux@6wind.com>
Signed-off-by: Nicolas Dichtel <nicolas.dichtel@6wind.com>
Acked-by: Donald Sharp <sharpd@cumulusnetworks.com>
Conflicts:
lib/zclient.h
lib/zebra.h
zebra/zserv.c
zebra/zserv.h
Conflicts:
bgpd/bgp_nexthop.c
bgpd/bgp_nht.c
bgpd/bgp_zebra.c
isisd/isis_zebra.c
lib/zclient.c
lib/zclient.h
lib/zebra.h
nhrpd/nhrp_interface.c
nhrpd/nhrp_route.c
nhrpd/nhrpd.h
ospf6d/ospf6_zebra.c
ospf6d/ospf6_zebra.h
ospfd/ospf_vty.c
ospfd/ospf_zebra.c
pimd/pim_zebra.c
pimd/pim_zlookup.c
ripd/rip_zebra.c
ripngd/ripng_zebra.c
zebra/redistribute.c
zebra/rt_netlink.c
zebra/zebra_rnh.c
zebra/zebra_rnh.h
zebra/zserv.c
zebra/zserv.h
2014-10-16 03:52:36 +02:00
|
|
|
zclient->zebra_connected = ospf_zebra_connected;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2020-06-16 16:49:38 +02:00
|
|
|
/* Initialize special zclient for synchronous message exchanges. */
|
|
|
|
struct zclient_options options = zclient_options_default;
|
|
|
|
options.synchronous = true;
|
2021-10-20 13:07:47 +02:00
|
|
|
zclient_sync = zclient_new(master, &options, NULL, 0);
|
2020-06-16 16:49:38 +02:00
|
|
|
zclient_sync->sock = -1;
|
|
|
|
zclient_sync->redist_default = ZEBRA_ROUTE_OSPF;
|
|
|
|
zclient_sync->instance = instance;
|
|
|
|
/*
|
|
|
|
* session_id must be different from default value (0) to distinguish
|
|
|
|
* the asynchronous socket from the synchronous one
|
|
|
|
*/
|
|
|
|
zclient_sync->session_id = 1;
|
|
|
|
zclient_sync->privs = &ospfd_privs;
|
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
access_list_add_hook(ospf_filter_update);
|
|
|
|
access_list_delete_hook(ospf_filter_update);
|
2004-05-10 09:43:59 +02:00
|
|
|
prefix_list_add_hook(ospf_prefix_list_update);
|
|
|
|
prefix_list_delete_hook(ospf_prefix_list_update);
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
2020-08-06 13:36:22 +02:00
|
|
|
|
|
|
|
/* Ask zebra to issue a neighbor-discovery (ARP/ND) request for prefix
 * @p on interface @ifp, via the asynchronous zclient session. */
void ospf_zebra_send_arp(const struct interface *ifp, const struct prefix *p)
{
	zclient_send_neigh_discovery_req(zclient, ifp, p);
}
|