/*
 * Zebra connect library for OSPFd
 * Copyright (C) 1997, 98, 99, 2000 Kunihiro Ishiguro, Toshiaki Takada
 *
 * This file is part of GNU Zebra.
 *
 * GNU Zebra is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * GNU Zebra is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; see the file COPYING; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <zebra.h>

#include "thread.h"
#include "command.h"
#include "network.h"
#include "prefix.h"
#include "routemap.h"
#include "table.h"
#include "stream.h"
#include "memory.h"
#include "zclient.h"
#include "filter.h"
#include "plist.h"
#include "log.h"
#include "lib/bfd.h"
#include "nexthop.h"

#include "ospfd/ospfd.h"
#include "ospfd/ospf_interface.h"
#include "ospfd/ospf_ism.h"
#include "ospfd/ospf_asbr.h"
#include "ospfd/ospf_abr.h"
#include "ospfd/ospf_lsa.h"
#include "ospfd/ospf_dump.h"
#include "ospfd/ospf_route.h"
#include "ospfd/ospf_lsdb.h"
#include "ospfd/ospf_neighbor.h"
#include "ospfd/ospf_nsm.h"
#include "ospfd/ospf_zebra.h"
#include "ospfd/ospf_te.h"
#include "ospfd/ospf_sr.h"

DEFINE_MTYPE_STATIC(OSPFD, OSPF_EXTERNAL, "OSPF External route table")
DEFINE_MTYPE_STATIC(OSPFD, OSPF_REDISTRIBUTE, "OSPF Redistribute")
DEFINE_MTYPE_STATIC(OSPFD, OSPF_DIST_ARGS, "OSPF Distribute arguments")

/* Zebra structure to hold current status. */
struct zclient *zclient = NULL;

/* For registering threads. */
extern struct thread_master *master;

/* Router-id update message from zebra. */
static int ospf_router_id_update_zebra(ZAPI_CALLBACK_ARGS)
{
	struct ospf *ospf = NULL;
	struct prefix router_id;

	zebra_router_id_update_read(zclient->ibuf, &router_id);

	if (IS_DEBUG_OSPF(zebra, ZEBRA_INTERFACE)) {
		char buf[PREFIX2STR_BUFFER];

		prefix2str(&router_id, buf, sizeof(buf));
		zlog_debug("Zebra rcvd: router id update %s vrf %s id %u", buf,
			   ospf_vrf_id_to_name(vrf_id), vrf_id);
	}

	ospf = ospf_lookup_by_vrf_id(vrf_id);

	if (ospf != NULL) {
		ospf->router_id_zebra = router_id.u.prefix4;
		ospf_router_id_update(ospf);
	} else {
		if (IS_DEBUG_OSPF_EVENT) {
			char buf[PREFIX2STR_BUFFER];

			prefix2str(&router_id, buf, sizeof(buf));
			zlog_debug(
				"%s: ospf instance not found for vrf %s id %u router_id %s",
				__func__, ospf_vrf_id_to_name(vrf_id), vrf_id,
				buf);
		}
	}
	return 0;
}
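
/* Interface address addition message from zebra. */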
static int ospf_interface_address_add(ZAPI_CALLBACK_ARGS)
{
	struct connected *c;
	struct ospf *ospf = NULL;

	c = zebra_interface_address_read(cmd, zclient->ibuf, vrf_id);

	if (c == NULL)
		return 0;

	if (IS_DEBUG_OSPF(zebra, ZEBRA_INTERFACE)) {
		char buf[PREFIX2STR_BUFFER];

		prefix2str(c->address, buf, sizeof(buf));
		zlog_debug("Zebra: interface %s address add %s vrf %s id %u",
			   c->ifp->name, buf, ospf_vrf_id_to_name(vrf_id),
			   vrf_id);
	}

	ospf = ospf_lookup_by_vrf_id(vrf_id);
	if (!ospf)
		return 0;

	ospf_if_update(ospf, c->ifp);

	ospf_if_interface(c->ifp);

	return 0;
}
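
/* Interface address deletion message from zebra. */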
static int ospf_interface_address_delete(ZAPI_CALLBACK_ARGS)
{
	struct connected *c;
	struct interface *ifp;
	struct ospf_interface *oi;
	struct route_node *rn;
	struct prefix p;

	c = zebra_interface_address_read(cmd, zclient->ibuf, vrf_id);

	if (c == NULL)
		return 0;

	if (IS_DEBUG_OSPF(zebra, ZEBRA_INTERFACE)) {
		char buf[PREFIX2STR_BUFFER];

		prefix2str(c->address, buf, sizeof(buf));
		zlog_debug("Zebra: interface %s address delete %s",
			   c->ifp->name, buf);
	}

	ifp = c->ifp;
	p = *c->address;
	p.prefixlen = IPV4_MAX_PREFIXLEN;

	rn = route_node_lookup(IF_OIFS(ifp), &p);
	if (!rn) {
		connected_free(&c);
		return 0;
	}

	assert(rn->info);
	oi = rn->info;
	route_unlock_node(rn);

	/* Call interface hook functions to clean up */
	ospf_if_free(oi);

	ospf_if_interface(c->ifp);

	connected_free(&c);

	return 0;
}
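
/* Link parameters (Traffic Engineering) update message from zebra. */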
static int ospf_interface_link_params(ZAPI_CALLBACK_ARGS)
{
	struct interface *ifp;

	ifp = zebra_interface_link_params_read(zclient->ibuf, vrf_id);

	if (ifp == NULL)
		return 0;

	/* Update TE TLV */
	ospf_mpls_te_update_if(ifp);

	return 0;
}

/* VRF update for an interface. */
static int ospf_interface_vrf_update(ZAPI_CALLBACK_ARGS)
{
	struct interface *ifp = NULL;
	vrf_id_t new_vrf_id;

	ifp = zebra_interface_vrf_update_read(zclient->ibuf, vrf_id,
					      &new_vrf_id);
	if (!ifp)
		return 0;

	if (IS_DEBUG_OSPF_EVENT)
		zlog_debug(
			"%s: Rx Interface %s VRF change vrf_id %u New vrf %s id %u",
			__func__, ifp->name, vrf_id,
			ospf_vrf_id_to_name(new_vrf_id), new_vrf_id);

	/*if_update(ifp, ifp->name, strlen(ifp->name), new_vrf_id);*/
	if_update_to_new_vrf(ifp, new_vrf_id);

	return 0;
}
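
/*
 * Install an OSPF route into zebra's RIB: build a struct zapi_route,
 * set the metric, tag and distance, fill in up to MULTIPATH_NUM
 * nexthops from the route's path list, and send ZEBRA_ROUTE_ADD.
 */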
void ospf_zebra_add(struct ospf *ospf, struct prefix_ipv4 *p,
		    struct ospf_route * or)
{
	struct zapi_route api;
	struct zapi_nexthop *api_nh;
	uint8_t distance;
	struct ospf_path *path;
	struct listnode *node;
	int count = 0;

	memset(&api, 0, sizeof(api));
	api.vrf_id = ospf->vrf_id;
	api.type = ZEBRA_ROUTE_OSPF;
	api.instance = ospf->instance;
	api.safi = SAFI_UNICAST;

	memcpy(&api.prefix, p, sizeof(*p));
	SET_FLAG(api.message, ZAPI_MESSAGE_NEXTHOP);

	/* Metric value. */
	SET_FLAG(api.message, ZAPI_MESSAGE_METRIC);
	if (or->path_type == OSPF_PATH_TYPE1_EXTERNAL)
		api.metric = or->cost + or->u.ext.type2_cost;
	else if (or->path_type == OSPF_PATH_TYPE2_EXTERNAL)
		api.metric = or->u.ext.type2_cost;
	else
		api.metric = or->cost;

	/* Check if path type is ASE */
	if (((or->path_type == OSPF_PATH_TYPE1_EXTERNAL)
	     || (or->path_type == OSPF_PATH_TYPE2_EXTERNAL))
	    && (or->u.ext.tag > 0) && (or->u.ext.tag <= ROUTE_TAG_MAX)) {
		SET_FLAG(api.message, ZAPI_MESSAGE_TAG);
		api.tag = or->u.ext.tag;
	}

	/* Distance value. */
	distance = ospf_distance_apply(ospf, p, or);
	if (distance) {
		SET_FLAG(api.message, ZAPI_MESSAGE_DISTANCE);
		api.distance = distance;
	}

	/* Nexthop, ifindex, distance and metric information. */
	for (ALL_LIST_ELEMENTS_RO(or->paths, node, path)) {
		if (count >= MULTIPATH_NUM)
			break;
		api_nh = &api.nexthops[count];
#ifdef HAVE_NETLINK
		if (path->unnumbered || (path->nexthop.s_addr != INADDR_ANY
					 && path->ifindex != 0)) {
#else /* HAVE_NETLINK */
		if (path->nexthop.s_addr != INADDR_ANY && path->ifindex != 0) {
#endif /* HAVE_NETLINK */
			api_nh->gate.ipv4 = path->nexthop;
			api_nh->ifindex = path->ifindex;
			api_nh->type = NEXTHOP_TYPE_IPV4_IFINDEX;
		} else if (path->nexthop.s_addr != INADDR_ANY) {
			api_nh->gate.ipv4 = path->nexthop;
			api_nh->type = NEXTHOP_TYPE_IPV4;
		} else {
			api_nh->ifindex = path->ifindex;
			api_nh->type = NEXTHOP_TYPE_IFINDEX;
		}
		api_nh->vrf_id = ospf->vrf_id;
		count++;

		if (IS_DEBUG_OSPF(zebra, ZEBRA_REDISTRIBUTE)) {
			char buf[2][INET_ADDRSTRLEN];
			struct interface *ifp;

			ifp = if_lookup_by_index(path->ifindex, ospf->vrf_id);

			zlog_debug(
				"Zebra: Route add %s nexthop %s, ifindex=%d %s",
				prefix2str(p, buf[0], sizeof(buf[0])),
				inet_ntop(AF_INET, &path->nexthop,
					  buf[1], sizeof(buf[1])),
				path->ifindex, ifp ? ifp->name : " ");
		}
	}
	api.nexthop_num = count;

	zclient_route_send(ZEBRA_ROUTE_ADD, zclient, &api);
}
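
/* Withdraw an OSPF route from zebra's RIB. */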
void ospf_zebra_delete(struct ospf *ospf, struct prefix_ipv4 *p,
		       struct ospf_route * or)
{
	struct zapi_route api;

	memset(&api, 0, sizeof(api));
	api.vrf_id = ospf->vrf_id;
	api.type = ZEBRA_ROUTE_OSPF;
	api.instance = ospf->instance;
	api.safi = SAFI_UNICAST;
	memcpy(&api.prefix, p, sizeof(*p));

	if (IS_DEBUG_OSPF(zebra, ZEBRA_REDISTRIBUTE)) {
		char buf[PREFIX2STR_BUFFER];

		zlog_debug("Zebra: Route delete %s",
			   prefix2str(p, buf, sizeof(buf)));
	}

	zclient_route_send(ZEBRA_ROUTE_DELETE, zclient, &api);
}
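
/* Install a blackhole (discard) route for the given prefix. */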
void ospf_zebra_add_discard(struct ospf *ospf, struct prefix_ipv4 *p)
{
	struct zapi_route api;

	memset(&api, 0, sizeof(api));
	api.vrf_id = ospf->vrf_id;
	api.type = ZEBRA_ROUTE_OSPF;
	api.instance = ospf->instance;
	api.safi = SAFI_UNICAST;
	memcpy(&api.prefix, p, sizeof(*p));
	zapi_route_set_blackhole(&api, BLACKHOLE_NULL);

	zclient_route_send(ZEBRA_ROUTE_ADD, zclient, &api);

	if (IS_DEBUG_OSPF(zebra, ZEBRA_REDISTRIBUTE)) {
		char buf[PREFIX2STR_BUFFER];

		zlog_debug("Zebra: Route add discard %s",
			   prefix2str(p, buf, sizeof(buf)));
	}
}
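
/* Remove a previously installed blackhole (discard) route. */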
void ospf_zebra_delete_discard(struct ospf *ospf, struct prefix_ipv4 *p)
{
	struct zapi_route api;

	memset(&api, 0, sizeof(api));
	api.vrf_id = ospf->vrf_id;
	api.type = ZEBRA_ROUTE_OSPF;
	api.instance = ospf->instance;
	api.safi = SAFI_UNICAST;
	memcpy(&api.prefix, p, sizeof(*p));
	zapi_route_set_blackhole(&api, BLACKHOLE_NULL);

	zclient_route_send(ZEBRA_ROUTE_DELETE, zclient, &api);

	if (IS_DEBUG_OSPF(zebra, ZEBRA_REDISTRIBUTE)) {
		char buf[PREFIX2STR_BUFFER];

		zlog_debug("Zebra: Route delete discard %s",
			   prefix2str(p, buf, sizeof(buf)));
	}
}
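
/* Find the ospf_external entry for a given route type and instance. */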
struct ospf_external *ospf_external_lookup(struct ospf *ospf, uint8_t type,
					   unsigned short instance)
{
	struct list *ext_list;
	struct listnode *node;
	struct ospf_external *ext;

	ext_list = ospf->external[type];
	if (!ext_list)
		return (NULL);

	for (ALL_LIST_ELEMENTS_RO(ext_list, node, ext))
		if (ext->instance == instance)
			return ext;

	return NULL;
}
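
/* Create the ospf_external entry for a given route type and instance. */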
|
|
|
|
|
2018-03-27 21:13:34 +02:00
|
|
|
struct ospf_external *ospf_external_add(struct ospf *ospf, uint8_t type,
|
|
|
|
unsigned short instance)
|
{
	struct list *ext_list;
	struct ospf_external *ext;

	ext = ospf_external_lookup(ospf, type, instance);
	if (ext)
		return ext;

	if (!ospf->external[type])
		ospf->external[type] = list_new();
	ext_list = ospf->external[type];

	ext = XCALLOC(MTYPE_OSPF_EXTERNAL, sizeof(struct ospf_external));
	ext->instance = instance;
	EXTERNAL_INFO(ext) = route_table_init();

	listnode_add(ext_list, ext);

	return ext;
}

void ospf_external_del(struct ospf *ospf, uint8_t type, unsigned short instance)
{
	struct ospf_external *ext;

	ext = ospf_external_lookup(ospf, type, instance);

	if (ext) {
		if (EXTERNAL_INFO(ext))
			route_table_finish(EXTERNAL_INFO(ext));

		listnode_delete(ospf->external[type], ext);

		if (!ospf->external[type]->count)
			list_delete(&ospf->external[type]);

		XFREE(MTYPE_OSPF_EXTERNAL, ext);
	}
}

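As a usage note (illustrative only, not from the source), ospf_external_add() and ospf_external_del() above are expected to be paired around the lifetime of one redistributed route source; the wrapper name below is hypothetical.

/* Hypothetical sketch: create and later tear down the per-(type, instance)
 * external route table.  Wrapper name and flow are illustrative only. */
static void example_external_lifecycle(struct ospf *ospf, uint8_t type,
				       unsigned short instance)
{
	struct ospf_external *ext;

	ext = ospf_external_add(ospf, type, instance);
	/* ... redistributed routes are kept in EXTERNAL_INFO(ext) while the
	 * source is active ... */
	(void)ext;

	ospf_external_del(ospf, type, instance);
}
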
/* Update NHLFE for Prefix SID */
void ospf_zebra_update_prefix_sid(const struct sr_prefix *srp)
{
	struct zapi_labels zl;
	struct zapi_nexthop *znh;
	struct listnode *node;
	struct ospf_path *path;

	osr_debug("SR (%s): Update Labels %u for Prefix %pFX", __func__,
		  srp->label_in, (struct prefix *)&srp->prefv4);

	/* Prepare message. */
	memset(&zl, 0, sizeof(zl));
	zl.type = ZEBRA_LSP_OSPF_SR;
	zl.local_label = srp->label_in;

	switch (srp->type) {
	case LOCAL_SID:
		/* Set Label for local Prefix */
		znh = &zl.nexthops[zl.nexthop_num++];
		znh->type = NEXTHOP_TYPE_IFINDEX;
		znh->ifindex = srp->nhlfe.ifindex;
		znh->label_num = 1;
		znh->labels[0] = srp->nhlfe.label_out;
		break;

	case PREF_SID:
		/* Update route in the RIB too. */
		SET_FLAG(zl.message, ZAPI_LABELS_FTN);
		zl.route.prefix.u.prefix4 = srp->prefv4.prefix;
		zl.route.prefix.prefixlen = srp->prefv4.prefixlen;
		zl.route.prefix.family = srp->prefv4.family;
		zl.route.type = ZEBRA_ROUTE_OSPF;
		zl.route.instance = 0;

		/* Check that SRP contains at least one valid path */
		if (srp->route == NULL) {
			return;
		}
		for (ALL_LIST_ELEMENTS_RO(srp->route->paths, node, path)) {
			if (path->srni.label_out == MPLS_INVALID_LABEL)
				continue;

			if (zl.nexthop_num >= MULTIPATH_NUM)
				break;

			znh = &zl.nexthops[zl.nexthop_num++];
			znh->type = NEXTHOP_TYPE_IPV4_IFINDEX;
			znh->gate.ipv4 = path->nexthop;
			znh->ifindex = path->ifindex;
			znh->label_num = 1;
			znh->labels[0] = path->srni.label_out;
		}
		break;

	default:
		return;
	}

	/* Finally, send message to zebra. */
	(void)zebra_send_mpls_labels(zclient, ZEBRA_MPLS_LABELS_REPLACE, &zl);
}

/* Remove NHLFE for Prefix-SID */
void ospf_zebra_delete_prefix_sid(const struct sr_prefix *srp)
{
	struct zapi_labels zl;

	osr_debug("SR (%s): Delete Labels %u for Prefix %pFX", __func__,
		  srp->label_in, (struct prefix *)&srp->prefv4);

	/* Prepare message. */
	memset(&zl, 0, sizeof(zl));
	zl.type = ZEBRA_LSP_OSPF_SR;
	zl.local_label = srp->label_in;

	if (srp->type == PREF_SID) {
		/* Update route in the RIB too */
		SET_FLAG(zl.message, ZAPI_LABELS_FTN);
		zl.route.prefix.u.prefix4 = srp->prefv4.prefix;
		zl.route.prefix.prefixlen = srp->prefv4.prefixlen;
		zl.route.prefix.family = srp->prefv4.family;
		zl.route.type = ZEBRA_ROUTE_OSPF;
		zl.route.instance = 0;
	}

	/* Send message to zebra. */
	(void)zebra_send_mpls_labels(zclient, ZEBRA_MPLS_LABELS_DELETE, &zl);
}

/* Send MPLS Label entry to Zebra for installation or deletion */
void ospf_zebra_send_adjacency_sid(int cmd, struct sr_nhlfe nhlfe)
{
	struct zapi_labels zl;
	struct zapi_nexthop *znh;

	osr_debug("SR (%s): %s Labels %u/%u for Adjacency via %u", __func__,
		  cmd == ZEBRA_MPLS_LABELS_ADD ? "Add" : "Delete",
		  nhlfe.label_in, nhlfe.label_out, nhlfe.ifindex);

	memset(&zl, 0, sizeof(zl));
	zl.type = ZEBRA_LSP_OSPF_SR;
	zl.local_label = nhlfe.label_in;
	zl.nexthop_num = 1;
	znh = &zl.nexthops[0];
	znh->type = NEXTHOP_TYPE_IPV4_IFINDEX;
	znh->gate.ipv4 = nhlfe.nexthop;
	znh->ifindex = nhlfe.ifindex;
	znh->label_num = 1;
	znh->labels[0] = nhlfe.label_out;

	(void)zebra_send_mpls_labels(zclient, cmd, &zl);
}

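A hedged usage sketch for the helper above: the wrapper name, the field values, and the choice of an implicit-null outgoing label are assumptions for illustration, not taken from this file.

/* Hypothetical sketch: ask zebra to install one Adjacency-SID entry via
 * ospf_zebra_send_adjacency_sid().  All values below are placeholders. */
static void example_install_adj_sid(struct in_addr nexthop, ifindex_t ifindex,
				    mpls_label_t label_in)
{
	struct sr_nhlfe nhlfe;

	memset(&nhlfe, 0, sizeof(nhlfe));
	nhlfe.nexthop = nexthop;
	nhlfe.ifindex = ifindex;
	nhlfe.label_in = label_in;
	nhlfe.label_out = MPLS_LABEL_IMPLICIT_NULL; /* assume pop at neighbor */

	ospf_zebra_send_adjacency_sid(ZEBRA_MPLS_LABELS_ADD, nhlfe);
}
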
struct ospf_redist *ospf_redist_lookup(struct ospf *ospf, uint8_t type,
				       unsigned short instance)
{
	struct list *red_list;
	struct listnode *node;
	struct ospf_redist *red;

	red_list = ospf->redist[type];
	if (!red_list)
		return (NULL);

	for (ALL_LIST_ELEMENTS_RO(red_list, node, red))
		if (red->instance == instance)
			return red;

	return NULL;
}

struct ospf_redist *ospf_redist_add(struct ospf *ospf, uint8_t type,
				    unsigned short instance)
{
	struct list *red_list;
	struct ospf_redist *red;

	red = ospf_redist_lookup(ospf, type, instance);
	if (red)
		return red;

	if (!ospf->redist[type])
		ospf->redist[type] = list_new();
2015-05-20 03:03:42 +02:00
|
|
|
|
2017-07-17 14:03:14 +02:00
|
|
|
red_list = ospf->redist[type];
|
2019-02-25 21:30:31 +01:00
|
|
|
red = XCALLOC(MTYPE_OSPF_REDISTRIBUTE, sizeof(struct ospf_redist));
|
2017-07-17 14:03:14 +02:00
|
|
|
red->instance = instance;
|
|
|
|
red->dmetric.type = -1;
|
|
|
|
red->dmetric.value = -1;
|
2018-10-01 13:38:01 +02:00
|
|
|
ROUTEMAP_NAME(red) = NULL;
|
|
|
|
ROUTEMAP(red) = NULL;
|
2015-05-20 03:03:42 +02:00
|
|
|
|
2017-07-17 14:03:14 +02:00
|
|
|
listnode_add(red_list, red);
|
2015-05-20 03:03:42 +02:00
|
|
|
|
2017-07-17 14:03:14 +02:00
|
|
|
return red;
|
2015-05-20 03:03:42 +02:00
|
|
|
}
|
|
|
|
|
2018-03-27 21:13:34 +02:00
|
|
|
void ospf_redist_del(struct ospf *ospf, uint8_t type, unsigned short instance)
|
2015-05-20 03:03:42 +02:00
|
|
|
{
|
2017-07-17 14:03:14 +02:00
|
|
|
struct ospf_redist *red;
|
|
|
|
|
|
|
|
red = ospf_redist_lookup(ospf, type, instance);
|
|
|
|
|
|
|
|
if (red) {
|
|
|
|
listnode_delete(ospf->redist[type], red);
|
|
|
|
if (!ospf->redist[type]->count) {
|
2018-10-02 11:39:51 +02:00
|
|
|
list_delete(&ospf->redist[type]);
|
2017-07-17 14:03:14 +02:00
|
|
|
}
|
2017-06-23 02:19:00 +02:00
|
|
|
ospf_routemap_unset(red);
|
2017-07-13 22:33:29 +02:00
|
|
|
XFREE(MTYPE_OSPF_REDISTRIBUTE, red);
|
2017-07-17 14:03:14 +02:00
|
|
|
}
|
2015-05-20 03:03:42 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-03-27 21:13:34 +02:00
|
|
|
int ospf_is_type_redistributed(struct ospf *ospf, int type,
|
|
|
|
unsigned short instance)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
2017-07-17 14:03:14 +02:00
|
|
|
return (DEFAULT_ROUTE_TYPE(type)
|
2019-01-11 22:20:13 +01:00
|
|
|
? vrf_bitmap_check(zclient->default_information[AFI_IP],
|
2017-08-25 22:51:12 +02:00
|
|
|
ospf->vrf_id)
|
2017-07-17 14:03:14 +02:00
|
|
|
: ((instance
|
|
|
|
&& redist_check_instance(
|
|
|
|
&zclient->mi_redist[AFI_IP][type],
|
|
|
|
instance))
|
|
|
|
|| (!instance
|
|
|
|
&& vrf_bitmap_check(
|
|
|
|
zclient->redist[AFI_IP][type],
|
2017-08-25 22:51:12 +02:00
|
|
|
ospf->vrf_id))));
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
|
|
|
|
2018-03-27 21:13:34 +02:00
|
|
|
int ospf_redistribute_set(struct ospf *ospf, int type, unsigned short instance,
|
2017-07-17 14:03:14 +02:00
|
|
|
int mtype, int mvalue)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
2017-07-17 14:03:14 +02:00
|
|
|
int force = 0;
|
|
|
|
struct ospf_redist *red;
|
|
|
|
|
|
|
|
red = ospf_redist_lookup(ospf, type, instance);
|
2018-06-25 17:18:17 +02:00
|
|
|
|
|
|
|
if (red == NULL) {
|
|
|
|
zlog_err(
|
|
|
|
"Redistribute[%s][%d]: Lookup failed Type[%d] , Metric[%d]",
|
|
|
|
ospf_redist_string(type), instance,
|
|
|
|
metric_type(ospf, type, instance),
|
|
|
|
metric_value(ospf, type, instance));
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
|
2017-08-25 22:51:12 +02:00
|
|
|
if (ospf_is_type_redistributed(ospf, type, instance)) {
|
2017-07-17 14:03:14 +02:00
|
|
|
if (mtype != red->dmetric.type) {
|
|
|
|
red->dmetric.type = mtype;
|
|
|
|
force = LSA_REFRESH_FORCE;
|
|
|
|
}
|
|
|
|
if (mvalue != red->dmetric.value) {
|
|
|
|
red->dmetric.value = mvalue;
|
|
|
|
force = LSA_REFRESH_FORCE;
|
|
|
|
}
|
|
|
|
|
|
|
|
ospf_external_lsa_refresh_type(ospf, type, instance, force);
|
|
|
|
|
|
|
|
if (IS_DEBUG_OSPF(zebra, ZEBRA_REDISTRIBUTE))
|
|
|
|
zlog_debug(
|
|
|
|
"Redistribute[%s][%d]: Refresh Type[%d], Metric[%d]",
|
|
|
|
ospf_redist_string(type), instance,
|
|
|
|
metric_type(ospf, type, instance),
|
|
|
|
metric_value(ospf, type, instance));
|
|
|
|
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
red->dmetric.type = mtype;
|
|
|
|
red->dmetric.value = mvalue;
|
|
|
|
|
2017-11-21 02:21:03 +01:00
|
|
|
ospf_external_add(ospf, type, instance);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
|
|
|
zclient_redistribute(ZEBRA_REDISTRIBUTE_ADD, zclient, AFI_IP, type,
|
2017-08-25 22:51:12 +02:00
|
|
|
instance, ospf->vrf_id);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
|
|
|
if (IS_DEBUG_OSPF(zebra, ZEBRA_REDISTRIBUTE))
|
2018-03-06 20:02:52 +01:00
|
|
|
zlog_debug(
|
|
|
|
"Redistribute[%s][%d] vrf id %u: Start Type[%d], Metric[%d]",
|
|
|
|
ospf_redist_string(type), instance, ospf->vrf_id,
|
|
|
|
metric_type(ospf, type, instance),
|
|
|
|
metric_value(ospf, type, instance));
|
2017-07-17 14:03:14 +02:00
|
|
|
|
|
|
|
ospf_asbr_status_update(ospf, ++ospf->redistribute);
|
|
|
|
|
|
|
|
return CMD_SUCCESS;
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
|
|
|
|
2018-03-27 21:13:34 +02:00
|
|
|
int ospf_redistribute_unset(struct ospf *ospf, int type,
|
|
|
|
unsigned short instance)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
2017-07-17 14:03:14 +02:00
|
|
|
if (type == zclient->redist_default && instance == zclient->instance)
|
|
|
|
return CMD_SUCCESS;
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2017-08-25 22:51:12 +02:00
|
|
|
if (!ospf_is_type_redistributed(ospf, type, instance))
|
2017-07-17 14:03:14 +02:00
|
|
|
return CMD_SUCCESS;
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2017-07-17 14:03:14 +02:00
|
|
|
zclient_redistribute(ZEBRA_REDISTRIBUTE_DELETE, zclient, AFI_IP, type,
|
2017-08-25 22:51:12 +02:00
|
|
|
instance, ospf->vrf_id);
|
2003-06-19 04:13:25 +02:00
|
|
|
|
2017-07-17 14:03:14 +02:00
|
|
|
if (IS_DEBUG_OSPF(zebra, ZEBRA_REDISTRIBUTE))
|
2017-08-25 22:51:12 +02:00
|
|
|
zlog_debug("Redistribute[%s][%d] vrf id %u: Stop",
|
|
|
|
ospf_redist_string(type), instance, ospf->vrf_id);
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2017-07-17 14:03:14 +02:00
|
|
|
/* Remove the routes from OSPF table. */
|
|
|
|
ospf_redistribute_withdraw(ospf, type, instance);
|
2015-05-20 03:03:42 +02:00
|
|
|
|
2017-11-21 02:21:03 +01:00
|
|
|
ospf_external_del(ospf, type, instance);
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2017-07-17 14:03:14 +02:00
|
|
|
ospf_asbr_status_update(ospf, --ospf->redistribute);
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2017-07-17 14:03:14 +02:00
|
|
|
return CMD_SUCCESS;
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
|
|
|
|
2017-07-17 14:03:14 +02:00
|
|
|
int ospf_redistribute_default_set(struct ospf *ospf, int originate, int mtype,
|
|
|
|
int mvalue)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
2018-10-01 13:38:01 +02:00
|
|
|
struct prefix_ipv4 p;
|
|
|
|
struct in_addr nexthop;
|
|
|
|
int cur_originate = ospf->default_originate;
|
2019-07-29 14:46:05 +02:00
|
|
|
const char *type_str = NULL;
|
2018-10-01 13:38:01 +02:00
|
|
|
|
2020-02-06 07:49:02 +01:00
|
|
|
nexthop.s_addr = INADDR_ANY;
|
2018-10-01 13:38:01 +02:00
|
|
|
p.family = AF_INET;
|
2020-02-06 07:49:02 +01:00
|
|
|
p.prefix.s_addr = INADDR_ANY;
|
2018-10-01 13:38:01 +02:00
|
|
|
p.prefixlen = 0;
|
|
|
|
|
2017-07-17 14:03:14 +02:00
|
|
|
ospf->default_originate = originate;
|
2003-04-04 04:44:16 +02:00
|
|
|
|
2019-07-29 14:46:05 +02:00
|
|
|
if (cur_originate == originate) {
|
2018-10-01 13:38:01 +02:00
|
|
|
/* Refresh the LSA since the metric might be different */
|
2017-07-17 14:03:14 +02:00
|
|
|
if (IS_DEBUG_OSPF(zebra, ZEBRA_REDISTRIBUTE))
|
|
|
|
zlog_debug(
|
|
|
|
"Redistribute[%s]: Refresh Type[%d], Metric[%d]",
|
|
|
|
ospf_redist_string(DEFAULT_ROUTE),
|
|
|
|
metric_type(ospf, DEFAULT_ROUTE, 0),
|
|
|
|
metric_value(ospf, DEFAULT_ROUTE, 0));
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2018-10-01 13:38:01 +02:00
|
|
|
ospf_external_lsa_refresh_default(ospf);
|
2019-07-29 14:46:05 +02:00
|
|
|
return CMD_SUCCESS;
|
2018-10-01 13:38:01 +02:00
|
|
|
}
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2019-07-29 14:46:05 +02:00
|
|
|
switch (cur_originate) {
|
|
|
|
case DEFAULT_ORIGINATE_NONE:
|
|
|
|
break;
|
|
|
|
case DEFAULT_ORIGINATE_ZEBRA:
|
2018-10-01 13:38:01 +02:00
|
|
|
zclient_redistribute_default(ZEBRA_REDISTRIBUTE_DEFAULT_DELETE,
|
2019-01-11 22:20:13 +01:00
|
|
|
zclient, AFI_IP, ospf->vrf_id);
|
2019-07-29 14:46:05 +02:00
|
|
|
ospf->redistribute--;
|
|
|
|
break;
|
|
|
|
case DEFAULT_ORIGINATE_ALWAYS:
|
|
|
|
ospf_external_info_delete(ospf, DEFAULT_ROUTE, 0, p);
|
|
|
|
ospf_external_del(ospf, DEFAULT_ROUTE, 0);
|
|
|
|
ospf->redistribute--;
|
|
|
|
break;
|
2018-10-01 13:38:01 +02:00
|
|
|
}
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2019-07-29 14:46:05 +02:00
|
|
|
switch (originate) {
|
|
|
|
case DEFAULT_ORIGINATE_NONE:
|
|
|
|
type_str = "none";
|
|
|
|
break;
|
|
|
|
case DEFAULT_ORIGINATE_ZEBRA:
|
|
|
|
type_str = "normal";
|
|
|
|
ospf->redistribute++;
|
|
|
|
zclient_redistribute_default(ZEBRA_REDISTRIBUTE_DEFAULT_ADD,
|
|
|
|
zclient, AFI_IP, ospf->vrf_id);
|
|
|
|
break;
|
|
|
|
case DEFAULT_ORIGINATE_ALWAYS:
|
|
|
|
type_str = "always";
|
|
|
|
ospf->redistribute++;
|
|
|
|
ospf_external_add(ospf, DEFAULT_ROUTE, 0);
|
|
|
|
ospf_external_info_add(ospf, DEFAULT_ROUTE, 0, p, 0, nexthop,
|
|
|
|
0);
|
|
|
|
break;
|
|
|
|
}
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2017-07-17 14:03:14 +02:00
|
|
|
if (IS_DEBUG_OSPF(zebra, ZEBRA_REDISTRIBUTE))
|
2019-07-29 14:46:05 +02:00
|
|
|
zlog_debug("Redistribute[DEFAULT]: %s Type[%d], Metric[%d]",
|
|
|
|
type_str,
|
|
|
|
metric_type(ospf, DEFAULT_ROUTE, 0),
|
|
|
|
metric_value(ospf, DEFAULT_ROUTE, 0));
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2019-07-29 14:46:05 +02:00
|
|
|
ospf_external_lsa_refresh_default(ospf);
|
|
|
|
ospf_asbr_status_update(ospf, ospf->redistribute);
|
2017-07-17 14:03:14 +02:00
|
|
|
return CMD_SUCCESS;
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
|
|
|
|
2017-07-17 14:03:14 +02:00
|
|
|
static int ospf_external_lsa_originate_check(struct ospf *ospf,
|
|
|
|
struct external_info *ei)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
2017-07-17 14:03:14 +02:00
|
|
|
/* If prefix is multicast, then do not originate LSA. */
|
|
|
|
if (IN_MULTICAST(htonl(ei->p.prefix.s_addr))) {
|
|
|
|
zlog_info(
|
|
|
|
"LSA[Type5:%s]: Not originate AS-external-LSA, "
|
|
|
|
"Prefix belongs multicast",
|
|
|
|
inet_ntoa(ei->p.prefix));
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Take care of default-originate. */
|
|
|
|
if (is_prefix_default(&ei->p))
|
|
|
|
if (ospf->default_originate == DEFAULT_ORIGINATE_NONE) {
|
|
|
|
zlog_info(
|
|
|
|
"LSA[Type5:0.0.0.0]: Not originate AS-external-LSA "
|
|
|
|
"for default");
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 1;
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* If connected prefix is OSPF enable interface, then do not announce. */
|
2017-07-17 14:03:14 +02:00
|
|
|
int ospf_distribute_check_connected(struct ospf *ospf, struct external_info *ei)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
2017-07-17 14:03:14 +02:00
|
|
|
struct listnode *node;
|
|
|
|
struct ospf_interface *oi;
|
2002-12-13 21:15:29 +01:00
|
|
|
|
|
|
|
|
2017-07-17 14:03:14 +02:00
|
|
|
for (ALL_LIST_ELEMENTS_RO(ospf->oiflist, node, oi))
|
|
|
|
if (prefix_match(oi->address, (struct prefix *)&ei->p))
|
|
|
|
return 0;
|
|
|
|
return 1;
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* return 1 if external LSA must be originated, 0 otherwise */
|
2017-07-17 14:03:14 +02:00
|
|
|
int ospf_redistribute_check(struct ospf *ospf, struct external_info *ei,
|
|
|
|
int *changed)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
2017-07-17 14:03:14 +02:00
|
|
|
struct route_map_set_values save_values;
|
|
|
|
struct prefix_ipv4 *p = &ei->p;
|
|
|
|
struct ospf_redist *red;
|
2018-03-27 21:13:34 +02:00
|
|
|
uint8_t type = is_prefix_default(&ei->p) ? DEFAULT_ROUTE : ei->type;
|
|
|
|
unsigned short instance = is_prefix_default(&ei->p) ? 0 : ei->instance;
|
2020-05-18 06:40:48 +02:00
|
|
|
route_tag_t saved_tag = 0;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
|
|
|
if (changed)
|
|
|
|
*changed = 0;
|
|
|
|
|
|
|
|
if (!ospf_external_lsa_originate_check(ospf, ei))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* Take care connected route. */
|
|
|
|
if (type == ZEBRA_ROUTE_CONNECT
|
|
|
|
&& !ospf_distribute_check_connected(ospf, ei))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (!DEFAULT_ROUTE_TYPE(type) && DISTRIBUTE_NAME(ospf, type))
|
|
|
|
/* distribute-list exists, but access-list may not? */
|
|
|
|
if (DISTRIBUTE_LIST(ospf, type))
|
|
|
|
if (access_list_apply(DISTRIBUTE_LIST(ospf, type), p)
|
|
|
|
== FILTER_DENY) {
|
2018-04-23 21:57:25 +02:00
|
|
|
if (IS_DEBUG_OSPF(zebra, ZEBRA_REDISTRIBUTE)) {
|
|
|
|
char buf[PREFIX2STR_BUFFER];
|
2017-07-17 14:03:14 +02:00
|
|
|
zlog_debug(
|
2018-04-23 21:57:25 +02:00
|
|
|
"Redistribute[%s]: %s filtered by distribute-list.",
|
2017-07-17 14:03:14 +02:00
|
|
|
ospf_redist_string(type),
|
2018-04-23 21:57:25 +02:00
|
|
|
prefix2str(p, buf, sizeof(buf)));
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
save_values = ei->route_map_set;
|
|
|
|
ospf_reset_route_map_set_values(&ei->route_map_set);
|
2020-05-18 07:02:34 +02:00
|
|
|
|
2020-05-18 06:40:48 +02:00
|
|
|
saved_tag = ei->tag;
|
2020-05-18 07:02:34 +02:00
|
|
|
/* Resetting with original route tag */
|
|
|
|
ei->tag = ei->orig_tag;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
|
|
|
/* apply route-map if needed */
|
|
|
|
red = ospf_redist_lookup(ospf, type, instance);
|
|
|
|
if (red && ROUTEMAP_NAME(red)) {
|
lib: Introducing a 3rd state for route-map match cmd: RMAP_NOOP
Introducing a 3rd state for the route_map_apply library function: RMAP_NOOP
Traditionally, route-map MATCH rule APIs were designed to return
a binary response, consisting of either RMAP_MATCH or RMAP_NOMATCH.
(Route-map SET rule APIs return RMAP_OKAY or RMAP_ERROR.)
Depending on this response, the following state machine decided the
course of action:
State1:
If the match cmd returns RMAP_MATCH, keep the existing behaviour.
If the route-map type is PERMIT, execute set cmds or call cmds if applicable,
otherwise PERMIT.
Else, if the route-map type is DENY, we DENYMATCH right away.
State2:
If the match cmd returns RMAP_NOMATCH, continue on to the next route-map. If there
are no other rules, or if all the rules return RMAP_NOMATCH, return DENYMATCH.
We require a 3rd state because of the following situation:
The issue: what if the rule API needs to abort or ignore a rule?
The "match evpn vni xx" route-map filter can be applied to incoming routes
regardless of whether the tunnel type is vxlan or mpls.
This rule should be N/A for an mpls-based evpn route, but applicable only to a
vxlan-based evpn route.
Also, this rule should be applicable only for routes with a VNI label, and
not for routes without labels. For example, type 3 and type 4 EVPN routes
do not have labels, so this match cmd should let them through.
Today, the filter produces either a match or nomatch response regardless of
whether it is mpls/vxlan, resulting in either permitting or denying the
route. So an mpls evpn route may get filtered out incorrectly.
E.g.: "route-map RM1 permit 10 ; match evpn vni 20" or
"route-map RM2 deny 20 ; match vni 20"
With the introduction of the 3rd state, we can abort this rule check safely.
How? The rule API can now return RMAP_NOOP to indicate
that it encountered an invalid check and needs to abort just that rule,
but continue with other rules.
As a result we have a 3rd state:
State3:
If the match cmd returned RMAP_NOOP,
then proceed to the other route-map entries; otherwise, if there are no more
rules or if all the rules return RMAP_NOOP, return RMAP_PERMITMATCH.
Signed-off-by: Lakshman Krishnamoorthy <lkrishnamoor@vmware.com>
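To make the three-way result concrete, here is a minimal, self-contained sketch of how a match callback could use the third state. The enum, struct, and function names below are illustrative stand-ins, not the actual lib/routemap.h definitions:

/* Illustrative sketch only: shows the RMAP_NOOP idea with stand-in types. */
enum rmap_match_sketch { SKETCH_MATCH, SKETCH_NOMATCH, SKETCH_NOOP };

struct sketch_route {
	int has_vni_label;	/* 0 for e.g. EVPN type-3/type-4 routes */
	unsigned int vni;	/* VNI carried in the route's label, if any */
};

static enum rmap_match_sketch match_evpn_vni_sketch(const struct sketch_route *route,
						    unsigned int configured_vni)
{
	if (!route->has_vni_label)
		return SKETCH_NOOP;	/* rule not applicable: skip this rule only */
	if (route->vni == configured_vni)
		return SKETCH_MATCH;	/* permit/deny or run set cmds as usual */
	return SKETCH_NOMATCH;		/* fall through to the next route-map entry */
}

With this, a label-less EVPN route no longer forces a match/nomatch verdict from an inapplicable rule; evaluation simply moves on to the remaining rules, as described in State3 above.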
2019-06-19 23:04:36 +02:00
|
|
|
route_map_result_t ret;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
|
|
|
ret = route_map_apply(ROUTEMAP(red), (struct prefix *)p,
|
|
|
|
RMAP_OSPF, ei);
|
|
|
|
|
|
|
|
if (ret == RMAP_DENYMATCH) {
|
|
|
|
ei->route_map_set = save_values;
|
2018-04-23 21:57:25 +02:00
|
|
|
if (IS_DEBUG_OSPF(zebra, ZEBRA_REDISTRIBUTE)) {
|
|
|
|
char buf[PREFIX2STR_BUFFER];
|
2017-07-17 14:03:14 +02:00
|
|
|
zlog_debug(
|
2018-04-23 21:57:25 +02:00
|
|
|
"Redistribute[%s]: %s filtered by route-map.",
|
2017-07-17 14:03:14 +02:00
|
|
|
ospf_redist_string(type),
|
2018-04-23 21:57:25 +02:00
|
|
|
prefix2str(p, buf, sizeof(buf)));
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* check if 'route-map set' changed something */
|
2020-05-18 06:40:48 +02:00
|
|
|
if (changed) {
|
2017-07-17 14:03:14 +02:00
|
|
|
*changed = !ospf_route_map_set_compare(
|
|
|
|
&ei->route_map_set, &save_values);
|
2020-05-18 06:40:48 +02:00
|
|
|
|
|
|
|
/* check if tag is modified */
|
|
|
|
*changed |= (saved_tag != ei->tag);
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
return 1;
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* OSPF route-map set for redistribution */
|
2017-07-17 14:03:14 +02:00
|
|
|
void ospf_routemap_set(struct ospf_redist *red, const char *name)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
2019-02-04 14:22:39 +01:00
|
|
|
if (ROUTEMAP_NAME(red)) {
|
|
|
|
route_map_counter_decrement(ROUTEMAP(red));
|
2017-07-17 14:03:14 +02:00
|
|
|
free(ROUTEMAP_NAME(red));
|
2019-02-04 14:22:39 +01:00
|
|
|
}
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2017-07-17 14:03:14 +02:00
|
|
|
ROUTEMAP_NAME(red) = strdup(name);
|
|
|
|
ROUTEMAP(red) = route_map_lookup_by_name(name);
|
2019-02-04 14:22:39 +01:00
|
|
|
route_map_counter_increment(ROUTEMAP(red));
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
|
|
|
|
2017-07-17 14:03:14 +02:00
|
|
|
void ospf_routemap_unset(struct ospf_redist *red)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
2019-02-04 14:22:39 +01:00
|
|
|
if (ROUTEMAP_NAME(red)) {
|
|
|
|
route_map_counter_decrement(ROUTEMAP(red));
|
2017-07-17 14:03:14 +02:00
|
|
|
free(ROUTEMAP_NAME(red));
|
2019-02-04 14:22:39 +01:00
|
|
|
}
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2017-07-17 14:03:14 +02:00
|
|
|
ROUTEMAP_NAME(red) = NULL;
|
|
|
|
ROUTEMAP(red) = NULL;
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Zebra route add and delete treatment. */
|
2019-05-03 21:42:59 +02:00
|
|
|
static int ospf_zebra_read_route(ZAPI_CALLBACK_ARGS)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
2017-08-21 03:10:50 +02:00
|
|
|
struct zapi_route api;
|
|
|
|
struct prefix_ipv4 p;
|
2017-07-17 14:03:14 +02:00
|
|
|
unsigned long ifindex;
|
|
|
|
struct in_addr nexthop;
|
|
|
|
struct external_info *ei;
|
|
|
|
struct ospf *ospf;
|
|
|
|
int i;
|
2018-10-01 13:38:01 +02:00
|
|
|
uint8_t rt_type;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-08-25 22:51:12 +02:00
|
|
|
ospf = ospf_lookup_by_vrf_id(vrf_id);
|
2017-08-21 03:10:50 +02:00
|
|
|
if (ospf == NULL)
|
|
|
|
return 0;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-08-21 03:10:50 +02:00
|
|
|
if (zapi_route_decode(zclient->ibuf, &api) < 0)
|
|
|
|
return -1;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-08-21 03:10:50 +02:00
|
|
|
ifindex = api.nexthops[0].ifindex;
|
|
|
|
nexthop = api.nexthops[0].gate.ipv4;
|
2018-10-01 13:38:01 +02:00
|
|
|
rt_type = api.type;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-08-21 03:10:50 +02:00
|
|
|
memcpy(&p, &api.prefix, sizeof(p));
|
2017-07-17 14:03:14 +02:00
|
|
|
if (IPV4_NET127(ntohl(p.prefix.s_addr)))
|
|
|
|
return 0;
|
|
|
|
|
2018-10-01 13:38:01 +02:00
|
|
|
/* Redistributed route is the default route.
|
|
|
|
* Here, route type is used as 'ZEBRA_ROUTE_KERNEL' for
|
|
|
|
* updating ex-info. But in resetting (no default-info
|
|
|
|
* originate) ZEBRA_ROUTE_MAX is used to delete the ex-info.
|
|
|
|
* Resolved this inconsistency by maintaining same route type.
|
|
|
|
*/
|
|
|
|
if (is_prefix_default(&p))
|
|
|
|
rt_type = DEFAULT_ROUTE;
|
|
|
|
|
2018-02-28 08:59:42 +01:00
|
|
|
if (IS_DEBUG_OSPF(zebra, ZEBRA_REDISTRIBUTE)) {
|
|
|
|
char buf_prefix[PREFIX_STRLEN];
|
|
|
|
prefix2str(&api.prefix, buf_prefix, sizeof(buf_prefix));
|
|
|
|
|
2019-07-24 21:57:34 +02:00
|
|
|
zlog_debug("%s: cmd %s from client %s: vrf_id %d, p %s",
|
|
|
|
__func__, zserv_command_string(cmd),
|
2018-02-28 08:59:42 +01:00
|
|
|
zebra_route_string(api.type), vrf_id, buf_prefix);
|
|
|
|
}
|
|
|
|
|
2019-05-03 21:42:59 +02:00
|
|
|
if (cmd == ZEBRA_REDISTRIBUTE_ROUTE_ADD) {
|
2017-07-17 14:03:14 +02:00
|
|
|
/* XXX|HACK|TODO|FIXME:
|
2012-04-11 23:52:46 +02:00
|
|
|
* Maybe we should ignore reject/blackhole routes? Testing
|
|
|
|
* shows that there is no problems though and this is only way
|
|
|
|
* to "summarize" routes in ASBR at the moment. Maybe we need
|
|
|
|
* just a better generalised solution for these types?
|
2017-07-17 14:03:14 +02:00
|
|
|
*/
|
|
|
|
|
2017-08-21 03:10:50 +02:00
|
|
|
/* Protocol tag overwrites all other tag value sent by zebra */
|
2018-10-01 13:38:01 +02:00
|
|
|
if (ospf->dtag[rt_type] > 0)
|
|
|
|
api.tag = ospf->dtag[rt_type];
|
2017-07-17 14:03:14 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Given zebra sends update for a prefix via ADD message, it
|
|
|
|
* should
|
|
|
|
* be considered as an implicit DEL for that prefix with other
|
|
|
|
* source
|
|
|
|
* types.
|
|
|
|
*/
|
2018-10-01 13:38:01 +02:00
|
|
|
for (i = 0; i <= ZEBRA_ROUTE_MAX; i++)
|
|
|
|
if (i != rt_type)
|
2018-03-06 20:02:52 +01:00
|
|
|
ospf_external_info_delete(ospf, i, api.instance,
|
|
|
|
p);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2018-10-01 13:38:01 +02:00
|
|
|
ei = ospf_external_info_add(ospf, rt_type, api.instance, p,
|
2017-11-21 02:21:03 +01:00
|
|
|
ifindex, nexthop, api.tag);
|
2017-07-17 14:03:14 +02:00
|
|
|
if (ei == NULL) {
|
|
|
|
/* Nothing has changed, so nothing to do; return */
|
|
|
|
return 0;
|
|
|
|
}
|
2020-02-06 07:49:02 +01:00
|
|
|
if (ospf->router_id.s_addr != INADDR_ANY) {
|
2017-07-17 14:03:14 +02:00
|
|
|
if (ei) {
|
|
|
|
if (is_prefix_default(&p))
|
|
|
|
ospf_external_lsa_refresh_default(ospf);
|
|
|
|
else {
|
|
|
|
struct ospf_lsa *current;
|
|
|
|
|
|
|
|
current = ospf_external_info_find_lsa(
|
|
|
|
ospf, &ei->p);
|
|
|
|
if (!current)
|
|
|
|
ospf_external_lsa_originate(
|
|
|
|
ospf, ei);
|
|
|
|
else {
|
|
|
|
if (IS_DEBUG_OSPF(
|
|
|
|
zebra,
|
|
|
|
ZEBRA_REDISTRIBUTE))
|
|
|
|
zlog_debug(
|
2017-08-21 03:10:50 +02:00
|
|
|
"ospf_zebra_read_route() : %s refreshing LSA",
|
2017-07-17 14:03:14 +02:00
|
|
|
inet_ntoa(
|
|
|
|
p.prefix));
|
|
|
|
ospf_external_lsa_refresh(
|
|
|
|
ospf, current, ei,
|
|
|
|
LSA_REFRESH_FORCE);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2019-05-03 21:42:59 +02:00
|
|
|
} else /* if (cmd == ZEBRA_REDISTRIBUTE_ROUTE_DEL) */
|
2015-10-21 06:38:38 +02:00
|
|
|
{
|
2018-10-01 13:38:01 +02:00
|
|
|
ospf_external_info_delete(ospf, rt_type, api.instance, p);
|
2017-07-17 14:03:14 +02:00
|
|
|
if (is_prefix_default(&p))
|
|
|
|
ospf_external_lsa_refresh_default(ospf);
|
|
|
|
else
|
2018-10-01 13:38:01 +02:00
|
|
|
ospf_external_lsa_flush(ospf, rt_type, &p,
|
2017-07-17 14:03:14 +02:00
|
|
|
ifindex /*, nexthop */);
|
2015-10-21 06:38:38 +02:00
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
|
|
|
return 0;
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
2014-06-04 06:53:35 +02:00
|
|
|
|
2003-06-19 04:13:25 +02:00
|
|
|
|
2017-07-17 14:03:14 +02:00
|
|
|
int ospf_distribute_list_out_set(struct ospf *ospf, int type, const char *name)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
2017-07-17 14:03:14 +02:00
|
|
|
/* Lookup access-list for distribute-list. */
|
|
|
|
DISTRIBUTE_LIST(ospf, type) = access_list_lookup(AFI_IP, name);
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2017-07-17 14:03:14 +02:00
|
|
|
/* Clear previous distribute-name. */
|
|
|
|
if (DISTRIBUTE_NAME(ospf, type))
|
|
|
|
free(DISTRIBUTE_NAME(ospf, type));
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2017-07-17 14:03:14 +02:00
|
|
|
/* Set distribute-name. */
|
|
|
|
DISTRIBUTE_NAME(ospf, type) = strdup(name);
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2017-07-17 14:03:14 +02:00
|
|
|
/* If access-list have been set, schedule update timer. */
|
|
|
|
if (DISTRIBUTE_LIST(ospf, type))
|
|
|
|
ospf_distribute_list_update(ospf, type, 0);
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2017-07-17 14:03:14 +02:00
|
|
|
return CMD_SUCCESS;
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
|
|
|
|
2017-07-17 14:03:14 +02:00
|
|
|
int ospf_distribute_list_out_unset(struct ospf *ospf, int type,
|
|
|
|
const char *name)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
2017-07-17 14:03:14 +02:00
|
|
|
/* Schedule update timer. */
|
|
|
|
if (DISTRIBUTE_LIST(ospf, type))
|
|
|
|
ospf_distribute_list_update(ospf, type, 0);
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2017-07-17 14:03:14 +02:00
|
|
|
/* Unset distribute-list. */
|
|
|
|
DISTRIBUTE_LIST(ospf, type) = NULL;
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2017-07-17 14:03:14 +02:00
|
|
|
/* Clear distribute-name. */
|
|
|
|
if (DISTRIBUTE_NAME(ospf, type))
|
|
|
|
free(DISTRIBUTE_NAME(ospf, type));
|
2003-06-19 04:13:25 +02:00
|
|
|
|
2017-07-17 14:03:14 +02:00
|
|
|
DISTRIBUTE_NAME(ospf, type) = NULL;
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2017-07-17 14:03:14 +02:00
|
|
|
return CMD_SUCCESS;
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* distribute-list update timer. */
|
2017-07-17 14:03:14 +02:00
|
|
|
static int ospf_distribute_list_update_timer(struct thread *thread)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
2017-07-17 14:03:14 +02:00
|
|
|
struct route_node *rn;
|
|
|
|
struct external_info *ei;
|
|
|
|
struct route_table *rt;
|
|
|
|
struct ospf_lsa *lsa;
|
2017-08-25 22:51:12 +02:00
|
|
|
int type, default_refresh = 0, arg_type;
|
|
|
|
struct ospf *ospf = NULL;
|
2018-03-06 20:02:52 +01:00
|
|
|
void **arg = THREAD_ARG(thread);
|
2017-08-25 22:51:12 +02:00
|
|
|
|
|
|
|
ospf = (struct ospf *)arg[0];
|
|
|
|
arg_type = (int)(intptr_t)arg[1];
|
2017-07-17 14:03:14 +02:00
|
|
|
|
|
|
|
if (ospf == NULL)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
ospf->t_distribute_update = NULL;
|
|
|
|
|
|
|
|
zlog_info("Zebra[Redistribute]: distribute-list update timer fired!");
|
|
|
|
|
2017-08-25 22:51:12 +02:00
|
|
|
if (IS_DEBUG_OSPF_EVENT) {
|
2018-03-06 20:02:52 +01:00
|
|
|
zlog_debug(
|
|
|
|
"%s: ospf distribute-list update arg_type %d vrf %s id %d",
|
2020-03-06 15:23:22 +01:00
|
|
|
__func__, arg_type, ospf_vrf_id_to_name(ospf->vrf_id),
|
|
|
|
ospf->vrf_id);
|
2017-08-25 22:51:12 +02:00
|
|
|
}
|
|
|
|
|
2017-07-17 14:03:14 +02:00
|
|
|
/* foreach all external info. */
|
|
|
|
for (type = 0; type <= ZEBRA_ROUTE_MAX; type++) {
|
|
|
|
struct list *ext_list;
|
|
|
|
struct listnode *node;
|
|
|
|
struct ospf_external *ext;
|
|
|
|
|
2017-11-21 02:21:03 +01:00
|
|
|
ext_list = ospf->external[type];
|
2017-07-17 14:03:14 +02:00
|
|
|
if (!ext_list)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
for (ALL_LIST_ELEMENTS_RO(ext_list, node, ext)) {
|
|
|
|
rt = ext->external_info;
|
|
|
|
if (!rt)
|
|
|
|
continue;
|
|
|
|
for (rn = route_top(rt); rn; rn = route_next(rn))
|
|
|
|
if ((ei = rn->info) != NULL) {
|
|
|
|
if (is_prefix_default(&ei->p))
|
|
|
|
default_refresh = 1;
|
|
|
|
else if (
|
|
|
|
(lsa = ospf_external_info_find_lsa(
|
|
|
|
ospf, &ei->p)))
|
|
|
|
ospf_external_lsa_refresh(
|
|
|
|
ospf, lsa, ei,
|
|
|
|
LSA_REFRESH_IF_CHANGED);
|
|
|
|
else
|
|
|
|
ospf_external_lsa_originate(
|
|
|
|
ospf, ei);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (default_refresh)
|
|
|
|
ospf_external_lsa_refresh_default(ospf);
|
2017-08-25 22:51:12 +02:00
|
|
|
|
|
|
|
XFREE(MTYPE_OSPF_DIST_ARGS, arg);
|
2017-07-17 14:03:14 +02:00
|
|
|
return 0;
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Update distribute-list and set timer to apply access-list. */
|
2018-03-27 21:13:34 +02:00
|
|
|
void ospf_distribute_list_update(struct ospf *ospf, int type,
|
|
|
|
unsigned short instance)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
2017-07-17 14:03:14 +02:00
|
|
|
struct ospf_external *ext;
|
2018-03-06 20:02:52 +01:00
|
|
|
void **args = XCALLOC(MTYPE_OSPF_DIST_ARGS, sizeof(void *) * 2);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-08-25 22:51:12 +02:00
|
|
|
args[0] = ospf;
|
2018-03-06 20:02:52 +01:00
|
|
|
args[1] = (void *)((ptrdiff_t)type);
|
2017-08-25 22:51:12 +02:00
|
|
|
|
2017-07-17 14:03:14 +02:00
|
|
|
/* External info does not exist. */
|
2017-11-21 02:21:03 +01:00
|
|
|
ext = ospf_external_lookup(ospf, type, instance);
|
2020-02-24 14:37:34 +01:00
|
|
|
if (!ext || !EXTERNAL_INFO(ext)) {
|
2017-10-09 22:08:45 +02:00
|
|
|
XFREE(MTYPE_OSPF_DIST_ARGS, args);
|
2017-07-17 14:03:14 +02:00
|
|
|
return;
|
2017-10-09 22:08:45 +02:00
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
|
|
|
/* If exists previously invoked thread, then let it continue. */
|
2017-10-09 22:08:45 +02:00
|
|
|
if (ospf->t_distribute_update) {
|
|
|
|
XFREE(MTYPE_OSPF_DIST_ARGS, args);
|
2017-07-17 14:03:14 +02:00
|
|
|
return;
|
2017-10-09 22:08:45 +02:00
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
|
|
|
/* Set timer. */
|
|
|
|
ospf->t_distribute_update = NULL;
|
2020-04-08 07:57:15 +02:00
|
|
|
thread_add_timer_msec(master, ospf_distribute_list_update_timer, args,
|
|
|
|
ospf->min_ls_interval,
|
2017-07-17 14:03:14 +02:00
|
|
|
&ospf->t_distribute_update);
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* If access-list is updated, apply some check. */
|
2017-07-17 14:03:14 +02:00
|
|
|
static void ospf_filter_update(struct access_list *access)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
2017-07-17 14:03:14 +02:00
|
|
|
struct ospf *ospf;
|
|
|
|
int type;
|
|
|
|
int abr_inv = 0;
|
|
|
|
struct ospf_area *area;
|
2017-08-25 22:51:12 +02:00
|
|
|
struct listnode *node, *n1;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
|
|
|
/* If OSPF instance does not exist, return right now. */
|
2017-08-25 22:51:12 +02:00
|
|
|
if (listcount(om->ospf) == 0)
|
2017-07-17 14:03:14 +02:00
|
|
|
return;
|
|
|
|
|
2017-08-25 22:51:12 +02:00
|
|
|
/* Iterate all ospf [VRF] instances */
|
2017-09-07 17:08:09 +02:00
|
|
|
for (ALL_LIST_ELEMENTS_RO(om->ospf, n1, ospf)) {
|
2017-08-25 22:51:12 +02:00
|
|
|
/* Update distribute-list, and apply filter. */
|
|
|
|
for (type = 0; type <= ZEBRA_ROUTE_MAX; type++) {
|
|
|
|
struct list *red_list;
|
|
|
|
struct ospf_redist *red;
|
|
|
|
|
|
|
|
red_list = ospf->redist[type];
|
|
|
|
if (red_list)
|
2018-03-06 20:02:52 +01:00
|
|
|
for (ALL_LIST_ELEMENTS_RO(red_list, node,
|
|
|
|
red)) {
|
2017-08-25 22:51:12 +02:00
|
|
|
if (ROUTEMAP(red)) {
|
2018-03-06 20:02:52 +01:00
|
|
|
/* if route-map is not NULL it
|
|
|
|
* may be
|
2017-08-25 22:51:12 +02:00
|
|
|
* using this access list */
|
|
|
|
ospf_distribute_list_update(
|
2018-03-06 20:02:52 +01:00
|
|
|
ospf, type,
|
|
|
|
red->instance);
|
2017-08-25 22:51:12 +02:00
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
}
|
|
|
|
|
2017-08-25 22:51:12 +02:00
|
|
|
/* There is a place for a route-map for default-information
|
|
|
|
* (ZEBRA_ROUTE_MAX),
|
|
|
|
* but no distribute list. */
|
|
|
|
if (type == ZEBRA_ROUTE_MAX)
|
|
|
|
break;
|
|
|
|
|
|
|
|
if (DISTRIBUTE_NAME(ospf, type)) {
|
|
|
|
/* Keep old access-list for distribute-list. */
|
2018-03-06 20:02:52 +01:00
|
|
|
struct access_list *old =
|
|
|
|
DISTRIBUTE_LIST(ospf, type);
|
2017-08-25 22:51:12 +02:00
|
|
|
|
|
|
|
/* Update access-list for distribute-list. */
|
2018-03-06 20:02:52 +01:00
|
|
|
DISTRIBUTE_LIST(ospf, type) =
|
|
|
|
access_list_lookup(
|
|
|
|
AFI_IP,
|
|
|
|
DISTRIBUTE_NAME(ospf, type));
|
2017-08-25 22:51:12 +02:00
|
|
|
|
|
|
|
/* No update for this distribute type. */
|
2018-03-06 20:02:52 +01:00
|
|
|
if (old == NULL
|
|
|
|
&& DISTRIBUTE_LIST(ospf, type) == NULL)
|
2017-08-25 22:51:12 +02:00
|
|
|
continue;
|
|
|
|
|
|
|
|
/* Schedule distribute-list update timer. */
|
|
|
|
if (DISTRIBUTE_LIST(ospf, type) == NULL
|
2018-03-06 20:02:52 +01:00
|
|
|
|| strcmp(DISTRIBUTE_NAME(ospf, type),
|
|
|
|
access->name)
|
|
|
|
== 0)
|
|
|
|
ospf_distribute_list_update(ospf, type,
|
|
|
|
0);
|
2017-08-25 22:51:12 +02:00
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
}
|
|
|
|
|
2017-08-25 22:51:12 +02:00
|
|
|
/* Update Area access-list. */
|
|
|
|
for (ALL_LIST_ELEMENTS_RO(ospf->areas, node, area)) {
|
|
|
|
if (EXPORT_NAME(area)) {
|
|
|
|
EXPORT_LIST(area) = NULL;
|
|
|
|
abr_inv++;
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-08-25 22:51:12 +02:00
|
|
|
if (IMPORT_NAME(area)) {
|
|
|
|
IMPORT_LIST(area) = NULL;
|
|
|
|
abr_inv++;
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
}
|
|
|
|
|
2017-08-25 22:51:12 +02:00
|
|
|
/* Schedule ABR tasks -- this will be changed -- takada. */
|
|
|
|
if (IS_OSPF_ABR(ospf) && abr_inv)
|
|
|
|
ospf_schedule_abr_task(ospf);
|
|
|
|
}
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
2004-05-10 09:43:59 +02:00
|
|
|
|
|
|
|
/* If prefix-list is updated, do some updates. */
|
2017-07-17 14:03:14 +02:00
|
|
|
void ospf_prefix_list_update(struct prefix_list *plist)
|
2004-05-10 09:43:59 +02:00
|
|
|
{
|
2017-08-25 22:51:12 +02:00
|
|
|
struct ospf *ospf = NULL;
|
2017-07-17 14:03:14 +02:00
|
|
|
int type;
|
|
|
|
int abr_inv = 0;
|
|
|
|
struct ospf_area *area;
|
2017-08-25 22:51:12 +02:00
|
|
|
struct listnode *node, *n1;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
|
|
|
/* If OSPF instance does not exist, return right now. */
|
2017-08-25 22:51:12 +02:00
|
|
|
if (listcount(om->ospf) == 0)
|
2017-07-17 14:03:14 +02:00
|
|
|
return;
|
|
|
|
|
2017-08-25 22:51:12 +02:00
|
|
|
/* Iterate all ospf [VRF] instances */
|
|
|
|
for (ALL_LIST_ELEMENTS_RO(om->ospf, n1, ospf)) {
|
|
|
|
|
|
|
|
/* Update all route-maps which are used
|
|
|
|
* as redistribution filters.
|
|
|
|
* They might use prefix-list.
|
|
|
|
*/
|
|
|
|
for (type = 0; type <= ZEBRA_ROUTE_MAX; type++) {
|
|
|
|
struct list *red_list;
|
|
|
|
struct ospf_redist *red;
|
|
|
|
|
|
|
|
red_list = ospf->redist[type];
|
|
|
|
if (red_list) {
|
2018-03-06 20:02:52 +01:00
|
|
|
for (ALL_LIST_ELEMENTS_RO(red_list, node,
|
|
|
|
red)) {
|
2017-08-25 22:51:12 +02:00
|
|
|
if (ROUTEMAP(red)) {
|
|
|
|
/* if route-map is not NULL
|
|
|
|
* it may be using
|
|
|
|
* this prefix list */
|
|
|
|
ospf_distribute_list_update(
|
|
|
|
ospf, type,
|
|
|
|
red->instance);
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
}
|
|
|
|
}
|
2017-08-25 22:51:12 +02:00
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-08-25 22:51:12 +02:00
|
|
|
/* Update area filter-lists. */
|
|
|
|
for (ALL_LIST_ELEMENTS_RO(ospf->areas, node, area)) {
|
|
|
|
/* Update filter-list in. */
|
|
|
|
if (PREFIX_NAME_IN(area))
|
|
|
|
if (strcmp(PREFIX_NAME_IN(area),
|
2018-03-06 20:02:52 +01:00
|
|
|
prefix_list_name(plist))
|
|
|
|
== 0) {
|
2017-08-25 22:51:12 +02:00
|
|
|
PREFIX_LIST_IN(area) =
|
|
|
|
prefix_list_lookup(
|
2018-03-06 20:02:52 +01:00
|
|
|
AFI_IP,
|
|
|
|
PREFIX_NAME_IN(area));
|
2017-08-25 22:51:12 +02:00
|
|
|
abr_inv++;
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-08-25 22:51:12 +02:00
|
|
|
/* Update filter-list out. */
|
|
|
|
if (PREFIX_NAME_OUT(area))
|
|
|
|
if (strcmp(PREFIX_NAME_OUT(area),
|
2018-03-06 20:02:52 +01:00
|
|
|
prefix_list_name(plist))
|
|
|
|
== 0) {
|
2017-08-25 22:51:12 +02:00
|
|
|
PREFIX_LIST_OUT(area) =
|
|
|
|
prefix_list_lookup(
|
2018-03-06 20:02:52 +01:00
|
|
|
AFI_IP,
|
|
|
|
PREFIX_NAME_OUT(area));
|
2017-08-25 22:51:12 +02:00
|
|
|
abr_inv++;
|
|
|
|
}
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-08-25 22:51:12 +02:00
|
|
|
/* Schedule ABR task. */
|
|
|
|
if (IS_OSPF_ABR(ospf) && abr_inv)
|
|
|
|
ospf_schedule_abr_task(ospf);
|
|
|
|
}
|
2004-05-10 09:43:59 +02:00
|
|
|
}
|
2003-06-19 04:13:25 +02:00
|
|
|
|
2017-07-17 14:03:14 +02:00
|
|
|
static struct ospf_distance *ospf_distance_new(void)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
2017-07-17 14:03:14 +02:00
|
|
|
return XCALLOC(MTYPE_OSPF_DISTANCE, sizeof(struct ospf_distance));
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
|
|
|
|
2017-07-17 14:03:14 +02:00
|
|
|
static void ospf_distance_free(struct ospf_distance *odistance)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
2017-07-17 14:03:14 +02:00
|
|
|
XFREE(MTYPE_OSPF_DISTANCE, odistance);
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
|
|
|
|
2017-07-17 14:03:14 +02:00
|
|
|
int ospf_distance_set(struct vty *vty, struct ospf *ospf,
|
|
|
|
const char *distance_str, const char *ip_str,
|
|
|
|
const char *access_list_str)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
2017-07-17 14:03:14 +02:00
|
|
|
int ret;
|
|
|
|
struct prefix_ipv4 p;
|
2018-03-27 21:13:34 +02:00
|
|
|
uint8_t distance;
|
2017-07-17 14:03:14 +02:00
|
|
|
struct route_node *rn;
|
|
|
|
struct ospf_distance *odistance;
|
|
|
|
|
|
|
|
ret = str2prefix_ipv4(ip_str, &p);
|
|
|
|
if (ret == 0) {
|
|
|
|
vty_out(vty, "Malformed prefix\n");
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
|
|
|
|
distance = atoi(distance_str);
|
|
|
|
|
|
|
|
/* Get OSPF distance node. */
|
|
|
|
rn = route_node_get(ospf->distance_table, (struct prefix *)&p);
|
|
|
|
if (rn->info) {
|
|
|
|
odistance = rn->info;
|
|
|
|
route_unlock_node(rn);
|
|
|
|
} else {
|
|
|
|
odistance = ospf_distance_new();
|
|
|
|
rn->info = odistance;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Set distance value. */
|
|
|
|
odistance->distance = distance;
|
|
|
|
|
|
|
|
/* Reset access-list configuration. */
|
|
|
|
if (odistance->access_list) {
|
|
|
|
free(odistance->access_list);
|
|
|
|
odistance->access_list = NULL;
|
|
|
|
}
|
|
|
|
if (access_list_str)
|
|
|
|
odistance->access_list = strdup(access_list_str);
|
|
|
|
|
|
|
|
return CMD_SUCCESS;
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
|
|
|
|
2017-07-17 14:03:14 +02:00
|
|
|
int ospf_distance_unset(struct vty *vty, struct ospf *ospf,
|
|
|
|
const char *distance_str, const char *ip_str,
|
|
|
|
char const *access_list_str)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
2017-07-17 14:03:14 +02:00
|
|
|
int ret;
|
|
|
|
struct prefix_ipv4 p;
|
|
|
|
struct route_node *rn;
|
|
|
|
struct ospf_distance *odistance;
|
|
|
|
|
|
|
|
ret = str2prefix_ipv4(ip_str, &p);
|
|
|
|
if (ret == 0) {
|
|
|
|
vty_out(vty, "Malformed prefix\n");
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
|
|
|
|
rn = route_node_lookup(ospf->distance_table, (struct prefix *)&p);
|
|
|
|
if (!rn) {
|
|
|
|
vty_out(vty, "Can't find specified prefix\n");
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
|
|
|
|
odistance = rn->info;
|
|
|
|
|
|
|
|
if (odistance->access_list)
|
|
|
|
free(odistance->access_list);
|
|
|
|
ospf_distance_free(odistance);
|
|
|
|
|
|
|
|
rn->info = NULL;
|
|
|
|
route_unlock_node(rn);
|
|
|
|
route_unlock_node(rn);
|
|
|
|
|
|
|
|
return CMD_SUCCESS;
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
|
|
|
|
2017-07-17 14:03:14 +02:00
|
|
|
void ospf_distance_reset(struct ospf *ospf)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
2017-07-17 14:03:14 +02:00
|
|
|
struct route_node *rn;
|
|
|
|
struct ospf_distance *odistance;
|
|
|
|
|
|
|
|
for (rn = route_top(ospf->distance_table); rn; rn = route_next(rn))
|
|
|
|
if ((odistance = rn->info) != NULL) {
|
|
|
|
if (odistance->access_list)
|
|
|
|
free(odistance->access_list);
|
|
|
|
ospf_distance_free(odistance);
|
|
|
|
rn->info = NULL;
|
|
|
|
route_unlock_node(rn);
|
|
|
|
}
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
|
|
|
|
2018-03-27 21:13:34 +02:00
|
|
|
uint8_t ospf_distance_apply(struct ospf *ospf, struct prefix_ipv4 *p,
			    struct ospf_route * or)
{
	if (ospf == NULL)
		return 0;

	if (ospf->distance_intra)
		if (or->path_type == OSPF_PATH_INTRA_AREA)
			return ospf->distance_intra;

	if (ospf->distance_inter)
		if (or->path_type == OSPF_PATH_INTER_AREA)
			return ospf->distance_inter;

	if (ospf->distance_external)
		if (or->path_type == OSPF_PATH_TYPE1_EXTERNAL ||
		    or->path_type == OSPF_PATH_TYPE2_EXTERNAL)
			return ospf->distance_external;

	if (ospf->distance_all)
		return ospf->distance_all;

	return 0;
}

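/* Ask zebra for router-id, interface and redistribution updates in this
 * instance's VRF.  Skipped while the VRF id is still unknown. */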
void ospf_zebra_vrf_register(struct ospf *ospf)
{
	if (!zclient || zclient->sock < 0 || !ospf)
		return;

	if (ospf->vrf_id != VRF_UNKNOWN) {
		if (IS_DEBUG_OSPF_EVENT)
			zlog_debug("%s: Register VRF %s id %u", __func__,
				   ospf_vrf_id_to_name(ospf->vrf_id),
				   ospf->vrf_id);
		zclient_send_reg_requests(zclient, ospf->vrf_id);
	}
}

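/* Counterpart of ospf_zebra_vrf_register(): withdraw those requests for a
 * known, non-default VRF. */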
void ospf_zebra_vrf_deregister(struct ospf *ospf)
{
	if (!zclient || zclient->sock < 0 || !ospf)
		return;

	if (ospf->vrf_id != VRF_DEFAULT && ospf->vrf_id != VRF_UNKNOWN) {
		if (IS_DEBUG_OSPF_EVENT)
			zlog_debug("%s: De-Register VRF %s id %u to Zebra.",
				   __func__, ospf_vrf_id_to_name(ospf->vrf_id),
				   ospf->vrf_id);
		/* Deregister for router-id, interfaces,
		 * redistributed routes. */
		zclient_send_dereg_requests(zclient, ospf->vrf_id);
	}
}

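/* Callback invoked by the zclient code once the session to zebra is
 * (re)established: register as a BFD client and (re)send the normal
 * registration requests for the default VRF. */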
static void ospf_zebra_connected(struct zclient *zclient)
{
	/* Send the client registration */
	bfd_client_sendmsg(zclient, ZEBRA_BFD_CLIENT_REGISTER, VRF_DEFAULT);

	zclient_send_reg_requests(zclient, VRF_DEFAULT);
}
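
/* Set up the zclient session used to talk to zebra: allocate and initialise
 * it, install the per-message callbacks, and hook access-list/prefix-list
 * changes so OSPF is notified of filter updates. */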
void ospf_zebra_init(struct thread_master *master, unsigned short instance)
{
	/* Allocate zebra structure. */
	zclient = zclient_new(master, &zclient_options_default);
	zclient_init(zclient, ZEBRA_ROUTE_OSPF, instance, &ospfd_privs);
	zclient->zebra_connected = ospf_zebra_connected;
	zclient->router_id_update = ospf_router_id_update_zebra;
	zclient->interface_address_add = ospf_interface_address_add;
	zclient->interface_address_delete = ospf_interface_address_delete;
	zclient->interface_link_params = ospf_interface_link_params;
	zclient->interface_vrf_update = ospf_interface_vrf_update;

	zclient->redistribute_route_add = ospf_zebra_read_route;
	zclient->redistribute_route_del = ospf_zebra_read_route;

	access_list_add_hook(ospf_filter_update);
	access_list_delete_hook(ospf_filter_update);
	prefix_list_add_hook(ospf_prefix_list_update);
	prefix_list_delete_hook(ospf_prefix_list_update);
}