/* Zebra Nexthop Group Code.
 * Copyright (C) 2019 Cumulus Networks, Inc.
 *                    Donald Sharp
 *                    Stephen Worley
 *
 * This file is part of FRR.
 *
 * FRR is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * FRR is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with FRR; see the file COPYING.  If not, write to the Free
 * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
 * 02111-1307, USA.
 */
#include <zebra.h>

#include "lib/nexthop.h"
#include "lib/nexthop_group_private.h"
#include "lib/routemap.h"
#include "lib/mpls.h"
#include "lib/jhash.h"

#include "zebra/connected.h"
#include "zebra/debug.h"
#include "zebra/zebra_router.h"
#include "zebra/zebra_nhg.h"
#include "zebra/zebra_rnh.h"
#include "zebra/zebra_routemap.h"
#include "zebra/rt.h"

static void *zebra_nhg_alloc(void *arg)
{
	struct nhg_hash_entry *nhe;
	struct nhg_hash_entry *copy = arg;

	nhe = XMALLOC(MTYPE_TMP, sizeof(struct nhg_hash_entry));

	nhe->vrf_id = copy->vrf_id;
	nhe->refcnt = 0;
	nhe->dplane_ref = zebra_router_get_next_sequence();
	nhe->nhg.nexthop = NULL;

	nexthop_group_copy(&nhe->nhg, &copy->nhg);

	nhe->refcnt = 1;

	return nhe;
}

static uint32_t zebra_nhg_hash_key_nexthop_group(struct nexthop_group *nhg)
{
	struct nexthop *nh;
	uint32_t i;
	uint32_t key = 0;

	/*
	 * We are not interested in hashing over any recursively
	 * resolved nexthops
	 */
	for (nh = nhg->nexthop; nh; nh = nh->next) {
		key = jhash_1word(nh->type, key);
		key = jhash_2words(nh->vrf_id, nh->nh_label_type, key);
		/* gate and blackhole are together in a union */
		key = jhash(&nh->gate, sizeof(nh->gate), key);
		key = jhash(&nh->src, sizeof(nh->src), key);
		key = jhash(&nh->rmap_src, sizeof(nh->rmap_src), key);
		if (nh->nh_label) {
			for (i = 0; i < nh->nh_label->num_labels; i++)
				key = jhash_1word(nh->nh_label->label[i], key);
		}
		switch (nh->type) {
		case NEXTHOP_TYPE_IPV4_IFINDEX:
		case NEXTHOP_TYPE_IPV6_IFINDEX:
		case NEXTHOP_TYPE_IFINDEX:
			key = jhash_1word(nh->ifindex, key);
			break;
		case NEXTHOP_TYPE_BLACKHOLE:
		case NEXTHOP_TYPE_IPV4:
		case NEXTHOP_TYPE_IPV6:
			break;
		}
	}
	return key;
}

uint32_t zebra_nhg_hash_key(const void *arg)
{
	const struct nhg_hash_entry *nhe = arg;
	int key = 0x5a351234;

	key = jhash_1word(nhe->vrf_id, key);

	return jhash_1word(zebra_nhg_hash_key_nexthop_group(&nhe->nhg), key);
}

uint32_t zebra_nhg_id_key(const void *arg)
{
	const struct nhg_hash_entry *nhe = arg;

	return nhe->id;
}

bool zebra_nhg_id_equal(const void *arg1, const void *arg2)
{
	const struct nhg_hash_entry *nhe1 = arg1;
	const struct nhg_hash_entry *nhe2 = arg2;

	return (nhe1->id == nhe2->id);
}

bool zebra_nhg_hash_equal(const void *arg1, const void *arg2)
{
	const struct nhg_hash_entry *nhe1 = arg1;
	const struct nhg_hash_entry *nhe2 = arg2;
	struct nexthop *nh1, *nh2;
	uint32_t nh_count = 0;

	if (nhe1->vrf_id != nhe2->vrf_id)
		return false;

	/*
	 * Again we are not interested in looking at any recursively
	 * resolved nexthops.  Top level only
	 */
	for (nh1 = nhe1->nhg.nexthop; nh1; nh1 = nh1->next) {
		uint32_t inner_nh_count = 0;

		for (nh2 = nhe2->nhg.nexthop; nh2; nh2 = nh2->next) {
			if (inner_nh_count == nh_count) {
				break;
			}
			inner_nh_count++;
		}

		if (!nexthop_same(nh1, nh2))
			return false;

		nh_count++;
	}

	return true;
}

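/*
 * Usage sketch (not part of the original file): the key/equal callbacks
 * above are the hooks for FRR's generic hash table.  Zebra keeps two
 * tables in zrouter -- one keyed on the nexthop-group contents
 * (zebra_nhg_hash_key/zebra_nhg_hash_equal) and one keyed on the group
 * ID (zebra_nhg_id_key/zebra_nhg_id_equal).  Their creation would look
 * roughly like the following; the exact call site, initial size and
 * table names are assumptions, not verified against zebra_router.c:
 *
 *	zrouter.nhgs = hash_create_size(8, zebra_nhg_hash_key,
 *					zebra_nhg_hash_equal,
 *					"Zebra Router Nexthop Groups");
 *	zrouter.nhgs_id = hash_create_size(8, zebra_nhg_id_key,
 *					   zebra_nhg_id_equal,
 *					   "Zebra Router Nexthop Groups ID");
 */
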
/**
 * Helper function for lookup and get()
 * since we are using two different tables.
 *
 * Avoiding code duplication hopefully.
 */
static struct nhg_hash_entry *
zebra_nhg_lookup_get(struct hash *hash_table,
		     struct nhg_hash_entry *lookup)
{
	struct nhg_hash_entry *nhe;

	nhe = hash_lookup(hash_table, lookup);

	if (!nhe)
		nhe = hash_get(hash_table, lookup, zebra_nhg_alloc);
	else
		nhe->refcnt++;

	return nhe;
}

void zebra_nhg_find_id(uint32_t id, struct nexthop_group *nhg)
{
	struct nhg_hash_entry lookup = {0};

	lookup.nhg = *nhg;

	zebra_nhg_lookup_get(zrouter.nhgs_id, &lookup);
}

void zebra_nhg_find(struct nexthop_group *nhg, struct route_entry *re)
{
	struct nhg_hash_entry lookup;

	memset(&lookup, 0, sizeof(lookup));
	lookup.vrf_id = re->vrf_id;
	lookup.nhg = *nhg;

	re->nhe = zebra_nhg_lookup_get(zrouter.nhgs, &lookup);
}

void zebra_nhg_release(struct route_entry *re)
{
	struct nhg_hash_entry lookup, *nhe;

	lookup.vrf_id = re->vrf_id;
	lookup.nhg = *re->ng;

	nhe = hash_lookup(zrouter.nhgs, &lookup);
	nhe->refcnt--;

	if (nhe->refcnt == 0)
		hash_release(zrouter.nhgs, nhe);
	// re->ng = NULL;
}

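/*
 * Reference-count lifecycle implied by the find/release pair above
 * (illustrative sketch, not original code):
 *
 *	zebra_nhg_find(nhg, re);    // new entry allocated, refcnt = 1
 *	zebra_nhg_find(nhg, re2);   // same group found, refcnt = 2
 *	zebra_nhg_release(re);      // refcnt = 1
 *	zebra_nhg_release(re2);     // refcnt = 0 -> hash_release()
 */
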
static void nexthop_set_resolved(afi_t afi, const struct nexthop *newhop,
				 struct nexthop *nexthop)
{
	struct nexthop *resolved_hop;
	uint8_t num_labels = 0;
	mpls_label_t labels[MPLS_MAX_LABELS];
	enum lsp_types_t label_type = ZEBRA_LSP_NONE;
	int i = 0;

	resolved_hop = nexthop_new();
	SET_FLAG(resolved_hop->flags, NEXTHOP_FLAG_ACTIVE);

	resolved_hop->vrf_id = nexthop->vrf_id;
	switch (newhop->type) {
	case NEXTHOP_TYPE_IPV4:
	case NEXTHOP_TYPE_IPV4_IFINDEX:
		/* If the resolving route specifies a gateway, use it */
		resolved_hop->type = newhop->type;
		resolved_hop->gate.ipv4 = newhop->gate.ipv4;

		if (newhop->ifindex) {
			resolved_hop->type = NEXTHOP_TYPE_IPV4_IFINDEX;
			resolved_hop->ifindex = newhop->ifindex;
		}
		break;
	case NEXTHOP_TYPE_IPV6:
	case NEXTHOP_TYPE_IPV6_IFINDEX:
		resolved_hop->type = newhop->type;
		resolved_hop->gate.ipv6 = newhop->gate.ipv6;

		if (newhop->ifindex) {
			resolved_hop->type = NEXTHOP_TYPE_IPV6_IFINDEX;
			resolved_hop->ifindex = newhop->ifindex;
		}
		break;
	case NEXTHOP_TYPE_IFINDEX:
		/* If the resolving route is an interface route,
		 * it means the gateway we are looking up is connected
		 * to that interface. (The actual network is _not_ onlink).
		 * Therefore, the resolved route should have the original
		 * gateway as nexthop as it is directly connected.
		 *
		 * On Linux, we have to set the onlink netlink flag because
		 * otherwise, the kernel won't accept the route.
		 */
		resolved_hop->flags |= NEXTHOP_FLAG_ONLINK;
		if (afi == AFI_IP) {
			resolved_hop->type = NEXTHOP_TYPE_IPV4_IFINDEX;
			resolved_hop->gate.ipv4 = nexthop->gate.ipv4;
		} else if (afi == AFI_IP6) {
			resolved_hop->type = NEXTHOP_TYPE_IPV6_IFINDEX;
			resolved_hop->gate.ipv6 = nexthop->gate.ipv6;
		}
		resolved_hop->ifindex = newhop->ifindex;
		break;
	case NEXTHOP_TYPE_BLACKHOLE:
		resolved_hop->type = NEXTHOP_TYPE_BLACKHOLE;
		resolved_hop->bh_type = newhop->bh_type;
		break;
	}

	if (newhop->flags & NEXTHOP_FLAG_ONLINK)
		resolved_hop->flags |= NEXTHOP_FLAG_ONLINK;

	/* Copy labels of the resolved route and the parent resolving to it */
	if (newhop->nh_label) {
		for (i = 0; i < newhop->nh_label->num_labels; i++)
			labels[num_labels++] = newhop->nh_label->label[i];
		label_type = newhop->nh_label_type;
	}

	if (nexthop->nh_label) {
		for (i = 0; i < nexthop->nh_label->num_labels; i++)
			labels[num_labels++] = nexthop->nh_label->label[i];

		/* If the parent has labels, use its type */
		label_type = nexthop->nh_label_type;
	}

	if (num_labels)
		nexthop_add_labels(resolved_hop, label_type, num_labels,
				   labels);

	resolved_hop->rparent = nexthop;
	_nexthop_add(&nexthop->resolved, resolved_hop);
}

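/*
 * The label handling above appends the labels of the nexthop being
 * resolved to (newhop) and then the parent nexthop's own labels, so a
 * recursive chain ends up carrying the full stack.  Illustration taken
 * from the change that introduced this behaviour:
 *
 *   S>  3.3.3.3/32 via 2.2.2.2 (recursive), label 3333
 *         resolves through 2.2.2.2/32 via 7.7.7.7 (recursive), label 2222
 *         which is reachable over dummy1 onlink, label 1111
 *
 *   => the installed, fully resolved nexthop carries label 1111/2222/3333
 *      instead of dropping the intermediate labels.
 */
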
/* Checks if nexthop we are trying to resolve to is valid */
static bool nexthop_valid_resolve(const struct nexthop *nexthop,
				  const struct nexthop *resolved)
{
	/* Can't resolve to a recursive nexthop */
	if (CHECK_FLAG(resolved->flags, NEXTHOP_FLAG_RECURSIVE))
		return false;

	switch (nexthop->type) {
	case NEXTHOP_TYPE_IPV4_IFINDEX:
	case NEXTHOP_TYPE_IPV6_IFINDEX:
		/* If the nexthop we are resolving to does not match the
		 * ifindex for the nexthop the route wanted, its not valid.
		 */
		if (nexthop->ifindex != resolved->ifindex)
			return false;
		break;
	case NEXTHOP_TYPE_IPV4:
	case NEXTHOP_TYPE_IPV6:
	case NEXTHOP_TYPE_IFINDEX:
	case NEXTHOP_TYPE_BLACKHOLE:
		break;
	}

	return true;
}

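/*
 * For example (illustrative, not original code): a route whose nexthop is
 * "192.0.2.1 dev eth0" (NEXTHOP_TYPE_IPV4_IFINDEX) may only be resolved by
 * a nexthop that is itself on eth0; a candidate on a different ifindex, or
 * one already flagged NEXTHOP_FLAG_RECURSIVE, is rejected above.
 */
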
/*
 * Given a nexthop we need to properly recursively resolve
 * the route.  As such, do a table lookup to find and match
 * if at all possible.  Set the nexthop->ifindex as appropriate
 */
static int nexthop_active(afi_t afi, struct route_entry *re,
			  struct nexthop *nexthop, struct route_node *top)
{
	struct prefix p;
	struct route_table *table;
	struct route_node *rn;
	struct route_entry *match = NULL;
	int resolved;
	struct nexthop *newhop;
	struct interface *ifp;
	rib_dest_t *dest;
	struct zebra_vrf *zvrf;

	if ((nexthop->type == NEXTHOP_TYPE_IPV4)
	    || nexthop->type == NEXTHOP_TYPE_IPV6)
		nexthop->ifindex = 0;

	UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE);
	nexthops_free(nexthop->resolved);
	nexthop->resolved = NULL;
	re->nexthop_mtu = 0;

	/*
	 * If the kernel has sent us a NEW route, then
	 * by golly gee whiz it's a good route.
	 *
	 * If it's an already INSTALLED route we have already handled, then
	 * the kernel route's nexthop might have become unreachable
	 * and we have to handle that.
	 */
	if (!CHECK_FLAG(re->status, ROUTE_ENTRY_INSTALLED)
	    && (re->type == ZEBRA_ROUTE_KERNEL
		|| re->type == ZEBRA_ROUTE_SYSTEM))
		return 1;

	/*
	 * Check to see if we should trust the passed in information
	 * for UNNUMBERED interfaces, as we won't find the GW
	 * address in the routing table.
	 * This check should suffice to handle IPv4 or IPv6 routes
	 * sourced from EVPN routes which are installed with the
	 * next hop as the remote VTEP IP.
	 */
	if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK)) {
		ifp = if_lookup_by_index(nexthop->ifindex, nexthop->vrf_id);
		if (!ifp) {
			if (IS_ZEBRA_DEBUG_RIB_DETAILED)
				zlog_debug(
					"\t%s: Onlink and interface: %u[%u] does not exist",
					__PRETTY_FUNCTION__, nexthop->ifindex,
					nexthop->vrf_id);
			return 0;
		}
		if (connected_is_unnumbered(ifp)) {
			if (if_is_operative(ifp))
				return 1;
			else {
				if (IS_ZEBRA_DEBUG_RIB_DETAILED)
					zlog_debug(
						"\t%s: Onlink and interface %s is not operative",
						__PRETTY_FUNCTION__,
						ifp->name);
				return 0;
			}
		}
		if (!if_is_operative(ifp)) {
			if (IS_ZEBRA_DEBUG_RIB_DETAILED)
				zlog_debug(
					"\t%s: Interface %s is not unnumbered",
					__PRETTY_FUNCTION__, ifp->name);
			return 0;
		}
	}

	/* Make lookup prefix. */
	memset(&p, 0, sizeof(struct prefix));
	switch (afi) {
	case AFI_IP:
		p.family = AF_INET;
		p.prefixlen = IPV4_MAX_PREFIXLEN;
		p.u.prefix4 = nexthop->gate.ipv4;
		break;
	case AFI_IP6:
		p.family = AF_INET6;
		p.prefixlen = IPV6_MAX_PREFIXLEN;
		p.u.prefix6 = nexthop->gate.ipv6;
		break;
	default:
		assert(afi != AFI_IP && afi != AFI_IP6);
		break;
	}
	/* Lookup table.  */
	table = zebra_vrf_table(afi, SAFI_UNICAST, nexthop->vrf_id);
	/* get zvrf */
	zvrf = zebra_vrf_lookup_by_id(nexthop->vrf_id);
	if (!table || !zvrf) {
		if (IS_ZEBRA_DEBUG_RIB_DETAILED)
			zlog_debug("\t%s: Table not found",
				   __PRETTY_FUNCTION__);
		return 0;
	}

	rn = route_node_match(table, (struct prefix *)&p);
	while (rn) {
		route_unlock_node(rn);

		/* Lookup should halt if we've matched against ourselves
		 * ('top', if specified) - i.e., we cannot have a nexthop NH1
		 * that is resolved by a route NH1.  The exception is if the
		 * route is a host route.
		 */
		if (top && rn == top)
			if (((afi == AFI_IP) && (rn->p.prefixlen != 32))
			    || ((afi == AFI_IP6)
				&& (rn->p.prefixlen != 128))) {
				if (IS_ZEBRA_DEBUG_RIB_DETAILED)
					zlog_debug(
						"\t%s: Matched against ourself and prefix length is not max bit length",
						__PRETTY_FUNCTION__);
				return 0;
			}

		/* Pick up selected route. */
		/* However, do not resolve over default route unless explicitly
		 * allowed. */
		if (is_default_prefix(&rn->p)
		    && !rnh_resolve_via_default(zvrf, p.family)) {
			if (IS_ZEBRA_DEBUG_RIB_DETAILED)
				zlog_debug(
					"\t:%s: Resolved against default route",
					__PRETTY_FUNCTION__);
			return 0;
		}

		dest = rib_dest_from_rnode(rn);
		if (dest && dest->selected_fib
		    && !CHECK_FLAG(dest->selected_fib->status,
				   ROUTE_ENTRY_REMOVED)
		    && dest->selected_fib->type != ZEBRA_ROUTE_TABLE)
			match = dest->selected_fib;

		/* If there is no selected route or matched route is EGP, go up
		   tree. */
		if (!match) {
			do {
				rn = rn->parent;
			} while (rn && rn->info == NULL);
			if (rn)
				route_lock_node(rn);

			continue;
		}

		if (match->type == ZEBRA_ROUTE_CONNECT) {
			/* Directly point connected route. */
			newhop = match->ng->nexthop;
			if (newhop) {
				if (nexthop->type == NEXTHOP_TYPE_IPV4
				    || nexthop->type == NEXTHOP_TYPE_IPV6)
					nexthop->ifindex = newhop->ifindex;
			}
			return 1;
		} else if (CHECK_FLAG(re->flags, ZEBRA_FLAG_ALLOW_RECURSION)) {
			resolved = 0;
			for (ALL_NEXTHOPS_PTR(match->ng, newhop)) {
				if (!CHECK_FLAG(match->status,
						ROUTE_ENTRY_INSTALLED))
					continue;
				if (!nexthop_valid_resolve(nexthop, newhop))
					continue;

				SET_FLAG(nexthop->flags,
					 NEXTHOP_FLAG_RECURSIVE);
				nexthop_set_resolved(afi, newhop, nexthop);
				resolved = 1;
			}
			if (resolved)
				re->nexthop_mtu = match->mtu;
			if (!resolved && IS_ZEBRA_DEBUG_RIB_DETAILED)
				zlog_debug("\t%s: Recursion failed to find",
					   __PRETTY_FUNCTION__);
			return resolved;
		} else if (re->type == ZEBRA_ROUTE_STATIC) {
			resolved = 0;
			for (ALL_NEXTHOPS_PTR(match->ng, newhop)) {
				if (!CHECK_FLAG(match->status,
						ROUTE_ENTRY_INSTALLED))
					continue;
				if (!nexthop_valid_resolve(nexthop, newhop))
					continue;

				SET_FLAG(nexthop->flags,
					 NEXTHOP_FLAG_RECURSIVE);
				nexthop_set_resolved(afi, newhop, nexthop);
				resolved = 1;
			}
			if (resolved)
				re->nexthop_mtu = match->mtu;

			if (!resolved && IS_ZEBRA_DEBUG_RIB_DETAILED)
				zlog_debug(
					"\t%s: Static route unable to resolve",
					__PRETTY_FUNCTION__);
			return resolved;
		} else {
			if (IS_ZEBRA_DEBUG_RIB_DETAILED) {
				zlog_debug(
					"\t%s: Route Type %s has not turned on recursion",
					__PRETTY_FUNCTION__,
					zebra_route_string(re->type));
				if (re->type == ZEBRA_ROUTE_BGP
				    && !CHECK_FLAG(re->flags, ZEBRA_FLAG_IBGP))
					zlog_debug(
						"\tEBGP: see \"disable-ebgp-connected-route-check\" or \"disable-connected-check\"");
			}
			return 0;
		}
	}
	if (IS_ZEBRA_DEBUG_RIB_DETAILED)
		zlog_debug("\t%s: Nexthop did not lookup in table",
			   __PRETTY_FUNCTION__);
	return 0;
}

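/*
 * Worked example of the recursive resolution above (drawn from the same
 * change as the label illustration earlier in this file): a static route
 * 3.3.3.3/32 via 2.2.2.2 is not directly connected, so nexthop_active()
 * looks 2.2.2.2 up in the table and matches the 2.2.2.2/32 route.  Every
 * installed nexthop of that route which passes nexthop_valid_resolve()
 * (here: 7.7.7.7 over dummy1, onlink) is attached to the original nexthop
 * via nexthop_set_resolved(), and the original nexthop is flagged
 * NEXTHOP_FLAG_RECURSIVE.
 */
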
/* This function verifies reachability of one given nexthop, which can be
 * numbered or unnumbered, IPv4 or IPv6. The result is unconditionally stored
 * in nexthop->flags field. The nexthop->ifindex will be updated
 * appropriately as well.  An existing route map can turn
 * (otherwise active) nexthop into inactive, but not vice versa.
 *
 * The return value is the final value of 'ACTIVE' flag.
 */
static unsigned nexthop_active_check(struct route_node *rn,
				     struct route_entry *re,
				     struct nexthop *nexthop)
{
	struct interface *ifp;
	route_map_result_t ret = RMAP_PERMITMATCH;
	int family;
	char buf[SRCDEST2STR_BUFFER];
	const struct prefix *p, *src_p;
	struct zebra_vrf *zvrf;

	srcdest_rnode_prefixes(rn, &p, &src_p);

	if (rn->p.family == AF_INET)
		family = AFI_IP;
	else if (rn->p.family == AF_INET6)
		family = AFI_IP6;
	else
		family = 0;
	switch (nexthop->type) {
	case NEXTHOP_TYPE_IFINDEX:
		ifp = if_lookup_by_index(nexthop->ifindex, nexthop->vrf_id);
		if (ifp && if_is_operative(ifp))
			SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
		else
			UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
		break;
	case NEXTHOP_TYPE_IPV4:
	case NEXTHOP_TYPE_IPV4_IFINDEX:
		family = AFI_IP;
		if (nexthop_active(AFI_IP, re, nexthop, rn))
			SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
		else
			UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
		break;
	case NEXTHOP_TYPE_IPV6:
		family = AFI_IP6;
		if (nexthop_active(AFI_IP6, re, nexthop, rn))
			SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
		else
			UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
		break;
	case NEXTHOP_TYPE_IPV6_IFINDEX:
		/* RFC 5549, v4 prefix with v6 NH */
		if (rn->p.family != AF_INET)
			family = AFI_IP6;
		if (IN6_IS_ADDR_LINKLOCAL(&nexthop->gate.ipv6)) {
			ifp = if_lookup_by_index(nexthop->ifindex,
						 nexthop->vrf_id);
			if (ifp && if_is_operative(ifp))
				SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
			else
				UNSET_FLAG(nexthop->flags,
					   NEXTHOP_FLAG_ACTIVE);
		} else {
			if (nexthop_active(AFI_IP6, re, nexthop, rn))
				SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
			else
				UNSET_FLAG(nexthop->flags,
					   NEXTHOP_FLAG_ACTIVE);
		}
		break;
	case NEXTHOP_TYPE_BLACKHOLE:
		SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
		break;
	default:
		break;
	}
	if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE)) {
		if (IS_ZEBRA_DEBUG_RIB_DETAILED)
			zlog_debug("\t%s: Unable to find a active nexthop",
				   __PRETTY_FUNCTION__);
		return 0;
	}

	/* XXX: What exactly do those checks do? Do we support
	 * e.g. IPv4 routes with IPv6 nexthops or vice versa?
	 */
	if (RIB_SYSTEM_ROUTE(re) || (family == AFI_IP && p->family != AF_INET)
	    || (family == AFI_IP6 && p->family != AF_INET6))
		return CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);

	/* The original code didn't determine the family correctly
	 * e.g. for NEXTHOP_TYPE_IFINDEX. Retrieve the correct afi
	 * from the rib_table_info in those cases.
	 * Possibly it may be better to use only the rib_table_info
	 * in every case.
	 */
	if (!family) {
		rib_table_info_t *info;

		info = srcdest_rnode_table_info(rn);
		family = info->afi;
	}

	memset(&nexthop->rmap_src.ipv6, 0, sizeof(union g_addr));

	zvrf = zebra_vrf_lookup_by_id(nexthop->vrf_id);
	if (!zvrf) {
		if (IS_ZEBRA_DEBUG_RIB_DETAILED)
			zlog_debug("\t%s: zvrf is NULL", __PRETTY_FUNCTION__);
		return CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
	}

	/* It'll get set if required inside */
	ret = zebra_route_map_check(family, re->type, re->instance, p, nexthop,
				    zvrf, re->tag);
	if (ret == RMAP_DENYMATCH) {
		if (IS_ZEBRA_DEBUG_RIB) {
			srcdest_rnode2str(rn, buf, sizeof(buf));
			zlog_debug(
				"%u:%s: Filtering out with NH out %s due to route map",
				re->vrf_id, buf,
				ifindex2ifname(nexthop->ifindex,
					       nexthop->vrf_id));
		}
		UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
	}
	return CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
}

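/*
 * Note on the route-map check in nexthop_active_check() above: ret starts
 * out as RMAP_PERMITMATCH and only an explicit RMAP_DENYMATCH clears the
 * ACTIVE flag.  Individual match rules may also return RMAP_NOOP when a
 * rule is not applicable (e.g. a "match evpn vni" rule evaluated against a
 * route the rule cannot classify); such rules are skipped rather than
 * treated as a non-match, and if every rule is a NOOP the overall result
 * is RMAP_PERMITMATCH.  (Summarized from the route-map library change that
 * introduced RMAP_NOOP.)
 */
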
/*
 * Iterate over all nexthops of the given RIB entry and refresh their
 * ACTIVE flag.  If any nexthop is found to toggle the ACTIVE flag,
 * the whole re structure is flagged with ROUTE_ENTRY_CHANGED.
 *
 * Return value is the new number of active nexthops.
 */
int nexthop_active_update(struct route_node *rn, struct route_entry *re)
{
	struct nexthop *nexthop;
	union g_addr prev_src;
	unsigned int prev_active, new_active;
	ifindex_t prev_index;
	uint8_t curr_active = 0;

	UNSET_FLAG(re->status, ROUTE_ENTRY_CHANGED);

	for (nexthop = re->ng->nexthop; nexthop; nexthop = nexthop->next) {
		/* No protocol daemon provides src and so we're skipping
		 * tracking it */
		prev_src = nexthop->rmap_src;
		prev_active = CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
		prev_index = nexthop->ifindex;
		/*
		 * We need to respect the multipath_num here
		 * as that what we should be able to install from
		 * a multipath perspective should not be a data plane
		 * decision point.
		 */
		new_active = nexthop_active_check(rn, re, nexthop);
		if (new_active
		    && nexthop_group_active_nexthop_num(re->ng)
			       >= zrouter.multipath_num) {
			UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
			new_active = 0;
		}

		if (new_active)
			curr_active++;

		/* Don't allow src setting on IPv6 addr for now */
		if (prev_active != new_active || prev_index != nexthop->ifindex
		    || ((nexthop->type >= NEXTHOP_TYPE_IFINDEX
			 && nexthop->type < NEXTHOP_TYPE_IPV6)
			&& prev_src.ipv4.s_addr
				   != nexthop->rmap_src.ipv4.s_addr)
		    || ((nexthop->type >= NEXTHOP_TYPE_IPV6
			 && nexthop->type < NEXTHOP_TYPE_BLACKHOLE)
			&& !(IPV6_ADDR_SAME(&prev_src.ipv6,
					    &nexthop->rmap_src.ipv6)))
		    || CHECK_FLAG(re->status, ROUTE_ENTRY_LABELS_CHANGED))
			SET_FLAG(re->status, ROUTE_ENTRY_CHANGED);
	}

	return curr_active;
}
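
/*
 * Example of the multipath clamp above (illustrative): with
 * zrouter.multipath_num == 2 and a route_entry whose group resolves three
 * nexthops, the first two remain NEXTHOP_FLAG_ACTIVE while the third is
 * knocked back to inactive, so nexthop_active_update() returns 2.
 */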