2023-02-08 13:17:09 +01:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-or-later
|
2018-04-07 20:13:07 +02:00
|
|
|
/*
|
|
|
|
* BGP Label Pool - Manage label chunk allocations from zebra asynchronously
|
|
|
|
*
|
|
|
|
* Copyright (C) 2018 LabN Consulting, L.L.C.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <zebra.h>
|
|
|
|
|
|
|
|
#include "log.h"
|
|
|
|
#include "memory.h"
|
|
|
|
#include "stream.h"
|
|
|
|
#include "mpls.h"
|
|
|
|
#include "vty.h"
|
|
|
|
#include "linklist.h"
|
|
|
|
#include "skiplist.h"
|
|
|
|
#include "workqueue.h"
|
2019-06-12 16:33:12 +02:00
|
|
|
#include "mpls.h"
|
2018-04-07 20:13:07 +02:00
|
|
|
|
|
|
|
#include "bgpd/bgpd.h"
|
|
|
|
#include "bgpd/bgp_labelpool.h"
|
|
|
|
#include "bgpd/bgp_debug.h"
|
2018-06-15 23:08:53 +02:00
|
|
|
#include "bgpd/bgp_errors.h"
|
2019-07-08 18:45:14 +02:00
|
|
|
#include "bgpd/bgp_route.h"
|
2023-02-28 14:17:17 +01:00
|
|
|
#include "bgpd/bgp_zebra.h"
|
2023-01-16 18:24:26 +01:00
|
|
|
#include "bgpd/bgp_vty.h"
|
|
|
|
#include "bgpd/bgp_rd.h"
|
2018-04-07 20:13:07 +02:00
|
|
|
|
2022-08-26 23:47:07 +02:00
|
|
|
#define BGP_LABELPOOL_ENABLE_TESTS 0
|
|
|
|
|
|
|
|
#include "bgpd/bgp_labelpool_clippy.c"
|
|
|
|
|
|
|
|
|
|
|
|
#if BGP_LABELPOOL_ENABLE_TESTS
|
|
|
|
static void lptest_init(void);
|
|
|
|
static void lptest_finish(void);
|
|
|
|
#endif
|
|
|
|
|
2024-01-07 02:26:14 +01:00
|
|
|
static void bgp_sync_label_manager(struct event *e);
|
|
|
|
|
2018-04-07 20:13:07 +02:00
|
|
|
/*
|
|
|
|
* Remember where pool data are kept
|
|
|
|
*/
|
|
|
|
static struct labelpool *lp;
|
|
|
|
|
2022-08-26 23:47:07 +02:00
|
|
|
/*
|
|
|
|
* Number of labels requested at a time from the zebra label manager.
|
|
|
|
* We start small but double the request size each time up to a
|
|
|
|
* maximum size.
|
|
|
|
*
|
|
|
|
* The label space is 20 bits which is shared with other FRR processes
|
|
|
|
* on this host, so to avoid greedily requesting a mostly wasted chunk,
|
|
|
|
* we limit the chunk size to 1/16 of the label space (that's the -4 bits
|
|
|
|
* in the definition below). This limit slightly increases our cost of
|
|
|
|
* finding free labels in our allocated chunks.
|
|
|
|
*/
|
|
|
|
#define LP_CHUNK_SIZE_MIN 128
|
|
|
|
#define LP_CHUNK_SIZE_MAX (1 << (20 - 4))
|
2018-04-07 20:13:07 +02:00
|
|
|
|
|
|
|
DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CHUNK, "BGP Label Chunk");
|
2019-04-21 18:27:08 +02:00
|
|
|
DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_FIFO, "BGP Label FIFO item");
|
2018-04-07 20:13:07 +02:00
|
|
|
DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CB, "BGP Dynamic Label Assignment");
|
|
|
|
DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CBQ, "BGP Dynamic Label Callback");
|
|
|
|
|
|
|
|
/*
 * One contiguous block of labels obtained from the zebra label manager.
 * Allocation state is tracked per-label in a bitfield.
 */
struct lp_chunk {
	uint32_t first;		/* first label of the chunk (inclusive) */
	uint32_t last;		/* last label of the chunk (inclusive) */
	uint32_t nfree;		/* un-allocated count */
	uint32_t idx_last_allocated; /* start looking here */
	bitfield_t allocated_map;    /* bit set = label at (first + index) in use */
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* label control block
|
|
|
|
*/
|
|
|
|
struct lp_lcb {
	mpls_label_t label;		/* MPLS_LABEL_NONE = not allocated */
	int type;			/* LP_TYPE_* requestor category */
	void *labelid;			/* unique ID */
	vrf_id_t vrf_id;		/* VRF the requestor belongs to */
	/*
	 * callback for label allocation and loss
	 *
	 * allocated: false = lost
	 */
	int (*cbfunc)(mpls_label_t label, void *lblid, bool alloc);
};
|
|
|
|
|
|
|
|
/*
 * Pending-request queue entry: holds a by-value snapshot of the LCB
 * taken at request time (the ledger entry may change before service).
 */
struct lp_fifo {
	struct lp_fifo_item fifo;	/* intrusive list linkage */
	struct lp_lcb lcb;		/* copy of the request's control block */
};

DECLARE_LIST(lp_fifo, struct lp_fifo, fifo);
|
|
|
|
|
2018-04-07 20:13:07 +02:00
|
|
|
/*
 * Work-queue item used to deliver an allocation (or loss) notification
 * to the requestor's callback asynchronously.
 */
struct lp_cbq_item {
	int (*cbfunc)(mpls_label_t label, void *lblid, bool alloc);
	int type;
	mpls_label_t label;
	void *labelid;
	vrf_id_t vrf_id;
	bool allocated;		/* false = lost */
};
|
|
|
|
|
|
|
|
/*
 * Work-queue worker: deliver one queued label notification to the
 * requestor's callback. If the callback rejects an allocation, the
 * label is returned to the pool and the ledger entry is dropped.
 *
 * Always returns WQ_SUCCESS so the work queue never retries an item.
 */
static wq_item_status lp_cbq_docallback(struct work_queue *wq, void *data)
{
	struct lp_cbq_item *lcbq = data;
	int rc;
	int debug = BGP_DEBUG(labelpool, LABELPOOL);
	struct bgp *bgp = bgp_lookup_by_vrf_id(lcbq->vrf_id);

	if (debug)
		zlog_debug("%s: calling callback with labelid=%p label=%u allocated=%d",
			   __func__, lcbq->labelid, lcbq->label, lcbq->allocated);

	if (lcbq->label == MPLS_LABEL_NONE) {
		/* shouldn't happen */
		flog_err(EC_BGP_LABEL, "%s: error: label==MPLS_LABEL_NONE",
			 __func__);
		return WQ_SUCCESS;
	}

	/*
	 * Skip delivery if the VRF instance is gone.
	 * NOTE(review): for LP_TYPE_BGP_LU items the dest was locked at
	 * queueing time (check_bgp_lu_cb_lock); bailing out here without
	 * invoking the callback looks like it may leave that lock held —
	 * confirm against the LU callback's unlock behavior.
	 */
	if (!bgp)
		return WQ_SUCCESS;

	rc = (*(lcbq->cbfunc))(lcbq->label, lcbq->labelid, lcbq->allocated);

	if (lcbq->allocated && rc) {
		/*
		 * Callback rejected allocation. This situation could arise
		 * if there was a label request followed by the requestor
		 * deciding it didn't need the assignment (e.g., config
		 * change) while the reply to the original request (with
		 * label) was in the work queue.
		 */
		if (debug)
			zlog_debug("%s: callback rejected allocation, releasing labelid=%p label=%u",
				   __func__, lcbq->labelid, lcbq->label);

		uintptr_t lbl = lcbq->label;
		void *labelid;
		struct lp_lcb *lcb;

		/*
		 * If the rejected label was marked inuse by this labelid,
		 * release the label back to the pool.
		 *
		 * Further, if the rejected label was still assigned to
		 * this labelid in the LCB, delete the LCB.
		 */
		if (!skiplist_search(lp->inuse, (void *)lbl, &labelid)) {
			if (labelid == lcbq->labelid) {
				if (!skiplist_search(lp->ledger, labelid,
						     (void **)&lcb)) {
					if (lcbq->label == lcb->label)
						skiplist_delete(lp->ledger,
								labelid, NULL);
				}
				skiplist_delete(lp->inuse, (void *)lbl, NULL);
			}
		}
	}

	return WQ_SUCCESS;
}
|
|
|
|
|
|
|
|
static void lp_cbq_item_free(struct work_queue *wq, void *data)
|
|
|
|
{
|
|
|
|
XFREE(MTYPE_BGP_LABEL_CBQ, data);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Skiplist value destructor for the ledger: free a label control block. */
static void lp_lcb_free(void *goner)
{
	XFREE(MTYPE_BGP_LABEL_CB, goner);
}
|
|
|
|
|
|
|
|
static void lp_chunk_free(void *goner)
|
|
|
|
{
|
2022-08-26 23:47:07 +02:00
|
|
|
struct lp_chunk *chunk = (struct lp_chunk *)goner;
|
|
|
|
|
|
|
|
bf_free(chunk->allocated_map);
|
2019-02-25 21:18:13 +01:00
|
|
|
XFREE(MTYPE_BGP_LABEL_CHUNK, goner);
|
2018-04-07 20:13:07 +02:00
|
|
|
}
|
|
|
|
|
2023-03-07 20:14:41 +01:00
|
|
|
/*
 * Initialize the label pool module.
 *
 * master: event loop used for the callback work queue.
 * pool:   caller-owned storage for pool state; remembered in the
 *         module-static 'lp' pointer used by all other entry points.
 */
void bgp_lp_init(struct event_loop *master, struct labelpool *pool)
{
	if (BGP_DEBUG(labelpool, LABELPOOL))
		zlog_debug("%s: entry", __func__);

	lp = pool;	/* Set module pointer to pool data */

	/* ledger maps labelid -> LCB; inuse maps label -> labelid */
	lp->ledger = skiplist_new(0, NULL, lp_lcb_free);
	lp->inuse = skiplist_new(0, NULL, NULL);
	lp->chunks = list_new();
	lp->chunks->del = lp_chunk_free;
	lp_fifo_init(&lp->requests);
	lp->callback_q = work_queue_new(master, "label callbacks");

	lp->callback_q->spec.workfunc = lp_cbq_docallback;
	lp->callback_q->spec.del_item_data = lp_cbq_item_free;
	lp->callback_q->spec.max_retries = 0;

	/* chunk request size starts small and doubles on demand */
	lp->next_chunksize = LP_CHUNK_SIZE_MIN;

#if BGP_LABELPOOL_ENABLE_TESTS
	lptest_init();
#endif
}
|
|
|
|
|
2020-12-17 11:41:07 +01:00
|
|
|
/* check if a label callback was for a BGP LU node, and if so, unlock it */
|
2019-06-18 11:14:28 +02:00
|
|
|
static void check_bgp_lu_cb_unlock(struct lp_lcb *lcb)
|
|
|
|
{
|
|
|
|
if (lcb->type == LP_TYPE_BGP_LU)
|
2020-12-17 11:41:07 +01:00
|
|
|
bgp_dest_unlock_node(lcb->labelid);
|
2019-06-18 11:14:28 +02:00
|
|
|
}
|
|
|
|
|
2020-12-17 11:41:07 +01:00
|
|
|
/* check if a label callback was for a BGP LU node, and if so, lock it */
|
2019-06-18 11:14:28 +02:00
|
|
|
static void check_bgp_lu_cb_lock(struct lp_lcb *lcb)
|
|
|
|
{
|
|
|
|
if (lcb->type == LP_TYPE_BGP_LU)
|
2020-12-17 11:41:07 +01:00
|
|
|
bgp_dest_lock_node(lcb->labelid);
|
2019-06-18 11:14:28 +02:00
|
|
|
}
|
|
|
|
|
2018-04-07 20:13:07 +02:00
|
|
|
/*
 * Tear down the label pool module: return all chunks to zebra, drain
 * the pending-request FIFO and the callback work queue (unlocking any
 * BGP-LU dests that were locked at queueing time), and free all state.
 * Safe to call when the pool was never initialized (lp == NULL).
 */
void bgp_lp_finish(void)
{
	struct lp_fifo *lf;
	struct work_queue_item *item, *titem;
	struct listnode *node;
	struct lp_chunk *chunk;

#if BGP_LABELPOOL_ENABLE_TESTS
	lptest_finish();
#endif
	if (!lp)
		return;

	skiplist_free(lp->ledger);
	lp->ledger = NULL;

	skiplist_free(lp->inuse);
	lp->inuse = NULL;

	/* give every chunk back to the zebra label manager */
	for (ALL_LIST_ELEMENTS_RO(lp->chunks, node, chunk))
		bgp_zebra_release_label_range(chunk->first, chunk->last);

	list_delete(&lp->chunks);

	/* queued requests hold a lock on LU dests; release before freeing */
	while ((lf = lp_fifo_pop(&lp->requests))) {
		check_bgp_lu_cb_unlock(&lf->lcb);
		XFREE(MTYPE_BGP_LABEL_FIFO, lf);
	}
	lp_fifo_fini(&lp->requests);

	/* we must unlock path infos for LU callbacks; but we cannot do that
	 * in the deletion callback of the workqueue, as that is also called
	 * to remove an element from the queue after it has been run, resulting
	 * in a double unlock. Hence we need to iterate over our queues and
	 * lists and manually perform the unlocking (ugh)
	 */
	STAILQ_FOREACH_SAFE (item, &lp->callback_q->items, wq, titem)
		check_bgp_lu_cb_unlock(item->data);

	work_queue_free_and_null(&lp->callback_q);

	lp = NULL;
}
|
|
|
|
|
|
|
|
/*
 * Allocate one label from the local chunk list and record it in the
 * 'inuse' skiplist under the given labelid.
 *
 * Returns the label, or MPLS_LABEL_NONE if no chunk has a free label
 * (or if the inuse insert fails, which indicates internal corruption).
 */
static mpls_label_t get_label_from_pool(void *labelid)
{
	struct listnode *node;
	struct lp_chunk *chunk;
	int debug = BGP_DEBUG(labelpool, LABELPOOL);

	/*
	 * Find a free label
	 */
	for (ALL_LIST_ELEMENTS_RO(lp->chunks, node, chunk)) {
		uintptr_t lbl;
		unsigned int index;

		if (debug)
			zlog_debug("%s: chunk first=%u last=%u",
				__func__, chunk->first, chunk->last);

		/*
		 * don't look in chunks with no available labels
		 */
		if (!chunk->nfree)
			continue;

		/*
		 * roll through bitfield starting where we stopped
		 * last time
		 */
		index = bf_find_next_clear_bit_wrap(
			&chunk->allocated_map, chunk->idx_last_allocated + 1,
			0);

		/*
		 * since chunk->nfree is non-zero, we should always get
		 * a valid index
		 */
		assert(index != WORD_MAX);

		lbl = chunk->first + index;
		if (skiplist_insert(lp->inuse, (void *)lbl, labelid)) {
			/* something is very wrong */
			zlog_err("%s: unable to insert inuse label %u (id %p)",
				__func__, (uint32_t)lbl, labelid);
			return MPLS_LABEL_NONE;
		}

		/*
		 * Success
		 */
		bf_set_bit(chunk->allocated_map, index);
		chunk->idx_last_allocated = index;
		chunk->nfree -= 1;

		return lbl;
	}

	return MPLS_LABEL_NONE;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Success indicated by value of "label" field in returned LCB
|
|
|
|
*/
|
2025-02-10 18:02:00 +01:00
|
|
|
static struct lp_lcb *lcb_alloc(int type, void *labelid, vrf_id_t vrf_id,
|
|
|
|
int (*cbfunc)(mpls_label_t label, void *labelid, bool allocated))
|
2018-04-07 20:13:07 +02:00
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Set up label control block
|
|
|
|
*/
|
|
|
|
struct lp_lcb *new = XCALLOC(MTYPE_BGP_LABEL_CB,
|
|
|
|
sizeof(struct lp_lcb));
|
|
|
|
|
|
|
|
new->label = get_label_from_pool(labelid);
|
|
|
|
new->type = type;
|
|
|
|
new->labelid = labelid;
|
2025-02-10 18:02:00 +01:00
|
|
|
new->vrf_id = vrf_id;
|
2018-04-07 20:13:07 +02:00
|
|
|
new->cbfunc = cbfunc;
|
|
|
|
|
|
|
|
return new;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Callers who need labels must supply a type, labelid, and callback.
|
|
|
|
* The type is a value defined in bgp_labelpool.h (add types as needed).
|
|
|
|
* The callback is for asynchronous notification of label allocation.
|
|
|
|
* The labelid is passed as an argument to the callback. It should be unique
|
|
|
|
* to the requested label instance.
|
|
|
|
*
|
|
|
|
* If zebra is not connected, callbacks with labels will be delayed
|
|
|
|
* until connection is established. If zebra connection is lost after
|
|
|
|
* labels have been assigned, existing assignments via this labelpool
|
|
|
|
* module will continue until reconnection.
|
|
|
|
*
|
|
|
|
* When connection to zebra is reestablished, previous label assignments
|
|
|
|
* will be invalidated (via callbacks having the "allocated" parameter unset)
|
|
|
|
* and new labels will be automatically reassigned by this labelpool module
|
2022-08-26 23:47:07 +02:00
|
|
|
* (that is, a requestor does not need to call bgp_lp_get() again if it is
|
2018-04-07 20:13:07 +02:00
|
|
|
* notified via callback that its label has been lost: it will eventually
|
|
|
|
* get another callback with a new label assignment).
|
|
|
|
*
|
2022-08-26 23:47:07 +02:00
|
|
|
* The callback function should return 0 to accept the allocation
|
|
|
|
* and non-zero to refuse it. The callback function return value is
|
|
|
|
* ignored for invalidations (i.e., when the "allocated" parameter is false)
|
|
|
|
*
|
2018-04-07 20:13:07 +02:00
|
|
|
* Prior requests for a given labelid are detected so that requests and
|
|
|
|
* assignments are not duplicated.
|
|
|
|
*/
|
2025-02-10 18:02:00 +01:00
|
|
|
/*
 * Request a label for (type, labelid); the result is always delivered
 * asynchronously via cbfunc (see the contract comment above).
 *
 * Fast path: a label is available in the local pool (or the request is
 * a duplicate already filled) — a callback work item is queued at once.
 * Slow path: the request is FIFO-queued and, if not enough labels are
 * already pending, another chunk is requested from zebra.
 */
void bgp_lp_get(int type, void *labelid, vrf_id_t vrf_id,
	int (*cbfunc)(mpls_label_t label, void *labelid, bool allocated))
{
	struct lp_lcb *lcb;
	int requested = 0;
	int debug = BGP_DEBUG(labelpool, LABELPOOL);

	if (debug)
		zlog_debug("%s: labelid=%p", __func__, labelid);

	/*
	 * Have we seen this request before?
	 */
	if (!skiplist_search(lp->ledger, labelid, (void **)&lcb)) {
		requested = 1;
	} else {
		lcb = lcb_alloc(type, labelid, vrf_id, cbfunc);
		if (debug)
			zlog_debug("%s: inserting lcb=%p label=%u",
				__func__, lcb, lcb->label);
		int rc = skiplist_insert(lp->ledger, labelid, lcb);

		if (rc) {
			/* shouldn't happen */
			flog_err(EC_BGP_LABEL,
				 "%s: can't insert new LCB into ledger list",
				 __func__);
			XFREE(MTYPE_BGP_LABEL_CB, lcb);
			return;
		}
	}

	if (lcb->label != MPLS_LABEL_NONE) {
		/*
		 * Fast path: we filled the request from local pool (or
		 * this is a duplicate request that we filled already).
		 * Enqueue response work item with new label.
		 */
		struct lp_cbq_item *q;

		q = XCALLOC(MTYPE_BGP_LABEL_CBQ, sizeof(struct lp_cbq_item));

		q->cbfunc = lcb->cbfunc;
		q->type = lcb->type;
		q->label = lcb->label;
		q->labelid = lcb->labelid;
		q->vrf_id = lcb->vrf_id;
		q->allocated = true;

		/* if this is a LU request, lock node before queueing */
		check_bgp_lu_cb_lock(lcb);

		work_queue_add(lp->callback_q, q);

		return;
	}

	/* duplicate request with no label yet: already queued, nothing to do */
	if (requested)
		return;

	if (debug)
		zlog_debug("%s: slow path. lcb=%p label=%u",
			__func__, lcb, lcb->label);

	/*
	 * Slow path: we are out of labels in the local pool,
	 * so remember the request and also get another chunk from
	 * the label manager.
	 *
	 * We track number of outstanding label requests: don't
	 * need to get a chunk for each one.
	 */

	struct lp_fifo *lf = XCALLOC(MTYPE_BGP_LABEL_FIFO,
		sizeof(struct lp_fifo));

	lf->lcb = *lcb;
	/* if this is a LU request, lock node before queueing */
	check_bgp_lu_cb_lock(lcb);

	lp_fifo_add_tail(&lp->requests, lf);

	if (lp_fifo_count(&lp->requests) > lp->pending_count) {
		if (!bgp_zebra_request_label_range(MPLS_LABEL_BASE_ANY,
						   lp->next_chunksize, true))
			return;

		lp->pending_count += lp->next_chunksize;
		/* double the next request, capped at LP_CHUNK_SIZE_MAX */
		if ((lp->next_chunksize << 1) <= LP_CHUNK_SIZE_MAX)
			lp->next_chunksize <<= 1;
	}

	/* service the FIFO shortly, once zebra has had a chance to reply */
	event_add_timer(bm->master, bgp_sync_label_manager, NULL, 1,
			&bm->t_bgp_sync_label_manager);
}
|
|
|
|
|
|
|
|
/*
 * Return a previously-assigned label to the pool. The (type, label)
 * pair must match the ledger entry for labelid, otherwise the call is
 * a no-op. If this empties a chunk and no requests are outstanding,
 * the whole chunk is handed back to zebra and the chunk-size growth
 * is reset.
 */
void bgp_lp_release(
	int type,
	void *labelid,
	mpls_label_t label)
{
	struct lp_lcb *lcb;

	if (!skiplist_search(lp->ledger, labelid, (void **)&lcb)) {
		if (label == lcb->label && type == lcb->type) {
			struct listnode *node;
			struct lp_chunk *chunk;
			uintptr_t lbl = label;
			bool deallocated = false;

			/* no longer in use */
			skiplist_delete(lp->inuse, (void *)lbl, NULL);

			/* no longer requested */
			skiplist_delete(lp->ledger, labelid, NULL);

			/*
			 * Find the chunk this label belongs to and
			 * deallocate the label
			 */
			for (ALL_LIST_ELEMENTS_RO(lp->chunks, node, chunk)) {
				uint32_t index;

				if ((label < chunk->first) ||
				    (label > chunk->last))
					continue;

				index = label - chunk->first;
				assert(bf_test_index(chunk->allocated_map,
					index));
				bf_release_index(chunk->allocated_map, index);
				chunk->nfree += 1;
				deallocated = true;
				break;
			}
			/* every pool label must belong to some chunk */
			assert(deallocated);
			if (deallocated &&
			    chunk->nfree == chunk->last - chunk->first + 1 &&
			    lp_fifo_count(&lp->requests) == 0) {
				/* chunk fully free and nothing pending:
				 * give it back to zebra
				 */
				bgp_zebra_release_label_range(chunk->first,
							      chunk->last);
				list_delete_node(lp->chunks, node);
				lp_chunk_free(chunk);
				lp->next_chunksize = LP_CHUNK_SIZE_MIN;
			}
		}
	}
}
|
|
|
|
|
2023-06-12 16:09:52 +02:00
|
|
|
static void bgp_sync_label_manager(struct event *e)
|
2018-04-07 20:13:07 +02:00
|
|
|
{
|
|
|
|
int debug = BGP_DEBUG(labelpool, LABELPOOL);
|
|
|
|
struct lp_fifo *lf;
|
|
|
|
|
2023-06-12 16:09:52 +02:00
|
|
|
while ((lf = lp_fifo_pop(&lp->requests))) {
|
2018-04-07 20:13:07 +02:00
|
|
|
struct lp_lcb *lcb;
|
|
|
|
void *labelid = lf->lcb.labelid;
|
|
|
|
|
|
|
|
if (skiplist_search(lp->ledger, labelid, (void **)&lcb)) {
|
|
|
|
/* request no longer in effect */
|
|
|
|
|
|
|
|
if (debug) {
|
|
|
|
zlog_debug("%s: labelid %p: request no longer in effect",
|
2023-08-09 23:01:16 +02:00
|
|
|
__func__, labelid);
|
2018-04-07 20:13:07 +02:00
|
|
|
}
|
2020-12-17 11:41:07 +01:00
|
|
|
/* if this was a BGP_LU request, unlock node
|
2020-12-04 13:16:32 +01:00
|
|
|
*/
|
|
|
|
check_bgp_lu_cb_unlock(lcb);
|
2018-04-07 20:13:07 +02:00
|
|
|
goto finishedrequest;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* have LCB */
|
|
|
|
if (lcb->label != MPLS_LABEL_NONE) {
|
|
|
|
/* request already has a label */
|
|
|
|
if (debug) {
|
|
|
|
zlog_debug("%s: labelid %p: request already has a label: %u=0x%x, lcb=%p",
|
2023-08-09 23:01:16 +02:00
|
|
|
__func__, labelid, lcb->label,
|
|
|
|
lcb->label, lcb);
|
2018-04-07 20:13:07 +02:00
|
|
|
}
|
2020-12-17 11:41:07 +01:00
|
|
|
/* if this was a BGP_LU request, unlock node
|
2019-06-18 11:14:28 +02:00
|
|
|
*/
|
|
|
|
check_bgp_lu_cb_unlock(lcb);
|
|
|
|
|
2018-04-07 20:13:07 +02:00
|
|
|
goto finishedrequest;
|
|
|
|
}
|
|
|
|
|
|
|
|
lcb->label = get_label_from_pool(lcb->labelid);
|
|
|
|
|
|
|
|
if (lcb->label == MPLS_LABEL_NONE) {
|
|
|
|
/*
|
|
|
|
* Out of labels in local pool, await next chunk
|
|
|
|
*/
|
|
|
|
if (debug) {
|
|
|
|
zlog_debug("%s: out of labels, await more",
|
2023-08-09 23:01:16 +02:00
|
|
|
__func__);
|
2018-04-07 20:13:07 +02:00
|
|
|
}
|
2024-01-07 02:26:14 +01:00
|
|
|
|
|
|
|
lp_fifo_add_tail(&lp->requests, lf);
|
|
|
|
event_add_timer(bm->master, bgp_sync_label_manager,
|
|
|
|
NULL, 1, &bm->t_bgp_sync_label_manager);
|
2018-04-07 20:13:07 +02:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* we filled the request from local pool.
|
|
|
|
* Enqueue response work item with new label.
|
|
|
|
*/
|
|
|
|
struct lp_cbq_item *q = XCALLOC(MTYPE_BGP_LABEL_CBQ,
|
2023-08-09 23:01:16 +02:00
|
|
|
sizeof(struct lp_cbq_item));
|
2018-04-07 20:13:07 +02:00
|
|
|
|
|
|
|
q->cbfunc = lcb->cbfunc;
|
|
|
|
q->type = lcb->type;
|
|
|
|
q->label = lcb->label;
|
|
|
|
q->labelid = lcb->labelid;
|
2025-02-10 18:02:00 +01:00
|
|
|
q->vrf_id = lcb->vrf_id;
|
2018-04-07 20:13:07 +02:00
|
|
|
q->allocated = true;
|
|
|
|
|
|
|
|
if (debug)
|
|
|
|
zlog_debug("%s: assigning label %u to labelid %p",
|
2023-08-09 23:01:16 +02:00
|
|
|
__func__, q->label, q->labelid);
|
2018-04-07 20:13:07 +02:00
|
|
|
|
|
|
|
work_queue_add(lp->callback_q, q);
|
|
|
|
|
|
|
|
finishedrequest:
|
|
|
|
XFREE(MTYPE_BGP_LABEL_FIFO, lf);
|
2023-08-09 23:01:16 +02:00
|
|
|
}
|
2023-06-12 16:09:52 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Accept a new label chunk [first..last] granted by the zebra label
 * manager: build the chunk bookkeeping and add it to the pool.
 * Invalid ranges (last < first) are logged and dropped.
 */
void bgp_lp_event_chunk(uint32_t first, uint32_t last)
{
	struct lp_chunk *chunk;
	uint32_t labelcount;

	if (last < first) {
		flog_err(EC_BGP_LABEL,
			 "%s: zebra label chunk invalid: first=%u, last=%u",
			 __func__, first, last);
		return;
	}

	chunk = XCALLOC(MTYPE_BGP_LABEL_CHUNK, sizeof(struct lp_chunk));

	labelcount = last - first + 1;

	chunk->first = first;
	chunk->last = last;
	chunk->nfree = labelcount;
	bf_init(chunk->allocated_map, labelcount);

	/*
	 * Optimize for allocation by adding the new (presumably larger)
	 * chunk at the head of the list so it is examined first.
	 */
	listnode_add_head(lp->chunks, chunk);

	/* NOTE(review): pending_count is unsigned; a chunk larger than the
	 * currently-pending count would wrap it — presumably requests and
	 * grants are kept in sync by the callers; verify
	 */
	lp->pending_count -= labelcount;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* continue using allocated labels until zebra returns
|
|
|
|
*/
|
|
|
|
/*
 * continue using allocated labels until zebra returns
 */
void bgp_lp_event_zebra_down(void)
{
	/* rats. */
	/* intentionally empty: existing assignments remain valid; recovery
	 * happens in bgp_lp_event_zebra_up() on reconnect
	 */
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Inform owners of previously-allocated labels that their labels
|
|
|
|
* are not valid. Request chunk from zebra large enough to satisfy
|
|
|
|
* previously-allocated labels plus any outstanding requests.
|
|
|
|
*/
|
|
|
|
/*
 * Inform owners of previously-allocated labels that their labels
 * are not valid. Request chunk from zebra large enough to satisfy
 * previously-allocated labels plus any outstanding requests.
 */
void bgp_lp_event_zebra_up(void)
{
	unsigned int labels_needed;
	unsigned int chunks_needed;
	void *labelid;
	struct lp_lcb *lcb;

	lp->reconnect_count++;
	/*
	 * Get label chunk allocation request dispatched to zebra
	 */
	labels_needed = lp_fifo_count(&lp->requests) +
		skiplist_count(lp->inuse);

	/* grow the per-chunk size toward the demand, capped at the max */
	if (labels_needed > lp->next_chunksize) {
		while ((lp->next_chunksize < labels_needed) &&
		       (lp->next_chunksize << 1 <= LP_CHUNK_SIZE_MAX))

			lp->next_chunksize <<= 1;
	}

	/* round up */
	chunks_needed = (labels_needed + lp->next_chunksize - 1) / lp->next_chunksize;
	labels_needed = chunks_needed * lp->next_chunksize;

	/*
	 * Invalidate current list of chunks
	 */
	list_delete_all_node(lp->chunks);

	if (labels_needed && !bgp_zebra_request_label_range(MPLS_LABEL_BASE_ANY,
							    labels_needed, true))
		return;

	lp->pending_count += labels_needed;

	/*
	 * Invalidate any existing labels and requeue them as requests
	 */
	while (!skiplist_first(lp->inuse, NULL, &labelid)) {

		/*
		 * Get LCB
		 */
		if (!skiplist_search(lp->ledger, labelid, (void **)&lcb)) {

			if (lcb->label != MPLS_LABEL_NONE) {
				/*
				 * invalidate
				 */
				struct lp_cbq_item *q;

				q = XCALLOC(MTYPE_BGP_LABEL_CBQ,
					sizeof(struct lp_cbq_item));
				q->cbfunc = lcb->cbfunc;
				q->type = lcb->type;
				q->label = lcb->label;
				q->labelid = lcb->labelid;
				q->vrf_id = lcb->vrf_id;
				q->allocated = false;
				/* LU dest stays locked while the loss
				 * callback is in flight
				 */
				check_bgp_lu_cb_lock(lcb);
				work_queue_add(lp->callback_q, q);

				lcb->label = MPLS_LABEL_NONE;
			}

			/*
			 * request queue
			 */
			struct lp_fifo *lf = XCALLOC(MTYPE_BGP_LABEL_FIFO,
				sizeof(struct lp_fifo));

			lf->lcb = *lcb;
			check_bgp_lu_cb_lock(lcb);
			lp_fifo_add_tail(&lp->requests, lf);
		}

		skiplist_delete_first(lp->inuse);
	}

	/* service the requeued requests once chunks arrive */
	event_add_timer(bm->master, bgp_sync_label_manager, NULL, 1,
			&bm->t_bgp_sync_label_manager);
}
|
2020-12-04 13:14:26 +01:00
|
|
|
|
|
|
|
/* vty: "show bgp labelpool summary [json]" — pool-wide counters. */
DEFUN(show_bgp_labelpool_summary, show_bgp_labelpool_summary_cmd,
      "show bgp labelpool summary [json]",
      SHOW_STR BGP_STR
      "BGP Labelpool information\n"
      "BGP Labelpool summary\n" JSON_STR)
{
	bool uj = use_json(argc, argv);
	json_object *json = NULL;

	if (!lp) {
		if (uj)
			vty_out(vty, "{}\n");
		else
			vty_out(vty, "No existing BGP labelpool\n");
		return (CMD_WARNING);
	}

	if (uj) {
		json = json_object_new_object();
		json_object_int_add(json, "ledger", skiplist_count(lp->ledger));
		json_object_int_add(json, "inUse", skiplist_count(lp->inuse));
		json_object_int_add(json, "requests",
				    lp_fifo_count(&lp->requests));
		json_object_int_add(json, "labelChunks", listcount(lp->chunks));
		json_object_int_add(json, "pending", lp->pending_count);
		json_object_int_add(json, "reconnects", lp->reconnect_count);
		vty_json(vty, json);
	} else {
		vty_out(vty, "Labelpool Summary\n");
		vty_out(vty, "-----------------\n");
		vty_out(vty, "%-13s %d\n",
			"Ledger:", skiplist_count(lp->ledger));
		vty_out(vty, "%-13s %d\n", "InUse:", skiplist_count(lp->inuse));
		vty_out(vty, "%-13s %zu\n",
			"Requests:", lp_fifo_count(&lp->requests));
		vty_out(vty, "%-13s %d\n",
			"LabelChunks:", listcount(lp->chunks));
		vty_out(vty, "%-13s %d\n", "Pending:", lp->pending_count);
		vty_out(vty, "%-13s %d\n", "Reconnects:", lp->reconnect_count);
	}
	return CMD_SUCCESS;
}
|
|
|
|
|
|
|
|
DEFUN(show_bgp_labelpool_ledger, show_bgp_labelpool_ledger_cmd,
|
|
|
|
"show bgp labelpool ledger [json]",
|
|
|
|
SHOW_STR BGP_STR
|
|
|
|
"BGP Labelpool information\n"
|
|
|
|
"BGP Labelpool ledger\n" JSON_STR)
|
|
|
|
{
|
|
|
|
bool uj = use_json(argc, argv);
|
|
|
|
json_object *json = NULL, *json_elem = NULL;
|
|
|
|
struct lp_lcb *lcb = NULL;
|
2020-12-17 11:41:07 +01:00
|
|
|
struct bgp_dest *dest;
|
2020-12-04 13:14:26 +01:00
|
|
|
void *cursor = NULL;
|
|
|
|
const struct prefix *p;
|
|
|
|
int rc, count;
|
|
|
|
|
|
|
|
if (!lp) {
|
|
|
|
if (uj)
|
|
|
|
vty_out(vty, "{}\n");
|
|
|
|
else
|
|
|
|
vty_out(vty, "No existing BGP labelpool\n");
|
|
|
|
return (CMD_WARNING);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (uj) {
|
|
|
|
count = skiplist_count(lp->ledger);
|
|
|
|
if (!count) {
|
|
|
|
vty_out(vty, "{}\n");
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
json = json_object_new_array();
|
|
|
|
} else {
|
|
|
|
vty_out(vty, "Prefix Label\n");
|
|
|
|
vty_out(vty, "---------------------------\n");
|
|
|
|
}
|
|
|
|
|
2020-12-17 11:41:07 +01:00
|
|
|
for (rc = skiplist_next(lp->ledger, (void **)&dest, (void **)&lcb,
|
2020-12-04 13:14:26 +01:00
|
|
|
&cursor);
|
2020-12-17 11:41:07 +01:00
|
|
|
!rc; rc = skiplist_next(lp->ledger, (void **)&dest, (void **)&lcb,
|
2020-12-04 13:14:26 +01:00
|
|
|
&cursor)) {
|
|
|
|
if (uj) {
|
|
|
|
json_elem = json_object_new_object();
|
|
|
|
json_object_array_add(json, json_elem);
|
|
|
|
}
|
|
|
|
switch (lcb->type) {
|
|
|
|
case LP_TYPE_BGP_LU:
|
2020-12-17 11:41:07 +01:00
|
|
|
if (!CHECK_FLAG(dest->flags, BGP_NODE_LABEL_REQUESTED))
|
2020-12-04 13:14:26 +01:00
|
|
|
if (uj) {
|
|
|
|
json_object_string_add(
|
|
|
|
json_elem, "prefix", "INVALID");
|
|
|
|
json_object_int_add(json_elem, "label",
|
|
|
|
lcb->label);
|
|
|
|
} else
|
|
|
|
vty_out(vty, "%-18s %u\n",
|
|
|
|
"INVALID", lcb->label);
|
|
|
|
else {
|
2020-12-17 11:41:07 +01:00
|
|
|
p = bgp_dest_get_prefix(dest);
|
2020-12-04 13:14:26 +01:00
|
|
|
if (uj) {
|
2022-08-25 12:46:58 +02:00
|
|
|
json_object_string_addf(
|
|
|
|
json_elem, "prefix", "%pFX", p);
|
2020-12-04 13:14:26 +01:00
|
|
|
json_object_int_add(json_elem, "label",
|
|
|
|
lcb->label);
|
|
|
|
} else
|
2022-08-25 12:46:58 +02:00
|
|
|
vty_out(vty, "%-18pFX %u\n", p,
|
2020-12-04 13:14:26 +01:00
|
|
|
lcb->label);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case LP_TYPE_VRF:
|
|
|
|
if (uj) {
|
|
|
|
json_object_string_add(json_elem, "prefix",
|
|
|
|
"VRF");
|
|
|
|
json_object_int_add(json_elem, "label",
|
|
|
|
lcb->label);
|
|
|
|
} else
|
|
|
|
vty_out(vty, "%-18s %u\n", "VRF",
|
|
|
|
lcb->label);
|
|
|
|
|
|
|
|
break;
|
2023-02-28 14:11:30 +01:00
|
|
|
case LP_TYPE_NEXTHOP:
|
|
|
|
if (uj) {
|
|
|
|
json_object_string_add(json_elem, "prefix",
|
|
|
|
"nexthop");
|
|
|
|
json_object_int_add(json_elem, "label",
|
|
|
|
lcb->label);
|
|
|
|
} else
|
|
|
|
vty_out(vty, "%-18s %u\n", "nexthop",
|
|
|
|
lcb->label);
|
|
|
|
break;
|
2023-05-02 16:30:20 +02:00
|
|
|
case LP_TYPE_BGP_L3VPN_BIND:
|
|
|
|
if (uj) {
|
|
|
|
json_object_string_add(json_elem, "prefix",
|
|
|
|
"l3vpn-bind");
|
|
|
|
json_object_int_add(json_elem, "label",
|
|
|
|
lcb->label);
|
|
|
|
} else
|
|
|
|
vty_out(vty, "%-18s %u\n", "l3vpn-bind",
|
|
|
|
lcb->label);
|
|
|
|
break;
|
2020-12-04 13:14:26 +01:00
|
|
|
}
|
|
|
|
}
|
2021-11-25 23:02:37 +01:00
|
|
|
if (uj)
|
2021-11-25 16:51:12 +01:00
|
|
|
vty_json(vty, json);
|
2020-12-04 13:14:26 +01:00
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
DEFUN(show_bgp_labelpool_inuse, show_bgp_labelpool_inuse_cmd,
|
|
|
|
"show bgp labelpool inuse [json]",
|
|
|
|
SHOW_STR BGP_STR
|
|
|
|
"BGP Labelpool information\n"
|
|
|
|
"BGP Labelpool inuse\n" JSON_STR)
|
|
|
|
{
|
|
|
|
bool uj = use_json(argc, argv);
|
|
|
|
json_object *json = NULL, *json_elem = NULL;
|
2020-12-17 11:41:07 +01:00
|
|
|
struct bgp_dest *dest;
|
2020-12-04 13:14:26 +01:00
|
|
|
mpls_label_t label;
|
|
|
|
struct lp_lcb *lcb;
|
|
|
|
void *cursor = NULL;
|
|
|
|
const struct prefix *p;
|
|
|
|
int rc, count;
|
|
|
|
|
|
|
|
if (!lp) {
|
|
|
|
vty_out(vty, "No existing BGP labelpool\n");
|
|
|
|
return (CMD_WARNING);
|
|
|
|
}
|
|
|
|
if (!lp) {
|
|
|
|
if (uj)
|
|
|
|
vty_out(vty, "{}\n");
|
|
|
|
else
|
|
|
|
vty_out(vty, "No existing BGP labelpool\n");
|
|
|
|
return (CMD_WARNING);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (uj) {
|
|
|
|
count = skiplist_count(lp->inuse);
|
|
|
|
if (!count) {
|
|
|
|
vty_out(vty, "{}\n");
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
json = json_object_new_array();
|
|
|
|
} else {
|
|
|
|
vty_out(vty, "Prefix Label\n");
|
|
|
|
vty_out(vty, "---------------------------\n");
|
|
|
|
}
|
2020-12-17 11:41:07 +01:00
|
|
|
for (rc = skiplist_next(lp->inuse, (void **)&label, (void **)&dest,
|
2020-12-04 13:14:26 +01:00
|
|
|
&cursor);
|
2020-12-17 11:41:07 +01:00
|
|
|
!rc; rc = skiplist_next(lp->ledger, (void **)&label,
|
|
|
|
(void **)&dest, &cursor)) {
|
|
|
|
if (skiplist_search(lp->ledger, dest, (void **)&lcb))
|
2020-12-04 13:14:26 +01:00
|
|
|
continue;
|
|
|
|
|
|
|
|
if (uj) {
|
|
|
|
json_elem = json_object_new_object();
|
|
|
|
json_object_array_add(json, json_elem);
|
|
|
|
}
|
|
|
|
|
|
|
|
switch (lcb->type) {
|
|
|
|
case LP_TYPE_BGP_LU:
|
2020-12-17 11:41:07 +01:00
|
|
|
if (!CHECK_FLAG(dest->flags, BGP_NODE_LABEL_REQUESTED))
|
2020-12-04 13:14:26 +01:00
|
|
|
if (uj) {
|
|
|
|
json_object_string_add(
|
|
|
|
json_elem, "prefix", "INVALID");
|
|
|
|
json_object_int_add(json_elem, "label",
|
|
|
|
label);
|
|
|
|
} else
|
|
|
|
vty_out(vty, "INVALID %u\n",
|
|
|
|
label);
|
|
|
|
else {
|
2020-12-17 11:41:07 +01:00
|
|
|
p = bgp_dest_get_prefix(dest);
|
2020-12-04 13:14:26 +01:00
|
|
|
if (uj) {
|
2022-08-25 12:46:58 +02:00
|
|
|
json_object_string_addf(
|
|
|
|
json_elem, "prefix", "%pFX", p);
|
2020-12-04 13:14:26 +01:00
|
|
|
json_object_int_add(json_elem, "label",
|
|
|
|
label);
|
|
|
|
} else
|
2022-08-25 12:46:58 +02:00
|
|
|
vty_out(vty, "%-18pFX %u\n", p,
|
2020-12-04 13:14:26 +01:00
|
|
|
label);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case LP_TYPE_VRF:
|
|
|
|
if (uj) {
|
|
|
|
json_object_string_add(json_elem, "prefix",
|
|
|
|
"VRF");
|
|
|
|
json_object_int_add(json_elem, "label", label);
|
|
|
|
} else
|
|
|
|
vty_out(vty, "%-18s %u\n", "VRF",
|
|
|
|
label);
|
|
|
|
break;
|
2023-02-28 14:11:30 +01:00
|
|
|
case LP_TYPE_NEXTHOP:
|
|
|
|
if (uj) {
|
|
|
|
json_object_string_add(json_elem, "prefix",
|
|
|
|
"nexthop");
|
|
|
|
json_object_int_add(json_elem, "label", label);
|
|
|
|
} else
|
|
|
|
vty_out(vty, "%-18s %u\n", "nexthop",
|
|
|
|
label);
|
|
|
|
break;
|
2023-05-02 16:30:20 +02:00
|
|
|
case LP_TYPE_BGP_L3VPN_BIND:
|
|
|
|
if (uj) {
|
|
|
|
json_object_string_add(json_elem, "prefix",
|
|
|
|
"l3vpn-bind");
|
|
|
|
json_object_int_add(json_elem, "label", label);
|
|
|
|
} else
|
|
|
|
vty_out(vty, "%-18s %u\n", "l3vpn-bind",
|
|
|
|
label);
|
|
|
|
break;
|
2020-12-04 13:14:26 +01:00
|
|
|
}
|
|
|
|
}
|
2021-11-25 23:02:37 +01:00
|
|
|
if (uj)
|
2021-11-25 16:51:12 +01:00
|
|
|
vty_json(vty, json);
|
2020-12-04 13:14:26 +01:00
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * "show bgp labelpool requests" -- list the label requests still queued
 * toward the zebra label manager (the lp->requests FIFO), identified by
 * type: prefix (BGP-LU), VRF, nexthop, or l3vpn-bind.
 */
DEFUN(show_bgp_labelpool_requests, show_bgp_labelpool_requests_cmd,
      "show bgp labelpool requests [json]",
      SHOW_STR BGP_STR
      "BGP Labelpool information\n"
      "BGP Labelpool requests\n" JSON_STR)
{
	bool uj = use_json(argc, argv);
	json_object *json = NULL, *json_elem = NULL;
	struct bgp_dest *dest;
	const struct prefix *p;
	struct lp_fifo *item, *next;
	int count;

	if (!lp) {
		if (uj)
			vty_out(vty, "{}\n");
		else
			vty_out(vty, "No existing BGP labelpool\n");
		return (CMD_WARNING);
	}

	if (uj) {
		count = lp_fifo_count(&lp->requests);
		if (!count) {
			vty_out(vty, "{}\n");
			return CMD_SUCCESS;
		}
		json = json_object_new_array();
	} else {
		vty_out(vty, "Prefix \n");
		vty_out(vty, "----------------\n");
	}

	/* _safe iteration: next pointer fetched before visiting item */
	for (item = lp_fifo_first(&lp->requests); item; item = next) {
		next = lp_fifo_next_safe(&lp->requests, item);
		/* for BGP-LU requests the opaque labelid is the bgp_dest */
		dest = item->lcb.labelid;
		if (uj) {
			json_elem = json_object_new_object();
			json_object_array_add(json, json_elem);
		}
		switch (item->lcb.type) {
		case LP_TYPE_BGP_LU:
			if (!CHECK_FLAG(dest->flags,
					BGP_NODE_LABEL_REQUESTED)) {
				if (uj)
					json_object_string_add(
						json_elem, "prefix", "INVALID");
				else
					vty_out(vty, "INVALID\n");
			} else {
				p = bgp_dest_get_prefix(dest);
				if (uj)
					json_object_string_addf(
						json_elem, "prefix", "%pFX", p);
				else
					vty_out(vty, "%-18pFX\n", p);
			}
			break;
		case LP_TYPE_VRF:
			if (uj)
				json_object_string_add(json_elem, "prefix",
						       "VRF");
			else
				vty_out(vty, "VRF\n");
			break;
		case LP_TYPE_NEXTHOP:
			if (uj)
				json_object_string_add(json_elem, "prefix",
						       "nexthop");
			else
				vty_out(vty, "Nexthop\n");
			break;
		case LP_TYPE_BGP_L3VPN_BIND:
			if (uj)
				json_object_string_add(json_elem, "prefix",
						       "l3vpn-bind");
			else
				vty_out(vty, "L3VPN-BIND\n");
			break;
		}
	}
	if (uj)
		vty_json(vty, json);
	return CMD_SUCCESS;
}
|
|
|
|
|
|
|
|
DEFUN(show_bgp_labelpool_chunks, show_bgp_labelpool_chunks_cmd,
|
|
|
|
"show bgp labelpool chunks [json]",
|
|
|
|
SHOW_STR BGP_STR
|
|
|
|
"BGP Labelpool information\n"
|
|
|
|
"BGP Labelpool chunks\n" JSON_STR)
|
|
|
|
{
|
|
|
|
bool uj = use_json(argc, argv);
|
|
|
|
json_object *json = NULL, *json_elem;
|
|
|
|
struct listnode *node;
|
|
|
|
struct lp_chunk *chunk;
|
|
|
|
int count;
|
|
|
|
|
|
|
|
if (!lp) {
|
|
|
|
if (uj)
|
|
|
|
vty_out(vty, "{}\n");
|
|
|
|
else
|
|
|
|
vty_out(vty, "No existing BGP labelpool\n");
|
|
|
|
return (CMD_WARNING);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (uj) {
|
|
|
|
count = listcount(lp->chunks);
|
|
|
|
if (!count) {
|
|
|
|
vty_out(vty, "{}\n");
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
json = json_object_new_array();
|
|
|
|
} else {
|
2022-08-26 23:47:07 +02:00
|
|
|
vty_out(vty, "%10s %10s %10s %10s\n", "First", "Last", "Size",
|
|
|
|
"nfree");
|
|
|
|
vty_out(vty, "-------------------------------------------\n");
|
2020-12-04 13:14:26 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
for (ALL_LIST_ELEMENTS_RO(lp->chunks, node, chunk)) {
|
2022-08-26 23:47:07 +02:00
|
|
|
uint32_t size;
|
|
|
|
|
|
|
|
size = chunk->last - chunk->first + 1;
|
|
|
|
|
2020-12-04 13:14:26 +01:00
|
|
|
if (uj) {
|
|
|
|
json_elem = json_object_new_object();
|
|
|
|
json_object_array_add(json, json_elem);
|
|
|
|
json_object_int_add(json_elem, "first", chunk->first);
|
|
|
|
json_object_int_add(json_elem, "last", chunk->last);
|
2022-08-26 23:47:07 +02:00
|
|
|
json_object_int_add(json_elem, "size", size);
|
|
|
|
json_object_int_add(json_elem, "numberFree",
|
|
|
|
chunk->nfree);
|
2020-12-04 13:14:26 +01:00
|
|
|
} else
|
2022-08-26 23:47:07 +02:00
|
|
|
vty_out(vty, "%10u %10u %10u %10u\n", chunk->first,
|
|
|
|
chunk->last, size, chunk->nfree);
|
2020-12-04 13:14:26 +01:00
|
|
|
}
|
2021-11-25 23:02:37 +01:00
|
|
|
if (uj)
|
2021-11-25 16:51:12 +01:00
|
|
|
vty_json(vty, json);
|
2020-12-04 13:14:26 +01:00
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2023-01-16 18:24:26 +01:00
|
|
|
/*
 * Print the per-nexthop label cache for one address family of the given
 * bgp instance: one line per cached nexthop (address, label, path count,
 * outgoing interface, last update), plus the attached paths when
 * 'detail' is requested.
 *
 * Fix vs. original: assert(dest && table) was evaluated only *after*
 * bgp_dest_table(dest) had already dereferenced dest, so a NULL dest
 * would crash before the assert fired; the checks are now ordered
 * before their respective uses.
 */
static void show_bgp_nexthop_label_afi(struct vty *vty, afi_t afi,
				       struct bgp *bgp, bool detail)
{
	struct bgp_label_per_nexthop_cache_head *tree;
	struct bgp_label_per_nexthop_cache *iter;
	safi_t safi;
	void *src;
	char buf[PREFIX2STR_BUFFER];
	char labelstr[MPLS_LABEL_STRLEN];
	struct bgp_dest *dest;
	struct bgp_path_info *path;
	struct bgp *bgp_path;
	struct bgp_table *table;

	vty_out(vty, "Current BGP label nexthop cache for %s, VRF %s\n",
		afi2str(afi), bgp->name_pretty);

	tree = &bgp->mpls_labels_per_nexthop[afi];
	frr_each (bgp_label_per_nexthop_cache, tree, iter) {
		if (afi2family(afi) == AF_INET)
			src = (void *)&iter->nexthop.u.prefix4;
		else
			src = (void *)&iter->nexthop.u.prefix6;

		vty_out(vty, " %s, label %s #paths %u\n",
			inet_ntop(afi2family(afi), src, buf, sizeof(buf)),
			mpls_label2str(1, &iter->label, labelstr,
				       sizeof(labelstr), 0, true),
			iter->path_count);
		if (iter->nh)
			vty_out(vty, " if %s\n",
				ifindex2ifname(iter->nh->ifindex,
					       iter->nh->vrf_id));
		/* buf is reused here; inet_ntop output was already printed */
		vty_out(vty, " Last update: %s", time_to_string(iter->last_update, buf));
		if (!detail)
			continue;
		vty_out(vty, " Paths:\n");
		LIST_FOREACH (path, &(iter->paths),
			      mplsvpn.blnc.label_nh_thread) {
			dest = path->net;
			assert(dest);
			table = bgp_dest_table(dest);
			assert(table);
			afi = family2afi(bgp_dest_get_prefix(dest)->family);
			safi = table->safi;
			bgp_path = table->bgp;

			if (dest->pdest) {
				vty_out(vty, " %d/%d %pBD RD ", afi, safi,
					dest);
				vty_out(vty, BGP_RD_AS_FORMAT(bgp->asnotation),
					(struct prefix_rd *)bgp_dest_get_prefix(
						dest->pdest));
				vty_out(vty, " %s flags 0x%x\n",
					bgp_path->name_pretty, path->flags);
			} else
				vty_out(vty, " %d/%d %pBD %s flags 0x%x\n",
					afi, safi, dest, bgp_path->name_pretty,
					path->flags);
		}
	}
}
|
|
|
|
|
|
|
|
/*
 * "show bgp [vrf NAME] label-nexthop [detail]" -- dump the per-nexthop
 * label cache for both IPv4 and IPv6 of the selected (or default)
 * bgp instance.
 */
DEFPY(show_bgp_nexthop_label, show_bgp_nexthop_label_cmd,
      "show bgp [<view|vrf> VIEWVRFNAME] label-nexthop [detail]",
      SHOW_STR BGP_STR BGP_INSTANCE_HELP_STR
      "BGP label per-nexthop table\n"
      "Show detailed information\n")
{
	int idx = 0;
	char *vrf = NULL;
	struct bgp *bgp;
	bool detail = false;
	int afi;

	if (argv_find(argv, argc, "vrf", &idx)) {
		vrf = argv[++idx]->arg;
		bgp = bgp_lookup_by_name(vrf);
	} else
		bgp = bgp_get_default();

	/* unknown VRF / no default instance: nothing to show */
	if (!bgp)
		return CMD_SUCCESS;

	if (argv_find(argv, argc, "detail", &idx))
		detail = true;

	for (afi = AFI_IP; afi <= AFI_IP6; afi++)
		show_bgp_nexthop_label_afi(vty, afi, bgp, detail);
	return CMD_SUCCESS;
}
|
|
|
|
|
2022-08-26 23:47:07 +02:00
|
|
|
#if BGP_LABELPOOL_ENABLE_TESTS
|
|
|
|
/*------------------------------------------------------------------------
|
|
|
|
* Testing code start
|
|
|
|
*------------------------------------------------------------------------*/
|
|
|
|
|
|
|
|
DEFINE_MTYPE_STATIC(BGPD, LABELPOOL_TEST, "Label pool test");

/* per-test counter indices (see lpt_counter_names below) */
#define LPT_STAT_INSERT_FAIL 0
#define LPT_STAT_DELETE_FAIL 1
#define LPT_STAT_ALLOCATED 2
#define LPT_STAT_DEALLOCATED 3
#define LPT_STAT_MAX 4

/* display names, indexed by the LPT_STAT_* values above */
const char *lpt_counter_names[] = {
	"sl insert failures",
	"sl delete failures",
	"labels allocated",
	"labels deallocated",
};

/* current test run number; packed into the top 8 bits of each labelid */
static uint8_t lpt_generation;
static bool lpt_inprogress;
/* generation -> struct lp_test; del callback frees the tcb */
static struct skiplist *lp_tests;
/* diagnostic counters for tcb lookup failures, by call site */
static unsigned int lpt_test_cb_tcb_lookup_fails;
static unsigned int lpt_release_tcb_lookup_fails;
static unsigned int lpt_test_event_tcb_lookup_fails;
static unsigned int lpt_stop_tcb_lookup_fails;

/* control block for one label pool performance test run */
struct lp_test {
	uint8_t generation;
	unsigned int request_maximum;
	unsigned int request_blocksize;
	uintptr_t request_count; /* match type of labelid */
	int label_type;
	struct skiplist *labels;
	struct timeval starttime;
	struct skiplist *timestamps_alloc;
	struct skiplist *timestamps_dealloc;
	struct event *event_thread;
	unsigned int counter[LPT_STAT_MAX];
};

/* test parameters */
#define LPT_MAX_COUNT 500000 /* get this many labels in all */
#define LPT_BLKSIZE 10000 /* this many at a time, then yield */
#define LPT_TS_INTERVAL 10000 /* timestamp every this many labels */
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * Label pool callback for the performance test: labelid packs the test
 * generation in bits 24..31 and the request sequence in bits 0..23.
 * Records the (de)allocation in the test control block and timestamps
 * every LPT_TS_INTERVAL-th event. Returns 0 to accept, -1 to reject.
 *
 * Fix vs. original: the deallocation-timestamp skiplist was keyed by
 * counter[LPT_STAT_ALLOCATED] (copy/paste from the allocation branch);
 * it is now keyed by the deallocation counter it is sampled on.
 */
static int test_cb(mpls_label_t label, void *labelid, bool allocated)
{
	uintptr_t generation;
	struct lp_test *tcb;

	generation = ((uintptr_t)labelid >> 24) & 0xff;

	if (skiplist_search(lp_tests, (void *)generation, (void **)&tcb)) {

		/* couldn't find current test in progress */
		++lpt_test_cb_tcb_lookup_fails;
		return -1; /* reject allocation */
	}

	if (allocated) {
		++tcb->counter[LPT_STAT_ALLOCATED];
		if (!(tcb->counter[LPT_STAT_ALLOCATED] % LPT_TS_INTERVAL)) {
			uintptr_t time_ms;

			time_ms = monotime_since(&tcb->starttime, NULL) / 1000;
			skiplist_insert(tcb->timestamps_alloc,
					(void *)(uintptr_t)tcb
						->counter[LPT_STAT_ALLOCATED],
					(void *)time_ms);
		}
		if (skiplist_insert(tcb->labels, labelid,
				    (void *)(uintptr_t)label)) {
			++tcb->counter[LPT_STAT_INSERT_FAIL];
			return -1;
		}
	} else {
		++tcb->counter[LPT_STAT_DEALLOCATED];
		if (!(tcb->counter[LPT_STAT_DEALLOCATED] % LPT_TS_INTERVAL)) {
			uintptr_t time_ms;

			time_ms = monotime_since(&tcb->starttime, NULL) / 1000;
			skiplist_insert(tcb->timestamps_dealloc,
					(void *)(uintptr_t)tcb
						->counter[LPT_STAT_DEALLOCATED],
					(void *)time_ms);
		}
		if (skiplist_delete(tcb->labels, labelid, 0)) {
			++tcb->counter[LPT_STAT_DELETE_FAIL];
			return -1;
		}
	}
	return 0;
}
|
|
|
|
|
2022-03-01 22:18:12 +01:00
|
|
|
/*
 * Event handler driving the performance test: requests up to
 * request_blocksize labels, then re-schedules itself until
 * request_maximum labels have been requested (yielding between blocks
 * so the event loop stays responsive).
 */
static void labelpool_test_event_handler(struct event *thread)
{
	struct lp_test *tcb;

	if (skiplist_search(lp_tests, (void *)(uintptr_t)(lpt_generation),
			    (void **)&tcb)) {

		/* couldn't find current test in progress */
		++lpt_test_event_tcb_lookup_fails;
		return;
	}

	/*
	 * request a bunch of labels
	 */
	for (unsigned int i = 0; (i < tcb->request_blocksize) &&
				 (tcb->request_count < tcb->request_maximum);
	     ++i) {

		uintptr_t id;

		++tcb->request_count;

		/*
		 * construct 32-bit id from request_count and generation
		 */
		id = ((uintptr_t)tcb->generation << 24) |
		     (tcb->request_count & 0x00ffffff);
		bgp_lp_get(LP_TYPE_VRF, (void *)id, test_cb);
	}

	/* more to do: schedule the next block */
	if (tcb->request_count < tcb->request_maximum)
		thread_add_event(bm->master, labelpool_test_event_handler, NULL,
				 0, &tcb->event_thread);
}
|
|
|
|
|
|
|
|
/*
 * Stop the currently-running test: cancel its pending event (if any)
 * and clear the in-progress flag. Results remain in lp_tests for
 * later display.
 */
static void lptest_stop(void)
{
	struct lp_test *tcb;

	if (!lpt_inprogress)
		return;

	if (skiplist_search(lp_tests, (void *)(uintptr_t)(lpt_generation),
			    (void **)&tcb)) {

		/* couldn't find current test in progress */
		++lpt_stop_tcb_lookup_fails;
		return;
	}

	if (tcb->event_thread)
		event_cancel(&tcb->event_thread);

	lpt_inprogress = false;
}
|
|
|
|
|
|
|
|
/*
 * Start a new label pool performance test run. Allocates and registers
 * a test control block (keyed by generation) and kicks off the event
 * handler that issues label requests. Returns 0 on success, -1 on
 * refusal (test already running, too many runs, or LPT_MAX_COUNT does
 * not fit in the labelid's sequence bits).
 */
static int lptest_start(struct vty *vty)
{
	struct lp_test *tcb;

	if (lpt_inprogress) {
		vty_out(vty, "test already in progress\n");
		return -1;
	}

	/* generation is a uint8_t; refuse once all values are used */
	if (skiplist_count(lp_tests) >=
	    (1 << (8 * sizeof(lpt_generation))) - 1) {
		/*
		 * Too many test runs
		 */
		vty_out(vty, "too many tests: clear first\n");
		return -1;
	}

	/*
	 * We pack the generation and request number into the labelid;
	 * make sure they fit.
	 */
	unsigned int n1 = LPT_MAX_COUNT;
	unsigned int sh = 0;
	unsigned int label_bits;

	label_bits = 8 * (sizeof(tcb->request_count) - sizeof(lpt_generation));

	/* n1 should be same type as tcb->request_maximum */
	assert(sizeof(n1) == sizeof(tcb->request_maximum));

	while (n1 >>= 1)
		++sh;
	sh += 1; /* number of bits needed to hold LPT_MAX_COUNT */

	if (sh > label_bits) {
		vty_out(vty,
			"Sorry, test iteration count too big on this platform (LPT_MAX_COUNT %u, need %u bits, but label_bits is only %u)\n",
			LPT_MAX_COUNT, sh, label_bits);
		return -1;
	}

	lpt_inprogress = true;
	++lpt_generation;

	tcb = XCALLOC(MTYPE_LABELPOOL_TEST, sizeof(*tcb));

	tcb->generation = lpt_generation;
	tcb->label_type = LP_TYPE_VRF;
	tcb->request_maximum = LPT_MAX_COUNT;
	tcb->request_blocksize = LPT_BLKSIZE;
	tcb->labels = skiplist_new(0, NULL, NULL);
	tcb->timestamps_alloc = skiplist_new(0, NULL, NULL);
	tcb->timestamps_dealloc = skiplist_new(0, NULL, NULL);
	thread_add_event(bm->master, labelpool_test_event_handler, NULL, 0,
			 &tcb->event_thread);
	monotime(&tcb->starttime);

	skiplist_insert(lp_tests, (void *)(uintptr_t)tcb->generation, tcb);
	return 0;
}
|
|
|
|
|
|
|
|
/* "debug bgp lptest start" -- kick off a label pool performance test */
DEFPY(start_labelpool_perf_test, start_labelpool_perf_test_cmd,
      "debug bgp lptest start",
      DEBUG_STR BGP_STR
      "label pool test\n"
      "start\n")
{
	lptest_start(vty);
	return CMD_SUCCESS;
}
|
|
|
|
|
|
|
|
/*
 * Print global lookup-failure counters, then the per-run counters and
 * allocation timestamps for the given test control block. With a NULL
 * tcb, reports on the most recent generation.
 *
 * NOTE(review): only timestamps_alloc is printed here; the
 * timestamps_dealloc skiplist collected in test_cb is never displayed —
 * presumably intentional, but worth confirming.
 */
static void lptest_print_stats(struct vty *vty, struct lp_test *tcb)
{
	unsigned int i;

	vty_out(vty, "Global Lookup Failures in test_cb: %5u\n",
		lpt_test_cb_tcb_lookup_fails);
	vty_out(vty, "Global Lookup Failures in release: %5u\n",
		lpt_release_tcb_lookup_fails);
	vty_out(vty, "Global Lookup Failures in event: %5u\n",
		lpt_test_event_tcb_lookup_fails);
	vty_out(vty, "Global Lookup Failures in stop: %5u\n",
		lpt_stop_tcb_lookup_fails);
	vty_out(vty, "\n");

	if (!tcb) {
		/* default to the most recent test run */
		if (skiplist_search(lp_tests, (void *)(uintptr_t)lpt_generation,
				    (void **)&tcb)) {
			vty_out(vty, "Error: can't find test %u\n",
				lpt_generation);
			return;
		}
	}

	vty_out(vty, "Test Generation %u:\n", tcb->generation);

	vty_out(vty, "Counter Value\n");
	for (i = 0; i < LPT_STAT_MAX; ++i) {
		vty_out(vty, "%20s: %10u\n", lpt_counter_names[i],
			tcb->counter[i]);
	}
	vty_out(vty, "\n");

	if (tcb->timestamps_alloc) {
		void *Key;
		void *Value;
		void *cursor;

		float elapsed;

		vty_out(vty, "%10s %10s\n", "Count", "Seconds");

		cursor = NULL;
		while (!skiplist_next(tcb->timestamps_alloc, &Key, &Value,
				      &cursor)) {

			/* Value holds elapsed milliseconds since start */
			elapsed = ((float)(uintptr_t)Value) / 1000;

			vty_out(vty, "%10llu %10.3f\n",
				(unsigned long long)(uintptr_t)Key, elapsed);
		}
		vty_out(vty, "\n");
	}
}
|
|
|
|
|
|
|
|
/* "debug bgp lptest show" -- print stats for every recorded test run */
DEFPY(show_labelpool_perf_test, show_labelpool_perf_test_cmd,
      "debug bgp lptest show",
      DEBUG_STR BGP_STR
      "label pool test\n"
      "show\n")
{

	if (lp_tests) {
		void *Key;
		void *Value;
		void *cursor;

		cursor = NULL;
		while (!skiplist_next(lp_tests, &Key, &Value, &cursor)) {
			lptest_print_stats(vty, (struct lp_test *)Value);
		}
	} else {
		vty_out(vty, "no test results\n");
	}
	return CMD_SUCCESS;
}
|
|
|
|
|
|
|
|
/* "debug bgp lptest stop" -- stop the running test and print its stats */
DEFPY(stop_labelpool_perf_test, stop_labelpool_perf_test_cmd,
      "debug bgp lptest stop",
      DEBUG_STR BGP_STR
      "label pool test\n"
      "stop\n")
{

	if (lpt_inprogress) {
		lptest_stop();
		lptest_print_stats(vty, NULL);
	} else {
		vty_out(vty, "no test in progress\n");
	}
	return CMD_SUCCESS;
}
|
|
|
|
|
|
|
|
/*
 * "debug bgp lptest clear" -- stop any running test and discard all
 * recorded test runs (the skiplist's del callback frees each tcb and
 * releases its labels).
 */
DEFPY(clear_labelpool_perf_test, clear_labelpool_perf_test_cmd,
      "debug bgp lptest clear",
      DEBUG_STR BGP_STR
      "label pool test\n"
      "clear\n")
{

	if (lpt_inprogress) {
		lptest_stop();
	}
	if (lp_tests) {
		while (!skiplist_first(lp_tests, NULL, NULL))
			/* del function of skiplist cleans up tcbs */
			skiplist_delete_first(lp_tests);
	}
	return CMD_SUCCESS;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* With the "release" command, we can release labels at intervals through
|
|
|
|
* the ID space. Thus we can to exercise the bitfield-wrapping behavior
|
|
|
|
* of the allocator in a subsequent test.
|
|
|
|
*/
|
|
|
|
/* clang-format off */
/*
 * "debug bgp lptest release test N every K" -- release every K-th label
 * that test generation N still holds, to exercise the allocator's
 * bitfield-wrapping behavior in a subsequent test run.
 */
DEFPY(release_labelpool_perf_test, release_labelpool_perf_test_cmd,
      "debug bgp lptest release test GENERATION$generation every (1-5)$every_nth",
      DEBUG_STR
      BGP_STR
      "label pool test\n"
      "release labels\n"
      "\"test\"\n"
      "test number\n"
      "\"every\"\n"
      "label fraction denominator\n")
{
	/* clang-format on */

	unsigned long testnum;
	char *end;
	struct lp_test *tcb;

	testnum = strtoul(generation, &end, 0);
	if (*end) {
		vty_out(vty, "Invalid test number: \"%s\"\n", generation);
		return CMD_SUCCESS;
	}
	if (lpt_inprogress && (testnum == lpt_generation)) {
		vty_out(vty,
			"Error: Test %lu is still in progress (stop first)\n",
			testnum);
		return CMD_SUCCESS;
	}

	if (skiplist_search(lp_tests, (void *)(uintptr_t)testnum,
			    (void **)&tcb)) {

		/* couldn't find current test in progress */
		vty_out(vty, "Error: Can't look up test number: \"%lu\"\n",
			testnum);
		++lpt_release_tcb_lookup_fails;
		return CMD_SUCCESS;
	}

	void *Key, *cKey;
	void *Value, *cValue;
	void *cursor;
	unsigned int iteration;
	int rc;

	cursor = NULL;
	iteration = 0;
	rc = skiplist_next(tcb->labels, &Key, &Value, &cursor);

	while (!rc) {
		cKey = Key;
		cValue = Value;

		/* find next item before we delete this one */
		rc = skiplist_next(tcb->labels, &Key, &Value, &cursor);

		/* release and drop every every_nth-th entry */
		if (!(iteration % every_nth)) {
			bgp_lp_release(tcb->label_type, cKey,
				       (mpls_label_t)(uintptr_t)cValue);
			skiplist_delete(tcb->labels, cKey, NULL);
			++tcb->counter[LPT_STAT_DEALLOCATED];
		}
		++iteration;
	}

	return CMD_SUCCESS;
}
|
|
|
|
|
|
|
|
/*
 * skiplist del callback for lp_tests: release every label the test
 * still holds, free its skiplists, cancel its event, and free the tcb.
 */
static void lptest_delete(void *val)
{
	struct lp_test *tcb = (struct lp_test *)val;
	void *Key;
	void *Value;
	void *cursor;

	if (tcb->labels) {
		cursor = NULL;
		while (!skiplist_next(tcb->labels, &Key, &Value, &cursor))
			bgp_lp_release(tcb->label_type, Key,
				       (mpls_label_t)(uintptr_t)Value);
		skiplist_free(tcb->labels);
		tcb->labels = NULL;
	}
	if (tcb->timestamps_alloc) {
		/* NOTE(review): this cursor reset is vestigial (no
		 * iteration follows) */
		cursor = NULL;
		skiplist_free(tcb->timestamps_alloc);
		tcb->timestamps_alloc = NULL;
	}

	if (tcb->timestamps_dealloc) {
		/* NOTE(review): vestigial, as above */
		cursor = NULL;
		skiplist_free(tcb->timestamps_dealloc);
		tcb->timestamps_dealloc = NULL;
	}

	if (tcb->event_thread)
		event_cancel(&tcb->event_thread);

	/* scrub before freeing to catch use-after-free sooner */
	memset(tcb, 0, sizeof(*tcb));

	XFREE(MTYPE_LABELPOOL_TEST, tcb);
}
|
|
|
|
|
|
|
|
/* Create the test-run registry; lptest_delete cleans up each entry. */
static void lptest_init(void)
{
	lp_tests = skiplist_new(0, NULL, lptest_delete);
}
|
|
|
|
|
|
|
|
/* Destroy the test-run registry (frees all recorded runs). */
static void lptest_finish(void)
{
	if (lp_tests) {
		skiplist_free(lp_tests);
		lp_tests = NULL;
	}
}
|
|
|
|
|
|
|
|
/*------------------------------------------------------------------------
|
|
|
|
* Testing code end
|
|
|
|
*------------------------------------------------------------------------*/
|
|
|
|
#endif /* BGP_LABELPOOL_ENABLE_TESTS */
|
|
|
|
|
2020-12-04 13:14:26 +01:00
|
|
|
/*
 * Install the label pool "show" commands, plus the perf-test debug
 * commands when the compile-time test harness is enabled.
 */
void bgp_lp_vty_init(void)
{
	install_element(VIEW_NODE, &show_bgp_labelpool_summary_cmd);
	install_element(VIEW_NODE, &show_bgp_labelpool_ledger_cmd);
	install_element(VIEW_NODE, &show_bgp_labelpool_inuse_cmd);
	install_element(VIEW_NODE, &show_bgp_labelpool_requests_cmd);
	install_element(VIEW_NODE, &show_bgp_labelpool_chunks_cmd);

#if BGP_LABELPOOL_ENABLE_TESTS
	install_element(ENABLE_NODE, &start_labelpool_perf_test_cmd);
	install_element(ENABLE_NODE, &show_labelpool_perf_test_cmd);
	install_element(ENABLE_NODE, &stop_labelpool_perf_test_cmd);
	install_element(ENABLE_NODE, &release_labelpool_perf_test_cmd);
	install_element(ENABLE_NODE, &clear_labelpool_perf_test_cmd);
#endif /* BGP_LABELPOOL_ENABLE_TESTS */
}
|
2023-02-28 14:17:17 +01:00
|
|
|
|
|
|
|
DEFINE_MTYPE_STATIC(BGPD, LABEL_PER_NEXTHOP_CACHE,
|
|
|
|
"BGP Label Per Nexthop entry");
|
|
|
|
|
|
|
|
/* The nexthops values are compared to
|
|
|
|
* find in the tree the appropriate cache entry
|
|
|
|
*/
|
|
|
|
int bgp_label_per_nexthop_cache_cmp(const struct bgp_label_per_nexthop_cache *a,
|
|
|
|
const struct bgp_label_per_nexthop_cache *b)
|
|
|
|
{
|
|
|
|
return prefix_cmp(&a->nexthop, &b->nexthop);
|
|
|
|
}
|
|
|
|
|
|
|
|
struct bgp_label_per_nexthop_cache *
|
|
|
|
bgp_label_per_nexthop_new(struct bgp_label_per_nexthop_cache_head *tree,
|
|
|
|
struct prefix *nexthop)
|
|
|
|
{
|
|
|
|
struct bgp_label_per_nexthop_cache *blnc;
|
|
|
|
|
|
|
|
blnc = XCALLOC(MTYPE_LABEL_PER_NEXTHOP_CACHE,
|
|
|
|
sizeof(struct bgp_label_per_nexthop_cache));
|
|
|
|
blnc->tree = tree;
|
|
|
|
blnc->label = MPLS_INVALID_LABEL;
|
|
|
|
prefix_copy(&blnc->nexthop, nexthop);
|
|
|
|
LIST_INIT(&(blnc->paths));
|
|
|
|
bgp_label_per_nexthop_cache_add(tree, blnc);
|
|
|
|
|
|
|
|
return blnc;
|
|
|
|
}
|
|
|
|
|
|
|
|
struct bgp_label_per_nexthop_cache *
|
|
|
|
bgp_label_per_nexthop_find(struct bgp_label_per_nexthop_cache_head *tree,
|
|
|
|
struct prefix *nexthop)
|
|
|
|
{
|
|
|
|
struct bgp_label_per_nexthop_cache blnc = {};
|
|
|
|
|
|
|
|
if (!tree)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
memcpy(&blnc.nexthop, nexthop, sizeof(struct prefix));
|
|
|
|
return bgp_label_per_nexthop_cache_find(tree, &blnc);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Tear down a per-nexthop label cache entry: withdraw the MPLS binding
 * from zebra, return the label to the pool, unlink the entry from its
 * tree, release its nexthop, and free it.
 */
void bgp_label_per_nexthop_free(struct bgp_label_per_nexthop_cache *blnc)
{
	if (blnc->label != MPLS_INVALID_LABEL) {
		/* NOTE(review): blnc->nh is dereferenced here without a
		 * NULL check while the cleanup below guards "if (blnc->nh)"
		 * — presumably nh is always set once a label has been
		 * allocated; TODO confirm. */
		bgp_zebra_send_nexthop_label(ZEBRA_MPLS_LABELS_DELETE,
					     blnc->label, blnc->nh->ifindex,
					     blnc->nh->vrf_id, ZEBRA_LSP_BGP,
					     &blnc->nexthop, 0, NULL);
		bgp_lp_release(LP_TYPE_NEXTHOP, blnc, blnc->label);
	}
	bgp_label_per_nexthop_cache_del(blnc->tree, blnc);
	if (blnc->nh)
		nexthop_free(blnc->nh);
	blnc->nh = NULL;
	XFREE(MTYPE_LABEL_PER_NEXTHOP_CACHE, blnc);
}
|
2023-01-16 18:24:26 +01:00
|
|
|
|
|
|
|
/* Register the per-nexthop label "show" command. */
void bgp_label_per_nexthop_init(void)
{
	install_element(VIEW_NODE, &show_bgp_nexthop_label_cmd);
}
|