zebra: Limit reading packets when MetaQ is full

Currently Zebra is just reading packets off the zapi
wire and stacking them up for processing in zebra
in the future.  When there is significant churn
in the network the size of zebra can grow without
bounds due to the MetaQ sizing constraints.  This
ends up showing by the number of nexthops in the
system.  Reducing the number of packets serviced,
so that the metaQ size stays bounded by the
packets-to-process limit, alleviates this problem.

Signed-off-by: Donald Sharp <sharpd@nvidia.com>
This commit is contained in:
Donald Sharp 2025-03-24 14:11:35 -04:00
parent 12bf042c68
commit 937a9fb3e9
3 changed files with 23 additions and 11 deletions

View file

@ -462,6 +462,8 @@ extern void meta_queue_free(struct meta_queue *mq, struct zebra_vrf *zvrf);
extern int zebra_rib_labeled_unicast(struct route_entry *re);
extern struct route_table *rib_table_ipv6;
extern uint32_t zebra_rib_meta_queue_size(void);
extern void rib_unlink(struct route_node *rn, struct route_entry *re);
extern int rib_gc_dest(struct route_node *rn);
extern struct route_table *rib_tables_iter_next(rib_tables_iter_t *iter);

View file

@ -3302,8 +3302,8 @@ static int rib_meta_queue_add(struct meta_queue *mq, void *data)
mq->size++;
if (IS_ZEBRA_DEBUG_RIB_DETAILED)
rnode_debug(rn, re->vrf_id, "queued rn %p into sub-queue %s",
(void *)rn, subqueue2str(qindex));
rnode_debug(rn, re->vrf_id, "queued rn %p into sub-queue %s mq size %u", (void *)rn,
subqueue2str(qindex), zrouter.mq->size);
return 0;
}
@ -3335,8 +3335,8 @@ static int rib_meta_queue_nhg_ctx_add(struct meta_queue *mq, void *data)
mq->size++;
if (IS_ZEBRA_DEBUG_RIB_DETAILED)
zlog_debug("NHG Context id=%u queued into sub-queue %s",
ctx->id, subqueue2str(qindex));
zlog_debug("NHG Context id=%u queued into sub-queue %s mq size %u", ctx->id,
subqueue2str(qindex), zrouter.mq->size);
return 0;
}
@ -3363,8 +3363,8 @@ static int rib_meta_queue_nhg_process(struct meta_queue *mq, void *data,
mq->size++;
if (IS_ZEBRA_DEBUG_RIB_DETAILED)
zlog_debug("NHG id=%u queued into sub-queue %s", nhe->id,
subqueue2str(qindex));
zlog_debug("NHG id=%u queued into sub-queue %s mq size %u", nhe->id,
subqueue2str(qindex), zrouter.mq->size);
return 0;
}
@ -3410,6 +3410,11 @@ static int mq_add_handler(void *data,
return mq_add_func(zrouter.mq, data);
}
/* Return the current number of entries queued on the global meta queue
 * (zrouter.mq->size, incremented by each rib_meta_queue_*_add path).
 * Exposed so zserv_process_messages() can reduce the number of zapi
 * packets it reads when the meta queue is already full.
 */
uint32_t zebra_rib_meta_queue_size(void)
{
	return zrouter.mq->size;
}
void mpls_ftn_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type,
struct prefix *prefix, uint8_t route_type,
uint8_t route_instance)
@ -4226,7 +4231,7 @@ static int rib_meta_queue_gr_run_add(struct meta_queue *mq, void *data)
mq->size++;
if (IS_ZEBRA_DEBUG_RIB_DETAILED)
zlog_debug("Graceful Run adding");
zlog_debug("Graceful Run adding mq size %u", zrouter.mq->size);
return 0;
}
@ -4241,10 +4246,9 @@ static int rib_meta_queue_early_route_add(struct meta_queue *mq, void *data)
if (IS_ZEBRA_DEBUG_RIB_DETAILED) {
struct vrf *vrf = vrf_lookup_by_id(ere->re->vrf_id);
zlog_debug("Route %pFX(%s) (%s) queued for processing into sub-queue %s",
&ere->p, VRF_LOGNAME(vrf),
ere->deletion ? "delete" : "add",
subqueue2str(META_QUEUE_EARLY_ROUTE));
zlog_debug("Route %pFX(%s) (%s) queued for processing into sub-queue %s mq size %u",
&ere->p, VRF_LOGNAME(vrf), ere->deletion ? "delete" : "add",
subqueue2str(META_QUEUE_EARLY_ROUTE), zrouter.mq->size);
}
return 0;

View file

@ -530,6 +530,12 @@ static void zserv_process_messages(struct event *thread)
struct stream_fifo *cache = stream_fifo_new();
uint32_t p2p = zrouter.packets_to_process;
bool need_resched = false;
uint32_t meta_queue_size = zebra_rib_meta_queue_size();
if (meta_queue_size < p2p)
p2p = p2p - meta_queue_size;
else
p2p = 0;
frr_with_mutex (&client->ibuf_mtx) {
uint32_t i;