/* Virtual terminal interface shell.
 * Copyright (C) 2000 Kunihiro Ishiguro
 *
 * This file is part of GNU Zebra.
 *
 * GNU Zebra is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * GNU Zebra is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; see the file COPYING; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
|
|
|
|
|
|
|
|
#include <zebra.h>
|
|
|
|
|
|
|
|
#include <sys/un.h>
|
|
|
|
#include <setjmp.h>
|
|
|
|
#include <sys/wait.h>
|
|
|
|
#include <sys/resource.h>
|
|
|
|
#include <sys/stat.h>
|
|
|
|
|
2021-09-02 12:20:56 +02:00
|
|
|
/* readline carries some ancient definitions around */
|
|
|
|
#pragma GCC diagnostic push
|
|
|
|
#pragma GCC diagnostic ignored "-Wstrict-prototypes"
|
2002-12-13 21:15:29 +01:00
|
|
|
#include <readline/readline.h>
|
|
|
|
#include <readline/history.h>
|
2021-09-02 12:20:56 +02:00
|
|
|
#pragma GCC diagnostic pop
|
2002-12-13 21:15:29 +01:00
|
|
|
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
#include <dirent.h>
|
|
|
|
#include <stdio.h>
|
|
|
|
#include <string.h>
|
|
|
|
|
2015-03-04 07:07:01 +01:00
|
|
|
#include "linklist.h"
|
2002-12-13 21:15:29 +01:00
|
|
|
#include "command.h"
|
|
|
|
#include "memory.h"
|
2019-12-06 13:48:06 +01:00
|
|
|
#include "network.h"
|
2016-01-07 16:03:01 +01:00
|
|
|
#include "filter.h"
|
2002-12-13 21:15:29 +01:00
|
|
|
#include "vtysh/vtysh.h"
|
2020-10-02 00:16:23 +02:00
|
|
|
#include "vtysh/vtysh_daemons.h"
|
2004-11-20 03:06:59 +01:00
|
|
|
#include "log.h"
|
2015-05-22 11:40:00 +02:00
|
|
|
#include "vrf.h"
|
2016-11-13 09:48:56 +01:00
|
|
|
#include "libfrr.h"
|
2018-04-20 20:34:46 +02:00
|
|
|
#include "command_graph.h"
|
2018-05-15 00:13:03 +02:00
|
|
|
#include "frrstr.h"
|
2018-06-20 20:51:04 +02:00
|
|
|
#include "json.h"
|
2019-01-08 22:33:49 +01:00
|
|
|
#include "ferr.h"
|
2022-07-18 09:05:35 +02:00
|
|
|
#include "bgpd/bgp_vty.h"
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2015-05-29 05:48:31 +02:00
|
|
|
/* Memory type used for temporary copies of command strings within vtysh. */
DEFINE_MTYPE_STATIC(MVTYSH, VTYSH_CMD, "Vtysh cmd copy");

/* Struct VTY: the single global vty this shell writes output through. */
struct vty *vty;

/* VTY shell pager name (NULL = no pager configured). */
char *vtysh_pager_name = NULL;

/* VTY should add timestamp */
bool vtysh_add_timestamp;
|
|
|
|
|
2018-05-15 00:13:03 +02:00
|
|
|
/* VTY shell client structure: one per daemon vtysh can talk to. */
struct vtysh_client {
	/* Connected socket to the daemon, or a negative sentinel.
	 * VTYSH_WAS_ACTIVE marks a previously-live connection that is a
	 * candidate for reconnect (see vclient_close()). */
	int fd;
	/* Daemon name, e.g. "zebra", "bgpd". */
	const char *name;
	/* Daemon selector bit (VTYSH_ZEBRA, ...) used to address commands. */
	int flag;
	/* Filesystem path of the daemon's vty socket. */
	char path[MAXPATHLEN];
	/* Next client entry for the same daemon type — presumably used for
	 * multi-instance daemons; confirm where the list is built. */
	struct vtysh_client *next;

	/* Log-stream reader event and the fd it reads from;
	 * NOTE(review): set up outside this chunk. */
	struct thread *log_reader;
	int log_fd;
	/* Counter of lost/dropped log messages — increment sites are
	 * outside this chunk. */
	uint32_t lost_msgs;
};
|
|
|
|
|
2019-12-06 13:48:06 +01:00
|
|
|
/* Whether stderr is attached to a terminal, and whether stderr and stdout
 * point at the same output. NOTE(review): initialized outside this chunk —
 * presumably at startup; verify against main(). */
static bool stderr_tty;
static bool stderr_stdout_same;
|
|
|
|
|
2018-05-15 00:13:03 +02:00
|
|
|
/* Some utility functions for working on vtysh-specific vty tasks */
|
|
|
|
|
|
|
|
static FILE *vty_open_pager(struct vty *vty)
|
|
|
|
{
|
|
|
|
if (vty->is_paged)
|
|
|
|
return vty->of;
|
|
|
|
|
2018-05-29 19:13:51 +02:00
|
|
|
if (!vtysh_pager_name)
|
|
|
|
return NULL;
|
|
|
|
|
2018-05-15 00:13:03 +02:00
|
|
|
vty->of_saved = vty->of;
|
|
|
|
vty->of = popen(vtysh_pager_name, "w");
|
|
|
|
if (vty->of == NULL) {
|
|
|
|
vty->of = vty->of_saved;
|
|
|
|
perror("popen");
|
|
|
|
exit(1);
|
|
|
|
}
|
|
|
|
|
|
|
|
vty->is_paged = true;
|
|
|
|
|
|
|
|
return vty->of;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int vty_close_pager(struct vty *vty)
|
|
|
|
{
|
|
|
|
if (!vty->is_paged)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
fflush(vty->of);
|
|
|
|
if (pclose(vty->of) == -1) {
|
|
|
|
perror("pclose");
|
|
|
|
exit(1);
|
|
|
|
}
|
|
|
|
|
|
|
|
vty->of = vty->of_saved;
|
|
|
|
vty->is_paged = false;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-01-23 14:15:52 +01:00
|
|
|
static void vtysh_pager_envdef(bool fallback)
|
2018-05-15 00:13:03 +02:00
|
|
|
{
|
|
|
|
char *pager_defined;
|
|
|
|
|
|
|
|
pager_defined = getenv("VTYSH_PAGER");
|
|
|
|
|
|
|
|
if (pager_defined)
|
|
|
|
vtysh_pager_name = strdup(pager_defined);
|
2019-01-23 14:15:52 +01:00
|
|
|
else if (fallback)
|
2018-05-15 00:13:03 +02:00
|
|
|
vtysh_pager_name = strdup(VTYSH_PAGER);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* --- */
|
|
|
|
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
/* Table of every daemon vtysh knows how to connect to; the .flag bits
 * select which daemons a given command is forwarded to. */
struct vtysh_client vtysh_client[] = {
	{.name = "zebra", .flag = VTYSH_ZEBRA},
	{.name = "ripd", .flag = VTYSH_RIPD},
	{.name = "ripngd", .flag = VTYSH_RIPNGD},
	{.name = "ospfd", .flag = VTYSH_OSPFD},
	{.name = "ospf6d", .flag = VTYSH_OSPF6D},
	{.name = "ldpd", .flag = VTYSH_LDPD},
	{.name = "bgpd", .flag = VTYSH_BGPD},
	{.name = "isisd", .flag = VTYSH_ISISD},
	{.name = "pimd", .flag = VTYSH_PIMD},
	{.name = "nhrpd", .flag = VTYSH_NHRPD},
	{.name = "eigrpd", .flag = VTYSH_EIGRPD},
	{.name = "babeld", .flag = VTYSH_BABELD},
	{.name = "sharpd", .flag = VTYSH_SHARPD},
	{.name = "fabricd", .flag = VTYSH_FABRICD},
	{.name = "watchfrr", .flag = VTYSH_WATCHFRR},
	{.name = "pbrd", .flag = VTYSH_PBRD},
	{.name = "staticd", .flag = VTYSH_STATICD},
	{.name = "bfdd", .flag = VTYSH_BFDD},
	{.name = "vrrpd", .flag = VTYSH_VRRPD},
	{.name = "pathd", .flag = VTYSH_PATHD},
	{.name = "pim6d", .flag = VTYSH_PIM6D},
};
|
|
|
|
|
2019-06-13 23:29:33 +02:00
|
|
|
/* Searches for client by name, returns index */
|
|
|
|
static int vtysh_client_lookup(const char *name)
|
|
|
|
{
|
|
|
|
int idx = -1;
|
|
|
|
|
|
|
|
for (unsigned int i = 0; i < array_size(vtysh_client); i++) {
|
|
|
|
if (strmatch(vtysh_client[i].name, name)) {
|
|
|
|
idx = i;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return idx;
|
|
|
|
}
|
|
|
|
|
2016-09-26 22:01:37 +02:00
|
|
|
/* Preference for writing the integrated configuration file; starts out
 * unspecified. NOTE(review): the value is resolved/consumed outside this
 * chunk. */
enum vtysh_write_integrated vtysh_write_integrated =
	WRITE_INTEGRATED_UNSPECIFIED;
|
2004-10-03 22:11:32 +02:00
|
|
|
|
2018-05-21 20:00:51 +02:00
|
|
|
static int vtysh_reconnect(struct vtysh_client *vclient);
|
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
static void vclient_close(struct vtysh_client *vclient)
|
|
|
|
{
|
2005-01-28 22:11:46 +01:00
|
|
|
if (vclient->fd >= 0) {
|
2019-08-23 22:05:10 +02:00
|
|
|
if (vty->of)
|
|
|
|
vty_out(vty,
|
|
|
|
"Warning: closing connection to %s because of an I/O error!\n",
|
|
|
|
vclient->name);
|
2005-01-28 22:11:46 +01:00
|
|
|
close(vclient->fd);
|
2018-05-21 20:00:51 +02:00
|
|
|
/* indicate as candidate for reconnect */
|
|
|
|
vclient->fd = VTYSH_WAS_ACTIVE;
|
2005-01-28 22:11:46 +01:00
|
|
|
}
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
|
|
|
|
2019-12-06 13:48:06 +01:00
|
|
|
/*
 * Receive data from a daemon socket into buf (at most bufsz bytes).
 *
 * The recvmsg() call is also set up to accept one file descriptor passed
 * over the socket via SCM_RIGHTS ancillary data.  If an fd arrives, it is
 * stored in *pass_fd when pass_fd is non-NULL, otherwise closed so it
 * cannot leak.  The call is retried on EINTR/EAGAIN.
 *
 * Returns recvmsg()'s result: number of bytes read, 0 on EOF, or -1 with
 * errno set.
 */
static ssize_t vtysh_client_receive(struct vtysh_client *vclient, char *buf,
				    size_t bufsz, int *pass_fd)
{
	struct iovec iov[1] = {
		{
			.iov_base = buf,
			.iov_len = bufsz,
		},
	};
	/* Control buffer sized & aligned for exactly one fd. */
	union {
		uint8_t buf[CMSG_SPACE(sizeof(int))];
		struct cmsghdr align;
	} u;
	struct msghdr mh = {
		.msg_iov = iov,
		.msg_iovlen = array_size(iov),
		.msg_control = u.buf,
		.msg_controllen = sizeof(u.buf),
	};
	struct cmsghdr *cmh = CMSG_FIRSTHDR(&mh);
	ssize_t ret;

	/* Pre-fill the control message with fd = -1 so "no fd was passed"
	 * is distinguishable after recvmsg() returns. */
	cmh->cmsg_level = SOL_SOCKET;
	cmh->cmsg_type = SCM_RIGHTS;
	cmh->cmsg_len = CMSG_LEN(sizeof(int));
	memset(CMSG_DATA(cmh), -1, sizeof(int));

	/* Retry only on EINTR/EAGAIN; anything else is passed to caller. */
	do {
		ret = recvmsg(vclient->fd, &mh, 0);
		if (ret >= 0 || (errno != EINTR && errno != EAGAIN))
			break;
	} while (true);

	/* Did a descriptor actually arrive? (-1 sentinel means no) */
	if (cmh->cmsg_len == CMSG_LEN(sizeof(int))) {
		int fd;

		memcpy(&fd, CMSG_DATA(cmh), sizeof(int));
		if (fd != -1) {
			if (pass_fd)
				*pass_fd = fd;
			else
				/* Caller doesn't want it: don't leak it. */
				close(fd);
		}
	}
	return ret;
}
|
|
|
|
|
2018-05-15 00:13:03 +02:00
|
|
|
/*
 * Send a CLI command to a client and read the response.
 *
 * Output will be printed to vty->of. If you want to suppress output, set that
 * to NULL.
 *
 * The daemon terminates its response with three NUL bytes followed by a
 * one-byte status code (see the terminator handling below).
 *
 * vclient
 *    the client to send the command to
 *
 * line
 *    the command to send
 *
 * callback
 *    if non-null, this will be called with each line of output received from
 *    the client passed in the second parameter
 *
 * cbarg
 *    optional first argument to pass to callback
 *
 * pass_fd
 *    if non-null, receives a file descriptor passed by the daemon via
 *    SCM_RIGHTS (see vtysh_client_receive)
 *
 * Returns:
 *    a status code
 */
static int vtysh_client_run(struct vtysh_client *vclient, const char *line,
			    void (*callback)(void *, const char *), void *cbarg,
			    int *pass_fd)
{
	int ret;
	char stackbuf[4096];
	char *buf = stackbuf;
	size_t bufsz = sizeof(stackbuf);
	char *bufvalid, *end = NULL;
	char terminator[3] = {0, 0, 0};

	/* vclient was previously active, try to reconnect */
	if (vclient->fd == VTYSH_WAS_ACTIVE) {
		ret = vtysh_reconnect(vclient);
		if (ret < 0)
			goto out_err;
	}

	/* Daemon not connected: silently succeed. */
	if (vclient->fd < 0)
		return CMD_SUCCESS;

	/* Command is sent including its terminating NUL. */
	ret = write(vclient->fd, line, strlen(line) + 1);
	if (ret <= 0) {
		/* close connection and try to reconnect */
		vclient_close(vclient);
		ret = vtysh_reconnect(vclient);
		if (ret < 0)
			goto out_err;
		/* retry line */
		ret = write(vclient->fd, line, strlen(line) + 1);
		if (ret <= 0)
			goto out_err;
	}

	bufvalid = buf;
	do {
		ssize_t nread;

		/* Keep one byte spare for the '\0' appended below. */
		nread = vtysh_client_receive(
			vclient, bufvalid, buf + bufsz - bufvalid - 1, pass_fd);

		if (nread < 0 && (errno == EINTR || errno == EAGAIN))
			continue;

		if (nread <= 0) {
			if (vty->of)
				vty_out(vty,
					"vtysh: error reading from %s: %s (%d)",
					vclient->name, safe_strerror(errno),
					errno);
			goto out_err;
		}

		bufvalid += nread;

		/* Null terminate so we may pass this to *printf later. */
		bufvalid[0] = '\0';

		/*
		 * We expect string output from daemons, so instead of looking
		 * for the full 3 null bytes of the terminator, we check for
		 * just one instead and assume it is the first byte of the
		 * terminator. The presence of the full terminator is checked
		 * later.
		 */
		if (bufvalid - buf >= 4)
			end = memmem(bufvalid - 4, 4, "\0", 1);

		/*
		 * calculate # bytes we have, up to & not including the
		 * terminator if present
		 */
		size_t textlen = (end ? end : bufvalid) - buf;
		bool b = false;

		/* feed line processing callback if present */
		while (callback && bufvalid > buf && (end > buf || !end)) {
			textlen = (end ? end : bufvalid) - buf;
			char *eol = memchr(buf, '\n', textlen);
			if (eol)
				/* line break */
				*eol++ = '\0';
			else if (end == buf)
				/*
				 * no line break, end of input, no text left
				 * before end; nothing to write
				 */
				b = true;
			else if (end)
				/* no nl, end of input, but some text left */
				eol = end;
			else if (bufvalid == buf + bufsz - 1) {
				/*
				 * no nl, no end of input, no buffer space;
				 * realloc
				 */
				char *new;

				bufsz *= 2;
				if (buf == stackbuf) {
					/* first grow: move off the stack */
					new = XMALLOC(MTYPE_TMP, bufsz);
					memcpy(new, stackbuf, sizeof(stackbuf));
				} else
					new = XREALLOC(MTYPE_TMP, buf, bufsz);

				bufvalid = bufvalid - buf + new;
				buf = new;
				/* if end != NULL, we won't be reading more
				 * data... */
				assert(end == NULL);
				b = true;
			} else
				b = true;

			if (b)
				break;

			/* eol is at line end now, either \n => \0 or \0\0\0 */
			assert(eol && eol <= bufvalid);

			if (vty->of)
				vty_out(vty, "%s\n", buf);

			callback(cbarg, buf);

			/* shift back data and adjust bufvalid */
			memmove(buf, eol, bufvalid - eol);
			bufvalid -= eol - buf;
			if (end)
				end -= eol - buf;
		}

		/* else if no callback, dump raw */
		if (!callback) {
			if (vty->of)
				vty_out(vty, "%s", buf);
			memmove(buf, buf + textlen, bufvalid - buf - textlen);
			bufvalid -= textlen;
			if (end)
				end -= textlen;

			/*
			 * ----------------------------------------------------
			 * At this point `buf` should be in one of two states:
			 * - Empty (i.e. buf == bufvalid)
			 * - Contains up to 4 bytes of the terminator
			 * ----------------------------------------------------
			 */
			assert(((buf == bufvalid)
				|| (bufvalid - buf <= 4 && buf[0] == 0x00)));
		}

		/* if we have the terminator, break; the byte after the three
		 * NULs is the daemon's status code */
		if (end && bufvalid - buf == 4) {
			assert(!memcmp(buf, terminator, 3));
			ret = buf[3];
			break;
		}

	} while (true);
	goto out;

out_err:
	vclient_close(vclient);
	ret = CMD_SUCCESS;
out:
	if (buf != stackbuf)
		XFREE(MTYPE_TMP, buf);
	return ret;
}
|
|
|
|
|
2016-10-12 17:05:51 +02:00
|
|
|
static int vtysh_client_run_all(struct vtysh_client *head_client,
|
2018-05-15 00:13:03 +02:00
|
|
|
const char *line, int continue_on_err,
|
2017-05-10 16:38:48 +02:00
|
|
|
void (*callback)(void *, const char *),
|
|
|
|
void *cbarg)
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
{
|
|
|
|
struct vtysh_client *client;
|
2016-10-12 17:05:51 +02:00
|
|
|
int rc, rc_all = CMD_SUCCESS;
|
2017-08-04 01:34:17 +02:00
|
|
|
int correct_instance = 0, wrong_instance = 0;
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
|
2016-10-12 17:05:51 +02:00
|
|
|
for (client = head_client; client; client = client->next) {
|
2019-12-04 00:17:50 +01:00
|
|
|
rc = vtysh_client_run(client, line, callback, cbarg, NULL);
|
2017-08-04 01:34:17 +02:00
|
|
|
if (rc == CMD_NOT_MY_INSTANCE) {
|
|
|
|
wrong_instance++;
|
|
|
|
continue;
|
|
|
|
}
|
2017-09-28 20:01:05 +02:00
|
|
|
if (client->fd > 0)
|
|
|
|
correct_instance++;
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
if (rc != CMD_SUCCESS) {
|
2016-10-12 17:05:51 +02:00
|
|
|
if (!continue_on_err)
|
|
|
|
return rc;
|
|
|
|
rc_all = rc;
|
|
|
|
}
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
}
|
2019-08-23 22:05:10 +02:00
|
|
|
if (wrong_instance && !correct_instance && vty->of) {
|
2018-05-15 00:13:03 +02:00
|
|
|
vty_out(vty,
|
2017-08-06 04:17:13 +02:00
|
|
|
"%% [%s]: command ignored as it targets an instance that is not running\n",
|
2017-08-04 01:34:17 +02:00
|
|
|
head_client->name);
|
2017-08-04 01:34:17 +02:00
|
|
|
rc_all = CMD_WARNING_CONFIG_FAILED;
|
2017-08-04 01:34:17 +02:00
|
|
|
}
|
2016-10-12 17:05:51 +02:00
|
|
|
return rc_all;
|
|
|
|
}
|
|
|
|
|
2018-05-15 00:13:03 +02:00
|
|
|
/*
 * Execute a single command against all daemons, stopping at the first
 * failure.
 *
 * head_client
 *    where to start walking in the daemon list
 *
 * line
 *    the specific command to execute
 *
 * Returns:
 *    a status code (CMD_SUCCESS, or the first non-success result)
 */
static int vtysh_client_execute(struct vtysh_client *head_client,
				const char *line)
{
	/* continue_on_err = 0: bail out on the first daemon that fails;
	 * no output callback, so daemon output goes to the user as-is. */
	return vtysh_client_run_all(head_client, line, 0, NULL, NULL);
}
|
|
|
|
|
2019-06-13 23:29:33 +02:00
|
|
|
/* Execute by name */
|
|
|
|
static int vtysh_client_execute_name(const char *name, const char *line)
|
|
|
|
{
|
|
|
|
int ret = CMD_SUCCESS;
|
|
|
|
int idx_client = -1;
|
|
|
|
|
|
|
|
idx_client = vtysh_client_lookup(name);
|
|
|
|
if (idx_client != -1)
|
|
|
|
ret = vtysh_client_execute(&vtysh_client[idx_client], line);
|
|
|
|
else {
|
|
|
|
vty_out(vty, "Client not found\n");
|
|
|
|
ret = CMD_WARNING;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2018-05-15 00:13:03 +02:00
|
|
|
/*
|
|
|
|
* Retrieve all running config from daemons and parse it with the vtysh config
|
|
|
|
* parser. Returned output is not displayed to the user.
|
|
|
|
*
|
|
|
|
* head_client
|
|
|
|
* where to start walking in the daemon list
|
|
|
|
*
|
|
|
|
* line
|
|
|
|
* the specific command to execute
|
|
|
|
*/
|
2016-10-12 17:05:51 +02:00
|
|
|
static void vtysh_client_config(struct vtysh_client *head_client, char *line)
|
|
|
|
{
|
2017-08-15 13:43:50 +02:00
|
|
|
/* watchfrr currently doesn't load any config, and has some hardcoded
|
|
|
|
* settings that show up in "show run". skip it here (for now at
|
|
|
|
* least) so we don't get that mangled up in config-write.
|
|
|
|
*/
|
|
|
|
if (head_client->flag == VTYSH_WATCHFRR)
|
|
|
|
return;
|
|
|
|
|
2018-05-15 00:13:03 +02:00
|
|
|
/* suppress output to user */
|
|
|
|
vty->of_saved = vty->of;
|
|
|
|
vty->of = NULL;
|
|
|
|
vtysh_client_run_all(head_client, line, 1, vtysh_config_parse_line,
|
|
|
|
NULL);
|
|
|
|
vty->of = vty->of_saved;
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Command execution over the vty interface.
 *
 * line
 *    the raw command line as typed/read
 *
 * pager
 *    non-zero to pipe daemon output through the configured pager
 *
 * Returns a CMD_* status code.  Handles node walk-up (retrying the command
 * in parent nodes when it fails in the current one) and, for daemon-bound
 * commands (CMD_SUCCESS_DAEMON), forwards the line to each matching daemon.
 */
static int vtysh_execute_func(const char *line, int pager)
{
	int ret, cmd_stat;
	unsigned int i;
	vector vline;
	const struct cmd_element *cmd;
	int tried = 0;
	int saved_ret, saved_node;

	/* Split readline string up into the vector. */
	vline = cmd_make_strvec(line);

	/* Empty/whitespace-only line: nothing to do. */
	if (vline == NULL)
		return CMD_SUCCESS;

	/* Optionally prefix output with a timestamp; skipped for "exit"
	 * so the pager/popen hack below isn't disturbed. */
	if (vtysh_add_timestamp && strncmp(line, "exit", 4)) {
		char ts[48];

		(void)frr_timestamp(3, ts, sizeof(ts));
		vty_out(vty, "%% %s\n\n", ts);
	}

	saved_ret = ret = cmd_execute(vty, line, &cmd, 1);
	saved_node = vty->node;

	/*
	 * If the command didn't succeed in the current node, try to walk up
	 * the node tree. Changing vty->node is enough to try it out without
	 * an actual walkup in the vtysh.
	 */
	while (ret != CMD_SUCCESS && ret != CMD_SUCCESS_DAEMON
	       && ret != CMD_WARNING && ret != CMD_WARNING_CONFIG_FAILED
	       && ret != CMD_ERR_AMBIGUOUS && ret != CMD_ERR_INCOMPLETE
	       && vty->node > CONFIG_NODE) {
		vty->node = node_parent(vty->node);
		ret = cmd_execute(vty, line, &cmd, 1);
		tried++;
	}

	vty->node = saved_node;

	/*
	 * If the command succeeded in any other node than the current one
	 * (tried > 0), we have to move into the node in the vtysh where it
	 * succeeded.
	 */
	if (ret == CMD_SUCCESS || ret == CMD_SUCCESS_DAEMON
	    || ret == CMD_WARNING) {
		while (tried-- > 0)
			vtysh_execute("exit");
	}
	/*
	 * If the command didn't succeed in any node, continue with the return
	 * value from the first try.
	 */
	else if (tried) {
		ret = saved_ret;
	}

	cmd_free_strvec(vline);

	cmd_stat = ret;
	switch (ret) {
	case CMD_WARNING:
	case CMD_WARNING_CONFIG_FAILED:
		if (vty->type == VTY_FILE)
			vty_out(vty, "Warning...\n");
		break;
	case CMD_ERR_AMBIGUOUS:
		vty_out(vty, "%% Ambiguous command: %s\n", line);
		break;
	case CMD_ERR_NO_MATCH:
		vty_out(vty, "%% Unknown command: %s\n", line);
		break;
	case CMD_ERR_INCOMPLETE:
		vty_out(vty, "%% Command incomplete: %s\n", line);
		break;
	case CMD_SUCCESS_DAEMON: {
		/*
		 * FIXME: Don't open pager for exit commands. popen() causes
		 * problems if exited from vtysh at all. This hack shouldn't
		 * cause any problem but is really ugly.
		 */
		if (pager && strncmp(line, "exit", 4))
			vty_open_pager(vty);

		/* "configure" is special: broadcast it to every daemon and,
		 * if any refused (e.g. config session already taken), back
		 * out by executing "end" locally. */
		if (!strcmp(cmd->string, "configure")) {
			for (i = 0; i < array_size(vtysh_client); i++) {
				cmd_stat = vtysh_client_execute(
					&vtysh_client[i], line);
				if (cmd_stat == CMD_WARNING)
					break;
			}

			if (cmd_stat) {
				line = "end";
				vline = cmd_make_strvec(line);

				if (vline == NULL) {
					if (vty->is_paged)
						vty_close_pager(vty);
					return CMD_SUCCESS;
				}

				ret = cmd_execute_command(vline, vty, &cmd, 1);
				cmd_free_strvec(vline);
				if (ret != CMD_SUCCESS_DAEMON)
					break;
			} else if (cmd->func) {
				(*cmd->func)(cmd, vty, 0, NULL);
				break;
			}
		}

		cmd_stat = CMD_SUCCESS;
		struct vtysh_client *vc;
		for (i = 0; i < array_size(vtysh_client); i++) {
			if (cmd->daemon & vtysh_client[i].flag) {
				/* A daemon that was connected earlier may
				 * have restarted; try reconnecting any
				 * previously-active instances first. */
				if (vtysh_client[i].fd < 0
				    && (cmd->daemon == vtysh_client[i].flag)) {
					for (vc = &vtysh_client[i]; vc;
					     vc = vc->next)
						if (vc->fd == VTYSH_WAS_ACTIVE)
							vtysh_reconnect(vc);
				}
				/* Still no live instance of the targeted
				 * daemon: report and stop. */
				if (vtysh_client[i].fd < 0
				    && (cmd->daemon == vtysh_client[i].flag)) {
					bool any_inst = false;
					for (vc = &vtysh_client[i]; vc;
					     vc = vc->next)
						any_inst = any_inst
							   || (vc->fd > 0);
					if (!any_inst) {
						fprintf(stderr,
							"%s is not running\n",
							vtysh_client[i].name);
						cmd_stat = CMD_ERR_NO_DAEMON;
						break;
					}
				}
				cmd_stat = vtysh_client_execute(
					&vtysh_client[i], line);
				if (cmd_stat != CMD_SUCCESS)
					break;
			}
		}
		/* CMD_ERR_NO_DAEMON is tolerated here so the local handler
		 * below still runs when a daemon simply isn't up. */
		if (cmd_stat != CMD_SUCCESS && cmd_stat != CMD_ERR_NO_DAEMON)
			break;

		if (cmd->func)
			(*cmd->func)(cmd, vty, 0, NULL);
	}
	}
	if (vty->is_paged)
		vty_close_pager(vty);

	return cmd_stat;
}
|
|
|
|
|
2004-10-07 23:40:25 +02:00
|
|
|
/* Execute a command with pager output disabled. */
int vtysh_execute_no_pager(const char *line)
{
	return vtysh_execute_func(line, 0);
}
|
|
|
|
|
2004-10-07 23:40:25 +02:00
|
|
|
/* Execute a command with pager output enabled (interactive use). */
int vtysh_execute(const char *line)
{
	return vtysh_execute_func(line, 1);
}
|
|
|
|
|
2016-08-18 19:47:01 +02:00
|
|
|
/* Strip leading and trailing whitespace from s, in place.
 *
 * The trailing whitespace is removed by writing a NUL terminator; the
 * returned pointer points at the first non-whitespace character inside
 * the original buffer (it may differ from s, so callers must not free
 * the return value).
 */
static char *trim(char *s)
{
	char *tail;
	size_t len = strlen(s);

	if (len == 0)
		return s;

	/* Cut off trailing whitespace. */
	tail = s + len;
	while (tail > s && isspace((unsigned char)tail[-1]))
		tail--;
	*tail = '\0';

	/* Skip past leading whitespace. */
	while (isspace((unsigned char)*s))
		s++;

	return s;
}
|
|
|
|
|
2015-05-20 03:29:15 +02:00
|
|
|
/* Read a config file (or stdin when filename is "-"), execute each line in
 * marking mode and echo it back with "exit"/"end" markers inserted so the
 * output is a normalized, node-annotated config.  Returns CMD_SUCCESS (0)
 * on success or the first fatal CMD_* error encountered. */
int vtysh_mark_file(const char *filename)
{
	struct vty *vty;
	FILE *confp = NULL;
	int ret;
	vector vline;
	int tried = 0;
	const struct cmd_element *cmd;
	int saved_ret, prev_node;
	int lineno = 0;
	char *vty_buf_copy = NULL;
	char *vty_buf_trimmed = NULL;

	if (strncmp("-", filename, 1) == 0)
		confp = stdin;
	else
		confp = fopen(filename, "r");

	if (confp == NULL) {
		fprintf(stderr, "%% Can't open config file %s due to '%s'.\n",
			filename, safe_strerror(errno));
		return CMD_ERR_NO_FILE;
	}

	vty = vty_new();
	vty->wfd = STDOUT_FILENO;
	vty->type = VTY_TERM;
	vty->node = CONFIG_NODE;

	vtysh_execute_no_pager("enable");
	vtysh_execute_no_pager("configure");
	vty_buf_copy = XCALLOC(MTYPE_VTYSH_CMD, VTY_BUFSIZ);

	while (fgets(vty->buf, VTY_BUFSIZ, confp)) {
		lineno++;
		tried = 0;
		strlcpy(vty_buf_copy, vty->buf, VTY_BUFSIZ);
		vty_buf_trimmed = trim(vty_buf_copy);

		/* Comment lines are echoed verbatim, not executed. */
		if (vty_buf_trimmed[0] == '!' || vty_buf_trimmed[0] == '#') {
			vty_out(vty, "%s", vty->buf);
			continue;
		}

		/* Split readline string up into the vector. */
		vline = cmd_make_strvec(vty->buf);

		/* Blank line: echo and move on. */
		if (vline == NULL) {
			vty_out(vty, "%s", vty->buf);
			continue;
		}

		/*
		 * Ignore the "end" lines, we will generate these where
		 * appropriate
		 */
		if (strlen(vty_buf_trimmed) == 3
		    && strncmp("end", vty_buf_trimmed, 3) == 0) {
			cmd_free_strvec(vline);
			continue;
		}

		prev_node = vty->node;
		saved_ret = ret = cmd_execute_command_strict(vline, vty, &cmd);

		/*
		 * If command doesn't succeeded in current node, try to walk up
		 * in node tree. Changing vty->node is enough to try it just
		 * out without actual walkup in the vtysh.
		 */
		while (ret != CMD_SUCCESS && ret != CMD_SUCCESS_DAEMON
		       && ret != CMD_WARNING && ret != CMD_WARNING_CONFIG_FAILED
		       && ret != CMD_ERR_AMBIGUOUS && ret != CMD_ERR_INCOMPLETE
		       && vty->node > CONFIG_NODE) {
			vty->node = node_parent(vty->node);
			ret = cmd_execute_command_strict(vline, vty, &cmd);
			tried++;
		}

		/*
		 * If command succeeded in any other node than current (tried >
		 * 0) we have to move into node in the vtysh where it
		 * succeeded.
		 */
		if (ret == CMD_SUCCESS || ret == CMD_SUCCESS_DAEMON
		    || ret == CMD_WARNING) {
			/* Emit one "exit" marker per level walked up. */
			while (tried-- > 0)
				vty_out(vty, "exit\n");
		}
		/*
		 * If command didn't succeed in any node, continue with return
		 * value from first try.
		 */
		else if (tried) {
			ret = saved_ret;
			vty->node = prev_node;
		}

		cmd_free_strvec(vline);
		switch (ret) {
		case CMD_WARNING:
		case CMD_WARNING_CONFIG_FAILED:
			if (vty->type == VTY_FILE)
				fprintf(stderr, "line %d: Warning...: %s\n",
					lineno, vty->buf);
			/* NOTE(review): when reading from stdin ("-"),
			 * confp == stdin and this fclose() closes stdin —
			 * unlike the success path below, which guards it.
			 * Verify whether that is intentional. */
			fclose(confp);
			vty_close(vty);
			XFREE(MTYPE_VTYSH_CMD, vty_buf_copy);
			return ret;
		case CMD_ERR_AMBIGUOUS:
			fprintf(stderr, "line %d: %% Ambiguous command: %s\n",
				lineno, vty->buf);
			fclose(confp);
			vty_close(vty);
			XFREE(MTYPE_VTYSH_CMD, vty_buf_copy);
			return CMD_ERR_AMBIGUOUS;
		case CMD_ERR_NO_MATCH:
			fprintf(stderr, "line %d: %% Unknown command: %s\n",
				lineno, vty->buf);
			fclose(confp);
			vty_close(vty);
			XFREE(MTYPE_VTYSH_CMD, vty_buf_copy);
			return CMD_ERR_NO_MATCH;
		case CMD_ERR_INCOMPLETE:
			fprintf(stderr, "line %d: %% Command incomplete: %s\n",
				lineno, vty->buf);
			fclose(confp);
			vty_close(vty);
			XFREE(MTYPE_VTYSH_CMD, vty_buf_copy);
			return CMD_ERR_INCOMPLETE;
		case CMD_SUCCESS:
			vty_out(vty, "%s", vty->buf);
			if (strmatch(vty_buf_trimmed, "exit-vrf"))
				vty_out(vty, "end\n");
			break;
		case CMD_SUCCESS_DAEMON: {
			int cmd_stat;

			vty_out(vty, "%s", vty->buf);
			if (strmatch(vty_buf_trimmed, "exit-vrf"))
				vty_out(vty, "end\n");
			cmd_stat = vtysh_client_execute(&vtysh_client[0],
							vty->buf);
			if (cmd_stat != CMD_SUCCESS)
				break;

			if (cmd->func)
				(*cmd->func)(cmd, vty, 0, NULL);
		}
		}
	}
	/* This is the end */
	vty_out(vty, "\nend\n");
	vty_close(vty);
	XFREE(MTYPE_VTYSH_CMD, vty_buf_copy);

	if (confp != stdin)
		fclose(confp);

	return 0;
}
|
|
|
|
|
2022-02-14 18:56:04 +01:00
|
|
|
/* Configuration make from file.
 *
 * Read commands from fp line by line and apply them via vtysh's config
 * reader, forwarding daemon-bound commands to every matching daemon.
 * Parsing continues after errors; the first error code seen is returned
 * (CMD_SUCCESS when everything applied cleanly).
 */
int vtysh_config_from_file(struct vty *vty, FILE *fp)
{
	int ret;
	const struct cmd_element *cmd;
	int lineno = 0;
	/* once we have an error, we remember & return that */
	int retcode = CMD_SUCCESS;
	char *vty_buf_copy = XCALLOC(MTYPE_VTYSH_CMD, VTY_BUFSIZ);
	char *vty_buf_trimmed = NULL;

	while (fgets(vty->buf, VTY_BUFSIZ, fp)) {
		lineno++;

		strlcpy(vty_buf_copy, vty->buf, VTY_BUFSIZ);
		vty_buf_trimmed = trim(vty_buf_copy);

		/*
		 * Ignore the "end" lines, we will generate these where
		 * appropriate, otherwise we never execute
		 * XFRR_end_configuration, and start/end markers do not work.
		 */
		if (strmatch(vty_buf_trimmed, "end"))
			continue;

		ret = command_config_read_one_line(vty, &cmd, lineno, 1);

		switch (ret) {
		case CMD_WARNING:
		case CMD_WARNING_CONFIG_FAILED:
			if (vty->type == VTY_FILE)
				fprintf(stderr, "line %d: Warning[%d]...: %s\n",
					lineno, vty->node, vty->buf);
			retcode = ret;

			break;
		case CMD_ERR_AMBIGUOUS:
			fprintf(stderr,
				"line %d: %% Ambiguous command[%d]: %s\n",
				lineno, vty->node, vty->buf);
			retcode = CMD_ERR_AMBIGUOUS;
			break;
		case CMD_ERR_NO_MATCH:
			fprintf(stderr, "line %d: %% Unknown command[%d]: %s",
				lineno, vty->node, vty->buf);
			retcode = CMD_ERR_NO_MATCH;
			break;
		case CMD_ERR_INCOMPLETE:
			fprintf(stderr,
				"line %d: %% Command incomplete[%d]: %s\n",
				lineno, vty->node, vty->buf);
			retcode = CMD_ERR_INCOMPLETE;
			break;
		case CMD_SUCCESS_DAEMON: {
			unsigned int i;
			int cmd_stat = CMD_SUCCESS;

			/* Forward the line to every daemon whose flag bit
			 * is set in the command's daemon mask. */
			for (i = 0; i < array_size(vtysh_client); i++) {
				if (cmd->daemon & vtysh_client[i].flag) {
					cmd_stat = vtysh_client_execute(
						&vtysh_client[i], vty->buf);
					/*
					 * CMD_WARNING - Can mean that the
					 * command was parsed successfully but
					 * it was already entered in a few
					 * spots. As such if we receive a
					 * CMD_WARNING from a daemon we
					 * shouldn't stop talking to the other
					 * daemons for the particular command.
					 */
					if (cmd_stat != CMD_SUCCESS
					    && cmd_stat != CMD_WARNING) {
						fprintf(stderr,
							"line %d: Failure to communicate[%d] to %s, line: %s\n",
							lineno, cmd_stat,
							vtysh_client[i].name,
							vty->buf);
						retcode = cmd_stat;
						break;
					}
				}
			}
			/* Skip the local handler if any daemon hard-failed. */
			if (cmd_stat != CMD_SUCCESS)
				break;

			if (cmd->func)
				(*cmd->func)(cmd, vty, 0, NULL);
		}
		}
	}

	XFREE(MTYPE_VTYSH_CMD, vty_buf_copy);

	return (retcode);
}
|
|
|
|
|
2018-05-11 07:15:48 +02:00
|
|
|
/*
|
|
|
|
* Function processes cli commands terminated with '?' character when entered
|
|
|
|
* through either 'vtysh' or 'vtysh -c' interfaces.
|
|
|
|
*/
|
|
|
|
static int vtysh_process_questionmark(const char *input, int input_len)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
2018-05-11 07:15:48 +02:00
|
|
|
int ret, width = 0;
|
2016-11-12 02:06:32 +01:00
|
|
|
unsigned int i;
|
2018-05-11 07:15:48 +02:00
|
|
|
vector vline, describe;
|
2016-11-12 02:06:32 +01:00
|
|
|
struct cmd_token *token;
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2018-05-11 07:15:48 +02:00
|
|
|
if (!input)
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
vline = cmd_make_strvec(input);
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2015-09-28 22:17:36 +02:00
|
|
|
/* In case of '> ?'. */
|
|
|
|
if (vline == NULL) {
|
|
|
|
vline = vector_init(1);
|
|
|
|
vector_set(vline, NULL);
|
2019-08-06 16:54:52 +02:00
|
|
|
} else if (input_len && isspace((unsigned char)input[input_len - 1]))
|
2015-09-28 22:17:36 +02:00
|
|
|
vector_set(vline, NULL);
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2017-10-25 11:43:32 +02:00
|
|
|
describe = cmd_describe_command(vline, vty, &ret);
|
|
|
|
|
2017-07-13 21:56:08 +02:00
|
|
|
/* Ambiguous and no match error. */
|
|
|
|
switch (ret) {
|
2002-12-13 21:15:29 +01:00
|
|
|
case CMD_ERR_AMBIGUOUS:
|
2016-04-06 15:34:33 +02:00
|
|
|
cmd_free_strvec(vline);
|
2017-10-25 11:43:32 +02:00
|
|
|
vector_free(describe);
|
2018-05-15 00:13:03 +02:00
|
|
|
vty_out(vty, "%% Ambiguous command.\n");
|
|
|
|
rl_on_new_line();
|
2016-04-06 15:34:33 +02:00
|
|
|
return 0;
|
2002-12-13 21:15:29 +01:00
|
|
|
case CMD_ERR_NO_MATCH:
|
2016-04-06 15:34:33 +02:00
|
|
|
cmd_free_strvec(vline);
|
2017-11-02 16:50:13 +01:00
|
|
|
if (describe)
|
|
|
|
vector_free(describe);
|
2018-05-15 00:13:03 +02:00
|
|
|
vty_out(vty, "%% There is no matched command.\n");
|
|
|
|
rl_on_new_line();
|
2016-04-06 15:34:33 +02:00
|
|
|
return 0;
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
2017-05-10 16:38:48 +02:00
|
|
|
|
|
|
|
/* Get width of command string. */
|
2002-12-13 21:15:29 +01:00
|
|
|
width = 0;
|
2017-05-10 16:38:48 +02:00
|
|
|
for (i = 0; i < vector_active(describe); i++)
|
|
|
|
if ((token = vector_slot(describe, i)) != NULL) {
|
|
|
|
if (token->text[0] == '\0')
|
|
|
|
continue;
|
|
|
|
|
2017-07-12 20:40:18 +02:00
|
|
|
int len = strlen(token->text);
|
|
|
|
|
2016-10-01 03:03:24 +02:00
|
|
|
if (width < len)
|
2017-07-14 13:18:14 +02:00
|
|
|
width = len;
|
2017-05-10 16:38:48 +02:00
|
|
|
}
|
2017-07-12 20:40:18 +02:00
|
|
|
|
2017-05-10 16:38:48 +02:00
|
|
|
for (i = 0; i < vector_active(describe); i++)
|
2013-09-30 14:27:51 +02:00
|
|
|
if ((token = vector_slot(describe, i)) != NULL) {
|
|
|
|
if (!token->desc)
|
2018-05-15 00:13:03 +02:00
|
|
|
vty_out(vty, " %-s\n", token->text);
|
2017-05-10 16:38:48 +02:00
|
|
|
else
|
2018-05-15 00:13:03 +02:00
|
|
|
vty_out(vty, " %-*s %s\n", width, token->text,
|
|
|
|
token->desc);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-05-10 16:38:48 +02:00
|
|
|
if (IS_VARYING_TOKEN(token->type)) {
|
|
|
|
const char *ref = vector_slot(
|
|
|
|
vline, vector_active(vline) - 1);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-05-10 16:38:48 +02:00
|
|
|
vector varcomps = vector_init(VECTOR_MIN_SIZE);
|
|
|
|
cmd_variable_complete(token, ref, varcomps);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-05-10 16:38:48 +02:00
|
|
|
if (vector_active(varcomps) > 0) {
|
2017-07-12 20:40:18 +02:00
|
|
|
int rows, cols;
|
|
|
|
rl_get_screen_size(&rows, &cols);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-07-14 13:18:14 +02:00
|
|
|
char *ac = cmd_variable_comp2str(
|
|
|
|
varcomps, cols);
|
2018-05-15 00:13:03 +02:00
|
|
|
vty_out(vty, "%s\n", ac);
|
2017-07-12 20:40:18 +02:00
|
|
|
XFREE(MTYPE_TMP, ac);
|
2017-07-17 14:03:14 +02:00
|
|
|
}
|
|
|
|
|
2017-05-10 16:38:48 +02:00
|
|
|
vector_free(varcomps);
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
}
|
2002-12-13 21:15:29 +01:00
|
|
|
|
|
|
|
cmd_free_strvec(vline);
|
|
|
|
vector_free(describe);
|
|
|
|
|
2018-05-11 07:15:48 +02:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Entry point for user commands terminated with '?' character and typed through
|
|
|
|
* the usual vtysh's stdin interface. This is the function being registered with
|
|
|
|
* readline() api's.
|
|
|
|
*/
|
2018-06-08 01:49:17 +02:00
|
|
|
static int vtysh_rl_describe(int a, int b)
|
2018-05-11 07:15:48 +02:00
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
2018-05-15 00:13:03 +02:00
|
|
|
vty_out(vty, "\n");
|
2018-05-11 07:15:48 +02:00
|
|
|
|
|
|
|
ret = vtysh_process_questionmark(rl_line_buffer, rl_end);
|
2002-12-13 21:15:29 +01:00
|
|
|
rl_on_new_line();
|
|
|
|
|
2018-05-11 07:15:48 +02:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Handle a '?'-terminated instruction received through the 'vtysh -c'
 * interface.  Well-formed input (exactly one '?', located at the very
 * end) is forwarded to the same processing routine used by the
 * traditional vtysh stdin interface.
 *
 * Returns 1 for empty or malformed input, otherwise the result of
 * vtysh_process_questionmark().
 */
int vtysh_execute_command_questionmark(char *input)
{
	const char *cursor;
	int qmark_count = 0;
	int input_len;

	if (!input || !*input)
		return 1;

	/* Count the question marks while measuring the string length. */
	for (cursor = input; *cursor; ++cursor)
		if (*cursor == '?')
			qmark_count++;
	input_len = (int)(cursor - input);

	/*
	 * The input must terminate in '?' and contain no other '?';
	 * this rejects patterns such as 'cmd ? subcmd ?'.
	 */
	if (qmark_count != 1 || input[input_len - 1] != '?')
		return 1;

	/*
	 * The questionmark-processing function does not expect the '?'
	 * character itself, so strip it in place.
	 */
	input[input_len - 1] = '\0';

	return vtysh_process_questionmark(input, input_len - 1);
}
|
|
|
|
|
2004-08-26 15:08:30 +02:00
|
|
|
/* Result of the most recent cmd_complete_command() call, stored by
 * command_generator() and read by new_completion() so that a trailing
 * space is appended only after a full match. */
int complete_status;
|
|
|
|
|
2003-04-19 01:55:29 +02:00
|
|
|
static char *command_generator(const char *text, int state)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
|
|
|
vector vline;
|
|
|
|
static char **matched = NULL;
|
|
|
|
static int index = 0;
|
|
|
|
|
|
|
|
/* First call. */
|
|
|
|
if (!state) {
|
|
|
|
index = 0;
|
|
|
|
|
|
|
|
if (vty->node == AUTH_NODE || vty->node == AUTH_ENABLE_NODE)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
vline = cmd_make_strvec(rl_line_buffer);
|
|
|
|
if (vline == NULL)
|
|
|
|
return NULL;
|
|
|
|
|
2019-08-06 16:54:52 +02:00
|
|
|
if (rl_end &&
|
|
|
|
isspace((unsigned char)rl_line_buffer[rl_end - 1]))
|
2016-05-16 03:07:50 +02:00
|
|
|
vector_set(vline, NULL);
|
2002-12-13 21:15:29 +01:00
|
|
|
|
|
|
|
matched = cmd_complete_command(vline, vty, &complete_status);
|
2016-09-09 23:58:33 +02:00
|
|
|
cmd_free_strvec(vline);
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
|
|
|
|
2020-11-30 16:50:51 +01:00
|
|
|
if (matched && matched[index]) {
|
|
|
|
XCOUNTFREE(MTYPE_COMPLETION, matched[index]);
|
2002-12-13 21:15:29 +01:00
|
|
|
return matched[index++];
|
2020-11-30 16:50:51 +01:00
|
|
|
}
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2017-01-26 21:57:24 +01:00
|
|
|
XFREE(MTYPE_TMP, matched);
|
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2018-09-07 20:30:24 +02:00
|
|
|
static char **new_completion(const char *text, int start, int end)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
|
|
|
char **matches;
|
|
|
|
|
2003-04-19 01:55:29 +02:00
|
|
|
matches = rl_completion_matches(text, command_generator);
|
2002-12-13 21:15:29 +01:00
|
|
|
|
|
|
|
if (matches) {
|
|
|
|
rl_point = rl_end;
|
2013-03-04 10:23:30 +01:00
|
|
|
if (complete_status != CMD_COMPLETE_FULL_MATCH)
|
|
|
|
/* only append a space on full match */
|
|
|
|
rl_completion_append_character = '\0';
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
return matches;
|
|
|
|
}
|
|
|
|
|
2004-08-26 15:08:30 +02:00
|
|
|
/* Vty node structures: one cmd_node per CLI mode known to vtysh.
 * vtysh only needs each mode's identity, parent and prompt; the actual
 * command handlers live in the daemons. */
#ifdef HAVE_BGPD
/* "router bgp" configuration mode. */
static struct cmd_node bgp_node = {
	.name = "bgp",
	.node = BGP_NODE,
	.parent_node = CONFIG_NODE,
	.prompt = "%s(config-router)# ",
};
#endif /* HAVE_BGPD */

/* "router rip" configuration mode. */
static struct cmd_node rip_node = {
	.name = "rip",
	.node = RIP_NODE,
	.parent_node = CONFIG_NODE,
	.prompt = "%s(config-router)# ",
};

#ifdef HAVE_ISISD
/* "router isis" configuration mode. */
static struct cmd_node isis_node = {
	.name = "isis",
	.node = ISIS_NODE,
	.parent_node = CONFIG_NODE,
	.prompt = "%s(config-router)# ",
};
#endif /* HAVE_ISISD */

#ifdef HAVE_FABRICD
/* "router openfabric" configuration mode. */
static struct cmd_node openfabric_node = {
	.name = "openfabric",
	.node = OPENFABRIC_NODE,
	.parent_node = CONFIG_NODE,
	.prompt = "%s(config-router)# ",
};
#endif /* HAVE_FABRICD */

/* "interface IFNAME" configuration mode. */
static struct cmd_node interface_node = {
	.name = "interface",
	.node = INTERFACE_NODE,
	.parent_node = CONFIG_NODE,
	.prompt = "%s(config-if)# ",
};
|
|
|
|
|
2017-08-02 01:16:28 +02:00
|
|
|
/* Pseudowire ("pseudowire IFNAME") configuration mode. */
static struct cmd_node pw_node = {
	.name = "pw",
	.node = PW_NODE,
	.parent_node = CONFIG_NODE,
	.prompt = "%s(config-pw)# ",
};

/* Top-level "segment-routing" configuration mode. */
static struct cmd_node segment_routing_node = {
	.name = "segment-routing",
	.node = SEGMENT_ROUTING_NODE,
	.parent_node = CONFIG_NODE,
	.prompt = "%s(config-sr)# ",
};
|
|
|
|
|
2021-06-05 18:34:46 +02:00
|
|
|
#if defined(HAVE_PATHD)
/* "traffic-eng" mode under segment-routing (pathd). */
static struct cmd_node sr_traffic_eng_node = {
	.name = "sr traffic-eng",
	.node = SR_TRAFFIC_ENG_NODE,
	.parent_node = SEGMENT_ROUTING_NODE,
	.prompt = "%s(config-sr-te)# ",
};

/* SR-TE segment-list definition mode. */
static struct cmd_node srte_segment_list_node = {
	.name = "srte segment-list",
	.node = SR_SEGMENT_LIST_NODE,
	.parent_node = SR_TRAFFIC_ENG_NODE,
	.prompt = "%s(config-sr-te-segment-list)# ",
};

/* SR-TE policy definition mode. */
static struct cmd_node srte_policy_node = {
	.name = "srte policy",
	.node = SR_POLICY_NODE,
	.parent_node = SR_TRAFFIC_ENG_NODE,
	.prompt = "%s(config-sr-te-policy)# ",
};

/* Dynamic candidate-path mode under an SR-TE policy. */
static struct cmd_node srte_candidate_dyn_node = {
	.name = "srte candidate-dyn",
	.node = SR_CANDIDATE_DYN_NODE,
	.parent_node = SR_POLICY_NODE,
	.prompt = "%s(config-sr-te-candidate)# ",
};

/* PCEP mode under traffic-eng. */
static struct cmd_node pcep_node = {
	.name = "srte pcep",
	.node = PCEP_NODE,
	.parent_node = SR_TRAFFIC_ENG_NODE,
	.prompt = "%s(config-sr-te-pcep)# "
};

/* PCC (path computation client) mode under pcep. */
static struct cmd_node pcep_pcc_node = {
	.name = "srte pcep pcc",
	.node = PCEP_PCC_NODE,
	.parent_node = PCEP_NODE,
	.prompt = "%s(config-sr-te-pcep-pcc)# ",
};

/* PCE peer definition mode under pcep. */
static struct cmd_node pcep_pce_node = {
	.name = "srte pcep pce-peer",
	.node = PCEP_PCE_NODE,
	.parent_node = PCEP_NODE,
	.prompt = "%s(config-sr-te-pcep-pce-peer)# ",
};
|
|
|
|
|
|
|
|
static struct cmd_node pcep_pce_config_node = {
|
|
|
|
.name = "srte pcep pce-config",
|
|
|
|
.node = PCEP_PCE_CONFIG_NODE,
|
|
|
|
.parent_node = PCEP_NODE,
|
|
|
|
.prompt = "%s(pcep-sr-te-pcep-pce-config)# ",
|
|
|
|
};
|
2021-01-11 12:48:08 +01:00
|
|
|
#endif /* HAVE_PATHD */
|
2020-10-16 16:55:51 +02:00
|
|
|
|
2016-02-02 13:34:29 +01:00
|
|
|
/* "vrf NAME" configuration mode. */
static struct cmd_node vrf_node = {
	.name = "vrf",
	.node = VRF_NODE,
	.parent_node = CONFIG_NODE,
	.prompt = "%s(config-vrf)# ",
};
|
|
|
|
|
pbrd: Add PBR to FRR
This is an implementation of PBR for FRR.
This implemenation uses a combination of rules and
tables to determine how packets will flow.
PBR introduces a new concept of 'nexthop-groups' to
specify a group of nexthops that will be used for
ecmp. Nexthop-groups are specified on the cli via:
nexthop-group DONNA
nexthop 192.168.208.1
nexthop 192.168.209.1
nexthop 192.168.210.1
!
PBR sees the nexthop-group and installs these as a default
route with these nexthops starting at table 10000
robot# show pbr nexthop-groups
Nexthop-Group: DONNA Table: 10001 Valid: 1 Installed: 1
Valid: 1 nexthop 192.168.209.1
Valid: 1 nexthop 192.168.210.1
Valid: 1 nexthop 192.168.208.1
I have also introduced the ability to specify a table
in a 'show ip route table XXX' to see the specified tables.
robot# show ip route table 10001
Codes: K - kernel route, C - connected, S - static, R - RIP,
O - OSPF, I - IS-IS, B - BGP, P - PIM, E - EIGRP, N - NHRP,
T - Table, v - VNC, V - VNC-Direct, A - Babel, D - SHARP,
F - PBR,
> - selected route, * - FIB route
F>* 0.0.0.0/0 [0/0] via 192.168.208.1, enp0s8, 00:14:25
* via 192.168.209.1, enp0s9, 00:14:25
* via 192.168.210.1, enp0s10, 00:14:25
PBR tracks PBR-MAPS via the pbr-map command:
!
pbr-map EVA seq 10
match src-ip 4.3.4.0/24
set nexthop-group DONNA
!
pbr-map EVA seq 20
match dst-ip 4.3.5.0/24
set nexthop-group DONNA
!
pbr-maps can have 'match src-ip <prefix>' and 'match dst-ip <prefix>'
to affect decisions about incoming packets. Additionally if you
only have one nexthop to use for a pbr-map you do not need
to setup a nexthop-group and can specify 'set nexthop XXXX'.
To apply the pbr-map to an incoming interface you do this:
interface enp0s10
pbr-policy EVA
!
When a pbr-map is applied to interfaces it can be installed
into the kernel as a rule:
[sharpd@robot frr1]$ ip rule show
0: from all lookup local
309: from 4.3.4.0/24 iif enp0s10 lookup 10001
319: from all to 4.3.5.0/24 iif enp0s10 lookup 10001
1000: from all lookup [l3mdev-table]
32766: from all lookup main
32767: from all lookup default
[sharpd@robot frr1]$ ip route show table 10001
default proto pbr metric 20
nexthop via 192.168.208.1 dev enp0s8 weight 1
nexthop via 192.168.209.1 dev enp0s9 weight 1
nexthop via 192.168.210.1 dev enp0s10 weight 1
The linux kernel now will use the rules and tables to properly
apply these policies.
Signed-off-by: Donald Sharp <sharpd@cumulusnetworks.com>
Signed-off-by: Don Slice <dslice@cumulusnetworks.com>
Signed-off-by: Quentin Young <qlyoung@cumulusnetworks.com>
2018-01-23 19:11:36 +01:00
|
|
|
/* "nexthop-group NAME" configuration mode (PBR/ECMP nexthop groups). */
static struct cmd_node nh_group_node = {
	.name = "nexthop-group",
	.node = NH_GROUP_NODE,
	.parent_node = CONFIG_NODE,
	.prompt = "%s(config-nh-group)# ",
};
|
|
|
|
|
2018-09-08 21:46:23 +02:00
|
|
|
/* "route-map NAME (permit|deny) SEQ" configuration mode. */
static struct cmd_node rmap_node = {
	.name = "routemap",
	.node = RMAP_NODE,
	.parent_node = CONFIG_NODE,
	.prompt = "%s(config-route-map)# ",
};

/* "srv6" mode under segment-routing. */
static struct cmd_node srv6_node = {
	.name = "srv6",
	.node = SRV6_NODE,
	.parent_node = SEGMENT_ROUTING_NODE,
	.prompt = "%s(config-srv6)# ",
};

/* "locators" container mode under srv6. */
static struct cmd_node srv6_locs_node = {
	.name = "srv6-locators",
	.node = SRV6_LOCS_NODE,
	.parent_node = SRV6_NODE,
	.prompt = "%s(config-srv6-locators)# ",
};

/* Individual "locator NAME" mode under srv6 locators. */
static struct cmd_node srv6_loc_node = {
	.name = "srv6-locator",
	.node = SRV6_LOC_NODE,
	.parent_node = SRV6_LOCS_NODE,
	.prompt = "%s(config-srv6-locator)# ",
};
|
|
|
|
|
2020-09-29 12:12:55 +02:00
|
|
|
#ifdef HAVE_PBRD
|
2018-09-08 21:46:23 +02:00
|
|
|
static struct cmd_node pbr_map_node = {
|
2018-09-09 00:15:50 +02:00
|
|
|
.name = "pbr-map",
|
2018-09-08 21:46:23 +02:00
|
|
|
.node = PBRMAP_NODE,
|
2018-09-08 23:15:09 +02:00
|
|
|
.parent_node = CONFIG_NODE,
|
2018-09-08 21:46:23 +02:00
|
|
|
.prompt = "%s(config-pbr-map)# ",
|
|
|
|
};
|
2020-09-29 12:12:55 +02:00
|
|
|
#endif /* HAVE_PBRD */
|
pbrd: Add PBR to FRR
This is an implementation of PBR for FRR.
This implemenation uses a combination of rules and
tables to determine how packets will flow.
PBR introduces a new concept of 'nexthop-groups' to
specify a group of nexthops that will be used for
ecmp. Nexthop-groups are specified on the cli via:
nexthop-group DONNA
nexthop 192.168.208.1
nexthop 192.168.209.1
nexthop 192.168.210.1
!
PBR sees the nexthop-group and installs these as a default
route with these nexthops starting at table 10000
robot# show pbr nexthop-groups
Nexthop-Group: DONNA Table: 10001 Valid: 1 Installed: 1
Valid: 1 nexthop 192.168.209.1
Valid: 1 nexthop 192.168.210.1
Valid: 1 nexthop 192.168.208.1
I have also introduced the ability to specify a table
in a 'show ip route table XXX' to see the specified tables.
robot# show ip route table 10001
Codes: K - kernel route, C - connected, S - static, R - RIP,
O - OSPF, I - IS-IS, B - BGP, P - PIM, E - EIGRP, N - NHRP,
T - Table, v - VNC, V - VNC-Direct, A - Babel, D - SHARP,
F - PBR,
> - selected route, * - FIB route
F>* 0.0.0.0/0 [0/0] via 192.168.208.1, enp0s8, 00:14:25
* via 192.168.209.1, enp0s9, 00:14:25
* via 192.168.210.1, enp0s10, 00:14:25
PBR tracks PBR-MAPS via the pbr-map command:
!
pbr-map EVA seq 10
match src-ip 4.3.4.0/24
set nexthop-group DONNA
!
pbr-map EVA seq 20
match dst-ip 4.3.5.0/24
set nexthop-group DONNA
!
pbr-maps can have 'match src-ip <prefix>' and 'match dst-ip <prefix>'
to affect decisions about incoming packets. Additionally if you
only have one nexthop to use for a pbr-map you do not need
to setup a nexthop-group and can specify 'set nexthop XXXX'.
To apply the pbr-map to an incoming interface you do this:
interface enp0s10
pbr-policy EVA
!
When a pbr-map is applied to interfaces it can be installed
into the kernel as a rule:
[sharpd@robot frr1]$ ip rule show
0: from all lookup local
309: from 4.3.4.0/24 iif enp0s10 lookup 10001
319: from all to 4.3.5.0/24 iif enp0s10 lookup 10001
1000: from all lookup [l3mdev-table]
32766: from all lookup main
32767: from all lookup default
[sharpd@robot frr1]$ ip route show table 10001
default proto pbr metric 20
nexthop via 192.168.208.1 dev enp0s8 weight 1
nexthop via 192.168.209.1 dev enp0s9 weight 1
nexthop via 192.168.210.1 dev enp0s10 weight 1
The linux kernel now will use the rules and tables to properly
apply these policies.
Signed-off-by: Donald Sharp <sharpd@cumulusnetworks.com>
Signed-off-by: Don Slice <dslice@cumulusnetworks.com>
Signed-off-by: Quentin Young <qlyoung@cumulusnetworks.com>
2018-01-23 19:11:36 +01:00
|
|
|
|
2018-09-08 21:46:23 +02:00
|
|
|
/* Zebra's router mode ("router zebra" legacy node). */
static struct cmd_node zebra_node = {
	.name = "zebra",
	.node = ZEBRA_NODE,
	.parent_node = CONFIG_NODE,
	.prompt = "%s(config-router)# ",
};
|
2004-08-26 15:08:30 +02:00
|
|
|
|
2020-09-29 12:12:55 +02:00
|
|
|
#ifdef HAVE_BGPD
/* BGP address-family sub-modes.  NOTE(review): .no_xpath is set on the
 * address-family nodes below — presumably it exempts them from
 * northbound XPath tracking; confirm against lib/command.h. */

/* "address-family ipv4 vpn" mode. */
static struct cmd_node bgp_vpnv4_node = {
	.name = "bgp vpnv4",
	.node = BGP_VPNV4_NODE,
	.parent_node = BGP_NODE,
	.prompt = "%s(config-router-af)# ",
	.no_xpath = true,
};

/* "address-family ipv6 vpn" mode. */
static struct cmd_node bgp_vpnv6_node = {
	.name = "bgp vpnv6",
	.node = BGP_VPNV6_NODE,
	.parent_node = BGP_NODE,
	.prompt = "%s(config-router-af)# ",
	.no_xpath = true,
};

/* "address-family ipv4 flowspec" mode. */
static struct cmd_node bgp_flowspecv4_node = {
	.name = "bgp ipv4 flowspec",
	.node = BGP_FLOWSPECV4_NODE,
	.parent_node = BGP_NODE,
	.prompt = "%s(config-router-af)# ",
	.no_xpath = true,
};

/* "address-family ipv6 flowspec" mode. */
static struct cmd_node bgp_flowspecv6_node = {
	.name = "bgp ipv6 flowspec",
	.node = BGP_FLOWSPECV6_NODE,
	.parent_node = BGP_NODE,
	.prompt = "%s(config-router-af)# ",
	.no_xpath = true,
};

/* "address-family ipv4 unicast" mode. */
static struct cmd_node bgp_ipv4_node = {
	.name = "bgp ipv4 unicast",
	.node = BGP_IPV4_NODE,
	.parent_node = BGP_NODE,
	.prompt = "%s(config-router-af)# ",
	.no_xpath = true,
};

/* "address-family ipv4 multicast" mode. */
static struct cmd_node bgp_ipv4m_node = {
	.name = "bgp ipv4 multicast",
	.node = BGP_IPV4M_NODE,
	.parent_node = BGP_NODE,
	.prompt = "%s(config-router-af)# ",
	.no_xpath = true,
};

/* "address-family ipv4 labeled-unicast" mode. */
static struct cmd_node bgp_ipv4l_node = {
	.name = "bgp ipv4 labeled unicast",
	.node = BGP_IPV4L_NODE,
	.parent_node = BGP_NODE,
	.prompt = "%s(config-router-af)# ",
	.no_xpath = true,
};

/* "address-family ipv6 unicast" mode. */
static struct cmd_node bgp_ipv6_node = {
	.name = "bgp ipv6",
	.node = BGP_IPV6_NODE,
	.parent_node = BGP_NODE,
	.prompt = "%s(config-router-af)# ",
	.no_xpath = true,
};

/* "address-family ipv6 multicast" mode. */
static struct cmd_node bgp_ipv6m_node = {
	.name = "bgp ipv6 multicast",
	.node = BGP_IPV6M_NODE,
	.parent_node = BGP_NODE,
	.prompt = "%s(config-router-af)# ",
	.no_xpath = true,
};

/* "address-family l2vpn evpn" mode. */
static struct cmd_node bgp_evpn_node = {
	.name = "bgp evpn",
	.node = BGP_EVPN_NODE,
	.parent_node = BGP_NODE,
	.prompt = "%s(config-router-af)# ",
	.no_xpath = true,
};

/* "vni VNI" mode under the EVPN address family. */
static struct cmd_node bgp_evpn_vni_node = {
	.name = "bgp evpn vni",
	.node = BGP_EVPN_VNI_NODE,
	.parent_node = BGP_EVPN_NODE,
	.prompt = "%s(config-router-af-vni)# ",
};

/* "address-family ipv6 labeled-unicast" mode. */
static struct cmd_node bgp_ipv6l_node = {
	.name = "bgp ipv6 labeled unicast",
	.node = BGP_IPV6L_NODE,
	.parent_node = BGP_NODE,
	.prompt = "%s(config-router-af)# ",
	.no_xpath = true,
};
|
2017-02-08 20:19:54 +01:00
|
|
|
|
2020-09-29 12:12:55 +02:00
|
|
|
#ifdef ENABLE_BGP_VNC
/* "vnc defaults" mode under "router bgp" (Virtual Network Control). */
static struct cmd_node bgp_vnc_defaults_node = {
	.name = "bgp vnc defaults",
	.node = BGP_VNC_DEFAULTS_NODE,
	.parent_node = BGP_NODE,
	.prompt = "%s(config-router-vnc-defaults)# ",
};
|
bgpd: add L3/L2VPN Virtual Network Control feature
This feature adds an L3 & L2 VPN application that makes use of the VPN
and Encap SAFIs. This code is currently used to support IETF NVO3 style
operation. In NVO3 terminology it provides the Network Virtualization
Authority (NVA) and the ability to import/export IP prefixes and MAC
addresses from Network Virtualization Edges (NVEs). The code supports
per-NVE tables.
The NVE-NVA protocol used to communicate routing and Ethernet / Layer 2
(L2) forwarding information between NVAs and NVEs is referred to as the
Remote Forwarder Protocol (RFP). OpenFlow is an example RFP. For
general background on NVO3 and RFP concepts see [1]. For information on
Openflow see [2].
RFPs are integrated with BGP via the RF API contained in the new "rfapi"
BGP sub-directory. Currently, only a simple example RFP is included in
Quagga. Developers may use this example as a starting point to integrate
Quagga with an RFP of their choosing, e.g., OpenFlow. The RFAPI code
also supports the ability to import/export routing information between
VNC and customer edge routers (CEs) operating within a virtual
network. Import/export may take place between BGP views or to the
default zebra VRF.
BGP, with IP VPNs and Tunnel Encapsulation, is used to distribute VPN
information between NVAs. BGP based IP VPN support is defined in
RFC4364, BGP/MPLS IP Virtual Private Networks (VPNs), and RFC4659,
BGP-MPLS IP Virtual Private Network (VPN) Extension for IPv6 VPN . Use
of both the Encapsulation Subsequent Address Family Identifier (SAFI)
and the Tunnel Encapsulation Attribute, RFC5512, The BGP Encapsulation
Subsequent Address Family Identifier (SAFI) and the BGP Tunnel
Encapsulation Attribute, are supported. MAC address distribution does
not follow any standard BGP encoding, although it was inspired by the
early IETF EVPN concepts.
The feature is conditionally compiled and disabled by default.
Use the --enable-bgp-vnc configure option to enable.
The majority of this code was authored by G. Paul Ziemba
<paulz@labn.net>.
[1] http://tools.ietf.org/html/draft-ietf-nvo3-nve-nva-cp-req
[2] https://www.opennetworking.org/sdn-resources/technical-library
Now includes changes needed to merge with cmaster-next.
2016-05-07 20:18:56 +02:00
|
|
|
|
|
|
|
static struct cmd_node bgp_vnc_nve_group_node = {
|
2018-09-09 00:15:50 +02:00
|
|
|
.name = "bgp vnc nve",
|
2018-09-08 21:46:23 +02:00
|
|
|
.node = BGP_VNC_NVE_GROUP_NODE,
|
2018-09-08 23:15:09 +02:00
|
|
|
.parent_node = BGP_NODE,
|
2018-09-08 21:46:23 +02:00
|
|
|
.prompt = "%s(config-router-vnc-nve-group)# ",
|
|
|
|
};
|
bgpd: add L3/L2VPN Virtual Network Control feature
This feature adds an L3 & L2 VPN application that makes use of the VPN
and Encap SAFIs. This code is currently used to support IETF NVO3 style
operation. In NVO3 terminology it provides the Network Virtualization
Authority (NVA) and the ability to import/export IP prefixes and MAC
addresses from Network Virtualization Edges (NVEs). The code supports
per-NVE tables.
The NVE-NVA protocol used to communicate routing and Ethernet / Layer 2
(L2) forwarding information between NVAs and NVEs is referred to as the
Remote Forwarder Protocol (RFP). OpenFlow is an example RFP. For
general background on NVO3 and RFP concepts see [1]. For information on
Openflow see [2].
RFPs are integrated with BGP via the RF API contained in the new "rfapi"
BGP sub-directory. Currently, only a simple example RFP is included in
Quagga. Developers may use this example as a starting point to integrate
Quagga with an RFP of their choosing, e.g., OpenFlow. The RFAPI code
also supports the ability to import/export routing information between
VNC and customer edge routers (CEs) operating within a virtual
network. Import/export may take place between BGP views or to the
default zebra VRF.
BGP, with IP VPNs and Tunnel Encapsulation, is used to distribute VPN
information between NVAs. BGP based IP VPN support is defined in
RFC4364, BGP/MPLS IP Virtual Private Networks (VPNs), and RFC4659,
BGP-MPLS IP Virtual Private Network (VPN) Extension for IPv6 VPN . Use
of both the Encapsulation Subsequent Address Family Identifier (SAFI)
and the Tunnel Encapsulation Attribute, RFC5512, The BGP Encapsulation
Subsequent Address Family Identifier (SAFI) and the BGP Tunnel
Encapsulation Attribute, are supported. MAC address distribution does
not follow any standard BGP encoding, although it was inspired by the
early IETF EVPN concepts.
The feature is conditionally compiled and disabled by default.
Use the --enable-bgp-vnc configure option to enable.
The majority of this code was authored by G. Paul Ziemba
<paulz@labn.net>.
[1] http://tools.ietf.org/html/draft-ietf-nvo3-nve-nva-cp-req
[2] https://www.opennetworking.org/sdn-resources/technical-library
Now includes changes needed to merge with cmaster-next.
2016-05-07 20:18:56 +02:00
|
|
|
|
2018-09-08 21:46:23 +02:00
|
|
|
static struct cmd_node bgp_vrf_policy_node = {
|
2018-09-09 00:15:50 +02:00
|
|
|
.name = "bgp vrf policy",
|
2018-09-08 21:46:23 +02:00
|
|
|
.node = BGP_VRF_POLICY_NODE,
|
2018-09-08 23:15:09 +02:00
|
|
|
.parent_node = BGP_NODE,
|
2018-09-08 21:46:23 +02:00
|
|
|
.prompt = "%s(config-router-vrf-policy)# ",
|
|
|
|
};
|
2017-01-12 14:30:17 +01:00
|
|
|
|
bgpd: add L3/L2VPN Virtual Network Control feature
This feature adds an L3 & L2 VPN application that makes use of the VPN
and Encap SAFIs. This code is currently used to support IETF NVO3 style
operation. In NVO3 terminology it provides the Network Virtualization
Authority (NVA) and the ability to import/export IP prefixes and MAC
addresses from Network Virtualization Edges (NVEs). The code supports
per-NVE tables.
The NVE-NVA protocol used to communicate routing and Ethernet / Layer 2
(L2) forwarding information between NVAs and NVEs is referred to as the
Remote Forwarder Protocol (RFP). OpenFlow is an example RFP. For
general background on NVO3 and RFP concepts see [1]. For information on
Openflow see [2].
RFPs are integrated with BGP via the RF API contained in the new "rfapi"
BGP sub-directory. Currently, only a simple example RFP is included in
Quagga. Developers may use this example as a starting point to integrate
Quagga with an RFP of their choosing, e.g., OpenFlow. The RFAPI code
also supports the ability to import/export routing information between
VNC and customer edge routers (CEs) operating within a virtual
network. Import/export may take place between BGP views or to the
default zebra VRF.
BGP, with IP VPNs and Tunnel Encapsulation, is used to distribute VPN
information between NVAs. BGP based IP VPN support is defined in
RFC4364, BGP/MPLS IP Virtual Private Networks (VPNs), and RFC4659,
BGP-MPLS IP Virtual Private Network (VPN) Extension for IPv6 VPN . Use
of both the Encapsulation Subsequent Address Family Identifier (SAFI)
and the Tunnel Encapsulation Attribute, RFC5512, The BGP Encapsulation
Subsequent Address Family Identifier (SAFI) and the BGP Tunnel
Encapsulation Attribute, are supported. MAC address distribution does
not follow any standard BGP encoding, although it was inspired by the
early IETF EVPN concepts.
The feature is conditionally compiled and disabled by default.
Use the --enable-bgp-vnc configure option to enable.
The majority of this code was authored by G. Paul Ziemba
<paulz@labn.net>.
[1] http://tools.ietf.org/html/draft-ietf-nvo3-nve-nva-cp-req
[2] https://www.opennetworking.org/sdn-resources/technical-library
Now includes changes needed to merge with cmaster-next.
2016-05-07 20:18:56 +02:00
|
|
|
static struct cmd_node bgp_vnc_l2_group_node = {
|
2018-09-09 00:15:50 +02:00
|
|
|
.name = "bgp vnc l2",
|
2018-09-08 21:46:23 +02:00
|
|
|
.node = BGP_VNC_L2_GROUP_NODE,
|
2018-09-08 23:15:09 +02:00
|
|
|
.parent_node = BGP_NODE,
|
2018-09-08 21:46:23 +02:00
|
|
|
.prompt = "%s(config-router-vnc-l2-group)# ",
|
|
|
|
};
|
2020-09-29 12:12:55 +02:00
|
|
|
#endif /* ENABLE_BGP_VNC */
|
bgpd: add L3/L2VPN Virtual Network Control feature
This feature adds an L3 & L2 VPN application that makes use of the VPN
and Encap SAFIs. This code is currently used to support IETF NVO3 style
operation. In NVO3 terminology it provides the Network Virtualization
Authority (NVA) and the ability to import/export IP prefixes and MAC
addresses from Network Virtualization Edges (NVEs). The code supports
per-NVE tables.
The NVE-NVA protocol used to communicate routing and Ethernet / Layer 2
(L2) forwarding information between NVAs and NVEs is referred to as the
Remote Forwarder Protocol (RFP). OpenFlow is an example RFP. For
general background on NVO3 and RFP concepts see [1]. For information on
Openflow see [2].
RFPs are integrated with BGP via the RF API contained in the new "rfapi"
BGP sub-directory. Currently, only a simple example RFP is included in
Quagga. Developers may use this example as a starting point to integrate
Quagga with an RFP of their choosing, e.g., OpenFlow. The RFAPI code
also supports the ability to import/export routing information between
VNC and customer edge routers (CEs) operating within a virtual
network. Import/export may take place between BGP views or to the
default zebra VRF.
BGP, with IP VPNs and Tunnel Encapsulation, is used to distribute VPN
information between NVAs. BGP based IP VPN support is defined in
RFC4364, BGP/MPLS IP Virtual Private Networks (VPNs), and RFC4659,
BGP-MPLS IP Virtual Private Network (VPN) Extension for IPv6 VPN . Use
of both the Encapsulation Subsequent Address Family Identifier (SAFI)
and the Tunnel Encapsulation Attribute, RFC5512, The BGP Encapsulation
Subsequent Address Family Identifier (SAFI) and the BGP Tunnel
Encapsulation Attribute, are supported. MAC address distribution does
not follow any standard BGP encoding, although it was inspired by the
early IETF EVPN concepts.
The feature is conditionally compiled and disabled by default.
Use the --enable-bgp-vnc configure option to enable.
The majority of this code was authored by G. Paul Ziemba
<paulz@labn.net>.
[1] http://tools.ietf.org/html/draft-ietf-nvo3-nve-nva-cp-req
[2] https://www.opennetworking.org/sdn-resources/technical-library
Now includes changes needed to merge with cmaster-next.
2016-05-07 20:18:56 +02:00
|
|
|
|
2018-09-08 21:46:23 +02:00
|
|
|
static struct cmd_node bmp_node = {
|
2018-09-09 00:15:50 +02:00
|
|
|
.name = "bmp",
|
2018-09-08 21:46:23 +02:00
|
|
|
.node = BMP_NODE,
|
2018-09-08 23:15:09 +02:00
|
|
|
.parent_node = BGP_NODE,
|
2018-09-08 21:46:23 +02:00
|
|
|
.prompt = "%s(config-bgp-bmp)# "
|
|
|
|
};
|
2020-12-19 00:47:11 +01:00
|
|
|
|
|
|
|
static struct cmd_node bgp_srv6_node = {
|
|
|
|
.name = "bgp srv6",
|
|
|
|
.node = BGP_SRV6_NODE,
|
|
|
|
.parent_node = BGP_NODE,
|
|
|
|
.prompt = "%s(config-router-srv6)# ",
|
|
|
|
};
|
2020-09-29 12:12:55 +02:00
|
|
|
#endif /* HAVE_BGPD */
|
2019-04-24 19:33:41 +02:00
|
|
|
|
2020-09-29 12:12:55 +02:00
|
|
|
#ifdef HAVE_OSPFD
|
2018-09-08 21:46:23 +02:00
|
|
|
static struct cmd_node ospf_node = {
|
2018-09-09 00:15:50 +02:00
|
|
|
.name = "ospf",
|
2018-09-08 21:46:23 +02:00
|
|
|
.node = OSPF_NODE,
|
2018-09-08 23:15:09 +02:00
|
|
|
.parent_node = CONFIG_NODE,
|
2018-09-08 21:46:23 +02:00
|
|
|
.prompt = "%s(config-router)# ",
|
|
|
|
};
|
2020-09-29 12:12:55 +02:00
|
|
|
#endif /* HAVE_OSPFD */
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2020-09-29 12:12:55 +02:00
|
|
|
#ifdef HAVE_EIGRPD
|
2018-09-08 21:46:23 +02:00
|
|
|
static struct cmd_node eigrp_node = {
|
2018-09-09 00:15:50 +02:00
|
|
|
.name = "eigrp",
|
2018-09-08 21:46:23 +02:00
|
|
|
.node = EIGRP_NODE,
|
2018-09-08 23:15:09 +02:00
|
|
|
.parent_node = CONFIG_NODE,
|
2018-09-08 21:46:23 +02:00
|
|
|
.prompt = "%s(config-router)# ",
|
|
|
|
};
|
2020-09-29 12:12:55 +02:00
|
|
|
#endif /* HAVE_EIGRPD */
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2020-09-29 12:12:55 +02:00
|
|
|
#ifdef HAVE_BABELD
|
2018-09-08 21:46:23 +02:00
|
|
|
static struct cmd_node babel_node = {
|
2018-09-09 00:15:50 +02:00
|
|
|
.name = "babel",
|
2018-09-08 21:46:23 +02:00
|
|
|
.node = BABEL_NODE,
|
2018-09-08 23:15:09 +02:00
|
|
|
.parent_node = CONFIG_NODE,
|
2018-09-08 21:46:23 +02:00
|
|
|
.prompt = "%s(config-router)# ",
|
|
|
|
};
|
2020-09-29 12:12:55 +02:00
|
|
|
#endif /* HAVE_BABELD */
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2018-09-08 21:46:23 +02:00
|
|
|
static struct cmd_node ripng_node = {
|
2018-09-09 00:15:50 +02:00
|
|
|
.name = "ripng",
|
2018-09-08 21:46:23 +02:00
|
|
|
.node = RIPNG_NODE,
|
2018-09-08 23:15:09 +02:00
|
|
|
.parent_node = CONFIG_NODE,
|
2018-09-08 21:46:23 +02:00
|
|
|
.prompt = "%s(config-router)# ",
|
|
|
|
};
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2020-09-29 12:12:55 +02:00
|
|
|
#ifdef HAVE_OSPF6D
|
2018-09-08 21:46:23 +02:00
|
|
|
static struct cmd_node ospf6_node = {
|
2018-09-09 00:15:50 +02:00
|
|
|
.name = "ospf6",
|
2018-09-08 21:46:23 +02:00
|
|
|
.node = OSPF6_NODE,
|
2018-09-08 23:15:09 +02:00
|
|
|
.parent_node = CONFIG_NODE,
|
2018-09-08 21:46:23 +02:00
|
|
|
.prompt = "%s(config-ospf6)# ",
|
|
|
|
};
|
2020-09-29 12:12:55 +02:00
|
|
|
#endif /* HAVE_OSPF6D */
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2020-09-29 12:12:55 +02:00
|
|
|
#ifdef HAVE_LDPD
|
2018-09-08 21:46:23 +02:00
|
|
|
static struct cmd_node ldp_node = {
|
2018-09-09 00:15:50 +02:00
|
|
|
.name = "ldp",
|
2018-09-08 21:46:23 +02:00
|
|
|
.node = LDP_NODE,
|
2018-09-08 23:15:09 +02:00
|
|
|
.parent_node = CONFIG_NODE,
|
2018-09-08 21:46:23 +02:00
|
|
|
.prompt = "%s(config-ldp)# ",
|
|
|
|
};
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2018-09-08 21:46:23 +02:00
|
|
|
static struct cmd_node ldp_ipv4_node = {
|
2018-09-09 00:15:50 +02:00
|
|
|
.name = "ldp ipv4",
|
2018-09-08 21:46:23 +02:00
|
|
|
.node = LDP_IPV4_NODE,
|
2018-09-08 23:15:09 +02:00
|
|
|
.parent_node = LDP_NODE,
|
2018-09-08 21:46:23 +02:00
|
|
|
.prompt = "%s(config-ldp-af)# ",
|
|
|
|
};
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2018-09-08 21:46:23 +02:00
|
|
|
static struct cmd_node ldp_ipv6_node = {
|
2018-09-09 00:15:50 +02:00
|
|
|
.name = "ldp ipv6",
|
2018-09-08 21:46:23 +02:00
|
|
|
.node = LDP_IPV6_NODE,
|
2018-09-08 23:15:09 +02:00
|
|
|
.parent_node = LDP_NODE,
|
2018-09-08 21:46:23 +02:00
|
|
|
.prompt = "%s(config-ldp-af)# ",
|
|
|
|
};
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2018-09-08 21:46:23 +02:00
|
|
|
static struct cmd_node ldp_ipv4_iface_node = {
|
2018-09-09 00:15:50 +02:00
|
|
|
.name = "ldp ipv4 interface",
|
2018-09-08 21:46:23 +02:00
|
|
|
.node = LDP_IPV4_IFACE_NODE,
|
2018-09-08 23:15:09 +02:00
|
|
|
.parent_node = LDP_IPV4_NODE,
|
2018-09-08 21:46:23 +02:00
|
|
|
.prompt = "%s(config-ldp-af-if)# ",
|
|
|
|
};
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2018-09-08 21:46:23 +02:00
|
|
|
static struct cmd_node ldp_ipv6_iface_node = {
|
2018-09-09 00:15:50 +02:00
|
|
|
.name = "ldp ipv6 interface",
|
2018-09-08 21:46:23 +02:00
|
|
|
.node = LDP_IPV6_IFACE_NODE,
|
2018-09-08 23:15:09 +02:00
|
|
|
.parent_node = LDP_IPV6_NODE,
|
2018-09-08 21:46:23 +02:00
|
|
|
.prompt = "%s(config-ldp-af-if)# ",
|
|
|
|
};
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2018-09-08 21:46:23 +02:00
|
|
|
static struct cmd_node ldp_l2vpn_node = {
|
2018-09-09 00:15:50 +02:00
|
|
|
.name = "ldp l2vpn",
|
2018-09-08 21:46:23 +02:00
|
|
|
.node = LDP_L2VPN_NODE,
|
2018-09-08 23:15:09 +02:00
|
|
|
.parent_node = CONFIG_NODE,
|
2018-09-08 21:46:23 +02:00
|
|
|
.prompt = "%s(config-l2vpn)# ",
|
|
|
|
};
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2018-09-08 21:46:23 +02:00
|
|
|
static struct cmd_node ldp_pseudowire_node = {
|
2018-09-09 00:15:50 +02:00
|
|
|
.name = "ldp",
|
2018-09-08 21:46:23 +02:00
|
|
|
.node = LDP_PSEUDOWIRE_NODE,
|
2018-09-08 23:15:09 +02:00
|
|
|
.parent_node = LDP_L2VPN_NODE,
|
2018-09-08 21:46:23 +02:00
|
|
|
.prompt = "%s(config-l2vpn-pw)# ",
|
|
|
|
};
|
2020-09-29 12:12:55 +02:00
|
|
|
#endif /* HAVE_LDPD */
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2018-09-08 21:46:23 +02:00
|
|
|
static struct cmd_node keychain_node = {
|
2018-09-09 00:15:50 +02:00
|
|
|
.name = "keychain",
|
2018-09-08 21:46:23 +02:00
|
|
|
.node = KEYCHAIN_NODE,
|
2018-09-08 23:15:09 +02:00
|
|
|
.parent_node = CONFIG_NODE,
|
2018-09-08 21:46:23 +02:00
|
|
|
.prompt = "%s(config-keychain)# ",
|
|
|
|
};
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2018-09-08 21:46:23 +02:00
|
|
|
static struct cmd_node keychain_key_node = {
|
2018-09-09 00:15:50 +02:00
|
|
|
.name = "keychain key",
|
2018-09-08 21:46:23 +02:00
|
|
|
.node = KEYCHAIN_KEY_NODE,
|
2018-09-08 23:15:09 +02:00
|
|
|
.parent_node = KEYCHAIN_NODE,
|
2018-09-08 21:46:23 +02:00
|
|
|
.prompt = "%s(config-keychain-key)# ",
|
|
|
|
};
|
2017-07-17 14:03:14 +02:00
|
|
|
|
Update Traffic Engineering Support for OSPFD
NOTE: I am squashing several commits together because they
do not independently compile and we need this ability to
do any type of sane testing on the patches. Since this
series builds together I am doing this. -DBS
This new structure is the basis to get new link parameters for
Traffic Engineering from Zebra/interface layer to OSPFD and ISISD
for the support of Traffic Engineering
* lib/if.[c,h]: link parameters structure and get/set functions
* lib/command.[c,h]: creation of a new link-node
* lib/zclient.[c,h]: modification to the ZBUS message to convey the
link parameters structure
* lib/zebra.h: New ZBUS message
Signed-off-by: Olivier Dugeon <olivier.dugeon@orange.com>
Add support for IEEE 754 format
* lib/stream.[c,h]: Add stream_get{f,d} and stream_put{f,d}) demux and muxers to
safely convert between big-endian IEEE-754 single and double binary
format, as used in IETF RFCs, and C99. Implementation depends on host
using __STDC_IEC_559__, which should be everything we care about. Should
correctly error out otherwise.
* lib/network.[c,h]: Add ntohf and htonf converter
* lib/memtypes.c: Add new memory type for Traffic Engineering support
Signed-off-by: Olivier Dugeon <olivier.dugeon@orange.com>
Add link parameters support to Zebra
* zebra/interface.c:
- Add new link-params CLI commands
- Add new functions to set/get link parameters for interface
* zebra/redistribute.[c,h]: Add new function to propagate link parameters
to routing daemon (essentially OSPFD and ISISD) for Traffic Engineering.
* zebra/redistribute_null.c: Add new function
zebra_interface_parameters_update()
* zebra/zserv.[c,h]: Add new functions to send link parameters
Signed-off-by: Olivier Dugeon <olivier.dugeon@orange.com>
Add support of new link-params CLI to vtysh
In vtysh_config.c/vtysh_config_parse_line(), it is not possible to continue
to use the ordered version for adding line i.e. config_add_line_uniq() to print
Interface CLI commands as it completely break the new LINK_PARAMS_NODE.
Signed-off-by: Olivier Dugeon <olivier.dugeon@orange.com>
Update Traffic Engineering support for OSPFD
These patches update original code to RFC3630 (OSPF-TE) and add support of
RFC5392 (Inter-AS v2) & RFC7471 (TE metric extensions) and partial support
of RFC6827 (ASON - GMPLS).
* ospfd/ospf_dump.[c,h]: Add new dump functions for Traffic Engineering
* ospfd/ospf_opaque.[c,h]: Add new TLV code points for RFC5392
* ospfd/ospf_packet.c: Update checking of OSPF_OPTION
* ospfd/ospf_vty.[c,h]: Update ospf_str2area_id
* ospfd/ospf_zebra.c: Add new function ospf_interface_link_params() to get
Link Parameters information from the interface to populate Traffic Engineering
metrics
* ospfd/ospfd.[c,h]: Update OSPF_OPTION flags (T -> MT and new DN)
* ospfd/ospf_te.[c,h]: Major modifications to update the code to new
link parameters structure and new RFCs
Signed-off-by: Olivier Dugeon <olivier.dugeon@orange.com>
tmp
2016-04-19 16:21:46 +02:00
|
|
|
struct cmd_node link_params_node = {
|
2018-09-09 00:15:50 +02:00
|
|
|
.name = "link-params",
|
2018-09-08 21:46:23 +02:00
|
|
|
.node = LINK_PARAMS_NODE,
|
2018-09-08 23:15:09 +02:00
|
|
|
.parent_node = INTERFACE_NODE,
|
2018-09-08 21:46:23 +02:00
|
|
|
.prompt = "%s(config-link-params)# ",
|
2021-11-06 21:10:41 +01:00
|
|
|
.no_xpath = true,
|
2004-08-26 15:08:30 +02:00
|
|
|
};
|
|
|
|
|
2020-09-29 12:12:55 +02:00
|
|
|
#ifdef HAVE_BGPD
|
2018-09-08 21:46:23 +02:00
|
|
|
static struct cmd_node rpki_node = {
|
2018-09-09 00:15:50 +02:00
|
|
|
.name = "rpki",
|
2018-09-08 21:46:23 +02:00
|
|
|
.node = RPKI_NODE,
|
2018-09-08 23:15:09 +02:00
|
|
|
.parent_node = CONFIG_NODE,
|
2018-09-08 21:46:23 +02:00
|
|
|
.prompt = "%s(config-rpki)# ",
|
|
|
|
};
|
2020-09-29 12:12:55 +02:00
|
|
|
#endif /* HAVE_BGPD */
|
2017-11-10 13:56:24 +01:00
|
|
|
|
2018-06-27 18:26:06 +02:00
|
|
|
#if HAVE_BFDD > 0
|
|
|
|
static struct cmd_node bfd_node = {
|
2018-09-09 00:15:50 +02:00
|
|
|
.name = "bfd",
|
2018-09-08 21:46:23 +02:00
|
|
|
.node = BFD_NODE,
|
2018-09-08 23:15:09 +02:00
|
|
|
.parent_node = CONFIG_NODE,
|
2018-09-08 21:46:23 +02:00
|
|
|
.prompt = "%s(config-bfd)# ",
|
2018-06-27 18:26:06 +02:00
|
|
|
};
|
|
|
|
|
|
|
|
static struct cmd_node bfd_peer_node = {
|
2018-09-09 00:15:50 +02:00
|
|
|
.name = "bfd peer",
|
2018-09-08 21:46:23 +02:00
|
|
|
.node = BFD_PEER_NODE,
|
2018-09-08 23:15:09 +02:00
|
|
|
.parent_node = BFD_NODE,
|
2018-09-08 21:46:23 +02:00
|
|
|
.prompt = "%s(config-bfd-peer)# ",
|
2018-06-27 18:26:06 +02:00
|
|
|
};
|
2020-05-15 20:24:59 +02:00
|
|
|
|
|
|
|
static struct cmd_node bfd_profile_node = {
|
|
|
|
.name = "bfd profile",
|
|
|
|
.node = BFD_PROFILE_NODE,
|
|
|
|
.parent_node = BFD_NODE,
|
|
|
|
.prompt = "%s(config-bfd-profile)# ",
|
|
|
|
};
|
2018-06-27 18:26:06 +02:00
|
|
|
#endif /* HAVE_BFDD */
|
|
|
|
|
2017-03-09 05:07:46 +01:00
|
|
|
/* Defined in lib/vty.c */
|
|
|
|
extern struct cmd_node vty_node;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-03-09 05:07:46 +01:00
|
|
|
/* When '^Z' is received from vty, move down to the enable mode. */
|
|
|
|
static int vtysh_end(void)
|
2017-07-17 14:03:14 +02:00
|
|
|
{
|
2004-08-26 15:08:30 +02:00
|
|
|
switch (vty->node) {
|
|
|
|
case VIEW_NODE:
|
|
|
|
case ENABLE_NODE:
|
|
|
|
/* Nothing to do. */
|
2017-07-17 14:03:14 +02:00
|
|
|
break;
|
|
|
|
default:
|
2017-03-09 05:07:46 +01:00
|
|
|
vty->node = ENABLE_NODE;
|
2017-07-17 14:03:14 +02:00
|
|
|
break;
|
|
|
|
}
|
2004-08-26 15:08:30 +02:00
|
|
|
return CMD_SUCCESS;
|
2017-07-17 14:03:14 +02:00
|
|
|
}
|
|
|
|
|
2019-12-06 13:48:06 +01:00
|
|
|
#include "vtysh/vtysh_clippy.c"
|
|
|
|
|
2017-05-10 16:38:48 +02:00
|
|
|
DEFUNSH(VTYSH_REALLYALL, vtysh_end_all, vtysh_end_all_cmd, "end",
|
2004-10-03 22:11:32 +02:00
|
|
|
"End current mode and change to enable mode\n")
|
2017-03-09 05:07:46 +01:00
|
|
|
{
|
|
|
|
return vtysh_end();
|
|
|
|
}
|
|
|
|
|
2021-08-08 21:41:10 +02:00
|
|
|
DEFUNSH(VTYSH_ZEBRA, srv6, srv6_cmd,
|
2020-09-30 05:59:19 +02:00
|
|
|
"srv6",
|
2022-02-14 18:56:04 +01:00
|
|
|
"Segment-Routing SRv6 configuration\n")
|
2020-09-30 05:59:19 +02:00
|
|
|
{
|
|
|
|
vty->node = SRV6_NODE;
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2021-08-08 21:41:10 +02:00
|
|
|
DEFUNSH(VTYSH_ZEBRA, srv6_locators, srv6_locators_cmd,
|
2020-09-30 05:59:19 +02:00
|
|
|
"locators",
|
2022-02-14 18:56:04 +01:00
|
|
|
"Segment-Routing SRv6 locators configuration\n")
|
2020-09-30 05:59:19 +02:00
|
|
|
{
|
|
|
|
vty->node = SRV6_LOCS_NODE;
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2021-08-08 21:41:10 +02:00
|
|
|
DEFUNSH(VTYSH_ZEBRA, srv6_locator, srv6_locator_cmd,
|
2020-09-30 05:59:19 +02:00
|
|
|
"locator WORD",
|
|
|
|
"Segment Routing SRv6 locator\n"
|
|
|
|
"Specify locator-name\n")
|
|
|
|
{
|
|
|
|
vty->node = SRV6_LOC_NODE;
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2020-09-29 12:12:55 +02:00
|
|
|
#ifdef HAVE_BGPD
|
2017-05-14 05:38:14 +02:00
|
|
|
DEFUNSH(VTYSH_BGPD, router_bgp, router_bgp_cmd,
|
2022-06-14 12:41:13 +02:00
|
|
|
"router bgp [(1-4294967295) [<view|vrf> VIEWVRFNAME]]",
|
2017-05-14 05:38:14 +02:00
|
|
|
ROUTER_STR BGP_STR AS_STR
|
2016-10-01 01:03:05 +02:00
|
|
|
"BGP view\nBGP VRF\n"
|
|
|
|
"View/VRF name\n")
|
2017-05-14 05:38:14 +02:00
|
|
|
{
|
|
|
|
vty->node = BGP_NODE;
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2019-02-28 15:15:15 +01:00
|
|
|
#ifdef KEEP_OLD_VPN_COMMANDS
|
2008-12-01 20:10:34 +01:00
|
|
|
DEFUNSH(VTYSH_BGPD, address_family_vpnv4, address_family_vpnv4_cmd,
|
|
|
|
"address-family vpnv4 [unicast]",
|
|
|
|
"Enter Address Family command mode\n"
|
2022-07-18 09:05:35 +02:00
|
|
|
BGP_AF_STR
|
|
|
|
BGP_AF_MODIFIER_STR)
|
2004-08-26 15:08:30 +02:00
|
|
|
{
|
|
|
|
vty->node = BGP_VPNV4_NODE;
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2008-12-01 20:10:34 +01:00
|
|
|
DEFUNSH(VTYSH_BGPD, address_family_vpnv6, address_family_vpnv6_cmd,
|
|
|
|
"address-family vpnv6 [unicast]",
|
|
|
|
"Enter Address Family command mode\n"
|
2022-07-18 09:05:35 +02:00
|
|
|
BGP_AF_STR
|
|
|
|
BGP_AF_MODIFIER_STR)
|
2004-08-26 15:08:30 +02:00
|
|
|
{
|
|
|
|
vty->node = BGP_VPNV6_NODE;
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
2019-02-28 15:15:15 +01:00
|
|
|
#endif /* KEEP_OLD_VPN_COMMANDS */
|
2004-08-26 15:08:30 +02:00
|
|
|
|
2016-08-02 00:47:15 +02:00
|
|
|
DEFUNSH(VTYSH_BGPD, address_family_ipv4, address_family_ipv4_cmd,
|
|
|
|
"address-family ipv4 [unicast]",
|
|
|
|
"Enter Address Family command mode\n"
|
2022-07-18 09:05:35 +02:00
|
|
|
BGP_AF_STR
|
|
|
|
BGP_AF_MODIFIER_STR)
|
2016-08-02 00:47:15 +02:00
|
|
|
{
|
|
|
|
vty->node = BGP_IPV4_NODE;
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2018-03-07 10:02:45 +01:00
|
|
|
DEFUNSH(VTYSH_BGPD, address_family_flowspecv4, address_family_flowspecv4_cmd,
|
|
|
|
"address-family ipv4 flowspec",
|
|
|
|
"Enter Address Family command mode\n"
|
2022-07-18 09:05:35 +02:00
|
|
|
BGP_AF_STR
|
|
|
|
BGP_AF_MODIFIER_STR)
|
2018-03-07 10:02:45 +01:00
|
|
|
{
|
|
|
|
vty->node = BGP_FLOWSPECV4_NODE;
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
DEFUNSH(VTYSH_BGPD, address_family_flowspecv6, address_family_flowspecv6_cmd,
|
|
|
|
"address-family ipv6 flowspec",
|
|
|
|
"Enter Address Family command mode\n"
|
2022-07-18 09:05:35 +02:00
|
|
|
BGP_AF_STR
|
|
|
|
BGP_AF_MODIFIER_STR)
|
2018-03-07 10:02:45 +01:00
|
|
|
{
|
|
|
|
vty->node = BGP_FLOWSPECV6_NODE;
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2016-08-02 00:47:15 +02:00
|
|
|
DEFUNSH(VTYSH_BGPD, address_family_ipv4_multicast,
|
|
|
|
address_family_ipv4_multicast_cmd, "address-family ipv4 multicast",
|
|
|
|
"Enter Address Family command mode\n"
|
2022-07-18 09:05:35 +02:00
|
|
|
BGP_AF_STR
|
|
|
|
BGP_AF_MODIFIER_STR)
|
2016-08-02 00:47:15 +02:00
|
|
|
{
|
|
|
|
vty->node = BGP_IPV4M_NODE;
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
DEFUNSH(VTYSH_BGPD, address_family_ipv4_vpn, address_family_ipv4_vpn_cmd,
|
2017-04-27 00:39:10 +02:00
|
|
|
"address-family ipv4 vpn",
|
2016-08-02 00:47:15 +02:00
|
|
|
"Enter Address Family command mode\n"
|
2022-07-18 09:05:35 +02:00
|
|
|
BGP_AF_STR
|
|
|
|
BGP_AF_MODIFIER_STR)
|
2016-08-02 00:47:15 +02:00
|
|
|
{
|
|
|
|
vty->node = BGP_VPNV4_NODE;
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
DEFUNSH(VTYSH_BGPD, address_family_ipv4_labeled_unicast,
|
|
|
|
address_family_ipv4_labeled_unicast_cmd,
|
|
|
|
"address-family ipv4 labeled-unicast",
|
|
|
|
"Enter Address Family command mode\n"
|
2022-07-18 09:05:35 +02:00
|
|
|
BGP_AF_STR
|
|
|
|
BGP_AF_MODIFIER_STR)
|
2016-08-02 00:47:15 +02:00
|
|
|
{
|
|
|
|
vty->node = BGP_IPV4L_NODE;
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
DEFUNSH(VTYSH_BGPD, address_family_ipv6, address_family_ipv6_cmd,
|
|
|
|
"address-family ipv6 [unicast]",
|
|
|
|
"Enter Address Family command mode\n"
|
2022-07-18 09:05:35 +02:00
|
|
|
BGP_AF_STR
|
|
|
|
BGP_AF_MODIFIER_STR)
|
2016-08-02 00:47:15 +02:00
|
|
|
{
|
|
|
|
vty->node = BGP_IPV6_NODE;
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
DEFUNSH(VTYSH_BGPD, address_family_ipv6_multicast,
|
|
|
|
address_family_ipv6_multicast_cmd, "address-family ipv6 multicast",
|
|
|
|
"Enter Address Family command mode\n"
|
2022-07-18 09:05:35 +02:00
|
|
|
BGP_AF_STR
|
|
|
|
BGP_AF_MODIFIER_STR)
|
2016-08-02 00:47:15 +02:00
|
|
|
{
|
|
|
|
vty->node = BGP_IPV6M_NODE;
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
DEFUNSH(VTYSH_BGPD, address_family_ipv6_vpn, address_family_ipv6_vpn_cmd,
|
2017-04-27 00:39:10 +02:00
|
|
|
"address-family ipv6 vpn",
|
2016-08-02 00:47:15 +02:00
|
|
|
"Enter Address Family command mode\n"
|
2022-07-18 09:05:35 +02:00
|
|
|
BGP_AF_STR
|
|
|
|
BGP_AF_MODIFIER_STR)
|
2016-08-02 00:47:15 +02:00
|
|
|
{
|
|
|
|
vty->node = BGP_VPNV6_NODE;
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2008-12-01 20:10:34 +01:00
|
|
|
DEFUNSH(VTYSH_BGPD, address_family_ipv6_labeled_unicast,
|
|
|
|
address_family_ipv6_labeled_unicast_cmd,
|
2017-05-03 16:24:04 +02:00
|
|
|
"address-family ipv6 labeled-unicast",
|
2008-12-01 20:10:34 +01:00
|
|
|
"Enter Address Family command mode\n"
|
2022-07-18 09:05:35 +02:00
|
|
|
BGP_AF_STR
|
|
|
|
BGP_AF_MODIFIER_STR)
|
2004-08-26 15:08:30 +02:00
|
|
|
{
|
|
|
|
vty->node = BGP_IPV6L_NODE;
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2018-03-12 16:51:21 +01:00
|
|
|
DEFUNSH(VTYSH_BGPD,
|
|
|
|
rpki,
|
|
|
|
rpki_cmd,
|
|
|
|
"rpki",
|
2017-11-10 13:56:24 +01:00
|
|
|
"Enable rpki and enter rpki configuration mode\n")
|
|
|
|
{
|
|
|
|
vty->node = RPKI_NODE;
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2019-04-24 19:33:41 +02:00
|
|
|
DEFUNSH(VTYSH_BGPD,
|
|
|
|
bmp_targets,
|
|
|
|
bmp_targets_cmd,
|
|
|
|
"bmp targets BMPTARGETS",
|
|
|
|
"BGP Monitoring Protocol\n"
|
|
|
|
"Create BMP target group\n"
|
|
|
|
"Name of the BMP target group\n")
|
|
|
|
{
|
|
|
|
vty->node = BMP_NODE;
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2020-12-19 00:47:11 +01:00
|
|
|
DEFUNSH(VTYSH_BGPD,
|
|
|
|
bgp_srv6,
|
|
|
|
bgp_srv6_cmd,
|
|
|
|
"segment-routing srv6",
|
|
|
|
"Segment-Routing configuration\n"
|
|
|
|
"Segment-Routing SRv6 configuration\n")
|
|
|
|
{
|
|
|
|
vty->node = BGP_SRV6_NODE;
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
DEFUNSH(VTYSH_BGPD,
|
|
|
|
exit_bgp_srv6,
|
|
|
|
exit_bgp_srv6_cmd,
|
|
|
|
"exit",
|
|
|
|
"exit Segment-Routing SRv6 configuration\n")
|
|
|
|
{
|
|
|
|
if (vty->node == BGP_SRV6_NODE)
|
|
|
|
vty->node = BGP_NODE;
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
DEFUNSH(VTYSH_BGPD,
|
|
|
|
quit_bgp_srv6,
|
|
|
|
quit_bgp_srv6_cmd,
|
|
|
|
"quit",
|
|
|
|
"quit Segment-Routing SRv6 configuration\n")
|
|
|
|
{
|
|
|
|
if (vty->node == BGP_SRV6_NODE)
|
|
|
|
vty->node = BGP_NODE;
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2008-12-01 20:10:34 +01:00
|
|
|
DEFUNSH(VTYSH_BGPD, address_family_evpn, address_family_evpn_cmd,
|
2017-01-23 09:28:53 +01:00
|
|
|
"address-family <l2vpn evpn>",
|
2008-12-01 20:10:34 +01:00
|
|
|
"Enter Address Family command mode\n"
|
2022-07-18 09:05:35 +02:00
|
|
|
BGP_AF_STR
|
|
|
|
BGP_AF_MODIFIER_STR)
|
2004-08-26 15:08:30 +02:00
|
|
|
{
|
|
|
|
vty->node = BGP_EVPN_NODE;
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2018-10-19 02:44:52 +02:00
|
|
|
DEFUNSH(VTYSH_BGPD, bgp_evpn_vni, bgp_evpn_vni_cmd, "vni " CMD_VNI_RANGE,
|
2004-10-03 22:11:32 +02:00
|
|
|
"VXLAN Network Identifier\n"
|
|
|
|
"VNI number\n")
|
2017-07-17 14:03:14 +02:00
|
|
|
{
|
2004-10-03 22:11:32 +02:00
|
|
|
vty->node = BGP_EVPN_VNI_NODE;
|
|
|
|
return CMD_SUCCESS;
|
2017-07-17 14:03:14 +02:00
|
|
|
}
|
2004-10-03 22:11:32 +02:00
|
|
|
|
2004-08-26 15:08:30 +02:00
|
|
|
#if defined(ENABLE_BGP_VNC)
|
2016-11-08 19:41:48 +01:00
|
|
|
/* Enter the VNC defaults (default NVE group) node. */
DEFUNSH(VTYSH_BGPD, vnc_defaults, vnc_defaults_cmd, "vnc defaults",
	"VNC/RFP related configuration\n"
	"Configure default NVE group\n")
{
	vty->node = BGP_VNC_DEFAULTS_NODE;
	return CMD_SUCCESS;
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
/* Enter a named VNC NVE group node. */
DEFUNSH(VTYSH_BGPD, vnc_nve_group, vnc_nve_group_cmd, "vnc nve-group NAME",
	"VNC/RFP related configuration\n"
	"Configure a NVE group\n"
	"Group name\n")
{
	vty->node = BGP_VNC_NVE_GROUP_NODE;
	return CMD_SUCCESS;
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2016-06-07 04:29:05 +02:00
|
|
|
/* Enter a named VNC VRF-policy group node. */
DEFUNSH(VTYSH_BGPD, vnc_vrf_policy, vnc_vrf_policy_cmd, "vrf-policy NAME",
	"Configure a VRF policy group\n"
	"Group name\n")
{
	vty->node = BGP_VRF_POLICY_NODE;
	return CMD_SUCCESS;
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
/* Enter a named VNC L2 group node. */
DEFUNSH(VTYSH_BGPD, vnc_l2_group, vnc_l2_group_cmd, "vnc l2-group NAME",
	"VNC/RFP related configuration\n"
	"Configure a L2 group\n"
	"Group name\n")
{
	vty->node = BGP_VNC_L2_GROUP_NODE;
	return CMD_SUCCESS;
}
|
2020-10-06 22:07:02 +02:00
|
|
|
|
|
|
|
/* Leave any of the three VNC sub-nodes and return to the BGP node. */
DEFUNSH(VTYSH_BGPD, exit_vnc_config, exit_vnc_config_cmd, "exit-vnc",
	"Exit from VNC configuration mode\n")
{
	switch (vty->node) {
	case BGP_VNC_DEFAULTS_NODE:
	case BGP_VNC_NVE_GROUP_NODE:
	case BGP_VNC_L2_GROUP_NODE:
		vty->node = BGP_NODE;
		break;
	default:
		/* Not in a VNC node: nothing to do. */
		break;
	}
	return CMD_SUCCESS;
}
|
|
|
|
|
|
|
|
/* Leave the VRF-policy node and return to the BGP node. */
DEFUNSH(VTYSH_BGPD, exit_vrf_policy, exit_vrf_policy_cmd, "exit-vrf-policy",
	"Exit from VRF policy configuration mode\n")
{
	if (vty->node == BGP_VRF_POLICY_NODE)
		vty->node = BGP_NODE;

	return CMD_SUCCESS;
}
|
2017-07-17 14:03:14 +02:00
|
|
|
#endif
|
2020-09-29 12:12:55 +02:00
|
|
|
#endif /* HAVE_BGPD */
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2018-10-18 14:12:39 +02:00
|
|
|
/* Enter key-chain configuration for the named chain. */
DEFUNSH(VTYSH_KEYS, key_chain, key_chain_cmd, "key chain WORD",
	"Authentication key management\n"
	"Key-chain management\n"
	"Key-chain name\n")
{
	vty->node = KEYCHAIN_NODE;
	return CMD_SUCCESS;
}
|
2017-01-26 03:51:28 +01:00
|
|
|
|
2018-10-18 14:12:39 +02:00
|
|
|
/* Enter per-key configuration inside a key chain. */
DEFUNSH(VTYSH_KEYS, key, key_cmd, "key (0-2147483647)",
	"Configure a key\n"
	"Key identifier number\n")
{
	vty->node = KEYCHAIN_KEY_NODE;
	return CMD_SUCCESS;
}
|
2017-02-08 20:19:54 +01:00
|
|
|
|
2020-09-29 12:12:55 +02:00
|
|
|
#ifdef HAVE_RIPD
|
2019-01-04 22:08:10 +01:00
|
|
|
/* Enter the RIP router node (optionally VRF-scoped). */
DEFUNSH(VTYSH_RIPD, router_rip, router_rip_cmd, "router rip [vrf NAME]",
	ROUTER_STR "RIP\n" VRF_CMD_HELP_STR)
{
	vty->node = RIP_NODE;
	return CMD_SUCCESS;
}
|
2020-09-29 12:12:55 +02:00
|
|
|
#endif /* HAVE_RIPD */
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2020-09-29 12:12:55 +02:00
|
|
|
#ifdef HAVE_RIPNGD
|
2019-01-04 22:08:10 +01:00
|
|
|
/* Enter the RIPng router node (optionally VRF-scoped). */
DEFUNSH(VTYSH_RIPNGD, router_ripng, router_ripng_cmd, "router ripng [vrf NAME]",
	ROUTER_STR "RIPng\n" VRF_CMD_HELP_STR)
{
	vty->node = RIPNG_NODE;
	return CMD_SUCCESS;
}
|
2020-09-29 12:12:55 +02:00
|
|
|
#endif /* HAVE_RIPNGD */
|
2005-08-23 00:44:29 +02:00
|
|
|
|
2020-09-29 12:12:55 +02:00
|
|
|
#ifdef HAVE_OSPFD
|
2017-08-25 22:51:12 +02:00
|
|
|
DEFUNSH(VTYSH_OSPFD, router_ospf, router_ospf_cmd,
|
|
|
|
"router ospf [(1-65535)] [vrf NAME]",
|
2017-05-16 00:01:57 +02:00
|
|
|
"Enable a routing process\n"
|
|
|
|
"Start OSPF configuration\n"
|
2018-03-12 16:51:21 +01:00
|
|
|
"Instance ID\n"
|
|
|
|
VRF_CMD_HELP_STR)
|
2017-05-16 00:01:57 +02:00
|
|
|
{
|
|
|
|
vty->node = OSPF_NODE;
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
2020-09-29 12:12:55 +02:00
|
|
|
#endif /* HAVE_OSPFD */
|
2017-05-16 00:01:57 +02:00
|
|
|
|
2020-09-29 12:12:55 +02:00
|
|
|
#ifdef HAVE_EIGRPD
|
2019-06-15 19:48:18 +02:00
|
|
|
/* Enter the EIGRP router node for the given AS (optionally VRF-scoped). */
DEFUNSH(VTYSH_EIGRPD, router_eigrp, router_eigrp_cmd, "router eigrp (1-65535) [vrf NAME]",
	"Enable a routing process\n"
	"Start EIGRP configuration\n"
	"AS number to use\n"
	VRF_CMD_HELP_STR)
{
	vty->node = EIGRP_NODE;
	return CMD_SUCCESS;
}
|
2020-09-29 12:12:55 +02:00
|
|
|
#endif /* HAVE_EIGRPD */
|
2017-05-16 00:01:57 +02:00
|
|
|
|
2020-09-29 12:12:55 +02:00
|
|
|
#ifdef HAVE_BABELD
|
2017-05-16 00:01:57 +02:00
|
|
|
/* Enter the Babel router node. */
DEFUNSH(VTYSH_BABELD, router_babel, router_babel_cmd, "router babel",
	"Enable a routing process\n"
	"Make Babel instance command\n")
{
	vty->node = BABEL_NODE;
	return CMD_SUCCESS;
}
|
2020-09-29 12:12:55 +02:00
|
|
|
#endif /* HAVE_BABELD */
|
2017-01-04 15:47:00 +01:00
|
|
|
|
2020-09-29 12:12:55 +02:00
|
|
|
#ifdef HAVE_OSPF6D
|
2021-05-05 22:19:01 +02:00
|
|
|
/* Enter the OSPFv3 router node (optionally VRF-scoped). */
DEFUNSH(VTYSH_OSPF6D, router_ospf6, router_ospf6_cmd, "router ospf6 [vrf NAME]",
	ROUTER_STR OSPF6_STR VRF_CMD_HELP_STR)
{
	vty->node = OSPF6_NODE;
	return CMD_SUCCESS;
}
|
2020-09-29 12:12:55 +02:00
|
|
|
#endif
|
bgpd: add L3/L2VPN Virtual Network Control feature
This feature adds an L3 & L2 VPN application that makes use of the VPN
and Encap SAFIs. This code is currently used to support IETF NVO3 style
operation. In NVO3 terminology it provides the Network Virtualization
Authority (NVA) and the ability to import/export IP prefixes and MAC
addresses from Network Virtualization Edges (NVEs). The code supports
per-NVE tables.
The NVE-NVA protocol used to communicate routing and Ethernet / Layer 2
(L2) forwarding information between NVAs and NVEs is referred to as the
Remote Forwarder Protocol (RFP). OpenFlow is an example RFP. For
general background on NVO3 and RFP concepts see [1]. For information on
Openflow see [2].
RFPs are integrated with BGP via the RF API contained in the new "rfapi"
BGP sub-directory. Currently, only a simple example RFP is included in
Quagga. Developers may use this example as a starting point to integrate
Quagga with an RFP of their choosing, e.g., OpenFlow. The RFAPI code
also supports the ability import/export of routing information between
VNC and customer edge routers (CEs) operating within a virtual
network. Import/export may take place between BGP views or to the
default zebera VRF.
BGP, with IP VPNs and Tunnel Encapsulation, is used to distribute VPN
information between NVAs. BGP based IP VPN support is defined in
RFC4364, BGP/MPLS IP Virtual Private Networks (VPNs), and RFC4659,
BGP-MPLS IP Virtual Private Network (VPN) Extension for IPv6 VPN . Use
of both the Encapsulation Subsequent Address Family Identifier (SAFI)
and the Tunnel Encapsulation Attribute, RFC5512, The BGP Encapsulation
Subsequent Address Family Identifier (SAFI) and the BGP Tunnel
Encapsulation Attribute, are supported. MAC address distribution does
not follow any standard BGB encoding, although it was inspired by the
early IETF EVPN concepts.
The feature is conditionally compiled and disabled by default.
Use the --enable-bgp-vnc configure option to enable.
The majority of this code was authored by G. Paul Ziemba
<paulz@labn.net>.
[1] http://tools.ietf.org/html/draft-ietf-nvo3-nve-nva-cp-req
[2] https://www.opennetworking.org/sdn-resources/technical-library
Now includes changes needed to merge with cmaster-next.
2016-05-07 20:18:56 +02:00
|
|
|
|
2016-10-24 21:24:03 +02:00
|
|
|
#if defined(HAVE_LDPD)
|
bgpd: add L3/L2VPN Virtual Network Control feature
This feature adds an L3 & L2 VPN application that makes use of the VPN
and Encap SAFIs. This code is currently used to support IETF NVO3 style
operation. In NVO3 terminology it provides the Network Virtualization
Authority (NVA) and the ability to import/export IP prefixes and MAC
addresses from Network Virtualization Edges (NVEs). The code supports
per-NVE tables.
The NVE-NVA protocol used to communicate routing and Ethernet / Layer 2
(L2) forwarding information between NVAs and NVEs is referred to as the
Remote Forwarder Protocol (RFP). OpenFlow is an example RFP. For
general background on NVO3 and RFP concepts see [1]. For information on
Openflow see [2].
RFPs are integrated with BGP via the RF API contained in the new "rfapi"
BGP sub-directory. Currently, only a simple example RFP is included in
Quagga. Developers may use this example as a starting point to integrate
Quagga with an RFP of their choosing, e.g., OpenFlow. The RFAPI code
also supports the ability import/export of routing information between
VNC and customer edge routers (CEs) operating within a virtual
network. Import/export may take place between BGP views or to the
default zebera VRF.
BGP, with IP VPNs and Tunnel Encapsulation, is used to distribute VPN
information between NVAs. BGP based IP VPN support is defined in
RFC4364, BGP/MPLS IP Virtual Private Networks (VPNs), and RFC4659,
BGP-MPLS IP Virtual Private Network (VPN) Extension for IPv6 VPN . Use
of both the Encapsulation Subsequent Address Family Identifier (SAFI)
and the Tunnel Encapsulation Attribute, RFC5512, The BGP Encapsulation
Subsequent Address Family Identifier (SAFI) and the BGP Tunnel
Encapsulation Attribute, are supported. MAC address distribution does
not follow any standard BGB encoding, although it was inspired by the
early IETF EVPN concepts.
The feature is conditionally compiled and disabled by default.
Use the --enable-bgp-vnc configure option to enable.
The majority of this code was authored by G. Paul Ziemba
<paulz@labn.net>.
[1] http://tools.ietf.org/html/draft-ietf-nvo3-nve-nva-cp-req
[2] https://www.opennetworking.org/sdn-resources/technical-library
Now includes changes needed to merge with cmaster-next.
2016-05-07 20:18:56 +02:00
|
|
|
/* Enter the global LDP configuration node. */
DEFUNSH(VTYSH_LDPD, ldp_mpls_ldp, ldp_mpls_ldp_cmd, "mpls ldp",
	"Global MPLS configuration subcommands\n"
	"Label Distribution Protocol\n")
{
	vty->node = LDP_NODE;
	return CMD_SUCCESS;
}
|
|
|
|
|
2017-01-12 14:30:17 +01:00
|
|
|
DEFUNSH(VTYSH_LDPD, ldp_address_family_ipv4, ldp_address_family_ipv4_cmd,
|
|
|
|
"address-family ipv4",
|
|
|
|
"Configure Address Family and its parameters\n"
|
|
|
|
"IPv4\n")
|
|
|
|
{
|
|
|
|
vty->node = LDP_IPV4_NODE;
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
bgpd: add L3/L2VPN Virtual Network Control feature
This feature adds an L3 & L2 VPN application that makes use of the VPN
and Encap SAFIs. This code is currently used to support IETF NVO3 style
operation. In NVO3 terminology it provides the Network Virtualization
Authority (NVA) and the ability to import/export IP prefixes and MAC
addresses from Network Virtualization Edges (NVEs). The code supports
per-NVE tables.
The NVE-NVA protocol used to communicate routing and Ethernet / Layer 2
(L2) forwarding information between NVAs and NVEs is referred to as the
Remote Forwarder Protocol (RFP). OpenFlow is an example RFP. For
general background on NVO3 and RFP concepts see [1]. For information on
Openflow see [2].
RFPs are integrated with BGP via the RF API contained in the new "rfapi"
BGP sub-directory. Currently, only a simple example RFP is included in
Quagga. Developers may use this example as a starting point to integrate
Quagga with an RFP of their choosing, e.g., OpenFlow. The RFAPI code
also supports the ability import/export of routing information between
VNC and customer edge routers (CEs) operating within a virtual
network. Import/export may take place between BGP views or to the
default zebera VRF.
BGP, with IP VPNs and Tunnel Encapsulation, is used to distribute VPN
information between NVAs. BGP based IP VPN support is defined in
RFC4364, BGP/MPLS IP Virtual Private Networks (VPNs), and RFC4659,
BGP-MPLS IP Virtual Private Network (VPN) Extension for IPv6 VPN . Use
of both the Encapsulation Subsequent Address Family Identifier (SAFI)
and the Tunnel Encapsulation Attribute, RFC5512, The BGP Encapsulation
Subsequent Address Family Identifier (SAFI) and the BGP Tunnel
Encapsulation Attribute, are supported. MAC address distribution does
not follow any standard BGB encoding, although it was inspired by the
early IETF EVPN concepts.
The feature is conditionally compiled and disabled by default.
Use the --enable-bgp-vnc configure option to enable.
The majority of this code was authored by G. Paul Ziemba
<paulz@labn.net>.
[1] http://tools.ietf.org/html/draft-ietf-nvo3-nve-nva-cp-req
[2] https://www.opennetworking.org/sdn-resources/technical-library
Now includes changes needed to merge with cmaster-next.
2016-05-07 20:18:56 +02:00
|
|
|
DEFUNSH(VTYSH_LDPD, ldp_address_family_ipv6, ldp_address_family_ipv6_cmd,
|
|
|
|
"address-family ipv6",
|
|
|
|
"Configure Address Family and its parameters\n"
|
|
|
|
"IPv6\n")
|
|
|
|
{
|
|
|
|
vty->node = LDP_IPV6_NODE;
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2017-07-27 00:32:55 +02:00
|
|
|
DEFUNSH(VTYSH_LDPD, ldp_exit_address_family, ldp_exit_address_family_cmd,
|
|
|
|
"exit-address-family", "Exit from Address Family configuration mode\n")
|
|
|
|
{
|
|
|
|
if (vty->node == LDP_IPV4_NODE || vty->node == LDP_IPV6_NODE)
|
|
|
|
vty->node = LDP_NODE;
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
DEFUNSH(VTYSH_LDPD, ldp_interface_ifname, ldp_interface_ifname_cmd,
|
|
|
|
"interface IFNAME",
|
|
|
|
"Enable LDP on an interface and enter interface submode\n"
|
|
|
|
"Interface's name\n")
|
|
|
|
{
|
|
|
|
switch (vty->node) {
|
2016-08-02 00:47:15 +02:00
|
|
|
case LDP_IPV4_NODE:
|
2002-12-13 21:15:29 +01:00
|
|
|
vty->node = LDP_IPV4_IFACE_NODE;
|
2017-07-17 14:03:14 +02:00
|
|
|
break;
|
2016-08-02 00:47:15 +02:00
|
|
|
case LDP_IPV6_NODE:
|
|
|
|
vty->node = LDP_IPV6_IFACE_NODE;
|
2017-07-17 14:03:14 +02:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
DEFUNSH(VTYSH_LDPD, ldp_l2vpn_word_type_vpls, ldp_l2vpn_word_type_vpls_cmd,
|
2016-10-01 01:03:05 +02:00
|
|
|
"l2vpn WORD type vpls",
|
2002-12-13 21:15:29 +01:00
|
|
|
"Configure l2vpn commands\n"
|
|
|
|
"L2VPN name\n"
|
2016-08-02 00:47:15 +02:00
|
|
|
"L2VPN type\n"
|
2002-12-13 21:15:29 +01:00
|
|
|
"Virtual Private LAN Service\n")
|
|
|
|
{
|
|
|
|
vty->node = LDP_L2VPN_NODE;
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
DEFUNSH(VTYSH_LDPD, ldp_member_pseudowire_ifname,
|
2017-05-09 23:04:00 +02:00
|
|
|
ldp_member_pseudowire_ifname_cmd, "member pseudowire IFNAME",
|
|
|
|
"L2VPN member configuration\n"
|
|
|
|
"Pseudowire interface\n"
|
|
|
|
"Interface's name\n")
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
|
|
|
vty->node = LDP_PSEUDOWIRE_NODE;
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
#endif
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2020-09-29 12:12:55 +02:00
|
|
|
#ifdef HAVE_ISISD
|
2020-08-18 09:26:51 +02:00
|
|
|
DEFUNSH(VTYSH_ISISD, router_isis, router_isis_cmd,
|
|
|
|
"router isis WORD [vrf NAME]",
|
2017-05-09 23:04:00 +02:00
|
|
|
ROUTER_STR
|
|
|
|
"ISO IS-IS\n"
|
2020-08-18 09:26:51 +02:00
|
|
|
"ISO Routing area tag\n" VRF_CMD_HELP_STR)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
|
|
|
vty->node = ISIS_NODE;
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
2020-09-29 12:12:55 +02:00
|
|
|
#endif /* HAVE_ISISD */
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2020-09-29 12:12:55 +02:00
|
|
|
#ifdef HAVE_FABRICD
|
2018-03-22 15:01:15 +01:00
|
|
|
/* Enter the OpenFabric router node for the given area tag. */
DEFUNSH(VTYSH_FABRICD, router_openfabric, router_openfabric_cmd, "router openfabric WORD",
	ROUTER_STR
	"OpenFabric routing protocol\n"
	"ISO Routing area tag\n")
{
	vty->node = OPENFABRIC_NODE;
	return CMD_SUCCESS;
}
|
2020-09-29 12:12:55 +02:00
|
|
|
#endif /* HAVE_FABRICD */
|
2018-03-22 15:01:15 +01:00
|
|
|
|
2020-09-30 05:59:19 +02:00
|
|
|
DEFUNSH(VTYSH_SR, segment_routing, segment_routing_cmd,
|
2020-07-31 18:04:20 +02:00
|
|
|
"segment-routing",
|
|
|
|
"Configure segment routing\n")
|
|
|
|
{
|
|
|
|
vty->node = SEGMENT_ROUTING_NODE;
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2021-06-05 18:34:46 +02:00
|
|
|
#if defined (HAVE_PATHD)
|
2020-07-31 18:04:20 +02:00
|
|
|
DEFUNSH(VTYSH_PATHD, sr_traffic_eng, sr_traffic_eng_cmd,
|
|
|
|
"traffic-eng",
|
|
|
|
"Configure SR traffic engineering\n")
|
|
|
|
{
|
|
|
|
vty->node = SR_TRAFFIC_ENG_NODE;
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
DEFUNSH(VTYSH_PATHD, srte_segment_list, srte_segment_list_cmd,
|
|
|
|
"segment-list WORD$name",
|
|
|
|
"Segment List\n"
|
|
|
|
"Segment List Name\n")
|
|
|
|
{
|
|
|
|
vty->node = SR_SEGMENT_LIST_NODE;
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
DEFUNSH(VTYSH_PATHD, srte_policy, srte_policy_cmd,
|
|
|
|
"policy color (0-4294967295) endpoint <A.B.C.D|X:X::X:X>",
|
|
|
|
"Segment Routing Policy\n"
|
|
|
|
"SR Policy color\n"
|
|
|
|
"SR Policy color value\n"
|
|
|
|
"SR Policy endpoint\n"
|
|
|
|
"SR Policy endpoint IPv4 address\n"
|
|
|
|
"SR Policy endpoint IPv6 address\n")
|
|
|
|
{
|
|
|
|
vty->node = SR_POLICY_NODE;
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
DEFUNSH(VTYSH_PATHD, srte_policy_candidate_dyn_path,
|
|
|
|
srte_policy_candidate_dyn_path_cmd,
|
|
|
|
"candidate-path preference (0-4294967295) name WORD dynamic",
|
|
|
|
"Segment Routing Policy Candidate Path\n"
|
|
|
|
"Segment Routing Policy Candidate Path Preference\n"
|
|
|
|
"Administrative Preference\n"
|
|
|
|
"Segment Routing Policy Candidate Path Name\n"
|
|
|
|
"Symbolic Name\n"
|
|
|
|
"Dynamic Path\n")
|
|
|
|
{
|
|
|
|
vty->node = SR_CANDIDATE_DYN_NODE;
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
2020-10-16 16:55:51 +02:00
|
|
|
|
|
|
|
DEFUNSH(VTYSH_PATHD, pcep, pcep_cmd,
|
|
|
|
"pcep",
|
|
|
|
"Configure SR pcep\n")
|
|
|
|
{
|
|
|
|
vty->node = PCEP_NODE;
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
DEFUNSH(VTYSH_PATHD, pcep_cli_pcc, pcep_cli_pcc_cmd,
|
2021-08-11 16:46:31 +02:00
|
|
|
"pcc",
|
2020-10-16 16:55:51 +02:00
|
|
|
"PCC configuration\n")
|
|
|
|
{
|
|
|
|
vty->node = PCEP_PCC_NODE;
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
DEFUNSH(VTYSH_PATHD, pcep_cli_pce, pcep_cli_pce_cmd,
|
2021-08-11 16:46:31 +02:00
|
|
|
"pce WORD",
|
2020-10-16 16:55:51 +02:00
|
|
|
"PCE configuration\n"
|
|
|
|
"Peer name\n")
|
|
|
|
{
|
|
|
|
vty->node = PCEP_PCE_NODE;
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
DEFUNSH(VTYSH_PATHD, pcep_cli_pcep_pce_config, pcep_cli_pcep_pce_config_cmd,
|
2021-08-11 16:46:31 +02:00
|
|
|
"pce-config WORD",
|
2020-10-16 16:55:51 +02:00
|
|
|
"PCEP peer Configuration Group\n"
|
|
|
|
"PCEP peer Configuration Group name\n")
|
|
|
|
{
|
|
|
|
vty->node = PCEP_PCE_CONFIG_NODE;
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2020-07-31 18:04:20 +02:00
|
|
|
#endif /* HAVE_PATHD */
|
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
DEFUNSH(VTYSH_RMAP, vtysh_route_map, vtysh_route_map_cmd,
|
2021-11-23 15:05:25 +01:00
|
|
|
"route-map RMAP_NAME <deny|permit> (1-65535)",
|
2016-10-01 01:03:05 +02:00
|
|
|
"Create route-map or enter route-map command mode\n"
|
2002-12-13 21:15:29 +01:00
|
|
|
"Route map tag\n"
|
|
|
|
"Route map denies set operations\n"
|
|
|
|
"Route map permits set operations\n"
|
|
|
|
"Sequence to insert to/delete from existing route-map entry\n")
|
2017-07-17 14:03:14 +02:00
|
|
|
{
|
2002-12-13 21:15:29 +01:00
|
|
|
vty->node = RMAP_NODE;
|
|
|
|
return CMD_SUCCESS;
|
2017-07-17 14:03:14 +02:00
|
|
|
}
|
|
|
|
|
2020-09-29 12:12:55 +02:00
|
|
|
#ifdef HAVE_PBRD
|
pbrd: Add PBR to FRR
This is an implementation of PBR for FRR.
This implemenation uses a combination of rules and
tables to determine how packets will flow.
PBR introduces a new concept of 'nexthop-groups' to
specify a group of nexthops that will be used for
ecmp. Nexthop-groups are specified on the cli via:
nexthop-group DONNA
nexthop 192.168.208.1
nexthop 192.168.209.1
nexthop 192.168.210.1
!
PBR sees the nexthop-group and installs these as a default
route with these nexthops starting at table 10000
robot# show pbr nexthop-groups
Nexthop-Group: DONNA Table: 10001 Valid: 1 Installed: 1
Valid: 1 nexthop 192.168.209.1
Valid: 1 nexthop 192.168.210.1
Valid: 1 nexthop 192.168.208.1
I have also introduced the ability to specify a table
in a 'show ip route table XXX' to see the specified tables.
robot# show ip route table 10001
Codes: K - kernel route, C - connected, S - static, R - RIP,
O - OSPF, I - IS-IS, B - BGP, P - PIM, E - EIGRP, N - NHRP,
T - Table, v - VNC, V - VNC-Direct, A - Babel, D - SHARP,
F - PBR,
> - selected route, * - FIB route
F>* 0.0.0.0/0 [0/0] via 192.168.208.1, enp0s8, 00:14:25
* via 192.168.209.1, enp0s9, 00:14:25
* via 192.168.210.1, enp0s10, 00:14:25
PBR tracks PBR-MAPS via the pbr-map command:
!
pbr-map EVA seq 10
match src-ip 4.3.4.0/24
set nexthop-group DONNA
!
pbr-map EVA seq 20
match dst-ip 4.3.5.0/24
set nexthop-group DONNA
!
pbr-maps can have 'match src-ip <prefix>' and 'match dst-ip <prefix>'
to affect decisions about incoming packets. Additionally if you
only have one nexthop to use for a pbr-map you do not need
to setup a nexthop-group and can specify 'set nexthop XXXX'.
To apply the pbr-map to an incoming interface you do this:
interface enp0s10
pbr-policy EVA
!
When a pbr-map is applied to interfaces it can be installed
into the kernel as a rule:
[sharpd@robot frr1]$ ip rule show
0: from all lookup local
309: from 4.3.4.0/24 iif enp0s10 lookup 10001
319: from all to 4.3.5.0/24 iif enp0s10 lookup 10001
1000: from all lookup [l3mdev-table]
32766: from all lookup main
32767: from all lookup default
[sharpd@robot frr1]$ ip route show table 10001
default proto pbr metric 20
nexthop via 192.168.208.1 dev enp0s8 weight 1
nexthop via 192.168.209.1 dev enp0s9 weight 1
nexthop via 192.168.210.1 dev enp0s10 weight 1
The linux kernel now will use the rules and tables to properly
apply these policies.
Signed-off-by: Donald Sharp <sharpd@cumulusnetworks.com>
Signed-off-by: Don Slice <dslice@cumulusnetworks.com>
Signed-off-by: Quentin Young <qlyoung@cumulusnetworks.com>
2018-01-23 19:11:36 +01:00
|
|
|
DEFUNSH(VTYSH_PBRD, vtysh_pbr_map, vtysh_pbr_map_cmd,
|
2019-06-05 12:51:08 +02:00
|
|
|
"pbr-map PBRMAP seq (1-700)",
|
pbrd: Add PBR to FRR
This is an implementation of PBR for FRR.
This implemenation uses a combination of rules and
tables to determine how packets will flow.
PBR introduces a new concept of 'nexthop-groups' to
specify a group of nexthops that will be used for
ecmp. Nexthop-groups are specified on the cli via:
nexthop-group DONNA
nexthop 192.168.208.1
nexthop 192.168.209.1
nexthop 192.168.210.1
!
PBR sees the nexthop-group and installs these as a default
route with these nexthops starting at table 10000
robot# show pbr nexthop-groups
Nexthop-Group: DONNA Table: 10001 Valid: 1 Installed: 1
Valid: 1 nexthop 192.168.209.1
Valid: 1 nexthop 192.168.210.1
Valid: 1 nexthop 192.168.208.1
I have also introduced the ability to specify a table
in a 'show ip route table XXX' to see the specified tables.
robot# show ip route table 10001
Codes: K - kernel route, C - connected, S - static, R - RIP,
O - OSPF, I - IS-IS, B - BGP, P - PIM, E - EIGRP, N - NHRP,
T - Table, v - VNC, V - VNC-Direct, A - Babel, D - SHARP,
F - PBR,
> - selected route, * - FIB route
F>* 0.0.0.0/0 [0/0] via 192.168.208.1, enp0s8, 00:14:25
* via 192.168.209.1, enp0s9, 00:14:25
* via 192.168.210.1, enp0s10, 00:14:25
PBR tracks PBR-MAPS via the pbr-map command:
!
pbr-map EVA seq 10
match src-ip 4.3.4.0/24
set nexthop-group DONNA
!
pbr-map EVA seq 20
match dst-ip 4.3.5.0/24
set nexthop-group DONNA
!
pbr-maps can have 'match src-ip <prefix>' and 'match dst-ip <prefix>'
to affect decisions about incoming packets. Additionally if you
only have one nexthop to use for a pbr-map you do not need
to setup a nexthop-group and can specify 'set nexthop XXXX'.
To apply the pbr-map to an incoming interface you do this:
interface enp0s10
pbr-policy EVA
!
When a pbr-map is applied to interfaces it can be installed
into the kernel as a rule:
[sharpd@robot frr1]$ ip rule show
0: from all lookup local
309: from 4.3.4.0/24 iif enp0s10 lookup 10001
319: from all to 4.3.5.0/24 iif enp0s10 lookup 10001
1000: from all lookup [l3mdev-table]
32766: from all lookup main
32767: from all lookup default
[sharpd@robot frr1]$ ip route show table 10001
default proto pbr metric 20
nexthop via 192.168.208.1 dev enp0s8 weight 1
nexthop via 192.168.209.1 dev enp0s9 weight 1
nexthop via 192.168.210.1 dev enp0s10 weight 1
The linux kernel now will use the rules and tables to properly
apply these policies.
Signed-off-by: Donald Sharp <sharpd@cumulusnetworks.com>
Signed-off-by: Don Slice <dslice@cumulusnetworks.com>
Signed-off-by: Quentin Young <qlyoung@cumulusnetworks.com>
2018-01-23 19:11:36 +01:00
|
|
|
"Create pbr-map or enter pbr-map command mode\n"
|
|
|
|
"The name of the PBR MAP\n"
|
|
|
|
"Sequence to insert to/delete from existing pbr-map entry\n"
|
|
|
|
"Sequence number\n")
|
|
|
|
{
|
|
|
|
vty->node = PBRMAP_NODE;
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2020-09-29 10:17:04 +02:00
|
|
|
/* Forwarded-only command: delete a pbr-map (or one sequence of it).
 * DEFSH registers the command for completion/forwarding to pbrd;
 * no local handler body is needed in vtysh.
 */
DEFSH(VTYSH_PBRD, vtysh_no_pbr_map_cmd, "no pbr-map PBRMAP [seq (1-700)]",
	NO_STR
	"Delete pbr-map\n"
	"The name of the PBR MAP\n"
	"Sequence to delete from existing pbr-map entry\n"
	"Sequence number\n")
|
2020-09-29 12:12:55 +02:00
|
|
|
#endif /* HAVE_PBRD */
|
2020-09-29 10:17:04 +02:00
|
|
|
|
2018-06-27 18:26:06 +02:00
|
|
|
#if HAVE_BFDD > 0
|
|
|
|
/* Enter the top-level BFD configuration node. */
DEFUNSH(VTYSH_BFDD, bfd_enter, bfd_enter_cmd, "bfd", "Configure BFD peers\n")
{
	vty->node = BFD_NODE;
	return CMD_SUCCESS;
}
|
|
|
|
|
|
|
|
DEFUNSH(VTYSH_BFDD, bfd_peer_enter, bfd_peer_enter_cmd,
|
|
|
|
"peer <A.B.C.D|X:X::X:X> [{multihop|local-address <A.B.C.D|X:X::X:X>|interface IFNAME|vrf NAME}]",
|
|
|
|
"Configure peer\n"
|
|
|
|
"IPv4 peer address\n"
|
|
|
|
"IPv6 peer address\n"
|
|
|
|
"Configure multihop\n"
|
|
|
|
"Configure local address\n"
|
|
|
|
"IPv4 local address\n"
|
|
|
|
"IPv6 local address\n"
|
|
|
|
INTERFACE_STR
|
|
|
|
"Configure interface name to use\n"
|
|
|
|
"Configure VRF\n"
|
|
|
|
"Configure VRF name\n")
|
|
|
|
{
|
|
|
|
vty->node = BFD_PEER_NODE;
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
2020-05-15 20:24:59 +02:00
|
|
|
|
|
|
|
DEFUNSH(VTYSH_BFDD, bfd_profile_enter, bfd_profile_enter_cmd,
|
2021-04-01 14:48:24 +02:00
|
|
|
"profile BFDPROF",
|
2020-05-15 20:24:59 +02:00
|
|
|
BFD_PROFILE_STR
|
|
|
|
BFD_PROFILE_NAME_STR)
|
|
|
|
{
|
|
|
|
vty->node = BFD_PROFILE_NODE;
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
2018-06-27 18:26:06 +02:00
|
|
|
#endif /* HAVE_BFDD */
|
|
|
|
|
2015-05-29 05:48:31 +02:00
|
|
|
/* Enter the "line vty" terminal-line configuration node. */
DEFUNSH(VTYSH_ALL, vtysh_line_vty, vtysh_line_vty_cmd, "line vty",
	"Configure a terminal line\n"
	"Virtual terminal\n")
{
	vty->node = VTY_NODE;
	return CMD_SUCCESS;
}
|
|
|
|
|
2017-05-10 16:38:48 +02:00
|
|
|
/* Switch the shell into privileged (enable) mode. */
DEFUNSH(VTYSH_REALLYALL, vtysh_enable, vtysh_enable_cmd, "enable",
	"Turn on privileged mode command\n")
{
	vty->node = ENABLE_NODE;
	return CMD_SUCCESS;
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-03-09 05:07:46 +01:00
|
|
|
/* Drop from privileged mode back to view mode; a no-op elsewhere. */
DEFUNSH(VTYSH_REALLYALL, vtysh_disable, vtysh_disable_cmd, "disable",
	"Turn off privileged mode command\n")
{
	if (vty->node == ENABLE_NODE)
		vty->node = VIEW_NODE;

	return CMD_SUCCESS;
}
|
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
DEFUNSH(VTYSH_REALLYALL, vtysh_config_terminal, vtysh_config_terminal_cmd,
|
2019-05-08 03:00:34 +02:00
|
|
|
"configure [terminal]",
|
2002-12-13 21:15:29 +01:00
|
|
|
"Configuration from vty interface\n"
|
|
|
|
"Configuration terminal\n")
|
2017-07-17 14:03:14 +02:00
|
|
|
{
|
2017-03-09 05:07:46 +01:00
|
|
|
vty->node = CONFIG_NODE;
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-05-14 05:38:14 +02:00
|
|
|
static int vtysh_exit(struct vty *vty)
|
2017-07-17 14:03:14 +02:00
|
|
|
{
|
2020-04-02 12:06:12 +02:00
|
|
|
struct cmd_node *cnode = vector_lookup(cmdvec, vty->node);
|
|
|
|
|
|
|
|
if (vty->node == VIEW_NODE || vty->node == ENABLE_NODE)
|
2017-07-17 14:03:14 +02:00
|
|
|
exit(0);
|
2020-04-02 12:06:12 +02:00
|
|
|
if (cnode->node_exit)
|
|
|
|
cnode->node_exit(vty);
|
|
|
|
if (cnode->parent_node)
|
|
|
|
vty->node = cnode->parent_node;
|
|
|
|
|
|
|
|
if (vty->node == CONFIG_NODE) {
|
|
|
|
/* resync in case one of the daemons is somewhere else */
|
2017-05-14 05:38:14 +02:00
|
|
|
vtysh_execute("end");
|
2019-05-08 03:00:34 +02:00
|
|
|
vtysh_execute("configure");
|
2017-07-17 14:03:14 +02:00
|
|
|
}
|
2016-08-02 00:47:15 +02:00
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2016-08-02 00:47:15 +02:00
|
|
|
/* "exit": delegate to the generic one-level-up helper. */
DEFUNSH(VTYSH_REALLYALL, vtysh_exit_all, vtysh_exit_all_cmd, "exit",
	"Exit current mode and down to previous mode\n")
{
	return vtysh_exit(vty);
}
|
|
|
|
|
2020-11-20 15:42:39 +01:00
|
|
|
/* "quit" is an alias of "exit". */
DEFUNSH(VTYSH_REALLYALL, vtysh_quit_all, vtysh_quit_all_cmd, "quit",
	"Exit current mode and down to previous mode\n")
{
	return vtysh_exit_all(self, vty, argc, argv);
}
|
|
|
|
|
2020-09-29 12:12:55 +02:00
|
|
|
#ifdef HAVE_BGPD
|
2016-08-02 00:47:15 +02:00
|
|
|
DEFUNSH(VTYSH_BGPD, exit_address_family, exit_address_family_cmd,
|
|
|
|
"exit-address-family", "Exit from Address Family configuration mode\n")
|
|
|
|
{
|
|
|
|
if (vty->node == BGP_IPV4_NODE || vty->node == BGP_IPV4M_NODE
|
|
|
|
|| vty->node == BGP_IPV4L_NODE || vty->node == BGP_VPNV4_NODE
|
|
|
|
|| vty->node == BGP_VPNV6_NODE || vty->node == BGP_IPV6_NODE
|
|
|
|
|| vty->node == BGP_IPV6L_NODE || vty->node == BGP_IPV6M_NODE
|
2018-03-07 10:02:45 +01:00
|
|
|
|| vty->node == BGP_EVPN_NODE
|
|
|
|
|| vty->node == BGP_FLOWSPECV4_NODE
|
|
|
|
|| vty->node == BGP_FLOWSPECV6_NODE)
|
2016-08-02 00:47:15 +02:00
|
|
|
vty->node = BGP_NODE;
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* "exit-vni": leave the BGP EVPN VNI sub-mode, back to the EVPN
 * address-family node.  A no-op in other modes. */
DEFUNSH(VTYSH_BGPD, exit_vni, exit_vni_cmd, "exit-vni", "Exit from VNI mode\n")
{
	if (vty->node == BGP_EVPN_VNI_NODE)
		vty->node = BGP_EVPN_NODE;
	return CMD_SUCCESS;
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2018-08-22 22:05:04 +02:00
|
|
|
/* "exit" in RPKI configuration mode: pop back to the parent mode. */
DEFUNSH(VTYSH_BGPD, rpki_exit, rpki_exit_cmd, "exit",
	"Exit current mode and down to previous mode\n")
{
	vtysh_exit(vty);
	return CMD_SUCCESS;
}
|
|
|
|
|
|
|
|
/* "quit" is an alias for "exit" in RPKI configuration mode. */
DEFUNSH(VTYSH_BGPD, rpki_quit, rpki_quit_cmd, "quit",
	"Exit current mode and down to previous mode\n")
{
	return rpki_exit(self, vty, argc, argv);
}
|
|
|
|
|
2019-04-24 19:33:41 +02:00
|
|
|
/* "exit" in BMP configuration mode: pop back to the parent mode. */
DEFUNSH(VTYSH_BGPD, bmp_exit, bmp_exit_cmd, "exit",
	"Exit current mode and down to previous mode\n")
{
	vtysh_exit(vty);
	return CMD_SUCCESS;
}
|
|
|
|
|
|
|
|
/* "quit" is an alias for "exit" in BMP configuration mode. */
DEFUNSH(VTYSH_BGPD, bmp_quit, bmp_quit_cmd, "quit",
	"Exit current mode and down to previous mode\n")
{
	return bmp_exit(self, vty, argc, argv);
}
|
2020-09-29 12:12:55 +02:00
|
|
|
#endif /* HAVE_BGPD */
|
2017-01-12 14:30:17 +01:00
|
|
|
|
2020-09-29 10:17:04 +02:00
|
|
|
/* "exit-vrf": leave VRF configuration mode back to global config.
 * A no-op in other modes. */
DEFUNSH(VTYSH_VRF, exit_vrf_config, exit_vrf_config_cmd, "exit-vrf",
	"Exit from VRF configuration mode\n")
{
	if (vty->node == VRF_NODE)
		vty->node = CONFIG_NODE;
	return CMD_SUCCESS;
}
|
|
|
|
|
2021-08-08 21:41:10 +02:00
|
|
|
/* "exit" in the srv6 node: return to the segment-routing node.
 * A no-op in other modes. */
DEFUNSH(VTYSH_ZEBRA, exit_srv6_config, exit_srv6_config_cmd, "exit",
	"Exit from SRv6 configuration mode\n")
{
	if (vty->node == SRV6_NODE)
		vty->node = SEGMENT_ROUTING_NODE;
	return CMD_SUCCESS;
}
|
|
|
|
|
2021-08-08 21:41:10 +02:00
|
|
|
/* "exit" in the srv6 "locators" node (SRV6_LOCS_NODE): return to the
 * srv6 node.  Help text fixed: this node is the locators list, not a
 * single locator (string was swapped with exit_srv6_loc_config). */
DEFUNSH(VTYSH_ZEBRA, exit_srv6_locs_config, exit_srv6_locs_config_cmd, "exit",
	"Exit from SRv6-locators configuration mode\n")
{
	if (vty->node == SRV6_LOCS_NODE)
		vty->node = SRV6_NODE;
	return CMD_SUCCESS;
}
|
|
|
|
|
2021-08-08 21:41:10 +02:00
|
|
|
/* "exit" in a single "locator NAME" node (SRV6_LOC_NODE): return to the
 * locators list node.  Help text fixed: this node is one locator, not
 * the locators list (string was swapped with exit_srv6_locs_config). */
DEFUNSH(VTYSH_ZEBRA, exit_srv6_loc_config, exit_srv6_loc_config_cmd, "exit",
	"Exit from SRv6-locator configuration mode\n")
{
	if (vty->node == SRV6_LOC_NODE)
		vty->node = SRV6_LOCS_NODE;
	return CMD_SUCCESS;
}
|
|
|
|
|
2020-09-29 12:12:55 +02:00
|
|
|
#ifdef HAVE_RIPD
|
2002-12-13 21:15:29 +01:00
|
|
|
/* "exit" in RIP configuration modes: pop one mode level. */
DEFUNSH(VTYSH_RIPD, vtysh_exit_ripd, vtysh_exit_ripd_cmd, "exit",
	"Exit current mode and down to previous mode\n")
{
	return vtysh_exit(vty);
}
|
|
|
|
|
2016-10-01 01:03:05 +02:00
|
|
|
/* "quit" is an alias for "exit" in RIP configuration modes. */
DEFUNSH(VTYSH_RIPD, vtysh_quit_ripd, vtysh_quit_ripd_cmd, "quit",
	"Exit current mode and down to previous mode\n")
{
	return vtysh_exit_ripd(self, vty, argc, argv);
}
|
2020-09-29 12:12:55 +02:00
|
|
|
#endif /* HAVE_RIPD */
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2020-09-29 12:12:55 +02:00
|
|
|
#ifdef HAVE_RIPNGD
|
2004-08-25 14:22:00 +02:00
|
|
|
/* "exit" in RIPng configuration modes: pop one mode level. */
DEFUNSH(VTYSH_RIPNGD, vtysh_exit_ripngd, vtysh_exit_ripngd_cmd, "exit",
	"Exit current mode and down to previous mode\n")
{
	return vtysh_exit(vty);
}
|
|
|
|
|
2016-10-01 01:03:05 +02:00
|
|
|
/* "quit" is an alias for "exit" in RIPng configuration modes. */
DEFUNSH(VTYSH_RIPNGD, vtysh_quit_ripngd, vtysh_quit_ripngd_cmd, "quit",
	"Exit current mode and down to previous mode\n")
{
	return vtysh_exit_ripngd(self, vty, argc, argv);
}
|
2020-09-29 12:12:55 +02:00
|
|
|
#endif /* HAVE_RIPNGD */
|
2003-03-25 06:07:42 +01:00
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
/* "exit" in route-map configuration mode: pop one mode level. */
DEFUNSH(VTYSH_RMAP, vtysh_exit_rmap, vtysh_exit_rmap_cmd, "exit",
	"Exit current mode and down to previous mode\n")
{
	return vtysh_exit(vty);
}
|
|
|
|
|
2016-10-01 01:03:05 +02:00
|
|
|
/* "quit" is an alias for "exit" in route-map configuration mode. */
DEFUNSH(VTYSH_RMAP, vtysh_quit_rmap, vtysh_quit_rmap_cmd, "quit",
	"Exit current mode and down to previous mode\n")
{
	return vtysh_exit_rmap(self, vty, argc, argv);
}
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2020-09-29 12:12:55 +02:00
|
|
|
#ifdef HAVE_PBRD
|
pbrd: Add PBR to FRR
This is an implementation of PBR for FRR.
This implemenation uses a combination of rules and
tables to determine how packets will flow.
PBR introduces a new concept of 'nexthop-groups' to
specify a group of nexthops that will be used for
ecmp. Nexthop-groups are specified on the cli via:
nexthop-group DONNA
nexthop 192.168.208.1
nexthop 192.168.209.1
nexthop 192.168.210.1
!
PBR sees the nexthop-group and installs these as a default
route with these nexthops starting at table 10000
robot# show pbr nexthop-groups
Nexthop-Group: DONNA Table: 10001 Valid: 1 Installed: 1
Valid: 1 nexthop 192.168.209.1
Valid: 1 nexthop 192.168.210.1
Valid: 1 nexthop 192.168.208.1
I have also introduced the ability to specify a table
in a 'show ip route table XXX' to see the specified tables.
robot# show ip route table 10001
Codes: K - kernel route, C - connected, S - static, R - RIP,
O - OSPF, I - IS-IS, B - BGP, P - PIM, E - EIGRP, N - NHRP,
T - Table, v - VNC, V - VNC-Direct, A - Babel, D - SHARP,
F - PBR,
> - selected route, * - FIB route
F>* 0.0.0.0/0 [0/0] via 192.168.208.1, enp0s8, 00:14:25
* via 192.168.209.1, enp0s9, 00:14:25
* via 192.168.210.1, enp0s10, 00:14:25
PBR tracks PBR-MAPS via the pbr-map command:
!
pbr-map EVA seq 10
match src-ip 4.3.4.0/24
set nexthop-group DONNA
!
pbr-map EVA seq 20
match dst-ip 4.3.5.0/24
set nexthop-group DONNA
!
pbr-maps can have 'match src-ip <prefix>' and 'match dst-ip <prefix>'
to affect decisions about incoming packets. Additionally if you
only have one nexthop to use for a pbr-map you do not need
to setup a nexthop-group and can specify 'set nexthop XXXX'.
To apply the pbr-map to an incoming interface you do this:
interface enp0s10
pbr-policy EVA
!
When a pbr-map is applied to interfaces it can be installed
into the kernel as a rule:
[sharpd@robot frr1]$ ip rule show
0: from all lookup local
309: from 4.3.4.0/24 iif enp0s10 lookup 10001
319: from all to 4.3.5.0/24 iif enp0s10 lookup 10001
1000: from all lookup [l3mdev-table]
32766: from all lookup main
32767: from all lookup default
[sharpd@robot frr1]$ ip route show table 10001
default proto pbr metric 20
nexthop via 192.168.208.1 dev enp0s8 weight 1
nexthop via 192.168.209.1 dev enp0s9 weight 1
nexthop via 192.168.210.1 dev enp0s10 weight 1
The linux kernel now will use the rules and tables to properly
apply these policies.
Signed-off-by: Donald Sharp <sharpd@cumulusnetworks.com>
Signed-off-by: Don Slice <dslice@cumulusnetworks.com>
Signed-off-by: Quentin Young <qlyoung@cumulusnetworks.com>
2018-01-23 19:11:36 +01:00
|
|
|
/* "exit" in pbr-map configuration mode: pop one mode level. */
DEFUNSH(VTYSH_PBRD, vtysh_exit_pbr_map, vtysh_exit_pbr_map_cmd, "exit",
	"Exit current mode and down to previous mode\n")
{
	return vtysh_exit(vty);
}
|
|
|
|
|
|
|
|
/* "quit" is an alias for "exit" in pbr-map configuration mode.
 * Fixed: delegate to the pbr-map exit handler instead of the route-map
 * one (copy-paste slip; net behavior was the same, both end in
 * vtysh_exit(), but the indirection was misleading). */
DEFUNSH(VTYSH_PBRD, vtysh_quit_pbr_map, vtysh_quit_pbr_map_cmd, "quit",
	"Exit current mode and down to previous mode\n")
{
	return vtysh_exit_pbr_map(self, vty, argc, argv);
}
|
2020-09-29 12:12:55 +02:00
|
|
|
#endif /* HAVE_PBRD */
|
pbrd: Add PBR to FRR
This is an implementation of PBR for FRR.
This implemenation uses a combination of rules and
tables to determine how packets will flow.
PBR introduces a new concept of 'nexthop-groups' to
specify a group of nexthops that will be used for
ecmp. Nexthop-groups are specified on the cli via:
nexthop-group DONNA
nexthop 192.168.208.1
nexthop 192.168.209.1
nexthop 192.168.210.1
!
PBR sees the nexthop-group and installs these as a default
route with these nexthops starting at table 10000
robot# show pbr nexthop-groups
Nexthop-Group: DONNA Table: 10001 Valid: 1 Installed: 1
Valid: 1 nexthop 192.168.209.1
Valid: 1 nexthop 192.168.210.1
Valid: 1 nexthop 192.168.208.1
I have also introduced the ability to specify a table
in a 'show ip route table XXX' to see the specified tables.
robot# show ip route table 10001
Codes: K - kernel route, C - connected, S - static, R - RIP,
O - OSPF, I - IS-IS, B - BGP, P - PIM, E - EIGRP, N - NHRP,
T - Table, v - VNC, V - VNC-Direct, A - Babel, D - SHARP,
F - PBR,
> - selected route, * - FIB route
F>* 0.0.0.0/0 [0/0] via 192.168.208.1, enp0s8, 00:14:25
* via 192.168.209.1, enp0s9, 00:14:25
* via 192.168.210.1, enp0s10, 00:14:25
PBR tracks PBR-MAPS via the pbr-map command:
!
pbr-map EVA seq 10
match src-ip 4.3.4.0/24
set nexthop-group DONNA
!
pbr-map EVA seq 20
match dst-ip 4.3.5.0/24
set nexthop-group DONNA
!
pbr-maps can have 'match src-ip <prefix>' and 'match dst-ip <prefix>'
to affect decisions about incoming packets. Additionally if you
only have one nexthop to use for a pbr-map you do not need
to setup a nexthop-group and can specify 'set nexthop XXXX'.
To apply the pbr-map to an incoming interface you do this:
interface enp0s10
pbr-policy EVA
!
When a pbr-map is applied to interfaces it can be installed
into the kernel as a rule:
[sharpd@robot frr1]$ ip rule show
0: from all lookup local
309: from 4.3.4.0/24 iif enp0s10 lookup 10001
319: from all to 4.3.5.0/24 iif enp0s10 lookup 10001
1000: from all lookup [l3mdev-table]
32766: from all lookup main
32767: from all lookup default
[sharpd@robot frr1]$ ip route show table 10001
default proto pbr metric 20
nexthop via 192.168.208.1 dev enp0s8 weight 1
nexthop via 192.168.209.1 dev enp0s9 weight 1
nexthop via 192.168.210.1 dev enp0s10 weight 1
The linux kernel now will use the rules and tables to properly
apply these policies.
Signed-off-by: Donald Sharp <sharpd@cumulusnetworks.com>
Signed-off-by: Don Slice <dslice@cumulusnetworks.com>
Signed-off-by: Quentin Young <qlyoung@cumulusnetworks.com>
2018-01-23 19:11:36 +01:00
|
|
|
|
2020-09-29 12:12:55 +02:00
|
|
|
#ifdef HAVE_BGPD
|
2002-12-13 21:15:29 +01:00
|
|
|
/* "exit" in BGP configuration modes: pop one mode level. */
DEFUNSH(VTYSH_BGPD, vtysh_exit_bgpd, vtysh_exit_bgpd_cmd, "exit",
	"Exit current mode and down to previous mode\n")
{
	return vtysh_exit(vty);
}
|
|
|
|
|
2016-10-01 01:03:05 +02:00
|
|
|
/* "quit" is an alias for "exit" in BGP configuration modes. */
DEFUNSH(VTYSH_BGPD, vtysh_quit_bgpd, vtysh_quit_bgpd_cmd, "quit",
	"Exit current mode and down to previous mode\n")
{
	return vtysh_exit_bgpd(self, vty, argc, argv);
}
|
2020-09-29 12:12:55 +02:00
|
|
|
#endif /* HAVE_BGPD */
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2020-09-29 12:12:55 +02:00
|
|
|
#ifdef HAVE_OSPFD
|
2002-12-13 21:15:29 +01:00
|
|
|
/* "exit" in OSPF configuration modes: pop one mode level. */
DEFUNSH(VTYSH_OSPFD, vtysh_exit_ospfd, vtysh_exit_ospfd_cmd, "exit",
	"Exit current mode and down to previous mode\n")
{
	return vtysh_exit(vty);
}
|
|
|
|
|
2016-10-01 01:03:05 +02:00
|
|
|
/* "quit" is an alias for "exit" in OSPF configuration modes. */
DEFUNSH(VTYSH_OSPFD, vtysh_quit_ospfd, vtysh_quit_ospfd_cmd, "quit",
	"Exit current mode and down to previous mode\n")
{
	return vtysh_exit_ospfd(self, vty, argc, argv);
}
|
2020-09-29 12:12:55 +02:00
|
|
|
#endif /* HAVE_OSPFD */
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2020-09-29 12:12:55 +02:00
|
|
|
#ifdef HAVE_EIGRPD
|
2017-03-09 05:07:46 +01:00
|
|
|
/* "exit" in EIGRP configuration modes: pop one mode level. */
DEFUNSH(VTYSH_EIGRPD, vtysh_exit_eigrpd, vtysh_exit_eigrpd_cmd, "exit",
	"Exit current mode and down to previous mode\n")
{
	return vtysh_exit(vty);
}
|
|
|
|
|
|
|
|
/* "quit" is an alias for "exit" in EIGRP configuration modes (calls
 * vtysh_exit() directly rather than delegating to the exit handler). */
DEFUNSH(VTYSH_EIGRPD, vtysh_quit_eigrpd, vtysh_quit_eigrpd_cmd, "quit",
	"Exit current mode and down to previous mode\n")
{
	return vtysh_exit(vty);
}
|
2020-09-29 12:12:55 +02:00
|
|
|
#endif /* HAVE_EIGRPD */
|
2017-03-09 05:07:46 +01:00
|
|
|
|
2020-09-29 12:12:55 +02:00
|
|
|
#ifdef HAVE_BABELD
|
2020-09-28 16:13:40 +02:00
|
|
|
/* "exit" in Babel configuration mode: pop one mode level. */
DEFUNSH(VTYSH_BABELD, vtysh_exit_babeld, vtysh_exit_babeld_cmd, "exit",
	"Exit current mode and down to previous mode\n")
{
	return vtysh_exit(vty);
}
|
|
|
|
|
|
|
|
/* "quit" is an alias for "exit" in Babel configuration mode (calls
 * vtysh_exit() directly rather than delegating to the exit handler). */
DEFUNSH(VTYSH_BABELD, vtysh_quit_babeld, vtysh_quit_babeld_cmd, "quit",
	"Exit current mode and down to previous mode\n")
{
	return vtysh_exit(vty);
}
|
2020-09-29 12:12:55 +02:00
|
|
|
#endif /* HAVE_BABELD */
|
2017-05-14 05:38:14 +02:00
|
|
|
|
2020-09-29 12:12:55 +02:00
|
|
|
#ifdef HAVE_OSPF6D
|
2004-08-25 14:22:00 +02:00
|
|
|
/* "exit" in OSPFv3 configuration modes: pop one mode level. */
DEFUNSH(VTYSH_OSPF6D, vtysh_exit_ospf6d, vtysh_exit_ospf6d_cmd, "exit",
	"Exit current mode and down to previous mode\n")
{
	return vtysh_exit(vty);
}
|
|
|
|
|
2016-10-01 01:03:05 +02:00
|
|
|
/* "quit" is an alias for "exit" in OSPFv3 configuration modes. */
DEFUNSH(VTYSH_OSPF6D, vtysh_quit_ospf6d, vtysh_quit_ospf6d_cmd, "quit",
	"Exit current mode and down to previous mode\n")
{
	return vtysh_exit_ospf6d(self, vty, argc, argv);
}
|
2020-09-29 12:12:55 +02:00
|
|
|
#endif /* HAVE_OSPF6D */
|
2003-03-25 06:07:42 +01:00
|
|
|
|
2016-10-24 21:24:03 +02:00
|
|
|
#if defined(HAVE_LDPD)
|
2016-08-02 00:47:15 +02:00
|
|
|
/* "exit" in LDP configuration modes: pop one mode level. */
DEFUNSH(VTYSH_LDPD, vtysh_exit_ldpd, vtysh_exit_ldpd_cmd, "exit",
	"Exit current mode and down to previous mode\n")
{
	return vtysh_exit(vty);
}
|
|
|
|
|
|
|
|
/* "quit" shares vtysh_exit_ldpd's handler via ALIAS. */
ALIAS(vtysh_exit_ldpd, vtysh_quit_ldpd_cmd, "quit",
      "Exit current mode and down to previous mode\n")
|
2016-10-24 21:24:03 +02:00
|
|
|
#endif
|
2016-08-02 00:47:15 +02:00
|
|
|
|
2020-09-29 12:12:55 +02:00
|
|
|
#ifdef HAVE_ISISD
|
2004-08-25 14:22:00 +02:00
|
|
|
/* "exit" in IS-IS configuration modes: pop one mode level. */
DEFUNSH(VTYSH_ISISD, vtysh_exit_isisd, vtysh_exit_isisd_cmd, "exit",
	"Exit current mode and down to previous mode\n")
{
	return vtysh_exit(vty);
}
|
|
|
|
|
2016-10-01 01:03:05 +02:00
|
|
|
/* "quit" is an alias for "exit" in IS-IS configuration modes. */
DEFUNSH(VTYSH_ISISD, vtysh_quit_isisd, vtysh_quit_isisd_cmd, "quit",
	"Exit current mode and down to previous mode\n")
{
	return vtysh_exit_isisd(self, vty, argc, argv);
}
|
2020-09-29 12:12:55 +02:00
|
|
|
#endif /* HAVE_ISISD */
|
2003-12-23 11:39:08 +01:00
|
|
|
|
2018-06-27 18:26:06 +02:00
|
|
|
#if HAVE_BFDD > 0
|
|
|
|
/* "exit" in BFD configuration modes: pop one mode level. */
DEFUNSH(VTYSH_BFDD, vtysh_exit_bfdd, vtysh_exit_bfdd_cmd, "exit",
	"Exit current mode and down to previous mode\n")
{
	return vtysh_exit(vty);
}
|
|
|
|
|
|
|
|
/* "quit" shares vtysh_exit_bfdd's handler via ALIAS. */
ALIAS(vtysh_exit_bfdd, vtysh_quit_bfdd_cmd, "quit",
      "Exit current mode and down to previous mode\n")
|
|
|
|
#endif
|
|
|
|
|
2020-09-29 12:12:55 +02:00
|
|
|
#ifdef HAVE_FABRICD
|
2018-03-22 15:01:15 +01:00
|
|
|
/* "exit" in OpenFabric configuration modes: pop one mode level. */
DEFUNSH(VTYSH_FABRICD, vtysh_exit_fabricd, vtysh_exit_fabricd_cmd, "exit",
	"Exit current mode and down to previous mode\n")
{
	return vtysh_exit(vty);
}
|
|
|
|
|
|
|
|
/* "quit" is an alias for "exit" in OpenFabric configuration modes. */
DEFUNSH(VTYSH_FABRICD, vtysh_quit_fabricd, vtysh_quit_fabricd_cmd, "quit",
	"Exit current mode and down to previous mode\n")
{
	return vtysh_exit_fabricd(self, vty, argc, argv);
}
|
2020-09-29 12:12:55 +02:00
|
|
|
#endif /* HAVE_FABRICD */
|
2018-03-22 15:01:15 +01:00
|
|
|
|
2020-09-28 16:17:05 +02:00
|
|
|
/* "exit" in key-chain configuration modes: pop one mode level. */
DEFUNSH(VTYSH_KEYS, vtysh_exit_keys, vtysh_exit_keys_cmd, "exit",
	"Exit current mode and down to previous mode\n")
{
	return vtysh_exit(vty);
}
|
|
|
|
|
|
|
|
/* "quit" is an alias for "exit" in key-chain configuration modes. */
DEFUNSH(VTYSH_KEYS, vtysh_quit_keys, vtysh_quit_keys_cmd, "quit",
	"Exit current mode and down to previous mode\n")
{
	return vtysh_exit_keys(self, vty, argc, argv);
}
|
|
|
|
|
2021-06-05 18:34:46 +02:00
|
|
|
/* "exit" in segment-routing configuration modes: pop one mode level. */
DEFUNSH(VTYSH_SR, vtysh_exit_sr, vtysh_exit_sr_cmd, "exit",
	"Exit current mode and down to previous mode\n")
{
	return vtysh_exit(vty);
}
|
|
|
|
|
|
|
|
/* "quit" is an alias for "exit" in segment-routing configuration modes
 * (calls vtysh_exit() directly rather than delegating). */
DEFUNSH(VTYSH_SR, vtysh_quit_sr, vtysh_quit_sr_cmd, "quit",
	"Exit current mode and down to previous mode\n")
{
	return vtysh_exit(vty);
}
|
|
|
|
|
2021-01-11 12:48:08 +01:00
|
|
|
#if defined(HAVE_PATHD)
|
2020-07-31 18:04:20 +02:00
|
|
|
/* "exit" in pathd configuration modes: pop one mode level. */
DEFUNSH(VTYSH_PATHD, vtysh_exit_pathd, vtysh_exit_pathd_cmd, "exit",
	"Exit current mode and down to previous mode\n")
{
	return vtysh_exit(vty);
}
|
|
|
|
|
|
|
|
/* "quit" is an alias for "exit" in pathd configuration modes. */
DEFUNSH(VTYSH_PATHD, vtysh_quit_pathd, vtysh_quit_pathd_cmd, "quit",
	"Exit current mode and down to previous mode\n")
{
	return vtysh_exit_pathd(self, vty, argc, argv);
}
|
2021-01-11 12:48:08 +01:00
|
|
|
#endif /* HAVE_PATHD */
|
2020-07-31 18:04:20 +02:00
|
|
|
|
2004-10-03 22:11:32 +02:00
|
|
|
/* "exit" in "line vty" configuration mode: pop one mode level. */
DEFUNSH(VTYSH_ALL, vtysh_exit_line_vty, vtysh_exit_line_vty_cmd, "exit",
	"Exit current mode and down to previous mode\n")
{
	return vtysh_exit(vty);
}
|
|
|
|
|
2016-10-01 01:03:05 +02:00
|
|
|
/* "quit" is an alias for "exit" in "line vty" configuration mode. */
DEFUNSH(VTYSH_ALL, vtysh_quit_line_vty, vtysh_quit_line_vty_cmd, "quit",
	"Exit current mode and down to previous mode\n")
{
	return vtysh_exit_line_vty(self, vty, argc, argv);
}
|
2004-10-03 22:11:32 +02:00
|
|
|
|
2004-08-26 15:08:30 +02:00
|
|
|
/* "interface IFNAME [vrf NAME]": enter interface configuration mode.
 * vtysh only tracks the mode change; the daemons handle the rest. */
DEFUNSH(VTYSH_INTERFACE, vtysh_interface, vtysh_interface_cmd,
	"interface IFNAME [vrf NAME]",
	"Select an interface to configure\n"
	"Interface's name\n" VRF_CMD_HELP_STR)
{
	vty->node = INTERFACE_NODE;
	return CMD_SUCCESS;
}
|
|
|
|
|
2017-08-02 01:16:28 +02:00
|
|
|
/* "pseudowire IFNAME": enter static pseudowire configuration mode. */
DEFUNSH(VTYSH_ZEBRA, vtysh_pseudowire, vtysh_pseudowire_cmd,
	"pseudowire IFNAME",
	"Static pseudowire configuration\n"
	"Pseudowire name\n")
{
	vty->node = PW_NODE;
	return CMD_SUCCESS;
}
|
|
|
|
|
2019-12-17 15:56:26 +01:00
|
|
|
/* "nexthop-group NHGNAME": enter nexthop-group configuration mode. */
DEFUNSH(VTYSH_NH_GROUP,
	vtysh_nexthop_group, vtysh_nexthop_group_cmd,
	"nexthop-group NHGNAME",
	"Nexthop Group configuration\n"
	"Name of the Nexthop Group\n")
{
	vty->node = NH_GROUP_NODE;
	return CMD_SUCCESS;
}
|
|
|
|
|
2019-12-17 15:56:26 +01:00
|
|
|
/* "no nexthop-group NHGNAME": DEFSH registers the command for
 * forwarding to the VTYSH_NH_GROUP daemons; vtysh itself needs no
 * local handler body (no mode change happens on the vtysh side). */
DEFSH(VTYSH_NH_GROUP, vtysh_no_nexthop_group_cmd,
	"no nexthop-group NHGNAME",
	NO_STR
	"Nexthop Group Configuration\n"
	"Name of the Nexthop Group\n")
|
|
|
|
|
2016-02-02 13:34:29 +01:00
|
|
|
/* "vrf NAME": enter VRF configuration mode. */
DEFUNSH(VTYSH_VRF, vtysh_vrf, vtysh_vrf_cmd, "vrf NAME",
	"Select a VRF to configure\n"
	"VRF's name\n")
{
	vty->node = VRF_NODE;
	return CMD_SUCCESS;
}
|
|
|
|
|
|
|
|
/* "exit" in VRF configuration mode: pop one mode level. */
DEFUNSH(VTYSH_VRF, vtysh_exit_vrf, vtysh_exit_vrf_cmd, "exit",
	"Exit current mode and down to previous mode\n")
{
	return vtysh_exit(vty);
}
|
|
|
|
|
2016-10-01 01:03:05 +02:00
|
|
|
/* "quit" is an alias for "exit" in VRF configuration mode. */
DEFUNSH(VTYSH_VRF, vtysh_quit_vrf, vtysh_quit_vrf_cmd, "quit",
	"Exit current mode and down to previous mode\n")
{
	return vtysh_exit_vrf(self, vty, argc, argv);
}
|
2016-02-02 13:34:29 +01:00
|
|
|
|
2019-12-17 15:56:26 +01:00
|
|
|
/* "exit" in nexthop-group configuration mode: pop one mode level. */
DEFUNSH(VTYSH_NH_GROUP,
	vtysh_exit_nexthop_group, vtysh_exit_nexthop_group_cmd,
	"exit", "Exit current mode and down to previous mode\n")
{
	return vtysh_exit(vty);
}
|
|
|
|
|
2019-12-17 15:56:26 +01:00
|
|
|
/* "quit" is an alias for "exit" in nexthop-group configuration mode. */
DEFUNSH(VTYSH_NH_GROUP,
	vtysh_quit_nexthop_group, vtysh_quit_nexthop_group_cmd,
	"quit", "Exit current mode and down to previous mode\n")
{
	return vtysh_exit_nexthop_group(self, vty, argc, argv);
}
|
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
/* "exit" in interface configuration mode: pop one mode level. */
DEFUNSH(VTYSH_INTERFACE, vtysh_exit_interface, vtysh_exit_interface_cmd, "exit",
	"Exit current mode and down to previous mode\n")
{
	return vtysh_exit(vty);
}
|
|
|
|
|
2016-10-01 01:03:05 +02:00
|
|
|
/* "quit" is an alias for "exit" in interface configuration mode. */
DEFUNSH(VTYSH_INTERFACE, vtysh_quit_interface, vtysh_quit_interface_cmd, "quit",
	"Exit current mode and down to previous mode\n")
{
	return vtysh_exit_interface(self, vty, argc, argv);
}
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2021-07-21 16:22:14 +02:00
|
|
|
/* "exit" in pseudowire configuration mode: pop one mode level. */
DEFUNSH(VTYSH_ZEBRA, vtysh_exit_pseudowire, vtysh_exit_pseudowire_cmd, "exit",
	"Exit current mode and down to previous mode\n")
{
	return vtysh_exit(vty);
}
|
|
|
|
|
|
|
|
/* "quit" is an alias for "exit" in pseudowire configuration mode. */
DEFUNSH(VTYSH_ZEBRA, vtysh_quit_pseudowire, vtysh_quit_pseudowire_cmd, "quit",
	"Exit current mode and down to previous mode\n")
{
	return vtysh_exit_pseudowire(self, vty, argc, argv);
}
|
|
|
|
|
2020-10-01 18:12:10 +02:00
|
|
|
static char *do_prepend(struct vty *vty, struct cmd_token **argv, int argc)
|
|
|
|
{
|
|
|
|
const char *argstr[argc + 1];
|
|
|
|
int i, off = 0;
|
|
|
|
|
|
|
|
if (vty->node != VIEW_NODE) {
|
|
|
|
off = 1;
|
|
|
|
argstr[0] = "do";
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < argc; i++)
|
|
|
|
argstr[i + off] = argv[i]->arg;
|
|
|
|
|
|
|
|
return frrstr_join(argstr, argc + off, " ");
|
|
|
|
}
|
|
|
|
|
2023-01-26 14:53:47 +01:00
|
|
|
#pragma GCC diagnostic push
|
|
|
|
#pragma GCC diagnostic ignored "-Wformat-nonliteral"
|
|
|
|
/* 'headline' is a format string with a %s for the daemon name
|
|
|
|
*
|
|
|
|
* Also for some reason GCC emits the warning on the end of the function
|
|
|
|
* (optimization maybe?) rather than on the vty_out line, so this pragma
|
|
|
|
* wraps the entire function rather than just the vty_out line.
|
|
|
|
*/
|
|
|
|
|
2020-10-01 18:12:10 +02:00
|
|
|
/* Forward the current command to every connected daemon, printing
 * 'headline' (a format string with a single %s for the daemon name)
 * before each daemon's output.
 * NOTE(review): 'ret' is overwritten on each iteration, so the value
 * returned is the result from the last daemon queried — confirm this
 * is intentional.
 */
static int show_per_daemon(struct vty *vty, struct cmd_token **argv, int argc,
			   const char *headline)
{
	unsigned int i;
	int ret = CMD_SUCCESS;
	char *line = do_prepend(vty, argv, argc);

	for (i = 0; i < array_size(vtysh_client); i++)
		/* fd >= 0: daemon connected; ->next: extra instances exist */
		if (vtysh_client[i].fd >= 0 || vtysh_client[i].next) {
			vty_out(vty, headline, vtysh_client[i].name);
			ret = vtysh_client_execute(&vtysh_client[i], line);
			vty_out(vty, "\n");
		}

	XFREE(MTYPE_TMP, line);

	return ret;
}
|
2023-01-26 14:53:47 +01:00
|
|
|
#pragma GCC diagnostic pop
|
2020-10-01 18:12:10 +02:00
|
|
|
|
|
|
|
static int show_one_daemon(struct vty *vty, struct cmd_token **argv, int argc,
|
|
|
|
const char *name)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
char *line = do_prepend(vty, argv, argc);
|
|
|
|
|
|
|
|
ret = vtysh_client_execute_name(name, line);
|
|
|
|
|
|
|
|
XFREE(MTYPE_TMP, line);
|
|
|
|
|
2018-06-17 00:12:54 +02:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2022-02-23 16:14:53 +01:00
|
|
|
/* "show thread timers": forwarded to every daemon with a header. */
DEFUN (vtysh_show_thread_timer,
       vtysh_show_thread_timer_cmd,
       "show thread timers",
       SHOW_STR
       "Thread information\n"
       "Show all timers and how long they have in the system\n")
{
	return show_per_daemon(vty, argv, argc, "Thread timers for %s:\n");
}
|
|
|
|
|
2020-10-01 18:12:10 +02:00
|
|
|
/* "show thread poll": forwarded to every daemon with a header. */
DEFUN (vtysh_show_poll,
       vtysh_show_poll_cmd,
       "show thread poll",
       SHOW_STR
       "Thread information\n"
       "Thread Poll Information\n")
{
	return show_per_daemon(vty, argv, argc, "Thread statistics for %s:\n");
}
|
|
|
|
|
2015-08-20 03:33:13 +02:00
|
|
|
/* "show thread cpu [FILTER]": forwarded to every daemon with a header.
 * The optional FILTER token travels along in argv to each daemon. */
DEFUN (vtysh_show_thread,
       vtysh_show_thread_cmd,
       "show thread cpu [FILTER]",
       SHOW_STR
       "Thread information\n"
       "Thread CPU usage\n"
       "Display filter (rwtexb)\n")
{
	return show_per_daemon(vty, argv, argc, "Thread statistics for %s:\n");
}
|
|
|
|
|
|
|
|
/* "show work-queues": forwarded to every daemon with a header. */
DEFUN (vtysh_show_work_queues,
       vtysh_show_work_queues_cmd,
       "show work-queues",
       SHOW_STR
       "Work Queue information\n")
{
	return show_per_daemon(vty, argv, argc,
			       "Work queue statistics for %s:\n");
}
|
|
|
|
|
2016-02-25 13:29:29 +01:00
|
|
|
/* "show work-queues DAEMON": forwarded only to the named daemon.
 * argc - 1 drops the trailing daemon-name token before forwarding. */
DEFUN (vtysh_show_work_queues_daemon,
       vtysh_show_work_queues_daemon_cmd,
       "show work-queues " DAEMONS_LIST,
       SHOW_STR
       "Work Queue information\n"
       DAEMONS_STR)
{
	return show_one_daemon(vty, argv, argc - 1, argv[argc - 1]->text);
}
|
|
|
|
|
Update Traffic Engineering Support for OSPFD
NOTE: I am squashing several commits together because they
do not independently compile and we need this ability to
do any type of sane testing on the patches. Since this
series builds together I am doing this. -DBS
This new structure is the basis to get new link parameters for
Traffic Engineering from Zebra/interface layer to OSPFD and ISISD
for the support of Traffic Engineering
* lib/if.[c,h]: link parameters structure and get/set functions
* lib/command.[c,h]: creation of a new link-node
* lib/zclient.[c,h]: modification to the ZBUS message to convey the
link parameters structure
* lib/zebra.h: New ZBUS message
Signed-off-by: Olivier Dugeon <olivier.dugeon@orange.com>
Add support for IEEE 754 format
* lib/stream.[c,h]: Add stream_get{f,d} and stream_put{f,d}) demux and muxers to
safely convert between big-endian IEEE-754 single and double binary
format, as used in IETF RFCs, and C99. Implementation depends on host
using __STDC_IEC_559__, which should be everything we care about. Should
correctly error out otherwise.
* lib/network.[c,h]: Add ntohf and htonf converter
* lib/memtypes.c: Add new memory type for Traffic Engineering support
Signed-off-by: Olivier Dugeon <olivier.dugeon@orange.com>
Add link parameters support to Zebra
* zebra/interface.c:
- Add new link-params CLI commands
- Add new functions to set/get link parameters for interface
* zebra/redistribute.[c,h]: Add new function to propagate link parameters
to routing daemon (essentially OSPFD and ISISD) for Traffic Engineering.
* zebra/redistribute_null.c: Add new function
zebra_interface_parameters_update()
* zebra/zserv.[c,h]: Add new functions to send link parameters
Signed-off-by: Olivier Dugeon <olivier.dugeon@orange.com>
Add support of new link-params CLI to vtysh
In vtysh_config.c/vtysh_config_parse_line(), it is not possible to continue
to use the ordered version for adding line i.e. config_add_line_uniq() to print
Interface CLI commands as it completely breaks the new LINK_PARAMS_NODE.
Signed-off-by: Olivier Dugeon <olivier.dugeon@orange.com>
Update Traffic Engineering support for OSPFD
These patches update original code to RFC3630 (OSPF-TE) and add support of
RFC5392 (Inter-AS v2) & RFC7471 (TE metric extensions) and partial support
of RFC6827 (ASON - GMPLS).
* ospfd/ospf_dump.[c,h]: Add new dump functions for Traffic Engineering
* ospfd/ospf_opaque.[c,h]: Add new TLV code points for RFC5392
* ospfd/ospf_packet.c: Update checking of OSPF_OPTION
* ospfd/ospf_vty.[c,h]: Update ospf_str2area_id
* ospfd/ospf_zebra.c: Add new function ospf_interface_link_params() to get
Link Parameters information from the interface to populate Traffic Engineering
metrics
* ospfd/ospfd.[c,h]: Update OSPF_OPTION flags (T -> MT and new DN)
* ospfd/ospf_te.[c,h]: Major modifications to update the code to new
link parameters structure and new RFCs
Signed-off-by: Olivier Dugeon <olivier.dugeon@orange.com>
tmp
2016-04-19 16:21:46 +02:00
|
|
|
/* Enter link-params config mode (Traffic Engineering parameters). */
DEFUNSH(VTYSH_ZEBRA, vtysh_link_params, vtysh_link_params_cmd, "link-params",
	LINK_PARAMS_STR)
{
	vty->node = LINK_PARAMS_NODE;
	return CMD_SUCCESS;
}
|
|
|
|
|
2016-11-18 21:42:41 +01:00
|
|
|
/* "exit-link-params": leave link-params mode back to interface mode. */
DEFUNSH(VTYSH_ZEBRA, exit_link_params, exit_link_params_cmd, "exit-link-params",
	"Exit from Link Params configuration node\n")
{
	if (vty->node == LINK_PARAMS_NODE)
		vty->node = INTERFACE_NODE;
	return CMD_SUCCESS;
}
|
|
|
|
|
2021-07-21 16:22:14 +02:00
|
|
|
/* "exit" in link-params mode: return to interface mode. */
DEFUNSH(VTYSH_ZEBRA, vtysh_exit_link_params, vtysh_exit_link_params_cmd, "exit",
	"Exit current mode and down to previous mode\n")
{
	if (vty->node == LINK_PARAMS_NODE)
		vty->node = INTERFACE_NODE;
	return CMD_SUCCESS;
}
|
|
|
|
|
|
|
|
/* "quit" in link-params mode: alias for "exit". */
DEFUNSH(VTYSH_ZEBRA, vtysh_quit_link_params, vtysh_quit_link_params_cmd, "quit",
	"Exit current mode and down to previous mode\n")
{
	return vtysh_exit_link_params(self, vty, argc, argv);
}
|
|
|
|
|
2018-02-28 22:14:45 +01:00
|
|
|
/* Hidden "[no] debug all" — accepted by vtysh itself so the config
 * parser doesn't reject it; the daemons do the real work (daemon mask
 * 0x00 means vtysh forwards to none explicitly). */
DEFUNSH_HIDDEN (0x00,
		vtysh_debug_all,
		vtysh_debug_all_cmd,
		"[no] debug all",
		NO_STR
		DEBUG_STR
		"Toggle all debugs on or off\n")
{
	return CMD_SUCCESS;
}
|
|
|
|
|
2017-08-18 18:50:13 +02:00
|
|
|
/* "show debugging": forwarded to every daemon (no per-daemon header). */
DEFUN (vtysh_show_debugging,
       vtysh_show_debugging_cmd,
       "show debugging",
       SHOW_STR
       DEBUG_STR)
{
	return show_per_daemon(vty, argv, argc, "");
}
|
|
|
|
|
2017-08-21 14:34:31 +02:00
|
|
|
/* "show debugging hashtable [statistics]": print a legend explaining
 * the statistics columns, then forward to every daemon.  The optional
 * trailing "statistics" token is stripped from argc before forwarding
 * so daemons receive the base command. */
DEFUN (vtysh_show_debugging_hashtable,
       vtysh_show_debugging_hashtable_cmd,
       "show debugging hashtable [statistics]",
       SHOW_STR
       DEBUG_STR
       "Statistics about hash tables\n"
       "Statistics about hash tables\n")
{
	bool stats = strmatch(argv[argc - 1]->text, "statistics");

	/* Legend for the columns the daemons will print. */
	vty_out(vty, "\n");
	vty_out(vty,
		"Load factor (LF) - average number of elements across all buckets\n");
	vty_out(vty,
		"Full load factor (FLF) - average number of elements across full buckets\n\n");
	vty_out(vty,
		"Standard deviation (SD) is calculated for both the LF and FLF\n");
	vty_out(vty,
		"and indicates the typical deviation of bucket chain length\n");
	vty_out(vty, "from the value in the corresponding load factor.\n\n");

	return show_per_daemon(vty, argv, stats ? argc - 1 : argc,
			       "Hashtable statistics for %s:\n");
}
|
|
|
|
|
2018-06-14 01:08:30 +02:00
|
|
|
DEFUN (vtysh_show_error_code,
|
|
|
|
vtysh_show_error_code_cmd,
|
2018-06-20 20:51:04 +02:00
|
|
|
"show error <(1-4294967296)|all> [json]",
|
2018-06-14 01:08:30 +02:00
|
|
|
SHOW_STR
|
|
|
|
"Information on errors\n"
|
2018-06-20 20:51:04 +02:00
|
|
|
"Error code to get info about\n"
|
|
|
|
"Information on all errors\n"
|
|
|
|
JSON_STR)
|
2018-06-14 01:08:30 +02:00
|
|
|
{
|
2019-01-08 22:33:49 +01:00
|
|
|
uint32_t arg = 0;
|
|
|
|
|
|
|
|
if (!strmatch(argv[2]->text, "all"))
|
|
|
|
arg = strtoul(argv[2]->arg, NULL, 10);
|
|
|
|
|
|
|
|
/* If it's not a shared code, send it to all the daemons */
|
|
|
|
if (arg < LIB_FERR_START || arg > LIB_FERR_END) {
|
2020-10-01 18:12:10 +02:00
|
|
|
show_per_daemon(vty, argv, argc, "");
|
2019-01-08 22:33:49 +01:00
|
|
|
/* Otherwise, print it ourselves to avoid duplication */
|
|
|
|
} else {
|
|
|
|
bool json = strmatch(argv[argc - 1]->text, "json");
|
2019-07-29 19:19:05 +02:00
|
|
|
|
2019-01-08 22:33:49 +01:00
|
|
|
if (!strmatch(argv[2]->text, "all"))
|
|
|
|
arg = strtoul(argv[2]->arg, NULL, 10);
|
|
|
|
|
|
|
|
log_ref_display(vty, arg, json);
|
|
|
|
}
|
2018-06-20 20:51:04 +02:00
|
|
|
|
2019-01-08 22:33:49 +01:00
|
|
|
return CMD_SUCCESS;
|
2018-06-14 01:08:30 +02:00
|
|
|
}
|
|
|
|
|
2019-09-23 14:38:02 +02:00
|
|
|
/* Northbound. */
|
2021-03-24 11:16:15 +01:00
|
|
|
/* Northbound: fetch the running configuration from one daemon,
 * optionally rendered as json/xml, optionally translated, optionally
 * including default values.  The trailing daemon token is stripped
 * (argc - 1) before forwarding. */
DEFUN_HIDDEN (show_config_running,
	      show_config_running_cmd,
	      "show configuration running\
	          [<json|xml> [translate WORD]]\
	          [with-defaults] " DAEMONS_LIST,
	      SHOW_STR
	      "Configuration information\n"
	      "Running configuration\n"
	      "Change output format to JSON\n"
	      "Change output format to XML\n"
	      "Translate output\n"
	      "YANG module translator\n"
	      "Show default values\n"
	      DAEMONS_STR)
{
	return show_one_daemon(vty, argv, argc - 1, argv[argc - 1]->text);
}
|
|
|
|
|
2019-09-23 14:38:03 +02:00
|
|
|
/* "show yang operational-data XPATH ... DAEMON": query one daemon's
 * YANG operational state at the given XPath.  The trailing daemon
 * token is stripped (argc - 1) before forwarding. */
DEFUN (show_yang_operational_data,
       show_yang_operational_data_cmd,
       "show yang operational-data XPATH\
         [{\
	   format <json|xml>\
	   |translate WORD\
	   |with-config\
	 }] " DAEMONS_LIST,
       SHOW_STR
       "YANG information\n"
       "Show YANG operational data\n"
       "XPath expression specifying the YANG data path\n"
       "Set the output format\n"
       "JavaScript Object Notation\n"
       "Extensible Markup Language\n"
       "Translate operational data\n"
       "YANG module translator\n"
       "Merge configuration data\n"
       DAEMONS_STR)
{
	return show_one_daemon(vty, argv, argc - 1, argv[argc - 1]->text);
}
|
|
|
|
|
2021-05-04 16:41:58 +02:00
|
|
|
/* "show yang module ... DAEMON": list YANG modules loaded by one
 * daemon.  The trailing daemon token is stripped before forwarding. */
DEFUN(show_yang_module, show_yang_module_cmd,
      "show yang module [module-translator WORD] " DAEMONS_LIST,
      SHOW_STR
      "YANG information\n"
      "Show loaded modules\n"
      "YANG module translator\n"
      "YANG module translator\n" DAEMONS_STR)
{
	return show_one_daemon(vty, argv, argc - 1, argv[argc - 1]->text);
}
|
|
|
|
|
|
|
|
/* "show yang module ... WORD <format> DAEMON": show one YANG module
 * from one daemon in the requested rendering.  The trailing daemon
 * token is stripped before forwarding. */
DEFUN(show_yang_module_detail, show_yang_module_detail_cmd,
      "show yang module\
          [module-translator WORD]\
          WORD <compiled|summary|tree|yang|yin> " DAEMONS_LIST,
      SHOW_STR
      "YANG information\n"
      "Show loaded modules\n"
      "YANG module translator\n"
      "YANG module translator\n"
      "Module name\n"
      "Display compiled module in YANG format\n"
      "Display summary information about the module\n"
      "Display module in the tree (RFC 8340) format\n"
      "Display module in the YANG format\n"
      "Display module in the YIN format\n" DAEMONS_STR)
{
	return show_one_daemon(vty, argv, argc - 1, argv[argc - 1]->text);
}
|
|
|
|
|
|
|
|
|
2019-09-23 14:38:02 +02:00
|
|
|
/* "[no] debug northbound ...": accepted by vtysh so it can be stored
 * in the integrated config; the daemons implement the actual debug
 * toggles, hence the empty body. */
DEFUNSH(VTYSH_ALL, debug_nb,
	debug_nb_cmd,
	"[no] debug northbound\
	   [<\
	    callbacks [{configuration|state|rpc}]\
	    |notifications\
	    |events\
	    |libyang\
	   >]",
	NO_STR
	DEBUG_STR
	"Northbound debugging\n"
	"Callbacks\n"
	"Configuration\n"
	"State\n"
	"RPC\n"
	"Notifications\n"
	"Events\n"
	"libyang debugging\n")
{
	return CMD_SUCCESS;
}
|
|
|
|
|
2020-12-03 18:36:29 +01:00
|
|
|
DEFUN (vtysh_show_history,
|
|
|
|
vtysh_show_history_cmd,
|
|
|
|
"show history",
|
|
|
|
SHOW_STR
|
|
|
|
"The list of commands stored in history\n")
|
|
|
|
{
|
|
|
|
HIST_ENTRY **hlist = history_list();
|
|
|
|
int i = 0;
|
|
|
|
|
|
|
|
while (hlist[i]) {
|
|
|
|
vty_out(vty, "%s\n", hlist[i]->line);
|
|
|
|
i++;
|
|
|
|
}
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2017-04-04 16:12:59 +02:00
|
|
|
/* Memory */
|
|
|
|
DEFUN (vtysh_show_memory,
|
|
|
|
vtysh_show_memory_cmd,
|
2021-04-08 17:47:43 +02:00
|
|
|
"show memory [" DAEMONS_LIST "]",
|
2017-04-04 16:12:59 +02:00
|
|
|
SHOW_STR
|
2021-04-08 17:47:43 +02:00
|
|
|
"Memory statistics\n"
|
|
|
|
DAEMONS_STR)
|
2017-04-04 16:12:59 +02:00
|
|
|
{
|
2021-04-08 17:47:43 +02:00
|
|
|
if (argc == 3)
|
|
|
|
return show_one_daemon(vty, argv, argc - 1,
|
|
|
|
argv[argc - 1]->text);
|
|
|
|
|
2020-10-01 18:12:10 +02:00
|
|
|
return show_per_daemon(vty, argv, argc, "Memory statistics for %s:\n");
|
2017-04-04 16:12:59 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* "show modules": forwarded to every daemon with a header. */
DEFUN (vtysh_show_modules,
       vtysh_show_modules_cmd,
       "show modules",
       SHOW_STR
       "Loaded modules\n")
{
	return show_per_daemon(vty, argv, argc, "Module information for %s:\n");
}
|
|
|
|
|
2004-08-26 15:08:30 +02:00
|
|
|
/* Logging commands. */
|
2006-05-24 00:10:01 +02:00
|
|
|
/* Logging commands. */
/* "show logging": forwarded to every daemon with a header. */
DEFUN (vtysh_show_logging,
       vtysh_show_logging_cmd,
       "show logging",
       SHOW_STR
       "Show current logging configuration\n")
{
	return show_per_daemon(vty, argv, argc,
			       "Logging configuration for %s:\n");
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2019-05-30 22:14:25 +02:00
|
|
|
/* "[no] debug memstats-at-exit": stored for the config file; the
 * daemons act on it, so vtysh just accepts it. */
DEFUNSH(VTYSH_ALL, vtysh_debug_memstats,
	vtysh_debug_memstats_cmd, "[no] debug memstats-at-exit",
	NO_STR
	"Debug\n"
	"Print memory statistics at exit\n")
{
	return CMD_SUCCESS;
}
|
|
|
|
|
2021-11-16 13:29:44 +01:00
|
|
|
/* "[no] debug unique-id UID backtrace": toggle backtrace logging for a
 * single log-message unique ID on every connected daemon.  Succeeds if
 * at least one daemon recognizes the UID; daemons replying
 * CMD_ERR_NOTHING_TODO (UID unknown to them) are silently skipped. */
DEFUN(vtysh_debug_uid_backtrace,
      vtysh_debug_uid_backtrace_cmd,
      "[no] debug unique-id UID backtrace",
      NO_STR
      DEBUG_STR
      "Options per individual log message, by unique ID\n"
      "Log message unique ID (XXXXX-XXXXX)\n"
      "Add backtrace to log when message is printed\n")
{
	unsigned int i, ok = 0;
	int err = CMD_SUCCESS, ret;
	const char *uid;
	char line[64];

	/* The UID token index shifts by one when "no" is present. */
	if (!strcmp(argv[0]->text, "no")) {
		uid = argv[3]->arg;
		snprintfrr(line, sizeof(line),
			   "no debug unique-id %s backtrace", uid);
	} else {
		uid = argv[2]->arg;
		snprintfrr(line, sizeof(line), "debug unique-id %s backtrace",
			   uid);
	}

	for (i = 0; i < array_size(vtysh_client); i++)
		if (vtysh_client[i].fd >= 0 || vtysh_client[i].next) {
			ret = vtysh_client_execute(&vtysh_client[i], line);
			switch (ret) {
			case CMD_SUCCESS:
				ok++;
				break;
			case CMD_ERR_NOTHING_TODO:
				/* ignore this daemon
				 *
				 * note this doesn't need to handle instances
				 * of the same daemon individually because
				 * the same daemon will have the same UIDs
				 */
				break;
			default:
				/* keep the first real error encountered */
				if (err == CMD_SUCCESS)
					err = ret;
				break;
			}
		}

	if (err == CMD_SUCCESS && !ok) {
		vty_out(vty, "%% no running daemon recognizes unique-ID %s\n",
			uid);
		err = CMD_WARNING;
	}
	return err;
}
|
|
|
|
|
2022-07-01 22:24:52 +02:00
|
|
|
/* "allow-reserved-ranges": stored for the config file; daemons act. */
DEFUNSH(VTYSH_ALL, vtysh_allow_reserved_ranges, vtysh_allow_reserved_ranges_cmd,
	"allow-reserved-ranges",
	"Allow using IPv4 (Class E) reserved IP space\n")
{
	return CMD_SUCCESS;
}
|
|
|
|
|
|
|
|
/* "no allow-reserved-ranges": negation form, handled by daemons. */
DEFUNSH(VTYSH_ALL, no_vtysh_allow_reserved_ranges,
	no_vtysh_allow_reserved_ranges_cmd, "no allow-reserved-ranges",
	NO_STR "Allow using IPv4 (Class E) reserved IP space\n")
{
	return CMD_SUCCESS;
}
|
|
|
|
|
2004-10-03 22:11:32 +02:00
|
|
|
/* "service password-encryption": accepted on behalf of all daemons. */
DEFUNSH(VTYSH_ALL, vtysh_service_password_encrypt,
	vtysh_service_password_encrypt_cmd, "service password-encryption",
	"Set up miscellaneous service\n"
	"Enable encrypted passwords\n")
{
	return CMD_SUCCESS;
}
|
|
|
|
|
|
|
|
/* "no service password-encryption": negation form for all daemons. */
DEFUNSH(VTYSH_ALL, no_vtysh_service_password_encrypt,
	no_vtysh_service_password_encrypt_cmd, "no service password-encryption",
	NO_STR
	"Set up miscellaneous service\n"
	"Enable encrypted passwords\n")
{
	return CMD_SUCCESS;
}
|
|
|
|
|
|
|
|
/* "password [(8-8)] LINE": accepted for all daemons; the daemons store
 * the password, vtysh just passes it through. */
DEFUNSH(VTYSH_ALL, vtysh_config_password, vtysh_password_cmd,
	"password [(8-8)] LINE",
	"Modify the terminal connection password\n"
	"Specifies a HIDDEN password will follow\n"
	"The password string\n")
{
	return CMD_SUCCESS;
}
|
|
|
|
|
2018-05-11 02:54:30 +02:00
|
|
|
/* "no password": warn the operator about removing the password, then
 * let the daemons process the removal. */
DEFUNSH(VTYSH_ALL, no_vtysh_config_password, no_vtysh_password_cmd,
	"no password", NO_STR
	"Modify the terminal connection password\n")
{
	vty_out(vty, NO_PASSWD_CMD_WARNING);

	return CMD_SUCCESS;
}
|
|
|
|
|
2004-10-03 22:11:32 +02:00
|
|
|
/* "enable password [(8-8)] LINE": accepted for all daemons. */
DEFUNSH(VTYSH_ALL, vtysh_config_enable_password, vtysh_enable_password_cmd,
	"enable password [(8-8)] LINE",
	"Modify enable password parameters\n"
	"Assign the privileged level password\n"
	"Specifies a HIDDEN password will follow\n"
	"The 'enable' password string\n")
{
	return CMD_SUCCESS;
}
|
|
|
|
|
|
|
|
/* "no enable password": warn about removal, then let daemons act. */
DEFUNSH(VTYSH_ALL, no_vtysh_config_enable_password,
	no_vtysh_enable_password_cmd, "no enable password", NO_STR
	"Modify enable password parameters\n"
	"Assign the privileged level password\n")
{
	vty_out(vty, NO_PASSWD_CMD_WARNING);

	return CMD_SUCCESS;
}
|
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
/* "write terminal [DAEMON] [no-header]": dump the running config.
 * Each daemon's config is collected via "do write terminal", then the
 * vtysh-specific configuration is merged in and everything is paged
 * to the terminal. */
DEFUN (vtysh_write_terminal,
       vtysh_write_terminal_cmd,
       "write terminal ["DAEMONS_LIST"] [no-header]",
       "Write running configuration to memory, network, or terminal\n"
       "Write to terminal\n"
       DAEMONS_STR
       "Skip \"Building configuration...\" header\n")
{
	unsigned int i;
	char line[] = "do write terminal";

	/* If "no-header" is the last token, drop it from argc so the
	 * daemon-name test below (argv[2]) still lines up. */
	if (!strcmp(argv[argc - 1]->arg, "no-header"))
		argc--;
	else {
		vty_out(vty, "Building configuration...\n");
		vty_out(vty, "\nCurrent configuration:\n");
		vty_out(vty, "!\n");
	}

	/* argc < 3 means no daemon was named: collect from all. */
	for (i = 0; i < array_size(vtysh_client); i++)
		if ((argc < 3)
		    || (strmatch(vtysh_client[i].name, argv[2]->text)))
			vtysh_client_config(&vtysh_client[i], line);

	/* Integrate vtysh specific configuration. */
	vty_open_pager(vty);
	vtysh_config_write();
	vtysh_config_dump();
	vty_close_pager(vty);
	vty_out(vty, "end\n");

	return CMD_SUCCESS;
}
|
|
|
|
|
2016-10-01 01:03:05 +02:00
|
|
|
/* "show running-config": alias that delegates to "write terminal". */
DEFUN (vtysh_show_running_config,
       vtysh_show_running_config_cmd,
       "show running-config ["DAEMONS_LIST"] [no-header]",
       SHOW_STR
       "Current operating configuration\n"
       DAEMONS_STR
       "Skip \"Building configuration...\" header\n")
{
	return vtysh_write_terminal(self, vty, argc, argv);
}
|
|
|
|
|
2004-10-03 22:11:32 +02:00
|
|
|
/* "service integrated-vtysh-config": write config as one integrated
 * file instead of per-daemon files. */
DEFUN (vtysh_integrated_config,
       vtysh_integrated_config_cmd,
       "service integrated-vtysh-config",
       "Set up miscellaneous service\n"
       "Write configuration into integrated file\n")
{
	vtysh_write_integrated = WRITE_INTEGRATED_YES;
	return CMD_SUCCESS;
}
|
|
|
|
|
2004-10-03 22:11:32 +02:00
|
|
|
/* "no service integrated-vtysh-config": revert to per-daemon files. */
DEFUN (no_vtysh_integrated_config,
       no_vtysh_integrated_config_cmd,
       "no service integrated-vtysh-config",
       NO_STR
       "Set up miscellaneous service\n"
       "Write configuration into integrated file\n")
{
	vtysh_write_integrated = WRITE_INTEGRATED_NO;
	return CMD_SUCCESS;
}
|
|
|
|
|
2015-05-20 03:29:18 +02:00
|
|
|
static void backup_config_file(const char *fbackup)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
|
|
|
char *integrate_sav = NULL;
|
|
|
|
|
2019-05-06 23:28:55 +02:00
|
|
|
size_t integrate_sav_sz = strlen(fbackup) + strlen(CONF_BACKUP_EXT) + 1;
|
|
|
|
integrate_sav = malloc(integrate_sav_sz);
|
|
|
|
strlcpy(integrate_sav, fbackup, integrate_sav_sz);
|
|
|
|
strlcat(integrate_sav, CONF_BACKUP_EXT, integrate_sav_sz);
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2004-08-26 15:08:30 +02:00
|
|
|
/* Move current configuration file to backup config file. */
|
2020-04-02 01:59:32 +02:00
|
|
|
if (unlink(integrate_sav) != 0 && errno != ENOENT)
|
|
|
|
vty_out(vty, "Unlink failed for %s: %s\n", integrate_sav,
|
|
|
|
strerror(errno));
|
|
|
|
if (rename(fbackup, integrate_sav) != 0 && errno != ENOENT)
|
|
|
|
vty_out(vty, "Error renaming %s to %s: %s\n", fbackup,
|
|
|
|
integrate_sav, strerror(errno));
|
2004-08-26 15:08:30 +02:00
|
|
|
free(integrate_sav);
|
2015-05-20 03:29:18 +02:00
|
|
|
}
|
|
|
|
|
2016-11-08 19:01:06 +01:00
|
|
|
int vtysh_write_config_integrated(void)
|
2015-05-20 03:29:18 +02:00
|
|
|
{
|
2018-03-27 21:13:34 +02:00
|
|
|
unsigned int i;
|
2022-04-07 14:44:23 +02:00
|
|
|
char line[] = "do write terminal";
|
2016-11-08 18:22:30 +01:00
|
|
|
FILE *fp;
|
2016-11-08 23:36:01 +01:00
|
|
|
int fd;
|
2018-06-18 17:12:27 +02:00
|
|
|
#ifdef FRR_USER
|
2016-11-08 23:36:01 +01:00
|
|
|
struct passwd *pwentry;
|
2018-06-18 17:12:27 +02:00
|
|
|
#endif
|
|
|
|
#ifdef FRR_GROUP
|
2016-11-08 23:36:01 +01:00
|
|
|
struct group *grentry;
|
2018-06-18 17:12:27 +02:00
|
|
|
#endif
|
2016-11-08 23:36:01 +01:00
|
|
|
uid_t uid = -1;
|
|
|
|
gid_t gid = -1;
|
|
|
|
struct stat st;
|
|
|
|
int err = 0;
|
2015-05-20 03:29:18 +02:00
|
|
|
|
2018-05-15 00:13:03 +02:00
|
|
|
vty_out(vty, "Building Configuration...\n");
|
2015-05-20 03:29:18 +02:00
|
|
|
|
2017-08-27 20:57:34 +02:00
|
|
|
backup_config_file(frr_config);
|
|
|
|
fp = fopen(frr_config, "w");
|
2002-12-13 21:15:29 +01:00
|
|
|
if (fp == NULL) {
|
2018-05-15 00:13:03 +02:00
|
|
|
vty_out(vty,
|
2016-11-09 15:05:14 +01:00
|
|
|
"%% Error: failed to open configuration file %s: %s\n",
|
2017-08-27 20:57:34 +02:00
|
|
|
frr_config, safe_strerror(errno));
|
2017-07-13 21:56:08 +02:00
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
2015-05-20 03:29:18 +02:00
|
|
|
}
|
2016-11-08 23:36:01 +01:00
|
|
|
fd = fileno(fp);
|
2015-05-20 03:29:18 +02:00
|
|
|
|
2012-09-26 10:39:10 +02:00
|
|
|
for (i = 0; i < array_size(vtysh_client); i++)
|
2015-05-20 03:29:15 +02:00
|
|
|
vtysh_client_config(&vtysh_client[i], line);
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2015-05-20 03:29:17 +02:00
|
|
|
vtysh_config_write();
|
2018-06-12 20:14:52 +02:00
|
|
|
vty->of_saved = vty->of;
|
|
|
|
vty->of = fp;
|
2018-05-15 00:13:03 +02:00
|
|
|
vtysh_config_dump();
|
2018-06-12 20:14:52 +02:00
|
|
|
vty->of = vty->of_saved;
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2016-11-08 23:36:01 +01:00
|
|
|
if (fchmod(fd, CONFIGFILE_MASK) != 0) {
|
|
|
|
printf("%% Warning: can't chmod configuration file %s: %s\n",
|
2017-08-27 20:57:34 +02:00
|
|
|
frr_config, safe_strerror(errno));
|
2016-11-08 23:36:01 +01:00
|
|
|
err++;
|
|
|
|
}
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2017-05-23 20:23:04 +02:00
|
|
|
#ifdef FRR_USER
|
2016-12-14 19:30:44 +01:00
|
|
|
pwentry = getpwnam(FRR_USER);
|
2016-11-08 23:36:01 +01:00
|
|
|
if (pwentry)
|
|
|
|
uid = pwentry->pw_uid;
|
|
|
|
else {
|
2016-12-14 19:30:44 +01:00
|
|
|
printf("%% Warning: could not look up user \"%s\"\n", FRR_USER);
|
2016-11-08 23:36:01 +01:00
|
|
|
err++;
|
2003-12-22 21:15:53 +01:00
|
|
|
}
|
2017-05-23 20:23:04 +02:00
|
|
|
#endif
|
|
|
|
#ifdef FRR_GROUP
|
2016-12-14 19:30:44 +01:00
|
|
|
grentry = getgrnam(FRR_GROUP);
|
2016-11-08 23:36:01 +01:00
|
|
|
if (grentry)
|
|
|
|
gid = grentry->gr_gid;
|
|
|
|
else {
|
2016-12-14 19:30:44 +01:00
|
|
|
printf("%% Warning: could not look up group \"%s\"\n",
|
|
|
|
FRR_GROUP);
|
2016-11-08 23:36:01 +01:00
|
|
|
err++;
|
2015-05-20 03:29:18 +02:00
|
|
|
}
|
2017-05-23 20:23:04 +02:00
|
|
|
#endif
|
2002-12-13 21:49:00 +01:00
|
|
|
|
2016-11-08 23:36:01 +01:00
|
|
|
if (!fstat(fd, &st)) {
|
|
|
|
if (st.st_uid == uid)
|
|
|
|
uid = -1;
|
|
|
|
if (st.st_gid == gid)
|
|
|
|
gid = -1;
|
|
|
|
if ((uid != (uid_t)-1 || gid != (gid_t)-1)
|
|
|
|
&& fchown(fd, uid, gid)) {
|
|
|
|
printf("%% Warning: can't chown configuration file %s: %s\n",
|
2017-08-27 20:57:34 +02:00
|
|
|
frr_config, safe_strerror(errno));
|
2016-11-08 23:36:01 +01:00
|
|
|
err++;
|
|
|
|
}
|
|
|
|
} else {
|
2017-08-27 20:57:34 +02:00
|
|
|
printf("%% Warning: stat() failed on %s: %s\n", frr_config,
|
2016-11-08 23:36:01 +01:00
|
|
|
safe_strerror(errno));
|
|
|
|
err++;
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2022-04-19 13:01:13 +02:00
|
|
|
if (fflush(fp) != 0) {
|
|
|
|
printf("%% Warning: fflush() failed on %s: %s\n", frr_config,
|
|
|
|
safe_strerror(errno));
|
|
|
|
err++;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (fsync(fd) < 0) {
|
|
|
|
printf("%% Warning: fsync() failed on %s: %s\n", frr_config,
|
|
|
|
safe_strerror(errno));
|
|
|
|
err++;
|
|
|
|
}
|
|
|
|
|
2016-11-08 23:36:01 +01:00
|
|
|
fclose(fp);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-08-27 20:57:34 +02:00
|
|
|
printf("Integrated configuration saved to %s\n", frr_config);
|
2016-11-08 23:36:01 +01:00
|
|
|
if (err)
|
|
|
|
return CMD_WARNING;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2016-11-09 15:05:14 +01:00
|
|
|
printf("[OK]\n");
|
2002-12-13 21:15:29 +01:00
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2022-11-21 18:04:12 +01:00
|
|
|
/* Broadcast the hidden "XFRR_start_configuration" marker so each daemon
 * knows a bulk configuration load is beginning.
 */
DEFUN_HIDDEN(start_config, start_config_cmd, "XFRR_start_configuration",
	     "The Beginning of Configuration\n")
{
	char line[] = "XFRR_start_configuration";
	unsigned int idx;

	for (idx = 0; idx < array_size(vtysh_client); idx++)
		vtysh_client_execute(&vtysh_client[idx], line);

	return CMD_SUCCESS;
}
|
|
|
|
|
|
|
|
/* Broadcast the hidden "XFRR_end_configuration" marker so each daemon
 * knows the bulk configuration load is complete.
 */
DEFUN_HIDDEN(end_config, end_config_cmd, "XFRR_end_configuration",
	     "The End of Configuration\n")
{
	char line[] = "XFRR_end_configuration";
	unsigned int idx;

	for (idx = 0; idx < array_size(vtysh_client); idx++)
		vtysh_client_execute(&vtysh_client[idx], line);

	return CMD_SUCCESS;
}
|
|
|
|
|
2016-11-08 19:01:06 +01:00
|
|
|
static bool want_config_integrated(void)
|
2016-09-26 22:01:37 +02:00
|
|
|
{
|
|
|
|
struct stat s;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2016-09-26 22:01:37 +02:00
|
|
|
switch (vtysh_write_integrated) {
|
|
|
|
case WRITE_INTEGRATED_UNSPECIFIED:
|
2017-08-27 20:57:34 +02:00
|
|
|
if (stat(frr_config, &s) && errno == ENOENT)
|
2016-09-26 22:01:37 +02:00
|
|
|
return false;
|
|
|
|
return true;
|
|
|
|
case WRITE_INTEGRATED_NO:
|
|
|
|
return false;
|
|
|
|
case WRITE_INTEGRATED_YES:
|
|
|
|
return true;
|
|
|
|
}
|
2016-10-13 15:48:02 +02:00
|
|
|
return true;
|
2016-09-26 22:01:37 +02:00
|
|
|
}
|
|
|
|
|
2002-12-13 21:49:00 +01:00
|
|
|
/* "write [memory|file]": persist the running configuration.
 *
 * Integrated mode: prefer to route the write through watchfrr (it
 * coordinates the daemons and has correct file ownership); if watchfrr
 * is not connected, fall back to writing frr.conf directly from vtysh.
 * Non-integrated mode: ask every daemon to write its own config file.
 */
DEFUN (vtysh_write_memory,
       vtysh_write_memory_cmd,
       "write [<memory|file>]",
       "Write running configuration to memory, network, or terminal\n"
       "Write configuration to the file (same as write file)\n"
       "Write configuration to the file (same as write memory)\n")
{
	int ret = CMD_SUCCESS;
	char line[] = "do write memory";
	unsigned int i;

	vty_out(vty, "Note: this version of vtysh never writes vtysh.conf\n");

	/* If integrated frr.conf explicitly set. */
	if (want_config_integrated()) {
		ret = CMD_WARNING_CONFIG_FAILED;

		/* first attempt to use watchfrr if it's available */
		bool used_watchfrr = false;

		for (i = 0; i < array_size(vtysh_client); i++)
			if (vtysh_client[i].flag == VTYSH_WATCHFRR)
				break;
		/* only usable if the watchfrr client slot is connected */
		if (i < array_size(vtysh_client) && vtysh_client[i].fd != -1) {
			used_watchfrr = true;
			ret = vtysh_client_execute(&vtysh_client[i],
						   "do write integrated");
		}

		/*
		 * If we didn't use watchfrr, fallback to writing the config
		 * ourselves
		 */
		if (!used_watchfrr) {
			printf("\nWarning: attempting direct configuration write without watchfrr.\nFile permissions and ownership may be incorrect, or write may fail.\n\n");
			ret = vtysh_write_config_integrated();
		}
		return ret;
	}

	vty_out(vty, "Building Configuration...\n");

	/* non-integrated: each daemon writes its own per-daemon file;
	 * ret ends up as the status of the last daemon asked */
	for (i = 0; i < array_size(vtysh_client); i++)
		ret = vtysh_client_execute(&vtysh_client[i], line);

	return ret;
}
|
|
|
|
|
2016-10-01 01:03:05 +02:00
|
|
|
/* "copy running-config startup-config": plain alias for "write memory";
 * delegates directly to the vtysh_write_memory handler.
 */
DEFUN (vtysh_copy_running_config,
       vtysh_copy_running_config_cmd,
       "copy running-config startup-config",
       "Copy from one file to another\n"
       "Copy from current system configuration\n"
       "Copy to startup configuration\n")
{
	return vtysh_write_memory(self, vty, argc, argv);
}
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2020-01-23 16:17:40 +01:00
|
|
|
DEFUN (vtysh_copy_to_running,
|
|
|
|
vtysh_copy_to_running_cmd,
|
|
|
|
"copy FILENAME running-config",
|
|
|
|
"Apply a configuration file\n"
|
|
|
|
"Configuration file to read\n"
|
|
|
|
"Apply to current configuration\n")
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
const char *fname = argv[1]->arg;
|
|
|
|
|
2020-12-15 20:45:23 +01:00
|
|
|
ret = vtysh_read_config(fname, true);
|
2020-01-23 16:17:40 +01:00
|
|
|
|
|
|
|
/* Return to enable mode - the 'read_config' api leaves us up a level */
|
|
|
|
vtysh_execute_no_pager("enable");
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2018-11-08 06:50:13 +01:00
|
|
|
DEFUN (vtysh_terminal_paginate,
|
|
|
|
vtysh_terminal_paginate_cmd,
|
|
|
|
"[no] terminal paginate",
|
|
|
|
NO_STR
|
|
|
|
"Set terminal line parameters\n"
|
|
|
|
"Use pager for output scrolling\n")
|
|
|
|
{
|
|
|
|
free(vtysh_pager_name);
|
|
|
|
vtysh_pager_name = NULL;
|
|
|
|
|
|
|
|
if (strcmp(argv[0]->text, "no"))
|
2019-01-23 14:15:52 +01:00
|
|
|
vtysh_pager_envdef(true);
|
2018-11-08 06:50:13 +01:00
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2004-08-27 15:56:39 +02:00
|
|
|
/* "[no] terminal length N": deprecated pager control.  The numeric value
 * is ignored: any nonzero value (or the "no" forms) simply enables the
 * pager; zero leaves it disabled.
 */
DEFUN (vtysh_terminal_length,
       vtysh_terminal_length_cmd,
       "[no] terminal length (0-4294967295)",
       NO_STR
       "Set terminal line parameters\n"
       "Set number of lines on a screen\n"
       "Number of lines on screen (0 for no pausing, nonzero to use pager)\n")
{
	int idx_number = 2;
	unsigned long lines;

	/* reset any previously configured pager */
	free(vtysh_pager_name);
	vtysh_pager_name = NULL;

	/* matches both "no terminal length ..." and "terminal no length" */
	if (!strcmp(argv[0]->text, "no") || !strcmp(argv[1]->text, "no")) {
		/* "terminal no length" = use VTYSH_PAGER */
		vtysh_pager_envdef(true);
		return CMD_SUCCESS;
	}

	lines = strtoul(argv[idx_number]->arg, NULL, 10);
	if (lines != 0) {
		/* value itself is ignored; warn and just enable the pager */
		vty_out(vty,
			"%% The \"terminal length\" command is deprecated and its value is ignored.\n"
			"%% Please use \"terminal paginate\" instead with OS TTY length handling.\n");
		vtysh_pager_envdef(true);
	}

	return CMD_SUCCESS;
}
|
|
|
|
|
2018-11-08 06:50:13 +01:00
|
|
|
/* Deprecated "terminal no length" spelling; handled by
 * vtysh_terminal_length (which checks argv[1] for "no").
 */
ALIAS_DEPRECATED(vtysh_terminal_length,
		 vtysh_terminal_no_length_cmd,
		 "terminal no length",
		 "Set terminal line parameters\n"
		 NO_STR
		 "Set number of lines on a screen\n")
|
|
|
|
|
2004-10-28 19:43:11 +02:00
|
|
|
DEFUN (vtysh_show_daemons,
|
|
|
|
vtysh_show_daemons_cmd,
|
|
|
|
"show daemons",
|
2004-10-03 22:11:32 +02:00
|
|
|
SHOW_STR
|
|
|
|
"Show list of running daemons\n")
|
|
|
|
{
|
2018-03-27 21:13:34 +02:00
|
|
|
unsigned int i;
|
2005-01-28 22:11:46 +01:00
|
|
|
|
2012-09-26 10:39:10 +02:00
|
|
|
for (i = 0; i < array_size(vtysh_client); i++)
|
2005-01-28 22:11:46 +01:00
|
|
|
if (vtysh_client[i].fd >= 0)
|
2018-05-15 00:13:03 +02:00
|
|
|
vty_out(vty, " %s", vtysh_client[i].name);
|
|
|
|
vty_out(vty, "\n");
|
2004-10-03 22:11:32 +02:00
|
|
|
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2019-12-06 13:48:06 +01:00
|
|
|
/* Terminal styling for one syslog priority level, used when rendering
 * live log messages (see vtysh_log_print). */
struct visual_prio {
	/* 4 characters for nice alignment */
	const char *label;

	/* xterm 256-color palette indices used on color terminals */
	int c256_background;
	int c256_formatarg;
};
|
|
|
|
|
|
|
|
/* clang-format off */
/* Styling table indexed by syslog priority (LOG_EMERG..LOG_DEBUG).
 * Labels embed ANSI color escapes; the last 4 characters are the
 * plain-text tag used on non-TTY output. */
struct visual_prio visual_prios[] = {
	[LOG_EMERG] = {
		.label = "\e[31;1mEMRG",
		.c256_background = 53,
		.c256_formatarg = 225,
	},
	[LOG_ALERT] = {
		.label = "\e[31;1mALRT",
		.c256_background = 53,
		.c256_formatarg = 225,
	},
	[LOG_CRIT] = {
		.label = "\e[31;1mCRIT",
		.c256_background = 53,
		.c256_formatarg = 225,
	},
	[LOG_ERR] = {
		.label = "\e[38;5;202mERR!",
		.c256_background = 52,
		.c256_formatarg = 224,
	},
	[LOG_WARNING] = {
		.label = "\e[38;5;222mWARN",
		.c256_background = 58,
		.c256_formatarg = 230,
	},
	[LOG_NOTICE] = {
		.label = "NTFY",
		.c256_background = 234,
		.c256_formatarg = 195,
	},
	[LOG_INFO] = {
		.label = "\e[38;5;192mINFO",
		.c256_background = 236,
		.c256_formatarg = 195,
	},
	[LOG_DEBUG] = {
		.label = "\e[38;5;116mDEBG",
		.c256_background = 238,
		.c256_formatarg = 195,
	},
};
/* clang-format on */
|
|
|
|
|
|
|
|
/* Render one received live-log message to stderr.  On a TTY, uses
 * 256-color ANSI styling keyed off the message priority and highlights
 * the printf-argument spans recorded in the header; otherwise emits a
 * plain "timestamp [PRIO] daemon: text" line.
 */
static void vtysh_log_print(struct vtysh_client *vclient,
			    struct zlog_live_hdr *hdr, const char *text)
{
	size_t textlen = hdr->textlen, textpos = 0;
	time_t ts = hdr->ts_sec;
	struct visual_prio *vis;
	struct tm tm;
	char ts_buf[32];

	/* out-of-range priority: fall back to CRIT styling */
	if (hdr->prio >= array_size(visual_prios))
		vis = &visual_prios[LOG_CRIT];
	else
		vis = &visual_prios[hdr->prio];

	localtime_r(&ts, &tm);
	strftime(ts_buf, sizeof(ts_buf), "%Y-%m-%d %H:%M:%S", &tm);

	if (!stderr_tty) {
		/* labels end in a 4-char plain-text tag; skip the escapes */
		const char *label = vis->label + strlen(vis->label) - 4;

		fprintf(stderr, "%s.%03u [%s] %s: %.*s\n", ts_buf,
			hdr->ts_nsec / 1000000U, label, vclient->name,
			(int)textlen, text);
		return;
	}

	/* colored prefix: background + dimmed punctuation around
	 * timestamp, label and daemon name */
	fprintf(stderr,
		"\e[48;5;%dm\e[38;5;247m%s.%03u [%s\e[38;5;247m] \e[38;5;255m%s\e[38;5;247m: \e[38;5;251m",
		vis->c256_background, ts_buf, hdr->ts_nsec / 1000000U,
		vis->label, vclient->name);

	/* highlight each format-argument span recorded in the header */
	for (size_t fmtpos = 0; fmtpos < hdr->n_argpos; fmtpos++) {
		struct fmt_outpos *fmt = &hdr->argpos[fmtpos];

		/* skip spans that overlap, are inverted, or run past the
		 * message text */
		if (fmt->off_start < textpos || fmt->off_end < fmt->off_start ||
		    fmt->off_end > textlen)
			continue;

		/* trim trailing padding spaces off the argument */
		while (fmt->off_end > fmt->off_start &&
		       text[fmt->off_end - 1] == ' ')
			fmt->off_end--;

		/* plain text up to the span, then the span highlighted */
		fprintf(stderr, "%.*s\e[38;5;%dm%.*s\e[38;5;251m",
			(int)(fmt->off_start - textpos), text + textpos,
			vis->c256_formatarg,
			(int)(fmt->off_end - fmt->off_start),
			text + fmt->off_start);
		textpos = fmt->off_end;
	}
	/* remaining text, clear to end-of-line, reset attributes */
	fprintf(stderr, "%.*s\033[K\033[m\n", (int)(textlen - textpos),
		text + textpos);
}
|
|
|
|
|
|
|
|
/* Event-loop read callback for a daemon's live-log socket.  Receives one
 * log datagram, prints it (clearing/redrawing the readline prompt when
 * stdout and stderr share a terminal), and on error/EOF synthesizes a
 * final log message and tears the monitor connection down.
 */
static void vtysh_log_read(struct thread *thread)
{
	struct vtysh_client *vclient = THREAD_ARG(thread);
	struct {
		struct zlog_live_hdr hdr;
		char text[4096];
	} buf;
	const char *text;
	ssize_t ret;

	/* re-arm the read event before doing anything else */
	thread_add_read(master, vtysh_log_read, vclient, vclient->log_fd,
			&vclient->log_reader);

	ret = recv(vclient->log_fd, &buf, sizeof(buf), 0);

	/* transient errors (EAGAIN/EINTR etc.): just wait for the next
	 * readable event */
	if (ret < 0 && ERRNO_IO_RETRY(errno))
		return;

	if (stderr_stdout_same) {
		/* clear the readline input line so the log message does
		 * not interleave with the prompt */
#ifdef HAVE_RL_CLEAR_VISIBLE_LINE
		rl_clear_visible_line();
#else
		puts("\r");
#endif
		fflush(stdout);
	}

	if (ret <= 0) {
		/* hard error or EOF: report via a synthesized LOG_ERR
		 * message and close the monitor fd */
		struct timespec ts;

		buf.text[0] = '\0'; /* coverity */

		if (ret != 0)
			snprintfrr(buf.text, sizeof(buf.text),
				   "log monitor connection error: %m");
		else
			snprintfrr(
				buf.text, sizeof(buf.text),
				"log monitor connection closed unexpectedly");
		buf.hdr.textlen = strlen(buf.text);

		THREAD_OFF(vclient->log_reader);
		close(vclient->log_fd);
		vclient->log_fd = -1;

		clock_gettime(CLOCK_REALTIME, &ts);
		buf.hdr.ts_sec = ts.tv_sec;
		buf.hdr.ts_nsec = ts.tv_nsec;
		buf.hdr.prio = LOG_ERR;
		buf.hdr.flags = 0;
		buf.hdr.texthdrlen = 0;
		buf.hdr.n_argpos = 0;
	} else {
		/* report messages dropped since our last read, if any */
		int32_t lost_msgs = buf.hdr.lost_msgs - vclient->lost_msgs;

		if (lost_msgs > 0) {
			vclient->lost_msgs = buf.hdr.lost_msgs;
			fprintf(stderr,
				"%d log messages from %s lost (vtysh reading too slowly)\n",
				lost_msgs, vclient->name);
		}
	}

	/* message text starts after the variable-length argpos array */
	text = buf.text + sizeof(buf.hdr.argpos[0]) * buf.hdr.n_argpos;
	vtysh_log_print(vclient, &buf.hdr, text);

	/* redraw the readline prompt we cleared above */
	if (stderr_stdout_same)
		rl_forced_update_display();

	return;
}
|
|
|
|
|
|
|
|
#ifdef CLIPPY
/* clippy/clidef can't process the DEFPY below without some value for this */
#define DAEMONS_LIST "daemon"
#endif

/* "terminal monitor [DAEMON]": subscribe this vtysh session to live log
 * messages from every connected daemon, or from one named daemon.  Each
 * daemon hands back a log fd which is registered with the event loop.
 */
DEFPY (vtysh_terminal_monitor,
       vtysh_terminal_monitor_cmd,
       "terminal monitor ["DAEMONS_LIST"]$daemon",
       "Set terminal line parameters\n"
       "Receive log messages to active VTY session\n"
       DAEMONS_STR)
{
	static const char line[] = "terminal monitor";
	int ret_all = CMD_SUCCESS, ret, fd;
	size_t i, ok = 0;

	for (i = 0; i < array_size(vtysh_client); i++) {
		struct vtysh_client *vclient = &vtysh_client[i];

		/* filter by daemon name when one was given */
		if (daemon && strcmp(vclient->name, daemon))
			continue;

		/* ->next chains further instances of the same daemon */
		for (; vclient; vclient = vclient->next) {
			if (vclient->log_fd != -1) {
				vty_out(vty, "%% %s: already monitoring logs\n",
					vclient->name);
				ok++;
				continue;
			}

			fd = -1;
			ret = vtysh_client_run(vclient, line, NULL, NULL, &fd);
			if (fd != -1) {
				/* daemon passed back a log socket; watch it */
				set_nonblocking(fd);
				vclient->log_fd = fd;
				thread_add_read(master, vtysh_log_read, vclient,
						vclient->log_fd,
						&vclient->log_reader);
			}
			if (ret != CMD_SUCCESS) {
				vty_out(vty, "%% failed to enable logs on %s\n",
					vclient->name);
				ret_all = CMD_WARNING;
			} else
				ok++;
		}
	}

	/* nothing succeeded and nothing failed => nothing was connected */
	if (!ok && ret_all == CMD_SUCCESS) {
		vty_out(vty,
			"%% command had no effect, relevant daemons not connected?\n");
		ret_all = CMD_WARNING;
	}
	return ret_all;
}
|
|
|
|
|
|
|
|
/* "no terminal monitor [DAEMON]": stop receiving live log messages from
 * every daemon, or from one named daemon; closes the local log fds.
 */
DEFPY (no_vtysh_terminal_monitor,
       no_vtysh_terminal_monitor_cmd,
       "no terminal monitor ["DAEMONS_LIST"]$daemon",
       NO_STR
       "Set terminal line parameters\n"
       "Receive log messages to active VTY session\n"
       DAEMONS_STR)
{
	static const char line[] = "no terminal monitor";
	int ret_all = CMD_SUCCESS, ret;
	size_t i, ok = 0;

	for (i = 0; i < array_size(vtysh_client); i++) {
		struct vtysh_client *vclient = &vtysh_client[i];

		/* filter by daemon name when one was given */
		if (daemon && strcmp(vclient->name, daemon))
			continue;

		/* ->next chains further instances of the same daemon */
		for (; vclient; vclient = vclient->next) {
			/* run this even if log_fd == -1, in case something
			 * got desync'd
			 */
			ret = vtysh_client_run(vclient, line, NULL, NULL, NULL);
			if (ret != CMD_SUCCESS) {
				vty_out(vty,
					"%% failed to disable logs on %s\n",
					vclient->name);
				ret_all = CMD_WARNING;
			} else
				ok++;

			/* with this being a datagram socket, we can't expect
			 * a close notification...
			 */
			if (vclient->log_fd != -1) {
				THREAD_OFF(vclient->log_reader);

				close(vclient->log_fd);
				vclient->log_fd = -1;
			}
		}
	}

	/* nothing succeeded and nothing failed => nothing was connected */
	if (!ok && ret_all == CMD_SUCCESS) {
		vty_out(vty,
			"%% command had no effect, relevant daemons not connected?\n");
		ret_all = CMD_WARNING;
	}
	return ret_all;
}
|
|
|
|
|
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
/* Execute command in child process. */
|
2016-09-30 18:10:28 +02:00
|
|
|
static void execute_command(const char *command, int argc, const char *arg1,
|
2017-07-31 14:52:10 +02:00
|
|
|
const char *arg2)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
|
|
|
pid_t pid;
|
|
|
|
int status;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
/* Call fork(). */
|
|
|
|
pid = fork();
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
if (pid < 0) {
|
|
|
|
/* Failure of fork(). */
|
2004-11-20 03:06:59 +01:00
|
|
|
fprintf(stderr, "Can't fork: %s\n", safe_strerror(errno));
|
2002-12-13 21:15:29 +01:00
|
|
|
exit(1);
|
|
|
|
} else if (pid == 0) {
|
|
|
|
/* This is child process. */
|
|
|
|
switch (argc) {
|
|
|
|
case 0:
|
2015-05-20 03:29:15 +02:00
|
|
|
execlp(command, command, (const char *)NULL);
|
2002-12-13 21:15:29 +01:00
|
|
|
break;
|
|
|
|
case 1:
|
2015-05-20 03:29:15 +02:00
|
|
|
execlp(command, command, arg1, (const char *)NULL);
|
2002-12-13 21:15:29 +01:00
|
|
|
break;
|
|
|
|
case 2:
|
2015-05-20 03:29:15 +02:00
|
|
|
execlp(command, command, arg1, arg2,
|
|
|
|
(const char *)NULL);
|
2002-12-13 21:15:29 +01:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* When execlp suceed, this part is not executed. */
|
2004-11-20 03:06:59 +01:00
|
|
|
fprintf(stderr, "Can't execute %s: %s\n", command,
|
|
|
|
safe_strerror(errno));
|
2002-12-13 21:15:29 +01:00
|
|
|
exit(1);
|
|
|
|
} else {
|
|
|
|
/* This is parent. */
|
|
|
|
execute_flag = 1;
|
2015-05-20 03:29:15 +02:00
|
|
|
wait4(pid, &status, 0, NULL);
|
2002-12-13 21:15:29 +01:00
|
|
|
execute_flag = 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
DEFUN (vtysh_ping,
|
|
|
|
vtysh_ping_cmd,
|
|
|
|
"ping WORD",
|
2003-06-25 12:49:55 +02:00
|
|
|
"Send echo messages\n"
|
2002-12-13 21:15:29 +01:00
|
|
|
"Ping destination address or hostname\n")
|
|
|
|
{
|
2017-07-31 14:52:10 +02:00
|
|
|
int idx = 1;
|
|
|
|
|
|
|
|
argv_find(argv, argc, "WORD", &idx);
|
|
|
|
execute_command("ping", 1, argv[idx]->arg, NULL);
|
2002-12-13 21:15:29 +01:00
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2022-11-28 12:14:25 +01:00
|
|
|
/* "show motd": print the message-of-the-day banner. */
DEFUN(vtysh_motd,
      vtysh_motd_cmd,
      "show motd",
      SHOW_STR
      "Show motd\n")
{
	vty_hello(vty);

	return CMD_SUCCESS;
}
|
|
|
|
|
2003-06-25 12:49:55 +02:00
|
|
|
/* "ping ip WORD": alias spelling handled by vtysh_ping. */
ALIAS(vtysh_ping, vtysh_ping_ip_cmd, "ping ip WORD",
      "Send echo messages\n"
      "IP echo\n"
      "Ping destination address or hostname\n")
|
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
DEFUN (vtysh_traceroute,
|
|
|
|
vtysh_traceroute_cmd,
|
|
|
|
"traceroute WORD",
|
|
|
|
"Trace route to destination\n"
|
|
|
|
"Trace route to destination address or hostname\n")
|
|
|
|
{
|
2017-07-31 14:52:10 +02:00
|
|
|
int idx = 1;
|
|
|
|
|
|
|
|
argv_find(argv, argc, "WORD", &idx);
|
|
|
|
execute_command("traceroute", 1, argv[idx]->arg, NULL);
|
2002-12-13 21:15:29 +01:00
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2003-06-25 12:49:55 +02:00
|
|
|
/* "traceroute ip WORD": alias spelling handled by vtysh_traceroute. */
ALIAS(vtysh_traceroute, vtysh_traceroute_ip_cmd, "traceroute ip WORD",
      "Trace route to destination\n"
      "IP trace\n"
      "Trace route to destination address or hostname\n")
|
|
|
|
|
2018-02-12 23:41:33 +01:00
|
|
|
DEFUN (vtysh_mtrace,
|
|
|
|
vtysh_mtrace_cmd,
|
2018-04-12 13:24:21 +02:00
|
|
|
"mtrace WORD [WORD]",
|
2018-02-12 23:41:33 +01:00
|
|
|
"Multicast trace route to multicast source\n"
|
2018-04-12 13:24:21 +02:00
|
|
|
"Multicast trace route to multicast source address\n"
|
|
|
|
"Multicast trace route for multicast group address\n")
|
2018-02-12 23:41:33 +01:00
|
|
|
{
|
2018-04-12 13:24:21 +02:00
|
|
|
if (argc == 2)
|
|
|
|
execute_command("mtracebis", 1, argv[1]->arg, NULL);
|
|
|
|
else
|
|
|
|
execute_command("mtracebis", 2, argv[1]->arg, argv[2]->arg);
|
2018-02-12 23:41:33 +01:00
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2003-06-25 12:49:55 +02:00
|
|
|
DEFUN (vtysh_ping6,
|
|
|
|
vtysh_ping6_cmd,
|
|
|
|
"ping ipv6 WORD",
|
|
|
|
"Send echo messages\n"
|
|
|
|
"IPv6 echo\n"
|
|
|
|
"Ping destination address or hostname\n")
|
|
|
|
{
|
2017-07-31 14:52:10 +02:00
|
|
|
execute_command("ping6", 1, argv[2]->arg, NULL);
|
2003-06-25 12:49:55 +02:00
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
DEFUN (vtysh_traceroute6,
|
|
|
|
vtysh_traceroute6_cmd,
|
|
|
|
"traceroute ipv6 WORD",
|
|
|
|
"Trace route to destination\n"
|
|
|
|
"IPv6 trace\n"
|
|
|
|
"Trace route to destination address or hostname\n")
|
|
|
|
{
|
2017-07-31 14:52:10 +02:00
|
|
|
execute_command("traceroute6", 1, argv[2]->arg, NULL);
|
2003-06-25 12:49:55 +02:00
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2023-02-01 16:26:41 +01:00
|
|
|
#if CONFDATE > 20240201
CPP_NOTICE("Remove HAVE_SHELL_ACCESS and it's documentation");
#endif
/* The commands below shell out to external programs and are only built
 * when shell access was enabled at configure time. */
#if defined(HAVE_SHELL_ACCESS)
/* "telnet WORD": spawn a telnet client to the given host. */
DEFUN (vtysh_telnet,
       vtysh_telnet_cmd,
       "telnet WORD",
       "Open a telnet connection\n"
       "IP address or hostname of a remote system\n")
{
	execute_command("telnet", 1, argv[1]->arg, NULL);
	return CMD_SUCCESS;
}

/* "telnet WORD PORT": spawn a telnet client to a host and TCP port. */
DEFUN (vtysh_telnet_port,
       vtysh_telnet_port_cmd,
       "telnet WORD PORT",
       "Open a telnet connection\n"
       "IP address or hostname of a remote system\n"
       "TCP Port number\n")
{
	execute_command("telnet", 2, argv[1]->arg, argv[2]->arg);
	return CMD_SUCCESS;
}

/* "ssh WORD": spawn an ssh client to the given [user@]host. */
DEFUN (vtysh_ssh,
       vtysh_ssh_cmd,
       "ssh WORD",
       "Open an ssh connection\n"
       "[user@]host\n")
{
	execute_command("ssh", 1, argv[1]->arg, NULL);
	return CMD_SUCCESS;
}

/* "start-shell": drop the user into the Bourne shell. */
DEFUN (vtysh_start_shell,
       vtysh_start_shell_cmd,
       "start-shell",
       "Start UNIX shell\n")
{
	execute_command("sh", 0, NULL, NULL);
	return CMD_SUCCESS;
}

/* "start-shell bash": drop the user into bash. */
DEFUN (vtysh_start_bash,
       vtysh_start_bash_cmd,
       "start-shell bash",
       "Start UNIX shell\n"
       "Start bash\n")
{
	execute_command("bash", 0, NULL, NULL);
	return CMD_SUCCESS;
}

/* "start-shell zsh": drop the user into zsh. */
DEFUN (vtysh_start_zsh,
       vtysh_start_zsh_cmd,
       "start-shell zsh",
       "Start UNIX shell\n"
       "Start Z shell\n")
{
	execute_command("zsh", 0, NULL, NULL);
	return CMD_SUCCESS;
}
#endif
|
2004-08-25 14:22:00 +02:00
|
|
|
|
2016-11-16 07:00:52 +01:00
|
|
|
DEFUN (config_list,
|
|
|
|
config_list_cmd,
|
|
|
|
"list [permutations]",
|
|
|
|
"Print command list\n"
|
|
|
|
"Print all possible command permutations\n")
|
|
|
|
{
|
|
|
|
return cmd_list_cmds(vty, argc == 2);
|
|
|
|
}
|
|
|
|
|
2018-01-12 18:35:19 +01:00
|
|
|
DEFUN (vtysh_output_file,
|
|
|
|
vtysh_output_file_cmd,
|
|
|
|
"output file FILE",
|
|
|
|
"Direct vtysh output to file\n"
|
|
|
|
"Direct vtysh output to file\n"
|
|
|
|
"Path to dump output to\n")
|
|
|
|
{
|
|
|
|
const char *path = argv[argc - 1]->arg;
|
2018-05-15 00:13:03 +02:00
|
|
|
vty->of = fopen(path, "a");
|
|
|
|
if (!vty->of) {
|
|
|
|
vty_out(vty, "Failed to open file '%s': %s\n", path,
|
2018-01-12 18:35:19 +01:00
|
|
|
safe_strerror(errno));
|
2018-05-15 00:13:03 +02:00
|
|
|
vty->of = stdout;
|
2018-01-12 18:35:19 +01:00
|
|
|
}
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
DEFUN (no_vtysh_output_file,
|
|
|
|
no_vtysh_output_file_cmd,
|
|
|
|
"no output file [FILE]",
|
|
|
|
NO_STR
|
|
|
|
"Direct vtysh output to file\n"
|
|
|
|
"Direct vtysh output to file\n"
|
|
|
|
"Path to dump output to\n")
|
|
|
|
{
|
2018-05-15 00:13:03 +02:00
|
|
|
if (vty->of != stdout) {
|
|
|
|
fclose(vty->of);
|
|
|
|
vty->of = stdout;
|
2018-01-12 18:35:19 +01:00
|
|
|
}
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2017-07-25 20:20:55 +02:00
|
|
|
/* "find REGEX...": search all known commands for a POSIX regular
 * expression match; matching is delegated to cmd_find_cmds. */
DEFUN(find,
      find_cmd,
      "find REGEX...",
      "Find CLI command matching a regular expression\n"
      "Search pattern (POSIX regex)\n")
{
	return cmd_find_cmds(vty, argv, argc);
}
|
|
|
|
|
2018-04-20 20:34:46 +02:00
|
|
|
DEFUN_HIDDEN(show_cli_graph_vtysh,
|
|
|
|
show_cli_graph_vtysh_cmd,
|
|
|
|
"show cli graph",
|
|
|
|
SHOW_STR
|
|
|
|
"CLI reflection\n"
|
|
|
|
"Dump current command space as DOT graph\n")
|
|
|
|
{
|
|
|
|
struct cmd_node *cn = vector_slot(cmdvec, vty->node);
|
|
|
|
char *dot = cmd_graph_dump_dot(cn->cmdgraph);
|
|
|
|
|
|
|
|
vty_out(vty, "%s\n", dot);
|
|
|
|
XFREE(MTYPE_TMP, dot);
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
static void vtysh_install_default(enum node_type node)
|
|
|
|
{
|
2020-07-21 08:28:04 +02:00
|
|
|
_install_element(node, &config_list_cmd);
|
|
|
|
_install_element(node, &find_cmd);
|
|
|
|
_install_element(node, &show_cli_graph_vtysh_cmd);
|
|
|
|
_install_element(node, &vtysh_output_file_cmd);
|
|
|
|
_install_element(node, &no_vtysh_output_file_cmd);
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Making connection to protocol daemon.
 *
 * Connects vclient's UNIX-domain control socket (default path
 * "<vtydir>/<name>.vty").  A missing or unreachable socket returns -1
 * (daemon simply not running); a present-but-wrong path is fatal.
 * On success stores the fd in vclient->fd and returns 0.
 */
static int vtysh_connect(struct vtysh_client *vclient)
{
	int ret;
	int sock, len;
	struct sockaddr_un addr;
	struct stat s_stat;
	const char *path;

	/* fill in the default socket path on first use */
	if (!vclient->path[0])
		snprintf(vclient->path, sizeof(vclient->path), "%s/%s.vty",
			 vtydir, vclient->name);
	path = vclient->path;

	/* Stat socket to see if we have permission to access it. */
	ret = stat(path, &s_stat);
	if (ret < 0 && errno != ENOENT) {
		/* exists but not accessible: fatal misconfiguration */
		fprintf(stderr, "vtysh_connect(%s): stat = %s\n", path,
			safe_strerror(errno));
		exit(1);
	}

	if (ret >= 0) {
		/* path exists but is not a socket: fatal */
		if (!S_ISSOCK(s_stat.st_mode)) {
			fprintf(stderr, "vtysh_connect(%s): Not a socket\n",
				path);
			exit(1);
		}
	}

	sock = socket(AF_UNIX, SOCK_STREAM, 0);
	if (sock < 0) {
#ifdef DEBUG
		fprintf(stderr, "vtysh_connect(%s): socket = %s\n", path,
			safe_strerror(errno));
#endif /* DEBUG */
		return -1;
	}

	memset(&addr, 0, sizeof(addr));
	addr.sun_family = AF_UNIX;
	strlcpy(addr.sun_path, path, sizeof(addr.sun_path));
#ifdef HAVE_STRUCT_SOCKADDR_UN_SUN_LEN
	/* BSD-style sockaddr_un carries its own length field */
	len = addr.sun_len = SUN_LEN(&addr);
#else
	len = sizeof(addr.sun_family) + strlen(addr.sun_path);
#endif /* HAVE_STRUCT_SOCKADDR_UN_SUN_LEN */

	ret = connect(sock, (struct sockaddr *)&addr, len);
	if (ret < 0) {
		/* daemon not listening: quietly report failure */
#ifdef DEBUG
		fprintf(stderr, "vtysh_connect(%s): connect = %s\n", path,
			safe_strerror(errno));
#endif /* DEBUG */
		close(sock);
		return -1;
	}
	vclient->fd = sock;

	return 0;
}
|
|
|
|
|
2018-05-21 20:00:51 +02:00
|
|
|
static int vtysh_reconnect(struct vtysh_client *vclient)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
fprintf(stderr, "Warning: connecting to %s...", vclient->name);
|
|
|
|
ret = vtysh_connect(vclient);
|
|
|
|
if (ret < 0) {
|
|
|
|
fprintf(stderr, "failed!\n");
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
fprintf(stderr, "success!\n");
|
2018-05-15 00:13:03 +02:00
|
|
|
if (vtysh_client_execute(vclient, "enable") < 0)
|
2018-05-21 20:00:51 +02:00
|
|
|
return -1;
|
|
|
|
return vtysh_execute_no_pager("end");
|
|
|
|
}
|
|
|
|
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e. any previous way of running a single
ospf daemon (router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the daemons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
/* Return true if str ends with suffix, else return false */
|
|
|
|
static int ends_with(const char *str, const char *suffix)
|
|
|
|
{
|
|
|
|
if (!str || !suffix)
|
|
|
|
return 0;
|
|
|
|
size_t lenstr = strlen(str);
|
|
|
|
size_t lensuffix = strlen(suffix);
|
|
|
|
if (lensuffix > lenstr)
|
|
|
|
return 0;
|
|
|
|
return strncmp(str + lenstr - lensuffix, suffix, lensuffix) == 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void vtysh_client_sorted_insert(struct vtysh_client *head_client,
|
|
|
|
struct vtysh_client *client)
|
|
|
|
{
|
|
|
|
struct vtysh_client *prev_node, *current_node;
|
|
|
|
|
|
|
|
prev_node = head_client;
|
|
|
|
current_node = head_client->next;
|
|
|
|
while (current_node) {
|
|
|
|
if (strcmp(current_node->path, client->path) > 0)
|
|
|
|
break;
|
|
|
|
|
|
|
|
prev_node = current_node;
|
|
|
|
current_node = current_node->next;
|
|
|
|
}
|
|
|
|
client->next = current_node;
|
|
|
|
prev_node->next = client;
|
|
|
|
}
|
|
|
|
|
|
|
|
#define MAXIMUM_INSTANCES 10
|
|
|
|
|
2019-02-11 16:53:49 +01:00
|
|
|
static void vtysh_update_all_instances(struct vtysh_client *head_client)
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
{
|
|
|
|
struct vtysh_client *client;
|
|
|
|
DIR *dir;
|
|
|
|
struct dirent *file;
|
|
|
|
int n = 0;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
if (head_client->flag != VTYSH_OSPFD)
|
|
|
|
return;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2016-11-13 09:48:56 +01:00
|
|
|
/* ls vty_sock_dir and look for all files ending in .vty */
|
2017-08-27 20:57:34 +02:00
|
|
|
dir = opendir(vtydir);
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
if (dir) {
|
|
|
|
while ((file = readdir(dir)) != NULL) {
|
2019-05-18 03:35:38 +02:00
|
|
|
if (frrstr_startswith(file->d_name, "ospfd-")
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
&& ends_with(file->d_name, ".vty")) {
|
|
|
|
if (n == MAXIMUM_INSTANCES) {
|
|
|
|
fprintf(stderr,
|
2017-01-25 18:43:58 +01:00
|
|
|
"Parsing %s, client limit(%d) reached!\n",
|
2017-08-27 20:57:34 +02:00
|
|
|
vtydir, n);
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
client = (struct vtysh_client *)malloc(
|
|
|
|
sizeof(struct vtysh_client));
|
|
|
|
client->fd = -1;
|
2015-05-20 03:29:15 +02:00
|
|
|
client->name = "ospfd";
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
client->flag = VTYSH_OSPFD;
|
2016-11-13 09:48:56 +01:00
|
|
|
snprintf(client->path, sizeof(client->path),
|
2017-08-27 20:57:34 +02:00
|
|
|
"%s/%s", vtydir, file->d_name);
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
client->next = NULL;
|
|
|
|
vtysh_client_sorted_insert(head_client, client);
|
|
|
|
n++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
closedir(dir);
|
|
|
|
}
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
static int vtysh_connect_all_instances(struct vtysh_client *head_client)
|
|
|
|
{
|
|
|
|
struct vtysh_client *client;
|
|
|
|
int rc = 0;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2019-02-11 16:53:49 +01:00
|
|
|
vtysh_update_all_instances(head_client);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
client = head_client->next;
|
|
|
|
while (client) {
|
|
|
|
if (vtysh_connect(client) == 0)
|
|
|
|
rc++;
|
|
|
|
client = client->next;
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
return rc;
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
[vtysh] Never skip authentication, and add support for multiple -c commands
2006-07-27 Andrew J. Schorr <ajschorr@alumni.princeton.edu>
* vtysh.1: Document new options -d and -E, and note that now multiple
-c options may be supplied, with embedded linefeed now supported.
In BUGS section, remove warning about vtysh causing a daemon
to freeze, since this has been fixed.
* vtysh_main.c: (usage) Add new -d and -E options. And note that
-c can be used multiple times, possibly with embedded linefeeds.
(longopts) Add new -d and -E options.
(main) Add new -d and -E options, and create a linked list to
support multiple -c options. Do not call vtysh_connect_all until
after vtysh_read_config(config_default) and vtysh_auth have
succeeded. This prevents the vtysh.conf file from configuring
any daemons, and it ensures that authentication has been passed
before we send any commands to any daemons. Call vtysh_connect_all
with any daemon name supplied with -d. If it is unable to connect
to any daemons, issue an error message and exit immediately.
When used in -c mode, call vtysh_execute("enable") before
executing the commands in order to match interactive behavior.
And detect embedded linefeed chars in -c commands and break them up
appropriately.
* vtysh.h: (vtysh_connect_all) Fix proto to reflect new
daemon_name argument, and that it now returns an integer -- the
number of daemons to which we were able to connect.
* vtysh.c: (vtysh_connect_all) Add a new daemon_name argument.
If supplied, connect only to that daemon. And return
the number of daemons to which we were able to connect.
(vtysh_prompt): Performance enhancement -- make struct utsname
static so we call uname to get the hostname only once.
2006-07-27 20:01:41 +02:00
|
|
|
int vtysh_connect_all(const char *daemon_name)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
2018-03-27 21:13:34 +02:00
|
|
|
unsigned int i;
|
[vtysh] Never skip authentication, and add support for multiple -c commands
2006-07-27 Andrew J. Schorr <ajschorr@alumni.princeton.edu>
* vtysh.1: Document new options -d and -E, and note that now multiple
-c options may be supplied, with embedded linefeed now supported.
In BUGS section, remove warning about vtysh causing a daemon
to freeze, since this has been fixed.
* vtysh_main.c: (usage) Add new -d and -E options. And note that
-c can be used multiple times, possibly with embedded linefeeds.
(longopts) Add new -d and -E options.
(main) Add new -d and -E options, and create a linked list to
support multiple -c options. Do not call vtysh_connect_all until
after vtysh_read_config(config_default) and vtysh_auth have
succeeded. This prevents the vtysh.conf file from configuring
any daemons, and it ensures that authentication has been passed
before we send any commands to any daemons. Call vtysh_connect_all
with any daemon name supplied with -d. If it is unable to connect
to any daemons, issue an error message and exit immediately.
When used in -c mode, call vtysh_execute("enable") before
executing the commands in order to match interactive behavior.
And detect embedded linefeed chars in -c commands and break them up
appropriately.
* vtysh.h: (vtysh_connect_all) Fix proto to reflect new
daemon_name argument, and that it now returns an integer -- the
number of daemons to which we were able to connect.
* vtysh.c: (vtysh_connect_all) Add a new daemon_name argument.
If supplied, connect only to that daemon. And return
the number of daemons to which we were able to connect.
(vtysh_prompt): Performance enhancement -- make struct utsname
static so we call uname to get the hostname only once.
2006-07-27 20:01:41 +02:00
|
|
|
int rc = 0;
|
|
|
|
int matches = 0;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2012-09-26 10:39:10 +02:00
|
|
|
for (i = 0; i < array_size(vtysh_client); i++) {
|
[vtysh] Never skip authentication, and add support for multiple -c commands
2006-07-27 Andrew J. Schorr <ajschorr@alumni.princeton.edu>
* vtysh.1: Document new options -d and -E, and note that now multiple
-c options may be supplied, with embedded linefeed now supported.
In BUGS section, remove warning about vtysh causing a daemon
to freeze, since this has been fixed.
* vtysh_main.c: (usage) Add new -d and -E options. And note that
-c can be used multiple times, possibly with embedded linefeeds.
(longopts) Add new -d and -E options.
(main) Add new -d and -E options, and create a linked list to
support multiple -c options. Do not call vtysh_connect_all until
after vtysh_read_config(config_default) and vtysh_auth have
succeeded. This prevents the vtysh.conf file from configuring
any daemons, and it ensures that authentication has been passed
before we send any commands to any daemons. Call vtysh_connect_all
with any daemon name supplied with -d. If it is unable to connect
to any daemons, issue an error message and exit immediately.
When used in -c mode, call vtysh_execute("enable") before
executing the commands in order to match interactive behavior.
And detect embedded linefeed chars in -c commands and break them up
appropriately.
* vtysh.h: (vtysh_connect_all) Fix proto to reflect new
daemon_name argument, and that it now returns an integer -- the
number of daemons to which we were able to connect.
* vtysh.c: (vtysh_connect_all) Add a new daemon_name argument.
If supplied, connect only to that daemon. And return
the number of daemons to which we were able to connect.
(vtysh_prompt): Performance enhancement -- make struct utsname
static so we call uname to get the hostname only once.
2006-07-27 20:01:41 +02:00
|
|
|
if (!daemon_name
|
|
|
|
|| !strcmp(daemon_name, vtysh_client[i].name)) {
|
|
|
|
matches++;
|
|
|
|
if (vtysh_connect(&vtysh_client[i]) == 0)
|
|
|
|
rc++;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
rc += vtysh_connect_all_instances(&vtysh_client[i]);
|
2017-07-17 14:03:14 +02:00
|
|
|
}
|
Multi-Instance OSPF Summary
——————————————-------------
- etc/init.d/quagga is modified to support creating separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any protocol daemons.(requires initd-mi.patch).
- Vtysh is modified to able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that its invoked with. For
the entire life of the process it caters to any command request that
matches that instance-ID (unless its a non instance specific command).
Routes/messages to zebra are tagged with instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id]
- bgpd now has ability to have multiple instance specific redistribution
for a protocol (OSPF only supported/tested for now).
- zlog ability to display instance-id besides the protocol/daemon name.
- Changes in other daemons are to because of the needed integration with
some of the modified APIs/routines. (Didn’t prefer replicating too many
separate instance specific APIs.)
- config/show/debug commands are modified to take instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e for any previous way of single ospf
deamon(router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add instance-ids of each desired
instance in the following format:
ospfd=“yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the deamons are started as expected. You should see
ospfd started with -n <instance-id> option.
ps –ef | grep quagga
With that /var/run/quagga/ should have ospfd-<instance-id>.pid and
ospfd-<instance-id>/vty to each instance.
6. vtysh to work with instances as you would with any other deamons.
7. Overall most quagga semantics are the same working with the instance
deamon, like it is for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on number of instance-ids is in place, currently its 5.
Allowed instance-id range is <1-65535>
Once daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>’ (without needing explicit
configuration)
Instance-id can not be changed via vtysh, other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
2015-05-20 03:03:42 +02:00
|
|
|
}
|
[vtysh] Never skip authentication, and add support for multiple -c commands
2006-07-27 Andrew J. Schorr <ajschorr@alumni.princeton.edu>
* vtysh.1: Document new options -d and -E, and note that now multiple
-c options may be supplied, with embedded linefeed now supported.
In BUGS section, remove warning about vtysh causing a daemon
to freeze, since this has been fixed.
* vtysh_main.c: (usage) Add new -d and -E options. And note that
-c can be used multiple times, possibly with embedded linefeeds.
(longopts) Add new -d and -E options.
(main) Add new -d and -E options, and create a linked list to
support multiple -c options. Do not call vtysh_connect_all until
after vtysh_read_config(config_default) and vtysh_auth have
succeeded. This prevents the vtysh.conf file from configuring
any daemons, and it ensures that authentication has been passed
before we send any commands to any daemons. Call vtysh_connect_all
with any daemon name supplied with -d. If it is unable to connect
to any daemons, issue an error message and exit immediately.
When used in -c mode, call vtysh_execute("enable") before
executing the commands in order to match interactive behavior.
And detect embedded linefeed chars in -c commands and break them up
appropriately.
* vtysh.h: (vtysh_connect_all) Fix proto to reflect new
daemon_name argument, and that it now returns an integer -- the
number of daemons to which we were able to connect.
* vtysh.c: (vtysh_connect_all) Add a new daemon_name argument.
If supplied, connect only to that daemon. And return
the number of daemons to which we were able to connect.
(vtysh_prompt): Performance enhancement -- make struct utsname
static so we call uname to get the hostname only once.
2006-07-27 20:01:41 +02:00
|
|
|
if (!matches)
|
|
|
|
fprintf(stderr, "Error: no daemons match name %s!\n",
|
|
|
|
daemon_name);
|
|
|
|
return rc;
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
|
|
|
|
2004-08-26 15:08:30 +02:00
|
|
|
/* To disable readline's filename completion. */
|
2003-04-19 01:55:29 +02:00
|
|
|
/* Completion entry stub handed to readline: always yields NULL so that
 * readline never falls back to its default filename completion. */
static char *vtysh_completion_entry_function(const char *ignore,
					     int invoking_key)
{
	(void)ignore;
	(void)invoking_key;

	return NULL;
}
|
|
|
|
|
2005-01-28 22:11:46 +01:00
|
|
|
void vtysh_readline_init(void)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
|
|
|
/* readline related settings. */
|
2021-02-08 02:15:24 +01:00
|
|
|
char *disable_bracketed_paste =
|
|
|
|
XSTRDUP(MTYPE_TMP, "set enable-bracketed-paste off");
|
|
|
|
|
2016-11-19 10:42:52 +01:00
|
|
|
rl_initialize();
|
2021-02-08 02:15:24 +01:00
|
|
|
rl_parse_and_bind(disable_bracketed_paste);
|
2014-05-27 19:55:11 +02:00
|
|
|
rl_bind_key('?', (rl_command_func_t *)vtysh_rl_describe);
|
2003-03-25 06:07:42 +01:00
|
|
|
rl_completion_entry_function = vtysh_completion_entry_function;
|
2018-09-07 20:30:24 +02:00
|
|
|
rl_attempted_completion_function = new_completion;
|
2021-02-08 02:15:24 +01:00
|
|
|
|
|
|
|
XFREE(MTYPE_TMP, disable_bracketed_paste);
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
|
|
|
|
2005-01-28 22:11:46 +01:00
|
|
|
char *vtysh_prompt(void)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
2019-05-30 23:56:55 +02:00
|
|
|
static char buf[512];
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2023-01-26 14:53:47 +01:00
|
|
|
#pragma GCC diagnostic push
|
|
|
|
#pragma GCC diagnostic ignored "-Wformat-nonliteral"
|
|
|
|
/* prompt formatting has a %s in the cmd_node prompt string. */
|
2020-03-08 20:43:26 +01:00
|
|
|
snprintf(buf, sizeof(buf), cmd_prompt(vty->node), cmd_hostname_get());
|
2023-01-26 14:53:47 +01:00
|
|
|
#pragma GCC diagnostic pop
|
2002-12-13 21:15:29 +01:00
|
|
|
return buf;
|
|
|
|
}
|
|
|
|
|
2017-05-10 16:38:48 +02:00
|
|
|
static void vtysh_ac_line(void *arg, const char *line)
|
|
|
|
{
|
|
|
|
vector comps = arg;
|
|
|
|
size_t i;
|
|
|
|
for (i = 0; i < vector_active(comps); i++)
|
|
|
|
if (!strcmp(line, (char *)vector_slot(comps, i)))
|
|
|
|
return;
|
|
|
|
vector_set(comps, XSTRDUP(MTYPE_COMPLETION, line));
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Query every connected daemon for completions of the given token and
 * collect the deduplicated results into comps via vtysh_ac_line. */
static void vtysh_autocomplete(vector comps, struct cmd_token *token)
{
	char cmdbuf[256];
	size_t idx;

	snprintf(cmdbuf, sizeof(cmdbuf), "autocomplete %d %s %s",
		 token->type, token->text,
		 token->varname ? token->varname : "-");

	/* Suppress regular output while the query runs. */
	vty->of_saved = vty->of;
	vty->of = NULL;
	for (idx = 0; idx < array_size(vtysh_client); idx++)
		vtysh_client_run_all(&vtysh_client[idx], cmdbuf, 1,
				     vtysh_ac_line, comps);
	vty->of = vty->of_saved;
}
|
|
|
|
|
2017-05-20 16:50:52 +02:00
|
|
|
/* Catch-all variable completion table: the first entry matches every
 * token/variable name and delegates completion to the connected daemons
 * via vtysh_autocomplete; the NULL entry terminates the table. */
static const struct cmd_variable_handler vtysh_var_handler[] = {
	{/* match all */
	 .tokenname = NULL,
	 .varname = NULL,
	 .completions = vtysh_autocomplete},
	{.completions = NULL}};
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2019-01-24 10:12:36 +01:00
|
|
|
void vtysh_uninit(void)
|
2018-01-12 18:35:19 +01:00
|
|
|
{
|
2018-05-15 00:13:03 +02:00
|
|
|
if (vty->of != stdout)
|
|
|
|
fclose(vty->of);
|
2018-01-12 18:35:19 +01:00
|
|
|
}
|
|
|
|
|
2005-01-28 22:11:46 +01:00
|
|
|
void vtysh_init_vty(void)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
2019-12-06 13:48:06 +01:00
|
|
|
struct stat st_out, st_err;
|
|
|
|
|
2021-10-18 11:51:09 +02:00
|
|
|
cmd_defer_tree(true);
|
|
|
|
|
2019-12-06 13:48:06 +01:00
|
|
|
for (size_t i = 0; i < array_size(vtysh_client); i++) {
|
|
|
|
vtysh_client[i].fd = -1;
|
|
|
|
vtysh_client[i].log_fd = -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
stderr_tty = isatty(STDERR_FILENO);
|
|
|
|
|
|
|
|
if (fstat(STDOUT_FILENO, &st_out) || fstat(STDERR_FILENO, &st_err) ||
|
|
|
|
(st_out.st_dev == st_err.st_dev && st_out.st_ino == st_err.st_ino))
|
|
|
|
stderr_stdout_same = true;
|
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
/* Make vty structure. */
|
|
|
|
vty = vty_new();
|
|
|
|
vty->type = VTY_SHELL;
|
|
|
|
vty->node = VIEW_NODE;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2018-01-12 18:35:19 +01:00
|
|
|
/* set default output */
|
2018-05-15 00:13:03 +02:00
|
|
|
vty->of = stdout;
|
2019-01-23 14:15:52 +01:00
|
|
|
vtysh_pager_envdef(false);
|
2018-01-12 18:35:19 +01:00
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
/* Initialize commands. */
|
|
|
|
cmd_init(0);
|
2017-05-20 16:50:52 +02:00
|
|
|
cmd_variable_handler_register(vtysh_var_handler);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2020-09-29 10:17:04 +02:00
|
|
|
/* bgpd */
|
2020-09-29 12:12:55 +02:00
|
|
|
#ifdef HAVE_BGPD
|
2018-09-08 22:31:43 +02:00
|
|
|
install_node(&bgp_node);
|
2020-09-29 10:17:04 +02:00
|
|
|
install_element(CONFIG_NODE, &router_bgp_cmd);
|
|
|
|
install_element(BGP_NODE, &vtysh_exit_bgpd_cmd);
|
|
|
|
install_element(BGP_NODE, &vtysh_quit_bgpd_cmd);
|
|
|
|
install_element(BGP_NODE, &vtysh_end_all_cmd);
|
|
|
|
|
2018-09-08 22:31:43 +02:00
|
|
|
install_node(&bgp_vpnv4_node);
|
2020-09-29 10:17:04 +02:00
|
|
|
install_element(BGP_NODE, &address_family_ipv4_vpn_cmd);
|
|
|
|
#ifdef KEEP_OLD_VPN_COMMANDS
|
|
|
|
install_element(BGP_NODE, &address_family_vpnv4_cmd);
|
|
|
|
#endif /* KEEP_OLD_VPN_COMMANDS */
|
|
|
|
install_element(BGP_VPNV4_NODE, &vtysh_exit_bgpd_cmd);
|
|
|
|
install_element(BGP_VPNV4_NODE, &vtysh_quit_bgpd_cmd);
|
|
|
|
install_element(BGP_VPNV4_NODE, &vtysh_end_all_cmd);
|
|
|
|
install_element(BGP_VPNV4_NODE, &exit_address_family_cmd);
|
|
|
|
|
2018-09-08 22:31:43 +02:00
|
|
|
install_node(&bgp_vpnv6_node);
|
2020-09-29 10:17:04 +02:00
|
|
|
install_element(BGP_NODE, &address_family_ipv6_vpn_cmd);
|
|
|
|
#ifdef KEEP_OLD_VPN_COMMANDS
|
|
|
|
install_element(BGP_NODE, &address_family_vpnv6_cmd);
|
|
|
|
#endif /* KEEP_OLD_VPN_COMMANDS */
|
|
|
|
install_element(BGP_VPNV6_NODE, &vtysh_exit_bgpd_cmd);
|
|
|
|
install_element(BGP_VPNV6_NODE, &vtysh_quit_bgpd_cmd);
|
|
|
|
install_element(BGP_VPNV6_NODE, &vtysh_end_all_cmd);
|
|
|
|
install_element(BGP_VPNV6_NODE, &exit_address_family_cmd);
|
|
|
|
|
2018-09-08 22:31:43 +02:00
|
|
|
install_node(&bgp_flowspecv4_node);
|
2020-09-29 10:17:04 +02:00
|
|
|
install_element(BGP_NODE, &address_family_flowspecv4_cmd);
|
|
|
|
install_element(BGP_FLOWSPECV4_NODE, &vtysh_exit_bgpd_cmd);
|
|
|
|
install_element(BGP_FLOWSPECV4_NODE, &vtysh_quit_bgpd_cmd);
|
|
|
|
install_element(BGP_FLOWSPECV4_NODE, &vtysh_end_all_cmd);
|
|
|
|
install_element(BGP_FLOWSPECV4_NODE, &exit_address_family_cmd);
|
|
|
|
|
2018-09-08 22:31:43 +02:00
|
|
|
install_node(&bgp_flowspecv6_node);
|
2020-09-29 10:17:04 +02:00
|
|
|
install_element(BGP_NODE, &address_family_flowspecv6_cmd);
|
|
|
|
install_element(BGP_FLOWSPECV6_NODE, &vtysh_exit_bgpd_cmd);
|
|
|
|
install_element(BGP_FLOWSPECV6_NODE, &vtysh_quit_bgpd_cmd);
|
|
|
|
install_element(BGP_FLOWSPECV6_NODE, &vtysh_end_all_cmd);
|
|
|
|
install_element(BGP_FLOWSPECV6_NODE, &exit_address_family_cmd);
|
|
|
|
|
2018-09-08 22:31:43 +02:00
|
|
|
install_node(&bgp_ipv4_node);
|
2020-09-29 10:17:04 +02:00
|
|
|
install_element(BGP_NODE, &address_family_ipv4_cmd);
|
|
|
|
install_element(BGP_IPV4_NODE, &vtysh_exit_bgpd_cmd);
|
|
|
|
install_element(BGP_IPV4_NODE, &vtysh_quit_bgpd_cmd);
|
|
|
|
install_element(BGP_IPV4_NODE, &vtysh_end_all_cmd);
|
|
|
|
install_element(BGP_IPV4_NODE, &exit_address_family_cmd);
|
|
|
|
|
2018-09-08 22:31:43 +02:00
|
|
|
install_node(&bgp_ipv4m_node);
|
2020-09-29 10:17:04 +02:00
|
|
|
install_element(BGP_NODE, &address_family_ipv4_multicast_cmd);
|
|
|
|
install_element(BGP_IPV4M_NODE, &vtysh_exit_bgpd_cmd);
|
|
|
|
install_element(BGP_IPV4M_NODE, &vtysh_quit_bgpd_cmd);
|
|
|
|
install_element(BGP_IPV4M_NODE, &vtysh_end_all_cmd);
|
|
|
|
install_element(BGP_IPV4M_NODE, &exit_address_family_cmd);
|
|
|
|
|
2018-09-08 22:31:43 +02:00
|
|
|
install_node(&bgp_ipv4l_node);
|
2020-09-29 10:17:04 +02:00
|
|
|
install_element(BGP_NODE, &address_family_ipv4_labeled_unicast_cmd);
|
|
|
|
install_element(BGP_IPV4L_NODE, &vtysh_exit_bgpd_cmd);
|
|
|
|
install_element(BGP_IPV4L_NODE, &vtysh_quit_bgpd_cmd);
|
|
|
|
install_element(BGP_IPV4L_NODE, &vtysh_end_all_cmd);
|
|
|
|
install_element(BGP_IPV4L_NODE, &exit_address_family_cmd);
|
|
|
|
|
2018-09-08 22:31:43 +02:00
|
|
|
install_node(&bgp_ipv6_node);
|
2020-09-29 10:17:04 +02:00
|
|
|
install_element(BGP_NODE, &address_family_ipv6_cmd);
|
|
|
|
install_element(BGP_IPV6_NODE, &vtysh_exit_bgpd_cmd);
|
|
|
|
install_element(BGP_IPV6_NODE, &vtysh_quit_bgpd_cmd);
|
|
|
|
install_element(BGP_IPV6_NODE, &vtysh_end_all_cmd);
|
|
|
|
install_element(BGP_IPV6_NODE, &exit_address_family_cmd);
|
|
|
|
|
2018-09-08 22:31:43 +02:00
|
|
|
install_node(&bgp_ipv6m_node);
|
2020-09-29 10:17:04 +02:00
|
|
|
install_element(BGP_NODE, &address_family_ipv6_multicast_cmd);
|
|
|
|
install_element(BGP_IPV6M_NODE, &vtysh_exit_bgpd_cmd);
|
|
|
|
install_element(BGP_IPV6M_NODE, &vtysh_quit_bgpd_cmd);
|
|
|
|
install_element(BGP_IPV6M_NODE, &vtysh_end_all_cmd);
|
|
|
|
install_element(BGP_IPV6M_NODE, &exit_address_family_cmd);
|
|
|
|
|
2018-09-08 22:31:43 +02:00
|
|
|
install_node(&bgp_ipv6l_node);
|
2020-09-29 10:17:04 +02:00
|
|
|
install_element(BGP_NODE, &address_family_ipv6_labeled_unicast_cmd);
|
|
|
|
install_element(BGP_IPV6L_NODE, &vtysh_exit_bgpd_cmd);
|
|
|
|
install_element(BGP_IPV6L_NODE, &vtysh_quit_bgpd_cmd);
|
|
|
|
install_element(BGP_IPV6L_NODE, &vtysh_end_all_cmd);
|
|
|
|
install_element(BGP_IPV6L_NODE, &exit_address_family_cmd);
|
|
|
|
|
|
|
|
#if defined(ENABLE_BGP_VNC)
|
2018-09-08 22:31:43 +02:00
|
|
|
install_node(&bgp_vrf_policy_node);
|
2020-09-29 10:17:04 +02:00
|
|
|
install_element(BGP_NODE, &vnc_vrf_policy_cmd);
|
|
|
|
install_element(BGP_VRF_POLICY_NODE, &vtysh_exit_bgpd_cmd);
|
|
|
|
install_element(BGP_VRF_POLICY_NODE, &vtysh_quit_bgpd_cmd);
|
|
|
|
install_element(BGP_VRF_POLICY_NODE, &vtysh_end_all_cmd);
|
|
|
|
install_element(BGP_VRF_POLICY_NODE, &exit_vrf_policy_cmd);
|
|
|
|
|
2018-09-08 22:31:43 +02:00
|
|
|
install_node(&bgp_vnc_defaults_node);
|
2020-09-29 10:17:04 +02:00
|
|
|
install_element(BGP_NODE, &vnc_defaults_cmd);
|
|
|
|
install_element(BGP_VNC_DEFAULTS_NODE, &vtysh_exit_bgpd_cmd);
|
|
|
|
install_element(BGP_VNC_DEFAULTS_NODE, &vtysh_quit_bgpd_cmd);
|
|
|
|
install_element(BGP_VNC_DEFAULTS_NODE, &vtysh_end_all_cmd);
|
|
|
|
install_element(BGP_VNC_DEFAULTS_NODE, &exit_vnc_config_cmd);
|
|
|
|
|
2018-09-08 22:31:43 +02:00
|
|
|
install_node(&bgp_vnc_nve_group_node);
|
2020-09-29 10:17:04 +02:00
|
|
|
install_element(BGP_NODE, &vnc_nve_group_cmd);
|
|
|
|
install_element(BGP_VNC_NVE_GROUP_NODE, &vtysh_exit_bgpd_cmd);
|
|
|
|
install_element(BGP_VNC_NVE_GROUP_NODE, &vtysh_quit_bgpd_cmd);
|
|
|
|
install_element(BGP_VNC_NVE_GROUP_NODE, &vtysh_end_all_cmd);
|
|
|
|
install_element(BGP_VNC_NVE_GROUP_NODE, &exit_vnc_config_cmd);
|
|
|
|
|
2018-09-08 22:31:43 +02:00
|
|
|
install_node(&bgp_vnc_l2_group_node);
|
2020-09-29 10:17:04 +02:00
|
|
|
install_element(BGP_NODE, &vnc_l2_group_cmd);
|
|
|
|
install_element(BGP_VNC_L2_GROUP_NODE, &vtysh_exit_bgpd_cmd);
|
|
|
|
install_element(BGP_VNC_L2_GROUP_NODE, &vtysh_quit_bgpd_cmd);
|
|
|
|
install_element(BGP_VNC_L2_GROUP_NODE, &vtysh_end_all_cmd);
|
|
|
|
install_element(BGP_VNC_L2_GROUP_NODE, &exit_vnc_config_cmd);
|
|
|
|
#endif
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2020-09-29 10:17:04 +02:00
|
|
|
install_node(&bgp_evpn_node);
|
|
|
|
install_element(BGP_NODE, &address_family_evpn_cmd);
|
|
|
|
install_element(BGP_EVPN_NODE, &vtysh_quit_bgpd_cmd);
|
|
|
|
install_element(BGP_EVPN_NODE, &vtysh_exit_bgpd_cmd);
|
|
|
|
install_element(BGP_EVPN_NODE, &vtysh_end_all_cmd);
|
|
|
|
install_element(BGP_EVPN_NODE, &exit_address_family_cmd);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2020-09-29 10:17:04 +02:00
|
|
|
install_node(&bgp_evpn_vni_node);
|
|
|
|
install_element(BGP_EVPN_NODE, &bgp_evpn_vni_cmd);
|
|
|
|
install_element(BGP_EVPN_VNI_NODE, &vtysh_exit_bgpd_cmd);
|
|
|
|
install_element(BGP_EVPN_VNI_NODE, &vtysh_quit_bgpd_cmd);
|
|
|
|
install_element(BGP_EVPN_VNI_NODE, &vtysh_end_all_cmd);
|
|
|
|
install_element(BGP_EVPN_VNI_NODE, &exit_vni_cmd);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2020-09-29 10:17:04 +02:00
|
|
|
install_node(&rpki_node);
|
|
|
|
install_element(CONFIG_NODE, &rpki_cmd);
|
|
|
|
install_element(RPKI_NODE, &rpki_exit_cmd);
|
|
|
|
install_element(RPKI_NODE, &rpki_quit_cmd);
|
|
|
|
install_element(RPKI_NODE, &vtysh_end_all_cmd);
|
|
|
|
|
|
|
|
install_node(&bmp_node);
|
|
|
|
install_element(BGP_NODE, &bmp_targets_cmd);
|
|
|
|
install_element(BMP_NODE, &bmp_exit_cmd);
|
|
|
|
install_element(BMP_NODE, &bmp_quit_cmd);
|
|
|
|
install_element(BMP_NODE, &vtysh_end_all_cmd);
|
2020-12-19 00:47:11 +01:00
|
|
|
|
|
|
|
install_node(&bgp_srv6_node);
|
|
|
|
install_element(BGP_NODE, &bgp_srv6_cmd);
|
|
|
|
install_element(BGP_SRV6_NODE, &exit_bgp_srv6_cmd);
|
|
|
|
install_element(BGP_SRV6_NODE, &quit_bgp_srv6_cmd);
|
|
|
|
install_element(BGP_SRV6_NODE, &vtysh_end_all_cmd);
|
2020-09-29 12:12:55 +02:00
|
|
|
#endif /* HAVE_BGPD */
|
2020-09-29 10:17:04 +02:00
|
|
|
|
|
|
|
/* ripd */
|
|
|
|
install_node(&rip_node);
|
2020-09-29 12:12:55 +02:00
|
|
|
#ifdef HAVE_RIPD
|
2020-09-29 10:17:04 +02:00
|
|
|
install_element(CONFIG_NODE, &router_rip_cmd);
|
2002-12-13 21:15:29 +01:00
|
|
|
install_element(RIP_NODE, &vtysh_exit_ripd_cmd);
|
|
|
|
install_element(RIP_NODE, &vtysh_quit_ripd_cmd);
|
2020-09-29 10:17:04 +02:00
|
|
|
install_element(RIP_NODE, &vtysh_end_all_cmd);
|
2020-09-29 12:12:55 +02:00
|
|
|
#endif /* HAVE_RIPD */
|
2020-09-29 10:17:04 +02:00
|
|
|
|
|
|
|
/* ripngd */
|
|
|
|
install_node(&ripng_node);
|
2020-09-29 12:12:55 +02:00
|
|
|
#ifdef HAVE_RIPNGD
|
2020-09-29 10:17:04 +02:00
|
|
|
install_element(CONFIG_NODE, &router_ripng_cmd);
|
2003-03-25 06:07:42 +01:00
|
|
|
install_element(RIPNG_NODE, &vtysh_exit_ripngd_cmd);
|
|
|
|
install_element(RIPNG_NODE, &vtysh_quit_ripngd_cmd);
|
2020-09-29 10:17:04 +02:00
|
|
|
install_element(RIPNG_NODE, &vtysh_end_all_cmd);
|
2020-09-29 12:12:55 +02:00
|
|
|
#endif /* HAVE_RIPNGD */
|
2020-09-29 10:17:04 +02:00
|
|
|
|
|
|
|
/* ospfd */
|
2020-09-29 12:12:55 +02:00
|
|
|
#ifdef HAVE_OSPFD
|
2020-09-29 10:17:04 +02:00
|
|
|
install_node(&ospf_node);
|
|
|
|
install_element(CONFIG_NODE, &router_ospf_cmd);
|
2002-12-13 21:15:29 +01:00
|
|
|
install_element(OSPF_NODE, &vtysh_exit_ospfd_cmd);
|
|
|
|
install_element(OSPF_NODE, &vtysh_quit_ospfd_cmd);
|
2020-09-29 10:17:04 +02:00
|
|
|
install_element(OSPF_NODE, &vtysh_end_all_cmd);
|
2020-09-29 12:12:55 +02:00
|
|
|
#endif /* HAVE_OSPFD */
|
2020-09-29 10:17:04 +02:00
|
|
|
|
|
|
|
/* ospf6d */
|
2020-09-29 12:12:55 +02:00
|
|
|
#ifdef HAVE_OSPF6D
|
2020-09-29 10:17:04 +02:00
|
|
|
install_node(&ospf6_node);
|
|
|
|
install_element(CONFIG_NODE, &router_ospf6_cmd);
|
2003-03-25 06:07:42 +01:00
|
|
|
install_element(OSPF6_NODE, &vtysh_exit_ospf6d_cmd);
|
|
|
|
install_element(OSPF6_NODE, &vtysh_quit_ospf6d_cmd);
|
2020-09-29 10:17:04 +02:00
|
|
|
install_element(OSPF6_NODE, &vtysh_end_all_cmd);
|
2020-09-29 12:12:55 +02:00
|
|
|
#endif /* HAVE_OSPF6D */
|
2020-09-29 10:17:04 +02:00
|
|
|
|
|
|
|
/* ldpd */
|
2016-10-24 21:24:03 +02:00
|
|
|
#if defined(HAVE_LDPD)
|
2020-09-29 10:17:04 +02:00
|
|
|
install_node(&ldp_node);
|
|
|
|
install_element(CONFIG_NODE, &ldp_mpls_ldp_cmd);
|
2016-08-02 00:47:15 +02:00
|
|
|
install_element(LDP_NODE, &vtysh_exit_ldpd_cmd);
|
|
|
|
install_element(LDP_NODE, &vtysh_quit_ldpd_cmd);
|
2020-09-29 10:17:04 +02:00
|
|
|
install_element(LDP_NODE, &vtysh_end_all_cmd);
|
|
|
|
|
|
|
|
install_node(&ldp_ipv4_node);
|
|
|
|
install_element(LDP_NODE, &ldp_address_family_ipv4_cmd);
|
2016-08-02 00:47:15 +02:00
|
|
|
install_element(LDP_IPV4_NODE, &vtysh_exit_ldpd_cmd);
|
|
|
|
install_element(LDP_IPV4_NODE, &vtysh_quit_ldpd_cmd);
|
2017-07-27 00:32:55 +02:00
|
|
|
install_element(LDP_IPV4_NODE, &ldp_exit_address_family_cmd);
|
2020-09-29 10:17:04 +02:00
|
|
|
install_element(LDP_IPV4_NODE, &vtysh_end_all_cmd);
|
|
|
|
|
|
|
|
install_node(&ldp_ipv6_node);
|
|
|
|
install_element(LDP_NODE, &ldp_address_family_ipv6_cmd);
|
2016-08-02 00:47:15 +02:00
|
|
|
install_element(LDP_IPV6_NODE, &vtysh_exit_ldpd_cmd);
|
|
|
|
install_element(LDP_IPV6_NODE, &vtysh_quit_ldpd_cmd);
|
2017-07-27 00:32:55 +02:00
|
|
|
install_element(LDP_IPV6_NODE, &ldp_exit_address_family_cmd);
|
2020-09-29 10:17:04 +02:00
|
|
|
install_element(LDP_IPV6_NODE, &vtysh_end_all_cmd);
|
|
|
|
|
|
|
|
install_node(&ldp_ipv4_iface_node);
|
|
|
|
install_element(LDP_IPV4_NODE, &ldp_interface_ifname_cmd);
|
2016-08-02 00:47:15 +02:00
|
|
|
install_element(LDP_IPV4_IFACE_NODE, &vtysh_exit_ldpd_cmd);
|
|
|
|
install_element(LDP_IPV4_IFACE_NODE, &vtysh_quit_ldpd_cmd);
|
2020-09-29 10:17:04 +02:00
|
|
|
install_element(LDP_IPV4_IFACE_NODE, &vtysh_end_all_cmd);
|
|
|
|
|
|
|
|
install_node(&ldp_ipv6_iface_node);
|
|
|
|
install_element(LDP_IPV6_NODE, &ldp_interface_ifname_cmd);
|
2016-08-02 00:47:15 +02:00
|
|
|
install_element(LDP_IPV6_IFACE_NODE, &vtysh_exit_ldpd_cmd);
|
|
|
|
install_element(LDP_IPV6_IFACE_NODE, &vtysh_quit_ldpd_cmd);
|
2020-09-29 10:17:04 +02:00
|
|
|
install_element(LDP_IPV6_IFACE_NODE, &vtysh_end_all_cmd);
|
|
|
|
|
|
|
|
install_node(&ldp_l2vpn_node);
|
|
|
|
install_element(CONFIG_NODE, &ldp_l2vpn_word_type_vpls_cmd);
|
2016-08-02 00:47:15 +02:00
|
|
|
install_element(LDP_L2VPN_NODE, &vtysh_exit_ldpd_cmd);
|
|
|
|
install_element(LDP_L2VPN_NODE, &vtysh_quit_ldpd_cmd);
|
2020-09-29 10:17:04 +02:00
|
|
|
install_element(LDP_L2VPN_NODE, &vtysh_end_all_cmd);
|
|
|
|
|
|
|
|
install_node(&ldp_pseudowire_node);
|
|
|
|
install_element(LDP_L2VPN_NODE, &ldp_member_pseudowire_ifname_cmd);
|
2016-08-02 00:47:15 +02:00
|
|
|
install_element(LDP_PSEUDOWIRE_NODE, &vtysh_exit_ldpd_cmd);
|
|
|
|
install_element(LDP_PSEUDOWIRE_NODE, &vtysh_quit_ldpd_cmd);
|
2020-09-29 10:17:04 +02:00
|
|
|
install_element(LDP_PSEUDOWIRE_NODE, &vtysh_end_all_cmd);
|
2016-10-24 21:24:03 +02:00
|
|
|
#endif
|
2020-09-29 10:17:04 +02:00
|
|
|
|
|
|
|
/* eigrpd */
|
2020-09-29 12:12:55 +02:00
|
|
|
#ifdef HAVE_EIGRPD
|
2020-09-29 10:17:04 +02:00
|
|
|
install_node(&eigrp_node);
|
|
|
|
install_element(CONFIG_NODE, &router_eigrp_cmd);
|
|
|
|
install_element(EIGRP_NODE, &vtysh_exit_eigrpd_cmd);
|
|
|
|
install_element(EIGRP_NODE, &vtysh_quit_eigrpd_cmd);
|
|
|
|
install_element(EIGRP_NODE, &vtysh_end_all_cmd);
|
2020-09-29 12:12:55 +02:00
|
|
|
#endif /* HAVE_EIGRPD */
|
2020-09-29 10:17:04 +02:00
|
|
|
|
|
|
|
/* babeld */
|
2020-09-29 12:12:55 +02:00
|
|
|
#ifdef HAVE_BABELD
|
2020-09-29 10:17:04 +02:00
|
|
|
install_node(&babel_node);
|
|
|
|
install_element(CONFIG_NODE, &router_babel_cmd);
|
|
|
|
install_element(BABEL_NODE, &vtysh_exit_babeld_cmd);
|
|
|
|
install_element(BABEL_NODE, &vtysh_quit_babeld_cmd);
|
|
|
|
install_element(BABEL_NODE, &vtysh_end_all_cmd);
|
2020-09-29 12:12:55 +02:00
|
|
|
#endif /* HAVE_BABELD */
|
2020-09-29 10:17:04 +02:00
|
|
|
|
|
|
|
/* isisd */
|
2020-09-29 12:12:55 +02:00
|
|
|
#ifdef HAVE_ISISD
|
2020-09-29 10:17:04 +02:00
|
|
|
install_node(&isis_node);
|
|
|
|
install_element(CONFIG_NODE, &router_isis_cmd);
|
2003-12-23 11:39:08 +01:00
|
|
|
install_element(ISIS_NODE, &vtysh_exit_isisd_cmd);
|
|
|
|
install_element(ISIS_NODE, &vtysh_quit_isisd_cmd);
|
2020-09-29 10:17:04 +02:00
|
|
|
install_element(ISIS_NODE, &vtysh_end_all_cmd);
|
2020-09-29 12:12:55 +02:00
|
|
|
#endif /* HAVE_ISISD */
|
2020-09-29 10:17:04 +02:00
|
|
|
|
|
|
|
/* fabricd */
|
2020-09-29 12:12:55 +02:00
|
|
|
#ifdef HAVE_FABRICD
|
2020-09-29 10:17:04 +02:00
|
|
|
install_node(&openfabric_node);
|
|
|
|
install_element(CONFIG_NODE, &router_openfabric_cmd);
|
2018-03-22 15:01:15 +01:00
|
|
|
install_element(OPENFABRIC_NODE, &vtysh_exit_fabricd_cmd);
|
|
|
|
install_element(OPENFABRIC_NODE, &vtysh_quit_fabricd_cmd);
|
2020-09-29 10:17:04 +02:00
|
|
|
install_element(OPENFABRIC_NODE, &vtysh_end_all_cmd);
|
2020-09-29 12:12:55 +02:00
|
|
|
#endif /* HAVE_FABRICD */
|
2020-09-29 10:17:04 +02:00
|
|
|
|
|
|
|
/* pbrd */
|
2020-09-29 12:12:55 +02:00
|
|
|
#ifdef HAVE_PBRD
|
2020-09-29 10:17:04 +02:00
|
|
|
install_node(&pbr_map_node);
|
|
|
|
install_element(CONFIG_NODE, &vtysh_pbr_map_cmd);
|
|
|
|
install_element(CONFIG_NODE, &vtysh_no_pbr_map_cmd);
|
pbrd: Add PBR to FRR
This is an implementation of PBR for FRR.
This implemenation uses a combination of rules and
tables to determine how packets will flow.
PBR introduces a new concept of 'nexthop-groups' to
specify a group of nexthops that will be used for
ecmp. Nexthop-groups are specified on the cli via:
nexthop-group DONNA
nexthop 192.168.208.1
nexthop 192.168.209.1
nexthop 192.168.210.1
!
PBR sees the nexthop-group and installs these as a default
route with these nexthops starting at table 10000
robot# show pbr nexthop-groups
Nexthop-Group: DONNA Table: 10001 Valid: 1 Installed: 1
Valid: 1 nexthop 192.168.209.1
Valid: 1 nexthop 192.168.210.1
Valid: 1 nexthop 192.168.208.1
I have also introduced the ability to specify a table
in a 'show ip route table XXX' to see the specified tables.
robot# show ip route table 10001
Codes: K - kernel route, C - connected, S - static, R - RIP,
O - OSPF, I - IS-IS, B - BGP, P - PIM, E - EIGRP, N - NHRP,
T - Table, v - VNC, V - VNC-Direct, A - Babel, D - SHARP,
F - PBR,
> - selected route, * - FIB route
F>* 0.0.0.0/0 [0/0] via 192.168.208.1, enp0s8, 00:14:25
* via 192.168.209.1, enp0s9, 00:14:25
* via 192.168.210.1, enp0s10, 00:14:25
PBR tracks PBR-MAPS via the pbr-map command:
!
pbr-map EVA seq 10
match src-ip 4.3.4.0/24
set nexthop-group DONNA
!
pbr-map EVA seq 20
match dst-ip 4.3.5.0/24
set nexthop-group DONNA
!
pbr-maps can have 'match src-ip <prefix>' and 'match dst-ip <prefix>'
to affect decisions about incoming packets. Additionally if you
only have one nexthop to use for a pbr-map you do not need
to setup a nexthop-group and can specify 'set nexthop XXXX'.
To apply the pbr-map to an incoming interface you do this:
interface enp0s10
pbr-policy EVA
!
When a pbr-map is applied to interfaces it can be installed
into the kernel as a rule:
[sharpd@robot frr1]$ ip rule show
0: from all lookup local
309: from 4.3.4.0/24 iif enp0s10 lookup 10001
319: from all to 4.3.5.0/24 iif enp0s10 lookup 10001
1000: from all lookup [l3mdev-table]
32766: from all lookup main
32767: from all lookup default
[sharpd@robot frr1]$ ip route show table 10001
default proto pbr metric 20
nexthop via 192.168.208.1 dev enp0s8 weight 1
nexthop via 192.168.209.1 dev enp0s9 weight 1
nexthop via 192.168.210.1 dev enp0s10 weight 1
The linux kernel now will use the rules and tables to properly
apply these policies.
Signed-off-by: Donald Sharp <sharpd@cumulusnetworks.com>
Signed-off-by: Don Slice <dslice@cumulusnetworks.com>
Signed-off-by: Quentin Young <qlyoung@cumulusnetworks.com>
2018-01-23 19:11:36 +01:00
|
|
|
install_element(PBRMAP_NODE, &vtysh_exit_pbr_map_cmd);
|
|
|
|
install_element(PBRMAP_NODE, &vtysh_quit_pbr_map_cmd);
|
2020-09-29 10:17:04 +02:00
|
|
|
install_element(PBRMAP_NODE, &vtysh_end_all_cmd);
|
2020-09-29 12:12:55 +02:00
|
|
|
#endif /* HAVE_PBRD */
|
2020-09-29 10:17:04 +02:00
|
|
|
|
|
|
|
/* bfdd */
|
2018-06-27 18:26:06 +02:00
|
|
|
#if HAVE_BFDD > 0
|
2020-09-29 10:17:04 +02:00
|
|
|
install_node(&bfd_node);
|
2018-06-27 18:26:06 +02:00
|
|
|
install_element(CONFIG_NODE, &bfd_enter_cmd);
|
|
|
|
install_element(BFD_NODE, &vtysh_exit_bfdd_cmd);
|
|
|
|
install_element(BFD_NODE, &vtysh_quit_bfdd_cmd);
|
2020-09-29 10:17:04 +02:00
|
|
|
install_element(BFD_NODE, &vtysh_end_all_cmd);
|
|
|
|
|
|
|
|
install_node(&bfd_peer_node);
|
|
|
|
install_element(BFD_NODE, &bfd_peer_enter_cmd);
|
2018-06-27 18:26:06 +02:00
|
|
|
install_element(BFD_PEER_NODE, &vtysh_exit_bfdd_cmd);
|
|
|
|
install_element(BFD_PEER_NODE, &vtysh_quit_bfdd_cmd);
|
2020-09-29 10:17:04 +02:00
|
|
|
install_element(BFD_PEER_NODE, &vtysh_end_all_cmd);
|
|
|
|
|
|
|
|
install_node(&bfd_profile_node);
|
|
|
|
install_element(BFD_NODE, &bfd_profile_enter_cmd);
|
2020-05-15 20:24:59 +02:00
|
|
|
install_element(BFD_PROFILE_NODE, &vtysh_exit_bfdd_cmd);
|
|
|
|
install_element(BFD_PROFILE_NODE, &vtysh_quit_bfdd_cmd);
|
|
|
|
install_element(BFD_PROFILE_NODE, &vtysh_end_all_cmd);
|
2018-06-27 18:26:06 +02:00
|
|
|
#endif /* HAVE_BFDD */
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2020-07-31 18:04:20 +02:00
|
|
|
install_node(&segment_routing_node);
|
2022-01-16 05:08:47 +01:00
|
|
|
install_element(CONFIG_NODE, &segment_routing_cmd);
|
2021-06-05 18:34:46 +02:00
|
|
|
install_element(SEGMENT_ROUTING_NODE, &vtysh_exit_sr_cmd);
|
|
|
|
install_element(SEGMENT_ROUTING_NODE, &vtysh_quit_sr_cmd);
|
|
|
|
install_element(SEGMENT_ROUTING_NODE, &vtysh_end_all_cmd);
|
|
|
|
|
|
|
|
#if defined(HAVE_PATHD)
|
2020-07-31 18:04:20 +02:00
|
|
|
install_node(&sr_traffic_eng_node);
|
|
|
|
install_node(&srte_segment_list_node);
|
|
|
|
install_node(&srte_policy_node);
|
|
|
|
install_node(&srte_candidate_dyn_node);
|
|
|
|
|
|
|
|
install_element(SR_TRAFFIC_ENG_NODE, &vtysh_exit_pathd_cmd);
|
|
|
|
install_element(SR_TRAFFIC_ENG_NODE, &vtysh_quit_pathd_cmd);
|
|
|
|
install_element(SR_SEGMENT_LIST_NODE, &vtysh_exit_pathd_cmd);
|
|
|
|
install_element(SR_SEGMENT_LIST_NODE, &vtysh_quit_pathd_cmd);
|
|
|
|
install_element(SR_POLICY_NODE, &vtysh_exit_pathd_cmd);
|
|
|
|
install_element(SR_POLICY_NODE, &vtysh_quit_pathd_cmd);
|
|
|
|
install_element(SR_CANDIDATE_DYN_NODE, &vtysh_exit_pathd_cmd);
|
|
|
|
install_element(SR_CANDIDATE_DYN_NODE, &vtysh_quit_pathd_cmd);
|
|
|
|
|
2021-06-05 18:34:46 +02:00
|
|
|
|
2020-07-31 18:04:20 +02:00
|
|
|
install_element(SR_TRAFFIC_ENG_NODE, &vtysh_end_all_cmd);
|
|
|
|
install_element(SR_SEGMENT_LIST_NODE, &vtysh_end_all_cmd);
|
|
|
|
install_element(SR_POLICY_NODE, &vtysh_end_all_cmd);
|
|
|
|
install_element(SR_CANDIDATE_DYN_NODE, &vtysh_end_all_cmd);
|
|
|
|
|
|
|
|
install_element(SEGMENT_ROUTING_NODE, &sr_traffic_eng_cmd);
|
|
|
|
install_element(SR_TRAFFIC_ENG_NODE, &srte_segment_list_cmd);
|
|
|
|
install_element(SR_TRAFFIC_ENG_NODE, &srte_policy_cmd);
|
|
|
|
install_element(SR_POLICY_NODE, &srte_policy_candidate_dyn_path_cmd);
|
2020-10-16 16:55:51 +02:00
|
|
|
|
|
|
|
install_node(&pcep_node);
|
|
|
|
install_node(&pcep_pcc_node);
|
|
|
|
install_node(&pcep_pce_node);
|
|
|
|
install_node(&pcep_pce_config_node);
|
|
|
|
|
|
|
|
install_element(PCEP_NODE, &vtysh_exit_pathd_cmd);
|
|
|
|
install_element(PCEP_NODE, &vtysh_quit_pathd_cmd);
|
|
|
|
install_element(PCEP_PCC_NODE, &vtysh_exit_pathd_cmd);
|
|
|
|
install_element(PCEP_PCC_NODE, &vtysh_quit_pathd_cmd);
|
|
|
|
install_element(PCEP_PCE_NODE, &vtysh_exit_pathd_cmd);
|
|
|
|
install_element(PCEP_PCE_NODE, &vtysh_quit_pathd_cmd);
|
|
|
|
install_element(PCEP_PCE_CONFIG_NODE, &vtysh_exit_pathd_cmd);
|
|
|
|
install_element(PCEP_PCE_CONFIG_NODE, &vtysh_quit_pathd_cmd);
|
|
|
|
|
|
|
|
install_element(PCEP_NODE, &vtysh_end_all_cmd);
|
|
|
|
install_element(PCEP_PCC_NODE, &vtysh_end_all_cmd);
|
|
|
|
install_element(PCEP_PCE_NODE, &vtysh_end_all_cmd);
|
|
|
|
install_element(PCEP_PCE_CONFIG_NODE, &vtysh_end_all_cmd);
|
|
|
|
|
|
|
|
install_element(SR_TRAFFIC_ENG_NODE, &pcep_cmd);
|
|
|
|
install_element(PCEP_NODE, &pcep_cli_pcc_cmd);
|
|
|
|
install_element(PCEP_NODE, &pcep_cli_pcep_pce_config_cmd);
|
|
|
|
install_element(PCEP_NODE, &pcep_cli_pce_cmd);
|
|
|
|
|
2020-07-31 18:04:20 +02:00
|
|
|
#endif /* HAVE_PATHD */
|
|
|
|
|
2020-09-29 10:17:04 +02:00
|
|
|
/* keychain */
|
|
|
|
install_node(&keychain_node);
|
|
|
|
install_element(CONFIG_NODE, &key_chain_cmd);
|
|
|
|
install_element(KEYCHAIN_NODE, &key_chain_cmd);
|
|
|
|
install_element(KEYCHAIN_NODE, &vtysh_exit_keys_cmd);
|
|
|
|
install_element(KEYCHAIN_NODE, &vtysh_quit_keys_cmd);
|
2002-12-13 21:15:29 +01:00
|
|
|
install_element(KEYCHAIN_NODE, &vtysh_end_all_cmd);
|
2020-09-29 10:17:04 +02:00
|
|
|
|
|
|
|
install_node(&keychain_key_node);
|
|
|
|
install_element(KEYCHAIN_NODE, &key_cmd);
|
|
|
|
install_element(KEYCHAIN_KEY_NODE, &key_chain_cmd);
|
|
|
|
install_element(KEYCHAIN_KEY_NODE, &vtysh_exit_keys_cmd);
|
|
|
|
install_element(KEYCHAIN_KEY_NODE, &vtysh_quit_keys_cmd);
|
2002-12-13 21:15:29 +01:00
|
|
|
install_element(KEYCHAIN_KEY_NODE, &vtysh_end_all_cmd);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2020-09-29 10:17:04 +02:00
|
|
|
/* nexthop-group */
|
|
|
|
install_node(&nh_group_node);
|
|
|
|
install_element(CONFIG_NODE, &vtysh_nexthop_group_cmd);
|
|
|
|
install_element(CONFIG_NODE, &vtysh_no_nexthop_group_cmd);
|
|
|
|
install_element(NH_GROUP_NODE, &vtysh_end_all_cmd);
|
|
|
|
install_element(NH_GROUP_NODE, &vtysh_exit_nexthop_group_cmd);
|
|
|
|
install_element(NH_GROUP_NODE, &vtysh_quit_nexthop_group_cmd);
|
|
|
|
|
|
|
|
/* zebra and all */
|
|
|
|
install_node(&zebra_node);
|
|
|
|
|
|
|
|
install_node(&interface_node);
|
|
|
|
install_element(CONFIG_NODE, &vtysh_interface_cmd);
|
2002-12-13 21:15:29 +01:00
|
|
|
install_element(INTERFACE_NODE, &vtysh_end_all_cmd);
|
|
|
|
install_element(INTERFACE_NODE, &vtysh_exit_interface_cmd);
|
2020-09-29 10:17:04 +02:00
|
|
|
install_element(INTERFACE_NODE, &vtysh_quit_interface_cmd);
|
|
|
|
|
|
|
|
install_node(&link_params_node);
|
|
|
|
install_element(INTERFACE_NODE, &vtysh_link_params_cmd);
|
2016-11-18 21:42:41 +01:00
|
|
|
install_element(LINK_PARAMS_NODE, &exit_link_params_cmd);
|
Update Traffic Engineering Support for OSPFD
NOTE: I am squashing several commits together because they
do not independently compile and we need this ability to
do any type of sane testing on the patches. Since this
series builds together I am doing this. -DBS
This new structure is the basis to get new link parameters for
Traffic Engineering from Zebra/interface layer to OSPFD and ISISD
for the support of Traffic Engineering
* lib/if.[c,h]: link parameters struture and get/set functions
* lib/command.[c,h]: creation of a new link-node
* lib/zclient.[c,h]: modification to the ZBUS message to convey the
link parameters structure
* lib/zebra.h: New ZBUS message
Signed-off-by: Olivier Dugeon <olivier.dugeon@orange.com>
Add support for IEEE 754 format
* lib/stream.[c,h]: Add stream_get{f,d} and stream_put{f,d}) demux and muxers to
safely convert between big-endian IEEE-754 single and double binary
format, as used in IETF RFCs, and C99. Implementation depends on host
using __STDC_IEC_559__, which should be everything we care about. Should
correctly error out otherwise.
* lib/network.[c,h]: Add ntohf and htonf converter
* lib/memtypes.c: Add new memeory type for Traffic Engineering support
Signed-off-by: Olivier Dugeon <olivier.dugeon@orange.com>
Add link parameters support to Zebra
* zebra/interface.c:
- Add new link-params CLI commands
- Add new functions to set/get link parameters for interface
* zebra/redistribute.[c,h]: Add new function to propagate link parameters
to routing daemon (essentially OSPFD and ISISD) for Traffic Engineering.
* zebra/redistribute_null.c: Add new function
zebra_interface_parameters_update()
* zebra/zserv.[c,h]: Add new functions to send link parameters
Signed-off-by: Olivier Dugeon <olivier.dugeon@orange.com>
Add support of new link-params CLI to vtysh
In vtysh_config.c/vtysh_config_parse_line(), it is not possible to continue
to use the ordered version for adding line i.e. config_add_line_uniq() to print
Interface CLI commands as it completely break the new LINK_PARAMS_NODE.
Signed-off-by: Olivier Dugeon <olivier.dugeon@orange.com>
Update Traffic Engineering support for OSPFD
These patches update original code to RFC3630 (OSPF-TE) and add support of
RFC5392 (Inter-AS v2) & RFC7471 (TE metric extensions) and partial support
of RFC6827 (ASON - GMPLS).
* ospfd/ospf_dump.[c,h]: Add new dump functions for Traffic Engineering
* ospfd/ospf_opaque.[c,h]: Add new TLV code points for RFC5392
* ospfd/ospf_packet.c: Update checking of OSPF_OPTION
* ospfd/ospf_vty.[c,h]: Update ospf_str2area_id
* ospfd/ospf_zebra.c: Add new function ospf_interface_link_params() to get
Link Parameters information from the interface to populate Traffic Engineering
metrics
* ospfd/ospfd.[c,h]: Update OSPF_OPTION flags (T -> MT and new DN)
* ospfd/ospf_te.[c,h]: Major modifications to update the code to new
link parameters structure and new RFCs
Signed-off-by: Olivier Dugeon <olivier.dugeon@orange.com>
tmp
2016-04-19 16:21:46 +02:00
|
|
|
install_element(LINK_PARAMS_NODE, &vtysh_end_all_cmd);
|
2021-07-21 16:22:14 +02:00
|
|
|
install_element(LINK_PARAMS_NODE, &vtysh_exit_link_params_cmd);
|
|
|
|
install_element(LINK_PARAMS_NODE, &vtysh_quit_link_params_cmd);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2020-09-29 10:17:04 +02:00
|
|
|
install_node(&pw_node);
|
|
|
|
install_element(CONFIG_NODE, &vtysh_pseudowire_cmd);
|
2017-08-02 01:16:28 +02:00
|
|
|
install_element(PW_NODE, &vtysh_end_all_cmd);
|
2021-07-21 16:22:14 +02:00
|
|
|
install_element(PW_NODE, &vtysh_exit_pseudowire_cmd);
|
|
|
|
install_element(PW_NODE, &vtysh_quit_pseudowire_cmd);
|
2017-08-02 01:16:28 +02:00
|
|
|
|
2020-09-29 10:17:04 +02:00
|
|
|
install_node(&vrf_node);
|
|
|
|
install_element(CONFIG_NODE, &vtysh_vrf_cmd);
|
|
|
|
install_element(VRF_NODE, &exit_vrf_config_cmd);
|
2016-02-02 13:34:29 +01:00
|
|
|
install_element(VRF_NODE, &vtysh_end_all_cmd);
|
|
|
|
install_element(VRF_NODE, &vtysh_exit_vrf_cmd);
|
|
|
|
install_element(VRF_NODE, &vtysh_quit_vrf_cmd);
|
2020-11-16 23:02:33 +01:00
|
|
|
|
2020-09-29 10:17:04 +02:00
|
|
|
install_node(&rmap_node);
|
|
|
|
install_element(CONFIG_NODE, &vtysh_route_map_cmd);
|
|
|
|
install_element(RMAP_NODE, &vtysh_exit_rmap_cmd);
|
|
|
|
install_element(RMAP_NODE, &vtysh_quit_rmap_cmd);
|
|
|
|
install_element(RMAP_NODE, &vtysh_end_all_cmd);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2020-09-29 10:17:04 +02:00
|
|
|
install_node(&vty_node);
|
|
|
|
install_element(CONFIG_NODE, &vtysh_line_vty_cmd);
|
|
|
|
install_element(VTY_NODE, &vtysh_exit_line_vty_cmd);
|
|
|
|
install_element(VTY_NODE, &vtysh_quit_line_vty_cmd);
|
|
|
|
install_element(VTY_NODE, &vtysh_end_all_cmd);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2019-04-24 19:33:41 +02:00
|
|
|
|
2020-09-29 10:17:04 +02:00
|
|
|
struct cmd_node *node;
|
|
|
|
for (unsigned int i = 0; i < vector_active(cmdvec); i++) {
|
|
|
|
node = vector_slot(cmdvec, i);
|
|
|
|
if (!node || node->node == VIEW_NODE)
|
|
|
|
continue;
|
|
|
|
vtysh_install_default(node->node);
|
|
|
|
}
|
2017-11-10 13:56:24 +01:00
|
|
|
|
2020-09-29 10:17:04 +02:00
|
|
|
/* vtysh */
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2021-07-23 17:38:20 +02:00
|
|
|
if (!user_mode)
|
|
|
|
install_element(VIEW_NODE, &vtysh_enable_cmd);
|
2020-09-29 10:17:04 +02:00
|
|
|
install_element(ENABLE_NODE, &vtysh_config_terminal_cmd);
|
|
|
|
install_element(ENABLE_NODE, &vtysh_disable_cmd);
|
|
|
|
|
|
|
|
/* "exit" command. */
|
|
|
|
install_element(VIEW_NODE, &vtysh_exit_all_cmd);
|
|
|
|
install_element(CONFIG_NODE, &vtysh_exit_all_cmd);
|
|
|
|
install_element(VIEW_NODE, &vtysh_quit_all_cmd);
|
|
|
|
install_element(CONFIG_NODE, &vtysh_quit_all_cmd);
|
|
|
|
|
|
|
|
/* "end" command. */
|
|
|
|
install_element(CONFIG_NODE, &vtysh_end_all_cmd);
|
|
|
|
install_element(ENABLE_NODE, &vtysh_end_all_cmd);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2020-09-30 05:59:19 +02:00
|
|
|
/* SRv6 Data-plane */
|
|
|
|
install_node(&srv6_node);
|
|
|
|
install_element(SEGMENT_ROUTING_NODE, &srv6_cmd);
|
|
|
|
install_element(SRV6_NODE, &srv6_locators_cmd);
|
|
|
|
install_element(SRV6_NODE, &exit_srv6_config_cmd);
|
|
|
|
install_element(SRV6_NODE, &vtysh_end_all_cmd);
|
|
|
|
|
|
|
|
install_node(&srv6_locs_node);
|
|
|
|
install_element(SRV6_LOCS_NODE, &srv6_locator_cmd);
|
|
|
|
install_element(SRV6_LOCS_NODE, &exit_srv6_locs_config_cmd);
|
|
|
|
install_element(SRV6_LOCS_NODE, &vtysh_end_all_cmd);
|
|
|
|
|
|
|
|
install_node(&srv6_loc_node);
|
|
|
|
install_element(SRV6_LOC_NODE, &exit_srv6_loc_config_cmd);
|
|
|
|
install_element(SRV6_LOC_NODE, &vtysh_end_all_cmd);
|
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
install_element(ENABLE_NODE, &vtysh_show_running_config_cmd);
|
2016-10-01 01:03:05 +02:00
|
|
|
install_element(ENABLE_NODE, &vtysh_copy_running_config_cmd);
|
2020-01-23 16:17:40 +01:00
|
|
|
install_element(ENABLE_NODE, &vtysh_copy_to_running_cmd);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2004-08-26 15:08:30 +02:00
|
|
|
/* "write terminal" command. */
|
2002-12-13 21:15:29 +01:00
|
|
|
install_element(ENABLE_NODE, &vtysh_write_terminal_cmd);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2004-10-03 22:11:32 +02:00
|
|
|
install_element(CONFIG_NODE, &vtysh_integrated_config_cmd);
|
|
|
|
install_element(CONFIG_NODE, &no_vtysh_integrated_config_cmd);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2004-08-26 15:08:30 +02:00
|
|
|
/* "write memory" command. */
|
2002-12-13 21:15:29 +01:00
|
|
|
install_element(ENABLE_NODE, &vtysh_write_memory_cmd);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2022-11-21 18:04:12 +01:00
|
|
|
install_element(CONFIG_NODE, &start_config_cmd);
|
|
|
|
install_element(CONFIG_NODE, &end_config_cmd);
|
|
|
|
|
2019-01-23 14:15:52 +01:00
|
|
|
install_element(CONFIG_NODE, &vtysh_terminal_paginate_cmd);
|
2018-11-08 06:50:13 +01:00
|
|
|
install_element(VIEW_NODE, &vtysh_terminal_paginate_cmd);
|
2004-08-27 15:56:39 +02:00
|
|
|
install_element(VIEW_NODE, &vtysh_terminal_length_cmd);
|
|
|
|
install_element(VIEW_NODE, &vtysh_terminal_no_length_cmd);
|
2004-10-28 19:43:11 +02:00
|
|
|
install_element(VIEW_NODE, &vtysh_show_daemons_cmd);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2019-12-06 13:48:06 +01:00
|
|
|
install_element(VIEW_NODE, &vtysh_terminal_monitor_cmd);
|
|
|
|
install_element(VIEW_NODE, &no_vtysh_terminal_monitor_cmd);
|
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
install_element(VIEW_NODE, &vtysh_ping_cmd);
|
2022-11-28 12:14:25 +01:00
|
|
|
install_element(VIEW_NODE, &vtysh_motd_cmd);
|
2003-06-25 12:49:55 +02:00
|
|
|
install_element(VIEW_NODE, &vtysh_ping_ip_cmd);
|
2002-12-13 21:15:29 +01:00
|
|
|
install_element(VIEW_NODE, &vtysh_traceroute_cmd);
|
2003-06-25 12:49:55 +02:00
|
|
|
install_element(VIEW_NODE, &vtysh_traceroute_ip_cmd);
|
2018-02-12 23:41:33 +01:00
|
|
|
install_element(VIEW_NODE, &vtysh_mtrace_cmd);
|
2003-06-25 12:49:55 +02:00
|
|
|
install_element(VIEW_NODE, &vtysh_ping6_cmd);
|
|
|
|
install_element(VIEW_NODE, &vtysh_traceroute6_cmd);
|
2015-08-13 01:11:07 +02:00
|
|
|
#if defined(HAVE_SHELL_ACCESS)
|
2002-12-13 21:15:29 +01:00
|
|
|
install_element(VIEW_NODE, &vtysh_telnet_cmd);
|
|
|
|
install_element(VIEW_NODE, &vtysh_telnet_port_cmd);
|
2003-01-25 07:56:09 +01:00
|
|
|
install_element(VIEW_NODE, &vtysh_ssh_cmd);
|
2003-06-25 12:49:55 +02:00
|
|
|
#endif
|
2015-08-13 01:11:07 +02:00
|
|
|
#if defined(HAVE_SHELL_ACCESS)
|
2002-12-13 21:15:29 +01:00
|
|
|
install_element(ENABLE_NODE, &vtysh_start_shell_cmd);
|
|
|
|
install_element(ENABLE_NODE, &vtysh_start_bash_cmd);
|
|
|
|
install_element(ENABLE_NODE, &vtysh_start_zsh_cmd);
|
2015-08-13 01:11:07 +02:00
|
|
|
#endif
|
|
|
|
|
2018-02-28 22:14:45 +01:00
|
|
|
/* debugging */
|
2018-06-14 01:08:30 +02:00
|
|
|
install_element(VIEW_NODE, &vtysh_show_error_code_cmd);
|
2020-10-01 16:57:23 +02:00
|
|
|
install_element(ENABLE_NODE, &vtysh_show_debugging_cmd);
|
|
|
|
install_element(ENABLE_NODE, &vtysh_show_debugging_hashtable_cmd);
|
2018-03-28 16:57:52 +02:00
|
|
|
install_element(ENABLE_NODE, &vtysh_debug_all_cmd);
|
2018-02-28 22:14:45 +01:00
|
|
|
install_element(CONFIG_NODE, &vtysh_debug_all_cmd);
|
2019-05-30 22:14:25 +02:00
|
|
|
install_element(ENABLE_NODE, &vtysh_debug_memstats_cmd);
|
|
|
|
install_element(CONFIG_NODE, &vtysh_debug_memstats_cmd);
|
2021-11-16 13:29:44 +01:00
|
|
|
install_element(ENABLE_NODE, &vtysh_debug_uid_backtrace_cmd);
|
|
|
|
install_element(CONFIG_NODE, &vtysh_debug_uid_backtrace_cmd);
|
2018-02-28 22:14:45 +01:00
|
|
|
|
2019-09-23 14:38:02 +02:00
|
|
|
/* northbound */
|
2021-02-12 16:18:34 +01:00
|
|
|
install_element(ENABLE_NODE, &show_config_running_cmd);
|
2020-11-21 13:06:04 +01:00
|
|
|
install_element(ENABLE_NODE, &show_yang_operational_data_cmd);
|
2021-05-04 16:41:58 +02:00
|
|
|
install_element(ENABLE_NODE, &show_yang_module_cmd);
|
|
|
|
install_element(ENABLE_NODE, &show_yang_module_detail_cmd);
|
2019-09-23 14:38:02 +02:00
|
|
|
install_element(ENABLE_NODE, &debug_nb_cmd);
|
|
|
|
install_element(CONFIG_NODE, &debug_nb_cmd);
|
|
|
|
|
2018-02-28 22:14:45 +01:00
|
|
|
/* misc lib show commands */
|
2020-12-03 18:36:29 +01:00
|
|
|
install_element(VIEW_NODE, &vtysh_show_history_cmd);
|
2006-05-28 09:54:45 +02:00
|
|
|
install_element(VIEW_NODE, &vtysh_show_memory_cmd);
|
2017-04-04 16:12:59 +02:00
|
|
|
install_element(VIEW_NODE, &vtysh_show_modules_cmd);
|
2015-08-20 03:33:13 +02:00
|
|
|
install_element(VIEW_NODE, &vtysh_show_work_queues_cmd);
|
2016-02-25 13:29:29 +01:00
|
|
|
install_element(VIEW_NODE, &vtysh_show_work_queues_daemon_cmd);
|
2015-08-20 03:33:13 +02:00
|
|
|
install_element(VIEW_NODE, &vtysh_show_thread_cmd);
|
2018-06-17 00:12:54 +02:00
|
|
|
install_element(VIEW_NODE, &vtysh_show_poll_cmd);
|
2022-02-23 16:14:53 +01:00
|
|
|
install_element(VIEW_NODE, &vtysh_show_thread_timer_cmd);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2006-05-24 00:10:01 +02:00
|
|
|
/* Logging */
|
|
|
|
install_element(VIEW_NODE, &vtysh_show_logging_cmd);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2004-10-03 22:11:32 +02:00
|
|
|
install_element(CONFIG_NODE, &vtysh_service_password_encrypt_cmd);
|
|
|
|
install_element(CONFIG_NODE, &no_vtysh_service_password_encrypt_cmd);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2022-07-01 22:24:52 +02:00
|
|
|
install_element(CONFIG_NODE, &vtysh_allow_reserved_ranges_cmd);
|
|
|
|
install_element(CONFIG_NODE, &no_vtysh_allow_reserved_ranges_cmd);
|
|
|
|
|
2004-10-03 22:11:32 +02:00
|
|
|
install_element(CONFIG_NODE, &vtysh_password_cmd);
|
2018-05-11 02:54:30 +02:00
|
|
|
install_element(CONFIG_NODE, &no_vtysh_password_cmd);
|
2004-10-03 22:11:32 +02:00
|
|
|
install_element(CONFIG_NODE, &vtysh_enable_password_cmd);
|
|
|
|
install_element(CONFIG_NODE, &no_vtysh_enable_password_cmd);
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|