2002-12-13 21:15:29 +01:00
|
|
|
/* Hash routine.
|
|
|
|
* Copyright (C) 1998 Kunihiro Ishiguro
|
|
|
|
*
|
|
|
|
* This file is part of GNU Zebra.
|
|
|
|
*
|
|
|
|
* GNU Zebra is free software; you can redistribute it and/or modify
|
|
|
|
* it under the terms of the GNU General Public License as published
|
|
|
|
* by the Free Software Foundation; either version 2, or (at your
|
|
|
|
* option) any later version.
|
|
|
|
*
|
|
|
|
* GNU Zebra is distributed in the hope that it will be useful, but
|
|
|
|
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
|
|
* General Public License for more details.
|
|
|
|
*
|
2017-05-13 10:25:29 +02:00
|
|
|
* You should have received a copy of the GNU General Public License along
|
|
|
|
* with this program; see the file COPYING; if not, write to the Free Software
|
|
|
|
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
|
2002-12-13 21:15:29 +01:00
|
|
|
*/
|
|
|
|
|
|
|
|
#include <zebra.h>
|
2017-05-30 02:16:52 +02:00
|
|
|
#include <math.h>
|
2002-12-13 21:15:29 +01:00
|
|
|
|
|
|
|
#include "hash.h"
|
|
|
|
#include "memory.h"
|
2017-05-30 02:16:52 +02:00
|
|
|
#include "linklist.h"
|
|
|
|
#include "termtable.h"
|
|
|
|
#include "vty.h"
|
|
|
|
#include "command.h"
|
2017-06-19 16:22:26 +02:00
|
|
|
#include "libfrr.h"
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2015-05-29 05:48:31 +02:00
|
|
|
DEFINE_MTYPE( LIB, HASH, "Hash")
|
|
|
|
DEFINE_MTYPE( LIB, HASH_BACKET, "Hash Bucket")
|
|
|
|
DEFINE_MTYPE_STATIC(LIB, HASH_INDEX, "Hash Index")
|
|
|
|
|
2017-06-19 16:22:26 +02:00
|
|
|
pthread_mutex_t _hashes_mtx = PTHREAD_MUTEX_INITIALIZER;
|
2017-05-30 02:16:52 +02:00
|
|
|
static struct list *_hashes;
|
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
/* Allocate a new hash. */
|
|
|
|
struct hash *
|
2005-05-06 Paul Jakma <paul@dishone.st>
* (general) extern and static'ification of functions in code and
header.
Cleanup any definitions with unspecified arguments.
Add casts for callback assignments where the callback is defined,
typically, as passing void *, but the function being assigned has
some other pointer type defined as its argument, as gcc complains
about casts from void * to X* via function arguments.
Fix some old K&R style function argument definitions.
Add noreturn gcc attribute to some functions, as appropriate.
Add unused gcc attribute to some functions (eg ones meant to help
while debugging)
Add guard defines to headers which were missing them.
* command.c: (install_node) add const qualifier, still doesnt shut
up the warning though, because of the double pointer.
(cmp_node) ditto
* keychain.c: (key_str2time) Add GET_LONG_RANGE() macro, derived
fromn vty.h ones to fix some of the (long) < 0 warnings.
* thread.c: (various) use thread_empty
(cpu_record_hash_key) should cast to uintptr_t, a stdint.h type
* vty.h: Add VTY_GET_IPV4_ADDRESS and VTY_GET_IPV4_PREFIX so they
removed from ospfd/ospf_vty.h
* zebra.h: Move definition of ZEBRA_PORT to here, to remove
dependence of lib on zebra/zserv.h
2005-05-06 23:25:49 +02:00
|
|
|
hash_create_size (unsigned int size, unsigned int (*hash_key) (void *),
|
2017-06-19 16:22:26 +02:00
|
|
|
int (*hash_cmp) (const void *, const void *),
|
|
|
|
const char *name)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
|
|
|
struct hash *hash;
|
|
|
|
|
2013-01-04 23:29:21 +01:00
|
|
|
assert ((size & (size-1)) == 0);
|
2017-06-19 16:22:26 +02:00
|
|
|
hash = XCALLOC (MTYPE_HASH, sizeof (struct hash));
|
2008-08-18 23:13:29 +02:00
|
|
|
hash->index = XCALLOC (MTYPE_HASH_INDEX,
|
2002-12-13 21:15:29 +01:00
|
|
|
sizeof (struct hash_backet *) * size);
|
|
|
|
hash->size = size;
|
2013-07-31 17:01:18 +02:00
|
|
|
hash->no_expand = 0;
|
2002-12-13 21:15:29 +01:00
|
|
|
hash->hash_key = hash_key;
|
|
|
|
hash->hash_cmp = hash_cmp;
|
|
|
|
hash->count = 0;
|
2017-06-19 16:22:26 +02:00
|
|
|
hash->name = name ? XSTRDUP(MTYPE_HASH, name) : NULL;
|
|
|
|
hash->stats.empty = hash->size;
|
|
|
|
|
|
|
|
pthread_mutex_lock (&_hashes_mtx);
|
|
|
|
{
|
|
|
|
if (!_hashes)
|
|
|
|
_hashes = list_new();
|
|
|
|
|
|
|
|
listnode_add (_hashes, hash);
|
|
|
|
}
|
|
|
|
pthread_mutex_unlock (&_hashes_mtx);
|
2002-12-13 21:15:29 +01:00
|
|
|
|
|
|
|
return hash;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Allocate a new hash with default hash size. */
|
|
|
|
struct hash *
|
2005-05-06 Paul Jakma <paul@dishone.st>
* (general) extern and static'ification of functions in code and
header.
Cleanup any definitions with unspecified arguments.
Add casts for callback assignments where the callback is defined,
typically, as passing void *, but the function being assigned has
some other pointer type defined as its argument, as gcc complains
about casts from void * to X* via function arguments.
Fix some old K&R style function argument definitions.
Add noreturn gcc attribute to some functions, as appropriate.
Add unused gcc attribute to some functions (eg ones meant to help
while debugging)
Add guard defines to headers which were missing them.
* command.c: (install_node) add const qualifier, still doesnt shut
up the warning though, because of the double pointer.
(cmp_node) ditto
* keychain.c: (key_str2time) Add GET_LONG_RANGE() macro, derived
fromn vty.h ones to fix some of the (long) < 0 warnings.
* thread.c: (various) use thread_empty
(cpu_record_hash_key) should cast to uintptr_t, a stdint.h type
* vty.h: Add VTY_GET_IPV4_ADDRESS and VTY_GET_IPV4_PREFIX so they
removed from ospfd/ospf_vty.h
* zebra.h: Move definition of ZEBRA_PORT to here, to remove
dependence of lib on zebra/zserv.h
2005-05-06 23:25:49 +02:00
|
|
|
hash_create (unsigned int (*hash_key) (void *),
|
2017-06-19 16:22:26 +02:00
|
|
|
int (*hash_cmp) (const void *, const void *),
|
|
|
|
const char *name)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
2017-06-19 16:22:26 +02:00
|
|
|
return hash_create_size (HASH_INITIAL_SIZE, hash_key, hash_cmp, name);
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Identity allocation callback for hash_get(): returns its argument
 * unchanged.  Pass this as alloc_func to intern a value the caller
 * has already allocated. */
void *
hash_alloc_intern (void *arg)
{
  return arg;
}
|
|
|
|
|
2017-06-19 16:22:26 +02:00
|
|
|
/* Adjust hz->stats.ssq (the sum of squared bucket-chain lengths) when
 * one chain's length changes from 'old' to 'new'.
 *
 * Note: no semicolon after "while (0)" — the do/while(0) wrapper
 * exists precisely so the macro behaves as a single statement and can
 * be used safely in if/else bodies; a trailing semicolon (as the old
 * version had) would break that, and the old trailing backslash
 * continued the macro onto the following line.  Arguments are
 * parenthesized against operator-precedence surprises. */
#define hash_update_ssq(hz, old, new)                 \
  do {                                                \
    long double res;                                  \
    res = powl ((old), 2.0);                          \
    (hz)->stats.ssq -= (uint64_t) res;                \
    res = powl ((new), 2.0);                          \
    (hz)->stats.ssq += (uint64_t) res;                \
  } while (0)
|
|
|
|
|
2013-01-11 19:25:26 +01:00
|
|
|
/* Expand hash if the chain length exceeds the threshold. */
|
|
|
|
static void hash_expand (struct hash *hash)
|
|
|
|
{
|
|
|
|
unsigned int i, new_size, losers;
|
|
|
|
struct hash_backet *hb, *hbnext, **new_index;
|
|
|
|
|
|
|
|
new_size = hash->size * 2;
|
|
|
|
new_index = XCALLOC(MTYPE_HASH_INDEX, sizeof(struct hash_backet *) * new_size);
|
|
|
|
if (new_index == NULL)
|
|
|
|
return;
|
|
|
|
|
2017-06-19 16:22:26 +02:00
|
|
|
hash->stats.empty = new_size;
|
|
|
|
|
2013-01-11 19:25:26 +01:00
|
|
|
for (i = 0; i < hash->size; i++)
|
|
|
|
for (hb = hash->index[i]; hb; hb = hbnext)
|
|
|
|
{
|
|
|
|
unsigned int h = hb->key & (new_size - 1);
|
|
|
|
|
|
|
|
hbnext = hb->next;
|
|
|
|
hb->next = new_index[h];
|
2017-06-19 16:22:26 +02:00
|
|
|
|
|
|
|
int oldlen = hb->next ? hb->next->len : 0;
|
|
|
|
int newlen = oldlen + 1;
|
|
|
|
|
|
|
|
if (newlen == 1)
|
|
|
|
hash->stats.empty--;
|
|
|
|
else
|
|
|
|
hb->next->len = 0;
|
|
|
|
|
|
|
|
hb->len = newlen;
|
|
|
|
|
|
|
|
hash_update_ssq(hash, oldlen, newlen);
|
|
|
|
|
2013-01-11 19:25:26 +01:00
|
|
|
new_index[h] = hb;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Switch to new table */
|
|
|
|
XFREE(MTYPE_HASH_INDEX, hash->index);
|
|
|
|
hash->size = new_size;
|
|
|
|
hash->index = new_index;
|
|
|
|
|
|
|
|
/* Ideally, new index should have chains half as long as the original.
|
2017-06-19 16:22:26 +02:00
|
|
|
* If expansion didn't help, then not worth expanding again,
|
|
|
|
* the problem is the hash function. */
|
2013-01-11 19:25:26 +01:00
|
|
|
losers = 0;
|
|
|
|
for (i = 0; i < hash->size; i++)
|
|
|
|
{
|
2017-06-19 16:22:26 +02:00
|
|
|
unsigned int len = hash->index[i] ? hash->index[i]->len : 0;
|
|
|
|
|
|
|
|
if (len > HASH_THRESHOLD/2)
|
|
|
|
++losers;
|
|
|
|
if (len >= HASH_THRESHOLD)
|
|
|
|
hash->no_expand = 1;
|
2013-01-11 19:25:26 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
if (losers > hash->count / 2)
|
|
|
|
hash->no_expand = 1;
|
|
|
|
}
|
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
/* Lookup and return hash backet in hash. If there is no
|
|
|
|
corresponding hash backet and alloc_func is specified, create new
|
|
|
|
hash backet. */
|
|
|
|
void *
|
2005-05-06 Paul Jakma <paul@dishone.st>
* (general) extern and static'ification of functions in code and
header.
Cleanup any definitions with unspecified arguments.
Add casts for callback assignments where the callback is defined,
typically, as passing void *, but the function being assigned has
some other pointer type defined as its argument, as gcc complains
about casts from void * to X* via function arguments.
Fix some old K&R style function argument definitions.
Add noreturn gcc attribute to some functions, as appropriate.
Add unused gcc attribute to some functions (eg ones meant to help
while debugging)
Add guard defines to headers which were missing them.
* command.c: (install_node) add const qualifier, still doesnt shut
up the warning though, because of the double pointer.
(cmp_node) ditto
* keychain.c: (key_str2time) Add GET_LONG_RANGE() macro, derived
fromn vty.h ones to fix some of the (long) < 0 warnings.
* thread.c: (various) use thread_empty
(cpu_record_hash_key) should cast to uintptr_t, a stdint.h type
* vty.h: Add VTY_GET_IPV4_ADDRESS and VTY_GET_IPV4_PREFIX so they
removed from ospfd/ospf_vty.h
* zebra.h: Move definition of ZEBRA_PORT to here, to remove
dependence of lib on zebra/zserv.h
2005-05-06 23:25:49 +02:00
|
|
|
hash_get (struct hash *hash, void *data, void * (*alloc_func) (void *))
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
|
|
|
unsigned int key;
|
|
|
|
unsigned int index;
|
|
|
|
void *newdata;
|
2013-01-11 19:25:26 +01:00
|
|
|
unsigned int len;
|
2002-12-13 21:15:29 +01:00
|
|
|
struct hash_backet *backet;
|
|
|
|
|
|
|
|
key = (*hash->hash_key) (data);
|
2013-01-04 23:29:21 +01:00
|
|
|
index = key & (hash->size - 1);
|
2013-01-11 19:25:26 +01:00
|
|
|
len = 0;
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2013-01-11 19:25:26 +01:00
|
|
|
for (backet = hash->index[index]; backet != NULL; backet = backet->next)
|
|
|
|
{
|
|
|
|
if (backet->key == key && (*hash->hash_cmp) (backet->data, data))
|
|
|
|
return backet->data;
|
|
|
|
++len;
|
|
|
|
}
|
2002-12-13 21:15:29 +01:00
|
|
|
|
|
|
|
if (alloc_func)
|
|
|
|
{
|
|
|
|
newdata = (*alloc_func) (data);
|
|
|
|
if (newdata == NULL)
|
|
|
|
return NULL;
|
|
|
|
|
2013-01-11 19:25:26 +01:00
|
|
|
if (len > HASH_THRESHOLD && !hash->no_expand)
|
|
|
|
{
|
|
|
|
hash_expand (hash);
|
|
|
|
index = key & (hash->size - 1);
|
|
|
|
}
|
|
|
|
|
2017-06-19 16:22:26 +02:00
|
|
|
backet = XCALLOC (MTYPE_HASH_BACKET, sizeof (struct hash_backet));
|
2002-12-13 21:15:29 +01:00
|
|
|
backet->data = newdata;
|
|
|
|
backet->key = key;
|
|
|
|
backet->next = hash->index[index];
|
|
|
|
hash->index[index] = backet;
|
|
|
|
hash->count++;
|
2017-06-19 16:22:26 +02:00
|
|
|
|
|
|
|
int oldlen = backet->next ? backet->next->len : 0;
|
|
|
|
int newlen = oldlen + 1;
|
|
|
|
|
|
|
|
if (newlen == 1)
|
|
|
|
hash->stats.empty--;
|
|
|
|
else
|
|
|
|
backet->next->len = 0;
|
|
|
|
|
|
|
|
backet->len = newlen;
|
|
|
|
|
|
|
|
hash_update_ssq(hash, oldlen, newlen);
|
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
return backet->data;
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Hash lookup. */
|
|
|
|
void *
|
|
|
|
hash_lookup (struct hash *hash, void *data)
|
|
|
|
{
|
|
|
|
return hash_get (hash, data, NULL);
|
|
|
|
}
|
|
|
|
|
2010-08-27 23:11:14 +02:00
|
|
|
/* Simple Bernstein hash which is simple and fast for common case */
unsigned int string_hash_make (const char *str)
{
  unsigned int h;
  const char *p;

  for (h = 0, p = str; *p; p++)
    h = (h * 33) ^ (unsigned int) *p;

  return h;
}
|
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
/* This function release registered value from specified hash. When
|
|
|
|
release is successfully finished, return the data pointer in the
|
|
|
|
hash backet. */
|
|
|
|
void *
|
|
|
|
hash_release (struct hash *hash, void *data)
|
|
|
|
{
|
|
|
|
void *ret;
|
|
|
|
unsigned int key;
|
|
|
|
unsigned int index;
|
|
|
|
struct hash_backet *backet;
|
|
|
|
struct hash_backet *pp;
|
|
|
|
|
|
|
|
key = (*hash->hash_key) (data);
|
2013-01-04 23:29:21 +01:00
|
|
|
index = key & (hash->size - 1);
|
2002-12-13 21:15:29 +01:00
|
|
|
|
|
|
|
for (backet = pp = hash->index[index]; backet; backet = backet->next)
|
|
|
|
{
|
|
|
|
if (backet->key == key && (*hash->hash_cmp) (backet->data, data))
|
|
|
|
{
|
2017-06-19 16:22:26 +02:00
|
|
|
int oldlen = hash->index[index]->len;
|
|
|
|
int newlen = oldlen - 1;
|
|
|
|
|
|
|
|
if (backet == pp)
|
2002-12-13 21:15:29 +01:00
|
|
|
hash->index[index] = backet->next;
|
2017-06-19 16:22:26 +02:00
|
|
|
else
|
2002-12-13 21:15:29 +01:00
|
|
|
pp->next = backet->next;
|
|
|
|
|
2017-06-19 16:22:26 +02:00
|
|
|
if (hash->index[index])
|
|
|
|
hash->index[index]->len = newlen;
|
|
|
|
else
|
|
|
|
hash->stats.empty++;
|
|
|
|
|
|
|
|
hash_update_ssq(hash, oldlen, newlen);
|
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
ret = backet->data;
|
|
|
|
XFREE (MTYPE_HASH_BACKET, backet);
|
|
|
|
hash->count--;
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
pp = backet;
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Iterator function for hash. */
|
|
|
|
void
|
|
|
|
hash_iterate (struct hash *hash,
|
|
|
|
void (*func) (struct hash_backet *, void *), void *arg)
|
|
|
|
{
|
2004-10-05 23:01:23 +02:00
|
|
|
unsigned int i;
|
2002-12-13 21:15:29 +01:00
|
|
|
struct hash_backet *hb;
|
2004-08-31 19:28:41 +02:00
|
|
|
struct hash_backet *hbnext;
|
2002-12-13 21:15:29 +01:00
|
|
|
|
|
|
|
for (i = 0; i < hash->size; i++)
|
2004-08-31 19:28:41 +02:00
|
|
|
for (hb = hash->index[i]; hb; hb = hbnext)
|
|
|
|
{
|
|
|
|
/* get pointer to next hash backet here, in case (*func)
|
|
|
|
* decides to delete hb by calling hash_release
|
|
|
|
*/
|
|
|
|
hbnext = hb->next;
|
|
|
|
(*func) (hb, arg);
|
|
|
|
}
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
/* Iterator function for hash. */
|
|
|
|
void
|
|
|
|
hash_walk (struct hash *hash,
|
|
|
|
int (*func) (struct hash_backet *, void *), void *arg)
|
|
|
|
{
|
|
|
|
unsigned int i;
|
|
|
|
struct hash_backet *hb;
|
|
|
|
struct hash_backet *hbnext;
|
|
|
|
int ret = HASHWALK_CONTINUE;
|
|
|
|
|
|
|
|
for (i = 0; i < hash->size; i++)
|
|
|
|
{
|
|
|
|
for (hb = hash->index[i]; hb; hb = hbnext)
|
|
|
|
{
|
|
|
|
/* get pointer to next hash backet here, in case (*func)
|
|
|
|
* decides to delete hb by calling hash_release
|
|
|
|
*/
|
|
|
|
hbnext = hb->next;
|
|
|
|
ret = (*func) (hb, arg);
|
|
|
|
if (ret == HASHWALK_ABORT)
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
/* Clean up hash. */
|
|
|
|
void
|
|
|
|
hash_clean (struct hash *hash, void (*free_func) (void *))
|
|
|
|
{
|
2004-10-05 23:01:23 +02:00
|
|
|
unsigned int i;
|
2002-12-13 21:15:29 +01:00
|
|
|
struct hash_backet *hb;
|
|
|
|
struct hash_backet *next;
|
|
|
|
|
|
|
|
for (i = 0; i < hash->size; i++)
|
|
|
|
{
|
|
|
|
for (hb = hash->index[i]; hb; hb = next)
|
|
|
|
{
|
|
|
|
next = hb->next;
|
|
|
|
|
|
|
|
if (free_func)
|
|
|
|
(*free_func) (hb->data);
|
|
|
|
|
|
|
|
XFREE (MTYPE_HASH_BACKET, hb);
|
|
|
|
hash->count--;
|
|
|
|
}
|
|
|
|
hash->index[i] = NULL;
|
|
|
|
}
|
2017-06-19 16:22:26 +02:00
|
|
|
|
|
|
|
hash->stats.ssq = 0;
|
|
|
|
hash->stats.empty = hash->size;
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Free hash memory. You may call hash_clean before call this
|
|
|
|
function. */
|
|
|
|
void
|
|
|
|
hash_free (struct hash *hash)
|
|
|
|
{
|
2017-06-19 16:22:26 +02:00
|
|
|
pthread_mutex_lock (&_hashes_mtx);
|
|
|
|
{
|
|
|
|
if (_hashes)
|
|
|
|
{
|
|
|
|
listnode_delete (_hashes, hash);
|
|
|
|
if (_hashes->count == 0)
|
|
|
|
{
|
|
|
|
list_delete (_hashes);
|
|
|
|
_hashes = NULL;
|
|
|
|
}
|
|
|
|
}
|
2017-05-30 02:16:52 +02:00
|
|
|
}
|
2017-06-19 16:22:26 +02:00
|
|
|
pthread_mutex_unlock (&_hashes_mtx);
|
2017-05-30 02:16:52 +02:00
|
|
|
|
2017-06-19 16:22:26 +02:00
|
|
|
if (hash->name)
|
|
|
|
XFREE (MTYPE_HASH, hash->name);
|
2017-05-30 02:16:52 +02:00
|
|
|
|
2017-06-19 16:22:26 +02:00
|
|
|
XFREE (MTYPE_HASH_INDEX, hash->index);
|
|
|
|
XFREE (MTYPE_HASH, hash);
|
2017-05-30 02:16:52 +02:00
|
|
|
}
|
|
|
|
|
2017-06-19 16:22:26 +02:00
|
|
|
|
|
|
|
/* CLI commands ------------------------------------------------------------ */
|
2017-05-30 02:16:52 +02:00
|
|
|
|
|
|
|
DEFUN(show_hash_stats,
|
|
|
|
show_hash_stats_cmd,
|
2017-06-19 16:22:26 +02:00
|
|
|
"show hashtable [statistics]",
|
2017-05-30 02:16:52 +02:00
|
|
|
SHOW_STR
|
2017-06-19 16:22:26 +02:00
|
|
|
"Statistics about hash tables\n"
|
|
|
|
"Statistics about hash tables\n")
|
2017-05-30 02:16:52 +02:00
|
|
|
{
|
|
|
|
struct hash *h;
|
|
|
|
struct listnode *ln;
|
|
|
|
struct ttable *tt = ttable_new (&ttable_styles[TTSTYLE_BLANK]);
|
|
|
|
|
2017-06-23 07:14:54 +02:00
|
|
|
ttable_add_row (tt, "Hash table|Buckets|Entries|Empty|LF|SD|FLF|SD");
|
2017-06-19 16:22:26 +02:00
|
|
|
tt->style.cell.lpad = 2;
|
|
|
|
tt->style.cell.rpad = 1;
|
|
|
|
tt->style.corner = '+';
|
2017-05-30 02:16:52 +02:00
|
|
|
ttable_restyle (tt);
|
|
|
|
ttable_rowseps (tt, 0, BOTTOM, true, '-');
|
|
|
|
|
2017-06-19 16:22:26 +02:00
|
|
|
/* Summary statistics calculated are:
|
|
|
|
*
|
|
|
|
* - Load factor: This is the number of elements in the table divided by the
|
|
|
|
* number of buckets. Since this hash table implementation uses chaining,
|
|
|
|
* this value can be greater than 1. This number provides information on
|
|
|
|
* how 'full' the table is, but does not provide information on how evenly
|
|
|
|
* distributed the elements are. Notably, a load factor >= 1 does not imply
|
|
|
|
* that every bucket has an element; with a pathological hash function, all
|
|
|
|
* elements could be in a single bucket.
|
|
|
|
*
|
|
|
|
* - Full load factor: this is the number of elements in the table divided by
|
|
|
|
* the number of buckets that have some elements in them.
|
|
|
|
*
|
2017-06-23 07:14:54 +02:00
|
|
|
* - Std. Dev.: This is the standard deviation calculated from the relevant
|
|
|
|
* load factor. If the load factor is the mean of number of elements per
|
|
|
|
* bucket, the standard deviation measures how much any particular bucket
|
|
|
|
* is likely to deviate from the mean. As a rule of thumb this number
|
|
|
|
* should be less than 2, and ideally <= 1 for optimal performance. A
|
|
|
|
* number larger than 3 generally indicates a poor hash function.
|
2017-06-19 16:22:26 +02:00
|
|
|
*/
|
|
|
|
|
2017-06-23 07:14:54 +02:00
|
|
|
double lf; // load factor
|
|
|
|
double flf; // full load factor
|
|
|
|
double var; // overall variance
|
|
|
|
double fvar; // full variance
|
|
|
|
double stdv; // overall stddev
|
|
|
|
double fstdv; // full stddev
|
2017-06-19 16:22:26 +02:00
|
|
|
|
|
|
|
long double x2; // h->count ^ 2
|
|
|
|
long double ldc; // (long double) h->count
|
|
|
|
long double full; // h->size - h->stats.empty
|
|
|
|
long double ssq; // ssq casted to long double
|
|
|
|
|
|
|
|
pthread_mutex_lock (&_hashes_mtx);
|
2017-07-11 14:52:06 +02:00
|
|
|
if (!_hashes)
|
|
|
|
{
|
|
|
|
pthread_mutex_unlock (&_hashes_mtx);
|
|
|
|
vty_outln (vty, "No hash tables in use.");
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2017-05-30 02:16:52 +02:00
|
|
|
for (ALL_LIST_ELEMENTS_RO (_hashes, ln, h))
|
|
|
|
{
|
2017-06-19 16:22:26 +02:00
|
|
|
if (!h->name)
|
2017-05-30 02:16:52 +02:00
|
|
|
continue;
|
|
|
|
|
2017-06-19 16:22:26 +02:00
|
|
|
ssq = (long double) h->stats.ssq;
|
2017-06-23 07:14:54 +02:00
|
|
|
x2 = powl(h->count, 2.0);
|
2017-06-19 16:22:26 +02:00
|
|
|
ldc = (long double) h->count;
|
|
|
|
full = h->size - h->stats.empty;
|
|
|
|
lf = h->count / (double) h->size;
|
|
|
|
flf = full ? h->count / (double) (full) : 0;
|
2017-06-23 07:14:54 +02:00
|
|
|
var = ldc ? (1.0 / ldc) * (ssq - x2 / ldc) : 0;
|
|
|
|
fvar = full ? (1.0 / full) * (ssq - x2 / full) : 0;
|
2017-06-19 16:22:26 +02:00
|
|
|
var = (var < .0001) ? 0 : var;
|
|
|
|
fvar = (fvar < .0001) ? 0 : fvar;
|
2017-06-23 07:14:54 +02:00
|
|
|
stdv = sqrt(var);
|
|
|
|
fstdv = sqrt(fvar);
|
2017-06-19 16:22:26 +02:00
|
|
|
|
2017-06-23 07:14:54 +02:00
|
|
|
ttable_add_row (tt, "%s|%d|%ld|%.0f%%|%.2lf|%.2lf|%.2lf|%.2lf", h->name,
|
2017-06-19 16:22:26 +02:00
|
|
|
h->size, h->count,
|
2017-06-23 07:14:54 +02:00
|
|
|
(h->stats.empty / (double) h->size)*100, lf, stdv, flf,
|
|
|
|
fstdv);
|
2017-06-19 16:22:26 +02:00
|
|
|
}
|
|
|
|
pthread_mutex_unlock (&_hashes_mtx);
|
|
|
|
|
|
|
|
/* display header */
|
|
|
|
char header[] = "Showing hash table statistics for ";
|
|
|
|
char underln[sizeof(header) + strlen(frr_protonameinst)];
|
|
|
|
memset (underln, '-', sizeof(underln));
|
|
|
|
underln[sizeof(underln) - 1] = '\0';
|
2017-06-30 18:56:10 +02:00
|
|
|
vty_outln (vty, "%s%s", header, frr_protonameinst);
|
|
|
|
vty_outln (vty, "%s", underln);
|
2017-06-19 16:22:26 +02:00
|
|
|
|
2017-06-30 18:56:10 +02:00
|
|
|
vty_outln (vty, "# allocated: %d", _hashes->count);
|
|
|
|
vty_outln (vty, "# named: %d%s", tt->nrows - 1, VTYNL);
|
2017-06-19 16:22:26 +02:00
|
|
|
|
|
|
|
if (tt->nrows > 1)
|
|
|
|
{
|
|
|
|
ttable_colseps (tt, 0, RIGHT, true, '|');
|
2017-06-30 18:56:10 +02:00
|
|
|
char *table = ttable_dump (tt, VTYNL);
|
|
|
|
vty_out (vty, "%s%s", table, VTYNL);
|
2017-06-19 16:22:26 +02:00
|
|
|
XFREE (MTYPE_TMP, table);
|
2017-05-30 02:16:52 +02:00
|
|
|
}
|
2017-06-19 16:22:26 +02:00
|
|
|
else
|
2017-06-30 18:56:10 +02:00
|
|
|
vty_outln (vty, "No named hash tables to display.");
|
2017-05-30 02:16:52 +02:00
|
|
|
|
|
|
|
ttable_del (tt);
|
|
|
|
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
hash_cmd_init ()
|
|
|
|
{
|
|
|
|
install_element (ENABLE_NODE, &show_hash_stats_cmd);
|
|
|
|
}
|