Based on a suggestion from bitbckt: I saw this in my feed and feel it merits comment — I hope you don't mind the input. You'll want to monitor the load factor of the hash table and re-hash the table on insert when it is exceeded. Otherwise, key lookup will degrade toward linear time for sets of keys with a high number of collisions. The easiest way to implement the load factor is to maintain a count of allocated nodes in tvm_htab_t and divide that by the bucket count to obtain the load factor. Of course, you'd need the bucket count (HTAB_SIZE) to be dynamic, too.
29 lines · 573 B · C
#ifndef TVM_HASHTAB_H_
#define TVM_HASHTAB_H_

/* Maximum key length in bytes — presumably the buffer size used when
 * keys are copied into nodes; confirm against tvm_hashtab.c. */
#define KEY_LENGTH 64

/* Initial bucket count. NOTE(review): with `size` and `num_nodes` tracked
 * in tvm_htab_t, the table can be grown (re-hashed) when the load factor
 * num_nodes / size gets too high, rather than staying fixed at this value. */
#define HTAB_SIZE 4096

/* One entry in a bucket's collision chain (separate chaining). */
typedef struct tvm_htable_node_s
{
	char* key;                      /* key string; ownership — table vs caller — not visible here, confirm in .c */
	int value;                      /* value associated with the key */
	struct tvm_htable_node_s* next; /* next node in this bucket's chain, or NULL */
} tvm_htable_node_t;

/* Chained hash table mapping C strings to ints. */
typedef struct tvm_htab_s
{
	unsigned int num_nodes;    /* total entries currently stored (for load-factor tracking) */
	unsigned int size;         /* number of buckets in `nodes` */
	tvm_htable_node_t** nodes; /* bucket array; each slot heads a (possibly empty) chain */
} tvm_htab_t;

/* Allocate and initialize an empty table.
 * Declared with (void): an empty parameter list in C means "unspecified
 * arguments", not "no arguments" (CERT DCL20-C).
 * Caller owns the result; release with destroy_htab(). */
tvm_htab_t* create_htab(void);

/* Free the table, its bucket array, and all chained nodes. */
void destroy_htab(tvm_htab_t* htab);

/* Insert (key, value). Return-code semantics (success/failure values)
 * are defined in tvm_hashtab.c — confirm there. */
int htab_add(tvm_htab_t* htab, const char* key, int value);

/* Look up key; presumably returns the stored value, or a sentinel when
 * the key is absent — confirm against the implementation. */
int htab_find(tvm_htab_t* htab, const char* key);

/* Map key to a bucket index in [0, size). */
unsigned int htab_hash(const char* key, const unsigned int size);

#endif /* TVM_HASHTAB_H_ */