/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IPv4 FIB: lookup engine and maintenance routines.
 *
 * Version:	$Id: fib_hash.c,v 1.12 1999/08/31 07:03:27 davem Exp $
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/init.h>

#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/ip_fib.h>

/* Debug trace of insert/delete operations; compiled out by default. */
#define FTprint(a...) /* printk(KERN_DEBUG a) */

/* Slab cache for struct fib_node; created on demand in fib_hash_init(). */
static kmem_cache_t * fn_hash_kmem;

/*
   These bizarre types are just to force strict type checking.
   When I reversed order of bytes and changed to natural mask lengths,
   I forgot to make fixes in several places. Now I am lazy to return it back.
 */

/* Destination prefix, network byte order, already masked to the zone. */
typedef struct {
	u32 datum;
} fn_key_t;

/* Index of a bucket within a zone's hash table. */
typedef struct {
	u32 datum;
} fn_hash_idx_t;

/* One route entry.  Nodes hang off a zone's hash buckets in chains that
 * are kept sorted by ascending key (see fn_key_leq users).
 */
struct fib_node {
	struct fib_node		*fn_next;	/* next node in bucket chain */
	struct fib_info		*fn_info;	/* shared route semantics (nexthops etc.) */
#define FIB_INFO(f)	((f)->fn_info)
	fn_key_t		fn_key;		/* masked destination prefix */
	u8			fn_tos;
	u8			fn_type;	/* RTN_* route type */
	u8			fn_scope;
	u8			fn_state;	/* FN_S_* flags below */
};

#define FN_S_ZOMBIE	1	/* deleted, kept until next flush */
#define FN_S_ACCESSED	2	/* hit by a lookup since last cache flush */

/* Count of zombie entries across all zones; fib_flush() runs when large. */
static int fib_hash_zombies;

/* Per-prefix-length hash table ("zone"). */
struct fn_zone {
	struct fn_zone	*fz_next;	/* Next not empty zone	*/
	struct fib_node	**fz_hash;	/* Hash table pointer	*/
	int		fz_nent;	/* Number of entries	*/

	int		fz_divisor;	/* Hash divisor		*/
	u32		fz_hashmask;	/* (1<<fz_divisor) - 1	*/
#define FZ_HASHMASK(fz)	((fz)->fz_hashmask)

	int		fz_order;	/* Zone order		*/
	u32		fz_mask;
#define FZ_MASK(fz)	((fz)->fz_mask)
};

/* NOTE. On fast computers evaluation of fz_hashmask and fz_mask
   can be cheaper than memory lookup, so that FZ_* macros are used.
 */

/* A whole table: one zone per prefix length 0..32, plus a list of the
 * non-empty zones ordered most-specific-first (built by fn_new_zone).
 */
struct fn_hash {
	struct fn_zone	*fn_zones[33];
	struct fn_zone	*fn_zone_list;
};

/* Fold the zone's significant prefix bits down to a bucket index.
 * NOTE(review): for fz_order == 0 the shift count is 32, which is
 * undefined in ISO C; zone 0 has a single bucket and the final mask is 0,
 * so the result is 0 on common targets — confirm per architecture.
 */
static __inline__ fn_hash_idx_t fn_hash(fn_key_t key, struct fn_zone *fz)
{
	u32 h = ntohl(key.datum)>>(32 - fz->fz_order);
	h ^= (h>>20);
	h ^= (h>>10);
	h ^= (h>>5);
	h &= FZ_HASHMASK(fz);
	return *(fn_hash_idx_t*)&h;	/* type-pun back into the strict index type */
}

#define fz_key_0(key)		((key).datum = 0)
#define fz_prefix(key,fz)	((key).datum)

/* Build a key from a destination address, masked to the zone's prefix. */
static __inline__ fn_key_t fz_key(u32 dst, struct fn_zone *fz)
{
	fn_key_t k;
	k.datum = dst & FZ_MASK(fz);
	return k;
}

/* Address of the bucket head pointer for this key. */
static __inline__ struct fib_node ** fz_chain_p(fn_key_t key, struct fn_zone *fz)
{
	return &fz->fz_hash[fn_hash(key, fz).datum];
}

/* First node of the bucket chain for this key. */
static __inline__ struct fib_node * fz_chain(fn_key_t key, struct fn_zone *fz)
{
	return fz->fz_hash[fn_hash(key, fz).datum];
}

extern __inline__ int fn_key_eq(fn_key_t a, fn_key_t b)
{
	return a.datum == b.datum;
}

/* Ordering used to keep bucket chains sorted (raw big-endian compare). */
extern __inline__ int fn_key_leq(fn_key_t a, fn_key_t b)
{
	return a.datum <= b.datum;
}

/* Protects all zone hash tables and bucket chains of every table. */
static rwlock_t fib_hash_lock = RW_LOCK_UNLOCKED;

#define FZ_MAX_DIVISOR 1024

#ifdef CONFIG_IP_ROUTE_LARGE_TABLES

/* The fib hash lock must be held when this is called.
 */
static __inline__ void fn_rebuild_zone(struct fn_zone *fz,
				       struct fib_node **old_ht,
				       int old_divisor)
{
	int i;
	struct fib_node *f, **fp, *next;

	/* Move every node from the old buckets into the already-installed
	 * new table, keeping each destination chain sorted by ascending key.
	 */
	for (i=0; i<old_divisor; i++) {
		for (f=old_ht[i]; f; f=next) {
			next = f->fn_next;
			/* Find the sorted insertion point in the new chain. */
			for (fp = fz_chain_p(f->fn_key, fz);
			     *fp && fn_key_leq((*fp)->fn_key, f->fn_key);
			     fp = &(*fp)->fn_next)
				/* NONE */;
			f->fn_next = *fp;
			*fp = f;
		}
	}
}

/* Grow a crowded zone's hash table (16 -> 256 -> 1024 buckets).
 * Called from fn_hash_insert when the load factor gets high.
 */
static void fn_rehash_zone(struct fn_zone *fz)
{
	struct fib_node **ht, **old_ht;
	int old_divisor, new_divisor;
	u32 new_hashmask;

	old_divisor = fz->fz_divisor;

	switch (old_divisor) {
	case 16:
		new_divisor = 256;
		new_hashmask = 0xFF;
		break;
	case 256:
		new_divisor = 1024;
		new_hashmask = 0x3FF;
		break;
	default:
		printk(KERN_CRIT "route.c: bad divisor %d!\n", old_divisor);
		return;
	}
#if RT_CACHE_DEBUG >= 2
	printk("fn_rehash_zone: hash for zone %d grows from %d\n",
	       fz->fz_order, old_divisor);
#endif
	ht = kmalloc(new_divisor*sizeof(struct fib_node*), GFP_KERNEL);

	if (ht) {
		memset(ht, 0, new_divisor*sizeof(struct fib_node*));
		/* Swap in the new table under the writer lock, then move
		 * all nodes over before letting readers back in.
		 */
		write_lock_bh(&fib_hash_lock);
		old_ht = fz->fz_hash;
		fz->fz_hash = ht;
		fz->fz_hashmask = new_hashmask;
		fz->fz_divisor = new_divisor;
		fn_rebuild_zone(fz, old_ht, old_divisor);
		write_unlock_bh(&fib_hash_lock);
		kfree(old_ht);
	}
}
#endif /* CONFIG_IP_ROUTE_LARGE_TABLES */

/* Drop the node's fib_info reference and return the node to the slab. */
static void fn_free_node(struct fib_node * f)
{
	fib_release_info(FIB_INFO(f));
	kmem_cache_free(fn_hash_kmem, f);
}

/* Allocate and initialize the zone for prefix length z, and link it into
 * table->fn_zone_list keeping the list ordered most-specific-first.
 * Returns NULL on allocation failure.
 */
static struct fn_zone *
fn_new_zone(struct fn_hash *table, int z)
{
	int i;
	struct fn_zone *fz = kmalloc(sizeof(struct fn_zone), GFP_KERNEL);
	if (!fz)
		return NULL;

	memset(fz, 0, sizeof(struct fn_zone));
	if (z) {
		fz->fz_divisor = 16;
		fz->fz_hashmask = 0xF;
	} else {
		/* Zone 0 (default routes) only ever needs one bucket. */
		fz->fz_divisor = 1;
		fz->fz_hashmask = 0;
	}
	fz->fz_hash = kmalloc(fz->fz_divisor*sizeof(struct fib_node*), GFP_KERNEL);
	if (!fz->fz_hash) {
		kfree(fz);
		return NULL;
	}
	memset(fz->fz_hash, 0, fz->fz_divisor*sizeof(struct fib_node*));
	fz->fz_order = z;
	fz->fz_mask = inet_make_mask(z);

	/* Find the first not empty zone with
	   more specific mask */
	for (i=z+1; i<=32; i++)
		if (table->fn_zones[i])
			break;
	write_lock_bh(&fib_hash_lock);
	if (i>32) {
		/* No more specific masks, we are the first. */
		fz->fz_next = table->fn_zone_list;
		table->fn_zone_list = fz;
	} else {
		fz->fz_next = table->fn_zones[i]->fz_next;
		table->fn_zones[i]->fz_next = fz;
	}
	table->fn_zones[z] = fz;
	write_unlock_bh(&fib_hash_lock);
	return fz;
}

/* Longest-prefix-match lookup.  Walks the non-empty zones from most to
 * least specific and returns the first entry passing the tos/scope and
 * fib_semantic_match checks.
 * Returns 0 on match (res filled in), 1 if no route, <0 on error from
 * fib_semantic_match.
 */
static int
fn_hash_lookup(struct fib_table *tb, const struct rt_key *key, struct fib_result *res)
{
	int err;
	struct fn_zone *fz;
	struct fn_hash *t = (struct fn_hash*)tb->tb_data;

	read_lock(&fib_hash_lock);
	for (fz = t->fn_zone_list; fz; fz = fz->fz_next) {
		struct fib_node *f;
		fn_key_t k = fz_key(key->dst, fz);

		for (f = fz_chain(k, fz); f; f = f->fn_next) {
			if (!fn_key_eq(k, f->fn_key)) {
				/* Chains are key-sorted: once past k,
				 * nothing further in this chain can match.
				 */
				if (fn_key_leq(k, f->fn_key))
					break;
				else
					continue;
			}
#ifdef CONFIG_IP_ROUTE_TOS
			if (f->fn_tos && f->fn_tos != key->tos)
				continue;
#endif
			f->fn_state |= FN_S_ACCESSED;

			if (f->fn_state&FN_S_ZOMBIE)
				continue;
			if (f->fn_scope < key->scope)
				continue;

			err = fib_semantic_match(f->fn_type, FIB_INFO(f), key, res);
			if (err == 0) {
				res->type = f->fn_type;
				res->scope = f->fn_scope;
				res->prefixlen = fz->fz_order;
				goto out;
			}
			if (err < 0)
				goto out;
		}
	}
	err = 1;
out:
	read_unlock(&fib_hash_lock);
	return err;
}

/* Position of the default route chosen by the last selection; -1 if none. */
static int fn_hash_last_dflt=-1;

/* Heuristic liveness test for a default gateway based on its ARP entry.
 * Returns 0 if the gateway looks reachable, 1 otherwise; on failure it may
 * record fi in *last_resort/*last_idx as the fallback candidate.
 */
static int fib_detect_death(struct fib_info *fi, int order,
			    struct fib_info **last_resort, int *last_idx)
{
	struct neighbour *n;
	int state = NUD_NONE;

	n = neigh_lookup(&arp_tbl, &fi->fib_nh[0].nh_gw, fi->fib_dev);
	if (n) {
		state = n->nud_state;
		neigh_release(n);
	}
	if (state==NUD_REACHABLE)
		return 0;
	/* A merely valid entry only counts if it isn't the one we already
	 * failed over from last time.
	 */
	if ((state&NUD_VALID) && order != fn_hash_last_dflt)
		return 0;
	if ((state&NUD_VALID) ||
	    (*last_idx<0 && order > fn_hash_last_dflt)) {
		*last_resort = fi;
		*last_idx = order;
	}
	return 1;
}

/* Choose a live default route among the equal-priority candidates in
 * zone 0, replacing res->fi if a better one is found.
 */
static void
fn_hash_select_default(struct fib_table *tb,
		       const struct rt_key *key, struct fib_result *res)
{
	int order, last_idx;
	struct fib_node *f;
	struct fib_info *fi = NULL;
	struct fib_info *last_resort;
	struct fn_hash *t = (struct fn_hash*)tb->tb_data;
	struct fn_zone *fz = t->fn_zones[0];

	if (fz == NULL)
		return;

	last_idx = -1;
	last_resort = NULL;
	order = -1;

	read_lock(&fib_hash_lock);
	for (f = fz->fz_hash[0]; f; f = f->fn_next) {
		struct fib_info *next_fi = FIB_INFO(f);

		if ((f->fn_state&FN_S_ZOMBIE) ||
		    f->fn_scope != res->scope ||
		    f->fn_type != RTN_UNICAST)
			continue;

		/* Only consider candidates at least as good as the current
		 * result.  NOTE(review): res->fi is dereferenced without a
		 * NULL check — callers apparently guarantee it is set;
		 * confirm against the call sites.
		 */
		if (next_fi->fib_priority > res->fi->fib_priority)
			break;
		if (!next_fi->fib_nh[0].nh_gw || next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
			continue;
		f->fn_state |= FN_S_ACCESSED;

		if (fi == NULL) {
			if (next_fi != res->fi)
				break;
		} else if (!fib_detect_death(fi, order, &last_resort, &last_idx)) {
			/* The previous candidate is alive: switch to it. */
			if (res->fi)
				fib_info_put(res->fi);
			res->fi = fi;
			atomic_inc(&fi->fib_clntref);
			fn_hash_last_dflt = order;
			goto out;
		}
		fi = next_fi;
		order++;
	}

	if (order<=0 || fi==NULL) {
		fn_hash_last_dflt = -1;
		goto out;
	}

	if (!fib_detect_death(fi, order, &last_resort, &last_idx)) {
		if (res->fi)
			fib_info_put(res->fi);
		res->fi = fi;
		atomic_inc(&fi->fib_clntref);
		fn_hash_last_dflt = order;
		goto out;
	}

	/* Everything looked dead: fall back to the last resort, if any. */
	if (last_idx >= 0) {
		if (res->fi)
			fib_info_put(res->fi);
		res->fi = last_resort;
		if (last_resort)
			atomic_inc(&last_resort->fib_clntref);
	}
	fn_hash_last_dflt = last_idx;
out:
	read_unlock(&fib_hash_lock);
}

/* Chain-walking helpers: advance f/fp along a bucket chain, optionally
 * stopping when the key (and, if configured, the tos) stops matching.
 */
#define FIB_SCAN(f, fp) \
for ( ; ((f) = *(fp)) != NULL; (fp) = &(f)->fn_next)

#define FIB_SCAN_KEY(f, fp, key) \
for ( ; ((f) = *(fp)) != NULL && fn_key_eq((f)->fn_key, (key)); (fp) = &(f)->fn_next)

#ifndef CONFIG_IP_ROUTE_TOS
#define FIB_SCAN_TOS(f, fp, key, tos) FIB_SCAN_KEY(f, fp, key)
#else
#define FIB_SCAN_TOS(f, fp, key, tos) \
for ( ; ((f) = *(fp)) != NULL && fn_key_eq((f)->fn_key, (key)) && \
     (f)->fn_tos == (tos) ; (fp) = &(f)->fn_next)
#endif


#ifdef CONFIG_RTNETLINK
static void
rtmsg_fib(int, struct fib_node*, int, int,
	  struct nlmsghdr *n,
	  struct netlink_skb_parms *);
#else
/* Without rtnetlink, route change notifications compile away to nothing. */
#define rtmsg_fib(a, b, c, d, e, f)
#endif


/* Insert a route described by r/rta into the table, honoring the netlink
 * NLM_F_EXCL/REPLACE/APPEND/CREATE semantics.
 * Returns 0 on success or a negative errno.
 */
static int
fn_hash_insert(struct fib_table *tb, struct rtmsg *r, struct kern_rta *rta,
	       struct nlmsghdr *n, struct netlink_skb_parms *req)
{
	struct fn_hash *table = (struct fn_hash*)tb->tb_data;
	struct fib_node *new_f, *f, **fp, **del_fp;
	struct fn_zone *fz;
	struct fib_info *fi;

	int z = r->rtm_dst_len;
	int type = r->rtm_type;
#ifdef CONFIG_IP_ROUTE_TOS
	u8 tos = r->rtm_tos;
#endif
	fn_key_t key;
	int err;

FTprint("tb(%d)_insert: %d %08x/%d %d %08x\n", tb->tb_id, r->rtm_type, rta->rta_dst ?
*(u32*)rta->rta_dst : 0, z, rta->rta_oif ? *rta->rta_oif : -1,
rta->rta_prefsrc ? *(u32*)rta->rta_prefsrc : 0);
	if (z > 32)
		return -EINVAL;
	fz = table->fn_zones[z];
	if (!fz && !(fz = fn_new_zone(table, z)))
		return -ENOBUFS;

	fz_key_0(key);
	if (rta->rta_dst) {
		u32 dst;
		memcpy(&dst, rta->rta_dst, 4);
		/* Host bits set beyond the prefix length are an error. */
		if (dst & ~FZ_MASK(fz))
			return -EINVAL;
		key = fz_key(dst, fz);
	}

	if ((fi = fib_create_info(r, rta, n, &err)) == NULL)
		return err;

#ifdef CONFIG_IP_ROUTE_LARGE_TABLES
	/* Grow the hash table when average chain length exceeds 4 and the
	 * zone could actually use more buckets.
	 */
	if (fz->fz_nent > (fz->fz_divisor<<2) &&
	    fz->fz_divisor < FZ_MAX_DIVISOR &&
	    (z==32 || (1<<z) > fz->fz_divisor))
		fn_rehash_zone(fz);
#endif

	fp = fz_chain_p(key, fz);

	/*
	 * Scan list to find the first route with the same destination
	 */
	FIB_SCAN(f, fp) {
		if (fn_key_leq(key,f->fn_key))
			break;
	}

#ifdef CONFIG_IP_ROUTE_TOS
	/*
	 * Find route with the same destination and tos.
	 */
	FIB_SCAN_KEY(f, fp, key) {
		if (f->fn_tos <= tos)
			break;
	}
#endif

	del_fp = NULL;

	if (f && (f->fn_state&FN_S_ZOMBIE) &&
#ifdef CONFIG_IP_ROUTE_TOS
	    f->fn_tos == tos &&
#endif
	    fn_key_eq(f->fn_key, key)) {
		/* Found a zombie for the same key: remember it for
		 * unlinking and create the new entry right after it.
		 */
		del_fp = fp;
		fp = &f->fn_next;
		f = *fp;
		goto create;
	}

	/* Chains are additionally ordered by descending priority value. */
	FIB_SCAN_TOS(f, fp, key, tos) {
		if (fi->fib_priority <= FIB_INFO(f)->fib_priority)
			break;
	}

	/* Now f==*fp points to the first node with the same keys
	   [prefix,tos,priority], if such key already exists or to the node,
	   before which we will insert new one.
	 */

	if (f &&
#ifdef CONFIG_IP_ROUTE_TOS
	    f->fn_tos == tos &&
#endif
	    fn_key_eq(f->fn_key, key) &&
	    fi->fib_priority == FIB_INFO(f)->fib_priority) {
		struct fib_node **ins_fp;

		err = -EEXIST;
		if (n->nlmsg_flags&NLM_F_EXCL)
			goto out;

		if (n->nlmsg_flags&NLM_F_REPLACE) {
			del_fp = fp;
			fp = &f->fn_next;
			f = *fp;
			goto replace;
		}

		ins_fp = fp;
		err = -EEXIST;

		/* Refuse an exact duplicate (same type, scope and info). */
		FIB_SCAN_TOS(f, fp, key, tos) {
			if (fi->fib_priority != FIB_INFO(f)->fib_priority)
				break;
			if (f->fn_type == type && f->fn_scope == r->rtm_scope
			    && FIB_INFO(f) == fi)
				goto out;
		}

		/* Without NLM_F_APPEND, insert before the equal-key group. */
		if (!(n->nlmsg_flags&NLM_F_APPEND)) {
			fp = ins_fp;
			f = *fp;
		}
	}

create:
	err = -ENOENT;
	if (!(n->nlmsg_flags&NLM_F_CREATE))
		goto out;

replace:
	err = -ENOBUFS;
	new_f = kmem_cache_alloc(fn_hash_kmem, SLAB_KERNEL);
	if (new_f == NULL)
		goto out;

	memset(new_f, 0, sizeof(struct fib_node));

	new_f->fn_key = key;
#ifdef CONFIG_IP_ROUTE_TOS
	new_f->fn_tos = tos;
#endif
	new_f->fn_type = type;
	new_f->fn_scope = r->rtm_scope;
	FIB_INFO(new_f) = fi;

	/*
	 * Insert new entry to the list.
	 */

	new_f->fn_next = f;
	write_lock_bh(&fib_hash_lock);
	*fp = new_f;
	write_unlock_bh(&fib_hash_lock);
	fz->fz_nent++;

	if (del_fp) {
		f = *del_fp;
		/* Unlink replaced node */
		write_lock_bh(&fib_hash_lock);
		*del_fp = f->fn_next;
		write_unlock_bh(&fib_hash_lock);

		/* A zombie was never announced, so don't announce removal. */
		if (!(f->fn_state&FN_S_ZOMBIE))
			rtmsg_fib(RTM_DELROUTE, f, z, tb->tb_id, n, req);
		if (f->fn_state&FN_S_ACCESSED)
			rt_cache_flush(-1);
		fn_free_node(f);
		fz->fz_nent--;
	} else {
		rt_cache_flush(-1);
	}
	rtmsg_fib(RTM_NEWROUTE, new_f, z, tb->tb_id, n, req);
	return 0;

out:
	fib_release_info(fi);
	return err;
}


/* Delete the route matching r/rta.  If other routes share the key, the
 * node is unlinked and freed at once; a sole match is only marked
 * FN_S_ZOMBIE and reaped later by fn_hash_flush.
 * Returns 0 on success, -ESRCH if nothing matched, -EINVAL on bad args.
 */
static int
fn_hash_delete(struct fib_table *tb, struct rtmsg *r, struct kern_rta *rta,
	       struct nlmsghdr *n, struct netlink_skb_parms *req)
{
	struct fn_hash *table = (struct fn_hash*)tb->tb_data;
	struct fib_node **fp, **del_fp, *f;
	int z = r->rtm_dst_len;
	struct fn_zone *fz;
	fn_key_t key;
	int matched;
#ifdef CONFIG_IP_ROUTE_TOS
	u8 tos = r->rtm_tos;
#endif

FTprint("tb(%d)_delete: %d %08x/%d %d\n", tb->tb_id, r->rtm_type, rta->rta_dst ?
*(u32*)rta->rta_dst : 0, z, rta->rta_oif ?
*rta->rta_oif : -1);
	if (z > 32)
		return -EINVAL;
	if ((fz = table->fn_zones[z]) == NULL)
		return -ESRCH;

	fz_key_0(key);
	if (rta->rta_dst) {
		u32 dst;
		memcpy(&dst, rta->rta_dst, 4);
		/* Host bits set beyond the prefix length are an error. */
		if (dst & ~FZ_MASK(fz))
			return -EINVAL;
		key = fz_key(dst, fz);
	}

	fp = fz_chain_p(key, fz);

	/* Chains are key-sorted; stop early once past the key. */
	FIB_SCAN(f, fp) {
		if (fn_key_eq(f->fn_key, key))
			break;
		if (fn_key_leq(key, f->fn_key)) {
			return -ESRCH;
		}
	}
#ifdef CONFIG_IP_ROUTE_TOS
	FIB_SCAN_KEY(f, fp, key) {
		if (f->fn_tos == tos)
			break;
	}
#endif

	matched = 0;
	del_fp = NULL;

	FIB_SCAN_TOS(f, fp, key, tos) {
		struct fib_info * fi = FIB_INFO(f);

		/* Already deleted: treat as not found. */
		if (f->fn_state&FN_S_ZOMBIE) {
			return -ESRCH;
		}
		matched++;
		/* The first entry passing the optional type/scope/protocol/
		 * nexthop filters is the one to delete.
		 */
		if (del_fp == NULL &&
		    (!r->rtm_type || f->fn_type == r->rtm_type) &&
		    (r->rtm_scope == RT_SCOPE_NOWHERE || f->fn_scope == r->rtm_scope) &&
		    (!r->rtm_protocol || fi->fib_protocol == r->rtm_protocol) &&
		    fib_nh_match(r, n, rta, fi) == 0)
			del_fp = fp;
	}

	if (del_fp) {
		f = *del_fp;
		rtmsg_fib(RTM_DELROUTE, f, z, tb->tb_id, n, req);

		if (matched != 1) {
			/* Other routes share the key: unlink and free now. */
			write_lock_bh(&fib_hash_lock);
			*del_fp = f->fn_next;
			write_unlock_bh(&fib_hash_lock);

			if (f->fn_state&FN_S_ACCESSED)
				rt_cache_flush(-1);
			fn_free_node(f);
			fz->fz_nent--;
		} else {
			/* Sole match: keep it as a zombie until a flush. */
			f->fn_state |= FN_S_ZOMBIE;
			if (f->fn_state&FN_S_ACCESSED) {
				f->fn_state &= ~FN_S_ACCESSED;
				rt_cache_flush(-1);
			}
			if (++fib_hash_zombies > 128)
				fib_flush();
		}

		return 0;
	}
	return -ESRCH;
}

/* Unlink and free all zombie or dead (RTNH_F_DEAD) nodes in one bucket
 * chain.  Returns the number of nodes removed.
 */
extern __inline__ int
fn_flush_list(struct fib_node ** fp, int z, struct fn_hash *table)
{
	int found = 0;
	struct fib_node *f;

	while ((f = *fp) != NULL) {
		struct fib_info *fi = FIB_INFO(f);

		if (fi && ((f->fn_state&FN_S_ZOMBIE) || (fi->fib_flags&RTNH_F_DEAD))) {
			write_lock_bh(&fib_hash_lock);
			*fp = f->fn_next;
			write_unlock_bh(&fib_hash_lock);

			fn_free_node(f);
			found++;
			continue;
		}
		fp = &f->fn_next;
	}
	return found;
}

/* Purge zombie and dead routes from every zone of the table.
 * Returns the total number of routes removed.
 */
static int fn_hash_flush(struct fib_table *tb)
{
	struct fn_hash *table = (struct fn_hash*)tb->tb_data;
	struct
fn_zone *fz;
	int found = 0;

	fib_hash_zombies = 0;
	for (fz = table->fn_zone_list; fz; fz = fz->fz_next) {
		int i;
		int tmp = 0;
		for (i=fz->fz_divisor-1; i>=0; i--)
			tmp += fn_flush_list(&fz->fz_hash[i], fz->fz_order, table);
		fz->fz_nent -= tmp;
		found += tmp;
	}
	return found;
}


#ifdef CONFIG_PROC_FS

/* /proc backend: format up to 'count' routes, 128 bytes per line, into
 * 'buffer', skipping the first 'first' entries.  Returns number written.
 */
static int fn_hash_get_info(struct fib_table *tb, char *buffer, int first, int count)
{
	struct fn_hash *table = (struct fn_hash*)tb->tb_data;
	struct fn_zone *fz;
	int pos = 0;
	int n = 0;

	read_lock(&fib_hash_lock);
	for (fz=table->fn_zone_list; fz; fz = fz->fz_next) {
		int i;
		struct fib_node *f;
		int maxslot = fz->fz_divisor;
		struct fib_node **fp = fz->fz_hash;

		if (fz->fz_nent == 0)
			continue;

		/* Skip whole zones lying before the requested offset. */
		if (pos + fz->fz_nent <= first) {
			pos += fz->fz_nent;
			continue;
		}

		for (i=0; i < maxslot; i++, fp++) {
			for (f = *fp; f; f = f->fn_next) {
				if (++pos <= first)
					continue;
				fib_node_get_info(f->fn_type,
						  f->fn_state&FN_S_ZOMBIE,
						  FIB_INFO(f),
						  fz_prefix(f->fn_key, fz),
						  FZ_MASK(fz), buffer);
				buffer += 128;	/* fixed-width record */
				if (++n >= count)
					goto out;
			}
		}
	}
out:
	read_unlock(&fib_hash_lock);
	return n;
}
#endif


#ifdef CONFIG_RTNETLINK

/* Dump one bucket chain into a netlink skb, resuming at cb->args[3].
 * Returns skb->len when the chain is done, -1 if the skb filled up.
 */
extern __inline__ int
fn_hash_dump_bucket(struct sk_buff *skb, struct netlink_callback *cb,
		    struct fib_table *tb,
		    struct fn_zone *fz,
		    struct fib_node *f)
{
	int i, s_i;

	s_i = cb->args[3];
	for (i=0; f; i++, f=f->fn_next) {
		if (i < s_i) continue;
		if (f->fn_state&FN_S_ZOMBIE) continue;
		if (fib_dump_info(skb, NETLINK_CB(cb->skb).pid,
				  cb->nlh->nlmsg_seq,
				  RTM_NEWROUTE,
				  tb->tb_id, (f->fn_state&FN_S_ZOMBIE) ?
0 : f->fn_type, f->fn_scope,
				  &f->fn_key, fz->fz_order, f->fn_tos,
				  f->fn_info) < 0) {
			/* skb full: remember where to resume next call. */
			cb->args[3] = i;
			return -1;
		}
	}
	cb->args[3] = i;
	return skb->len;
}

/* Dump all buckets of one zone, resuming at bucket cb->args[2]. */
extern __inline__ int
fn_hash_dump_zone(struct sk_buff *skb, struct netlink_callback *cb,
		  struct fib_table *tb,
		  struct fn_zone *fz)
{
	int h, s_h;

	s_h = cb->args[2];
	for (h=0; h < fz->fz_divisor; h++) {
		if (h < s_h) continue;
		if (h > s_h)
			/* Entered a new bucket: clear deeper resume state. */
			memset(&cb->args[3], 0,
			       sizeof(cb->args) - 3*sizeof(cb->args[0]));
		if (fz->fz_hash == NULL || fz->fz_hash[h] == NULL)
			continue;
		if (fn_hash_dump_bucket(skb, cb, tb, fz, fz->fz_hash[h]) < 0) {
			cb->args[2] = h;
			return -1;
		}
	}
	cb->args[2] = h;
	return skb->len;
}

/* RTM_GETROUTE dump entry point: iterate zones, resuming at cb->args[1]. */
static int fn_hash_dump(struct fib_table *tb, struct sk_buff *skb, struct netlink_callback *cb)
{
	int m, s_m;
	struct fn_zone *fz;
	struct fn_hash *table = (struct fn_hash*)tb->tb_data;

	s_m = cb->args[1];
	read_lock(&fib_hash_lock);
	for (fz = table->fn_zone_list, m=0; fz; fz = fz->fz_next, m++) {
		if (m < s_m) continue;
		if (m > s_m)
			/* Entered a new zone: clear deeper resume state. */
			memset(&cb->args[2], 0,
			       sizeof(cb->args) - 2*sizeof(cb->args[0]));
		if (fn_hash_dump_zone(skb, cb, tb, fz) < 0) {
			cb->args[1] = m;
			read_unlock(&fib_hash_lock);
			return -1;
		}
	}
	read_unlock(&fib_hash_lock);
	cb->args[1] = m;
	return skb->len;
}

/* Broadcast a route add/delete notification to RTMGRP_IPV4_ROUTE
 * listeners, also echoing it back to the requester if NLM_F_ECHO is set.
 */
static void rtmsg_fib(int event, struct fib_node* f, int z, int tb_id,
		      struct nlmsghdr *n, struct netlink_skb_parms *req)
{
	struct sk_buff *skb;
	u32 pid = req ?
req->pid : 0;
	int size = NLMSG_SPACE(sizeof(struct rtmsg)+256);

	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb)
		return;

	if (fib_dump_info(skb, pid, n->nlmsg_seq, event, tb_id,
			  f->fn_type, f->fn_scope, &f->fn_key, z, f->fn_tos,
			  FIB_INFO(f)) < 0) {
		kfree_skb(skb);
		return;
	}
	NETLINK_CB(skb).dst_groups = RTMGRP_IPV4_ROUTE;
	if (n->nlmsg_flags&NLM_F_ECHO)
		/* Keep an extra reference so the skb survives the broadcast
		 * and can still be unicast back to the requester below.
		 */
		atomic_inc(&skb->users);
	netlink_broadcast(rtnl, skb, pid, RTMGRP_IPV4_ROUTE, GFP_KERNEL);
	if (n->nlmsg_flags&NLM_F_ECHO)
		netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT);
}

#endif /* CONFIG_RTNETLINK */

/* Create a fib_table instance backed by this hash engine and wire up its
 * operation callbacks.  The fib_node slab cache is created on first use.
 * Returns NULL on allocation failure.
 */
#ifdef CONFIG_IP_MULTIPLE_TABLES
struct fib_table * fib_hash_init(int id)
#else
struct fib_table * __init fib_hash_init(int id)
#endif
{
	struct fib_table *tb;

	if (fn_hash_kmem == NULL)
		fn_hash_kmem = kmem_cache_create("ip_fib_hash",
						 sizeof(struct fib_node),
						 0, SLAB_HWCACHE_ALIGN,
						 NULL, NULL);

	/* The fn_hash lives in the flexible tb_data area of the table. */
	tb = kmalloc(sizeof(struct fib_table) + sizeof(struct fn_hash), GFP_KERNEL);
	if (tb == NULL)
		return NULL;

	tb->tb_id = id;
	tb->tb_lookup = fn_hash_lookup;
	tb->tb_insert = fn_hash_insert;
	tb->tb_delete = fn_hash_delete;
	tb->tb_flush = fn_hash_flush;
	tb->tb_select_default = fn_hash_select_default;
#ifdef CONFIG_RTNETLINK
	tb->tb_dump = fn_hash_dump;
#endif
#ifdef CONFIG_PROC_FS
	tb->tb_get_info = fn_hash_get_info;
#endif
	memset(tb->tb_data, 0, sizeof(struct fn_hash));
	return tb;
}