/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Version:	$Id: tcp_ipv4.c,v 1.222 2000/12/08 17:15:53 davem Exp $
 *
 *		IPv4 specific functions
 *
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					open_request handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 */

#include <linux/config.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/init.h>

#include <net/icmp.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/inet_common.h>

#include <linux/inet.h>
#include <linux/stddef.h>
#include <linux/ipsec.h>

extern int sysctl_ip_dynaddr;

/* Check TCP sequence numbers in ICMP packets. */
#define ICMP_MIN_LENGTH 8

/* Socket used for sending RSTs */
static struct inode tcp_inode;
static struct socket *tcp_socket=&tcp_inode.u.socket_i;

void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
		       struct sk_buff *skb);

/*
 * ALL members must be initialised to prevent gcc-2.7.2.3 miscompilation
 */
struct tcp_hashinfo __cacheline_aligned tcp_hashinfo = {
	__tcp_ehash:          NULL,
	__tcp_bhash:          NULL,
	__tcp_bhash_size:     0,
	__tcp_ehash_size:     0,
	__tcp_listening_hash: { NULL, },
	__tcp_lhash_lock:     RW_LOCK_UNLOCKED,
	__tcp_lhash_users:    ATOMIC_INIT(0),
	__tcp_lhash_wait:
	  __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.__tcp_lhash_wait),
	__tcp_portalloc_lock: SPIN_LOCK_UNLOCKED
};

/*
 * This array holds the first and last local port number.
 * For high-usage systems, use sysctl to change this to
 * 32768-61000
 */
int sysctl_local_port_range[2] = { 1024, 4999 };
int tcp_port_rover = (1024 - 1);

static __inline__ int tcp_hashfn(__u32 laddr, __u16 lport,
				 __u32 faddr, __u16 fport)
{
	int h = ((laddr ^ lport) ^ (faddr ^ fport));
	h ^= h>>16;
	h ^= h>>8;
	return h & (tcp_ehash_size - 1);
}

static __inline__ int tcp_sk_hashfn(struct sock *sk)
{
	__u32 laddr = sk->rcv_saddr;
	__u16 lport = sk->num;
	__u32 faddr = sk->daddr;
	__u16 fport = sk->dport;

	return tcp_hashfn(laddr, lport, faddr, fport);
}
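/*
 * Illustrative note, not part of the original file: the fold above only
 * works because the result is masked with (tcp_ehash_size - 1), i.e. the
 * established hash table size is assumed to be a power of two.  On receive,
 * __tcp_v4_lookup_established() below calls tcp_hashfn() with the local
 * address/port first and the remote pair second, exactly as tcp_sk_hashfn()
 * does here, so an incoming segment hashes to the same chain its socket was
 * inserted into.
 */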
/* Allocate and initialize a new TCP local port bind bucket.
 * The bindhash mutex for snum's hash chain must be held here.
 */
struct tcp_bind_bucket *tcp_bucket_create(struct tcp_bind_hashbucket *head,
					  unsigned short snum)
{
	struct tcp_bind_bucket *tb;

	tb = kmem_cache_alloc(tcp_bucket_cachep, SLAB_ATOMIC);
	if(tb != NULL) {
		tb->port = snum;
		tb->fastreuse = 0;
		tb->owners = NULL;
		if((tb->next = head->chain) != NULL)
			tb->next->pprev = &tb->next;
		head->chain = tb;
		tb->pprev = &head->chain;
	}
	return tb;
}

/* Caller must disable local BH processing. */
static __inline__ void __tcp_inherit_port(struct sock *sk, struct sock *child)
{
	struct tcp_bind_hashbucket *head = &tcp_bhash[tcp_bhashfn(child->num)];
	struct tcp_bind_bucket *tb;

	spin_lock(&head->lock);
	tb = (struct tcp_bind_bucket *)sk->prev;
	if ((child->bind_next = tb->owners) != NULL)
		tb->owners->bind_pprev = &child->bind_next;
	tb->owners = child;
	child->bind_pprev = &tb->owners;
	child->prev = (struct sock *) tb;
	spin_unlock(&head->lock);
}

__inline__ void tcp_inherit_port(struct sock *sk, struct sock *child)
{
	local_bh_disable();
	__tcp_inherit_port(sk, child);
	local_bh_enable();
}

/* Obtain a reference to a local port for the given sock,
 * if snum is zero it means select any available local port.
 */
static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
{
	struct tcp_bind_hashbucket *head;
	struct tcp_bind_bucket *tb;
	int ret;

	local_bh_disable();
	if (snum == 0) {
		int low = sysctl_local_port_range[0];
		int high = sysctl_local_port_range[1];
		int remaining = (high - low) + 1;
		int rover;

		spin_lock(&tcp_portalloc_lock);
		rover = tcp_port_rover;
		do {	rover++;
			if ((rover < low) || (rover > high))
				rover = low;
			head = &tcp_bhash[tcp_bhashfn(rover)];
			spin_lock(&head->lock);
			for (tb = head->chain; tb; tb = tb->next)
				if (tb->port == rover)
					goto next;
			break;
		next:
			spin_unlock(&head->lock);
		} while (--remaining > 0);
		tcp_port_rover = rover;
		spin_unlock(&tcp_portalloc_lock);

		/* Exhausted local port range during search? */
		ret = 1;
		if (remaining <= 0)
			goto fail;

		/* OK, here is the one we will use.  HEAD is
		 * non-NULL and we hold its mutex.
		 */
		snum = rover;
		tb = NULL;
	} else {
		head = &tcp_bhash[tcp_bhashfn(snum)];
		spin_lock(&head->lock);
		for (tb = head->chain; tb != NULL; tb = tb->next)
			if (tb->port == snum)
				break;
	}
	if (tb != NULL && tb->owners != NULL) {
		if (tb->fastreuse != 0 && sk->reuse != 0 && sk->state != TCP_LISTEN) {
			goto success;
		} else {
			struct sock *sk2 = tb->owners;
			int sk_reuse = sk->reuse;

			for( ; sk2 != NULL; sk2 = sk2->bind_next) {
				if (sk != sk2 &&
				    sk->bound_dev_if == sk2->bound_dev_if) {
					if (!sk_reuse	||
					    !sk2->reuse	||
					    sk2->state == TCP_LISTEN) {
						if (!sk2->rcv_saddr	||
						    !sk->rcv_saddr	||
						    (sk2->rcv_saddr == sk->rcv_saddr))
							break;
					}
				}
			}
			/* If we found a conflict, fail. */
			ret = 1;
			if (sk2 != NULL)
				goto fail_unlock;
		}
	}
	ret = 1;
	if (tb == NULL &&
	    (tb = tcp_bucket_create(head, snum)) == NULL)
		goto fail_unlock;
	if (tb->owners == NULL) {
		if (sk->reuse && sk->state != TCP_LISTEN)
			tb->fastreuse = 1;
		else
			tb->fastreuse = 0;
	} else if (tb->fastreuse &&
		   ((sk->reuse == 0) || (sk->state == TCP_LISTEN)))
		tb->fastreuse = 0;
success:
	sk->num = snum;
	if (sk->prev == NULL) {
		if ((sk->bind_next = tb->owners) != NULL)
			tb->owners->bind_pprev = &sk->bind_next;
		tb->owners = sk;
		sk->bind_pprev = &tb->owners;
		sk->prev = (struct sock *) tb;
	} else {
		BUG_TRAP(sk->prev == (struct sock *) tb);
	}
	ret = 0;

fail_unlock:
	spin_unlock(&head->lock);
fail:
	local_bh_enable();
	return ret;
}

/* Get rid of any references to a local port held by the
 * given sock.
 */
__inline__ void __tcp_put_port(struct sock *sk)
{
	struct tcp_bind_hashbucket *head = &tcp_bhash[tcp_bhashfn(sk->num)];
	struct tcp_bind_bucket *tb;

	spin_lock(&head->lock);
	tb = (struct tcp_bind_bucket *) sk->prev;
	if (sk->bind_next)
		sk->bind_next->bind_pprev = sk->bind_pprev;
	*(sk->bind_pprev) = sk->bind_next;
	sk->prev = NULL;
	sk->num = 0;
	if (tb->owners == NULL) {
		if (tb->next)
			tb->next->pprev = tb->pprev;
		*(tb->pprev) = tb->next;
		kmem_cache_free(tcp_bucket_cachep, tb);
	}
	spin_unlock(&head->lock);
}

void tcp_put_port(struct sock *sk)
{
	local_bh_disable();
	__tcp_put_port(sk);
	local_bh_enable();
}
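/*
 * Illustrative note, not part of the original file: a bind bucket lives
 * exactly as long as it has owners.  tcp_v4_get_port() above either finds
 * an existing bucket for the requested port or creates one with
 * tcp_bucket_create(); __tcp_put_port() unlinks the socket from its bucket
 * and, once the owner list is empty, frees the bucket back to
 * tcp_bucket_cachep.  Callers that are not already in BH context use the
 * tcp_put_port() wrapper so the bind hash lock is taken with local BHs
 * disabled.
 */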
/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it can be very bad on SMP.
 * Look, when several writers sleep and reader wakes them up, all but one
 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
 * this, _but_ remember, it adds useless work on UP machines (wake up each
 * exclusive lock release). It should be ifdefed really.
 */

void tcp_listen_wlock(void)
{
	write_lock(&tcp_lhash_lock);

	if (atomic_read(&tcp_lhash_users)) {
		DECLARE_WAITQUEUE(wait, current);

		add_wait_queue_exclusive(&tcp_lhash_wait, &wait);
		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (atomic_read(&tcp_lhash_users) == 0)
				break;
			write_unlock_bh(&tcp_lhash_lock);
			schedule();
			write_lock_bh(&tcp_lhash_lock);
		}

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&tcp_lhash_wait, &wait);
	}
}

static __inline__ void __tcp_v4_hash(struct sock *sk)
{
	struct sock **skp;
	rwlock_t *lock;

	BUG_TRAP(sk->pprev==NULL);
	if(sk->state == TCP_LISTEN) {
		skp = &tcp_listening_hash[tcp_sk_listen_hashfn(sk)];
		lock = &tcp_lhash_lock;
		tcp_listen_wlock();
	} else {
		skp = &tcp_ehash[(sk->hashent = tcp_sk_hashfn(sk))].chain;
		lock = &tcp_ehash[sk->hashent].lock;
		write_lock(lock);
	}
	if((sk->next = *skp) != NULL)
		(*skp)->pprev = &sk->next;
	*skp = sk;
	sk->pprev = skp;
	sock_prot_inc_use(sk->prot);
	write_unlock(lock);
	if (sk->state == TCP_LISTEN)
		wake_up(&tcp_lhash_wait);
}

static void tcp_v4_hash(struct sock *sk)
{
	if (sk->state != TCP_CLOSE) {
		local_bh_disable();
		__tcp_v4_hash(sk);
		local_bh_enable();
	}
}

void tcp_unhash(struct sock *sk)
{
	rwlock_t *lock;

	if (sk->state == TCP_LISTEN) {
		local_bh_disable();
		tcp_listen_wlock();
		lock = &tcp_lhash_lock;
	} else {
		struct tcp_ehash_bucket *head = &tcp_ehash[sk->hashent];
		lock = &head->lock;
		write_lock_bh(&head->lock);
	}

	if(sk->pprev) {
		if(sk->next)
			sk->next->pprev = sk->pprev;
		*sk->pprev = sk->next;
		sk->pprev = NULL;
		sock_prot_dec_use(sk->prot);
	}
	write_unlock_bh(lock);
	if (sk->state == TCP_LISTEN)
		wake_up(&tcp_lhash_wait);
}

/* Don't inline this cruft.  Here are some nice properties to
 * exploit here.  The BSD API does not allow a listening TCP
 * to specify the remote port nor the remote address for the
 * connection.  So always assume those are both wildcarded
 * during the search since they can never be otherwise.
 */
static struct sock *__tcp_v4_lookup_listener(struct sock *sk, u32 daddr, unsigned short hnum, int dif)
{
	struct sock *result = NULL;
	int score, hiscore;

	hiscore=0;
	for(; sk; sk = sk->next) {
		if(sk->num == hnum) {
			__u32 rcv_saddr = sk->rcv_saddr;

			score = 1;
			if(rcv_saddr) {
				if (rcv_saddr != daddr)
					continue;
				score++;
			}
			if (sk->bound_dev_if) {
				if (sk->bound_dev_if != dif)
					continue;
				score++;
			}
			if (score == 3)
				return sk;
			if (score > hiscore) {
				hiscore = score;
				result = sk;
			}
		}
	}
	return result;
}

/* Optimize the common listener case. */
__inline__ struct sock *tcp_v4_lookup_listener(u32 daddr, unsigned short hnum, int dif)
{
	struct sock *sk;

	read_lock(&tcp_lhash_lock);
	sk = tcp_listening_hash[tcp_lhashfn(hnum)];
	if (sk) {
		if (sk->num == hnum &&
		    sk->next == NULL &&
		    (!sk->rcv_saddr || sk->rcv_saddr == daddr) &&
		    !sk->bound_dev_if)
			goto sherry_cache;
		sk = __tcp_v4_lookup_listener(sk, daddr, hnum, dif);
	}
	if (sk) {
sherry_cache:
		sock_hold(sk);
	}
	read_unlock(&tcp_lhash_lock);
	return sk;
}
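/*
 * Illustrative note, not part of the original file:
 * __tcp_v4_lookup_listener() above scores each candidate - one point for a
 * matching port, plus one for a matching bound local address and one for a
 * matching bound device.  A perfect score of 3 returns immediately;
 * otherwise the highest-scoring (most specific) listener wins, so a socket
 * bound to 192.0.2.1:80 is preferred over one bound to *:80 for segments
 * addressed to 192.0.2.1.
 */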
/* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
 * we need not check it for TCP lookups anymore, thanks Alexey. -DaveM
 *
 * Local BH must be disabled here.
 */
static inline struct sock *__tcp_v4_lookup_established(u32 saddr, u16 sport,
						       u32 daddr, u16 hnum, int dif)
{
	struct tcp_ehash_bucket *head;
	TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
	__u32 ports = TCP_COMBINED_PORTS(sport, hnum);
	struct sock *sk;
	int hash;

	/* Optimize here for direct hit, only listening connections can
	 * have wildcards anyways.
	 */
	hash = tcp_hashfn(daddr, hnum, saddr, sport);
	head = &tcp_ehash[hash];
	read_lock(&head->lock);
	for(sk = head->chain; sk; sk = sk->next) {
		if(TCP_IPV4_MATCH(sk, acookie, saddr, daddr, ports, dif))
			goto hit; /* You sunk my battleship! */
	}

	/* Must check for a TIME_WAIT'er before going to listener hash. */
	for(sk = (head + tcp_ehash_size)->chain; sk; sk = sk->next)
		if(TCP_IPV4_MATCH(sk, acookie, saddr, daddr, ports, dif))
			goto hit;
	read_unlock(&head->lock);

	return NULL;

hit:
	sock_hold(sk);
	read_unlock(&head->lock);
	return sk;
}

static inline struct sock *__tcp_v4_lookup(u32 saddr, u16 sport,
					   u32 daddr, u16 hnum, int dif)
{
	struct sock *sk;

	sk = __tcp_v4_lookup_established(saddr, sport, daddr, hnum, dif);

	if (sk)
		return sk;

	return tcp_v4_lookup_listener(daddr, hnum, dif);
}

__inline__ struct sock *tcp_v4_lookup(u32 saddr, u16 sport, u32 daddr, u16 dport, int dif)
{
	struct sock *sk;

	local_bh_disable();
	sk = __tcp_v4_lookup(saddr, sport, daddr, ntohs(dport), dif);
	local_bh_enable();

	return sk;
}

static inline __u32 tcp_v4_init_sequence(struct sock *sk, struct sk_buff *skb)
{
	return secure_tcp_sequence_number(skb->nh.iph->daddr,
					  skb->nh.iph->saddr,
					  skb->h.th->dest,
					  skb->h.th->source);
}

static int tcp_v4_check_established(struct sock *sk)
{
	u32 daddr = sk->rcv_saddr;
	u32 saddr = sk->daddr;
	int dif = sk->bound_dev_if;
	TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
	__u32 ports = TCP_COMBINED_PORTS(sk->dport, sk->num);
	int hash = tcp_hashfn(daddr, sk->num, saddr, sk->dport);
	struct tcp_ehash_bucket *head = &tcp_ehash[hash];
	struct sock *sk2, **skp;
	struct tcp_tw_bucket *tw;

	write_lock_bh(&head->lock);

	/* Check TIME-WAIT sockets first. */
	for(skp = &(head + tcp_ehash_size)->chain; (sk2=*skp) != NULL;
	    skp = &sk2->next) {
		tw = (struct tcp_tw_bucket*)sk2;

		if(TCP_IPV4_MATCH(sk2, acookie, saddr, daddr, ports, dif)) {
			struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);

			/* With PAWS, it is safe from the viewpoint
			   of data integrity. Even without PAWS it
			   is safe provided sequence spaces do not
			   overlap i.e. at data rates <= 80Mbit/sec.

			   Actually, the idea is close to VJ's one,
			   only timestamp cache is held not per host,
			   but per port pair and TW bucket is used
			   as state holder.

			   If TW bucket has been already destroyed we
			   fall back to VJ's scheme and use initial
			   timestamp retrieved from peer table.
			 */
			if (tw->ts_recent_stamp) {
				if ((tp->write_seq = tw->snd_nxt+65535+2) == 0)
					tp->write_seq = 1;
				tp->ts_recent = tw->ts_recent;
				tp->ts_recent_stamp = tw->ts_recent_stamp;
				sock_hold(sk2);
				skp = &head->chain;
				goto unique;
			} else
				goto not_unique;
		}
	}
	tw = NULL;

	/* And established part... */
	for(skp = &head->chain; (sk2=*skp)!=NULL; skp = &sk2->next) {
		if(TCP_IPV4_MATCH(sk2, acookie, saddr, daddr, ports, dif))
			goto not_unique;
	}

unique:
	BUG_TRAP(sk->pprev==NULL);
	if ((sk->next = *skp) != NULL)
		(*skp)->pprev = &sk->next;

	*skp = sk;
	sk->pprev = skp;
	sk->hashent = hash;
	sock_prot_inc_use(sk->prot);
	write_unlock_bh(&head->lock);

	if (tw) {
		/* Silly. Should hash-dance instead... */
		local_bh_disable();
		tcp_tw_deschedule(tw);
		tcp_timewait_kill(tw);
		NET_INC_STATS_BH(TimeWaitRecycled);
		local_bh_enable();

		tcp_tw_put(tw);
	}

	return 0;

not_unique:
	write_unlock_bh(&head->lock);
	return -EADDRNOTAVAIL;
}

/* Hash SYN-SENT socket to established hash table after
 * checking that it is unique.  Note, that without kernel lock
 * we MUST make these two operations atomic.
 *
 * Optimization: if it is bound and tcp_bind_bucket has the only
 * owner (us), we need not scan the established bucket.
 */
int tcp_v4_hash_connecting(struct sock *sk)
{
	unsigned short snum = sk->num;
	struct tcp_bind_hashbucket *head = &tcp_bhash[tcp_bhashfn(snum)];
	struct tcp_bind_bucket *tb = (struct tcp_bind_bucket *)sk->prev;

	spin_lock_bh(&head->lock);
	if (tb->owners == sk && sk->bind_next == NULL) {
		__tcp_v4_hash(sk);
		spin_unlock_bh(&head->lock);
		return 0;
	} else {
		spin_unlock_bh(&head->lock);

		/* No definite answer... Walk to established hash table */
		return tcp_v4_check_established(sk);
	}
}

/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
	struct sockaddr_in *usin = (struct sockaddr_in *) uaddr;
	struct sk_buff *buff;
	struct rtable *rt;
	u32 daddr, nexthop;
	int tmp;
	int err;

	if (addr_len < sizeof(struct sockaddr_in))
		return(-EINVAL);

	if (usin->sin_family != AF_INET)
		return(-EAFNOSUPPORT);

	nexthop = daddr = usin->sin_addr.s_addr;
	if (sk->protinfo.af_inet.opt && sk->protinfo.af_inet.opt->srr) {
		if (daddr == 0)
			return -EINVAL;
		nexthop = sk->protinfo.af_inet.opt->faddr;
	}

	tmp = ip_route_connect(&rt, nexthop, sk->saddr,
			       RT_TOS(sk->protinfo.af_inet.tos)|RTO_CONN|sk->localroute,
			       sk->bound_dev_if);
	if (tmp < 0)
		return tmp;

	if (rt->rt_flags&(RTCF_MULTICAST|RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	__sk_dst_set(sk, &rt->u.dst);

	if (!sk->protinfo.af_inet.opt || !sk->protinfo.af_inet.opt->srr)
		daddr = rt->rt_dst;

	err = -ENOBUFS;
	buff = alloc_skb(MAX_TCP_HEADER + 15, GFP_KERNEL);

	if (buff == NULL)
		goto failure;

	if (!sk->saddr)
		sk->saddr = rt->rt_src;
	sk->rcv_saddr = sk->saddr;

	if (tp->ts_recent_stamp && sk->daddr != daddr) {
		/* Reset inherited state */
		tp->ts_recent = 0;
		tp->ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	if (sysctl_tcp_tw_recycle &&
	    !tp->ts_recent_stamp &&
	    rt->rt_dst == daddr) {
		struct inet_peer *peer = rt_get_peer(rt);

		/* VJ's idea. We save last timestamp seen from
		 * the destination in peer table, when entering state TIME-WAIT
		 * and initialize ts_recent from it, when trying new connection.
		 */
		if (peer && peer->tcp_ts_stamp + TCP_PAWS_MSL >= xtime.tv_sec) {
			tp->ts_recent_stamp = peer->tcp_ts_stamp;
			tp->ts_recent = peer->tcp_ts;
		}
	}

	sk->dport = usin->sin_port;
	sk->daddr = daddr;

	if (!tp->write_seq)
		tp->write_seq = secure_tcp_sequence_number(sk->saddr, sk->daddr,
							   sk->sport, usin->sin_port);

	tp->ext_header_len = 0;
	if (sk->protinfo.af_inet.opt)
		tp->ext_header_len = sk->protinfo.af_inet.opt->optlen;

	tp->mss_clamp = 536;

	err = tcp_connect(sk, buff);
	if (err == 0)
		return 0;

failure:
	__sk_dst_reset(sk);
	sk->dport = 0;
	return err;
}

static __inline__ int tcp_v4_iif(struct sk_buff *skb)
{
	return ((struct rtable*)skb->dst)->rt_iif;
}

static __inline__ unsigned tcp_v4_synq_hash(u32 raddr, u16 rport)
{
	unsigned h = raddr ^ rport;
	h ^= h>>16;
	h ^= h>>8;
	return h&(TCP_SYNQ_HSIZE-1);
}

static struct open_request *tcp_v4_search_req(struct tcp_opt *tp,
					      struct iphdr *iph,
					      struct tcphdr *th,
					      struct open_request ***prevp)
{
	struct tcp_listen_opt *lopt = tp->listen_opt;
	struct open_request *req, **prev;
	__u16 rport = th->source;
	__u32 raddr = iph->saddr;

	for (prev = &lopt->syn_table[tcp_v4_synq_hash(raddr, rport)];
	     (req = *prev) != NULL;
	     prev = &req->dl_next) {
		if (req->rmt_port == rport &&
		    req->af.v4_req.rmt_addr == raddr &&
		    req->af.v4_req.loc_addr == iph->daddr &&
		    TCP_INET_FAMILY(req->class->family)) {
			BUG_TRAP(req->sk == NULL);
			*prevp = prev;
			return req;
		}
	}

	return NULL;
}

static void tcp_v4_synq_add(struct sock *sk, struct open_request *req)
{
	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
	struct tcp_listen_opt *lopt = tp->listen_opt;
	unsigned h = tcp_v4_synq_hash(req->af.v4_req.rmt_addr, req->rmt_port);

	req->expires = jiffies + TCP_TIMEOUT_INIT;
	req->retrans = 0;
	req->sk = NULL;
	req->index = h;
	req->dl_next = lopt->syn_table[h];

	write_lock(&tp->syn_wait_lock);
	lopt->syn_table[h] = req;
	write_unlock(&tp->syn_wait_lock);

	tcp_synq_added(sk);
}


/*
 * This routine does path mtu discovery as defined in RFC1191.
 */
static inline void do_pmtu_discovery(struct sock *sk, struct iphdr *ip, unsigned mtu)
{
	struct dst_entry *dst;
	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;

	/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
	 * sent out by Linux are always < 576 bytes, so they should go through
	 * unfragmented).
	 */
	if (sk->state == TCP_LISTEN)
		return;

	/* We don't check in the destentry if pmtu discovery is forbidden
	 * on this route. We just assume that no packet-too-big packets
	 * are sent back when pmtu discovery is not active.
	 * There is a small race when the user changes this flag in the
	 * route, but I think that's acceptable.
	 */
	if ((dst = __sk_dst_check(sk, 0)) == NULL)
		return;

	ip_rt_update_pmtu(dst, mtu);

	/* Something is about to be wrong... Remember soft error
	 * for the case, if this connection will not be able to recover.
	 */
	if (mtu < dst->pmtu && ip_dont_fragment(sk, dst))
		sk->err_soft = EMSGSIZE;

	if (sk->protinfo.af_inet.pmtudisc != IP_PMTUDISC_DONT &&
	    tp->pmtu_cookie > dst->pmtu) {
		tcp_sync_mss(sk, dst->pmtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
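/*
 * Illustrative note, not part of the original file: on ICMP_FRAG_NEEDED
 * the code above lowers the cached route MTU via ip_rt_update_pmtu(),
 * calls tcp_sync_mss() when the connection's pmtu_cookie is now too large,
 * and retransmits immediately with tcp_simple_retransmit() rather than
 * waiting for the retransmit timer - this is the "fast" path mtu discovery
 * referred to in the comment above.
 */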
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 *
 */

void tcp_v4_err(struct sk_buff *skb, unsigned char *dp, int len)
{
	struct iphdr *iph = (struct iphdr*)dp;
	struct tcphdr *th;
	struct tcp_opt *tp;
	int type = skb->h.icmph->type;
	int code = skb->h.icmph->code;
#if ICMP_MIN_LENGTH < 14
	int no_flags = 0;
#else
#define no_flags 0
#endif
	struct sock *sk;
	__u32 seq;
	int err;

	if (len < (iph->ihl << 2) + ICMP_MIN_LENGTH) {
		ICMP_INC_STATS_BH(IcmpInErrors);
		return;
	}
#if ICMP_MIN_LENGTH < 14
	if (len < (iph->ihl << 2) + 14)
		no_flags = 1;
#endif

	th = (struct tcphdr*)(dp+(iph->ihl<<2));

	sk = tcp_v4_lookup(iph->daddr, th->dest, iph->saddr, th->source, tcp_v4_iif(skb));
	if (sk == NULL) {
		ICMP_INC_STATS_BH(IcmpInErrors);
		return;
	}
	if (sk->state == TCP_TIME_WAIT) {
		tcp_tw_put((struct tcp_tw_bucket*)sk);
		return;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 */
	if (sk->lock.users != 0)
		NET_INC_STATS_BH(LockDroppedIcmps);

	if (sk->state == TCP_CLOSE)
		goto out;

	tp = &sk->tp_pinfo.af_tcp;
	seq = ntohl(th->seq);
	if (sk->state != TCP_LISTEN && !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS(OutOfWindowIcmps);
		goto out;
	}

	switch (type) {
	case ICMP_SOURCE_QUENCH:
		/* This is deprecated, but if someone generated it,
		 * we have no reasons to ignore it.
		 */
		if (sk->lock.users == 0)
			tcp_enter_cwr(tp);
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			if (sk->lock.users == 0)
				do_pmtu_discovery(sk, iph, ntohs(skb->h.icmph->un.frag.mtu));
			goto out;
		}

		err = icmp_err_convert[code].errno;
		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->state) {
		struct open_request *req, **prev;
	case TCP_LISTEN:
		if (sk->lock.users != 0)
			goto out;

		/* The final ACK of the handshake should be already
		 * handled in the new socket context, not here.
		 * Strictly speaking - an ICMP error for the final
		 * ACK should set the opening flag, but that is too
		 * complicated right now.
		 */
		if (!no_flags && !th->syn && !th->ack)
			goto out;

		req = tcp_v4_search_req(tp, iph, th, &prev);
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		   an established socket here.
		 */
		BUG_TRAP(req->sk == NULL);

		if (seq != req->snt_isn) {
			NET_INC_STATS_BH(OutOfWindowIcmps);
			goto out;
		}

		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		tcp_synq_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot normally happen.
			       It can, for example, if SYNs crossed.
			     */
		if (!no_flags && !th->syn)
			goto out;
		if (sk->lock.users == 0) {
			TCP_INC_STATS_BH(TcpAttemptFails);
			sk->err = err;

			sk->error_report(sk);

			tcp_done(sk);
		} else {
			sk->err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters even these two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 *							--ANK (980905)
	 */

	if (sk->lock.users == 0 && sk->protinfo.af_inet.recverr) {
		sk->err = err;
		sk->error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
		       struct sk_buff *skb)
{
	th->check = tcp_v4_check(th, len, sk->saddr, sk->daddr,
				 csum_partial((char *)th, th->doff<<2, skb->csum));
}

/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *		So that we build reply only basing on parameters
 *		arrived with segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(struct sk_buff *skb)
{
	struct tcphdr *th = skb->h.th;
	struct tcphdr rth;
	struct ip_reply_arg arg;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	if (((struct rtable*)skb->dst)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rth, 0, sizeof(struct tcphdr));
	rth.dest = th->source;
	rth.source = th->dest;
	rth.doff = sizeof(struct tcphdr)/4;
	rth.rst = 1;

	if (th->ack) {
		rth.seq = th->ack_seq;
	} else {
		rth.ack = 1;
		rth.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin
				    + skb->len - (th->doff<<2));
	}

	memset(&arg, 0, sizeof arg);
	arg.iov[0].iov_base = (unsigned char *)&rth;
	arg.iov[0].iov_len  = sizeof rth;
	arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
				      skb->nh.iph->saddr, /*XXX*/
				      sizeof(struct tcphdr),
				      IPPROTO_TCP,
				      0);
	arg.n_iov = 1;
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;

	ip_send_reply(tcp_socket->sk, skb, &arg, sizeof rth);

	TCP_INC_STATS_BH(TcpOutSegs);
	TCP_INC_STATS_BH(TcpOutRsts);
}

/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts)
{
	struct tcphdr *th = skb->h.th;
	struct {
		struct tcphdr th;
		u32 tsopt[3];
	} rep;
	struct ip_reply_arg arg;

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof arg);

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	arg.n_iov = 1;
	if (ts) {
		rep.tsopt[0] = __constant_htonl((TCPOPT_NOP << 24) |
						(TCPOPT_NOP << 16) |
						(TCPOPT_TIMESTAMP << 8) |
						TCPOLEN_TIMESTAMP);
		rep.tsopt[1] = htonl(tcp_time_stamp);
		rep.tsopt[2] = htonl(ts);
		arg.iov[0].iov_len = sizeof(rep);
	}

	/* Swap the send and the receive. */
	rep.th.dest = th->source;
	rep.th.source = th->dest;
	rep.th.doff = arg.iov[0].iov_len/4;
	rep.th.seq = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack = 1;
	rep.th.window = htons(win);

	arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
				      skb->nh.iph->saddr, /*XXX*/
				      arg.iov[0].iov_len,
				      IPPROTO_TCP,
				      0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;

	ip_send_reply(tcp_socket->sk, skb, &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(TcpOutSegs);
}

static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk;

	tcp_v4_send_ack(skb, tw->snd_nxt, tw->rcv_nxt,
			tw->rcv_wnd>>tw->rcv_wscale, tw->ts_recent);

	tcp_tw_put(tw);
}

static void tcp_v4_or_send_ack(struct sk_buff *skb, struct open_request *req)
{
	tcp_v4_send_ack(skb, req->snt_isn+1, req->rcv_isn+1, req->rcv_wnd,
			req->ts_recent);
}

static struct dst_entry* tcp_v4_route_req(struct sock *sk, struct open_request *req)
{
	struct rtable *rt;
	struct ip_options *opt;

	opt = req->af.v4_req.opt;
	if(ip_route_output(&rt, ((opt && opt->srr) ?
				 opt->faddr :
				 req->af.v4_req.rmt_addr),
			   req->af.v4_req.loc_addr,
			   RT_TOS(sk->protinfo.af_inet.tos) | RTO_CONN | sk->localroute,
			   sk->bound_dev_if)) {
		IP_INC_STATS_BH(IpOutNoRoutes);
		return NULL;
	}
	if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) {
		ip_rt_put(rt);
		IP_INC_STATS_BH(IpOutNoRoutes);
		return NULL;
	}
	return &rt->u.dst;
}

/*
 *	Send a SYN-ACK after having received an ACK.
 *	This still operates on an open_request only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(struct sock *sk, struct open_request *req,
			      struct dst_entry *dst)
{
	int err = -1;
	struct sk_buff * skb;

	/* First, grab a route. */
	if (dst == NULL &&
	    (dst = tcp_v4_route_req(sk, req)) == NULL)
		goto out;

	skb = tcp_make_synack(sk, dst, req);

	if (skb) {
		struct tcphdr *th = skb->h.th;

		th->check = tcp_v4_check(th, skb->len,
					 req->af.v4_req.loc_addr, req->af.v4_req.rmt_addr,
					 csum_partial((char *)th, skb->len, skb->csum));

		err = ip_build_and_send_pkt(skb, sk, req->af.v4_req.loc_addr,
					    req->af.v4_req.rmt_addr, req->af.v4_req.opt);
		if (err == NET_XMIT_CN)
			err = 0;
	}

out:
	dst_release(dst);
	return err;
}

/*
 *	IPv4 open_request destructor.
 */
static void tcp_v4_or_free(struct open_request *req)
{
	if (req->af.v4_req.opt)
		kfree(req->af.v4_req.opt);
}

static inline void syn_flood_warning(struct sk_buff *skb)
{
	static unsigned long warntime;

	if (jiffies - warntime > HZ*60) {
		warntime = jiffies;
		printk(KERN_INFO
		       "possible SYN flooding on port %d. Sending cookies.\n",
		       ntohs(skb->h.th->dest));
	}
}

/*
 * Save and compile IPv4 options into the open_request if needed.
 */
static inline struct ip_options *
tcp_v4_save_options(struct sock *sk, struct sk_buff *skb)
{
	struct ip_options *opt = &(IPCB(skb)->opt);
	struct ip_options *dopt = NULL;

	if (opt && opt->optlen) {
		int opt_size = optlength(opt);
		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt) {
			if (ip_options_echo(dopt, skb)) {
				kfree(dopt);
				dopt = NULL;
			}
		}
	}
	return dopt;
}
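/*
 * Hedged usage note, not part of the original file: the cookie path in
 * tcp_v4_conn_request() below is compiled in only with CONFIG_SYN_COOKIES
 * and used only while sysctl_tcp_syncookies is set, e.g.:
 *
 *	echo 1 > /proc/sys/net/ipv4/tcp_syncookies
 *
 * syn_flood_warning() above rate-limits the corresponding log message to
 * one line per minute.
 */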
/*
 *	Maximum number of SYN_RECV sockets in queue per LISTEN socket.
 *	One SYN_RECV socket costs about 80 bytes on a 32bit machine.
 *	It would be better to replace it with a global counter for all sockets
 *	but then some measure against one socket starving all other sockets
 *	would be needed.
 *
 *	It was 128 by default.  Experiments with real servers show that
 *	it is absolutely not enough even at 100conn/sec. 256 cures most
 *	of problems.  This value is adjusted to 128 for very small machines
 *	(<=32Mb of memory) and to 1024 on normal or better ones (>=256Mb).
 *	Further increasing requires changing the hash table size.
 */
int sysctl_max_syn_backlog = 256;

struct or_calltable or_ipv4 = {
	PF_INET,
	tcp_v4_send_synack,
	tcp_v4_or_send_ack,
	tcp_v4_or_free,
	tcp_v4_send_reset
};

int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_opt tp;
	struct open_request *req;
	__u32 saddr = skb->nh.iph->saddr;
	__u32 daddr = skb->nh.iph->daddr;
	__u32 isn = TCP_SKB_CB(skb)->when;
	struct dst_entry *dst = NULL;
#ifdef CONFIG_SYN_COOKIES
	int want_cookie = 0;
#else
#define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
#endif

	/* Never answer to SYNs sent to broadcast or multicast */
	if (((struct rtable *)skb->dst)->rt_flags &
	    (RTCF_BROADCAST|RTCF_MULTICAST))
		goto drop;

	/* TW buckets are converted to open requests without
	 * limitations, they conserve resources and peer is
	 * evidently real one.
	 */
	if (tcp_synq_is_full(sk) && !isn) {
#ifdef CONFIG_SYN_COOKIES
		if (sysctl_tcp_syncookies) {
			want_cookie = 1;
		} else
#endif
		goto drop;
	}

	/* Accept backlog is full. If we have already queued enough
	 * of warm entries in syn queue, drop request. It is better than
	 * clogging syn queue with openreqs with exponentially increasing
	 * timeout.
	 */
	if (tcp_acceptq_is_full(sk) && tcp_synq_young(sk) > 1)
		goto drop;

	req = tcp_openreq_alloc();
	if (req == NULL)
		goto drop;

	tcp_clear_options(&tp);
	tp.mss_clamp = 536;
	tp.user_mss = sk->tp_pinfo.af_tcp.user_mss;

	tcp_parse_options(skb, &tp, 0);

	if (want_cookie) {
		tcp_clear_options(&tp);
		tp.saw_tstamp = 0;
	}

	if (tp.saw_tstamp && tp.rcv_tsval == 0) {
		/* Some OSes (unknown ones, but I see them on web server, which
		 * contains information interesting only for windows' users)
		 * do not send their stamp in SYN. It is easy case.
		 * We simply do not advertise TS support.
		 */
		tp.saw_tstamp = 0;
		tp.tstamp_ok = 0;
	}
	tp.tstamp_ok = tp.saw_tstamp;

	tcp_openreq_init(req, &tp, skb);

	req->af.v4_req.loc_addr = daddr;
	req->af.v4_req.rmt_addr = saddr;
	req->af.v4_req.opt = tcp_v4_save_options(sk, skb);
	req->class = &or_ipv4;

	if (!want_cookie)
		TCP_ECN_create_request(req, skb->h.th);

	if (want_cookie) {
#ifdef CONFIG_SYN_COOKIES
		syn_flood_warning(skb);
#endif
		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
	} else if (isn == 0) {
		struct inet_peer *peer = NULL;

		/* VJ's idea. We save last timestamp seen
		 * from the destination in peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting new connection request.
		 *
		 * If "isn" is not zero, this request hit alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tp.saw_tstamp &&
		    sysctl_tcp_tw_recycle &&
		    (dst = tcp_v4_route_req(sk, req)) != NULL &&
		    (peer = rt_get_peer((struct rtable*)dst)) != NULL &&
		    peer->v4daddr == saddr) {
			if (xtime.tv_sec < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
			    (s32)(peer->tcp_ts - req->ts_recent) > TCP_PAWS_WINDOW) {
				NET_INC_STATS_BH(PAWSPassiveRejected);
				dst_release(dst);
				goto drop_and_free;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - tcp_synq_len(sk)
			  < (sysctl_max_syn_backlog>>2)) &&
			 (!peer || !peer->tcp_ts_stamp) &&
			 (!dst || !dst->rtt)) {
			/* Without syncookies last quarter of
			 * backlog is filled with destinations, proven to be alive.
			 * It means that we continue to communicate
			 * to destinations, already remembered
			 * to the moment of synflood.
			 */
			NETDEBUG(if (net_ratelimit()) \
				 printk(KERN_DEBUG "TCP: drop open request from %u.%u.%u.%u/%u\n", \
					NIPQUAD(saddr), ntohs(skb->h.th->source)));
			TCP_INC_STATS_BH(TcpAttemptFails);
			dst_release(dst);
			goto drop_and_free;
		}

		isn = tcp_v4_init_sequence(sk, skb);
	}
	req->snt_isn = isn;

	if (tcp_v4_send_synack(sk, req, dst))
		goto drop_and_free;

	if (want_cookie) {
		tcp_openreq_free(req);
	} else {
		tcp_v4_synq_add(sk, req);
	}
	return 0;

drop_and_free:
	tcp_openreq_free(req);
drop:
	TCP_INC_STATS_BH(TcpAttemptFails);
	return 0;
}


/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock * tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
				   struct open_request *req,
				   struct dst_entry *dst)
{
	struct tcp_opt *newtp;
	struct sock *newsk;

	if (tcp_acceptq_is_full(sk))
		goto exit_overflow;

	if (dst == NULL &&
	    (dst = tcp_v4_route_req(sk, req)) == NULL)
		goto exit;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit;

	newsk->dst_cache = dst;

	newtp = &(newsk->tp_pinfo.af_tcp);
	newsk->daddr = req->af.v4_req.rmt_addr;
	newsk->saddr = req->af.v4_req.loc_addr;
	newsk->rcv_saddr = req->af.v4_req.loc_addr;
	newsk->protinfo.af_inet.opt = req->af.v4_req.opt;
	req->af.v4_req.opt = NULL;
	newsk->protinfo.af_inet.mc_index = tcp_v4_iif(skb);
	newsk->protinfo.af_inet.mc_ttl = skb->nh.iph->ttl;
	newtp->ext_header_len = 0;
	if (newsk->protinfo.af_inet.opt)
		newtp->ext_header_len = newsk->protinfo.af_inet.opt->optlen;

	tcp_sync_mss(newsk, dst->pmtu);
	newtp->advmss = dst->advmss;
	tcp_initialize_rcv_mss(newsk);

	__tcp_v4_hash(newsk);
	__tcp_inherit_port(sk, newsk);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(ListenOverflows);
exit:
	NET_INC_STATS_BH(ListenDrops);
	dst_release(dst);
	return NULL;
}

static struct sock *tcp_v4_hnd_req(struct sock *sk,struct sk_buff *skb)
{
	struct open_request *req, **prev;
	struct tcphdr *th = skb->h.th;
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
	struct sock *nsk;

	/* Find possible connection requests. */
	req = tcp_v4_search_req(tp, skb->nh.iph, th, &prev);
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = __tcp_v4_lookup_established(skb->nh.iph->saddr,
					  th->source,
					  skb->nh.iph->daddr,
					  ntohs(th->dest),
					  tcp_v4_iif(skb));

	if (nsk) {
		if (nsk->state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		tcp_tw_put((struct tcp_tw_bucket*)sk);
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->rst && !th->syn && th->ack)
		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}

static int tcp_v4_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_HW) {
		if (tcp_v4_check(skb->h.th,skb->len,skb->nh.iph->saddr,
				 skb->nh.iph->daddr,skb->csum)) {
			NETDEBUG(printk(KERN_DEBUG "hw tcp v4 csum failed\n"));
			return -1;
		}
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		if (skb->len <= 76) {
			if (tcp_v4_check(skb->h.th,skb->len,skb->nh.iph->saddr,
					 skb->nh.iph->daddr,
					 csum_partial((char *)skb->h.th, skb->len, 0)))
				return -1;
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else {
			skb->csum = ~tcp_v4_check(skb->h.th,skb->len,skb->nh.iph->saddr,
						  skb->nh.iph->daddr,0);
		}
	}
	return 0;
}
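/*
 * Illustrative note, not part of the original file: tcp_v4_checksum_init()
 * above verifies hardware-computed checksums immediately, fully checks
 * short segments (<= 76 bytes) in software, and for longer segments only
 * seeds skb->csum with the pseudo-header sum so that
 * tcp_checksum_complete() can finish the verification later in the
 * receive path.
 */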
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_FILTER
	struct sk_filter *filter = sk->filter;
	if (filter && sk_filter(skb, filter))
		goto discard;
#endif /* CONFIG_FILTER */

	IP_INC_STATS_BH(IpInDelivers);

	if (sk->state == TCP_ESTABLISHED) { /* Fast path */
		TCP_CHECK_TIMER(sk);
		if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
			goto reset;
		TCP_CHECK_TIMER(sk);
		return 0;
	}

	if (skb->len < (skb->h.th->doff<<2) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			return 0;
		}
	}

	TCP_CHECK_TIMER(sk);
	if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
		goto reset;
	TCP_CHECK_TIMER(sk);
	return 0;

reset:
	tcp_v4_send_reset(skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(TcpInErrs);
	goto discard;
}

/*
 *	From tcp_input.c
 */

int tcp_v4_rcv(struct sk_buff *skb, unsigned short len)
{
	struct tcphdr *th;
	struct sock *sk;
	int ret;

	if (skb->pkt_type!=PACKET_HOST)
		goto discard_it;

	th = skb->h.th;

	/* Pull up the IP header. */
	__skb_pull(skb, skb->h.raw - skb->data);

	/* Count it even if it's bad */
	TCP_INC_STATS_BH(TcpInSegs);

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */
	if (th->doff < sizeof(struct tcphdr)/4 ||
	    (skb->ip_summed != CHECKSUM_UNNECESSARY &&
	     tcp_v4_checksum_init(skb) < 0))
		goto bad_packet;

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when = 0;
	TCP_SKB_CB(skb)->flags = skb->nh.iph->tos;
	TCP_SKB_CB(skb)->sacked = 0;
	skb->used = 0;

	sk = __tcp_v4_lookup(skb->nh.iph->saddr, th->source,
			     skb->nh.iph->daddr, ntohs(th->dest), tcp_v4_iif(skb));

	if (!sk)
		goto no_tcp_socket;

process:
	if(!ipsec_sk_policy(sk,skb))
		goto discard_and_relse;

	if (sk->state == TCP_TIME_WAIT)
		goto do_time_wait;

	skb->dev = NULL;

	bh_lock_sock(sk);
	ret = 0;
	if (!sk->lock.users) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v4_do_rcv(sk, skb);
	} else
		sk_add_backlog(sk, skb);
	bh_unlock_sock(sk);

	sock_put(sk);

	return ret;

no_tcp_socket:
	if (len < (th->doff<<2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(TcpInErrs);
	} else {
		tcp_v4_send_reset(skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (len < (th->doff<<2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(TcpInErrs);
		goto discard_and_relse;
	}
	switch(tcp_timewait_state_process((struct tcp_tw_bucket *)sk,
					  skb, th, skb->len)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = tcp_v4_lookup_listener(skb->nh.iph->daddr, ntohs(th->dest), tcp_v4_iif(skb));
		if (sk2 != NULL) {
			tcp_tw_deschedule((struct tcp_tw_bucket *)sk);
			tcp_timewait_kill((struct tcp_tw_bucket *)sk);
			tcp_tw_put((struct tcp_tw_bucket *)sk);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}

/* With per-bucket locks this operation is not-atomic, so that
 * this version is not worse.
 */
static void __tcp_v4_rehash(struct sock *sk)
{
	sk->prot->unhash(sk);
	sk->prot->hash(sk);
}

static int tcp_v4_reselect_saddr(struct sock *sk)
{
	int err;
	struct rtable *rt;
	__u32 old_saddr = sk->saddr;
	__u32 new_saddr;
	__u32 daddr = sk->daddr;

	if(sk->protinfo.af_inet.opt && sk->protinfo.af_inet.opt->srr)
		daddr = sk->protinfo.af_inet.opt->faddr;

	/* Query new route. */
	err = ip_route_connect(&rt, daddr, 0,
			       RT_TOS(sk->protinfo.af_inet.tos)|sk->localroute,
			       sk->bound_dev_if);
	if (err)
		return err;

	__sk_dst_set(sk, &rt->u.dst);
	/* sk->route_caps = rt->u.dst.dev->features; */

	new_saddr = rt->rt_src;

	if (new_saddr == old_saddr)
		return 0;

	if (sysctl_ip_dynaddr > 1) {
		printk(KERN_INFO "tcp_v4_rebuild_header(): shifting sk->saddr "
		       "from %d.%d.%d.%d to %d.%d.%d.%d\n",
		       NIPQUAD(old_saddr),
		       NIPQUAD(new_saddr));
	}

	sk->saddr = new_saddr;
	sk->rcv_saddr = new_saddr;

	/* XXX The only one ugly spot where we need to
	 * XXX really change the sockets identity after
	 * XXX it has entered the hashes. -DaveM
	 *
	 * Besides that, it does not check for connection
	 * uniqueness. Wait for troubles.
	 */
	__tcp_v4_rehash(sk);
	return 0;
}

int tcp_v4_rebuild_header(struct sock *sk)
{
	struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0);
	u32 daddr;
	int err;

	/* Route is OK, nothing to do. */
	if (rt != NULL)
		return 0;

	/* Reroute. */
	daddr = sk->daddr;
	if(sk->protinfo.af_inet.opt && sk->protinfo.af_inet.opt->srr)
		daddr = sk->protinfo.af_inet.opt->faddr;

	err = ip_route_output(&rt, daddr, sk->saddr,
			      RT_TOS(sk->protinfo.af_inet.tos) | RTO_CONN | sk->localroute,
			      sk->bound_dev_if);
	if (!err) {
		__sk_dst_set(sk, &rt->u.dst);
		/* sk->route_caps = rt->u.dst.dev->features; */
		return 0;
	}

	/* Routing failed... */
	/* sk->route_caps = 0; */

	if (!sysctl_ip_dynaddr ||
	    sk->state != TCP_SYN_SENT ||
	    (sk->userlocks & SOCK_BINDADDR_LOCK) ||
	    (err = tcp_v4_reselect_saddr(sk)) != 0) {
		sk->err_soft=-err;
		/* sk->error_report(sk); */
	}

	return err;
}

static void v4_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr)
{
	struct sockaddr_in *sin = (struct sockaddr_in *) uaddr;

	sin->sin_family		= AF_INET;
	sin->sin_addr.s_addr	= sk->daddr;
	sin->sin_port		= sk->dport;
}
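/*
 * Hedged usage note, not part of the original file: the source address
 * rewriting in tcp_v4_reselect_saddr() above is attempted only for sockets
 * still in SYN_SENT with no explicitly bound address, and only when
 * ip_dynaddr is enabled, e.g.:
 *
 *	echo 1 > /proc/sys/net/ipv4/ip_dynaddr
 *
 * Values greater than 1 additionally log the address change via the printk
 * in tcp_v4_reselect_saddr().
 */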
/* VJ's idea. Save last timestamp seen from this destination
 * and hold it at least for normal timewait interval to use for duplicate
 * segment detection in subsequent connections, before they enter synchronized
 * state.
 */
int tcp_v4_remember_stamp(struct sock *sk)
{
	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
	struct rtable *rt = (struct rtable*)__sk_dst_get(sk);
	struct inet_peer *peer = NULL;
	int release_it = 0;

	if (rt == NULL || rt->rt_dst != sk->daddr) {
		peer = inet_getpeer(sk->daddr, 1);
		release_it = 1;
	} else {
		if (rt->peer == NULL)
			rt_bind_peer(rt, 1);
		peer = rt->peer;
	}

	if (peer) {
		if ((s32)(peer->tcp_ts - tp->ts_recent) <= 0 ||
		    (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
		     peer->tcp_ts_stamp <= tp->ts_recent_stamp)) {
			peer->tcp_ts_stamp = tp->ts_recent_stamp;
			peer->tcp_ts = tp->ts_recent;
		}
		if (release_it)
			inet_putpeer(peer);
		return 1;
	}

	return 0;
}

int tcp_v4_tw_remember_stamp(struct tcp_tw_bucket *tw)
{
	struct inet_peer *peer = NULL;

	peer = inet_getpeer(tw->daddr, 1);

	if (peer) {
		if ((s32)(peer->tcp_ts - tw->ts_recent) <= 0 ||
		    (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
		     peer->tcp_ts_stamp <= tw->ts_recent_stamp)) {
			peer->tcp_ts_stamp = tw->ts_recent_stamp;
			peer->tcp_ts = tw->ts_recent;
		}
		inet_putpeer(peer);
		return 1;
	}

	return 0;
}

struct tcp_func ipv4_specific = {
	ip_queue_xmit,
	tcp_v4_send_check,
	tcp_v4_rebuild_header,
	tcp_v4_conn_request,
	tcp_v4_syn_recv_sock,
	tcp_v4_hash_connecting,
	tcp_v4_remember_stamp,
	sizeof(struct iphdr),

	ip_setsockopt,
	ip_getsockopt,
	v4_addr2sockaddr,
	sizeof(struct sockaddr_in)
};

/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	tp->rto  = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = 2;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = 0x7fffffff;	/* Infinity */
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = 536;

	tp->reordering = sysctl_tcp_reordering;

	sk->state = TCP_CLOSE;

	sk->write_space = tcp_write_space;

	sk->tp_pinfo.af_tcp.af_specific = &ipv4_specific;

	sk->sndbuf = sysctl_tcp_wmem[1];
	sk->rcvbuf = sysctl_tcp_rmem[1];

	atomic_inc(&tcp_sockets_allocated);

	return 0;
}

static int tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);

	tcp_clear_xmit_timers(sk);

	/* Clean up the write buffer. */
	tcp_writequeue_purge(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	__skb_queue_purge(&tp->out_of_order_queue);

	/* Clean prequeue, it must be empty really */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if(sk->prev != NULL)
		tcp_put_port(sk);

	atomic_dec(&tcp_sockets_allocated);

	return 0;
}

/* Proc filesystem TCP sock list dumping. */
static void get_openreq(struct sock *sk, struct open_request *req, char *tmpbuf, int i, int uid)
{
	int ttd = req->expires - jiffies;

	sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08X %08X %5d %8d %u %d %p",
		i,
		req->af.v4_req.loc_addr,
		ntohs(sk->sport),
		req->af.v4_req.rmt_addr,
		ntohs(req->rmt_port),
		TCP_SYN_RECV,
		0,0, /* could print option size, but that is af dependent. */
		1,   /* timers active (only the expire timer) */
		ttd,
		req->retrans,
		uid,
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		atomic_read(&sk->refcnt),
		req
		);
}

static void get_tcp_sock(struct sock *sp, char *tmpbuf, int i)
{
	unsigned int dest, src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	struct tcp_opt *tp = &sp->tp_pinfo.af_tcp;

	dest  = sp->daddr;
	src   = sp->rcv_saddr;
	destp = ntohs(sp->dport);
	srcp  = ntohs(sp->sport);
	if (tp->pending == TCP_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= tp->timeout;
	} else if (tp->pending == TCP_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= tp->timeout;
	} else if (timer_pending(&sp->timer)) {
		timer_active	= 2;
		timer_expires	= sp->timer.expires;
	} else {
		timer_active	= 0;
		timer_expires	= jiffies;
	}

	sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %u %u %u %u %d",
		i, src, srcp, dest, destp, sp->state,
		tp->write_seq-tp->snd_una, tp->rcv_nxt-tp->copied_seq,
		timer_active, timer_expires-jiffies,
		tp->retransmits,
		sock_i_uid(sp),
		tp->probes_out,
		sock_i_ino(sp),
		atomic_read(&sp->refcnt), sp,
		tp->rto, tp->ack.ato, (tp->ack.quick<<1)|tp->ack.pingpong,
		tp->snd_cwnd, tp->snd_ssthresh>=0xFFFF?-1:tp->snd_ssthresh
		);
}

static void get_timewait_sock(struct tcp_tw_bucket *tw, char *tmpbuf, int i)
{
	unsigned int dest, src;
	__u16 destp, srcp;
	int ttd = tw->ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest  = tw->daddr;
	src   = tw->rcv_saddr;
	destp = ntohs(tw->dport);
	srcp  = ntohs(tw->sport);

	sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08X %08X %5d %8d %d %d %p",
		i, src, srcp, dest, destp, tw->substate, 0, 0,
		3, ttd, 0, 0, 0, 0,
		atomic_read(&tw->refcnt), tw);
}

#define TMPSZ 150

int tcp_get_info(char *buffer, char **start, off_t offset, int length)
{
	int len = 0, num = 0, i;
	off_t begin, pos = 0;
	char tmpbuf[TMPSZ+1];

	if (offset < TMPSZ)
		len += sprintf(buffer, "%-*s\n", TMPSZ-1,
			       "  sl  local_address rem_address   st tx_queue "
			       "rx_queue tr tm->when retrnsmt   uid  timeout inode");

	pos = TMPSZ;

	/* First, walk listening socket table. */
	tcp_listen_lock();
	for(i = 0; i < TCP_LHTABLE_SIZE; i++) {
		struct sock *sk = tcp_listening_hash[i];
		struct tcp_listen_opt *lopt;
		int k;

		for (sk = tcp_listening_hash[i]; sk; sk = sk->next, num++) {
			struct open_request *req;
			int uid;
			struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);

			if (!TCP_INET_FAMILY(sk->family))
				goto skip_listen;

			pos += TMPSZ;
			if (pos >= offset) {
				get_tcp_sock(sk, tmpbuf, num);
				len += sprintf(buffer+len, "%-*s\n", TMPSZ-1, tmpbuf);
				if (len >= length) {
					tcp_listen_unlock();
					goto out_no_bh;
				}
			}

skip_listen:
			uid = sock_i_uid(sk);
			read_lock_bh(&tp->syn_wait_lock);
			lopt = tp->listen_opt;
			if (lopt && lopt->qlen != 0) {
				for (k=0; k<TCP_SYNQ_HSIZE; k++) {
					for (req = lopt->syn_table[k]; req; req = req->dl_next, num++) {
						if (!TCP_INET_FAMILY(req->class->family))
							continue;

						pos += TMPSZ;
						if (pos <= offset)
							continue;
						get_openreq(sk, req, tmpbuf, num, uid);
						len += sprintf(buffer+len, "%-*s\n", TMPSZ-1, tmpbuf);
						if(len >= length) {
							read_unlock_bh(&tp->syn_wait_lock);
							tcp_listen_unlock();
							goto out_no_bh;
						}
					}
				}
			}
			read_unlock_bh(&tp->syn_wait_lock);

			/* Completed requests are in normal socket hash table */
		}
	}
	tcp_listen_unlock();

	local_bh_disable();

	/* Next, walk established hash chain. */
	for (i = 0; i < tcp_ehash_size; i++) {
		struct tcp_ehash_bucket *head = &tcp_ehash[i];
		struct sock *sk;
		struct tcp_tw_bucket *tw;

		read_lock(&head->lock);
		for(sk = head->chain; sk; sk = sk->next, num++) {
			if (!TCP_INET_FAMILY(sk->family))
				continue;
			pos += TMPSZ;
			if (pos <= offset)
				continue;
			get_tcp_sock(sk, tmpbuf, num);
			len += sprintf(buffer+len, "%-*s\n", TMPSZ-1, tmpbuf);
			if(len >= length) {
				read_unlock(&head->lock);
				goto out;
			}
		}
		for (tw = (struct tcp_tw_bucket *)tcp_ehash[i+tcp_ehash_size].chain;
		     tw != NULL;
		     tw = (struct tcp_tw_bucket *)tw->next, num++) {
			if (!TCP_INET_FAMILY(tw->family))
				continue;
			pos += TMPSZ;
			if (pos <= offset)
				continue;
			get_timewait_sock(tw, tmpbuf, num);
			len += sprintf(buffer+len, "%-*s\n", TMPSZ-1, tmpbuf);
			if(len >= length) {
				read_unlock(&head->lock);
				goto out;
			}
		}
		read_unlock(&head->lock);
	}

out:
	local_bh_enable();
out_no_bh:

	begin = len - (pos - offset);
	*start = buffer + begin;
	len -= begin;
	if(len > length)
		len = length;
	if (len < 0)
		len = 0;
	return len;
}

struct proto tcp_prot = {
	name:		"TCP",
	close:		tcp_close,
	connect:	tcp_v4_connect,
	disconnect:	tcp_disconnect,
	accept:		tcp_accept,
	ioctl:		tcp_ioctl,
	init:		tcp_v4_init_sock,
	destroy:	tcp_v4_destroy_sock,
	shutdown:	tcp_shutdown,
	setsockopt:	tcp_setsockopt,
	getsockopt:	tcp_getsockopt,
	sendmsg:	tcp_sendmsg,
	recvmsg:	tcp_recvmsg,
	backlog_rcv:	tcp_v4_do_rcv,
	hash:		tcp_v4_hash,
	unhash:		tcp_unhash,
	get_port:	tcp_v4_get_port,
};



void __init tcp_v4_init(struct net_proto_family *ops)
{
	int err;

	tcp_inode.i_mode = S_IFSOCK;
	tcp_inode.i_sock = 1;
	tcp_inode.i_uid = 0;
	tcp_inode.i_gid = 0;
	init_waitqueue_head(&tcp_inode.i_wait);
	init_waitqueue_head(&tcp_inode.u.socket_i.wait);

	tcp_socket->inode = &tcp_inode;
	tcp_socket->state = SS_UNCONNECTED;
	tcp_socket->type=SOCK_RAW;

	if ((err=ops->create(tcp_socket, IPPROTO_TCP))<0)
		panic("Failed to create the TCP control socket.\n");
	tcp_socket->sk->allocation=GFP_ATOMIC;
	tcp_socket->sk->protinfo.af_inet.ttl = MAXTTL;

	/* Unhash it so that IP input processing does not even
	 * see it, we do not wish this socket to see incoming
	 * packets.
	 */
	tcp_socket->sk->prot->unhash(tcp_socket->sk);
}