|  | @@ -213,18 +213,39 @@ struct tcp_mss_option {
 | 
		
	
		
			
			| 213 | 213 |  /**
 | 
		
	
		
			
			| 214 | 214 |   * Advertised TCP window size
 | 
		
	
		
			
			| 215 | 215 |   *
 | 
		
	
		
			
			|  | 216 | + *
 | 
		
	
		
			
			| 216 | 217 |   * Our TCP window is actually limited by the amount of space available
 | 
		
	
		
			
			| 217 | 218 |   * for RX packets in the NIC's RX ring; we tend to populate the rings
 | 
		
	
		
			
			| 218 |  | - * with far fewer descriptors than a typical driver.  Since we have no
 | 
		
	
		
			
			| 219 |  | - * way of knowing how much of this RX ring space will be available for
 | 
		
	
		
			
			| 220 |  | - * received TCP packets (consider, for example, that they may all be
 | 
		
	
		
			
			| 221 |  | - * consumed by a series of unrelated ARP requests between other
 | 
		
	
		
			
			| 222 |  | - * machines on the network), it is actually not even theoretically
 | 
		
	
		
			
			| 223 |  | - * possible for us to specify an accurate window size.  We therefore
 | 
		
	
		
			
			| 224 |  | - * guess an arbitrary number that is empirically as large as possible
 | 
		
	
		
			
			| 225 |  | - * while avoiding retransmissions due to dropped packets.
 | 
		
	
		
			
			|  | 219 | + * with far fewer descriptors than a typical driver.  This would
 | 
		
	
		
			
			|  | 220 | + * result in a desperately small window size, which kills WAN download
 | 
		
	
		
			
			|  | 221 | + * performance; the maximum bandwidth on any link is limited to
 | 
		
	
		
			
			|  | 222 | + *
 | 
		
	
		
			
			|  | 223 | + *    max_bandwidth = ( tcp_window / round_trip_time )
 | 
		
	
		
			
			|  | 224 | + *
 | 
		
	
		
			
			|  | 225 | + * With a 4kB window, which probably accurately reflects our amount of
 | 
		
	
		
			
			|  | 226 | + * buffer space, and a WAN RTT of say 200ms, this gives a maximum
 | 
		
	
		
			
			|  | 227 | + * achievable bandwidth of 20kB/s, which is not acceptable.
 | 
		
	
		
			
			|  | 228 | + *
 | 
		
	
		
			
			|  | 229 | + * We therefore aim to process packets as fast as they arrive, and
 | 
		
	
		
			
			|  | 230 | + * advertise an "infinite" window.  If we don't process packets as
 | 
		
	
		
			
			|  | 231 | + * fast as they arrive, then we will drop packets and have to incur
 | 
		
	
		
			
			|  | 232 | + * the retransmission penalty.
 | 
		
	
		
			
			|  | 233 | + *
 | 
		
	
		
			
			|  | 234 | + * Since we don't store out-of-order received packets, the
 | 
		
	
		
			
			|  | 235 | + * retransmission penalty is that the whole window contents must be
 | 
		
	
		
			
			|  | 236 | + * resent.
 | 
		
	
		
			
			|  | 237 | + *
 | 
		
	
		
			
			|  | 238 | + * We choose to compromise on a window size of 64kB (which is the
 | 
		
	
		
			
			|  | 239 | + * maximum that can be represented without using TCP options).  This
 | 
		
	
		
			
			|  | 240 | + * gives a maximum bandwidth of 320kB/s at 200ms RTT, which is
 | 
		
	
		
			
			|  | 241 | + * probably faster than the actual link bandwidth.  It also limits
 | 
		
	
		
			
			|  | 242 | + * retransmissions to 64kB, which is reasonable.
 | 
		
	
		
			
			|  | 243 | + *
 | 
		
	
		
			
			|  | 244 | + * Finally, since the window goes into a 16-bit field and we cannot
 | 
		
	
		
			
			|  | 245 | + * actually use 65536, we use a window size of (65536-4) to ensure
 | 
		
	
		
			
			|  | 246 | + * that payloads remain dword-aligned.
 | 
		
	
		
			
			| 226 | 247 |   */
 | 
		
	
		
			
			| 227 |  | -#define TCP_WINDOW_SIZE	4096
 | 
		
	
		
			
			|  | 248 | +#define TCP_WINDOW_SIZE	( 65536 - 4 )
 | 
		
	
		
			
			| 228 | 249 |  
 | 
		
	
		
			
			| 229 | 250 |  /**
 | 
		
	
		
			
			| 230 | 251 |   * Advertised TCP MSS
 |