Tuesday, July 17, 2012

Sense fluctuation in end-to-end bandwidth using TCP Westwood implementation in wireless scenario

Some time back, as part of my research work, I worked on a TCP Westwood implementation to sense changes in the end-to-end bandwidth in a wireless environment. This work can be used by applications streaming data over wireless links to adjust various parameters of the data stream and cope with fluctuations in the bandwidth.

Wireless links are characterized as lossy links: changes in distance or in the physical conditions of the medium cause the signal to weaken, resulting in packet loss and low bandwidth. This happens intermittently and usually does not persist for long. The TCP Westwood implementation smooths the bandwidth estimate using RTT samples collected over a period of time and applies a low-pass filter to such noise. If applications streaming data at a very high rate learn about these fluctuations in a timely manner, they can adjust parameters of the data stream to cope with them. In the research work, a wireless environment was simulated using Linux boxes (with a 2.6.* kernel), and the TCP stack was changed to notify any application that needs to sense fluctuations in the bandwidth.
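
For context, the smoothing in the stock tcp_westwood.c is a simple low-pass (exponentially weighted) filter: each new bandwidth sample contributes 1/8 and the running estimate keeps 7/8. Below is a minimal sketch of that filter applied to per-window samples (bytes acked in the last window divided by the window duration), mirroring the westwood_filter() logic visible in the diff further down:

#include <stdint.h>

/* Low-pass filter of the form used by tcp_westwood.c:
 * new_estimate = (7 * old_estimate + sample) / 8 */
static inline uint32_t westwood_do_filter(uint32_t a, uint32_t b)
{
	return ((7 * a) + b) >> 3;
}

/* Feed one bandwidth sample through the two filter stages, the way
 * westwood_filter() does: bk is bytes acked in the window, delta is
 * the window duration. */
static void westwood_sample(uint32_t *bw_ns_est, uint32_t *bw_est,
			    uint32_t bk, uint32_t delta)
{
	uint32_t sample = bk / delta;	/* raw bandwidth sample */

	if (*bw_ns_est == 0 && *bw_est == 0) {
		/* empty filter: fill it with the first sample */
		*bw_ns_est = sample;
		*bw_est = *bw_ns_est;
	} else {
		*bw_ns_est = westwood_do_filter(*bw_ns_est, sample);
		*bw_est = westwood_do_filter(*bw_est, *bw_ns_est);
	}
}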

The wireless simulation was done on a Linux machine by tweaking the CBQ QoS framework. We set a bandwidth for a given class that has a filter attached to it; a packet matching the filter belongs to the "wireless" class and is enqueued on the class queue. The class has a wireless bandwidth configured, i.e. the number of bytes per second the class is allowed to forward; if that budget is exceeded, the packet is dropped. To achieve this, a timer expires every second and, on expiry, resets the remaining wireless bandwidth to the configured value. The remaining budget is decremented by the packet length every time we are ready to transmit a packet for this class; once the budget (i.e. the remaining wireless bandwidth) reaches zero, further packets for the class are dropped. This simulation ran on a Linux machine acting as a router, with the two TCP endpoints connected via this router to simulate a wireless environment. The transmitting end pumps data continuously, with hardware TCP offloads such as GSO and TSO turned off for the experiment; the receiving end does nothing but ACK the received data. The *ttcp* program is used at both ends for sending and receiving data. Following are the partial diffs of the simulation for the 2.6.* kernel; I'll paste the full changes soon ....

The diffs below were used for configuring the wireless simulation within the CBQ QoS framework (the class is configured with the tc command) and for the timer implementation:

*#*#*#*#******#*#*#*#******#*#*#*#******#*#*#*#******


*** include/linux/pkt_sched.h.orig 2010-08-12 20:50:23.239354773 +0530
--- include/linux/pkt_sched.h 2010-08-11 09:34:02.643317532 +0530
*************** struct tc_cbq_lssopt {
*** 330,335 ****
--- 330,336 ----
  unsigned char flags;
  #define TCF_CBQ_LSS_BOUNDED 1
  #define TCF_CBQ_LSS_ISOLATED 2
+ #define TCF_CBQ_LSS_WLS 4 /* wireless simulation */
  unsigned char   ewma_log;
  unsigned char   level;
  #define TCF_CBQ_LSS_FLAGS 1
*** net/sched/sch_cbq.c.orig 2010-08-12 20:50:47.197176087 +0530
--- net/sched/sch_cbq.c 2010-08-12 20:48:43.019177914 +0530
***************
*** 71,76 ****
--- 71,82 ----
  struct cbq_sched_data;


+ typedef struct cbq_class_wl_stats_ {
+ long wireless_min; /* this is minimum bandwidth achieved for the flow */
+ long wireless_max; /* this is maximum bandwidth achieved for the flow */
+ long wireless_avg; /* this is average bandwidth achieved for the flow */
+ } cbq_class_wl_stats_t;
+
  struct cbq_class
  {
  struct Qdisc_class_common common;
*************** struct cbq_class
*** 101,107 ****
--- 107,130 ----
  /* General scheduler (WRR) parameters */
  long allot;
  long quantum; /* Allotment per WRR round */
+ long wireless_b; /* this is wireless class flow bandwidth from user */
+ long wireless_a; /*
+ * this is bandwidth achieved for a given wireless class
+ * flow. This is incremented by the packet length every time a
+ * packet is queued on the class. The value is recorded and
+ * reset once the timer fires.
+ */
+ long wireless_wt; /*
+ * this is the wireless bandwidth budget for the class flow;
+ * same as a budget, we decrement this by the packet
+ * length every time a packet is queued on the class.
+ * Once the timer fires, we set it to value in
+ * wireless_b. If the value has reached zero, we
+ * don't entertain the packets for the class,
+ * essentially drop them.
+ */
  long weight; /* Relative allotment: see below */
+ cbq_class_wl_stats_t wireless_stats; /* statistics for wireless bandwidth achieved for the flow */

  struct Qdisc *qdisc; /* Ptr to CBQ discipline */
  struct cbq_class *split; /* Ptr to split node */
*************** struct cbq_class
*** 134,142 ****
--- 157,176 ----
  struct tc_cbq_xstats xstats;

  struct tcf_proto *filter_list;
+ struct timer_list wl_timer;     /*
+       * wireless simulation timer. this fires
+       * every second to refresh the wireless
+       * bandwidth budget for the class. The field
+       * wireless_wt is reset to wireless_b on
+       * timer expiry
+       */

  int refcnt;
  int filters;
+ int wireless_s;
+ /* flags for wireless_s */
+ #define TC_CBQ_WL_SIM 0x01 /* class simulates wireless */
+ #define TC_CBQ_WL_ACTIVE 0x02 /* wireless simulation is active */

  struct cbq_class *defaults[TC_PRIO_MAX+1];
  };
*************** struct cbq_sched_data
*** 174,179 ****
--- 208,219 ----
  };


+ static void cbq_class_wireless_rec_bw(struct cbq_class* );
+ static void cbq_reset_wireless_timer(struct timer_list* , unsigned long );
+ static void cbq_set_wireless_timer(struct cbq_class *);
+ static void cbq_class_wireless_timer(unsigned long );
+ static void cbq_stop_wireless_timer(struct cbq_class* );
+
  #define L2T(cl,len) qdisc_l2t((cl)->R_tab,len)

  static __inline__ struct cbq_class *
*************** cbq_enqueue(struct sk_buff *skb, struct
*** 386,392 ****
  #ifdef CONFIG_NET_CLS_ACT
  cl->q->__parent = sch;
  #endif
! ret = qdisc_enqueue(skb, cl->q);
  if (ret == NET_XMIT_SUCCESS) {
  sch->q.qlen++;
  sch->bstats.packets++;
--- 426,441 ----
  #ifdef CONFIG_NET_CLS_ACT
  cl->q->__parent = sch;
  #endif
! if (cl->wireless_s&TC_CBQ_WL_ACTIVE && (cl->wireless_wt <= 0)) {
! kfree_skb(skb);
! ret = NET_XMIT_DROP;
! } else {
! ret = qdisc_enqueue(skb, cl->q);
! if (cl->wireless_s&TC_CBQ_WL_ACTIVE) {
! cl->wireless_wt -= qdisc_pkt_len(skb);
! cl->wireless_a  += qdisc_pkt_len(skb);
! }
! }
  if (ret == NET_XMIT_SUCCESS) {
  sch->q.qlen++;
  sch->bstats.packets++;
*************** static int cbq_set_lss(struct cbq_class
*** 1242,1247 ****
--- 1291,1299 ----
  if (lss->change&TCF_CBQ_LSS_FLAGS) {
  cl->share = (lss->flags&TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent;
  cl->borrow = (lss->flags&TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent;
+ cl->wireless_s = (lss->flags&TCF_CBQ_LSS_WLS) ? TC_CBQ_WL_SIM : 0;
+ if (cl->wireless_s&TC_CBQ_WL_SIM)
+ printk(KERN_ERR "cbq_set_lss: wireless simulation flag TC_CBQ_WL_SIM set on class 0x%p\n", cl);
  }
  if (lss->change&TCF_CBQ_LSS_EWMA)
  cl->ewma_log = lss->ewma_log;
*************** static void cbq_destroy_class(struct Qdi
*** 1679,1684 ****
--- 1731,1741 ----
  tcf_destroy_chain(&cl->filter_list);
  qdisc_destroy(cl->q);
  qdisc_put_rtab(cl->R_tab);
+ if (cl->wireless_s&TC_CBQ_WL_SIM)  {
+ /* stopping wireless timer, if installed */
+ cbq_stop_wireless_timer(cl);
+ printk(KERN_ERR "cbq_destroy_class: destroying timer for class 0x%p\n", cl);
+ }
  gen_kill_estimator(&cl->bstats, &cl->rate_est);
  if (cl != &q->link)
  kfree(cl);
*************** static void cbq_put(struct Qdisc *sch, u
*** 1731,1736 ****
--- 1788,1849 ----
  }
  }

+ static void
+ cbq_class_wireless_rec_bw(struct cbq_class* cl) {
+
+ cbq_class_wl_stats_t *wl_stat = &cl->wireless_stats;
+
+ if (!wl_stat->wireless_min || !wl_stat->wireless_max) {
+ wl_stat->wireless_min = wl_stat->wireless_max = cl->wireless_a;
+ } else if (cl->wireless_a < wl_stat->wireless_min) {
+ wl_stat->wireless_min = cl->wireless_a;
+ } else if (cl->wireless_a > wl_stat->wireless_max) {
+ wl_stat->wireless_max = cl->wireless_a;
+ }
+ wl_stat->wireless_avg = (wl_stat->wireless_avg + cl->wireless_a)/2;
+ }
+
+ static void
+ cbq_class_wireless_timer(unsigned long data) {
+
+ struct cbq_class *cl = (struct cbq_class*)data;
+
+ cl->wireless_wt = cl->wireless_b;
+ /* average out achieved bandwidth for the class */
+ cbq_class_wireless_rec_bw(cl);
+ /*
+ * wireless_a should be incremented by packet length in
+ * enqueue routine when we are enqueuing the packet on the
+ * class queue.
+ */
+ cl->wireless_a = 0;
+ /* reset wireless timer to expire after 1 second */
+ cbq_reset_wireless_timer(&cl->wl_timer, (jiffies + HZ));
+ }
+
+ static void
+ cbq_set_wireless_timer(struct cbq_class *cl) {
+
+ setup_timer(&cl->wl_timer, &cbq_class_wireless_timer, (unsigned long)cl);
+ }
+
+ static void
+ cbq_reset_wireless_timer(struct timer_list* timer, unsigned long expires) {
+
+ mod_timer(timer, expires);
+ }
+
+ static void
+ cbq_stop_wireless_timer(struct cbq_class* cl) {
+
+ struct timer_list* timer = &cl->wl_timer;
+
+ if (timer_pending(timer) && del_timer(timer))
+ printk(KERN_ERR "deleted wireless timer for the class 0x%p\n", cl);
+ else
+ printk(KERN_ERR "no wireless timer installed for the class 0x%p\n", cl);
+ }
+
  static int
  cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **tca,
  unsigned long *arg)
*************** cbq_change_class(struct Qdisc *sch, u32
*** 1787,1792 ****
--- 1900,1909 ----
  if (rtab) {
  qdisc_put_rtab(cl->R_tab);
  cl->R_tab = rtab;
+ if (cl->wireless_s&TC_CBQ_WL_SIM) {
+ cl->wireless_b = cl->R_tab->rate.rate;
+ printk(KERN_ERR "cbq_change_class: for wireless simulation, rate for class 0x%p is changed to %ld\n", cl, cl->wireless_b);
+ }
  }

  if (tb[TCA_CBQ_LSSOPT])
*************** cbq_change_class(struct Qdisc *sch, u32
*** 1882,1887 ****
--- 1999,2005 ----
  cl->allot = parent->allot;
  cl->quantum = cl->allot;
  cl->weight = cl->R_tab->rate.rate;
+ cl->wireless_b = cl->R_tab->rate.rate;

  sch_tree_lock(sch);
  cbq_link_class(cl);
*************** cbq_change_class(struct Qdisc *sch, u32
*** 1891,1896 ****
--- 2009,2019 ----
  cbq_adjust_levels(parent);
  cl->minidle = -0x7FFFFFFF;
  cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
+ if (cl->wireless_s&TC_CBQ_WL_SIM)  {
+ /* initialise wireless simulation timer here */
+ cbq_set_wireless_timer(cl);
+ printk(KERN_ERR "for wireless simulation, rate is set to %ld\n", cl->wireless_b);
+ }
  cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
  if (cl->ewma_log==0)
  cl->ewma_log = q->link.ewma_log;
*************** static unsigned long cbq_bind_filter(str
*** 1987,1992 ****
--- 2110,2122 ----
  if (p && p->level <= cl->level)
  return 0;
  cl->filters++;
+ if (cl->wireless_s&TC_CBQ_WL_SIM) {
+ cl->wireless_s |= TC_CBQ_WL_ACTIVE;
+ /* need to start the timer here */
+ cbq_reset_wireless_timer(&cl->wl_timer, jiffies + HZ);
+ printk(KERN_ERR "cbq_bind_filter(): wireless is set on the class 0x%p\n", cl);
+ }
+
  return (unsigned long)cl;
  }
  return 0;

*#*#*#*#******#*#*#*#******#*#*#*#******#*#*#*#******#*#*#*#


The following changes are for the TCP Westwood implementation, to asynchronously notify the application of fluctuations in the bandwidth -
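
For illustration, a user-space consumer of these notifications might look like the sketch below: it waits for the POLLPRI event that the patched tcp_poll() raises when new bandwidth records are ready, then drains the per-connection queue with the new TCP_WESTWOOD_DATA_GET socket option. The structure layouts here merely mirror the patched <linux/tcp.h>; a real program would include the patched header instead of redefining them.

#include <poll.h>
#include <stdio.h>
#include <stdint.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <netinet/in.h>

#define TCP_WESTWOOD_DATA_GET	16	/* socket option added by the patch */
#define TCP_WW_BW_DATA_MAX	10

struct ww_bw_rec {			/* must match tcp_ww_bw_data_rec_t */
	uint32_t ww_bw_est, ww_bw_smooth, ww_bw_raw;
	uint32_t ww_bw_rtt, ww_bw_rtt_usec, ww_bw_rtt_raw, ww_bw_rtt_min;
	uint32_t ww_bw_cwnd, ww_bytes, ww_usec_window;
	uint32_t ww_us_raw, ww_us_ff, ww_us_sf;
	uint32_t ww_curr_rtt, ww_rtt_snd_una;
	uint32_t ww_bw_ticks, ww_bw_conn_ticks;
	struct timeval ww_bw_ts;
};

struct ww_bw_batch {			/* must match tcp_ww_bw_batch_data_t */
	struct ww_bw_rec rec[TCP_WW_BW_DATA_MAX];
	int count;			/* number of records filled in by the kernel */
	int more;			/* non-zero if more records are still queued */
};

/* Wait (up to 1s) for the POLLPRI notification, then drain the queue. */
static void read_bw_samples(int fd)
{
	struct pollfd pfd = { .fd = fd, .events = POLLPRI };
	struct ww_bw_batch batch;
	socklen_t len;
	int i;

	if (poll(&pfd, 1, 1000) <= 0 || !(pfd.revents & POLLPRI))
		return;
	do {
		len = sizeof(batch);
		if (getsockopt(fd, IPPROTO_TCP, TCP_WESTWOOD_DATA_GET,
			       &batch, &len) < 0) {
			perror("getsockopt(TCP_WESTWOOD_DATA_GET)");
			return;
		}
		for (i = 0; i < batch.count; i++)
			printf("bw_est=%u bw_smooth=%u cwnd=%u rtt=%u\n",
			       batch.rec[i].ww_bw_est, batch.rec[i].ww_bw_smooth,
			       batch.rec[i].ww_bw_cwnd, batch.rec[i].ww_curr_rtt);
	} while (batch.more);
}

Note that POLLPRI is also raised for TCP urgent data, so a real application may want to distinguish the two cases.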

*#*#*#*#******#*#*#*#******#*#*#*#******#*#*#*#******#*#*#*#


diff -c -p -r ../linux-2.6.33.3/include/linux/tcp.h ./include/linux/tcp.h
*** ../linux-2.6.33.3/include/linux/tcp.h 2010-04-26 20:18:30.000000000 +0530
--- ./include/linux/tcp.h 2010-09-05 21:03:01.000000000 +0530
***************
*** 20,25 ****
--- 20,26 ----
  #include
  #include
  #include
+ #include

  struct tcphdr {
  __be16 source;
*************** enum {
*** 103,108 ****
--- 104,110 ----
  #define TCP_CONGESTION 13 /* Congestion control algorithm */
  #define TCP_MD5SIG 14 /* TCP MD5 Signature (RFC2385) */
  #define TCP_COOKIE_TRANSACTIONS 15 /* TCP Cookie Transactions */
+ #define TCP_WESTWOOD_DATA_GET   16

  /* for TCP_INFO socket option */
  #define TCPI_OPT_TIMESTAMPS 1
*************** static inline struct tcp_request_sock *t
*** 286,291 ****
--- 288,371 ----
  return (struct tcp_request_sock *)req;
  }

+ /*
+  * westwood bandwidth data-structures start here
+  */
+
+ typedef struct tcp_ww_bw_data_rec_ {
+   u32 ww_bw_est;                   /* smoothened bandwidth after low-pass filter */
+   u32 ww_bw_smooth;                   /* smoothened bandwidth after single (first-stage) filter */
+   u32   ww_bw_raw;                   /* bandwidth calculated as such without low-pass filter */
+   u32   ww_bw_rtt;                   /* smoothened rtt as calculated by TCP */
+   u32   ww_bw_rtt_usec;
+   u32   ww_bw_rtt_raw;               /* raw rtt as calculated by TCP without smoothening */
+   u32   ww_bw_rtt_min;               /* minimum rtt till now, used to calculate congestion bw */
+   u32   ww_bw_cwnd;                  /* congestion window */
+   u32   ww_bytes;
+   u32   ww_usec_window;
+   u32   ww_us_raw;
+   u32   ww_us_ff;
+   u32   ww_us_sf;
+   u32   ww_curr_rtt;
+   u32   ww_rtt_snd_una;
+   u32   ww_bw_ticks;                 /* value of jiffies when the reading was taken */
+   u32   ww_bw_conn_ticks;            /* value of jiffies when we got the connection*/
+   struct timeval ww_bw_ts;                  /* timestamp when bandwidth reading was taken */
+ } tcp_ww_bw_data_rec_t;
+
+ /* ww bw data for user */
+ typedef struct tcp_ww_bw_data_ {
+   struct tcp_ww_bw_data_  *tcp_ww_bw_next; /* next element on the queue */
+   tcp_ww_bw_data_rec_t tcp_ww_bw_data;
+ } tcp_ww_bw_data_t;
+
+ /* ww bw data queue per connection, every bw estimate goes here */
+ #define TCP_WW_BW_MAX_QLEN 100
+
+ typedef struct tcp_ww_bw_data_q_ {
+
+ tcp_ww_bw_data_t *tcp_ww_bw_head; /* head */
+ tcp_ww_bw_data_t *tcp_ww_bw_tail; /* tail */
+ int  tcp_ww_bw_len;  /* length of the queue */
+ int  tcp_ww_bw_max_len; /* max queue len */
+ } tcp_ww_bw_data_q_t;
+
+ /* array containing ww bw info, passed to the user */
+ #define TCP_WW_BW_DATA_MAX 10
+
+ typedef struct tcp_ww_bw_batch_data_{
+ tcp_ww_bw_data_rec_t tcp_ww_bw_data_rec[TCP_WW_BW_DATA_MAX];
+ int tcp_ww_bw_count; /* kernel sets the number of records read */
+ int tcp_ww_bw_more; /* kernel sets this, if we have more records to be read */
+ } tcp_ww_bw_batch_data_t;
+
+
+ #define TCP_IS_WW_SET(tp) \
+ (tp->tcp_ww_bw_flags & TCP_WESTWOOD_TRUE)
+ #define TCP_SET_WW(tp) \
+ (tp->tcp_ww_bw_flags |= TCP_WESTWOOD_TRUE)
+ #define TCP_WW_BW_IS_NEW_DATA(tp) \
+ (tp->tcp_ww_bw_flags & TCP_WESTWOOD_NEW_DATA)
+ #define TCP_WW_BW_IS_DATA_PENDING(tp) \
+ (tp->tcp_ww_bw_flags & TCP_WESTWOOD_DATA_PENDING)
+ #define TCP_WW_BW_IS_DATA_RDY(tp) \
+ (tp->tcp_ww_bw_flags & TCP_WESTWOOD_DATA_READY)
+ #define TCP_WW_BW_CLR_NEW_DATA(tp) \
+ (tp->tcp_ww_bw_flags &= ~(TCP_WESTWOOD_NEW_DATA))
+ #define TCP_WW_BW_CLR_DATA_PENDING(tp) \
+ (tp->tcp_ww_bw_flags &= ~(TCP_WESTWOOD_DATA_PENDING))
+ #define TCP_WW_BW_CLR_DATA_RDY(tp) \
+ (tp->tcp_ww_bw_flags &= ~(TCP_WESTWOOD_DATA_READY))
+ #define TCP_WW_BW_CLR_ALL(tp) \
+ (tp->tcp_ww_bw_flags &= ~(TCP_WESTWOOD_DATA_READY|TCP_WESTWOOD_DATA_PENDING|TCP_WESTWOOD_NEW_DATA))
+ #define TCP_WW_BW_SET_ALL(tp) \
+ (tp->tcp_ww_bw_flags |= (TCP_WESTWOOD_DATA_READY|TCP_WESTWOOD_DATA_PENDING|TCP_WESTWOOD_NEW_DATA))
+ /*
+  * westwood bandwidth data-structures end here
+  */
+
  struct tcp_sock {
  /* inet_connection_sock has to be the first member of tcp_sock */
  struct inet_connection_sock inet_conn;
*************** struct tcp_sock {
*** 343,348 ****
--- 423,429 ----
  u8 nonagle; /* Disable Nagle algorithm?             */

  /* RTT measurement */
+ u32 rrtt; /* raw round trip time from last measurement */
  u32 srtt; /* smoothed round trip time << 3 */
  u32 mdev; /* medium deviation */
  u32 mdev_max; /* maximal mdev for the last rtt period */
*************** struct tcp_sock {
*** 351,356 ****
--- 432,441 ----

  u32 packets_out; /* Packets which are "in flight" */
  u32 retrans_out; /* Retransmitted packets out */
+ u32 tcp_conn_ts; /*
+ * recorded timestamp (jiffies -> tcp_time_stamp)
+ * when connection was established
+ */

  u16 urg_data; /* Saved octet of OOB data and control flags */
  u8 ecn_flags; /* ECN status bits. */
*************** struct tcp_sock {
*** 416,421 ****
--- 501,512 ----
  u32 total_retrans; /* Total retransmits for entire connection */

  u32 urg_seq; /* Seq of received urgent pointer */
+ u32 tcp_ww_bw_flags;
+ #define TCP_WESTWOOD_NEW_DATA 0x01
+ #define TCP_WESTWOOD_DATA_PENDING 0x02
+ #define TCP_WESTWOOD_DATA_READY 0x04
+ #define TCP_WESTWOOD_TRUE 0x08
+
  unsigned int keepalive_time;  /* time before keep alive takes place */
  unsigned int keepalive_intvl;  /* time interval between keep alive probes */

*************** struct tcp_sock {
*** 448,453 ****
--- 539,545 ----
  /* TCP MD5 Signature Option information */
  struct tcp_md5sig_info *md5sig_info;
  #endif
+ tcp_ww_bw_data_q_t tcp_ww_bw_q;

  /* When the cookie options are generated and exchanged, then this
  * object holds a reference to them (cookie_values->kref).  Also
*************** struct tcp_sock {
*** 456,461 ****
--- 548,559 ----
  struct tcp_cookie_values  *cookie_values;
  };

+  
+ int tcp_ww_bw_add_rec(tcp_ww_bw_data_q_t *, tcp_ww_bw_data_t *);
+ int tcp_ww_bw_rem_rec(tcp_ww_bw_data_q_t *);
+ void tcp_ww_bw_purge_q(struct tcp_sock *);
+ int tcp_ww_bw_get_rec(struct tcp_sock *, tcp_ww_bw_batch_data_t *);
+
  static inline struct tcp_sock *tcp_sk(const struct sock *sk)
  {
  return (struct tcp_sock *)sk;
diff -c -p -r ../linux-2.6.33.3/include/net/inet_connection_sock.h ./include/net/inet_connection_sock.h
*** ../linux-2.6.33.3/include/net/inet_connection_sock.h 2010-04-26 20:18:30.000000000 +0530
--- ./include/net/inet_connection_sock.h 2010-09-05 20:40:50.000000000 +0530
*************** struct inet_connection_sock {
*** 125,132 ****
  /* Information on the current probe. */
  int  probe_size;
  } icsk_mtup;
! u32  icsk_ca_priv[16];
! #define ICSK_CA_PRIV_SIZE (16 * sizeof(u32))
  };

  #define ICSK_TIME_RETRANS 1 /* Retransmit timer */
--- 125,134 ----
  /* Information on the current probe. */
  int  probe_size;
  } icsk_mtup;
! #define ICSK_CA_PRIV_SIZE_ARR_SZ 18
! //u32  icsk_ca_priv[16];
! u32  icsk_ca_priv[ICSK_CA_PRIV_SIZE_ARR_SZ];
! #define ICSK_CA_PRIV_SIZE (ICSK_CA_PRIV_SIZE_ARR_SZ * sizeof(u32))
  };

  #define ICSK_TIME_RETRANS 1 /* Retransmit timer */
diff -c -p -r ../linux-2.6.33.3/include/net/tcp.h ./include/net/tcp.h
*** ../linux-2.6.33.3/include/net/tcp.h 2010-04-26 20:18:30.000000000 +0530
--- ./include/net/tcp.h 2010-07-16 02:07:53.000000000 +0530
*************** static inline void tcp_set_ca_state(stru
*** 736,744 ****
--- 736,750 ----
  static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
  {
  const struct inet_connection_sock *icsk = inet_csk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);

  if (icsk->icsk_ca_ops->cwnd_event)
  icsk->icsk_ca_ops->cwnd_event(sk, event);
+ if (TCP_WW_BW_IS_NEW_DATA(tp)) {
+ if (!sock_flag(sk, SOCK_DEAD))
+ sk->sk_data_ready(sk, 0);
+ TCP_WW_BW_CLR_NEW_DATA(tp);
+ }
  }

  /* These functions determine how the current flow behaves in respect of SACK
diff -c -p -r ../linux-2.6.33.3/net/core/sock.c ./net/core/sock.c
*** ../linux-2.6.33.3/net/core/sock.c 2010-04-26 20:18:30.000000000 +0530
--- ./net/core/sock.c 2010-09-06 23:51:05.000000000 +0530
*************** void __init sk_init(void)
*** 1245,1251 ****
  sysctl_wmem_default = 32767;
  sysctl_rmem_default = 32767;
  } else if (totalram_pages >= 131072) {
! sysctl_wmem_max = 131071;
  sysctl_rmem_max = 131071;
  }
  }
--- 1245,1252 ----
  sysctl_wmem_default = 32767;
  sysctl_rmem_default = 32767;
  } else if (totalram_pages >= 131072) {
! // sysctl_wmem_max = 131071;
! sysctl_wmem_max = 5*128*1024;
  sysctl_rmem_max = 131071;
  }
  }
diff -c -p -r ../linux-2.6.33.3/net/ipv4/tcp.c ./net/ipv4/tcp.c
*** ../linux-2.6.33.3/net/ipv4/tcp.c 2010-04-26 20:18:30.000000000 +0530
--- ./net/ipv4/tcp.c 2010-07-16 02:05:48.000000000 +0530
*************** unsigned int tcp_poll(struct file *file,
*** 456,461 ****
--- 456,467 ----

  if (tp->urg_data & TCP_URG_VALID)
  mask |= POLLPRI;
+ lock_sock(sk);
+ if (TCP_WW_BW_IS_DATA_RDY (tp)) {
+ mask |= POLLPRI;
+ TCP_WW_BW_CLR_DATA_RDY (tp);
+ }
+ release_sock(sk);
  }
  return mask;
  }
*************** void tcp_close(struct sock *sk, long tim
*** 1870,1875 ****
--- 1876,1882 ----
  struct sk_buff *skb;
  int data_was_unread = 0;
  int state;
+ struct tcp_sock *tp = tcp_sk(sk);

  lock_sock(sk);
  sk->sk_shutdown = SHUTDOWN_MASK;
*************** void tcp_close(struct sock *sk, long tim
*** 1895,1900 ****
--- 1902,1908 ----
  }

  sk_mem_reclaim(sk);
+ tcp_ww_bw_purge_q(tp);

  /* As outlined in RFC 2525, section 2.17, we send a RST here because
  * data was lost. To witness the awful effects of the old behavior of
*************** static int do_tcp_getsockopt(struct sock
*** 2465,2470 ****
--- 2473,2479 ----
  struct inet_connection_sock *icsk = inet_csk(sk);
  struct tcp_sock *tp = tcp_sk(sk);
  int val, len;
+ printk (KERN_INFO "tcp_sock_opts: entered\n");

  if (get_user(len, optlen))
  return -EFAULT;
*************** static int do_tcp_getsockopt(struct sock
*** 2525,2530 ****
--- 2534,2559 ----
  return -EFAULT;
  return 0;
  }
+ case TCP_WESTWOOD_DATA_GET: {
+
+ tcp_ww_bw_batch_data_t data;
+
+ memset (&data, 0x00, sizeof(data));
+ printk (KERN_INFO "tcp_sock_opts: entered\n");
+
+ if (get_user(len, optlen))
+ return -EFAULT;
+ lock_sock(sk);
+ tcp_ww_bw_get_rec(tp, &data);
+ release_sock(sk);
+ len = min_t(unsigned int, len, sizeof(data));
+ if (put_user(len, optlen))
+ return -EFAULT;
+ if (copy_to_user(optval, &data, len))
+ return -EFAULT;
+ printk (KERN_INFO "tcp_sock_opts: returning safely\n");
+ return 0;
+ }
  case TCP_QUICKACK:
  val = !icsk->icsk_ack.pingpong;
  break;
*************** int tcp_getsockopt(struct sock *sk, int
*** 2590,2595 ****
--- 2619,2625 ----
  {
  struct inet_connection_sock *icsk = inet_csk(sk);

+ printk (KERN_INFO "tcp_getsockopt: entered\n");
  if (level != SOL_TCP)
  return icsk->icsk_af_ops->getsockopt(sk, level, optname,
      optval, optlen);
diff -c -p -r ../linux-2.6.33.3/net/ipv4/tcp_input.c ./net/ipv4/tcp_input.c
*** ../linux-2.6.33.3/net/ipv4/tcp_input.c 2010-04-26 20:18:30.000000000 +0530
--- ./net/ipv4/tcp_input.c 2010-08-29 01:19:30.000000000 +0530
*************** static void tcp_rtt_estimator(struct soc
*** 611,616 ****
--- 611,617 ----
  struct tcp_sock *tp = tcp_sk(sk);
  long m = mrtt; /* RTT */

+ tp->rrtt = mrtt;
  /* The following amusing code comes from Jacobson's
  * article in SIGCOMM '88.  Note that rtt and mdev
  * are scaled versions of rtt and mean deviation.
*************** static int tcp_clean_rtx_queue(struct so
*** 3334,3346 ****
  /* High resolution needed and available? */
  if (ca_ops->flags & TCP_CONG_RTT_STAMP &&
     !ktime_equal(last_ackt,
! net_invalid_timestamp()))
  rtt_us = ktime_us_delta(ktime_get_real(),
! last_ackt);
  else if (ca_seq_rtt > 0)
  rtt_us = jiffies_to_usecs(ca_seq_rtt);
  }

  ca_ops->pkts_acked(sk, pkts_acked, rtt_us);
  }
  }
--- 3335,3354 ----
  /* High resolution needed and available? */
  if (ca_ops->flags & TCP_CONG_RTT_STAMP &&
     !ktime_equal(last_ackt,
! net_invalid_timestamp())) {
  rtt_us = ktime_us_delta(ktime_get_real(),
! last_ackt);
! if (TCP_IS_WW_SET(tp)) {
! ca_ops->pkts_acked(sk, 0xffffffff, rtt_us);
! }
! }
  else if (ca_seq_rtt > 0)
  rtt_us = jiffies_to_usecs(ca_seq_rtt);
  }

+ if (TCP_IS_WW_SET(tp) && (ca_seq_rtt > 0)) {
+ rtt_us = jiffies_to_usecs(ca_seq_rtt);
+ }
  ca_ops->pkts_acked(sk, pkts_acked, rtt_us);
  }
  }
*************** discard:
*** 5917,5922 ****
--- 5925,6003 ----
  return 0;
  }

+
+ int
+ tcp_ww_bw_add_rec(tcp_ww_bw_data_q_t *q, tcp_ww_bw_data_t *rec) {
+
+
+ printk (KERN_INFO "tcp_ww_bw_add_rec: q size = %d, max size = %d\n", q->tcp_ww_bw_len, q->tcp_ww_bw_max_len);
+ if (q->tcp_ww_bw_len >= q->tcp_ww_bw_max_len)
+ return (-1);
+ if (!q->tcp_ww_bw_len) {
+ q->tcp_ww_bw_head = q->tcp_ww_bw_tail = rec;
+ } else {
+ q->tcp_ww_bw_tail->tcp_ww_bw_next = rec;
+ q->tcp_ww_bw_tail = rec;
+ }
+ q->tcp_ww_bw_len++;
+ return (0);
+ }
+
+
+ int
+ tcp_ww_bw_rem_rec(tcp_ww_bw_data_q_t *q) {
+
+ tcp_ww_bw_data_t *rec_to_rem = q->tcp_ww_bw_head;
+
+ if (!q->tcp_ww_bw_len)
+ return (0);
+ if (rec_to_rem == q->tcp_ww_bw_tail) {
+ q->tcp_ww_bw_head = q->tcp_ww_bw_tail = NULL;
+ } else {
+ q->tcp_ww_bw_head = rec_to_rem->tcp_ww_bw_next;
+ }
+ q->tcp_ww_bw_len--;
+ kfree(rec_to_rem);
+ return (1);
+ }
+
+ void tcp_ww_bw_purge_q(struct tcp_sock *tp) {
+
+ tcp_ww_bw_data_q_t *q = &tp->tcp_ww_bw_q;
+
+ while (q->tcp_ww_bw_len > 0)
+ tcp_ww_bw_rem_rec(q);
+ TCP_WW_BW_CLR_ALL (tp);
+ return;
+ }
+
+
+ int
+ tcp_ww_bw_get_rec(struct tcp_sock *tp, tcp_ww_bw_batch_data_t *data) {
+
+ tcp_ww_bw_data_q_t *q = &tp->tcp_ww_bw_q;
+ int recs = 0;
+ int size = sizeof(tcp_ww_bw_data_rec_t);
+
+
+ printk (KERN_INFO "sizeof tcp_ww_bw_batch_data_t = %zu, number of recs = %d max length = %d\n", sizeof(*data), q->tcp_ww_bw_len, q->tcp_ww_bw_max_len);
+ while ((recs < TCP_WW_BW_DATA_MAX) && q->tcp_ww_bw_len) {
+ memcpy (&data->tcp_ww_bw_data_rec[recs], &q->tcp_ww_bw_head->tcp_ww_bw_data, size);
+ recs++;
+ tcp_ww_bw_rem_rec(q);
+ }
+ data->tcp_ww_bw_count = recs;
+ if (q->tcp_ww_bw_len)
+ data->tcp_ww_bw_more  = 1;
+ else
+ TCP_WW_BW_CLR_DATA_PENDING (tp);
+ return (1);
+ }
+
+ //EXPORT_SYMBOL(tcp_ww_bw_purge_q);
+ //EXPORT_SYMBOL(tcp_ww_bw_get_rec);
+ //EXPORT_SYMBOL(tcp_ww_bw_rem_rec);
+ EXPORT_SYMBOL(tcp_ww_bw_add_rec);
  EXPORT_SYMBOL(sysctl_tcp_ecn);
  EXPORT_SYMBOL(sysctl_tcp_reordering);
  EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
diff -c -p -r ../linux-2.6.33.3/net/ipv4/tcp_ipv4.c ./net/ipv4/tcp_ipv4.c
*** ../linux-2.6.33.3/net/ipv4/tcp_ipv4.c 2010-04-26 20:18:30.000000000 +0530
--- ./net/ipv4/tcp_ipv4.c 2010-07-15 06:21:00.000000000 +0530
*************** static int tcp_v4_init_sock(struct sock
*** 1856,1861 ****
--- 1856,1866 ----
  tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
  tp->snd_cwnd_clamp = ~0;
  tp->mss_cache = TCP_MSS_DEFAULT;
+ tp->tcp_ww_bw_q.tcp_ww_bw_max_len = TCP_WW_BW_MAX_QLEN;
+   tp->tcp_ww_bw_q.tcp_ww_bw_head = tp->tcp_ww_bw_q.tcp_ww_bw_tail = NULL;
+ tp->tcp_ww_bw_q.tcp_ww_bw_len = 0;
+ tp->tcp_conn_ts = tcp_time_stamp;
+ tp->tcp_ww_bw_flags = 0;

  tp->reordering = sysctl_tcp_reordering;
  icsk->icsk_ca_ops = &tcp_init_congestion_ops;
diff -c -p -r ../linux-2.6.33.3/net/ipv4/tcp_minisocks.c ./net/ipv4/tcp_minisocks.c
*** ../linux-2.6.33.3/net/ipv4/tcp_minisocks.c 2010-04-26 20:18:30.000000000 +0530
--- ./net/ipv4/tcp_minisocks.c 2010-07-14 01:03:49.000000000 +0530
*************** struct sock *tcp_create_openreq_child(st
*** 506,511 ****
--- 506,516 ----
  TCP_ECN_openreq_child(newtp, req);

  TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_PASSIVEOPENS);
+ newtp->tcp_ww_bw_q.tcp_ww_bw_max_len = TCP_WW_BW_MAX_QLEN;
+ newtp->tcp_ww_bw_q.tcp_ww_bw_head = newtp->tcp_ww_bw_q.tcp_ww_bw_tail = NULL;
+ newtp->tcp_ww_bw_q.tcp_ww_bw_len = 0;
+ newtp->tcp_conn_ts = tcp_time_stamp;
+ newtp->tcp_ww_bw_flags = 0;
  }
  return newsk;
  }
diff -c -p -r ../linux-2.6.33.3/net/ipv4/tcp_westwood.c ./net/ipv4/tcp_westwood.c
*** ../linux-2.6.33.3/net/ipv4/tcp_westwood.c 2010-04-26 20:18:30.000000000 +0530
--- ./net/ipv4/tcp_westwood.c 2010-09-08 08:41:59.000000000 +0530
***************
*** 31,50 ****
  struct westwood {
  u32    bw_ns_est;        /* first bandwidth estimation..not too smoothed 8) */
  u32    bw_est;           /* bandwidth estimate */
  u32    rtt_win_sx;       /* here starts a new evaluation... */
  u32    bk;
  u32    snd_una;          /* used for evaluating the number of acked bytes */
  u32    cumul_ack;
  u32    accounted;
  u32    rtt;
  u32    rtt_min;          /* minimum observed RTT */
  u8     first_ack;        /* flag which infers that this is the first ack */
  u8     reset_rtt_min;    /* Reset RTT min to next RTT sample*/
  };


  /* TCP Westwood functions and constants */
! #define TCP_WESTWOOD_RTT_MIN   (HZ/20) /* 50ms */
  #define TCP_WESTWOOD_INIT_RTT  (20*HZ) /* maybe too conservative?! */

  /*
--- 31,61 ----
  struct westwood {
  u32    bw_ns_est;        /* first bandwidth estimation..not too smoothed 8) */
  u32    bw_est;           /* bandwidth estimate */
+ u32    ww_us_raw;
+ u32    ww_us_ff;
+ u32    ww_us_sf;
  u32    rtt_win_sx;       /* here starts a new evaluation... */
  u32    bk;
+ u32    ww_bytes_xmit;
+ u32    ww_delta;
  u32    snd_una;          /* used for evaluating the number of acked bytes */
  u32    cumul_ack;
  u32    accounted;
  u32    rtt;
+ u32    rtt_snd_una;          /* used for evaluating the number of acked bytes */
+ u32    rtt_usec;
  u32    rtt_min;          /* minimum observed RTT */
  u8     first_ack;        /* flag which infers that this is the first ack */
  u8     reset_rtt_min;    /* Reset RTT min to next RTT sample*/
  };

+ ktime_t ww_start_time;
+
+ static int tcp_westwood_queue_rec (struct sock *) ;

  /* TCP Westwood functions and constants */
! //#define TCP_WESTWOOD_RTT_MIN   (HZ/20) /* 50ms */
! #define TCP_WESTWOOD_RTT_MIN   (HZ/100) /* 10ms */
  #define TCP_WESTWOOD_INIT_RTT  (20*HZ) /* maybe too conservative?! */

  /*
*************** static void tcp_westwood_init(struct soc
*** 65,70 ****
--- 76,84 ----
  w->bk = 0;
  w->bw_ns_est = 0;
  w->bw_est = 0;
+ w->ww_us_raw = 0;
+         w->ww_us_ff = 0;
+ w->ww_us_sf = 0;
  w->accounted = 0;
  w->cumul_ack = 0;
  w->reset_rtt_min = 1;
*************** static void tcp_westwood_init(struct soc
*** 72,79 ****
--- 86,137 ----
  w->rtt_win_sx = tcp_time_stamp;
  w->snd_una = tcp_sk(sk)->snd_una;
  w->first_ack = 1;
+ TCP_SET_WW(tcp_sk(sk));
+ ww_start_time = ktime_get_real();
  }

+
+ static int
+ tcp_westwood_queue_rec (struct sock *sk) {
+
+ struct westwood *w = inet_csk_ca(sk);
+ struct tcp_sock *tp  = tcp_sk(sk);
+ tcp_ww_bw_data_q_t *q = &tp->tcp_ww_bw_q;
+ tcp_ww_bw_data_t *rec = NULL;
+
+ //printk (KERN_INFO "tcp_westwood_queue_rec: just entered \n");
+ if (q->tcp_ww_bw_len >= q->tcp_ww_bw_max_len) {
+ printk (KERN_INFO "tcp_westwood_queue_rec: q->tcp_ww_bw_len(%d) >= q->tcp_ww_bw_max_len(%d) \n",
+ q->tcp_ww_bw_len, q->tcp_ww_bw_max_len);
+ return (0);
+ }
+ if (!(rec = kmalloc (sizeof(*rec), GFP_ATOMIC))) {
+ printk (KERN_INFO "tcp_westwood_queue_rec:(no memory) current len =  %d max len = %d \n",
+ q->tcp_ww_bw_len, q->tcp_ww_bw_max_len);
+ return (0);
+ }
+ printk (KERN_INFO "tcp_westwood_queue_rec: q size = %d, max size = %d\n", q->tcp_ww_bw_len, q->tcp_ww_bw_max_len);
+ rec->tcp_ww_bw_data.ww_bw_est = w->bw_est;                   /* smoothened bandwidth after low-pass filter */
+   rec->tcp_ww_bw_data.ww_bw_smooth = w->bw_ns_est;             /* smoothened bandwidth after single filter */
+   rec->tcp_ww_bw_data.ww_bw_rtt = tp->srtt;                   /* smoothened rtt as calculated by TCP */
+   rec->tcp_ww_bw_data.ww_bw_rtt_raw = tp->rrtt;               /* raw rtt as calculated by TCP without smoothening */
+   rec->tcp_ww_bw_data.ww_bw_rtt_min = w->rtt_min;               /* minimum rtt till now, used to calculate congestion bw */
+   rec->tcp_ww_bw_data.ww_bw_cwnd = tp->snd_cwnd;                  /* congestion window */
+ rec->tcp_ww_bw_data.ww_bw_ticks = tcp_time_stamp;          /* value of jiffies when the reading was taken */
+ rec->tcp_ww_bw_data.ww_bw_conn_ticks = tp->tcp_conn_ts;    /* value of jiffies when connection was established */
+ rec->tcp_ww_bw_data.ww_bytes = w->ww_bytes_xmit;
+ rec->tcp_ww_bw_data.ww_usec_window = w->ww_delta;
+ rec->tcp_ww_bw_data.ww_us_raw = w->ww_us_raw;
+ rec->tcp_ww_bw_data.ww_us_ff  = w->ww_us_ff;
+ rec->tcp_ww_bw_data.ww_us_sf  = w->ww_us_sf;
+ rec->tcp_ww_bw_data.ww_curr_rtt = w->rtt;
+ rec->tcp_ww_bw_data.ww_rtt_snd_una = w->rtt_snd_una;
+ rec->tcp_ww_bw_data.ww_bw_rtt_usec = w->rtt_usec;
+ tp->tcp_ww_bw_flags = TCP_WW_BW_SET_ALL (tp);
+
+ tcp_ww_bw_add_rec(q, rec);
+ return (1);
+ }
  /*
   * @westwood_do_filter
   * Low-pass filter. Implemented using constant coefficients.
*************** static inline u32 westwood_do_filter(u32
*** 85,97 ****
--- 143,159 ----

  static void westwood_filter(struct westwood *w, u32 delta)
  {
+
  /* If the filter is empty fill it with the first sample of bandwidth  */
  if (w->bw_ns_est == 0 && w->bw_est == 0) {
  w->bw_ns_est = w->bk / delta;
  w->bw_est = w->bw_ns_est;
+ w->ww_us_ff = w->ww_us_sf = w->ww_us_raw;
  } else {
  w->bw_ns_est = westwood_do_filter(w->bw_ns_est, w->bk / delta);
+ w->ww_us_ff  = westwood_do_filter(w->ww_us_ff, w->ww_us_raw);
  w->bw_est = westwood_do_filter(w->bw_est, w->bw_ns_est);
+ w->ww_us_sf  = westwood_do_filter(w->ww_us_sf, w->ww_us_ff);
  }
  }

*************** static void westwood_filter(struct westw
*** 103,111 ****
  static void tcp_westwood_pkts_acked(struct sock *sk, u32 cnt, s32 rtt)
  {
  struct westwood *w = inet_csk_ca(sk);
!
! if (rtt > 0)
  w->rtt = usecs_to_jiffies(rtt);
  }

  /*
--- 165,180 ----
  static void tcp_westwood_pkts_acked(struct sock *sk, u32 cnt, s32 rtt)
  {
  struct westwood *w = inet_csk_ca(sk);
! struct tcp_sock *tp = tcp_sk(sk);
!
! if (cnt == 0xffffffff) {
! w->rtt_usec = rtt;
! w->rtt_snd_una = tp->snd_una;
! printk(KERN_INFO "tcp_westwood_pkts_acked: rtt in usec: %u and ack seq is %u \n",  w->rtt_usec, tp->snd_una);
! } else if (rtt > 0) {
  w->rtt = usecs_to_jiffies(rtt);
+ printk(KERN_INFO "tcp_westwood_pkts_acked: rtt in jiffies: %d\n", w->rtt);
+ }
  }

  /*
*************** static void westwood_update_window(struc
*** 118,123 ****
--- 187,193 ----
  struct westwood *w = inet_csk_ca(sk);
  s32 delta = tcp_time_stamp - w->rtt_win_sx;

+ //printk (KERN_INFO "tcp_ca_event: entered \n");
  /* Initialize w->snd_una with the first acked sequence number in order
  * to fix mismatch between tp->snd_una and w->snd_una for the first
  * bandwidth sample
*************** static void westwood_update_window(struc
*** 137,146 ****
  * right_bound = left_bound + WESTWOOD_RTT_MIN
  */
  if (w->rtt && delta > max_t(u32, w->rtt, TCP_WESTWOOD_RTT_MIN)) {
- westwood_filter(w, delta);

  w->bk = 0;
  w->rtt_win_sx = tcp_time_stamp;
  }
  }

--- 207,222 ----
  * right_bound = left_bound + WESTWOOD_RTT_MIN
  */
  if (w->rtt && delta > max_t(u32, w->rtt, TCP_WESTWOOD_RTT_MIN)) {

+ w->ww_bytes_xmit = w->bk;
+ w->ww_delta = ktime_to_us(ktime_sub(ktime_get_real(), ww_start_time));
+ w->ww_us_raw = ((w->ww_bytes_xmit*8)*(1000000/w->ww_delta))/1024;
+ westwood_filter(w, delta);
  w->bk = 0;
  w->rtt_win_sx = tcp_time_stamp;
+ ww_start_time = ktime_get_real();
+ //printk (KERN_INFO "tcp_ca_event: going to enter tcp_westwood_queue_rec\n");
+ tcp_westwood_queue_rec (sk);
  }
  }

*************** static void tcp_westwood_event(struct so
*** 227,232 ****
--- 303,309 ----
  struct tcp_sock *tp = tcp_sk(sk);
  struct westwood *w = inet_csk_ca(sk);

+ //printk (KERN_INFO "tcp_westwood_event: event = %d\n", event);
  switch (event) {
  case CA_EVENT_FAST_ACK:
  westwood_fast_bw(sk);
*************** static void tcp_westwood_info(struct soc
*** 273,278 ****
--- 350,356 ----


  static struct tcp_congestion_ops tcp_westwood = {
+ .flags          = TCP_CONG_RTT_STAMP,
  .init = tcp_westwood_init,
  .ssthresh = tcp_reno_ssthresh,
  .cong_avoid = tcp_reno_cong_avoid,
***************
*** 1 ****

*#*#*#*#******#*#*#*#******#*#*#*#******#*#*#*#******#*#*#*#

All the work that I'm posting here may not be of great help in general, but it may surely be a step ahead for beginners who want to sneak into the kernel implementation. Try it and let me know if it helps in any way. Njoy!!