/*
 * linux/drivers/s390/net/qeth_tso.c ($Revision: 1.6 $)
 *
 * qeth TCP Segmentation Offload support.
 *
 * Copyright 2004 IBM Corporation
 *
 *    Author(s): Frank Pavlic <pavlic@de.ibm.com>
 *
 *    $Revision: 1.6 $   $Date: 2005/03/24 09:04:18 $
 *
 */

#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
#include "qeth.h"
#include "qeth_mpc.h"
#include "qeth_tso.h"

/**
 * The skb is already partially prepared: the classic qdio header is in
 * skb->data.  Make room for and push the extended TSO qdio header.
 */
static inline struct qeth_hdr_tso *
qeth_tso_prepare_skb(struct qeth_card *card, struct sk_buff **skb)
{
        int rc = 0;

        QETH_DBF_TEXT(trace, 5, "tsoprsk");
        rc = qeth_realloc_headroom(card, skb, sizeof(struct qeth_hdr_ext_tso));
        if (rc)
                return NULL;

        return qeth_push_skb(card, skb, sizeof(struct qeth_hdr_ext_tso));
}

/**
 * fill header for a TSO packet
 */
static inline void
qeth_tso_fill_header(struct qeth_card *card, struct sk_buff *skb)
{
        struct qeth_hdr_tso *hdr;
        struct tcphdr *tcph;
        struct iphdr *iph;

        QETH_DBF_TEXT(trace, 5, "tsofhdr");

        hdr  = (struct qeth_hdr_tso *) skb->data;
        iph  = skb->nh.iph;
        tcph = skb->h.th;
        /* fix up the header to TSO values ... */
        hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
        /* set values that are fixed for the first approach ... */
        hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
        hdr->ext.imb_hdr_no  = 1;
        hdr->ext.hdr_type    = 1;
        hdr->ext.hdr_version = 1;
        hdr->ext.hdr_len     = 28;
        /* insert the non-fixed values */
        hdr->ext.mss = skb_shinfo(skb)->tso_size;
        hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4);
        hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
                                       sizeof(struct qeth_hdr_tso));
}

/**
 * change some header values as requested by hardware
 */
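/*
 * Note: with TSO the adapter segments the frame itself, so the IP total
 * length/checksum and the TCP checksum of this template header are
 * recomputed per segment by the hardware.  We therefore clear them here
 * and seed tcph->check with the pseudo-header checksum (length 0), which
 * is presumably what the OSA card expects (see the comment below).
 */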
static inline void
qeth_tso_set_tcpip_header(struct qeth_card *card, struct sk_buff *skb)
{
        struct iphdr *iph;
        struct ipv6hdr *ip6h;
        struct tcphdr *tcph;

        iph  = skb->nh.iph;
        ip6h = skb->nh.ipv6h;
        tcph = skb->h.th;

        tcph->check = 0;
        if (skb->protocol == htons(ETH_P_IPV6)) {
                ip6h->payload_len = 0;
                tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
                                               0, IPPROTO_TCP, 0);
                return;
        }
        /* OSA wants us to set these values ... */
        tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
                                         0, IPPROTO_TCP, 0);
        iph->tot_len = 0;
        iph->check = 0;
}

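/**
 * build the complete TSO packet header: push the extended qdio header,
 * fill its layer-3 and TSO parts, and adjust the TCP/IP headers for the
 * hardware.  Returns the header, or NULL if no headroom could be made.
 */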
static inline struct qeth_hdr_tso *
qeth_tso_prepare_packet(struct qeth_card *card, struct sk_buff *skb,
                        int ipv, int cast_type)
{
        struct qeth_hdr_tso *hdr;
        int rc = 0;

        QETH_DBF_TEXT(trace, 5, "tsoprep");

        /* get headroom for the TSO qdio header */
        hdr = (struct qeth_hdr_tso *) qeth_tso_prepare_skb(card, &skb);
        if (hdr == NULL) {
                QETH_DBF_TEXT_(trace, 4, "2err%d", rc);
                return NULL;
        }
        memset(hdr, 0, sizeof(struct qeth_hdr_tso));
        /* fill the first 32 bytes of the qdio header as used
         * FIXME: TSO has two struct members
         * with different names but the same size
         */
        qeth_fill_header(card, &hdr->hdr, skb, ipv, cast_type);
        qeth_tso_fill_header(card, skb);
        qeth_tso_set_tcpip_header(card, skb);
        return hdr;
}

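/**
 * switch the queue to non-packing mode and, if the current buffer already
 * holds packed data, prime it and advance to the next buffer.
 * Returns the number of buffers that now need to be flushed (0 or 1).
 */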
static inline int
qeth_tso_get_queue_buffer(struct qeth_qdio_out_q *queue)
{
        struct qeth_qdio_out_buffer *buffer;
        int flush_cnt = 0;

        QETH_DBF_TEXT(trace, 5, "tsobuf");

        /* force to non-packing */
        if (queue->do_pack)
                queue->do_pack = 0;
        buffer = &queue->bufs[queue->next_buf_to_fill];
        /* get a new buffer if the current one is already in use */
        if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
            (buffer->next_element_to_fill > 0)) {
                atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
                queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
                                          QDIO_MAX_BUFFERS_PER_Q;
                flush_cnt++;
        }
        return flush_cnt;
}

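/**
 * map each page fragment of a nonlinear skb into its own buffer element,
 * flagging all of them as middle fragments except the last one.
 */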
static inline void
__qeth_tso_fill_buffer_frag(struct qeth_qdio_out_buffer *buf,
                            struct sk_buff *skb)
{
        struct skb_frag_struct *frag;
        struct qdio_buffer *buffer;
        int fragno, cnt, element;
        unsigned long addr;

        QETH_DBF_TEXT(trace, 6, "tsfilfrg");

        /* initialize variables ... */
        fragno = skb_shinfo(skb)->nr_frags;
        buffer = buf->buffer;
        element = buf->next_element_to_fill;
        /* fill buffer elements ... */
        for (cnt = 0; cnt < fragno; cnt++) {
                frag = &skb_shinfo(skb)->frags[cnt];
                addr = (page_to_pfn(frag->page) << PAGE_SHIFT) +
                        frag->page_offset;
                buffer->element[element].addr = (char *)addr;
                buffer->element[element].length = frag->size;
                if (cnt < (fragno - 1))
                        buffer->element[element].flags =
                                SBAL_FLAGS_MIDDLE_FRAG;
                else
                        buffer->element[element].flags =
                                SBAL_FLAGS_LAST_FRAG;
                element++;
        }
        buf->next_element_to_fill = element;
}

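/**
 * fill one SBAL with the TSO packet: the first element carries only the
 * qdio and protocol headers, the remaining data follows either as page
 * fragments or as page-sized chunks of the linear skb data.
 * Returns the number of buffers primed (always 1).
 */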
static inline int
qeth_tso_fill_buffer(struct qeth_qdio_out_buffer *buf,
                     struct sk_buff *skb)
{
        int length, length_here, element;
        int hdr_len;
        struct qdio_buffer *buffer;
        struct qeth_hdr_tso *hdr;
        char *data;

        QETH_DBF_TEXT(trace, 3, "tsfilbuf");

        /* increment user count and queue the skb ... */
        atomic_inc(&skb->users);
        skb_queue_tail(&buf->skb_list, skb);

        /* initialize all variables ... */
        buffer = buf->buffer;
        hdr = (struct qeth_hdr_tso *)skb->data;
        hdr_len = sizeof(struct qeth_hdr_tso) + hdr->ext.dg_hdr_len;
        data = skb->data + hdr_len;
        length = skb->len - hdr_len;
        element = buf->next_element_to_fill;
        /* fill the first buffer element with header information only */
        buffer->element[element].addr = skb->data;
        buffer->element[element].length = hdr_len;
        buffer->element[element].flags = SBAL_FLAGS_FIRST_FRAG;
        buf->next_element_to_fill++;

        if (skb_shinfo(skb)->nr_frags > 0) {
                __qeth_tso_fill_buffer_frag(buf, skb);
                goto out;
        }

        /* start filling buffer elements ... */
        element++;
        while (length > 0) {
                /* length_here is the remaining amount of data in this page */
                length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE);
                if (length < length_here)
                        length_here = length;
                buffer->element[element].addr = data;
                buffer->element[element].length = length_here;
                length -= length_here;
                if (!length)
                        buffer->element[element].flags =
                                SBAL_FLAGS_LAST_FRAG;
                else
                        buffer->element[element].flags =
                                SBAL_FLAGS_MIDDLE_FRAG;
                data += length_here;
                element++;
        }
        /* set the buffer to primed ... */
        buf->next_element_to_fill = element;
out:
        atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
        return 1;
}

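/**
 * send a single TSO skb: prepare the headers, take the output queue lock,
 * force non-packing mode, fill one SBAL and flush it to the device.
 */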
int
qeth_tso_send_packet(struct qeth_card *card, struct sk_buff *skb,
                     struct qeth_qdio_out_q *queue, int ipv, int cast_type)
{
        int flush_cnt = 0;
        struct qeth_hdr_tso *hdr;
        struct qeth_qdio_out_buffer *buffer;
        int start_index;

        QETH_DBF_TEXT(trace, 3, "tsosend");

        if (!(hdr = qeth_tso_prepare_packet(card, skb, ipv, cast_type)))
                return -ENOMEM;
        /* check if the skb fits into one SBAL ... */
        if (!(qeth_get_elements_no(card, (void *)hdr, skb)))
                return -EINVAL;
        /* lock the queue, force switching to non-packing and send it ... */
        while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED,
                                       QETH_OUT_Q_LOCKED,
                                       &queue->state));
        start_index = queue->next_buf_to_fill;
        buffer = &queue->bufs[queue->next_buf_to_fill];
        /* check if the card is too busy ... */
        if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
                card->stats.tx_dropped++;
                goto out;
        }
        /* force to non-packing and get a new SBAL if necessary */
        flush_cnt += qeth_tso_get_queue_buffer(queue);
        buffer = &queue->bufs[queue->next_buf_to_fill];
        if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
                card->stats.tx_dropped++;
                goto out;
        }
        flush_cnt += qeth_tso_fill_buffer(buffer, skb);
        queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
                                   QDIO_MAX_BUFFERS_PER_Q;
out:
        atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
        if (flush_cnt)
                qeth_flush_buffers(queue, 0, start_index, flush_cnt);
        /* update statistics */
        card->stats.tx_packets++;
        card->stats.tx_bytes += skb->len;
        return 0;
}