[openwrt/svn-archive/archive.git] / package/d80211/src/wme.c
/*
 * Copyright 2004, Instant802 Networks, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/types.h>
#include <net/ip.h>
#include <net/pkt_sched.h>

#include <net/d80211.h>
#include "ieee80211_i.h"
#include "wme.h"

#define CHILD_QDISC_OPS pfifo_qdisc_ops

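/*
 * Note on the magic numbers below: in the 802.11 frame control field,
 * IEEE80211_FCTL_FTYPE is 0x000c and IEEE80211_STYPE_QOS_DATA is 0x0080,
 * so masking with 0x8C keeps just the frame type bits plus the QoS
 * subtype bit, and comparing against 0x88 tests for "data frame with
 * the QoS bit set".
 */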
static inline int WLAN_FC_IS_QOS_DATA(u16 fc)
{
	return (fc & 0x8C) == 0x88;
}


ieee80211_txrx_result
ieee80211_rx_h_parse_qos(struct ieee80211_txrx_data *rx)
{
	u8 *data = rx->skb->data;
	int tid;

	/* does the frame have a qos control field? */
	if (WLAN_FC_IS_QOS_DATA(rx->fc)) {
		u8 *qc = data + ieee80211_get_hdrlen(rx->fc) - QOS_CONTROL_LEN;
		/* frame has qos control */
		tid = qc[0] & QOS_CONTROL_TID_MASK;
	} else {
		if (unlikely((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)) {
			/* Separate TID for management frames */
			tid = NUM_RX_DATA_QUEUES - 1;
		} else {
			/* no qos control present */
			tid = 0; /* 802.1d - Best Effort */
		}
	}
#ifdef CONFIG_D80211_DEBUG_COUNTERS
	I802_DEBUG_INC(rx->local->wme_rx_queue[tid]);
	if (rx->sta) {
		I802_DEBUG_INC(rx->sta->wme_rx_queue[tid]);
	}
#endif /* CONFIG_D80211_DEBUG_COUNTERS */

	rx->u.rx.queue = tid;
	/* Set skb->priority to 1d tag if highest order bit of TID is not set.
	 * For now, set skb->priority to 0 for other cases. */
	rx->skb->priority = (tid > 7) ? 0 : tid;

	return TXRX_CONTINUE;
}


ieee80211_txrx_result
ieee80211_rx_h_remove_qos_control(struct ieee80211_txrx_data *rx)
{
	u16 fc = rx->fc;
	u8 *data = rx->skb->data;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) data;

	if (!WLAN_FC_IS_QOS_DATA(fc))
		return TXRX_CONTINUE;

	/* remove the qos control field, update frame type and meta-data */
	memmove(data + 2, data, ieee80211_get_hdrlen(fc) - 2);
	hdr = (struct ieee80211_hdr *) skb_pull(rx->skb, 2);
	/* change frame type to non QOS */
	rx->fc = fc &= ~IEEE80211_STYPE_QOS_DATA;
	hdr->frame_control = cpu_to_le16(fc);

	return TXRX_CONTINUE;
}


/* maximum number of hardware queues we support. */
#define TC_80211_MAX_QUEUES 8

struct ieee80211_sched_data
{
	struct tcf_proto *filter_list;
	struct Qdisc *queues[TC_80211_MAX_QUEUES];
	struct sk_buff_head requeued[TC_80211_MAX_QUEUES];
};
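/*
 * One child qdisc and one requeue list per hardware queue: frames that
 * come back flagged for requeue are stashed on the matching requeued[]
 * list and are served before the child qdisc on the next dequeue.
 */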


/* given a data frame determine the 802.1p/1d tag to use */
static inline unsigned classify_1d(struct sk_buff *skb, struct Qdisc *qd)
{
	struct iphdr *ip;
	int dscp;
	int offset;

#ifdef CONFIG_NET_SCHED
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct tcf_result res = { -1, 0 };

	/* if there is a user-set filter list, call out to that */
	if (q->filter_list) {
		tc_classify(skb, q->filter_list, &res);
		if (res.class != -1)
			return res.class;
	}
#endif /* CONFIG_NET_SCHED */

	/* skb->priority values from 256..263 are magic values that
	 * directly indicate a specific 802.1d priority.  This is used
	 * to allow 802.1d priority to be passed directly in from VLAN
	 * tags, etc. */
	if (skb->priority >= 256 && skb->priority <= 263)
		return skb->priority - 256;

	/* check that a valid IP header is present */
	offset = ieee80211_get_hdrlen_from_skb(skb) + 8 /* LLC + proto */;
	if (skb->protocol != __constant_htons(ETH_P_IP) ||
	    skb->len < offset + sizeof(*ip))
		return 0;

	ip = (struct iphdr *) (skb->data + offset);

	dscp = ip->tos & 0xfc;
	switch (dscp) {
	case 0x20:
		return 2;
	case 0x40:
		return 1;
	case 0x60:
		return 3;
	case 0x80:
		return 4;
	case 0xa0:
		return 5;
	case 0xc0:
		return 6;
	case 0xe0:
		return 7;
	default:
		return 0;
	}
}
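/*
 * The DSCP switch above recognizes only the class-selector codepoints
 * (TOS values with just the precedence bits set): CS3..CS7 map straight
 * to 802.1d user priorities 3..7, CS1 and CS2 map to 2 and 1 (both rank
 * below Best Effort in the 802.1d ordering), and everything else falls
 * back to Best Effort.
 */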


static inline int wme_downgrade_ac(struct sk_buff *skb)
{
	switch (skb->priority) {
	case 6:
	case 7:
		skb->priority = 5; /* VO -> VI */
		return 0;
	case 4:
	case 5:
		skb->priority = 3; /* VI -> BE */
		return 0;
	case 0:
	case 3:
		skb->priority = 2; /* BE -> BK */
		return 0;
	default:
		return -1;
	}
}
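/*
 * Note: "BE -> BK" above sets skb->priority to 2 because the 802.1d
 * ordering from lowest to highest is 1 < 2 < 0 < 3 < 4 < 5 < 6 < 7,
 * i.e. user priorities 1 and 2 both rank below Best Effort (0).
 */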


/* positive return value indicates which queue to use
 * negative return value indicates to drop the frame */
static inline int classify80211(struct sk_buff *skb, struct Qdisc *qd)
{
	struct ieee80211_local *local = qd->dev->ieee80211_ptr;
	struct ieee80211_tx_packet_data *pkt_data =
		(struct ieee80211_tx_packet_data *) skb->cb;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	unsigned short fc = le16_to_cpu(hdr->frame_control);
	int qos;
	const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };

	/* see if this is a data or a non-data frame */
	if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA)) {
		/* management frames go on the AC_VO queue, but are sent
		 * without QoS control fields */
		return IEEE80211_TX_QUEUE_DATA0;
	}

	if (unlikely(pkt_data->mgmt_iface)) {
		/* Data frames from hostapd (mainly EAPOL) use AC_VO
		 * and they will include QoS control fields if
		 * the target STA is using WME. */
		skb->priority = 7;
		return ieee802_1d_to_ac[skb->priority];
	}

	/* is this a QoS frame? */
	qos = fc & IEEE80211_STYPE_QOS_DATA;

	if (!qos) {
		skb->priority = 0; /* required for correct WPA/11i MIC */
		return ieee802_1d_to_ac[skb->priority];
	}

	/* use the data classifier to determine what 802.1d tag the
	 * data frame has */
	skb->priority = classify_1d(skb, qd);

	/* in case we are a client, verify that ACM is not set for this AC */
	while (unlikely(local->wmm_acm & BIT(skb->priority))) {
		if (wme_downgrade_ac(skb)) {
			/* No AC with lower priority has acm=0,
			 * drop packet. */
			return -1;
		}
	}

	/* look up which queue to use for frames with this 1d tag */
	return ieee802_1d_to_ac[skb->priority];
}
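/*
 * ieee802_1d_to_ac[] above folds the eight 802.1d user priorities into
 * the four WMM access categories, expressed as hardware queue indices
 * with 0 being the highest priority queue: UP 6/7 -> queue 0 (AC_VO),
 * UP 4/5 -> queue 1 (AC_VI), UP 0/3 -> queue 2 (AC_BE) and
 * UP 1/2 -> queue 3 (AC_BK).
 */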


static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd)
{
	struct ieee80211_local *local = qd->dev->ieee80211_ptr;
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct ieee80211_tx_packet_data *pkt_data =
		(struct ieee80211_tx_packet_data *) skb->cb;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	unsigned short fc = le16_to_cpu(hdr->frame_control);
	struct Qdisc *qdisc;
	int err, queue;

	if (pkt_data->requeue) {
		skb_queue_tail(&q->requeued[pkt_data->queue], skb);
		return 0;
	}

	queue = classify80211(skb, qd);

	/* now we know the 1d priority, fill in the QoS header if there
	 * is one */
	if (WLAN_FC_IS_QOS_DATA(fc)) {
		u8 *p = skb->data + ieee80211_get_hdrlen(fc) - 2;
		u8 qos_hdr = skb->priority & QOS_CONTROL_TAG1D_MASK;
		if (local->wifi_wme_noack_test)
			qos_hdr |= QOS_CONTROL_ACK_POLICY_NOACK <<
					QOS_CONTROL_ACK_POLICY_SHIFT;
		/* qos header is 2 bytes, second reserved */
		*p = qos_hdr;
		p++;
		*p = 0;
	}

	if (unlikely(queue >= local->hw.queues)) {
#if 0
		if (net_ratelimit()) {
			printk(KERN_DEBUG "%s - queue=%d (hw does not "
			       "support) -> %d\n",
			       __func__, queue, local->hw.queues - 1);
		}
#endif
		queue = local->hw.queues - 1;
	}

	if (unlikely(queue < 0)) {
		kfree_skb(skb);
		err = NET_XMIT_DROP;
	} else {
		pkt_data->queue = (unsigned int) queue;
		qdisc = q->queues[queue];
		err = qdisc->enqueue(skb, qdisc);
		if (err == NET_XMIT_SUCCESS) {
			qd->q.qlen++;
			qd->bstats.bytes += skb->len;
			qd->bstats.packets++;
			return NET_XMIT_SUCCESS;
		}
	}
	qd->qstats.drops++;
	return err;
}
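/*
 * Summary of the enqueue path above: a frame flagged for requeue goes
 * straight onto the per-queue requeued list and bypasses classification;
 * a negative classify80211() result (every permitted AC has ACM set)
 * drops the frame; and a queue number the hardware does not provide is
 * clamped to the last (lowest priority) queue.
 */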


/* TODO: clean up the cases where master_hard_start_xmit
 * returns non-zero - it shouldn't ever do that.  Once done we
 * can remove this function */
static int wme_qdiscop_requeue(struct sk_buff *skb, struct Qdisc* qd)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct ieee80211_tx_packet_data *pkt_data =
		(struct ieee80211_tx_packet_data *) skb->cb;
	struct Qdisc *qdisc;
	int err;

	/* we recorded which queue to use earlier! */
	qdisc = q->queues[pkt_data->queue];

	if ((err = qdisc->ops->requeue(skb, qdisc)) == 0) {
		qd->q.qlen++;
		return 0;
	}
	qd->qstats.drops++;
	return err;
}


static struct sk_buff *wme_qdiscop_dequeue(struct Qdisc* qd)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct net_device *dev = qd->dev;
	struct ieee80211_local *local = dev->ieee80211_ptr;
	struct ieee80211_hw *hw = &local->hw;
	struct sk_buff *skb;
	struct Qdisc *qdisc;
	int queue;

	/* check all the h/w queues in numeric/priority order */
	for (queue = 0; queue < hw->queues; queue++) {
		/* see if there is room in this hardware queue */
		if (test_bit(IEEE80211_LINK_STATE_XOFF,
			     &local->state[queue]) ||
		    test_bit(IEEE80211_LINK_STATE_PENDING,
			     &local->state[queue]))
			continue;

		/* there is space - try and get a frame */
		skb = skb_dequeue(&q->requeued[queue]);
		if (skb)
			return skb;

		qdisc = q->queues[queue];
		skb = qdisc->dequeue(qdisc);
		if (skb) {
			qd->q.qlen--;
			return skb;
		}
	}
	/* returning NULL here when all the h/w queues are full means we
	 * never need to call netif_stop_queue in the driver */
	return NULL;
}
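/*
 * Dequeue order: hardware queues are polled from 0 (highest priority)
 * upwards, any queue whose IEEE80211_LINK_STATE_XOFF or _PENDING bit is
 * set is skipped, and previously requeued frames are always served
 * before the child qdisc of the same queue.
 */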


static void wme_qdiscop_reset(struct Qdisc* qd)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct ieee80211_local *local = qd->dev->ieee80211_ptr;
	struct ieee80211_hw *hw = &local->hw;
	int queue;

	/* QUESTION: should we have some hardware flush functionality here? */

	for (queue = 0; queue < hw->queues; queue++) {
		skb_queue_purge(&q->requeued[queue]);
		qdisc_reset(q->queues[queue]);
	}
	qd->q.qlen = 0;
}


static void wme_qdiscop_destroy(struct Qdisc* qd)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct ieee80211_local *local = qd->dev->ieee80211_ptr;
	struct ieee80211_hw *hw = &local->hw;
	struct tcf_proto *tp;
	int queue;

	while ((tp = q->filter_list) != NULL) {
		q->filter_list = tp->next;
		tp->ops->destroy(tp);
	}

	for (queue = 0; queue < hw->queues; queue++) {
		skb_queue_purge(&q->requeued[queue]);
		qdisc_destroy(q->queues[queue]);
		q->queues[queue] = &noop_qdisc;
	}
}


/* called whenever parameters are updated on an existing qdisc */
static int wme_qdiscop_tune(struct Qdisc *qd, struct rtattr *opt)
{
	/* struct ieee80211_sched_data *q = qdisc_priv(qd);
	 */
	/* check our options block is the right size */
	/* copy any options to our local structure */
	/* Ignore options block for now - always use static mapping
	struct tc_ieee80211_qopt *qopt = RTA_DATA(opt);

	if (opt->rta_len < RTA_LENGTH(sizeof(*qopt)))
		return -EINVAL;
	memcpy(q->tag2queue, qopt->tag2queue, sizeof(qopt->tag2queue));
	*/
	return 0;
}


/* called during initial creation of qdisc on device */
static int wme_qdiscop_init(struct Qdisc *qd, struct rtattr *opt)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct net_device *dev = qd->dev;
	struct ieee80211_local *local = dev->ieee80211_ptr;
	int queues = local->hw.queues;
	int err = 0, i;

	/* check that this device is an ieee80211 master type device */
	if (dev->type != ARPHRD_IEEE80211)
		return -EINVAL;

	/* check that there is no qdisc currently attached to the device;
	 * this ensures that we will be the root qdisc. (I can't find a better
	 * way to test this explicitly) */
	if (dev->qdisc_sleeping != &noop_qdisc)
		return -EINVAL;

	if (qd->flags & TCQ_F_INGRESS)
		return -EINVAL;

	/* if options were passed in, set them */
	if (opt) {
		err = wme_qdiscop_tune(qd, opt);
	}

	/* create child queues */
	for (i = 0; i < queues; i++) {
		skb_queue_head_init(&q->requeued[i]);
		q->queues[i] = qdisc_create_dflt(qd->dev, &CHILD_QDISC_OPS);
		if (!q->queues[i]) {
			q->queues[i] = &noop_qdisc;
			printk(KERN_ERR "%s child qdisc %i creation failed\n",
			       dev->name, i);
		}
	}

	return err;
}
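/*
 * Each child queue is a plain pfifo (CHILD_QDISC_OPS); if allocating a
 * child qdisc fails we fall back to &noop_qdisc, which drops anything
 * enqueued to that queue.
 */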

static int wme_qdiscop_dump(struct Qdisc *qd, struct sk_buff *skb)
{
	/* struct ieee80211_sched_data *q = qdisc_priv(qd);
	unsigned char *p = skb->tail;
	struct tc_ieee80211_qopt opt;

	memcpy(&opt.tag2queue, q->tag2queue, TC_80211_MAX_TAG + 1);
	RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	*/ return skb->len;
	/*
rtattr_failure:
	skb_trim(skb, p - skb->data);*/
	return -1;
}


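/*
 * Class handling: the qdisc exposes one class per hardware queue.  The
 * minor number of a classid is the queue index plus one, which is why
 * the classop helpers below subtract one from their argument.
 */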
static int wme_classop_graft(struct Qdisc *qd, unsigned long arg,
			     struct Qdisc *new, struct Qdisc **old)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct ieee80211_local *local = qd->dev->ieee80211_ptr;
	struct ieee80211_hw *hw = &local->hw;
	unsigned long queue = arg - 1;

	if (queue >= hw->queues)
		return -EINVAL;

	if (!new)
		new = &noop_qdisc;

	sch_tree_lock(qd);
	*old = q->queues[queue];
	q->queues[queue] = new;
	qdisc_reset(*old);
	sch_tree_unlock(qd);

	return 0;
}


static struct Qdisc *
wme_classop_leaf(struct Qdisc *qd, unsigned long arg)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct ieee80211_local *local = qd->dev->ieee80211_ptr;
	struct ieee80211_hw *hw = &local->hw;
	unsigned long queue = arg - 1;

	if (queue >= hw->queues)
		return NULL;

	return q->queues[queue];
}


static unsigned long wme_classop_get(struct Qdisc *qd, u32 classid)
{
	struct ieee80211_local *local = qd->dev->ieee80211_ptr;
	struct ieee80211_hw *hw = &local->hw;
	unsigned long queue = TC_H_MIN(classid);

	if (queue - 1 >= hw->queues)
		return 0;

	return queue;
}


static unsigned long wme_classop_bind(struct Qdisc *qd, unsigned long parent,
				      u32 classid)
{
	return wme_classop_get(qd, classid);
}


static void wme_classop_put(struct Qdisc *q, unsigned long cl)
{
	/* printk(KERN_DEBUG "entering %s\n", __func__); */
}


static int wme_classop_change(struct Qdisc *qd, u32 handle, u32 parent,
			      struct rtattr **tca, unsigned long *arg)
{
	unsigned long cl = *arg;
	struct ieee80211_local *local = qd->dev->ieee80211_ptr;
	struct ieee80211_hw *hw = &local->hw;
	/* printk(KERN_DEBUG "entering %s\n", __func__); */

	if (cl - 1 > hw->queues)
		return -ENOENT;

	/* TODO: put code to program hardware queue parameters here,
	 * to allow programming from the tc command line */

	return 0;
}


/* we don't support deleting hardware queues;
 * when we add WMM-SA support, TSPECs may be deleted here */
static int wme_classop_delete(struct Qdisc *qd, unsigned long cl)
{
	struct ieee80211_local *local = qd->dev->ieee80211_ptr;
	struct ieee80211_hw *hw = &local->hw;
	/* printk(KERN_DEBUG "entering %s\n", __func__); */

	if (cl - 1 > hw->queues)
		return -ENOENT;
	return 0;
}


static int wme_classop_dump_class(struct Qdisc *qd, unsigned long cl,
				  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct ieee80211_local *local = qd->dev->ieee80211_ptr;
	struct ieee80211_hw *hw = &local->hw;
	/* printk(KERN_DEBUG "entering %s\n", __func__); */

	if (cl - 1 > hw->queues)
		return -ENOENT;
	tcm->tcm_handle = TC_H_MIN(cl);
	tcm->tcm_parent = qd->handle;
	tcm->tcm_info = q->queues[cl - 1]->handle; /* do we need this? */
	return 0;
}


static void wme_classop_walk(struct Qdisc *qd, struct qdisc_walker *arg)
{
	struct ieee80211_local *local = qd->dev->ieee80211_ptr;
	struct ieee80211_hw *hw = &local->hw;
	int queue;
	/* printk(KERN_DEBUG "entering %s\n", __func__); */

	if (arg->stop)
		return;

	for (queue = 0; queue < hw->queues; queue++) {
		if (arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		/* we should return classids for our internal queues here
		 * as well as the external ones */
		if (arg->fn(qd, queue + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}


static struct tcf_proto ** wme_classop_find_tcf(struct Qdisc *qd,
						unsigned long cl)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	/* printk("entering %s\n", __func__); */

	if (cl)
		return NULL;

	return &q->filter_list;
}


/* this qdisc is classful (i.e. it has classes, some of which may have
 * leaf qdiscs attached) - these are the operations on the classes */
static struct Qdisc_class_ops class_ops =
{
	.graft = wme_classop_graft,
	.leaf = wme_classop_leaf,

	.get = wme_classop_get,
	.put = wme_classop_put,
	.change = wme_classop_change,
	.delete = wme_classop_delete,
	.walk = wme_classop_walk,

	.tcf_chain = wme_classop_find_tcf,
	.bind_tcf = wme_classop_bind,
	.unbind_tcf = wme_classop_put,

	.dump = wme_classop_dump_class,
};


/* queueing discipline operations */
static struct Qdisc_ops wme_qdisc_ops =
{
	.next = NULL,
	.cl_ops = &class_ops,
	.id = "ieee80211",
	.priv_size = sizeof(struct ieee80211_sched_data),

	.enqueue = wme_qdiscop_enqueue,
	.dequeue = wme_qdiscop_dequeue,
	.requeue = wme_qdiscop_requeue,
	.drop = NULL, /* drop not needed since we are always the root qdisc */

	.init = wme_qdiscop_init,
	.reset = wme_qdiscop_reset,
	.destroy = wme_qdiscop_destroy,
	.change = wme_qdiscop_tune,

	.dump = wme_qdiscop_dump,
};


void ieee80211_install_qdisc(struct net_device *dev)
{
	struct Qdisc *qdisc;

	qdisc = qdisc_create_dflt(dev, &wme_qdisc_ops);
	if (!qdisc) {
		printk(KERN_ERR "%s: qdisc installation failed\n", dev->name);
		return;
	}

	/* same handle as would be allocated by qdisc_alloc_handle() */
	qdisc->handle = 0x80010000;

	qdisc_lock_tree(dev);
	list_add_tail(&qdisc->list, &dev->qdisc_list);
	dev->qdisc_sleeping = qdisc;
	qdisc_unlock_tree(dev);
}
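/*
 * Once installed (presumably on the wireless master device when the
 * hardware is registered), the qdisc is visible to userspace under the
 * "ieee80211" id, e.g. "tc qdisc show dev wmaster0" (illustrative
 * device name) should list it.
 */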


int ieee80211_wme_register(void)
{
	int err = 0;

#ifdef CONFIG_NET_SCHED
	err = register_qdisc(&wme_qdisc_ops);
#endif
	return err;
}


void ieee80211_wme_unregister(void)
{
#ifdef CONFIG_NET_SCHED
	unregister_qdisc(&wme_qdisc_ops);
#endif
}