[openwrt/svn-archive/archive.git] / package/madwifi/patches/124-linux24_compat.patch
Index: madwifi-trunk-r3314/ath/if_athvar.h
===================================================================
--- madwifi-trunk-r3314.orig/ath/if_athvar.h
+++ madwifi-trunk-r3314/ath/if_athvar.h
@@ -126,6 +126,11 @@
 #define ATH_GET_NETDEV_DEV(ndev) ((ndev)->class_dev.dev)
 #endif
 
+#ifndef NETDEV_TX_OK
+#define NETDEV_TX_OK 0
+#define NETDEV_TX_BUSY 1
+#endif
+
 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23)
 static inline struct net_device *_alloc_netdev(int sizeof_priv, const char *mask,
 	void (*setup)(struct net_device *))
Index: madwifi-trunk-r3314/ath/if_ath_radar.c
===================================================================
--- madwifi-trunk-r3314.orig/ath/if_ath_radar.c
+++ madwifi-trunk-r3314/ath/if_ath_radar.c
@@ -92,6 +92,13 @@
 #define nofloat_pct(_value, _pct) \
 	( (_value * (1000 + _pct)) / 1000 )
 
+#ifndef list_for_each_entry_reverse
+#define list_for_each_entry_reverse(pos, head, member) \
+	for (pos = list_entry((head)->prev, typeof(*pos), member); \
+	     prefetch(pos->member.prev), &pos->member != (head); \
+	     pos = list_entry(pos->member.prev, typeof(*pos), member))
+#endif
+
 struct radar_pattern_specification {
 	/* The name of the rule/specification (i.e. what did we detect) */
 	const char *name;
Index: madwifi-trunk-r3314/ath/if_ath.c
===================================================================
--- madwifi-trunk-r3314.orig/ath/if_ath.c
+++ madwifi-trunk-r3314/ath/if_ath.c
@@ -4705,6 +4705,46 @@
 #undef USE_SHPREAMBLE
 }
 
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+	int ret;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	ret = v->counter;
+	if (likely(ret == old))
+		v->counter = new;
+	local_irq_restore(flags);
+
+	return ret;
+}
+
+/**
+ * atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int c, old;
+	c = atomic_read(v);
+	for (;;) {
+		if (unlikely(c == (u)))
+			break;
+		old = atomic_cmpxchg((v), c, c + (a));
+		if (likely(old == c))
+			break;
+		c = old;
+	}
+	return c != (u);
+}
+#endif
+
 /*
  * Generate beacon frame and queue cab data for a VAP.
  */
Index: madwifi-trunk-r3314/net80211/sort.c
===================================================================
--- /dev/null
+++ madwifi-trunk-r3314/net80211/sort.c
@@ -0,0 +1,120 @@
+/*
+ * A fast, small, non-recursive O(nlog n) sort for the Linux kernel
+ *
+ * Jan 23 2005 Matt Mackall <mpm@selenic.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+static void u32_swap(void *a, void *b, int size)
+{
+	u32 t = *(u32 *)a;
+	*(u32 *)a = *(u32 *)b;
+	*(u32 *)b = t;
+}
+
+static void generic_swap(void *a, void *b, int size)
+{
+	char t;
+
+	do {
+		t = *(char *)a;
+		*(char *)a++ = *(char *)b;
+		*(char *)b++ = t;
+	} while (--size > 0);
+}
+
+/**
+ * sort - sort an array of elements
+ * @base: pointer to data to sort
+ * @num: number of elements
+ * @size: size of each element
+ * @cmp: pointer to comparison function
+ * @swap: pointer to swap function or NULL
+ *
+ * This function does a heapsort on the given array. You may provide a
+ * swap function optimized to your element type.
+ *
+ * Sorting time is O(n log n) both on average and worst-case. While
+ * qsort is about 20% faster on average, it suffers from exploitable
+ * O(n*n) worst-case behavior and extra memory requirements that make
+ * it less suitable for kernel use.
+ */
+
+static void sort(void *base, size_t num, size_t size,
+		 int (*cmp)(const void *, const void *),
+		 void (*swap)(void *, void *, int size))
+{
+	/* pre-scale counters for performance */
+	int i = (num/2 - 1) * size, n = num * size, c, r;
+
+	if (!swap)
+		swap = (size == 4 ? u32_swap : generic_swap);
+
+	/* heapify */
+	for ( ; i >= 0; i -= size) {
+		for (r = i; r * 2 + size < n; r = c) {
+			c = r * 2 + size;
+			if (c < n - size && cmp(base + c, base + c + size) < 0)
+				c += size;
+			if (cmp(base + r, base + c) >= 0)
+				break;
+			swap(base + r, base + c, size);
+		}
+	}
+
+	/* sort */
+	for (i = n - size; i >= 0; i -= size) {
+		swap(base, base + i, size);
+		for (r = 0; r * 2 + size < i; r = c) {
+			c = r * 2 + size;
+			if (c < i - size && cmp(base + c, base + c + size) < 0)
+				c += size;
+			if (cmp(base + r, base + c) >= 0)
+				break;
+			swap(base + r, base + c, size);
+		}
+	}
+}
+
+EXPORT_SYMBOL(sort);
+
+#if 0
+/* a simple boot-time regression test */
+
+int cmpint(const void *a, const void *b)
+{
+	return *(int *)a - *(int *)b;
+}
+
+static int sort_test(void)
+{
+	int *a, i, r = 1;
+
+	a = kmalloc(1000 * sizeof(int), GFP_KERNEL);
+	BUG_ON(!a);
+
+	printk("testing sort()\n");
+
+	for (i = 0; i < 1000; i++) {
+		r = (r * 725861) % 6599;
+		a[i] = r;
+	}
+
+	sort(a, 1000, sizeof(int), cmpint, NULL);
+
+	for (i = 0; i < 999; i++)
+		if (a[i] > a[i+1]) {
+			printk("sort() failed!\n");
+			break;
+		}
+
+	kfree(a);
+
+	return 0;
+}
+
+module_init(sort_test);
+#endif