add initial support for the cris architecture used on foxboards to openwrt
[openwrt/staging/dedeckeh.git] target/linux/etrax-2.6/image/e100boot/src/libpcap-0.4/gencode.c
1 /*
2 * Copyright (c) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998
3 * The Regents of the University of California. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that: (1) source code distributions
7 * retain the above copyright notice and this paragraph in its entirety, (2)
8 * distributions including binary code include the above copyright notice and
9 * this paragraph in its entirety in the documentation or other materials
10 * provided with the distribution, and (3) all advertising materials mentioning
11 * features or use of this software display the following acknowledgement:
12 * ``This product includes software developed by the University of California,
13 * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of
14 * the University nor the names of its contributors may be used to endorse
15 * or promote products derived from this software without specific prior
16 * written permission.
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
20 */
21 #ifndef lint
22 static const char rcsid[] =
23 "@(#) $Header: /usr/local/cvs/linux/tools/build/e100boot/libpcap-0.4/gencode.c,v 1.1 1999/08/26 10:05:22 johana Exp $ (LBL)";
24 #endif
25
26 #include <sys/types.h>
27 #include <sys/socket.h>
28 #include <sys/time.h>
29
30 #if __STDC__
31 struct mbuf;
32 struct rtentry;
33 #endif
34
35 #include <net/if.h>
36
37 #include <netinet/in.h>
38 #include <netinet/if_ether.h>
39
40 #include <stdlib.h>
41 #include <memory.h>
#include <string.h>	/* strlen() and strcpy() are used by sdup() */
42 #include <setjmp.h>
43 #if __STDC__
44 #include <stdarg.h>
45 #else
46 #include <varargs.h>
47 #endif
48
49 #include "pcap-int.h"
50
51 #include "ethertype.h"
52 #include "gencode.h"
53 #include "ppp.h"
54 #include <pcap-namedb.h>
55
56 #include "gnuc.h"
57 #ifdef HAVE_OS_PROTO_H
58 #include "os-proto.h"
59 #endif
60
61 #define JMP(c) ((c)|BPF_JMP|BPF_K)
62
63 /* Locals */
64 static jmp_buf top_ctx;
65 static pcap_t *bpf_pcap;
66
67 /* XXX */
68 #ifdef PCAP_FDDIPAD
69 int pcap_fddipad = PCAP_FDDIPAD;
70 #else
71 int pcap_fddipad;
72 #endif
73
74 /* VARARGS */
75 __dead void
76 #if __STDC__
77 bpf_error(const char *fmt, ...)
78 #else
79 bpf_error(fmt, va_alist)
80 const char *fmt;
81 va_dcl
82 #endif
83 {
84 va_list ap;
85
86 #if __STDC__
87 va_start(ap, fmt);
88 #else
89 va_start(ap);
90 #endif
91 if (bpf_pcap != NULL)
92 (void)vsprintf(pcap_geterr(bpf_pcap), fmt, ap);
93 va_end(ap);
94 longjmp(top_ctx, 1);
95 /* NOTREACHED */
96 }
97
98 static void init_linktype(int);
99
100 static int alloc_reg(void);
101 static void free_reg(int);
102
103 static struct block *root;
104
105 /*
106 * We divvy out chunks of memory rather than call malloc each time so
107 * we don't have to worry about leaking memory. It's probably
108 * not a big deal if all this memory was wasted but if this ever
109 * goes into a library that would probably not be a good idea.
110 */
111 #define NCHUNKS 16
112 #define CHUNK0SIZE 1024
113 struct chunk {
114 u_int n_left;
115 void *m;
116 };
117
118 static struct chunk chunks[NCHUNKS];
119 static int cur_chunk;
120
121 static void *newchunk(u_int);
122 static void freechunks(void);
123 static inline struct block *new_block(int);
124 static inline struct slist *new_stmt(int);
125 static struct block *gen_retblk(int);
126 static inline void syntax(void);
127
128 static void backpatch(struct block *, struct block *);
129 static void merge(struct block *, struct block *);
130 static struct block *gen_cmp(u_int, u_int, bpf_int32);
131 static struct block *gen_mcmp(u_int, u_int, bpf_int32, bpf_u_int32);
132 static struct block *gen_bcmp(u_int, u_int, const u_char *);
133 static struct block *gen_uncond(int);
134 static inline struct block *gen_true(void);
135 static inline struct block *gen_false(void);
136 static struct block *gen_linktype(int);
137 static struct block *gen_hostop(bpf_u_int32, bpf_u_int32, int, int, u_int, u_int);
138 static struct block *gen_ehostop(const u_char *, int);
139 static struct block *gen_fhostop(const u_char *, int);
140 static struct block *gen_dnhostop(bpf_u_int32, int, u_int);
141 static struct block *gen_host(bpf_u_int32, bpf_u_int32, int, int);
142 static struct block *gen_gateway(const u_char *, bpf_u_int32 **, int, int);
143 static struct block *gen_ipfrag(void);
144 static struct block *gen_portatom(int, bpf_int32);
145 struct block *gen_portop(int, int, int);
146 static struct block *gen_port(int, int, int);
147 static int lookup_proto(const char *, int);
148 static struct block *gen_proto(int, int, int);
149 static struct slist *xfer_to_x(struct arth *);
150 static struct slist *xfer_to_a(struct arth *);
151 static struct block *gen_len(int, int);
152
153 static void *
154 newchunk(n)
155 u_int n;
156 {
157 struct chunk *cp;
158 int k, size;
159
160 /* XXX Round up to nearest long. */
161 n = (n + sizeof(long) - 1) & ~(sizeof(long) - 1);
162
163 cp = &chunks[cur_chunk];
164 if (n > cp->n_left) {
165 ++cp, k = ++cur_chunk;
166 if (k >= NCHUNKS)
167 bpf_error("out of memory");
168 size = CHUNK0SIZE << k;
169 cp->m = (void *)malloc(size);
170 memset((char *)cp->m, 0, size);
171 cp->n_left = size;
172 if (n > size)
173 bpf_error("out of memory");
174 }
175 cp->n_left -= n;
176 return (void *)((char *)cp->m + cp->n_left);
177 }
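
/*
 * Rough sketch of how the chunk sizes grow, derived from the constants
 * above (NCHUNKS, CHUNK0SIZE) rather than measured on any particular
 * platform: chunk k holds CHUNK0SIZE << k bytes, and chunks[0] starts
 * with n_left == 0, so the first allocation already spills into chunk 1.
 *
 *	chunk 1:  1024 << 1  =  2048 bytes
 *	chunk 2:  1024 << 2  =  4096 bytes
 *	...
 *	chunk 15: 1024 << 15 = 32 MBytes
 *
 * newchunk() reports "out of memory" once cur_chunk reaches NCHUNKS or a
 * single request is larger than the freshly allocated chunk.
 */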
178
179 static void
180 freechunks()
181 {
182 int i;
183
184 cur_chunk = 0;
185 for (i = 0; i < NCHUNKS; ++i)
186 if (chunks[i].m != NULL) {
187 free(chunks[i].m);
188 chunks[i].m = NULL;
189 }
190 }
191
192 /*
193 * A strdup whose allocations are freed after code generation is over.
194 */
195 char *
196 sdup(s)
197 register const char *s;
198 {
199 int n = strlen(s) + 1;
200 char *cp = newchunk(n);
201
202 strcpy(cp, s);
203 return (cp);
204 }
205
206 static inline struct block *
207 new_block(code)
208 int code;
209 {
210 struct block *p;
211
212 p = (struct block *)newchunk(sizeof(*p));
213 p->s.code = code;
214 p->head = p;
215
216 return p;
217 }
218
219 static inline struct slist *
220 new_stmt(code)
221 int code;
222 {
223 struct slist *p;
224
225 p = (struct slist *)newchunk(sizeof(*p));
226 p->s.code = code;
227
228 return p;
229 }
230
231 static struct block *
232 gen_retblk(v)
233 int v;
234 {
235 struct block *b = new_block(BPF_RET|BPF_K);
236
237 b->s.k = v;
238 return b;
239 }
240
241 static inline void
242 syntax()
243 {
244 bpf_error("syntax error in filter expression");
245 }
246
247 static bpf_u_int32 netmask;
248 static int snaplen;
249
250 int
251 pcap_compile(pcap_t *p, struct bpf_program *program,
252 char *buf, int optimize, bpf_u_int32 mask)
253 {
254 extern int n_errors;
255 int len;
256
257 n_errors = 0;
258 root = NULL;
259 bpf_pcap = p;
260 if (setjmp(top_ctx)) {
261 freechunks();
262 return (-1);
263 }
264
265 netmask = mask;
266 snaplen = pcap_snapshot(p);
267
268 lex_init(buf ? buf : "");
269 init_linktype(pcap_datalink(p));
270 (void)pcap_parse();
271
272 if (n_errors)
273 syntax();
274
275 if (root == NULL)
276 root = gen_retblk(snaplen);
277
278 if (optimize) {
279 bpf_optimize(&root);
280 if (root == NULL ||
281 (root->s.code == (BPF_RET|BPF_K) && root->s.k == 0))
282 bpf_error("expression rejects all packets");
283 }
284 program->bf_insns = icode_to_fcode(root, &len);
285 program->bf_len = len;
286
287 freechunks();
288 return (0);
289 }
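
/*
 * A minimal caller-side sketch (not part of this file) of how the
 * compiler above is normally driven; the device name, filter string and
 * netmask below are placeholders:
 *
 *	struct bpf_program fcode;
 *	char ebuf[PCAP_ERRBUF_SIZE];
 *	pcap_t *pd = pcap_open_live("eth0", 68, 1, 1000, ebuf);
 *
 *	if (pd == NULL)
 *		... report ebuf ...
 *	else if (pcap_compile(pd, &fcode, "ip host 10.0.0.1", 1, netmask) < 0 ||
 *	    pcap_setfilter(pd, &fcode) < 0)
 *		... report pcap_geterr(pd) ...
 */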
290
291 /*
292 * Backpatch the blocks in 'list' to 'target'. The 'sense' field indicates
293 * which of the jt and jf fields has been resolved and which is a pointer
294 * back to another unresolved block (or nil). At least one of the fields
295 * in each block is already resolved.
296 */
297 static void
298 backpatch(list, target)
299 struct block *list, *target;
300 {
301 struct block *next;
302
303 while (list) {
304 if (!list->sense) {
305 next = JT(list);
306 JT(list) = target;
307 } else {
308 next = JF(list);
309 JF(list) = target;
310 }
311 list = next;
312 }
313 }
314
315 /*
316 * Merge the lists in b0 and b1, using the 'sense' field to indicate
317 * which of jt and jf is the link.
318 */
319 static void
320 merge(b0, b1)
321 struct block *b0, *b1;
322 {
323 register struct block **p = &b0;
324
325 /* Find end of list. */
326 while (*p)
327 p = !((*p)->sense) ? &JT(*p) : &JF(*p);
328
329 /* Concatenate the lists. */
330 *p = b1;
331 }
332
333 void
334 finish_parse(p)
335 struct block *p;
336 {
337 backpatch(p, gen_retblk(snaplen));
338 p->sense = !p->sense;
339 backpatch(p, gen_retblk(0));
340 root = p->head;
341 }
342
343 void
344 gen_and(b0, b1)
345 struct block *b0, *b1;
346 {
347 backpatch(b0, b1->head);
348 b0->sense = !b0->sense;
349 b1->sense = !b1->sense;
350 merge(b1, b0);
351 b1->sense = !b1->sense;
352 b1->head = b0->head;
353 }
354
355 void
356 gen_or(b0, b1)
357 struct block *b0, *b1;
358 {
359 b0->sense = !b0->sense;
360 backpatch(b0, b1->head);
361 b0->sense = !b0->sense;
362 merge(b1, b0);
363 b1->head = b0->head;
364 }
365
366 void
367 gen_not(b)
368 struct block *b;
369 {
370 b->sense = !b->sense;
371 }
372
373 static struct block *
374 gen_cmp(offset, size, v)
375 u_int offset, size;
376 bpf_int32 v;
377 {
378 struct slist *s;
379 struct block *b;
380
381 s = new_stmt(BPF_LD|BPF_ABS|size);
382 s->s.k = offset;
383
384 b = new_block(JMP(BPF_JEQ));
385 b->stmts = s;
386 b->s.k = v;
387
388 return b;
389 }
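
/*
 * Illustration, not the output of any particular build: a call such as
 * gen_cmp(off_linktype, BPF_H, ETHERTYPE_IP) on Ethernet corresponds to
 * the two BPF instructions
 *
 *	ldh [12]		; A <- 16-bit value at the type offset
 *	jeq #0x800, Ltrue, Lfalse
 *
 * i.e. a load from an absolute offset followed by an equality jump whose
 * targets are filled in later by backpatch().
 */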
390
391 static struct block *
392 gen_mcmp(offset, size, v, mask)
393 u_int offset, size;
394 bpf_int32 v;
395 bpf_u_int32 mask;
396 {
397 struct block *b = gen_cmp(offset, size, v);
398 struct slist *s;
399
400 if (mask != 0xffffffff) {
401 s = new_stmt(BPF_ALU|BPF_AND|BPF_K);
402 s->s.k = mask;
403 b->stmts->next = s;
404 }
405 return b;
406 }
407
408 static struct block *
409 gen_bcmp(offset, size, v)
410 register u_int offset, size;
411 register const u_char *v;
412 {
413 register struct block *b, *tmp;
414
415 b = NULL;
416 while (size >= 4) {
417 register const u_char *p = &v[size - 4];
418 bpf_int32 w = ((bpf_int32)p[0] << 24) |
419 ((bpf_int32)p[1] << 16) | ((bpf_int32)p[2] << 8) | p[3];
420
421 tmp = gen_cmp(offset + size - 4, BPF_W, w);
422 if (b != NULL)
423 gen_and(b, tmp);
424 b = tmp;
425 size -= 4;
426 }
427 while (size >= 2) {
428 register const u_char *p = &v[size - 2];
429 bpf_int32 w = ((bpf_int32)p[0] << 8) | p[1];
430
431 tmp = gen_cmp(offset + size - 2, BPF_H, w);
432 if (b != NULL)
433 gen_and(b, tmp);
434 b = tmp;
435 size -= 2;
436 }
437 if (size > 0) {
438 tmp = gen_cmp(offset, BPF_B, (bpf_int32)v[0]);
439 if (b != NULL)
440 gen_and(b, tmp);
441 b = tmp;
442 }
443 return b;
444 }
445
446 /*
447 * Various code constructs need to know the layout of the data link
448 * layer. These variables give the necessary offsets. off_linktype
449 * is set to -1 for no encapsulation, in which case, IP is assumed.
450 */
451 static u_int off_linktype;
452 static u_int off_nl;
453 static int linktype;
454
455 static void
456 init_linktype(type)
457 int type;
458 {
459 linktype = type;
460
461 switch (type) {
462
463 case DLT_EN10MB:
464 off_linktype = 12;
465 off_nl = 14;
466 return;
467
468 case DLT_SLIP:
469 /*
470 * SLIP doesn't have a link level type. The 16 byte
471 * header is hacked into our SLIP driver.
472 */
473 off_linktype = -1;
474 off_nl = 16;
475 return;
476
477 case DLT_SLIP_BSDOS:
478 /* XXX this may be the same as the DLT_PPP_BSDOS case */
479 off_linktype = -1;
480 /* XXX end */
481 off_nl = 24;
482 return;
483
484 case DLT_NULL:
485 off_linktype = 0;
486 off_nl = 4;
487 return;
488
489 case DLT_PPP:
490 off_linktype = 2;
491 off_nl = 4;
492 return;
493
494 case DLT_PPP_BSDOS:
495 off_linktype = 5;
496 off_nl = 24;
497 return;
498
499 case DLT_FDDI:
500 /*
501 * FDDI doesn't really have a link-level type field.
502 * We assume that SSAP = SNAP is being used and pick
503 * out the encapsulated Ethernet type.
504 */
505 off_linktype = 19;
506 #ifdef PCAP_FDDIPAD
507 off_linktype += pcap_fddipad;
508 #endif
509 off_nl = 21;
510 #ifdef PCAP_FDDIPAD
511 off_nl += pcap_fddipad;
512 #endif
513 return;
514
515 case DLT_IEEE802:
516 off_linktype = 20;
517 off_nl = 22;
518 return;
519
520 case DLT_ATM_RFC1483:
521 /*
522 * assume routed, non-ISO PDUs
524 * (i.e., LLC = 0xAA-AA-03, OUI = 0x00-00-00)
524 */
525 off_linktype = 6;
526 off_nl = 8;
527 return;
528
529 case DLT_RAW:
530 off_linktype = -1;
531 off_nl = 0;
532 return;
533 }
534 bpf_error("unknown data link type 0x%x", linktype);
535 /* NOTREACHED */
536 }
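
/*
 * Example of where the DLT_EN10MB numbers above come from (plain
 * Ethernet framing, nothing beyond that assumed):
 *
 *	offset  0.. 5	destination address
 *	offset  6..11	source address
 *	offset 12..13	type field		-> off_linktype = 12
 *	offset 14..	network-layer payload	-> off_nl = 14
 */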
537
538 static struct block *
539 gen_uncond(rsense)
540 int rsense;
541 {
542 struct block *b;
543 struct slist *s;
544
545 s = new_stmt(BPF_LD|BPF_IMM);
546 s->s.k = !rsense;
547 b = new_block(JMP(BPF_JEQ));
548 b->stmts = s;
549
550 return b;
551 }
552
553 static inline struct block *
554 gen_true()
555 {
556 return gen_uncond(1);
557 }
558
559 static inline struct block *
560 gen_false()
561 {
562 return gen_uncond(0);
563 }
564
565 static struct block *
566 gen_linktype(proto)
567 register int proto;
568 {
569 struct block *b0, *b1;
570
571 /* If we're not using encapsulation and checking for IP, we're done */
572 if (off_linktype == -1 && proto == ETHERTYPE_IP)
573 return gen_true();
574
575 switch (linktype) {
576
577 case DLT_SLIP:
578 return gen_false();
579
580 case DLT_PPP:
581 if (proto == ETHERTYPE_IP)
582 proto = PPP_IP; /* XXX was 0x21 */
583 break;
584
585 case DLT_PPP_BSDOS:
586 switch (proto) {
587
588 case ETHERTYPE_IP:
589 b0 = gen_cmp(off_linktype, BPF_H, PPP_IP);
590 b1 = gen_cmp(off_linktype, BPF_H, PPP_VJC);
591 gen_or(b0, b1);
592 b0 = gen_cmp(off_linktype, BPF_H, PPP_VJNC);
593 gen_or(b1, b0);
594 return b0;
595
596 case ETHERTYPE_DN:
597 proto = PPP_DECNET;
598 break;
599
600 case ETHERTYPE_ATALK:
601 proto = PPP_APPLE;
602 break;
603
604 case ETHERTYPE_NS:
605 proto = PPP_NS;
606 break;
607 }
608 break;
609
610 case DLT_NULL:
611 /* XXX */
612 if (proto == ETHERTYPE_IP)
613 return (gen_cmp(0, BPF_W, (bpf_int32)htonl(AF_INET)));
614 else
615 return gen_false();
616 }
617 return gen_cmp(off_linktype, BPF_H, (bpf_int32)proto);
618 }
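
/*
 * Sketch of the common case: on an Ethernet capture, "ip" reduces to
 * gen_cmp(12, BPF_H, ETHERTYPE_IP), i.e. "ldh [12]; jeq #0x800".  The
 * DLT_SLIP and DLT_NULL branches are special because those link layers
 * carry no Ethernet-style type field; DLT_NULL instead records an
 * address family word (AF_INET) at offset 0.
 */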
619
620 static struct block *
621 gen_hostop(addr, mask, dir, proto, src_off, dst_off)
622 bpf_u_int32 addr;
623 bpf_u_int32 mask;
624 int dir, proto;
625 u_int src_off, dst_off;
626 {
627 struct block *b0, *b1;
628 u_int offset;
629
630 switch (dir) {
631
632 case Q_SRC:
633 offset = src_off;
634 break;
635
636 case Q_DST:
637 offset = dst_off;
638 break;
639
640 case Q_AND:
641 b0 = gen_hostop(addr, mask, Q_SRC, proto, src_off, dst_off);
642 b1 = gen_hostop(addr, mask, Q_DST, proto, src_off, dst_off);
643 gen_and(b0, b1);
644 return b1;
645
646 case Q_OR:
647 case Q_DEFAULT:
648 b0 = gen_hostop(addr, mask, Q_SRC, proto, src_off, dst_off);
649 b1 = gen_hostop(addr, mask, Q_DST, proto, src_off, dst_off);
650 gen_or(b0, b1);
651 return b1;
652
653 default:
654 abort();
655 }
656 b0 = gen_linktype(proto);
657 b1 = gen_mcmp(offset, BPF_W, (bpf_int32)addr, mask);
658 gen_and(b0, b1);
659 return b1;
660 }
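
/*
 * Illustrative expansion, assuming Ethernet (off_nl == 14): the filter
 * "ip src host 10.0.0.1" becomes roughly
 *
 *	ldh [12]
 *	jeq #0x800		; ether proto \ip
 *	ld  [26]		; 14-byte link header + 12 = IP source field
 *	jeq #0x0a000001		; 10.0.0.1
 *
 * with the two tests joined by gen_and().
 */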
661
662 static struct block *
663 gen_ehostop(eaddr, dir)
664 register const u_char *eaddr;
665 register int dir;
666 {
667 register struct block *b0, *b1;
668
669 switch (dir) {
670 case Q_SRC:
671 return gen_bcmp(6, 6, eaddr);
672
673 case Q_DST:
674 return gen_bcmp(0, 6, eaddr);
675
676 case Q_AND:
677 b0 = gen_ehostop(eaddr, Q_SRC);
678 b1 = gen_ehostop(eaddr, Q_DST);
679 gen_and(b0, b1);
680 return b1;
681
682 case Q_DEFAULT:
683 case Q_OR:
684 b0 = gen_ehostop(eaddr, Q_SRC);
685 b1 = gen_ehostop(eaddr, Q_DST);
686 gen_or(b0, b1);
687 return b1;
688 }
689 abort();
690 /* NOTREACHED */
691 }
692
693 /*
694 * Like gen_ehostop, but for DLT_FDDI
695 */
696 static struct block *
697 gen_fhostop(eaddr, dir)
698 register const u_char *eaddr;
699 register int dir;
700 {
701 struct block *b0, *b1;
702
703 switch (dir) {
704 case Q_SRC:
705 #ifdef PCAP_FDDIPAD
706 return gen_bcmp(6 + 1 + pcap_fddipad, 6, eaddr);
707 #else
708 return gen_bcmp(6 + 1, 6, eaddr);
709 #endif
710
711 case Q_DST:
712 #ifdef PCAP_FDDIPAD
713 return gen_bcmp(0 + 1 + pcap_fddipad, 6, eaddr);
714 #else
715 return gen_bcmp(0 + 1, 6, eaddr);
716 #endif
717
718 case Q_AND:
719 b0 = gen_fhostop(eaddr, Q_SRC);
720 b1 = gen_fhostop(eaddr, Q_DST);
721 gen_and(b0, b1);
722 return b1;
723
724 case Q_DEFAULT:
725 case Q_OR:
726 b0 = gen_fhostop(eaddr, Q_SRC);
727 b1 = gen_fhostop(eaddr, Q_DST);
728 gen_or(b0, b1);
729 return b1;
730 }
731 abort();
732 /* NOTREACHED */
733 }
734
735 /*
736 * This is quite tricky because there may be pad bytes in front of the
737 * DECNET header, and then there are two possible data packet formats that
738 * carry both src and dst addresses, plus 5 packet types in a format that
739 * carries only the src node, plus 2 types that use a different format and
740 * also carry just the src node.
741 *
742 * Yuck.
743 *
744 * Instead of handling all of those correctly, we just look for data packets with
745 * 0 or 1 bytes of padding. If you want to look at other packets, that
746 * will require a lot more hacking.
747 *
748 * To add support for filtering on DECNET "areas" (network numbers)
749 * one would want to add a "mask" argument to this routine. That would
750 * make the filter even more inefficient, although one could be clever
751 * and not generate masking instructions if the mask is 0xFFFF.
752 */
753 static struct block *
754 gen_dnhostop(addr, dir, base_off)
755 bpf_u_int32 addr;
756 int dir;
757 u_int base_off;
758 {
759 struct block *b0, *b1, *b2, *tmp;
760 u_int offset_lh; /* offset if long header is received */
761 u_int offset_sh; /* offset if short header is received */
762
763 switch (dir) {
764
765 case Q_DST:
766 offset_sh = 1; /* follows flags */
767 offset_lh = 7; /* flgs,darea,dsubarea,HIORD */
768 break;
769
770 case Q_SRC:
771 offset_sh = 3; /* follows flags, dstnode */
772 offset_lh = 15; /* flgs,darea,dsubarea,did,sarea,ssub,HIORD */
773 break;
774
775 case Q_AND:
776 /* Inefficient because we do our Calvinball dance twice */
777 b0 = gen_dnhostop(addr, Q_SRC, base_off);
778 b1 = gen_dnhostop(addr, Q_DST, base_off);
779 gen_and(b0, b1);
780 return b1;
781
782 case Q_OR:
783 case Q_DEFAULT:
784 /* Inefficient because we do our Calvinball dance twice */
785 b0 = gen_dnhostop(addr, Q_SRC, base_off);
786 b1 = gen_dnhostop(addr, Q_DST, base_off);
787 gen_or(b0, b1);
788 return b1;
789
790 default:
791 abort();
792 }
793 b0 = gen_linktype(ETHERTYPE_DN);
794 /* Check for pad = 1, long header case */
795 tmp = gen_mcmp(base_off + 2, BPF_H,
796 (bpf_int32)ntohs(0x0681), (bpf_int32)ntohs(0x07FF));
797 b1 = gen_cmp(base_off + 2 + 1 + offset_lh,
798 BPF_H, (bpf_int32)ntohs(addr));
799 gen_and(tmp, b1);
800 /* Check for pad = 0, long header case */
801 tmp = gen_mcmp(base_off + 2, BPF_B, (bpf_int32)0x06, (bpf_int32)0x7);
802 b2 = gen_cmp(base_off + 2 + offset_lh, BPF_H, (bpf_int32)ntohs(addr));
803 gen_and(tmp, b2);
804 gen_or(b2, b1);
805 /* Check for pad = 1, short header case */
806 tmp = gen_mcmp(base_off + 2, BPF_H,
807 (bpf_int32)ntohs(0x0281), (bpf_int32)ntohs(0x07FF));
808 b2 = gen_cmp(base_off + 2 + 1 + offset_sh,
809 BPF_H, (bpf_int32)ntohs(addr));
810 gen_and(tmp, b2);
811 gen_or(b2, b1);
812 /* Check for pad = 0, short header case */
813 tmp = gen_mcmp(base_off + 2, BPF_B, (bpf_int32)0x02, (bpf_int32)0x7);
814 b2 = gen_cmp(base_off + 2 + offset_sh, BPF_H, (bpf_int32)ntohs(addr));
815 gen_and(tmp, b2);
816 gen_or(b2, b1);
817
818 /* Combine with test for linktype */
819 gen_and(b0, b1);
820 return b1;
821 }
822
823 static struct block *
824 gen_host(addr, mask, proto, dir)
825 bpf_u_int32 addr;
826 bpf_u_int32 mask;
827 int proto;
828 int dir;
829 {
830 struct block *b0, *b1;
831
832 switch (proto) {
833
834 case Q_DEFAULT:
835 b0 = gen_host(addr, mask, Q_IP, dir);
836 b1 = gen_host(addr, mask, Q_ARP, dir);
837 gen_or(b0, b1);
838 b0 = gen_host(addr, mask, Q_RARP, dir);
839 gen_or(b1, b0);
840 return b0;
841
842 case Q_IP:
843 return gen_hostop(addr, mask, dir, ETHERTYPE_IP,
844 off_nl + 12, off_nl + 16);
845
846 case Q_RARP:
847 return gen_hostop(addr, mask, dir, ETHERTYPE_REVARP,
848 off_nl + 14, off_nl + 24);
849
850 case Q_ARP:
851 return gen_hostop(addr, mask, dir, ETHERTYPE_ARP,
852 off_nl + 14, off_nl + 24);
853
854 case Q_TCP:
855 bpf_error("'tcp' modifier applied to host");
856
857 case Q_UDP:
858 bpf_error("'udp' modifier applied to host");
859
860 case Q_ICMP:
861 bpf_error("'icmp' modifier applied to host");
862
863 case Q_IGMP:
864 bpf_error("'igmp' modifier applied to host");
865
866 case Q_IGRP:
867 bpf_error("'igrp' modifier applied to host");
868
869 case Q_ATALK:
870 bpf_error("ATALK host filtering not implemented");
871
872 case Q_DECNET:
873 return gen_dnhostop(addr, dir, off_nl);
874
875 case Q_SCA:
876 bpf_error("SCA host filtering not implemented");
877
878 case Q_LAT:
879 bpf_error("LAT host filtering not implemented");
880
881 case Q_MOPDL:
882 bpf_error("MOPDL host filtering not implemented");
883
884 case Q_MOPRC:
885 bpf_error("MOPRC host filtering not implemented");
886
887 default:
888 abort();
889 }
890 /* NOTREACHED */
891 }
892
893 static struct block *
894 gen_gateway(eaddr, alist, proto, dir)
895 const u_char *eaddr;
896 bpf_u_int32 **alist;
897 int proto;
898 int dir;
899 {
900 struct block *b0, *b1, *tmp;
901
902 if (dir != 0)
903 bpf_error("direction applied to 'gateway'");
904
905 switch (proto) {
906 case Q_DEFAULT:
907 case Q_IP:
908 case Q_ARP:
909 case Q_RARP:
910 if (linktype == DLT_EN10MB)
911 b0 = gen_ehostop(eaddr, Q_OR);
912 else if (linktype == DLT_FDDI)
913 b0 = gen_fhostop(eaddr, Q_OR);
914 else
915 bpf_error(
916 "'gateway' supported only on ethernet or FDDI");
917
918 b1 = gen_host(**alist++, 0xffffffff, proto, Q_OR);
919 while (*alist) {
920 tmp = gen_host(**alist++, 0xffffffff, proto, Q_OR);
921 gen_or(b1, tmp);
922 b1 = tmp;
923 }
924 gen_not(b1);
925 gen_and(b0, b1);
926 return b1;
927 }
928 bpf_error("illegal modifier of 'gateway'");
929 /* NOTREACHED */
930 }
931
932 struct block *
933 gen_proto_abbrev(proto)
934 int proto;
935 {
936 struct block *b0, *b1;
937
938 switch (proto) {
939
940 case Q_TCP:
941 b0 = gen_linktype(ETHERTYPE_IP);
942 b1 = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)IPPROTO_TCP);
943 gen_and(b0, b1);
944 break;
945
946 case Q_UDP:
947 b0 = gen_linktype(ETHERTYPE_IP);
948 b1 = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)IPPROTO_UDP);
949 gen_and(b0, b1);
950 break;
951
952 case Q_ICMP:
953 b0 = gen_linktype(ETHERTYPE_IP);
954 b1 = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)IPPROTO_ICMP);
955 gen_and(b0, b1);
956 break;
957
958 case Q_IGMP:
959 b0 = gen_linktype(ETHERTYPE_IP);
960 b1 = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)2);
961 gen_and(b0, b1);
962 break;
963
964 #ifndef IPPROTO_IGRP
965 #define IPPROTO_IGRP 9
966 #endif
967 case Q_IGRP:
968 b0 = gen_linktype(ETHERTYPE_IP);
969 b1 = gen_cmp(off_nl + 9, BPF_B, (long)IPPROTO_IGRP);
970 gen_and(b0, b1);
971 break;
972
973 case Q_IP:
974 b1 = gen_linktype(ETHERTYPE_IP);
975 break;
976
977 case Q_ARP:
978 b1 = gen_linktype(ETHERTYPE_ARP);
979 break;
980
981 case Q_RARP:
982 b1 = gen_linktype(ETHERTYPE_REVARP);
983 break;
984
985 case Q_LINK:
986 bpf_error("link layer applied in wrong context");
987
988 case Q_ATALK:
989 b1 = gen_linktype(ETHERTYPE_ATALK);
990 break;
991
992 case Q_DECNET:
993 b1 = gen_linktype(ETHERTYPE_DN);
994 break;
995
996 case Q_SCA:
997 b1 = gen_linktype(ETHERTYPE_SCA);
998 break;
999
1000 case Q_LAT:
1001 b1 = gen_linktype(ETHERTYPE_LAT);
1002 break;
1003
1004 case Q_MOPDL:
1005 b1 = gen_linktype(ETHERTYPE_MOPDL);
1006 break;
1007
1008 case Q_MOPRC:
1009 b1 = gen_linktype(ETHERTYPE_MOPRC);
1010 break;
1011
1012 default:
1013 abort();
1014 }
1015 return b1;
1016 }
1017
1018 static struct block *
1019 gen_ipfrag()
1020 {
1021 struct slist *s;
1022 struct block *b;
1023
1024 /* not ip frag */
1025 s = new_stmt(BPF_LD|BPF_H|BPF_ABS);
1026 s->s.k = off_nl + 6;
1027 b = new_block(JMP(BPF_JSET));
1028 b->s.k = 0x1fff;
1029 b->stmts = s;
1030 gen_not(b);
1031
1032 return b;
1033 }
1034
1035 static struct block *
1036 gen_portatom(off, v)
1037 int off;
1038 bpf_int32 v;
1039 {
1040 struct slist *s;
1041 struct block *b;
1042
1043 s = new_stmt(BPF_LDX|BPF_MSH|BPF_B);
1044 s->s.k = off_nl;
1045
1046 s->next = new_stmt(BPF_LD|BPF_IND|BPF_H);
1047 s->next->s.k = off_nl + off;
1048
1049 b = new_block(JMP(BPF_JEQ));
1050 b->stmts = s;
1051 b->s.k = v;
1052
1053 return b;
1054 }
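
/*
 * The BPF_LDX|BPF_MSH|BPF_B statement above is the standard BPF idiom for
 * variable-length IP headers: it loads X with 4 * (P[off_nl] & 0xf), the
 * IP header length in bytes, so the following indexed load finds the port
 * field regardless of IP options.  Sketch for Ethernet (off_nl == 14) and
 * a source-port test:
 *
 *	ldxb 4*([14]&0xf)
 *	ldh  [x + 14]		; first 16 bits of the transport header
 *	jeq  #port
 */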
1055
1056 struct block *
1057 gen_portop(port, proto, dir)
1058 int port, proto, dir;
1059 {
1060 struct block *b0, *b1, *tmp;
1061
1062 /* ip proto 'proto' */
1063 tmp = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)proto);
1064 b0 = gen_ipfrag();
1065 gen_and(tmp, b0);
1066
1067 switch (dir) {
1068 case Q_SRC:
1069 b1 = gen_portatom(0, (bpf_int32)port);
1070 break;
1071
1072 case Q_DST:
1073 b1 = gen_portatom(2, (bpf_int32)port);
1074 break;
1075
1076 case Q_OR:
1077 case Q_DEFAULT:
1078 tmp = gen_portatom(0, (bpf_int32)port);
1079 b1 = gen_portatom(2, (bpf_int32)port);
1080 gen_or(tmp, b1);
1081 break;
1082
1083 case Q_AND:
1084 tmp = gen_portatom(0, (bpf_int32)port);
1085 b1 = gen_portatom(2, (bpf_int32)port);
1086 gen_and(tmp, b1);
1087 break;
1088
1089 default:
1090 abort();
1091 }
1092 gen_and(b0, b1);
1093
1094 return b1;
1095 }
1096
1097 static struct block *
1098 gen_port(port, ip_proto, dir)
1099 int port;
1100 int ip_proto;
1101 int dir;
1102 {
1103 struct block *b0, *b1, *tmp;
1104
1105 /* ether proto ip */
1106 b0 = gen_linktype(ETHERTYPE_IP);
1107
1108 switch (ip_proto) {
1109 case IPPROTO_UDP:
1110 case IPPROTO_TCP:
1111 b1 = gen_portop(port, ip_proto, dir);
1112 break;
1113
1114 case PROTO_UNDEF:
1115 tmp = gen_portop(port, IPPROTO_TCP, dir);
1116 b1 = gen_portop(port, IPPROTO_UDP, dir);
1117 gen_or(tmp, b1);
1118 break;
1119
1120 default:
1121 abort();
1122 }
1123 gen_and(b0, b1);
1124 return b1;
1125 }
1126
1127 static int
1128 lookup_proto(name, proto)
1129 register const char *name;
1130 register int proto;
1131 {
1132 register int v;
1133
1134 switch (proto) {
1135
1136 case Q_DEFAULT:
1137 case Q_IP:
1138 v = pcap_nametoproto(name);
1139 if (v == PROTO_UNDEF)
1140 bpf_error("unknown ip proto '%s'", name);
1141 break;
1142
1143 case Q_LINK:
1144 /* XXX should look up h/w protocol type based on linktype */
1145 v = pcap_nametoeproto(name);
1146 if (v == PROTO_UNDEF)
1147 bpf_error("unknown ether proto '%s'", name);
1148 break;
1149
1150 default:
1151 v = PROTO_UNDEF;
1152 break;
1153 }
1154 return v;
1155 }
1156
1157 static struct block *
1158 gen_proto(v, proto, dir)
1159 int v;
1160 int proto;
1161 int dir;
1162 {
1163 struct block *b0, *b1;
1164
1165 if (dir != Q_DEFAULT)
1166 bpf_error("direction applied to 'proto'");
1167
1168 switch (proto) {
1169 case Q_DEFAULT:
1170 case Q_IP:
1171 b0 = gen_linktype(ETHERTYPE_IP);
1172 b1 = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)v);
1173 gen_and(b0, b1);
1174 return b1;
1175
1176 case Q_ARP:
1177 bpf_error("arp does not encapsulate another protocol");
1178 /* NOTREACHED */
1179
1180 case Q_RARP:
1181 bpf_error("rarp does not encapsulate another protocol");
1182 /* NOTREACHED */
1183
1184 case Q_ATALK:
1185 bpf_error("atalk encapsulation is not specifiable");
1186 /* NOTREACHED */
1187
1188 case Q_DECNET:
1189 bpf_error("decnet encapsulation is not specifiable");
1190 /* NOTREACHED */
1191
1192 case Q_SCA:
1193 bpf_error("sca does not encapsulate another protocol");
1194 /* NOTREACHED */
1195
1196 case Q_LAT:
1197 bpf_error("lat does not encapsulate another protocol");
1198 /* NOTREACHED */
1199
1200 case Q_MOPRC:
1201 bpf_error("moprc does not encapsulate another protocol");
1202 /* NOTREACHED */
1203
1204 case Q_MOPDL:
1205 bpf_error("mopdl does not encapsulate another protocol");
1206 /* NOTREACHED */
1207
1208 case Q_LINK:
1209 return gen_linktype(v);
1210
1211 case Q_UDP:
1212 bpf_error("'udp proto' is bogus");
1213 /* NOTREACHED */
1214
1215 case Q_TCP:
1216 bpf_error("'tcp proto' is bogus");
1217 /* NOTREACHED */
1218
1219 case Q_ICMP:
1220 bpf_error("'icmp proto' is bogus");
1221 /* NOTREACHED */
1222
1223 case Q_IGMP:
1224 bpf_error("'igmp proto' is bogus");
1225 /* NOTREACHED */
1226
1227 case Q_IGRP:
1228 bpf_error("'igrp proto' is bogus");
1229 /* NOTREACHED */
1230
1231 default:
1232 abort();
1233 /* NOTREACHED */
1234 }
1235 /* NOTREACHED */
1236 }
1237
1238 struct block *
1239 gen_scode(name, q)
1240 register const char *name;
1241 struct qual q;
1242 {
1243 int proto = q.proto;
1244 int dir = q.dir;
1245 int tproto;
1246 u_char *eaddr;
1247 bpf_u_int32 mask, addr, **alist;
1248 struct block *b, *tmp;
1249 int port, real_proto;
1250
1251 switch (q.addr) {
1252
1253 case Q_NET:
1254 addr = pcap_nametonetaddr(name);
1255 if (addr == 0)
1256 bpf_error("unknown network '%s'", name);
1257 /* Left justify network addr and calculate its network mask */
1258 mask = 0xffffffff;
1259 while (addr && (addr & 0xff000000) == 0) {
1260 addr <<= 8;
1261 mask <<= 8;
1262 }
1263 return gen_host(addr, mask, proto, dir);
1264
1265 case Q_DEFAULT:
1266 case Q_HOST:
1267 if (proto == Q_LINK) {
1268 switch (linktype) {
1269
1270 case DLT_EN10MB:
1271 eaddr = pcap_ether_hostton(name);
1272 if (eaddr == NULL)
1273 bpf_error(
1274 "unknown ether host '%s'", name);
1275 return gen_ehostop(eaddr, dir);
1276
1277 case DLT_FDDI:
1278 eaddr = pcap_ether_hostton(name);
1279 if (eaddr == NULL)
1280 bpf_error(
1281 "unknown FDDI host '%s'", name);
1282 return gen_fhostop(eaddr, dir);
1283
1284 default:
1285 bpf_error(
1286 "only ethernet/FDDI supports link-level host name");
1287 break;
1288 }
1289 } else if (proto == Q_DECNET) {
1290 unsigned short dn_addr = __pcap_nametodnaddr(name);
1291 /*
1292 * I don't think DECNET hosts can be multihomed, so
1293 * there is no need to build up a list of addresses
1294 */
1295 return (gen_host(dn_addr, 0, proto, dir));
1296 } else {
1297 alist = pcap_nametoaddr(name);
1298 if (alist == NULL || *alist == NULL)
1299 bpf_error("unknown host '%s'", name);
1300 tproto = proto;
1301 if (off_linktype == -1 && tproto == Q_DEFAULT)
1302 tproto = Q_IP;
1303 b = gen_host(**alist++, 0xffffffff, tproto, dir);
1304 while (*alist) {
1305 tmp = gen_host(**alist++, 0xffffffff,
1306 tproto, dir);
1307 gen_or(b, tmp);
1308 b = tmp;
1309 }
1310 return b;
1311 }
1312
1313 case Q_PORT:
1314 if (proto != Q_DEFAULT && proto != Q_UDP && proto != Q_TCP)
1315 bpf_error("illegal qualifier of 'port'");
1316 if (pcap_nametoport(name, &port, &real_proto) == 0)
1317 bpf_error("unknown port '%s'", name);
1318 if (proto == Q_UDP) {
1319 if (real_proto == IPPROTO_TCP)
1320 bpf_error("port '%s' is tcp", name);
1321 else
1322 /* override PROTO_UNDEF */
1323 real_proto = IPPROTO_UDP;
1324 }
1325 if (proto == Q_TCP) {
1326 if (real_proto == IPPROTO_UDP)
1327 bpf_error("port '%s' is udp", name);
1328 else
1329 /* override PROTO_UNDEF */
1330 real_proto = IPPROTO_TCP;
1331 }
1332 return gen_port(port, real_proto, dir);
1333
1334 case Q_GATEWAY:
1335 eaddr = pcap_ether_hostton(name);
1336 if (eaddr == NULL)
1337 bpf_error("unknown ether host: %s", name);
1338
1339 alist = pcap_nametoaddr(name);
1340 if (alist == NULL || *alist == NULL)
1341 bpf_error("unknown host '%s'", name);
1342 return gen_gateway(eaddr, alist, proto, dir);
1343
1344 case Q_PROTO:
1345 real_proto = lookup_proto(name, proto);
1346 if (real_proto >= 0)
1347 return gen_proto(real_proto, proto, dir);
1348 else
1349 bpf_error("unknown protocol: %s", name);
1350
1351 case Q_UNDEF:
1352 syntax();
1353 /* NOTREACHED */
1354 }
1355 abort();
1356 /* NOTREACHED */
1357 }
1358
1359 struct block *
1360 gen_mcode(s1, s2, masklen, q)
1361 register const char *s1, *s2;
1362 register int masklen;
1363 struct qual q;
1364 {
1365 register int nlen, mlen;
1366 bpf_u_int32 n, m;
1367
1368 nlen = __pcap_atoin(s1, &n);
1369 /* Promote short ipaddr */
1370 n <<= 32 - nlen;
1371
1372 if (s2 != NULL) {
1373 mlen = __pcap_atoin(s2, &m);
1374 /* Promote short ipaddr */
1375 m <<= 32 - mlen;
1376 if ((n & ~m) != 0)
1377 bpf_error("non-network bits set in \"%s mask %s\"",
1378 s1, s2);
1379 } else {
1380 /* Convert mask len to mask */
1381 if (masklen > 32)
1382 bpf_error("mask length must be <= 32");
1383 m = 0xffffffff << (32 - masklen);
1384 if ((n & ~m) != 0)
1385 bpf_error("non-network bits set in \"%s/%d\"",
1386 s1, masklen);
1387 }
1388
1389 switch (q.addr) {
1390
1391 case Q_NET:
1392 return gen_host(n, m, q.proto, q.dir);
1393
1394 default:
1395 bpf_error("Mask syntax for networks only");
1396 /* NOTREACHED */
1397 }
1398 }
1399
1400 struct block *
1401 gen_ncode(s, v, q)
1402 register const char *s;
1403 bpf_u_int32 v;
1404 struct qual q;
1405 {
1406 bpf_u_int32 mask;
1407 int proto = q.proto;
1408 int dir = q.dir;
1409 register int vlen;
1410
1411 if (s == NULL)
1412 vlen = 32;
1413 else if (q.proto == Q_DECNET)
1414 vlen = __pcap_atodn(s, &v);
1415 else
1416 vlen = __pcap_atoin(s, &v);
1417
1418 switch (q.addr) {
1419
1420 case Q_DEFAULT:
1421 case Q_HOST:
1422 case Q_NET:
1423 if (proto == Q_DECNET)
1424 return gen_host(v, 0, proto, dir);
1425 else if (proto == Q_LINK) {
1426 bpf_error("illegal link layer address");
1427 } else {
1428 mask = 0xffffffff;
1429 if (s == NULL && q.addr == Q_NET) {
1430 /* Promote short net number */
1431 while (v && (v & 0xff000000) == 0) {
1432 v <<= 8;
1433 mask <<= 8;
1434 }
1435 } else {
1436 /* Promote short ipaddr */
1437 v <<= 32 - vlen;
1438 mask <<= 32 - vlen;
1439 }
1440 return gen_host(v, mask, proto, dir);
1441 }
1442
1443 case Q_PORT:
1444 if (proto == Q_UDP)
1445 proto = IPPROTO_UDP;
1446 else if (proto == Q_TCP)
1447 proto = IPPROTO_TCP;
1448 else if (proto == Q_DEFAULT)
1449 proto = PROTO_UNDEF;
1450 else
1451 bpf_error("illegal qualifier of 'port'");
1452
1453 return gen_port((int)v, proto, dir);
1454
1455 case Q_GATEWAY:
1456 bpf_error("'gateway' requires a name");
1457 /* NOTREACHED */
1458
1459 case Q_PROTO:
1460 return gen_proto((int)v, proto, dir);
1461
1462 case Q_UNDEF:
1463 syntax();
1464 /* NOTREACHED */
1465
1466 default:
1467 abort();
1468 /* NOTREACHED */
1469 }
1470 /* NOTREACHED */
1471 }
1472
1473 struct block *
1474 gen_ecode(eaddr, q)
1475 register const u_char *eaddr;
1476 struct qual q;
1477 {
1478 if ((q.addr == Q_HOST || q.addr == Q_DEFAULT) && q.proto == Q_LINK) {
1479 if (linktype == DLT_EN10MB)
1480 return gen_ehostop(eaddr, (int)q.dir);
1481 if (linktype == DLT_FDDI)
1482 return gen_fhostop(eaddr, (int)q.dir);
1483 }
1484 bpf_error("ethernet address used in non-ether expression");
1485 /* NOTREACHED */
1486 }
1487
1488 void
1489 sappend(s0, s1)
1490 struct slist *s0, *s1;
1491 {
1492 /*
1493 * This is definitely not the best way to do this, but the
1494 * lists will rarely get long.
1495 */
1496 while (s0->next)
1497 s0 = s0->next;
1498 s0->next = s1;
1499 }
1500
1501 static struct slist *
1502 xfer_to_x(a)
1503 struct arth *a;
1504 {
1505 struct slist *s;
1506
1507 s = new_stmt(BPF_LDX|BPF_MEM);
1508 s->s.k = a->regno;
1509 return s;
1510 }
1511
1512 static struct slist *
1513 xfer_to_a(a)
1514 struct arth *a;
1515 {
1516 struct slist *s;
1517
1518 s = new_stmt(BPF_LD|BPF_MEM);
1519 s->s.k = a->regno;
1520 return s;
1521 }
1522
1523 struct arth *
1524 gen_load(proto, index, size)
1525 int proto;
1526 struct arth *index;
1527 int size;
1528 {
1529 struct slist *s, *tmp;
1530 struct block *b;
1531 int regno = alloc_reg();
1532
1533 free_reg(index->regno);
1534 switch (size) {
1535
1536 default:
1537 bpf_error("data size must be 1, 2, or 4");
1538
1539 case 1:
1540 size = BPF_B;
1541 break;
1542
1543 case 2:
1544 size = BPF_H;
1545 break;
1546
1547 case 4:
1548 size = BPF_W;
1549 break;
1550 }
1551 switch (proto) {
1552 default:
1553 bpf_error("unsupported index operation");
1554
1555 case Q_LINK:
1556 s = xfer_to_x(index);
1557 tmp = new_stmt(BPF_LD|BPF_IND|size);
1558 sappend(s, tmp);
1559 sappend(index->s, s);
1560 break;
1561
1562 case Q_IP:
1563 case Q_ARP:
1564 case Q_RARP:
1565 case Q_ATALK:
1566 case Q_DECNET:
1567 case Q_SCA:
1568 case Q_LAT:
1569 case Q_MOPRC:
1570 case Q_MOPDL:
1571 /* XXX Note that we assume a fixed link-layer header here. */
1572 s = xfer_to_x(index);
1573 tmp = new_stmt(BPF_LD|BPF_IND|size);
1574 tmp->s.k = off_nl;
1575 sappend(s, tmp);
1576 sappend(index->s, s);
1577
1578 b = gen_proto_abbrev(proto);
1579 if (index->b)
1580 gen_and(index->b, b);
1581 index->b = b;
1582 break;
1583
1584 case Q_TCP:
1585 case Q_UDP:
1586 case Q_ICMP:
1587 case Q_IGMP:
1588 case Q_IGRP:
1589 s = new_stmt(BPF_LDX|BPF_MSH|BPF_B);
1590 s->s.k = off_nl;
1591 sappend(s, xfer_to_a(index));
1592 sappend(s, new_stmt(BPF_ALU|BPF_ADD|BPF_X));
1593 sappend(s, new_stmt(BPF_MISC|BPF_TAX));
1594 sappend(s, tmp = new_stmt(BPF_LD|BPF_IND|size));
1595 tmp->s.k = off_nl;
1596 sappend(index->s, s);
1597
1598 gen_and(gen_proto_abbrev(proto), b = gen_ipfrag());
1599 if (index->b)
1600 gen_and(index->b, b);
1601 index->b = b;
1602 break;
1603 }
1604 index->regno = regno;
1605 s = new_stmt(BPF_ST);
1606 s->s.k = regno;
1607 sappend(index->s, s);
1608
1609 return index;
1610 }
1611
1612 struct block *
1613 gen_relation(code, a0, a1, reversed)
1614 int code;
1615 struct arth *a0, *a1;
1616 int reversed;
1617 {
1618 struct slist *s0, *s1, *s2;
1619 struct block *b, *tmp;
1620
1621 s0 = xfer_to_x(a1);
1622 s1 = xfer_to_a(a0);
1623 s2 = new_stmt(BPF_ALU|BPF_SUB|BPF_X);
1624 b = new_block(JMP(code));
1625 if (code == BPF_JGT || code == BPF_JGE) {
1626 reversed = !reversed;
1627 b->s.k = 0x80000000;
1628 }
1629 if (reversed)
1630 gen_not(b);
1631
1632 sappend(s1, s2);
1633 sappend(s0, s1);
1634 sappend(a1->s, s0);
1635 sappend(a0->s, a1->s);
1636
1637 b->stmts = a0->s;
1638
1639 free_reg(a0->regno);
1640 free_reg(a1->regno);
1641
1642 /* 'and' together protocol checks */
1643 if (a0->b) {
1644 if (a1->b) {
1645 gen_and(a0->b, tmp = a1->b);
1646 }
1647 else
1648 tmp = a0->b;
1649 } else
1650 tmp = a1->b;
1651
1652 if (tmp)
1653 gen_and(tmp, b);
1654
1655 return b;
1656 }
1657
1658 struct arth *
1659 gen_loadlen()
1660 {
1661 int regno = alloc_reg();
1662 struct arth *a = (struct arth *)newchunk(sizeof(*a));
1663 struct slist *s;
1664
1665 s = new_stmt(BPF_LD|BPF_LEN);
1666 s->next = new_stmt(BPF_ST);
1667 s->next->s.k = regno;
1668 a->s = s;
1669 a->regno = regno;
1670
1671 return a;
1672 }
1673
1674 struct arth *
1675 gen_loadi(val)
1676 int val;
1677 {
1678 struct arth *a;
1679 struct slist *s;
1680 int reg;
1681
1682 a = (struct arth *)newchunk(sizeof(*a));
1683
1684 reg = alloc_reg();
1685
1686 s = new_stmt(BPF_LD|BPF_IMM);
1687 s->s.k = val;
1688 s->next = new_stmt(BPF_ST);
1689 s->next->s.k = reg;
1690 a->s = s;
1691 a->regno = reg;
1692
1693 return a;
1694 }
1695
1696 struct arth *
1697 gen_neg(a)
1698 struct arth *a;
1699 {
1700 struct slist *s;
1701
1702 s = xfer_to_a(a);
1703 sappend(a->s, s);
1704 s = new_stmt(BPF_ALU|BPF_NEG);
1705 s->s.k = 0;
1706 sappend(a->s, s);
1707 s = new_stmt(BPF_ST);
1708 s->s.k = a->regno;
1709 sappend(a->s, s);
1710
1711 return a;
1712 }
1713
1714 struct arth *
1715 gen_arth(code, a0, a1)
1716 int code;
1717 struct arth *a0, *a1;
1718 {
1719 struct slist *s0, *s1, *s2;
1720
1721 s0 = xfer_to_x(a1);
1722 s1 = xfer_to_a(a0);
1723 s2 = new_stmt(BPF_ALU|BPF_X|code);
1724
1725 sappend(s1, s2);
1726 sappend(s0, s1);
1727 sappend(a1->s, s0);
1728 sappend(a0->s, a1->s);
1729
1730 free_reg(a1->regno);
1731
1732 s0 = new_stmt(BPF_ST);
1733 a0->regno = s0->s.k = alloc_reg();
1734 sappend(a0->s, s0);
1735
1736 return a0;
1737 }
1738
1739 /*
1740 * Here we handle simple allocation of the scratch registers.
1741 * If too many registers are alloc'd, the allocator punts.
1742 */
1743 static int regused[BPF_MEMWORDS];
1744 static int curreg;
1745
1746 /*
1747 * Return the next free register.
1748 */
1749 static int
1750 alloc_reg()
1751 {
1752 int n = BPF_MEMWORDS;
1753
1754 while (--n >= 0) {
1755 if (regused[curreg])
1756 curreg = (curreg + 1) % BPF_MEMWORDS;
1757 else {
1758 regused[curreg] = 1;
1759 return curreg;
1760 }
1761 }
1762 bpf_error("too many registers needed to evaluate expression");
1763 /* NOTREACHED */
1764 }
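
/*
 * Note that BPF provides only BPF_MEMWORDS scratch memory words, so an
 * expression needing more than that many live intermediate values is
 * rejected here rather than silently miscompiled.
 */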
1765
1766 /*
1767 * Return a register to the table so it can
1768 * be used later.
1769 */
1770 static void
1771 free_reg(n)
1772 int n;
1773 {
1774 regused[n] = 0;
1775 }
1776
1777 static struct block *
1778 gen_len(jmp, n)
1779 int jmp, n;
1780 {
1781 struct slist *s;
1782 struct block *b;
1783
1784 s = new_stmt(BPF_LD|BPF_LEN);
1785 b = new_block(JMP(jmp));
1786 b->stmts = s;
1787 b->s.k = n;
1788
1789 return b;
1790 }
1791
1792 struct block *
1793 gen_greater(n)
1794 int n;
1795 {
1796 return gen_len(BPF_JGE, n);
1797 }
1798
1799 struct block *
1800 gen_less(n)
1801 int n;
1802 {
1803 struct block *b;
1804
1805 b = gen_len(BPF_JGT, n);
1806 gen_not(b);
1807
1808 return b;
1809 }
1810
1811 struct block *
1812 gen_byteop(op, idx, val)
1813 int op, idx, val;
1814 {
1815 struct block *b;
1816 struct slist *s;
1817
1818 switch (op) {
1819 default:
1820 abort();
1821
1822 case '=':
1823 return gen_cmp((u_int)idx, BPF_B, (bpf_int32)val);
1824
1825 case '<':
1826 b = gen_cmp((u_int)idx, BPF_B, (bpf_int32)val);
1827 b->s.code = JMP(BPF_JGE);
1828 gen_not(b);
1829 return b;
1830
1831 case '>':
1832 b = gen_cmp((u_int)idx, BPF_B, (bpf_int32)val);
1833 b->s.code = JMP(BPF_JGT);
1834 return b;
1835
1836 case '|':
1837 s = new_stmt(BPF_ALU|BPF_OR|BPF_K);
1838 break;
1839
1840 case '&':
1841 s = new_stmt(BPF_ALU|BPF_AND|BPF_K);
1842 break;
1843 }
1844 s->s.k = val;
1845 b = new_block(JMP(BPF_JEQ));
1846 b->stmts = s;
1847 gen_not(b);
1848
1849 return b;
1850 }
1851
1852 struct block *
1853 gen_broadcast(proto)
1854 int proto;
1855 {
1856 bpf_u_int32 hostmask;
1857 struct block *b0, *b1, *b2;
1858 static u_char ebroadcast[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
1859
1860 switch (proto) {
1861
1862 case Q_DEFAULT:
1863 case Q_LINK:
1864 if (linktype == DLT_EN10MB)
1865 return gen_ehostop(ebroadcast, Q_DST);
1866 if (linktype == DLT_FDDI)
1867 return gen_fhostop(ebroadcast, Q_DST);
1868 bpf_error("not a broadcast link");
1869 break;
1870
1871 case Q_IP:
1872 b0 = gen_linktype(ETHERTYPE_IP);
1873 hostmask = ~netmask;
1874 b1 = gen_mcmp(off_nl + 16, BPF_W, (bpf_int32)0, hostmask);
1875 b2 = gen_mcmp(off_nl + 16, BPF_W,
1876 (bpf_int32)(~0 & hostmask), hostmask);
1877 gen_or(b1, b2);
1878 gen_and(b0, b2);
1879 return b2;
1880 }
1881 bpf_error("only ether/ip broadcast filters supported");
1882 }
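
/*
 * Illustration of the Q_IP case above, assuming a netmask of
 * 255.255.255.0: hostmask is 0x000000ff and the two gen_mcmp() calls
 * match a destination address whose host part is either all zeroes (the
 * old-style broadcast) or all ones (the usual x.y.z.255 broadcast);
 * gen_or() accepts either form.
 */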
1883
1884 struct block *
1885 gen_multicast(proto)
1886 int proto;
1887 {
1888 register struct block *b0, *b1;
1889 register struct slist *s;
1890
1891 switch (proto) {
1892
1893 case Q_DEFAULT:
1894 case Q_LINK:
1895 if (linktype == DLT_EN10MB) {
1896 /* ether[0] & 1 != 0 */
1897 s = new_stmt(BPF_LD|BPF_B|BPF_ABS);
1898 s->s.k = 0;
1899 b0 = new_block(JMP(BPF_JSET));
1900 b0->s.k = 1;
1901 b0->stmts = s;
1902 return b0;
1903 }
1904
1905 if (linktype == DLT_FDDI) {
1906 /* XXX TEST THIS: MIGHT NOT PORT PROPERLY XXX */
1907 /* fddi[1] & 1 != 0 */
1908 s = new_stmt(BPF_LD|BPF_B|BPF_ABS);
1909 s->s.k = 1;
1910 b0 = new_block(JMP(BPF_JSET));
1911 b0->s.k = 1;
1912 b0->stmts = s;
1913 return b0;
1914 }
1915 /* Link not known to support multicasts */
1916 break;
1917
1918 case Q_IP:
1919 b0 = gen_linktype(ETHERTYPE_IP);
1920 b1 = gen_cmp(off_nl + 16, BPF_B, (bpf_int32)224);
1921 b1->s.code = JMP(BPF_JGE);
1922 gen_and(b0, b1);
1923 return b1;
1924 }
1925 bpf_error("only IP multicast filters supported on ethernet/FDDI");
1926 }
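
/*
 * Sketch of the tests above: for link-level multicast the code checks the
 * individual/group bit, i.e. the low-order bit of the first destination
 * byte on Ethernet and of the second byte on FDDI (the first FDDI byte is
 * the frame control field); for "ip multicast" it checks that the first
 * byte of the IP destination address (off_nl + 16) is >= 224, i.e. class
 * D and above.
 */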
1927
1928 /*
1929 * Generate code for an inbound/outbound test.  It's here so we can
1930 * make it link-type specific. 'dir' = 0 implies "inbound",
1931 * = 1 implies "outbound".
1932 */
1933 struct block *
1934 gen_inbound(dir)
1935 int dir;
1936 {
1937 register struct block *b0;
1938
1939 b0 = gen_relation(BPF_JEQ,
1940 gen_load(Q_LINK, gen_loadi(0), 1),
1941 gen_loadi(0),
1942 dir);
1943 return (b0);
1944 }