1 From a799ca0c6314ad73a97bc6c89382d2712a9c0b0e Mon Sep 17 00:00:00 2001
2 From: Simon Kelley <simon@thekelleys.org.uk>
3 Date: Thu, 18 Oct 2018 19:35:29 +0100
4 Subject: [PATCH 01/32] Improve cache behaviour for TCP connections.
6 For ease of implementation, dnsmasq has always forked a new process to
7 handle each incoming TCP connection. A side-effect of this is that any
8 DNS queries answered from TCP connections are not cached: when TCP
9 connections were rare, this was not a problem. With the coming of
10 DNSSEC, it's now the case that some DNSSEC queries have answers which
11 spill to TCP, and if, for instance, this applies to the keys for the
12 root then those never get cached, and performance is very bad. This
13 fix passes cache entries back from the TCP child process to the main
14 server process, and fixes the problem.
16 Signed-off-by: Kevin Darbyshire-Bryant <ldir@darbyshire-bryant.me.uk>
19 src/blockdata.c | 37 ++++++++-
20 src/cache.c | 196 ++++++++++++++++++++++++++++++++++++++++++++++--
21 src/dnsmasq.c | 58 ++++++++++++--
23 5 files changed, 291 insertions(+), 19 deletions(-)
29 + Improve cache behaviour for TCP connections. For ease of
30 + implementation, dnsmasq has always forked a new process to handle
31 + each incoming TCP connection. A side-effect of this is that
32 + any DNS queries answered from TCP connections are not cached:
33 + when TCP connections were rare, this was not a problem.
34 + With the coming of DNSSEC, it's now the case that some
35 + DNSSEC queries have answers which spill to TCP, and if,
36 + for instance, this applies to the keys for the root then
37 + those never get cached, and performance is very bad.
38 + This fix passes cache entries back from the TCP child process to
39 + the main server process, and fixes the problem.
43 Add support for RFC 4039 DHCP rapid commit. Thanks to Ashram Method
44 for the initial patch and motivation.
47 @@ -61,7 +61,7 @@ void blockdata_report(void)
48 blockdata_alloced * sizeof(struct blockdata));
51 -struct blockdata *blockdata_alloc(char *data, size_t len)
52 +static struct blockdata *blockdata_alloc_real(int fd, char *data, size_t len)
54 struct blockdata *block, *ret = NULL;
55 struct blockdata **prev = &ret;
56 @@ -89,8 +89,17 @@ struct blockdata *blockdata_alloc(char *
57 blockdata_hwm = blockdata_count;
59 blen = len > KEYBLOCK_LEN ? KEYBLOCK_LEN : len;
60 - memcpy(block->key, data, blen);
64 + memcpy(block->key, data, blen);
67 + else if (!read_write(fd, block->key, blen, 1))
69 + /* failed read, free partial chain */
70 + blockdata_free(ret);
76 @@ -100,6 +109,10 @@ struct blockdata *blockdata_alloc(char *
80 +struct blockdata *blockdata_alloc(char *data, size_t len)
82 + return blockdata_alloc_real(0, data, len);
85 void blockdata_free(struct blockdata *blocks)
87 @@ -148,5 +161,21 @@ void *blockdata_retrieve(struct blockdat
94 +void blockdata_write(struct blockdata *block, size_t len, int fd)
96 + for (; len > 0 && block; block = block->next)
98 + size_t blen = len > KEYBLOCK_LEN ? KEYBLOCK_LEN : len;
99 + read_write(fd, block->key, blen, 0);
104 +struct blockdata *blockdata_read(int fd, size_t len)
106 + return blockdata_alloc_real(fd, NULL, len);
112 @@ -26,6 +26,8 @@ static union bigname *big_free = NULL;
113 static int bignames_left, hash_size;
115 static void make_non_terminals(struct crec *source);
116 +static struct crec *really_insert(char *name, struct all_addr *addr,
117 + time_t now, unsigned long ttl, unsigned short flags);
119 /* type->string mapping: this is also used by the name-hash function as a mixing table. */
120 static const struct {
121 @@ -464,16 +466,10 @@ void cache_start_insert(void)
127 struct crec *cache_insert(char *name, struct all_addr *addr,
128 time_t now, unsigned long ttl, unsigned short flags)
130 - struct crec *new, *target_crec = NULL;
131 - union bigname *big_name = NULL;
132 - int freed_all = flags & F_REVERSE;
133 - int free_avail = 0;
134 - unsigned int target_uid;
136 /* Don't log DNSSEC records here, done elsewhere */
137 if (flags & (F_IPV4 | F_IPV6 | F_CNAME))
139 @@ -484,7 +480,20 @@ struct crec *cache_insert(char *name, st
140 if (daemon->min_cache_ttl != 0 && daemon->min_cache_ttl > ttl)
141 ttl = daemon->min_cache_ttl;
144 + return really_insert(name, addr, now, ttl, flags);
148 +static struct crec *really_insert(char *name, struct all_addr *addr,
149 + time_t now, unsigned long ttl, unsigned short flags)
151 + struct crec *new, *target_crec = NULL;
152 + union bigname *big_name = NULL;
153 + int freed_all = flags & F_REVERSE;
154 + int free_avail = 0;
155 + unsigned int target_uid;
157 /* if previous insertion failed give up now. */
160 @@ -645,12 +654,185 @@ void cache_end_insert(void)
161 cache_hash(new_chain);
162 cache_link(new_chain);
163 daemon->metrics[METRIC_DNS_CACHE_INSERTED]++;
165 + /* If we're a child process, send this cache entry up the pipe to the master.
166 + The marshalling process is rather nasty. */
167 + if (daemon->pipe_to_parent != -1)
169 + char *name = cache_get_name(new_chain);
170 + ssize_t m = strlen(name);
171 + unsigned short flags = new_chain->flags;
173 + u16 class = new_chain->uid;
176 + read_write(daemon->pipe_to_parent, (unsigned char *)&m, sizeof(m), 0);
177 + read_write(daemon->pipe_to_parent, (unsigned char *)name, m, 0);
178 + read_write(daemon->pipe_to_parent, (unsigned char *)&new_chain->ttd, sizeof(new_chain->ttd), 0);
179 + read_write(daemon->pipe_to_parent, (unsigned char *)&flags, sizeof(flags), 0);
181 + if (flags & (F_IPV4 | F_IPV6))
182 + read_write(daemon->pipe_to_parent, (unsigned char *)&new_chain->addr, sizeof(new_chain->addr), 0);
184 + else if (flags & F_DNSKEY)
186 + read_write(daemon->pipe_to_parent, (unsigned char *)&class, sizeof(class), 0);
187 + read_write(daemon->pipe_to_parent, (unsigned char *)&new_chain->addr.key.algo, sizeof(new_chain->addr.key.algo), 0);
188 + read_write(daemon->pipe_to_parent, (unsigned char *)&new_chain->addr.key.keytag, sizeof(new_chain->addr.key.keytag), 0);
189 + read_write(daemon->pipe_to_parent, (unsigned char *)&new_chain->addr.key.flags, sizeof(new_chain->addr.key.flags), 0);
190 + read_write(daemon->pipe_to_parent, (unsigned char *)&new_chain->addr.key.keylen, sizeof(new_chain->addr.key.keylen), 0);
191 + blockdata_write(new_chain->addr.key.keydata, new_chain->addr.key.keylen, daemon->pipe_to_parent);
193 + else if (flags & F_DS)
195 + read_write(daemon->pipe_to_parent, (unsigned char *)&class, sizeof(class), 0);
196 + /* A negative DS entry is possible and has no data, obviously. */
197 + if (!(flags & F_NEG))
199 + read_write(daemon->pipe_to_parent, (unsigned char *)&new_chain->addr.ds.algo, sizeof(new_chain->addr.ds.algo), 0);
200 + read_write(daemon->pipe_to_parent, (unsigned char *)&new_chain->addr.ds.keytag, sizeof(new_chain->addr.ds.keytag), 0);
201 + read_write(daemon->pipe_to_parent, (unsigned char *)&new_chain->addr.ds.digest, sizeof(new_chain->addr.ds.digest), 0);
202 + read_write(daemon->pipe_to_parent, (unsigned char *)&new_chain->addr.ds.keylen, sizeof(new_chain->addr.ds.keylen), 0);
203 + blockdata_write(new_chain->addr.ds.keydata, new_chain->addr.ds.keylen, daemon->pipe_to_parent);
214 + /* signal end of cache insert in master process */
215 + if (daemon->pipe_to_parent != -1)
218 + read_write(daemon->pipe_to_parent, (unsigned char *)&m, sizeof(m), 0);
225 +/* A marshalled cache entry arrives on fd, read, unmarshall and insert into cache of master process. */
226 +int cache_recv_insert(time_t now, int fd)
229 + struct all_addr addr;
232 + unsigned short flags;
233 + struct crec *crecp = NULL;
235 + cache_start_insert();
240 + if (!read_write(fd, (unsigned char *)&m, sizeof(m), 1))
245 + cache_end_insert();
249 + if (!read_write(fd, (unsigned char *)daemon->namebuff, m, 1) ||
250 + !read_write(fd, (unsigned char *)&ttd, sizeof(ttd), 1) ||
251 + !read_write(fd, (unsigned char *)&flags, sizeof(flags), 1))
254 + daemon->namebuff[m] = 0;
256 + ttl = difftime(ttd, now);
258 + if (flags & (F_IPV4 | F_IPV6))
260 + if (!read_write(fd, (unsigned char *)&addr, sizeof(addr), 1))
262 + crecp = really_insert(daemon->namebuff, &addr, now, ttl, flags);
264 + else if (flags & F_CNAME)
266 + struct crec *newc = really_insert(daemon->namebuff, NULL, now, ttl, flags);
267 + /* This relies on the fact that the target of a CNAME immediately precedes
268 + it because of the order of extraction in extract_addresses, and
269 + the order reversal on the new_chain. */
274 + newc->addr.cname.target.cache = NULL;
275 + /* anything other than zero, to avoid being mistaken for CNAME to interface-name */
276 + newc->addr.cname.uid = 1;
281 + newc->addr.cname.target.cache = crecp;
282 + newc->addr.cname.uid = crecp->uid;
287 + else if (flags & (F_DNSKEY | F_DS))
289 + unsigned short class, keylen, keyflags, keytag;
290 + unsigned char algo, digest;
291 + struct blockdata *keydata;
293 + if (!read_write(fd, (unsigned char *)&class, sizeof(class), 1))
295 + /* Cache needs to know the class for DNSSEC stuff */
296 + addr.addr.dnssec.class = class;
298 + crecp = really_insert(daemon->namebuff, &addr, now, ttl, flags);
300 + if (flags & F_DNSKEY)
302 + if (!read_write(fd, (unsigned char *)&algo, sizeof(algo), 1) ||
303 + !read_write(fd, (unsigned char *)&keytag, sizeof(keytag), 1) ||
304 + !read_write(fd, (unsigned char *)&keyflags, sizeof(keyflags), 1) ||
305 + !read_write(fd, (unsigned char *)&keylen, sizeof(keylen), 1) ||
306 + !(keydata = blockdata_read(fd, keylen)))
309 + else if (!(flags & F_NEG))
311 + if (!read_write(fd, (unsigned char *)&algo, sizeof(algo), 1) ||
312 + !read_write(fd, (unsigned char *)&keytag, sizeof(keytag), 1) ||
313 + !read_write(fd, (unsigned char *)&digest, sizeof(digest), 1) ||
314 + !read_write(fd, (unsigned char *)&keylen, sizeof(keylen), 1) ||
315 + !(keydata = blockdata_read(fd, keylen)))
321 + if (flags & F_DNSKEY)
323 + crecp->addr.key.algo = algo;
324 + crecp->addr.key.keytag = keytag;
325 + crecp->addr.key.flags = flags;
326 + crecp->addr.key.keylen = keylen;
327 + crecp->addr.key.keydata = keydata;
329 + else if (!(flags & F_NEG))
331 + crecp->addr.ds.algo = algo;
332 + crecp->addr.ds.keytag = keytag;
333 + crecp->addr.ds.digest = digest;
334 + crecp->addr.ds.keylen = keylen;
335 + crecp->addr.ds.keydata = keydata;
343 int cache_find_non_terminal(char *name, time_t now)
348 @@ -930,6 +930,10 @@ int main (int argc, char **argv)
353 + daemon->pipe_to_parent = -1;
354 + for (i = 0; i < MAX_PROCS; i++)
355 + daemon->tcp_pipes[i] = -1;
358 /* Using inotify, have to select a resolv file at startup */
359 @@ -1611,7 +1615,7 @@ static int set_dns_listeners(time_t now)
360 we don't need to explicitly arrange to wake up here */
361 if (listener->tcpfd != -1)
362 for (i = 0; i < MAX_PROCS; i++)
363 - if (daemon->tcp_pids[i] == 0)
364 + if (daemon->tcp_pids[i] == 0 && daemon->tcp_pipes[i] == -1)
366 poll_listen(listener->tcpfd, POLLIN);
368 @@ -1624,6 +1628,13 @@ static int set_dns_listeners(time_t now)
373 + if (!option_bool(OPT_DEBUG))
374 + for (i = 0; i < MAX_PROCS; i++)
375 + if (daemon->tcp_pipes[i] != -1)
376 + poll_listen(daemon->tcp_pipes[i], POLLIN);
382 @@ -1632,7 +1643,10 @@ static void check_dns_listeners(time_t n
383 struct serverfd *serverfdp;
384 struct listener *listener;
391 for (serverfdp = daemon->sfds; serverfdp; serverfdp = serverfdp->next)
392 if (poll_check(serverfdp->fd, POLLIN))
393 reply_query(serverfdp->fd, serverfdp->source_addr.sa.sa_family, now);
394 @@ -1642,7 +1656,26 @@ static void check_dns_listeners(time_t n
395 if (daemon->randomsocks[i].refcount != 0 &&
396 poll_check(daemon->randomsocks[i].fd, POLLIN))
397 reply_query(daemon->randomsocks[i].fd, daemon->randomsocks[i].family, now);
401 + /* Races. The child process can die before we read all of the data from the
402 + pipe, or vice versa. Therefore set tcp_pids to zero when we wait() the
403 + process, and tcp_pipes to -1 and close the FD when we read the last
404 + of the data - indicated by cache_recv_insert returning zero.
405 + The order of these events is indeterminate, and both are needed
406 + to free the process slot. Once the child process has gone, poll()
407 + returns POLLHUP, not POLLIN, so have to check for both here. */
408 + if (!option_bool(OPT_DEBUG))
409 + for (i = 0; i < MAX_PROCS; i++)
410 + if (daemon->tcp_pipes[i] != -1 &&
411 + poll_check(daemon->tcp_pipes[i], POLLIN | POLLHUP) &&
412 + !cache_recv_insert(now, daemon->tcp_pipes[i]))
414 + close(daemon->tcp_pipes[i]);
415 + daemon->tcp_pipes[i] = -1;
419 for (listener = daemon->listeners; listener; listener = listener->next)
421 if (listener->fd != -1 && poll_check(listener->fd, POLLIN))
422 @@ -1736,15 +1769,20 @@ static void check_dns_listeners(time_t n
423 while (retry_send(close(confd)));
426 - else if (!option_bool(OPT_DEBUG) && (p = fork()) != 0)
427 + else if (!option_bool(OPT_DEBUG) && pipe(pipefd) == 0 && (p = fork()) != 0)
430 + close(pipefd[1]); /* parent needs read pipe end. */
437 for (i = 0; i < MAX_PROCS; i++)
438 - if (daemon->tcp_pids[i] == 0)
439 + if (daemon->tcp_pids[i] == 0 && daemon->tcp_pipes[i] == -1)
441 daemon->tcp_pids[i] = p;
442 + daemon->tcp_pipes[i] = pipefd[0];
446 @@ -1761,7 +1799,7 @@ static void check_dns_listeners(time_t n
448 struct in_addr netmask;
454 netmask = iface->netmask;
455 @@ -1777,7 +1815,11 @@ static void check_dns_listeners(time_t n
456 /* Arrange for SIGALRM after CHILD_LIFETIME seconds to
457 terminate the process. */
458 if (!option_bool(OPT_DEBUG))
459 - alarm(CHILD_LIFETIME);
461 + alarm(CHILD_LIFETIME);
462 + close(pipefd[0]); /* close read end in child. */
463 + daemon->pipe_to_parent = pipefd[1];
467 /* start with no upstream connections. */
470 @@ -1091,6 +1091,8 @@ extern struct daemon {
471 size_t packet_len; /* " " */
472 struct randfd *rfd_save; /* " " */
473 pid_t tcp_pids[MAX_PROCS];
474 + int tcp_pipes[MAX_PROCS];
475 + int pipe_to_parent;
476 struct randfd randomsocks[RANDOM_SOCKS];
478 struct addrlist *interface_addrs; /* list of all addresses/prefix lengths associated with all local interfaces */
479 @@ -1152,6 +1154,7 @@ struct crec *cache_find_by_name(struct c
480 char *name, time_t now, unsigned int prot);
481 void cache_end_insert(void);
482 void cache_start_insert(void);
483 +int cache_recv_insert(time_t now, int fd);
484 struct crec *cache_insert(char *name, struct all_addr *addr,
485 time_t now, unsigned long ttl, unsigned short flags);
486 void cache_reload(void);
487 @@ -1174,6 +1177,8 @@ void blockdata_init(void);
488 void blockdata_report(void);
489 struct blockdata *blockdata_alloc(char *data, size_t len);
490 void *blockdata_retrieve(struct blockdata *block, size_t len, void *data);
491 +struct blockdata *blockdata_read(int fd, size_t len);
492 +void blockdata_write(struct blockdata *block, size_t len, int fd);
493 void blockdata_free(struct blockdata *blocks);