1 | /*
|
---|
2 | Unix SMB/CIFS implementation.
|
---|
3 | main select loop and event handling
|
---|
4 | Copyright (C) Andrew Tridgell 2003-2005
|
---|
5 | Copyright (C) Stefan Metzmacher 2005-2009
|
---|
6 |
|
---|
7 | ** NOTE! The following LGPL license applies to the tevent
|
---|
8 | ** library. This does NOT imply that all of Samba is released
|
---|
9 | ** under the LGPL
|
---|
10 |
|
---|
11 | This library is free software; you can redistribute it and/or
|
---|
12 | modify it under the terms of the GNU Lesser General Public
|
---|
13 | License as published by the Free Software Foundation; either
|
---|
14 | version 3 of the License, or (at your option) any later version.
|
---|
15 |
|
---|
16 | This library is distributed in the hope that it will be useful,
|
---|
17 | but WITHOUT ANY WARRANTY; without even the implied warranty of
|
---|
18 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
---|
19 | Lesser General Public License for more details.
|
---|
20 |
|
---|
21 | You should have received a copy of the GNU Lesser General Public
|
---|
22 | License along with this library; if not, see <http://www.gnu.org/licenses/>.
|
---|
23 | */
|
---|
24 |
|
---|
25 | #include "replace.h"
|
---|
26 | #include "system/filesys.h"
|
---|
27 | #include "system/select.h"
|
---|
28 | #include "tevent.h"
|
---|
29 | #include "tevent_util.h"
|
---|
30 | #include "tevent_internal.h"
|
---|
31 |
|
---|
struct poll_event_context {
	/* a pointer back to the generic event_context */
	struct tevent_context *ev;

	/*
	 * A DLIST for fresh fde's added by poll_event_add_fd but not
	 * picked up yet by poll_event_loop_once
	 */
	struct tevent_fd *fresh;
	/*
	 * A DLIST for disabled fde's (flags == 0), kept out of the
	 * pollfd array until they are re-enabled.
	 */
	struct tevent_fd *disabled;
	/*
	 * one or more events were deleted or disabled; tells
	 * poll_event_setup_fresh to compact the arrays below
	 */
	bool deleted;

	/*
	 * These two arrays are maintained together: fds[i] is the
	 * pollfd for the tevent_fd in fdes[i], and each active fde
	 * stores its index i in fde->additional_flags.
	 */
	struct pollfd *fds;
	struct tevent_fd **fdes;
	unsigned num_fds;

	/*
	 * Signal fd to wake the poll() thread: the write end of a
	 * pipe whose read end occupies fds[0] (poll_mt only, -1
	 * otherwise)
	 */
	int signal_fd;
};
|
---|
62 |
|
---|
63 | static int poll_event_context_destructor(struct poll_event_context *poll_ev)
|
---|
64 | {
|
---|
65 | struct tevent_fd *fd, *fn;
|
---|
66 |
|
---|
67 | for (fd = poll_ev->fresh; fd; fd = fn) {
|
---|
68 | fn = fd->next;
|
---|
69 | fd->event_ctx = NULL;
|
---|
70 | DLIST_REMOVE(poll_ev->fresh, fd);
|
---|
71 | }
|
---|
72 |
|
---|
73 | for (fd = poll_ev->disabled; fd; fd = fn) {
|
---|
74 | fn = fd->next;
|
---|
75 | fd->event_ctx = NULL;
|
---|
76 | DLIST_REMOVE(poll_ev->disabled, fd);
|
---|
77 | }
|
---|
78 |
|
---|
79 | if (poll_ev->signal_fd == -1) {
|
---|
80 | /*
|
---|
81 | * Non-threaded, no signal pipe
|
---|
82 | */
|
---|
83 | return 0;
|
---|
84 | }
|
---|
85 |
|
---|
86 | close(poll_ev->signal_fd);
|
---|
87 | poll_ev->signal_fd = -1;
|
---|
88 |
|
---|
89 | if (poll_ev->num_fds == 0) {
|
---|
90 | return 0;
|
---|
91 | }
|
---|
92 | if (poll_ev->fds[0].fd != -1) {
|
---|
93 | close(poll_ev->fds[0].fd);
|
---|
94 | poll_ev->fds[0].fd = -1;
|
---|
95 | }
|
---|
96 | return 0;
|
---|
97 | }
|
---|
98 |
|
---|
99 | /*
|
---|
100 | create a poll_event_context structure.
|
---|
101 | */
|
---|
102 | static int poll_event_context_init(struct tevent_context *ev)
|
---|
103 | {
|
---|
104 | struct poll_event_context *poll_ev;
|
---|
105 |
|
---|
106 | /*
|
---|
107 | * we might be called during tevent_re_initialise()
|
---|
108 | * which means we need to free our old additional_data
|
---|
109 | * in order to detach old fd events from the
|
---|
110 | * poll_ev->fresh list
|
---|
111 | */
|
---|
112 | TALLOC_FREE(ev->additional_data);
|
---|
113 |
|
---|
114 | poll_ev = talloc_zero(ev, struct poll_event_context);
|
---|
115 | if (poll_ev == NULL) {
|
---|
116 | return -1;
|
---|
117 | }
|
---|
118 | poll_ev->ev = ev;
|
---|
119 | poll_ev->signal_fd = -1;
|
---|
120 | ev->additional_data = poll_ev;
|
---|
121 | talloc_set_destructor(poll_ev, poll_event_context_destructor);
|
---|
122 | return 0;
|
---|
123 | }
|
---|
124 |
|
---|
/*
 * Switch a file descriptor to non-blocking mode, preserving its
 * other status flags. Returns false on any fcntl() failure.
 */
static bool set_nonblock(int fd)
{
	int flags = fcntl(fd, F_GETFL, 0);

	if (flags == -1) {
		return false;
	}

	if (fcntl(fd, F_SETFL, flags | O_NONBLOCK) == -1) {
		return false;
	}

	return true;
}
|
---|
137 |
|
---|
/*
 * Create a poll_event_context for the multi-threaded ("poll_mt")
 * backend: the plain context plus a non-blocking self-pipe whose read
 * end permanently occupies pollfd slot 0, so other threads can wake
 * the poll() thread by writing to signal_fd.
 */
static int poll_event_context_init_mt(struct tevent_context *ev)
{
	struct poll_event_context *poll_ev;
	struct pollfd *pfd;
	int fds[2];
	int ret;

	/* build the plain poll context first */
	ret = poll_event_context_init(ev);
	if (ret == -1) {
		return ret;
	}

	poll_ev = talloc_get_type_abort(
		ev->additional_data, struct poll_event_context);

	/* reserve slot 0 of the pollfd array for the wakeup pipe */
	poll_ev->fds = talloc_zero(poll_ev, struct pollfd);
	if (poll_ev->fds == NULL) {
		return -1;
	}

	ret = pipe(fds);
	if (ret == -1) {
		return -1;
	}

	if (!set_nonblock(fds[0]) || !set_nonblock(fds[1])) {
		close(fds[0]);
		close(fds[1]);
		return -1;
	}

	/* write end: what poll_event_wake_pollthread() writes to */
	poll_ev->signal_fd = fds[1];

	/* read end: polled for input in slot 0 and drained each loop */
	pfd = &poll_ev->fds[0];
	pfd->fd = fds[0];
	pfd->events = (POLLIN|POLLHUP);

	poll_ev->num_fds = 1;

	/*
	 * NOTE(review): the destructor was already installed by
	 * poll_event_context_init(); re-setting it here is redundant
	 * but harmless.
	 */
	talloc_set_destructor(poll_ev, poll_event_context_destructor);

	return 0;
}
|
---|
181 |
|
---|
182 | static void poll_event_wake_pollthread(struct poll_event_context *poll_ev)
|
---|
183 | {
|
---|
184 | char c;
|
---|
185 | ssize_t ret;
|
---|
186 |
|
---|
187 | if (poll_ev->signal_fd == -1) {
|
---|
188 | return;
|
---|
189 | }
|
---|
190 | c = 0;
|
---|
191 | do {
|
---|
192 | ret = write(poll_ev->signal_fd, &c, sizeof(c));
|
---|
193 | } while ((ret == -1) && (errno == EINTR));
|
---|
194 | }
|
---|
195 |
|
---|
196 | static void poll_event_drain_signal_fd(struct poll_event_context *poll_ev)
|
---|
197 | {
|
---|
198 | char buf[16];
|
---|
199 | ssize_t ret;
|
---|
200 | int fd;
|
---|
201 |
|
---|
202 | if (poll_ev->signal_fd == -1) {
|
---|
203 | return;
|
---|
204 | }
|
---|
205 |
|
---|
206 | if (poll_ev->num_fds < 1) {
|
---|
207 | return;
|
---|
208 | }
|
---|
209 | fd = poll_ev->fds[0].fd;
|
---|
210 |
|
---|
211 | do {
|
---|
212 | ret = read(fd, buf, sizeof(buf));
|
---|
213 | } while (ret == sizeof(buf));
|
---|
214 | }
|
---|
215 |
|
---|
/*
  destroy an fd_event

  An fde lives in one of two places: on the fresh/disabled DLIST
  (additional_flags == UINT64_MAX, additional_data points at the list
  head), or in the pollfd arrays (additional_flags is its array
  index). Detach it from whichever holds it, then fall through to the
  common destructor.
*/
static int poll_event_fd_destructor(struct tevent_fd *fde)
{
	struct tevent_context *ev = fde->event_ctx;
	struct poll_event_context *poll_ev;
	uint64_t del_idx = fde->additional_flags;

	/* already detached from any event context */
	if (ev == NULL) {
		goto done;
	}

	poll_ev = talloc_get_type_abort(
		ev->additional_data, struct poll_event_context);

	if (del_idx == UINT64_MAX) {
		/* still on the fresh or disabled list */
		struct tevent_fd **listp =
			(struct tevent_fd **)fde->additional_data;

		DLIST_REMOVE((*listp), fde);
		goto done;
	}

	/*
	 * In the arrays: clear the slot and let
	 * poll_event_setup_fresh() compact it later; wake the poll
	 * thread so it stops watching this fd.
	 */
	poll_ev->fdes[del_idx] = NULL;
	poll_ev->deleted = true;
	poll_event_wake_pollthread(poll_ev);
done:
	return tevent_common_fd_destructor(fde);
}
|
---|
246 |
|
---|
247 | static void poll_event_schedule_immediate(struct tevent_immediate *im,
|
---|
248 | struct tevent_context *ev,
|
---|
249 | tevent_immediate_handler_t handler,
|
---|
250 | void *private_data,
|
---|
251 | const char *handler_name,
|
---|
252 | const char *location)
|
---|
253 | {
|
---|
254 | struct poll_event_context *poll_ev = talloc_get_type_abort(
|
---|
255 | ev->additional_data, struct poll_event_context);
|
---|
256 |
|
---|
257 | tevent_common_schedule_immediate(im, ev, handler, private_data,
|
---|
258 | handler_name, location);
|
---|
259 | poll_event_wake_pollthread(poll_ev);
|
---|
260 | }
|
---|
261 |
|
---|
262 | /*
|
---|
263 | Private function called by "standard" backend fallback.
|
---|
264 | Note this only allows fallback to "poll" backend, not "poll-mt".
|
---|
265 | */
|
---|
266 | _PRIVATE_ void tevent_poll_event_add_fd_internal(struct tevent_context *ev,
|
---|
267 | struct tevent_fd *fde)
|
---|
268 | {
|
---|
269 | struct poll_event_context *poll_ev = talloc_get_type_abort(
|
---|
270 | ev->additional_data, struct poll_event_context);
|
---|
271 | struct tevent_fd **listp;
|
---|
272 |
|
---|
273 | if (fde->flags != 0) {
|
---|
274 | listp = &poll_ev->fresh;
|
---|
275 | } else {
|
---|
276 | listp = &poll_ev->disabled;
|
---|
277 | }
|
---|
278 |
|
---|
279 | fde->additional_flags = UINT64_MAX;
|
---|
280 | fde->additional_data = listp;
|
---|
281 |
|
---|
282 | DLIST_ADD((*listp), fde);
|
---|
283 | talloc_set_destructor(fde, poll_event_fd_destructor);
|
---|
284 | }
|
---|
285 |
|
---|
286 | /*
|
---|
287 | add a fd based event
|
---|
288 | return NULL on failure (memory allocation error)
|
---|
289 | */
|
---|
290 | static struct tevent_fd *poll_event_add_fd(struct tevent_context *ev,
|
---|
291 | TALLOC_CTX *mem_ctx,
|
---|
292 | int fd, uint16_t flags,
|
---|
293 | tevent_fd_handler_t handler,
|
---|
294 | void *private_data,
|
---|
295 | const char *handler_name,
|
---|
296 | const char *location)
|
---|
297 | {
|
---|
298 | struct poll_event_context *poll_ev = talloc_get_type_abort(
|
---|
299 | ev->additional_data, struct poll_event_context);
|
---|
300 | struct tevent_fd *fde;
|
---|
301 |
|
---|
302 | if (fd < 0) {
|
---|
303 | return NULL;
|
---|
304 | }
|
---|
305 |
|
---|
306 | fde = talloc(mem_ctx ? mem_ctx : ev, struct tevent_fd);
|
---|
307 | if (fde == NULL) {
|
---|
308 | return NULL;
|
---|
309 | }
|
---|
310 | fde->event_ctx = ev;
|
---|
311 | fde->fd = fd;
|
---|
312 | fde->flags = flags;
|
---|
313 | fde->handler = handler;
|
---|
314 | fde->close_fn = NULL;
|
---|
315 | fde->private_data = private_data;
|
---|
316 | fde->handler_name = handler_name;
|
---|
317 | fde->location = location;
|
---|
318 | fde->additional_flags = UINT64_MAX;
|
---|
319 | fde->additional_data = NULL;
|
---|
320 |
|
---|
321 | tevent_poll_event_add_fd_internal(ev, fde);
|
---|
322 | poll_event_wake_pollthread(poll_ev);
|
---|
323 |
|
---|
324 | /*
|
---|
325 | * poll_event_loop_poll will take care of the rest in
|
---|
326 | * poll_event_setup_fresh
|
---|
327 | */
|
---|
328 | return fde;
|
---|
329 | }
|
---|
330 |
|
---|
/*
  set the fd event flags

  Three cases:
  - fde still on the fresh/disabled list (idx == UINT64_MAX):
    move it to whichever list now matches its flags;
  - flags dropped to 0 while in the arrays: pull it out of the
    arrays and park it on the disabled list;
  - otherwise: just rewrite the pollfd events mask in place.
  Each path wakes the poll thread so poll_mt notices the change.
*/
static void poll_event_set_fd_flags(struct tevent_fd *fde, uint16_t flags)
{
	struct tevent_context *ev = fde->event_ctx;
	struct poll_event_context *poll_ev;
	uint64_t idx = fde->additional_flags;
	uint16_t pollflags;

	/* detached fde, nothing to update */
	if (ev == NULL) {
		return;
	}
	poll_ev = talloc_get_type_abort(
		ev->additional_data, struct poll_event_context);

	fde->flags = flags;

	if (idx == UINT64_MAX) {
		struct tevent_fd **listp =
			(struct tevent_fd **)fde->additional_data;

		/*
		 * We move it between the fresh and disabled lists.
		 */
		DLIST_REMOVE((*listp), fde);
		tevent_poll_event_add_fd_internal(ev, fde);
		poll_event_wake_pollthread(poll_ev);
		return;
	}

	if (fde->flags == 0) {
		/*
		 * We need to remove it from the array
		 * and move it to the disabled list.
		 */
		poll_ev->fdes[idx] = NULL;
		poll_ev->deleted = true;
		DLIST_REMOVE(ev->fd_events, fde);
		tevent_poll_event_add_fd_internal(ev, fde);
		poll_event_wake_pollthread(poll_ev);
		return;
	}

	/* in the arrays and still active: translate flags in place */
	pollflags = 0;

	if (flags & TEVENT_FD_READ) {
		pollflags |= (POLLIN|POLLHUP);
	}
	if (flags & TEVENT_FD_WRITE) {
		pollflags |= (POLLOUT);
	}
	poll_ev->fds[idx].events = pollflags;

	poll_event_wake_pollthread(poll_ev);
}
|
---|
387 |
|
---|
/*
 * Bring the pollfd arrays up to date before calling poll():
 * 1. if anything was deleted/disabled, compact the arrays by moving
 *    the last entry into each freed slot (order is not preserved);
 * 2. grow the arrays if needed and append every fde from the fresh
 *    list, moving it onto ev->fd_events.
 * Returns false only on allocation failure.
 */
static bool poll_event_setup_fresh(struct tevent_context *ev,
				   struct poll_event_context *poll_ev)
{
	struct tevent_fd *fde, *next;
	unsigned num_fresh, num_fds;

	if (poll_ev->deleted) {
		/* slot 0 is the wakeup pipe in poll_mt; never compact it */
		unsigned first_fd = (poll_ev->signal_fd != -1) ? 1 : 0;
		unsigned i;

		for (i=first_fd; i < poll_ev->num_fds;) {
			fde = poll_ev->fdes[i];
			if (fde != NULL) {
				i++;
				continue;
			}

			/*
			 * This fde was talloc_free()'ed. Delete it
			 * from the arrays
			 */
			poll_ev->num_fds -= 1;
			if (poll_ev->num_fds == i) {
				break;
			}
			/* swap-delete: move the last entry into the hole */
			poll_ev->fds[i] = poll_ev->fds[poll_ev->num_fds];
			poll_ev->fdes[i] = poll_ev->fdes[poll_ev->num_fds];
			if (poll_ev->fdes[i] != NULL) {
				/* keep the fde's stored index in sync */
				poll_ev->fdes[i]->additional_flags = i;
			}
			/* do not advance i: re-inspect the moved entry */
		}
		poll_ev->deleted = false;
	}

	if (poll_ev->fresh == NULL) {
		return true;
	}

	num_fresh = 0;
	for (fde = poll_ev->fresh; fde; fde = fde->next) {
		num_fresh += 1;
	}
	num_fds = poll_ev->num_fds + num_fresh;

	/*
	 * We check the length of fdes here. It is the last one
	 * enlarged, so if the realloc for poll_fd->fdes fails,
	 * poll_fd->fds will have at least the size of poll_fd->fdes
	 */

	if (num_fds >= talloc_array_length(poll_ev->fdes)) {
		struct pollfd *tmp_fds;
		struct tevent_fd **tmp_fdes;
		unsigned array_length;

		array_length = (num_fds + 15) & ~15; /* round up to 16 */

		tmp_fds = talloc_realloc(
			poll_ev, poll_ev->fds, struct pollfd, array_length);
		if (tmp_fds == NULL) {
			return false;
		}
		poll_ev->fds = tmp_fds;

		tmp_fdes = talloc_realloc(
			poll_ev, poll_ev->fdes, struct tevent_fd *,
			array_length);
		if (tmp_fdes == NULL) {
			return false;
		}
		poll_ev->fdes = tmp_fdes;
	}

	/* append each fresh fde and move it to the active list */
	for (fde = poll_ev->fresh; fde; fde = next) {
		struct pollfd *pfd;

		pfd = &poll_ev->fds[poll_ev->num_fds];

		pfd->fd = fde->fd;
		pfd->events = 0;
		pfd->revents = 0;

		if (fde->flags & TEVENT_FD_READ) {
			pfd->events |= (POLLIN|POLLHUP);
		}
		if (fde->flags & TEVENT_FD_WRITE) {
			pfd->events |= (POLLOUT);
		}

		/* record the fde's slot index for later lookups */
		fde->additional_flags = poll_ev->num_fds;
		poll_ev->fdes[poll_ev->num_fds] = fde;

		/* save next before DLIST_REMOVE unlinks fde */
		next = fde->next;
		DLIST_REMOVE(poll_ev->fresh, fde);
		DLIST_ADD(ev->fd_events, fde);

		poll_ev->num_fds += 1;
	}
	return true;
}
|
---|
488 |
|
---|
/*
  event loop handling using poll()

  One wait-and-dispatch pass: check pending signals, convert the
  timeout to milliseconds, drain the wakeup pipe, fold in fresh
  fde's, poll(), then dispatch at most ONE fd handler (returning
  after the first so handlers may freely add/remove events).
  Returns 0 on success, -1 on allocation failure in setup_fresh.
*/
static int poll_event_loop_poll(struct tevent_context *ev,
				struct timeval *tvalp)
{
	struct poll_event_context *poll_ev = talloc_get_type_abort(
		ev->additional_data, struct poll_event_context);
	int pollrtn;
	int timeout = -1;
	int poll_errno;
	struct tevent_fd *fde = NULL;
	struct tevent_fd *next = NULL;
	unsigned i;

	if (ev->signal_events && tevent_common_check_signal(ev)) {
		return 0;
	}

	if (tvalp != NULL) {
		/* timeval -> milliseconds, rounding microseconds up */
		timeout = tvalp->tv_sec * 1000;
		timeout += (tvalp->tv_usec + 999) / 1000;
	}

	poll_event_drain_signal_fd(poll_ev);

	if (!poll_event_setup_fresh(ev, poll_ev)) {
		return -1;
	}

	tevent_trace_point_callback(poll_ev->ev, TEVENT_TRACE_BEFORE_WAIT);
	pollrtn = poll(poll_ev->fds, poll_ev->num_fds, timeout);
	/* save errno: the trace callback below may clobber it */
	poll_errno = errno;
	tevent_trace_point_callback(poll_ev->ev, TEVENT_TRACE_AFTER_WAIT);

	if (pollrtn == -1 && poll_errno == EINTR && ev->signal_events) {
		/* interrupted by a signal we manage: handle and return */
		tevent_common_check_signal(ev);
		return 0;
	}

	if (pollrtn == 0 && tvalp) {
		/* we don't care about a possible delay here */
		tevent_common_loop_timer_delay(ev);
		return 0;
	}

	if (pollrtn <= 0) {
		/*
		 * No fd's ready
		 */
		return 0;
	}

	/* at least one file descriptor is ready - check
	   which ones and call the handler, being careful to allow
	   the handler to remove itself when called */

	for (fde = ev->fd_events; fde; fde = next) {
		uint64_t idx = fde->additional_flags;
		struct pollfd *pfd;
		uint16_t flags = 0;

		next = fde->next;

		/* not (yet) in the pollfd arrays: nothing to report */
		if (idx == UINT64_MAX) {
			continue;
		}

		pfd = &poll_ev->fds[idx];

		if (pfd->revents & POLLNVAL) {
			/*
			 * the socket is dead! this should never
			 * happen as the socket should have first been
			 * made readable and that should have removed
			 * the event, so this must be a bug.
			 *
			 * We ignore it here to match the epoll
			 * behavior.
			 */
			tevent_debug(ev, TEVENT_DEBUG_ERROR,
				     "POLLNVAL on fde[%p] fd[%d] - disabling\n",
				     fde, pfd->fd);
			poll_ev->fdes[idx] = NULL;
			poll_ev->deleted = true;
			DLIST_REMOVE(ev->fd_events, fde);
			fde->event_ctx = NULL;
			continue;
		}

		if (pfd->revents & (POLLHUP|POLLERR)) {
			/* If we only wait for TEVENT_FD_WRITE, we
			   should not tell the event handler about it,
			   and remove the writable flag, as we only
			   report errors when waiting for read events
			   to match the select behavior. */
			if (!(fde->flags & TEVENT_FD_READ)) {
				TEVENT_FD_NOT_WRITEABLE(fde);
				continue;
			}
			flags |= TEVENT_FD_READ;
		}
		if (pfd->revents & POLLIN) {
			flags |= TEVENT_FD_READ;
		}
		if (pfd->revents & POLLOUT) {
			flags |= TEVENT_FD_WRITE;
		}
		/*
		 * Note that fde->flags could be changed when using
		 * the poll_mt backend together with threads,
		 * that why we need to check pfd->revents and fde->flags
		 */
		flags &= fde->flags;
		if (flags != 0) {
			/* demote for fairness, dispatch one handler, stop */
			DLIST_DEMOTE(ev->fd_events, fde);
			fde->handler(ev, fde, flags, fde->private_data);
			return 0;
		}
	}

	/*
	 * Second sweep over the raw arrays: catch POLLNVAL on slots
	 * whose fde is no longer on ev->fd_events (or is NULL).
	 */
	for (i = 0; i < poll_ev->num_fds; i++) {
		if (poll_ev->fds[i].revents & POLLNVAL) {
			/*
			 * the socket is dead! this should never
			 * happen as the socket should have first been
			 * made readable and that should have removed
			 * the event, so this must be a bug or
			 * a race in the poll_mt usage.
			 */
			fde = poll_ev->fdes[i];
			tevent_debug(ev, TEVENT_DEBUG_WARNING,
				     "POLLNVAL on dangling fd[%d] fde[%p] - disabling\n",
				     poll_ev->fds[i].fd, fde);
			poll_ev->fdes[i] = NULL;
			poll_ev->deleted = true;
			if (fde != NULL) {
				DLIST_REMOVE(ev->fd_events, fde);
				fde->event_ctx = NULL;
			}
		}
	}

	return 0;
}
|
---|
634 |
|
---|
635 | /*
|
---|
636 | do a single event loop using the events defined in ev
|
---|
637 | */
|
---|
638 | static int poll_event_loop_once(struct tevent_context *ev,
|
---|
639 | const char *location)
|
---|
640 | {
|
---|
641 | struct timeval tval;
|
---|
642 |
|
---|
643 | if (ev->signal_events &&
|
---|
644 | tevent_common_check_signal(ev)) {
|
---|
645 | return 0;
|
---|
646 | }
|
---|
647 |
|
---|
648 | if (ev->immediate_events &&
|
---|
649 | tevent_common_loop_immediate(ev)) {
|
---|
650 | return 0;
|
---|
651 | }
|
---|
652 |
|
---|
653 | tval = tevent_common_loop_timer_delay(ev);
|
---|
654 | if (tevent_timeval_is_zero(&tval)) {
|
---|
655 | return 0;
|
---|
656 | }
|
---|
657 |
|
---|
658 | return poll_event_loop_poll(ev, &tval);
|
---|
659 | }
|
---|
660 |
|
---|
661 | static int poll_event_loop_wait(struct tevent_context *ev,
|
---|
662 | const char *location)
|
---|
663 | {
|
---|
664 | struct poll_event_context *poll_ev = talloc_get_type_abort(
|
---|
665 | ev->additional_data, struct poll_event_context);
|
---|
666 |
|
---|
667 | /*
|
---|
668 | * loop as long as we have events pending
|
---|
669 | */
|
---|
670 | while (ev->fd_events ||
|
---|
671 | ev->timer_events ||
|
---|
672 | ev->immediate_events ||
|
---|
673 | ev->signal_events ||
|
---|
674 | poll_ev->fresh ||
|
---|
675 | poll_ev->disabled) {
|
---|
676 | int ret;
|
---|
677 | ret = _tevent_loop_once(ev, location);
|
---|
678 | if (ret != 0) {
|
---|
679 | tevent_debug(ev, TEVENT_DEBUG_FATAL,
|
---|
680 | "_tevent_loop_once() failed: %d - %s\n",
|
---|
681 | ret, strerror(errno));
|
---|
682 | return ret;
|
---|
683 | }
|
---|
684 | }
|
---|
685 |
|
---|
686 | tevent_debug(ev, TEVENT_DEBUG_WARNING,
|
---|
687 | "poll_event_loop_wait() out of events\n");
|
---|
688 | return 0;
|
---|
689 | }
|
---|
690 |
|
---|
/* backend ops for the single-threaded "poll" backend */
static const struct tevent_ops poll_event_ops = {
	.context_init		= poll_event_context_init,
	.add_fd			= poll_event_add_fd,
	.set_fd_close_fn	= tevent_common_fd_set_close_fn,
	.get_fd_flags		= tevent_common_fd_get_flags,
	.set_fd_flags		= poll_event_set_fd_flags,
	.add_timer		= tevent_common_add_timer_v2,
	/* no wakeup needed without a poll thread: use the common path */
	.schedule_immediate	= tevent_common_schedule_immediate,
	.add_signal		= tevent_common_add_signal,
	.loop_once		= poll_event_loop_once,
	.loop_wait		= poll_event_loop_wait,
};
|
---|
703 |
|
---|
/* register the single-threaded "poll" backend with tevent */
_PRIVATE_ bool tevent_poll_init(void)
{
	return tevent_register_backend("poll", &poll_event_ops);
}
|
---|
708 |
|
---|
/*
 * backend ops for the thread-aware "poll_mt" backend: differs from
 * poll_event_ops in context_init (adds the wakeup pipe) and
 * schedule_immediate (wakes a blocked poll() thread)
 */
static const struct tevent_ops poll_event_mt_ops = {
	.context_init		= poll_event_context_init_mt,
	.add_fd			= poll_event_add_fd,
	.set_fd_close_fn	= tevent_common_fd_set_close_fn,
	.get_fd_flags		= tevent_common_fd_get_flags,
	.set_fd_flags		= poll_event_set_fd_flags,
	.add_timer		= tevent_common_add_timer_v2,
	.schedule_immediate	= poll_event_schedule_immediate,
	.add_signal		= tevent_common_add_signal,
	.loop_once		= poll_event_loop_once,
	.loop_wait		= poll_event_loop_wait,
};
|
---|
721 |
|
---|
/* register the thread-aware "poll_mt" backend with tevent */
_PRIVATE_ bool tevent_poll_mt_init(void)
{
	return tevent_register_backend("poll_mt", &poll_event_mt_ops);
}
|
---|