/*
   Unix SMB/CIFS implementation.
   Timed event library.
   Copyright (C) Andrew Tridgell 1992-1998
   Copyright (C) Volker Lendecke 2005

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "includes.h"
#include "lib/tevent/tevent_internal.h"
#include "../lib/util/select.h"
#include "system/select.h"

struct tevent_poll_private {
	/*
	 * Index from file descriptor into the pollfd array
	 */
	int *pollfd_idx;

	/*
	 * Cache for s3_event_loop_once to avoid reallocs
	 */
	struct pollfd *pfds;
};

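/*
  Fetch the poll-specific private state hanging off the tevent context's
  additional_data, allocating it on first use.
*/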
static struct tevent_poll_private *tevent_get_poll_private(
	struct tevent_context *ev)
{
	struct tevent_poll_private *state;

	state = (struct tevent_poll_private *)ev->additional_data;
	if (state == NULL) {
		state = TALLOC_ZERO_P(ev, struct tevent_poll_private);
		ev->additional_data = (void *)state;
		if (state == NULL) {
			DEBUG(10, ("talloc failed\n"));
		}
	}
	return state;
}

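/*
  Count the fd events that are interested in reading or writing, and
  report the highest file descriptor number among them.
*/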
static void count_fds(struct tevent_context *ev,
		      int *pnum_fds, int *pmax_fd)
{
	struct tevent_fd *fde;
	int num_fds = 0;
	int max_fd = 0;

	for (fde = ev->fd_events; fde != NULL; fde = fde->next) {
		if (fde->flags & (EVENT_FD_READ|EVENT_FD_WRITE)) {
			num_fds += 1;
			if (fde->fd > max_fd) {
				max_fd = fde->fd;
			}
		}
	}
	*pnum_fds = num_fds;
	*pmax_fd = max_fd;
}

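/*
  Append this context's fd events to a caller-supplied pollfd array,
  growing the array as needed, and lower *ptimeout to the time until
  the next immediate or timed event.
*/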
bool event_add_to_poll_args(struct tevent_context *ev, TALLOC_CTX *mem_ctx,
			    struct pollfd **pfds, int *pnum_pfds,
			    int *ptimeout)
{
	struct tevent_poll_private *state;
	struct tevent_fd *fde;
	int i, num_fds, max_fd, num_pollfds, idx_len;
	struct pollfd *fds;
	struct timeval now, diff;
	int timeout;

	state = tevent_get_poll_private(ev);
	if (state == NULL) {
		return false;
	}
	count_fds(ev, &num_fds, &max_fd);

	idx_len = max_fd+1;

	if (talloc_array_length(state->pollfd_idx) < idx_len) {
		state->pollfd_idx = TALLOC_REALLOC_ARRAY(
			state, state->pollfd_idx, int, idx_len);
		if (state->pollfd_idx == NULL) {
			DEBUG(10, ("talloc_realloc failed\n"));
			return false;
		}
	}

	fds = *pfds;
	num_pollfds = *pnum_pfds;

	/*
	 * The +1 is for the sys_poll calling convention. It expects
	 * an array 1 longer for the signal pipe
	 */

	if (talloc_array_length(fds) < num_pollfds + num_fds + 1) {
		fds = TALLOC_REALLOC_ARRAY(mem_ctx, fds, struct pollfd,
					   num_pollfds + num_fds + 1);
		if (fds == NULL) {
			DEBUG(10, ("talloc_realloc failed\n"));
			return false;
		}
	}

	memset(&fds[num_pollfds], 0, sizeof(struct pollfd) * num_fds);

	/*
	 * This needs tuning. We need to cope with multiple fde's for a file
	 * descriptor. The problem is that we need to re-use pollfd_idx across
	 * calls for efficiency. One way would be a direct bitmask that might
	 * be initialized quicker, but our bitmap_init implementation is
	 * pretty heavy-weight as well.
	 */
	for (i=0; i<idx_len; i++) {
		state->pollfd_idx[i] = -1;
	}

	for (fde = ev->fd_events; fde; fde = fde->next) {
		struct pollfd *pfd;

		if ((fde->flags & (EVENT_FD_READ|EVENT_FD_WRITE)) == 0) {
			continue;
		}

		if (state->pollfd_idx[fde->fd] == -1) {
			/*
			 * We haven't seen this fd yet. Allocate a new pollfd.
			 */
			state->pollfd_idx[fde->fd] = num_pollfds;
			pfd = &fds[num_pollfds];
			num_pollfds += 1;
		} else {
			/*
			 * We have already seen this fd. OR in the flags.
			 */
			pfd = &fds[state->pollfd_idx[fde->fd]];
		}

		pfd->fd = fde->fd;

		if (fde->flags & EVENT_FD_READ) {
			pfd->events |= (POLLIN|POLLHUP);
		}
		if (fde->flags & EVENT_FD_WRITE) {
			pfd->events |= POLLOUT;
		}
	}
	*pfds = fds;
	*pnum_pfds = num_pollfds;

	if (ev->immediate_events != NULL) {
		*ptimeout = 0;
		return true;
	}
	if (ev->timer_events == NULL) {
		*ptimeout = MIN(*ptimeout, INT_MAX);
		return true;
	}

	now = timeval_current();
	diff = timeval_until(&now, &ev->timer_events->next_event);
	timeout = timeval_to_msec(diff);

	if (timeout < *ptimeout) {
		*ptimeout = timeout;
	}

	return true;
}

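/*
  Dispatch at most one pending event: a signal, an immediate event, an
  expired timer, or the first fd event whose flags match the revents in
  the pollfd array filled in by poll(). Returns true if a handler ran.
*/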
bool run_events_poll(struct tevent_context *ev, int pollrtn,
		     struct pollfd *pfds, int num_pfds)
{
	struct tevent_poll_private *state;
	int *pollfd_idx;
	struct tevent_fd *fde;
	struct timeval now;

	if (ev->signal_events &&
	    tevent_common_check_signal(ev)) {
		return true;
	}

	if (ev->immediate_events &&
	    tevent_common_loop_immediate(ev)) {
		return true;
	}

	GetTimeOfDay(&now);

	if ((ev->timer_events != NULL)
	    && (timeval_compare(&now, &ev->timer_events->next_event) >= 0)) {
		/* this older events system did not auto-free timed
		   events on running them, and had a race condition
		   where the event could be called twice if the
		   talloc_free of the te happened after the callback
		   made a call which invoked the event loop. To avoid
		   this while still allowing old code which frees the
		   te, we need to create a temporary context which
		   will be used to ensure the te is freed. We also
		   remove the te from the timed event list before we
		   call the handler, to ensure we can't loop */

		struct tevent_timer *te = ev->timer_events;
		TALLOC_CTX *tmp_ctx = talloc_new(ev);

		DEBUG(10, ("Running timed event \"%s\" %p\n",
			   ev->timer_events->handler_name, ev->timer_events));

		DLIST_REMOVE(ev->timer_events, te);
		talloc_steal(tmp_ctx, te);

		te->handler(ev, te, now, te->private_data);

		talloc_free(tmp_ctx);
		return true;
	}

	if (pollrtn <= 0) {
		/*
		 * No fd ready
		 */
		return false;
	}

	state = (struct tevent_poll_private *)ev->additional_data;
	pollfd_idx = state->pollfd_idx;

	for (fde = ev->fd_events; fde; fde = fde->next) {
		struct pollfd *pfd;
		uint16 flags = 0;

		if ((fde->flags & (EVENT_FD_READ|EVENT_FD_WRITE)) == 0) {
			continue;
		}

		if (pollfd_idx[fde->fd] >= num_pfds) {
			DEBUG(1, ("internal error: pollfd_idx[fde->fd] (%d) "
				  ">= num_pfds (%d)\n", pollfd_idx[fde->fd],
				  num_pfds));
			return false;
		}
		pfd = &pfds[pollfd_idx[fde->fd]];

		if (pfd->fd != fde->fd) {
			DEBUG(1, ("internal error: pfd->fd (%d) "
				  "!= fde->fd (%d)\n", pfd->fd,
				  fde->fd));
			return false;
		}

		if (pfd->revents & (POLLHUP|POLLERR)) {
			/* If we only wait for EVENT_FD_WRITE, we
			   should not tell the event handler about it,
			   and remove the writable flag, as we only
			   report errors when waiting for read events
			   to match the select behavior. */
			if (!(fde->flags & EVENT_FD_READ)) {
				EVENT_FD_NOT_WRITEABLE(fde);
				continue;
			}
			flags |= EVENT_FD_READ;
		}

		if (pfd->revents & POLLIN) {
			flags |= EVENT_FD_READ;
		}
		if (pfd->revents & POLLOUT) {
			flags |= EVENT_FD_WRITE;
		}
		if (flags & fde->flags) {
			DLIST_DEMOTE(ev->fd_events, fde, struct tevent_fd);
			fde->handler(ev, fde, flags, fde->private_data);
			return true;
		}
	}

	return false;
}

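/*
  Work out how long a caller may block: zero if an immediate event is
  pending, the time until the next timed event otherwise, or NULL if
  there is nothing to wait for.
*/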
struct timeval *get_timed_events_timeout(struct tevent_context *ev,
					 struct timeval *to_ret)
{
	struct timeval now;

	if ((ev->timer_events == NULL) && (ev->immediate_events == NULL)) {
		return NULL;
	}
	if (ev->immediate_events != NULL) {
		*to_ret = timeval_zero();
		return to_ret;
	}

	now = timeval_current();
	*to_ret = timeval_until(&now, &ev->timer_events->next_event);

	DEBUG(10, ("timed_events_timeout: %d/%d\n", (int)to_ret->tv_sec,
		   (int)to_ret->tv_usec));

	return to_ret;
}

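/*
  One iteration of the s3 event loop: run anything already pending,
  then poll() the registered fds with a timeout derived from the timer
  and immediate events, and dispatch the result.
*/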
static int s3_event_loop_once(struct tevent_context *ev, const char *location)
{
	struct tevent_poll_private *state;
	int timeout;
	int num_pfds;
	int ret;

	timeout = INT_MAX;

	state = tevent_get_poll_private(ev);
	if (state == NULL) {
		errno = ENOMEM;
		return -1;
	}

	if (run_events_poll(ev, 0, NULL, 0)) {
		return 0;
	}

	num_pfds = 0;
	if (!event_add_to_poll_args(ev, state,
				    &state->pfds, &num_pfds, &timeout)) {
		return -1;
	}

	ret = sys_poll(state->pfds, num_pfds, timeout);
	if (ret == -1 && errno != EINTR) {
		tevent_debug(ev, TEVENT_DEBUG_FATAL,
			     "poll() failed: %d:%s\n",
			     errno, strerror(errno));
		return -1;
	}

	run_events_poll(ev, ret, state->pfds, num_pfds);
	return 0;
}

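/*
  Nothing to set up here: the poll-specific state is allocated lazily
  by tevent_get_poll_private().
*/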
static int s3_event_context_init(struct tevent_context *ev)
{
	return 0;
}

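/*
  Log the pending timed and fd events of a context at debug level 10.
*/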
void dump_event_list(struct tevent_context *ev)
{
	struct tevent_timer *te;
	struct tevent_fd *fe;
	struct timeval evt, now;

	if (!ev) {
		return;
	}

	now = timeval_current();

	DEBUG(10,("dump_event_list:\n"));

	for (te = ev->timer_events; te; te = te->next) {

		evt = timeval_until(&now, &te->next_event);

		DEBUGADD(10,("Timed Event \"%s\" %p handled in %d seconds (at %s)\n",
			     te->handler_name,
			     te,
			     (int)evt.tv_sec,
			     http_timestring(talloc_tos(), te->next_event.tv_sec)));
	}

	for (fe = ev->fd_events; fe; fe = fe->next) {

		DEBUGADD(10,("FD Event %d %p, flags: 0x%04x\n",
			     fe->fd,
			     fe,
			     fe->flags));
	}
}

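/*
  The "s3" backend ops: a poll()-based loop_once on top of the common
  tevent fd, timer, immediate and signal handling.
*/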
static const struct tevent_ops s3_event_ops = {
	.context_init		= s3_event_context_init,
	.add_fd			= tevent_common_add_fd,
	.set_fd_close_fn	= tevent_common_fd_set_close_fn,
	.get_fd_flags		= tevent_common_fd_get_flags,
	.set_fd_flags		= tevent_common_fd_set_flags,
	.add_timer		= tevent_common_add_timer,
	.schedule_immediate	= tevent_common_schedule_immediate,
	.add_signal		= tevent_common_add_signal,
	.loop_once		= s3_event_loop_once,
	.loop_wait		= tevent_common_loop_wait,
};

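/*
  Register the "s3" backend with tevent once and make it the default.
*/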
static bool s3_tevent_init(void)
{
	static bool initialized;
	if (initialized) {
		return true;
	}
	initialized = tevent_register_backend("s3", &s3_event_ops);
	tevent_set_default_backend("s3");
	return initialized;
}

/*
  this is used to catch debug messages from events
*/
static void s3_event_debug(void *context, enum tevent_debug_level level,
			   const char *fmt, va_list ap)  PRINTF_ATTRIBUTE(3,0);

static void s3_event_debug(void *context, enum tevent_debug_level level,
			   const char *fmt, va_list ap)
{
	int samba_level = -1;
	char *s = NULL;
	switch (level) {
	case TEVENT_DEBUG_FATAL:
		samba_level = 0;
		break;
	case TEVENT_DEBUG_ERROR:
		samba_level = 1;
		break;
	case TEVENT_DEBUG_WARNING:
		samba_level = 2;
		break;
	case TEVENT_DEBUG_TRACE:
		samba_level = 11;
		break;

	};
	if (vasprintf(&s, fmt, ap) == -1) {
		return;
	}
	DEBUG(samba_level, ("s3_event: %s", s));
	free(s);
}

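/*
  Create a tevent context using the "s3" backend and hook its debug
  output into the Samba DEBUG system.
*/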
struct tevent_context *s3_tevent_context_init(TALLOC_CTX *mem_ctx)
{
	struct tevent_context *ev;

	s3_tevent_init();

	ev = tevent_context_init_byname(mem_ctx, "s3");
	if (ev) {
		tevent_set_debug(ev, s3_event_debug, NULL);
	}

	return ev;
}