Changeset 740 for vendor/current/source3/smbd/aio.c
- Timestamp: Nov 14, 2012, 12:59:34 PM
- File: vendor/current/source3/smbd/aio.c (1 edited)
vendor/current/source3/smbd/aio.c
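The diff below (r414 to r740) reworks smbd's POSIX AIO path: the scheduling entry points (schedule_aio_read_and_X, schedule_aio_write_and_X, plus new SMB2 read and write variants) now return NTSTATUS instead of bool, with NT_STATUS_RETRY on the paths that previously returned False; a strict byte-range lock is taken before each request is submitted and released when it completes or fails; and the completion record is carried directly in the realtime-signal payload (sigev_value.sival_ptr pointing at struct aio_extra) instead of an SMB mid in sival_int, so the mid-based find_aio_ex() lookup goes away. The sketch that follows is not Samba code; it is a minimal illustration of that POSIX signalling pattern, and MY_AIO_SIGNAL, struct my_request and the file path are made-up names.

/*
 * Minimal standalone sketch (not Samba code) of the POSIX AIO completion
 * pattern the new aio.c relies on: the kernel delivers a realtime signal
 * with SA_SIGINFO and the per-request context pointer travels in
 * sigev_value.sival_ptr.  MY_AIO_SIGNAL, struct my_request and the file
 * path are illustrative assumptions, not Samba names.
 */
#include <aio.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define MY_AIO_SIGNAL (SIGRTMIN + 3)    /* stand-in for RT_SIGNAL_AIO */

struct my_request {                     /* stand-in for struct aio_extra */
    struct aiocb acb;
    char buf[4096];
};

static volatile sig_atomic_t completed;
static struct my_request *volatile completed_req;

static void aio_handler(int signum, siginfo_t *info, void *ucontext)
{
    (void)signum;
    (void)ucontext;
    /* Recover the request directly from the signal payload; no list
     * walk keyed on an SMB mid is needed any more. */
    completed_req = (struct my_request *)info->si_value.sival_ptr;
    completed = 1;
}

int main(void)
{
    struct sigaction sa;
    sigset_t block, old;
    struct my_request req;
    int fd;

    memset(&sa, 0, sizeof(sa));
    sa.sa_sigaction = aio_handler;
    sa.sa_flags = SA_SIGINFO;
    sigaction(MY_AIO_SIGNAL, &sa, NULL);

    /* Block the signal so we can wait for it race-free in sigsuspend(). */
    sigemptyset(&block);
    sigaddset(&block, MY_AIO_SIGNAL);
    sigprocmask(SIG_BLOCK, &block, &old);

    fd = open("/etc/hosts", O_RDONLY);  /* any readable file will do */
    if (fd == -1) {
        perror("open");
        return 1;
    }

    memset(&req, 0, sizeof(req));
    req.acb.aio_fildes = fd;
    req.acb.aio_buf = req.buf;
    req.acb.aio_nbytes = sizeof(req.buf);
    req.acb.aio_offset = 0;
    req.acb.aio_sigevent.sigev_notify = SIGEV_SIGNAL;
    req.acb.aio_sigevent.sigev_signo = MY_AIO_SIGNAL;
    req.acb.aio_sigevent.sigev_value.sival_ptr = &req;

    if (aio_read(&req.acb) == -1) {
        perror("aio_read");
        return 1;
    }

    while (!completed) {
        sigsuspend(&old);       /* MY_AIO_SIGNAL is delivered here */
    }

    if (aio_error(&completed_req->acb) == 0) {
        printf("read %zd bytes for request %p\n",
               aio_return(&completed_req->acb),
               (void *)completed_req);
    }
    close(fd);
    return 0;
}

In the changeset itself the handler is registered lazily through initialize_async_io_handler() using tevent_add_signal() with SA_SIGINFO, so the completion callback runs from the main event loop rather than in raw signal context; on older glibc the sketch above needs -lrt to link.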
r414 r740 20 20 21 21 #include "includes.h" 22 #include "smbd/smbd.h" 22 23 #include "smbd/globals.h" 24 #include "../lib/util/tevent_ntstatus.h" 23 25 24 26 #if defined(WITH_AIO) … … 44 46 SMB_STRUCT_AIOCB acb; 45 47 files_struct *fsp; 46 struct smb_request *req; 47 char *outbuf; 48 struct smb_request *smbreq; 49 DATA_BLOB outbuf; 50 struct lock_struct lock; 51 bool write_through; 48 52 int (*handle_completion)(struct aio_extra *ex, int errcode); 49 53 }; 50 54 55 /**************************************************************************** 56 Initialize the signal handler for aio read/write. 57 *****************************************************************************/ 58 59 static void smbd_aio_signal_handler(struct tevent_context *ev_ctx, 60 struct tevent_signal *se, 61 int signum, int count, 62 void *_info, void *private_data) 63 { 64 siginfo_t *info = (siginfo_t *)_info; 65 struct aio_extra *aio_ex = (struct aio_extra *) 66 info->si_value.sival_ptr; 67 68 smbd_aio_complete_aio_ex(aio_ex); 69 } 70 71 72 static bool initialize_async_io_handler(void) 73 { 74 static bool tried_signal_setup = false; 75 76 if (aio_signal_event) { 77 return true; 78 } 79 if (tried_signal_setup) { 80 return false; 81 } 82 tried_signal_setup = true; 83 84 aio_signal_event = tevent_add_signal(smbd_event_context(), 85 smbd_event_context(), 86 RT_SIGNAL_AIO, SA_SIGINFO, 87 smbd_aio_signal_handler, 88 NULL); 89 if (!aio_signal_event) { 90 DEBUG(10, ("Failed to setup RT_SIGNAL_AIO handler\n")); 91 return false; 92 } 93 94 /* tevent supports 100 signal with SA_SIGINFO */ 95 aio_pending_size = 100; 96 return true; 97 } 98 51 99 static int handle_aio_read_complete(struct aio_extra *aio_ex, int errcode); 52 100 static int handle_aio_write_complete(struct aio_extra *aio_ex, int errcode); 101 static int handle_aio_smb2_read_complete(struct aio_extra *aio_ex, int errcode); 102 static int handle_aio_smb2_write_complete(struct aio_extra *aio_ex, int errcode); 53 103 54 104 static int aio_extra_destructor(struct aio_extra *aio_ex) … … 63 113 *****************************************************************************/ 64 114 65 static struct aio_extra *create_aio_extra(files_struct *fsp, size_t buflen) 66 { 67 struct aio_extra *aio_ex = TALLOC_ZERO_P(NULL, struct aio_extra); 115 static struct aio_extra *create_aio_extra(TALLOC_CTX *mem_ctx, 116 files_struct *fsp, 117 size_t buflen) 118 { 119 struct aio_extra *aio_ex = TALLOC_ZERO_P(mem_ctx, struct aio_extra); 68 120 69 121 if (!aio_ex) { … … 75 127 is the start of the reply data portion of that buffer. 
*/ 76 128 77 aio_ex->outbuf = TALLOC_ARRAY(aio_ex, char, buflen); 78 if (!aio_ex->outbuf) { 79 TALLOC_FREE(aio_ex); 80 return NULL; 129 if (buflen) { 130 aio_ex->outbuf = data_blob_talloc(aio_ex, NULL, buflen); 131 if (!aio_ex->outbuf.data) { 132 TALLOC_FREE(aio_ex); 133 return NULL; 134 } 81 135 } 82 136 DLIST_ADD(aio_list_head, aio_ex); … … 87 141 88 142 /**************************************************************************** 89 Given the mid find the extended aio struct containing it.90 *****************************************************************************/91 92 static struct aio_extra *find_aio_ex(uint16 mid)93 {94 struct aio_extra *p;95 96 for( p = aio_list_head; p; p = p->next) {97 if (mid == p->req->mid) {98 return p;99 }100 }101 return NULL;102 }103 104 /****************************************************************************105 We can have these many aio buffers in flight.106 *****************************************************************************/107 108 /****************************************************************************109 143 Set up an aio request from a SMBreadX call. 110 144 *****************************************************************************/ 111 145 112 boolschedule_aio_read_and_X(connection_struct *conn,113 struct smb_request * req,146 NTSTATUS schedule_aio_read_and_X(connection_struct *conn, 147 struct smb_request *smbreq, 114 148 files_struct *fsp, SMB_OFF_T startpos, 115 149 size_t smb_maxcnt) … … 121 155 int ret; 122 156 157 /* Ensure aio is initialized. */ 158 if (!initialize_async_io_handler()) { 159 return NT_STATUS_RETRY; 160 } 161 123 162 if (fsp->base_fsp != NULL) { 124 163 /* No AIO on streams yet */ 125 164 DEBUG(10, ("AIO on streams not yet supported\n")); 126 return false;165 return NT_STATUS_RETRY; 127 166 } 128 167 … … 134 173 (unsigned int)smb_maxcnt, 135 174 (unsigned int)min_aio_read_size )); 136 return False;175 return NT_STATUS_RETRY; 137 176 } 138 177 139 178 /* Only do this on non-chained and non-chaining reads not using the 140 179 * write cache. */ 141 if (req_is_in_chain( req) || (lp_write_cache_size(SNUM(conn)) != 0)) {142 return False;180 if (req_is_in_chain(smbreq) || (lp_write_cache_size(SNUM(conn)) != 0)) { 181 return NT_STATUS_RETRY; 143 182 } 144 183 … … 147 186 "activities outstanding.\n", 148 187 outstanding_aio_calls )); 149 return False;188 return NT_STATUS_RETRY; 150 189 } 151 190 … … 155 194 bufsize = smb_size + 12 * 2 + smb_maxcnt; 156 195 157 if ((aio_ex = create_aio_extra( fsp, bufsize)) == NULL) {196 if ((aio_ex = create_aio_extra(NULL, fsp, bufsize)) == NULL) { 158 197 DEBUG(10,("schedule_aio_read_and_X: malloc fail.\n")); 159 return False;198 return NT_STATUS_NO_MEMORY; 160 199 } 161 200 aio_ex->handle_completion = handle_aio_read_complete; 162 201 163 construct_reply_common_req(req, aio_ex->outbuf); 164 srv_set_message(aio_ex->outbuf, 12, 0, True); 165 SCVAL(aio_ex->outbuf,smb_vwv0,0xFF); /* Never a chained reply. */ 202 construct_reply_common_req(smbreq, (char *)aio_ex->outbuf.data); 203 srv_set_message((char *)aio_ex->outbuf.data, 12, 0, True); 204 SCVAL(aio_ex->outbuf.data,smb_vwv0,0xFF); /* Never a chained reply. */ 205 206 init_strict_lock_struct(fsp, (uint64_t)smbreq->smbpid, 207 (uint64_t)startpos, (uint64_t)smb_maxcnt, READ_LOCK, 208 &aio_ex->lock); 209 210 /* Take the lock until the AIO completes. 
*/ 211 if (!SMB_VFS_STRICT_LOCK(conn, fsp, &aio_ex->lock)) { 212 TALLOC_FREE(aio_ex); 213 return NT_STATUS_FILE_LOCK_CONFLICT; 214 } 166 215 167 216 a = &aio_ex->acb; … … 170 219 171 220 a->aio_fildes = fsp->fh->fd; 172 a->aio_buf = smb_buf(aio_ex->outbuf );221 a->aio_buf = smb_buf(aio_ex->outbuf.data); 173 222 a->aio_nbytes = smb_maxcnt; 174 223 a->aio_offset = startpos; 175 224 a->aio_sigevent.sigev_notify = SIGEV_SIGNAL; 176 225 a->aio_sigevent.sigev_signo = RT_SIGNAL_AIO; 177 a->aio_sigevent.sigev_value.sival_ int = req->mid;226 a->aio_sigevent.sigev_value.sival_ptr = aio_ex; 178 227 179 228 ret = SMB_VFS_AIO_READ(fsp, a); … … 181 230 DEBUG(0,("schedule_aio_read_and_X: aio_read failed. " 182 231 "Error %s\n", strerror(errno) )); 232 SMB_VFS_STRICT_UNLOCK(conn, fsp, &aio_ex->lock); 183 233 TALLOC_FREE(aio_ex); 184 return False;234 return NT_STATUS_RETRY; 185 235 } 186 236 187 237 outstanding_aio_calls++; 188 aio_ex-> req = talloc_move(aio_ex, &req);238 aio_ex->smbreq = talloc_move(aio_ex, &smbreq); 189 239 190 240 DEBUG(10,("schedule_aio_read_and_X: scheduled aio_read for file %s, " 191 241 "offset %.0f, len = %u (mid = %u)\n", 192 242 fsp_str_dbg(fsp), (double)startpos, (unsigned int)smb_maxcnt, 193 (unsigned int)aio_ex-> req->mid ));194 195 return True;243 (unsigned int)aio_ex->smbreq->mid )); 244 245 return NT_STATUS_OK; 196 246 } 197 247 … … 200 250 *****************************************************************************/ 201 251 202 boolschedule_aio_write_and_X(connection_struct *conn,203 struct smb_request * req,252 NTSTATUS schedule_aio_write_and_X(connection_struct *conn, 253 struct smb_request *smbreq, 204 254 files_struct *fsp, char *data, 205 255 SMB_OFF_T startpos, … … 209 259 SMB_STRUCT_AIOCB *a; 210 260 size_t bufsize; 211 bool write_through = BITSETW(req->vwv+7,0);212 261 size_t min_aio_write_size = lp_aio_write_size(SNUM(conn)); 213 262 int ret; 263 264 /* Ensure aio is initialized. */ 265 if (!initialize_async_io_handler()) { 266 return NT_STATUS_RETRY; 267 } 214 268 215 269 if (fsp->base_fsp != NULL) { 216 270 /* No AIO on streams yet */ 217 271 DEBUG(10, ("AIO on streams not yet supported\n")); 218 return false;272 return NT_STATUS_RETRY; 219 273 } 220 274 … … 226 280 (unsigned int)numtowrite, 227 281 (unsigned int)min_aio_write_size )); 228 return False;229 } 230 231 /* Only do this on non-chained and non-chaining reads not using the282 return NT_STATUS_RETRY; 283 } 284 285 /* Only do this on non-chained and non-chaining writes not using the 232 286 * write cache. */ 233 if (req_is_in_chain( req) || (lp_write_cache_size(SNUM(conn)) != 0)) {234 return False;287 if (req_is_in_chain(smbreq) || (lp_write_cache_size(SNUM(conn)) != 0)) { 288 return NT_STATUS_RETRY; 235 289 } 236 290 … … 244 298 fsp_str_dbg(fsp), (double)startpos, 245 299 (unsigned int)numtowrite, 246 (unsigned int) req->mid ));247 return False;300 (unsigned int)smbreq->mid )); 301 return NT_STATUS_RETRY; 248 302 } 249 303 250 304 bufsize = smb_size + 6*2; 251 305 252 if (!(aio_ex = create_aio_extra( fsp, bufsize))) {306 if (!(aio_ex = create_aio_extra(NULL, fsp, bufsize))) { 253 307 DEBUG(0,("schedule_aio_write_and_X: malloc fail.\n")); 254 return False;308 return NT_STATUS_NO_MEMORY; 255 309 } 256 310 aio_ex->handle_completion = handle_aio_write_complete; 257 258 construct_reply_common_req(req, aio_ex->outbuf); 259 srv_set_message(aio_ex->outbuf, 6, 0, True); 260 SCVAL(aio_ex->outbuf,smb_vwv0,0xFF); /* Never a chained reply. 
*/ 311 aio_ex->write_through = BITSETW(smbreq->vwv+7,0); 312 313 construct_reply_common_req(smbreq, (char *)aio_ex->outbuf.data); 314 srv_set_message((char *)aio_ex->outbuf.data, 6, 0, True); 315 SCVAL(aio_ex->outbuf.data,smb_vwv0,0xFF); /* Never a chained reply. */ 316 317 init_strict_lock_struct(fsp, (uint64_t)smbreq->smbpid, 318 (uint64_t)startpos, (uint64_t)numtowrite, WRITE_LOCK, 319 &aio_ex->lock); 320 321 /* Take the lock until the AIO completes. */ 322 if (!SMB_VFS_STRICT_LOCK(conn, fsp, &aio_ex->lock)) { 323 TALLOC_FREE(aio_ex); 324 return NT_STATUS_FILE_LOCK_CONFLICT; 325 } 261 326 262 327 a = &aio_ex->acb; … … 270 335 a->aio_sigevent.sigev_notify = SIGEV_SIGNAL; 271 336 a->aio_sigevent.sigev_signo = RT_SIGNAL_AIO; 272 a->aio_sigevent.sigev_value.sival_ int = req->mid;337 a->aio_sigevent.sigev_value.sival_ptr = aio_ex; 273 338 274 339 ret = SMB_VFS_AIO_WRITE(fsp, a); … … 276 341 DEBUG(3,("schedule_aio_wrote_and_X: aio_write failed. " 277 342 "Error %s\n", strerror(errno) )); 343 SMB_VFS_STRICT_UNLOCK(conn, fsp, &aio_ex->lock); 278 344 TALLOC_FREE(aio_ex); 279 return False;345 return NT_STATUS_RETRY; 280 346 } 281 347 282 348 outstanding_aio_calls++; 283 aio_ex-> req = talloc_move(aio_ex, &req);349 aio_ex->smbreq = talloc_move(aio_ex, &smbreq); 284 350 285 351 /* This should actually be improved to span the write. */ … … 287 353 contend_level2_oplocks_end(fsp, LEVEL2_CONTEND_WRITE); 288 354 289 if (! write_through && !lp_syncalways(SNUM(fsp->conn))355 if (!aio_ex->write_through && !lp_syncalways(SNUM(fsp->conn)) 290 356 && fsp->aio_write_behind) { 291 357 /* Lie to the client and immediately claim we finished the 292 358 * write. */ 293 SSVAL(aio_ex->outbuf,smb_vwv2,numtowrite); 294 SSVAL(aio_ex->outbuf,smb_vwv4,(numtowrite>>16)&1); 295 show_msg(aio_ex->outbuf); 296 if (!srv_send_smb(smbd_server_fd(),aio_ex->outbuf, 297 true, aio_ex->req->seqnum+1, 359 SSVAL(aio_ex->outbuf.data,smb_vwv2,numtowrite); 360 SSVAL(aio_ex->outbuf.data,smb_vwv4,(numtowrite>>16)&1); 361 show_msg((char *)aio_ex->outbuf.data); 362 if (!srv_send_smb(aio_ex->smbreq->sconn, 363 (char *)aio_ex->outbuf.data, 364 true, aio_ex->smbreq->seqnum+1, 298 365 IS_CONN_ENCRYPTED(fsp->conn), 299 &aio_ex-> req->pcd)) {300 exit_server_cleanly(" handle_aio_write: srv_send_smb"301 " failed.");366 &aio_ex->smbreq->pcd)) { 367 exit_server_cleanly("schedule_aio_write_and_X: " 368 "srv_send_smb failed."); 302 369 } 303 370 DEBUG(10,("schedule_aio_write_and_X: scheduled aio_write " … … 309 376 "outstanding_aio_calls = %d\n", 310 377 fsp_str_dbg(fsp), (double)startpos, (unsigned int)numtowrite, 311 (unsigned int)aio_ex->req->mid, outstanding_aio_calls )); 312 313 return True; 314 } 315 378 (unsigned int)aio_ex->smbreq->mid, outstanding_aio_calls )); 379 380 return NT_STATUS_OK; 381 } 382 383 /**************************************************************************** 384 Set up an aio request from a SMB2 read call. 385 *****************************************************************************/ 386 387 NTSTATUS schedule_smb2_aio_read(connection_struct *conn, 388 struct smb_request *smbreq, 389 files_struct *fsp, 390 TALLOC_CTX *ctx, 391 DATA_BLOB *preadbuf, 392 SMB_OFF_T startpos, 393 size_t smb_maxcnt) 394 { 395 struct aio_extra *aio_ex; 396 SMB_STRUCT_AIOCB *a; 397 size_t min_aio_read_size = lp_aio_read_size(SNUM(conn)); 398 int ret; 399 400 /* Ensure aio is initialized. 
*/ 401 if (!initialize_async_io_handler()) { 402 return NT_STATUS_RETRY; 403 } 404 405 if (fsp->base_fsp != NULL) { 406 /* No AIO on streams yet */ 407 DEBUG(10, ("AIO on streams not yet supported\n")); 408 return NT_STATUS_RETRY; 409 } 410 411 if ((!min_aio_read_size || (smb_maxcnt < min_aio_read_size)) 412 && !SMB_VFS_AIO_FORCE(fsp)) { 413 /* Too small a read for aio request. */ 414 DEBUG(10,("smb2: read size (%u) too small " 415 "for minimum aio_read of %u\n", 416 (unsigned int)smb_maxcnt, 417 (unsigned int)min_aio_read_size )); 418 return NT_STATUS_RETRY; 419 } 420 421 /* Only do this on reads not using the write cache. */ 422 if (lp_write_cache_size(SNUM(conn)) != 0) { 423 return NT_STATUS_RETRY; 424 } 425 426 if (outstanding_aio_calls >= aio_pending_size) { 427 DEBUG(10,("smb2: Already have %d aio " 428 "activities outstanding.\n", 429 outstanding_aio_calls )); 430 return NT_STATUS_RETRY; 431 } 432 433 /* Create the out buffer. */ 434 *preadbuf = data_blob_talloc(ctx, NULL, smb_maxcnt); 435 if (preadbuf->data == NULL) { 436 return NT_STATUS_NO_MEMORY; 437 } 438 439 if (!(aio_ex = create_aio_extra(smbreq->smb2req, fsp, 0))) { 440 return NT_STATUS_NO_MEMORY; 441 } 442 aio_ex->handle_completion = handle_aio_smb2_read_complete; 443 444 init_strict_lock_struct(fsp, (uint64_t)smbreq->smbpid, 445 (uint64_t)startpos, (uint64_t)smb_maxcnt, READ_LOCK, 446 &aio_ex->lock); 447 448 /* Take the lock until the AIO completes. */ 449 if (!SMB_VFS_STRICT_LOCK(conn, fsp, &aio_ex->lock)) { 450 TALLOC_FREE(aio_ex); 451 return NT_STATUS_FILE_LOCK_CONFLICT; 452 } 453 454 a = &aio_ex->acb; 455 456 /* Now set up the aio record for the read call. */ 457 458 a->aio_fildes = fsp->fh->fd; 459 a->aio_buf = preadbuf->data; 460 a->aio_nbytes = smb_maxcnt; 461 a->aio_offset = startpos; 462 a->aio_sigevent.sigev_notify = SIGEV_SIGNAL; 463 a->aio_sigevent.sigev_signo = RT_SIGNAL_AIO; 464 a->aio_sigevent.sigev_value.sival_ptr = aio_ex; 465 466 ret = SMB_VFS_AIO_READ(fsp, a); 467 if (ret == -1) { 468 DEBUG(0,("smb2: aio_read failed. " 469 "Error %s\n", strerror(errno) )); 470 SMB_VFS_STRICT_UNLOCK(conn, fsp, &aio_ex->lock); 471 TALLOC_FREE(aio_ex); 472 return NT_STATUS_RETRY; 473 } 474 475 outstanding_aio_calls++; 476 /* We don't need talloc_move here as both aio_ex and 477 * smbreq are children of smbreq->smb2req. */ 478 aio_ex->smbreq = smbreq; 479 480 DEBUG(10,("smb2: scheduled aio_read for file %s, " 481 "offset %.0f, len = %u (mid = %u)\n", 482 fsp_str_dbg(fsp), (double)startpos, (unsigned int)smb_maxcnt, 483 (unsigned int)aio_ex->smbreq->mid )); 484 485 return NT_STATUS_OK; 486 } 487 488 /**************************************************************************** 489 Set up an aio request from a SMB2write call. 490 *****************************************************************************/ 491 492 NTSTATUS schedule_aio_smb2_write(connection_struct *conn, 493 struct smb_request *smbreq, 494 files_struct *fsp, 495 uint64_t in_offset, 496 DATA_BLOB in_data, 497 bool write_through) 498 { 499 struct aio_extra *aio_ex = NULL; 500 SMB_STRUCT_AIOCB *a = NULL; 501 size_t min_aio_write_size = lp_aio_write_size(SNUM(conn)); 502 int ret; 503 504 /* Ensure aio is initialized. 
*/ 505 if (!initialize_async_io_handler()) { 506 return NT_STATUS_RETRY; 507 } 508 509 if (fsp->base_fsp != NULL) { 510 /* No AIO on streams yet */ 511 DEBUG(10, ("AIO on streams not yet supported\n")); 512 return NT_STATUS_RETRY; 513 } 514 515 if ((!min_aio_write_size || (in_data.length < min_aio_write_size)) 516 && !SMB_VFS_AIO_FORCE(fsp)) { 517 /* Too small a write for aio request. */ 518 DEBUG(10,("smb2: write size (%u) too " 519 "small for minimum aio_write of %u\n", 520 (unsigned int)in_data.length, 521 (unsigned int)min_aio_write_size )); 522 return NT_STATUS_RETRY; 523 } 524 525 /* Only do this on writes not using the write cache. */ 526 if (lp_write_cache_size(SNUM(conn)) != 0) { 527 return NT_STATUS_RETRY; 528 } 529 530 if (outstanding_aio_calls >= aio_pending_size) { 531 DEBUG(3,("smb2: Already have %d aio " 532 "activities outstanding.\n", 533 outstanding_aio_calls )); 534 return NT_STATUS_RETRY; 535 } 536 537 if (!(aio_ex = create_aio_extra(smbreq->smb2req, fsp, 0))) { 538 return NT_STATUS_NO_MEMORY; 539 } 540 541 aio_ex->handle_completion = handle_aio_smb2_write_complete; 542 aio_ex->write_through = write_through; 543 544 init_strict_lock_struct(fsp, (uint64_t)smbreq->smbpid, 545 in_offset, (uint64_t)in_data.length, WRITE_LOCK, 546 &aio_ex->lock); 547 548 /* Take the lock until the AIO completes. */ 549 if (!SMB_VFS_STRICT_LOCK(conn, fsp, &aio_ex->lock)) { 550 TALLOC_FREE(aio_ex); 551 return NT_STATUS_FILE_LOCK_CONFLICT; 552 } 553 554 a = &aio_ex->acb; 555 556 /* Now set up the aio record for the write call. */ 557 558 a->aio_fildes = fsp->fh->fd; 559 a->aio_buf = in_data.data; 560 a->aio_nbytes = in_data.length; 561 a->aio_offset = in_offset; 562 a->aio_sigevent.sigev_notify = SIGEV_SIGNAL; 563 a->aio_sigevent.sigev_signo = RT_SIGNAL_AIO; 564 a->aio_sigevent.sigev_value.sival_ptr = aio_ex; 565 566 ret = SMB_VFS_AIO_WRITE(fsp, a); 567 if (ret == -1) { 568 DEBUG(3,("smb2: aio_write failed. " 569 "Error %s\n", strerror(errno) )); 570 SMB_VFS_STRICT_UNLOCK(conn, fsp, &aio_ex->lock); 571 TALLOC_FREE(aio_ex); 572 return NT_STATUS_RETRY; 573 } 574 575 outstanding_aio_calls++; 576 /* We don't need talloc_move here as both aio_ex and 577 * smbreq are children of smbreq->smb2req. */ 578 aio_ex->smbreq = smbreq; 579 580 /* This should actually be improved to span the write. */ 581 contend_level2_oplocks_begin(fsp, LEVEL2_CONTEND_WRITE); 582 contend_level2_oplocks_end(fsp, LEVEL2_CONTEND_WRITE); 583 584 /* 585 * We don't want to do write behind due to ownership 586 * issues of the request structs. Maybe add it if I 587 * figure those out. JRA. 
588 */ 589 590 DEBUG(10,("smb2: scheduled aio_write for file " 591 "%s, offset %.0f, len = %u (mid = %u) " 592 "outstanding_aio_calls = %d\n", 593 fsp_str_dbg(fsp), 594 (double)in_offset, 595 (unsigned int)in_data.length, 596 (unsigned int)aio_ex->smbreq->mid, 597 outstanding_aio_calls )); 598 599 return NT_STATUS_OK; 600 } 316 601 317 602 /**************************************************************************** … … 323 608 { 324 609 int outsize; 325 char *outbuf = aio_ex->outbuf;610 char *outbuf = (char *)aio_ex->outbuf.data; 326 611 char *data = smb_buf(outbuf); 327 612 ssize_t nread = SMB_VFS_AIO_RETURN(aio_ex->fsp,&aio_ex->acb); … … 358 643 smb_setlen(outbuf,outsize - 4); 359 644 show_msg(outbuf); 360 if (!srv_send_smb( smbd_server_fd(),outbuf,361 true, aio_ex-> req->seqnum+1,645 if (!srv_send_smb(aio_ex->smbreq->sconn, outbuf, 646 true, aio_ex->smbreq->seqnum+1, 362 647 IS_CONN_ENCRYPTED(aio_ex->fsp->conn), NULL)) { 363 648 exit_server_cleanly("handle_aio_read_complete: srv_send_smb " … … 381 666 { 382 667 files_struct *fsp = aio_ex->fsp; 383 char *outbuf = aio_ex->outbuf;668 char *outbuf = (char *)aio_ex->outbuf.data; 384 669 ssize_t numtowrite = aio_ex->acb.aio_nbytes; 385 670 ssize_t nwritten = SMB_VFS_AIO_RETURN(fsp,&aio_ex->acb); … … 422 707 srv_set_message(outbuf,0,0,true); 423 708 } else { 424 bool write_through = BITSETW(aio_ex->req->vwv+7,0);425 709 NTSTATUS status; 426 710 … … 434 718 DEBUG(3,("handle_aio_write: fnum=%d num=%d wrote=%d\n", 435 719 fsp->fnum, (int)numtowrite, (int)nwritten)); 436 status = sync_file(fsp->conn,fsp, write_through);720 status = sync_file(fsp->conn,fsp, aio_ex->write_through); 437 721 if (!NT_STATUS_IS_OK(status)) { 438 722 errcode = errno; … … 448 732 449 733 show_msg(outbuf); 450 if (!srv_send_smb( smbd_server_fd(),outbuf,451 true, aio_ex-> req->seqnum+1,734 if (!srv_send_smb(aio_ex->smbreq->sconn, outbuf, 735 true, aio_ex->smbreq->seqnum+1, 452 736 IS_CONN_ENCRYPTED(fsp->conn), 453 737 NULL)) { 454 exit_server_cleanly("handle_aio_write: srv_send_smb failed."); 738 exit_server_cleanly("handle_aio_write_complete: " 739 "srv_send_smb failed."); 455 740 } 456 741 … … 464 749 465 750 /**************************************************************************** 751 Complete the read and return the data or error back to the client. 752 Returns errno or zero if all ok. 753 *****************************************************************************/ 754 755 static int handle_aio_smb2_read_complete(struct aio_extra *aio_ex, int errcode) 756 { 757 NTSTATUS status; 758 struct tevent_req *subreq = aio_ex->smbreq->smb2req->subreq; 759 ssize_t nread = SMB_VFS_AIO_RETURN(aio_ex->fsp,&aio_ex->acb); 760 761 /* Common error or success code processing for async or sync 762 read returns. 
*/ 763 764 status = smb2_read_complete(subreq, nread, errcode); 765 766 if (nread > 0) { 767 aio_ex->fsp->fh->pos = aio_ex->acb.aio_offset + nread; 768 aio_ex->fsp->fh->position_information = aio_ex->fsp->fh->pos; 769 } 770 771 DEBUG(10,("smb2: scheduled aio_read completed " 772 "for file %s, offset %.0f, len = %u " 773 "(errcode = %d, NTSTATUS = %s)\n", 774 fsp_str_dbg(aio_ex->fsp), 775 (double)aio_ex->acb.aio_offset, 776 (unsigned int)nread, 777 errcode, 778 nt_errstr(status) )); 779 780 if (!NT_STATUS_IS_OK(status)) { 781 tevent_req_nterror(subreq, status); 782 return errcode; 783 } 784 785 tevent_req_done(subreq); 786 return errcode; 787 } 788 789 /**************************************************************************** 790 Complete the SMB2 write and return the data or error back to the client. 791 Returns error code or zero if all ok. 792 *****************************************************************************/ 793 794 static int handle_aio_smb2_write_complete(struct aio_extra *aio_ex, int errcode) 795 { 796 files_struct *fsp = aio_ex->fsp; 797 ssize_t numtowrite = aio_ex->acb.aio_nbytes; 798 ssize_t nwritten = SMB_VFS_AIO_RETURN(fsp,&aio_ex->acb); 799 struct tevent_req *subreq = aio_ex->smbreq->smb2req->subreq; 800 NTSTATUS status; 801 802 status = smb2_write_complete(subreq, nwritten, errcode); 803 804 DEBUG(10,("smb2: scheduled aio_write completed " 805 "for file %s, offset %.0f, requested %u, " 806 "written = %u (errcode = %d, NTSTATUS = %s)\n", 807 fsp_str_dbg(fsp), 808 (double)aio_ex->acb.aio_offset, 809 (unsigned int)numtowrite, 810 (unsigned int)nwritten, 811 errcode, 812 nt_errstr(status) )); 813 814 if (!NT_STATUS_IS_OK(status)) { 815 tevent_req_nterror(subreq, status); 816 return errcode; 817 } 818 819 tevent_req_done(subreq); 820 return errcode; 821 } 822 823 /**************************************************************************** 466 824 Handle any aio completion. Returns True if finished (and sets *perr if err 467 825 was non-zero), False if not. … … 470 828 static bool handle_aio_completed(struct aio_extra *aio_ex, int *perr) 471 829 { 830 files_struct *fsp = NULL; 472 831 int err; 473 832 … … 477 836 } 478 837 838 fsp = aio_ex->fsp; 839 479 840 /* Ensure the operation has really completed. */ 480 err = SMB_VFS_AIO_ERROR( aio_ex->fsp, &aio_ex->acb);841 err = SMB_VFS_AIO_ERROR(fsp, &aio_ex->acb); 481 842 if (err == EINPROGRESS) { 482 DEBUG(10,( "handle_aio_completed: operation mid %u still in " 483 "process for file %s\n", 484 aio_ex->req->mid, fsp_str_dbg(aio_ex->fsp))); 843 DEBUG(10,( "handle_aio_completed: operation mid %llu still in " 844 "process for file %s\n", 845 (unsigned long long)aio_ex->smbreq->mid, 846 fsp_str_dbg(aio_ex->fsp))); 485 847 return False; 486 } else if (err == ECANCELED) { 848 } 849 850 /* Unlock now we're done. */ 851 SMB_VFS_STRICT_UNLOCK(fsp->conn, fsp, &aio_ex->lock); 852 853 if (err == ECANCELED) { 487 854 /* If error is ECANCELED then don't return anything to the 488 855 * client. 
*/ 489 DEBUG(10,( "handle_aio_completed: operation mid %u" 490 " canceled\n", aio_ex->req->mid)); 856 DEBUG(10,( "handle_aio_completed: operation mid %llu" 857 " canceled\n", 858 (unsigned long long)aio_ex->smbreq->mid)); 491 859 return True; 492 860 } … … 504 872 *****************************************************************************/ 505 873 506 void smbd_aio_complete_ mid(unsigned int mid)874 void smbd_aio_complete_aio_ex(struct aio_extra *aio_ex) 507 875 { 508 876 files_struct *fsp = NULL; 509 struct aio_extra *aio_ex = find_aio_ex(mid);510 877 int ret = 0; 511 878 512 879 outstanding_aio_calls--; 513 880 514 DEBUG(10,("smbd_aio_complete_mid: mid[%u]\n", mid)); 515 516 if (!aio_ex) { 517 DEBUG(3,("smbd_aio_complete_mid: Can't find record to " 518 "match mid %u.\n", mid)); 519 return; 520 } 881 DEBUG(10,("smbd_aio_complete_mid: mid[%llu]\n", 882 (unsigned long long)aio_ex->smbreq->mid)); 521 883 522 884 fsp = aio_ex->fsp; … … 525 887 * ignore. */ 526 888 DEBUG( 3,( "smbd_aio_complete_mid: file closed whilst " 527 "aio outstanding (mid[%u]).\n", mid)); 889 "aio outstanding (mid[%llu]).\n", 890 (unsigned long long)aio_ex->smbreq->mid)); 528 891 return; 529 892 } … … 534 897 535 898 TALLOC_FREE(aio_ex); 536 }537 538 static void smbd_aio_signal_handler(struct tevent_context *ev_ctx,539 struct tevent_signal *se,540 int signum, int count,541 void *_info, void *private_data)542 {543 siginfo_t *info = (siginfo_t *)_info;544 unsigned int mid = (unsigned int)info->si_value.sival_int;545 546 smbd_aio_complete_mid(mid);547 899 } 548 900 … … 560 912 const SMB_STRUCT_AIOCB **aiocb_list; 561 913 int aio_completion_count = 0; 562 time_t start_time = time (NULL);914 time_t start_time = time_mono(NULL); 563 915 int seconds_left; 564 916 … … 625 977 * so. */ 626 978 for( i = 0; i < aio_completion_count; i++) { 627 uint16 mid = aiocb_list[i]->aio_sigevent.sigev_value.sival_int; 628 629 aio_ex = find_aio_ex(mid); 630 631 if (!aio_ex) { 632 DEBUG(0, ("wait_for_aio_completion: mid %u " 633 "doesn't match an aio record\n", 634 (unsigned int)mid )); 635 continue; 636 } 979 aio_ex = (struct aio_extra *)aiocb_list[i]->aio_sigevent.sigev_value.sival_ptr; 637 980 638 981 if (!handle_aio_completed(aio_ex, &err)) { … … 644 987 SAFE_FREE(aiocb_list); 645 988 seconds_left = SMB_TIME_FOR_AIO_COMPLETE_WAIT 646 - (time (NULL) - start_time);989 - (time_mono(NULL) - start_time); 647 990 } 648 991 … … 666 1009 for( aio_ex = aio_list_head; aio_ex; aio_ex = aio_ex->next) { 667 1010 if (aio_ex->fsp == fsp) { 1011 /* Unlock now we're done. */ 1012 SMB_VFS_STRICT_UNLOCK(fsp->conn, fsp, &aio_ex->lock); 1013 668 1014 /* Don't delete the aio_extra record as we may have 669 1015 completed and don't yet know it. 
Just do the … … 676 1022 } 677 1023 678 /****************************************************************************679 Initialize the signal handler for aio read/write.680 *****************************************************************************/681 682 void initialize_async_io_handler(void)683 {684 aio_signal_event = tevent_add_signal(smbd_event_context(),685 smbd_event_context(),686 RT_SIGNAL_AIO, SA_SIGINFO,687 smbd_aio_signal_handler,688 NULL);689 if (!aio_signal_event) {690 exit_server("Failed to setup RT_SIGNAL_AIO handler");691 }692 693 /* tevent supports 100 signal with SA_SIGINFO */694 aio_pending_size = 100;695 }696 697 1024 #else 698 void initialize_async_io_handler(void) 699 { 700 } 701 702 bool schedule_aio_read_and_X(connection_struct *conn, 703 struct smb_request *req, 1025 NTSTATUS schedule_aio_read_and_X(connection_struct *conn, 1026 struct smb_request *smbreq, 704 1027 files_struct *fsp, SMB_OFF_T startpos, 705 1028 size_t smb_maxcnt) 706 1029 { 707 return False;708 } 709 710 boolschedule_aio_write_and_X(connection_struct *conn,711 struct smb_request * req,1030 return NT_STATUS_RETRY; 1031 } 1032 1033 NTSTATUS schedule_aio_write_and_X(connection_struct *conn, 1034 struct smb_request *smbreq, 712 1035 files_struct *fsp, char *data, 713 1036 SMB_OFF_T startpos, 714 1037 size_t numtowrite) 715 1038 { 716 return False; 1039 return NT_STATUS_RETRY; 1040 } 1041 1042 NTSTATUS schedule_smb2_aio_read(connection_struct *conn, 1043 struct smb_request *smbreq, 1044 files_struct *fsp, 1045 TALLOC_CTX *ctx, 1046 DATA_BLOB *preadbuf, 1047 SMB_OFF_T startpos, 1048 size_t smb_maxcnt) 1049 { 1050 return NT_STATUS_RETRY; 1051 } 1052 1053 NTSTATUS schedule_aio_smb2_write(connection_struct *conn, 1054 struct smb_request *smbreq, 1055 files_struct *fsp, 1056 uint64_t in_offset, 1057 DATA_BLOB in_data, 1058 bool write_through) 1059 { 1060 return NT_STATUS_RETRY; 717 1061 } 718 1062 … … 723 1067 int wait_for_aio_completion(files_struct *fsp) 724 1068 { 725 return ENOSYS;726 } 727 728 void smbd_aio_complete_mid(u nsigned int mid);1069 return 0; 1070 } 1071 1072 void smbd_aio_complete_mid(uint64_t mid); 729 1073 730 1074 #endif
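One smaller change near the end is also worth noting: wait_for_aio_completion() now measures its remaining wait budget with time_mono(NULL) instead of time(NULL). The fragment below is a sketch of that arithmetic, not Samba code, assuming a POSIX system with CLOCK_MONOTONIC; COMPLETE_WAIT_SECS stands in for SMB_TIME_FOR_AIO_COMPLETE_WAIT, whose value is not shown in this diff.

/*
 * Sketch (not Samba code) of the deadline arithmetic behind the
 * time(NULL) -> time_mono(NULL) switch in wait_for_aio_completion():
 * the remaining wait budget is derived from a monotonic clock, so a
 * wall-clock step cannot stretch or collapse the wait.
 */
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define COMPLETE_WAIT_SECS 3    /* stand-in for SMB_TIME_FOR_AIO_COMPLETE_WAIT */

static time_t mono_seconds(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return ts.tv_sec;
}

int main(void)
{
    time_t start = mono_seconds();
    int seconds_left = COMPLETE_WAIT_SECS;

    while (seconds_left > 0) {
        /* Stand-in for one aio_suspend()/completion round. */
        sleep(1);

        /* Recompute the budget from elapsed monotonic time. */
        seconds_left = COMPLETE_WAIT_SECS - (int)(mono_seconds() - start);
        printf("seconds_left = %d\n", seconds_left);
    }
    return 0;
}

Using a monotonic source means an NTP step or a manual clock change while waiting on outstanding requests cannot make the loop give up early or run far past the intended budget.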