Changeset 988 for vendor/current/source3/libsmb/clireadwrite.c
- Timestamp:
- Nov 24, 2016, 1:14:11 PM (9 years ago)
- File:
-
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
vendor/current/source3/libsmb/clireadwrite.c
r746 r988 23 23 #include "async_smb.h" 24 24 #include "trans2.h" 25 #include "../libcli/smb/smbXcli_base.h" 25 26 26 27 /**************************************************************************** … … 29 30 static size_t cli_read_max_bufsize(struct cli_state *cli) 30 31 { 31 size_t data_offset = smb_size - 4; 32 size_t wct = 12; 33 34 size_t useable_space; 35 36 if (!client_is_signing_on(cli) && !cli_encryption_on(cli) 37 && (cli->server_posix_capabilities & CIFS_UNIX_LARGE_READ_CAP)) { 38 return CLI_SAMBA_MAX_POSIX_LARGE_READX_SIZE; 39 } 40 if (cli->capabilities & CAP_LARGE_READX) { 41 return cli->is_samba 42 ? CLI_SAMBA_MAX_LARGE_READX_SIZE 43 : CLI_WINDOWS_MAX_LARGE_READX_SIZE; 44 } 45 32 uint8_t wct = 12; 33 uint32_t min_space; 34 uint32_t data_offset; 35 uint32_t useable_space = 0; 36 37 data_offset = HDR_VWV; 46 38 data_offset += wct * sizeof(uint16_t); 39 data_offset += sizeof(uint16_t); /* byte count */ 47 40 data_offset += 1; /* pad */ 48 41 49 useable_space = cli->max_xmit - data_offset; 50 51 return useable_space; 42 min_space = cli_state_available_size(cli, data_offset); 43 44 if (cli->server_posix_capabilities & CIFS_UNIX_LARGE_READ_CAP) { 45 useable_space = 0xFFFFFF - data_offset; 46 47 if (smb1cli_conn_signing_is_active(cli->conn)) { 48 return min_space; 49 } 50 51 if (smb1cli_conn_encryption_on(cli->conn)) { 52 return min_space; 53 } 54 55 return useable_space; 56 } else if (smb1cli_conn_capabilities(cli->conn) & CAP_LARGE_READX) { 57 /* 58 * Note: CAP_LARGE_READX also works with signing 59 */ 60 useable_space = 0x1FFFF - data_offset; 61 62 useable_space = MIN(useable_space, UINT16_MAX); 63 64 return useable_space; 65 } 66 67 return min_space; 52 68 } 53 69 … … 59 75 uint8_t wct) 60 76 { 61 if (write_mode == 0 && 62 !client_is_signing_on(cli) && 63 !cli_encryption_on(cli) && 64 (cli->server_posix_capabilities & CIFS_UNIX_LARGE_WRITE_CAP) && 65 (cli->capabilities & CAP_LARGE_FILES)) { 66 /* Only do massive writes if we can do them direct 67 * with 
no signing or encrypting - not on a pipe. */ 68 return CLI_SAMBA_MAX_POSIX_LARGE_WRITEX_SIZE; 69 } 70 71 if (cli->is_samba) { 72 return CLI_SAMBA_MAX_LARGE_WRITEX_SIZE; 73 } 74 75 if (((cli->capabilities & CAP_LARGE_WRITEX) == 0) 76 || client_is_signing_on(cli) 77 || strequal(cli->dev, "LPT1:")) { 78 size_t data_offset = smb_size - 4; 79 size_t useable_space; 80 81 data_offset += wct * sizeof(uint16_t); 82 data_offset += 1; /* pad */ 83 84 useable_space = cli->max_xmit - data_offset; 85 86 return useable_space; 87 } 88 89 return CLI_WINDOWS_MAX_LARGE_WRITEX_SIZE; 77 uint32_t min_space; 78 uint32_t data_offset; 79 uint32_t useable_space = 0; 80 81 data_offset = HDR_VWV; 82 data_offset += wct * sizeof(uint16_t); 83 data_offset += sizeof(uint16_t); /* byte count */ 84 data_offset += 1; /* pad */ 85 86 min_space = cli_state_available_size(cli, data_offset); 87 88 if (cli->server_posix_capabilities & CIFS_UNIX_LARGE_WRITE_CAP) { 89 useable_space = 0xFFFFFF - data_offset; 90 } else if (smb1cli_conn_capabilities(cli->conn) & CAP_LARGE_WRITEX) { 91 useable_space = 0x1FFFF - data_offset; 92 } else { 93 return min_space; 94 } 95 96 if (write_mode != 0) { 97 return min_space; 98 } 99 100 if (smb1cli_conn_signing_is_active(cli->conn)) { 101 return min_space; 102 } 103 104 if (smb1cli_conn_encryption_on(cli->conn)) { 105 return min_space; 106 } 107 108 if (strequal(cli->dev, "LPT1:")) { 109 return min_space; 110 } 111 112 return useable_space; 90 113 } 91 114 … … 101 124 102 125 struct tevent_req *cli_read_andx_create(TALLOC_CTX *mem_ctx, 103 struct event_context *ev,126 struct tevent_context *ev, 104 127 struct cli_state *cli, uint16_t fnum, 105 128 off_t offset, size_t size, … … 109 132 struct cli_read_andx_state *state; 110 133 uint8_t wct = 10; 111 112 if (size > cli_read_max_bufsize(cli)) {113 DEBUG(0, ("cli_read_andx_send got size=%d, can only handle "114 "size=%d\n", (int)size,115 (int)cli_read_max_bufsize(cli)));116 return NULL;117 }118 134 119 135 req = 
tevent_req_create(mem_ctx, &state, struct cli_read_andx_state); … … 134 150 SSVAL(state->vwv + 9, 0, 0); 135 151 136 if ( cli->capabilities& CAP_LARGE_FILES) {152 if (smb1cli_conn_capabilities(cli->conn) & CAP_LARGE_FILES) { 137 153 SIVAL(state->vwv + 10, 0, 138 154 (((uint64_t)offset)>>32) & 0xffffffff); … … 159 175 160 176 struct tevent_req *cli_read_andx_send(TALLOC_CTX *mem_ctx, 161 struct event_context *ev,177 struct tevent_context *ev, 162 178 struct cli_state *cli, uint16_t fnum, 163 179 off_t offset, size_t size) … … 172 188 } 173 189 174 status = cli_smb_req_send(subreq);190 status = smb1cli_req_chain_submit(&subreq, 1); 175 191 if (tevent_req_nterror(req, status)) { 176 192 return tevent_req_post(req, ev); … … 221 237 } 222 238 223 state->buf = (uint8_t *)smb_base(inbuf) + SVAL(vwv+6, 0);224 225 if (trans_oob(smb_len_ large(inbuf), SVAL(vwv+6, 0), state->received)239 state->buf = discard_const_p(uint8_t, smb_base(inbuf)) + SVAL(vwv+6, 0); 240 241 if (trans_oob(smb_len_tcp(inbuf), SVAL(vwv+6, 0), state->received) 226 242 || ((state->received != 0) && (state->buf < bytes))) { 227 243 DEBUG(5, ("server returned invalid read&x data offset\n")); … … 254 270 } 255 271 256 struct cli_readall_state { 272 struct cli_pull_chunk; 273 274 struct cli_pull_state { 257 275 struct tevent_context *ev; 258 276 struct cli_state *cli; 259 277 uint16_t fnum; 260 278 off_t start_offset; 261 size_t size; 262 size_t received; 279 off_t size; 280 281 NTSTATUS (*sink)(char *buf, size_t n, void *priv); 282 void *priv; 283 284 size_t chunk_size; 285 off_t next_offset; 286 off_t remaining; 287 288 /* 289 * How many bytes did we push into "sink"? 290 */ 291 off_t pushed; 292 293 /* 294 * Outstanding requests 295 * 296 * The maximum is 256: 297 * - which would be a window of 256 MByte 298 * for SMB2 with multi-credit 299 * or smb1 unix extensions. 
300 */ 301 uint16_t max_chunks; 302 uint16_t num_chunks; 303 uint16_t num_waiting; 304 struct cli_pull_chunk *chunks; 305 }; 306 307 struct cli_pull_chunk { 308 struct cli_pull_chunk *prev, *next; 309 struct tevent_req *req;/* This is the main request! Not the subreq */ 310 struct tevent_req *subreq; 311 off_t ofs; 263 312 uint8_t *buf; 313 size_t total_size; 314 size_t tmp_size; 315 bool done; 264 316 }; 265 317 266 static void cli_readall_done(struct tevent_req *subreq); 267 268 static struct tevent_req *cli_readall_send(TALLOC_CTX *mem_ctx, 269 struct event_context *ev, 270 struct cli_state *cli, 271 uint16_t fnum, 272 off_t offset, size_t size) 273 { 274 struct tevent_req *req, *subreq; 275 struct cli_readall_state *state; 276 277 req = tevent_req_create(mem_ctx, &state, struct cli_readall_state); 278 if (req == NULL) { 279 return NULL; 280 } 281 state->ev = ev; 282 state->cli = cli; 283 state->fnum = fnum; 284 state->start_offset = offset; 285 state->size = size; 286 state->received = 0; 287 state->buf = NULL; 288 289 subreq = cli_read_andx_send(state, ev, cli, fnum, offset, size); 290 if (tevent_req_nomem(subreq, req)) { 291 return tevent_req_post(req, ev); 292 } 293 tevent_req_set_callback(subreq, cli_readall_done, req); 294 return req; 295 } 296 297 static void cli_readall_done(struct tevent_req *subreq) 298 { 299 struct tevent_req *req = tevent_req_callback_data( 300 subreq, struct tevent_req); 301 struct cli_readall_state *state = tevent_req_data( 302 req, struct cli_readall_state); 303 ssize_t received; 304 uint8_t *buf; 305 NTSTATUS status; 306 307 status = cli_read_andx_recv(subreq, &received, &buf); 308 if (tevent_req_nterror(req, status)) { 309 return; 310 } 311 312 if (received == 0) { 313 /* EOF */ 314 tevent_req_done(req); 315 return; 316 } 317 318 if ((state->received == 0) && (received == state->size)) { 319 /* Ideal case: Got it all in one run */ 320 state->buf = buf; 321 state->received += received; 322 tevent_req_done(req); 323 return; 324 } 
325 326 /* 327 * We got a short read, issue a read for the 328 * rest. Unfortunately we have to allocate the buffer 329 * ourselves now, as our caller expects to receive a single 330 * buffer. cli_read_andx does it from the buffer received from 331 * the net, but with a short read we have to put it together 332 * from several reads. 333 */ 334 335 if (state->buf == NULL) { 336 state->buf = talloc_array(state, uint8_t, state->size); 337 if (tevent_req_nomem(state->buf, req)) { 338 return; 339 } 340 } 341 memcpy(state->buf + state->received, buf, received); 342 state->received += received; 343 344 TALLOC_FREE(subreq); 345 346 if (state->received >= state->size) { 347 tevent_req_done(req); 348 return; 349 } 350 351 subreq = cli_read_andx_send(state, state->ev, state->cli, state->fnum, 352 state->start_offset + state->received, 353 state->size - state->received); 354 if (tevent_req_nomem(subreq, req)) { 355 return; 356 } 357 tevent_req_set_callback(subreq, cli_readall_done, req); 358 } 359 360 static NTSTATUS cli_readall_recv(struct tevent_req *req, ssize_t *received, 361 uint8_t **rcvbuf) 362 { 363 struct cli_readall_state *state = tevent_req_data( 364 req, struct cli_readall_state); 365 NTSTATUS status; 366 367 if (tevent_req_is_nterror(req, &status)) { 368 return status; 369 } 370 *received = state->received; 371 *rcvbuf = state->buf; 372 return NT_STATUS_OK; 373 } 374 375 struct cli_pull_subreq { 376 struct tevent_req *req; 377 ssize_t received; 378 uint8_t *buf; 379 }; 318 static void cli_pull_setup_chunks(struct tevent_req *req); 319 static void cli_pull_chunk_ship(struct cli_pull_chunk *chunk); 320 static void cli_pull_chunk_done(struct tevent_req *subreq); 380 321 381 322 /* … … 387 328 */ 388 329 389 struct cli_pull_state {390 struct tevent_req *req;391 392 struct event_context *ev;393 struct cli_state *cli;394 uint16_t fnum;395 off_t start_offset;396 SMB_OFF_T size;397 398 NTSTATUS (*sink)(char *buf, size_t n, void *priv);399 void *priv;400 401 size_t 
chunk_size;402 403 /*404 * Outstanding requests405 */406 int num_reqs;407 struct cli_pull_subreq *reqs;408 409 /*410 * For how many bytes did we send requests already?411 */412 SMB_OFF_T requested;413 414 /*415 * Next request index to push into "sink". This walks around the "req"416 * array, taking care that the requests are pushed to "sink" in the417 * right order. If necessary (i.e. replies don't come in in the right418 * order), replies are held back in "reqs".419 */420 int top_req;421 422 /*423 * How many bytes did we push into "sink"?424 */425 426 SMB_OFF_T pushed;427 };428 429 static char *cli_pull_print(struct tevent_req *req, TALLOC_CTX *mem_ctx)430 {431 struct cli_pull_state *state = tevent_req_data(432 req, struct cli_pull_state);433 char *result;434 435 result = tevent_req_default_print(req, mem_ctx);436 if (result == NULL) {437 return NULL;438 }439 440 return talloc_asprintf_append_buffer(441 result, "num_reqs=%d, top_req=%d",442 state->num_reqs, state->top_req);443 }444 445 static void cli_pull_read_done(struct tevent_req *read_req);446 447 /*448 * Prepare an async pull request449 */450 451 330 struct tevent_req *cli_pull_send(TALLOC_CTX *mem_ctx, 452 struct event_context *ev,331 struct tevent_context *ev, 453 332 struct cli_state *cli, 454 333 uint16_t fnum, off_t start_offset, 455 SMB_OFF_Tsize, size_t window_size,334 off_t size, size_t window_size, 456 335 NTSTATUS (*sink)(char *buf, size_t n, 457 336 void *priv), … … 460 339 struct tevent_req *req; 461 340 struct cli_pull_state *state; 462 int i; 341 size_t page_size = 1024; 342 uint64_t tmp64; 463 343 464 344 req = tevent_req_create(mem_ctx, &state, struct cli_pull_state); … … 466 346 return NULL; 467 347 } 468 tevent_req_set_print_fn(req, cli_pull_print);469 state->req = req;470 471 348 state->cli = cli; 472 349 state->ev = ev; … … 476 353 state->sink = sink; 477 354 state->priv = priv; 478 479 state->pushed = 0; 480 state->top_req = 0; 355 state->next_offset = start_offset; 356 state->remaining 
= size; 481 357 482 358 if (size == 0) { … … 485 361 } 486 362 487 state->chunk_size = cli_read_max_bufsize(cli); 488 489 state->num_reqs = MAX(window_size/state->chunk_size, 1); 490 state->num_reqs = MIN(state->num_reqs, cli->max_mux); 491 492 state->reqs = TALLOC_ZERO_ARRAY(state, struct cli_pull_subreq, 493 state->num_reqs); 494 if (state->reqs == NULL) { 495 goto failed; 496 } 497 498 state->requested = 0; 499 500 for (i=0; i<state->num_reqs; i++) { 501 struct cli_pull_subreq *subreq = &state->reqs[i]; 502 SMB_OFF_T size_left; 503 size_t request_thistime; 504 505 if (state->requested >= size) { 506 state->num_reqs = i; 363 if (smbXcli_conn_protocol(state->cli->conn) >= PROTOCOL_SMB2_02) { 364 state->chunk_size = smb2cli_conn_max_read_size(cli->conn); 365 } else { 366 state->chunk_size = cli_read_max_bufsize(cli); 367 } 368 if (state->chunk_size > page_size) { 369 state->chunk_size &= ~(page_size - 1); 370 } 371 372 if (window_size == 0) { 373 /* 374 * We use 16 MByte as default window size. 375 */ 376 window_size = 16 * 1024 * 1024; 377 } 378 379 tmp64 = window_size/state->chunk_size; 380 if ((window_size % state->chunk_size) > 0) { 381 tmp64 += 1; 382 } 383 tmp64 = MAX(tmp64, 1); 384 tmp64 = MIN(tmp64, 256); 385 state->max_chunks = tmp64; 386 387 /* 388 * We defer the callback because of the complex 389 * substate/subfunction logic 390 */ 391 tevent_req_defer_callback(req, ev); 392 393 cli_pull_setup_chunks(req); 394 if (!tevent_req_is_in_progress(req)) { 395 return tevent_req_post(req, ev); 396 } 397 398 return req; 399 } 400 401 static void cli_pull_setup_chunks(struct tevent_req *req) 402 { 403 struct cli_pull_state *state = 404 tevent_req_data(req, 405 struct cli_pull_state); 406 struct cli_pull_chunk *chunk, *next = NULL; 407 size_t i; 408 409 for (chunk = state->chunks; chunk; chunk = next) { 410 /* 411 * Note that chunk might be removed from this call. 
412 */ 413 next = chunk->next; 414 cli_pull_chunk_ship(chunk); 415 if (!tevent_req_is_in_progress(req)) { 416 return; 417 } 418 } 419 420 for (i = state->num_chunks; i < state->max_chunks; i++) { 421 422 if (state->num_waiting > 0) { 423 return; 424 } 425 426 if (state->remaining == 0) { 507 427 break; 508 428 } 509 429 510 size_left = size - state->requested; 511 request_thistime = MIN(size_left, state->chunk_size); 512 513 subreq->req = cli_readall_send( 514 state->reqs, ev, cli, fnum, 515 state->start_offset + state->requested, 516 request_thistime); 517 518 if (subreq->req == NULL) { 519 goto failed; 520 } 521 tevent_req_set_callback(subreq->req, cli_pull_read_done, req); 522 state->requested += request_thistime; 523 } 524 return req; 525 526 failed: 527 TALLOC_FREE(req); 528 return NULL; 529 } 530 531 /* 532 * Handle incoming read replies, push the data into sink and send out new 533 * requests if necessary. 534 */ 535 536 static void cli_pull_read_done(struct tevent_req *subreq) 537 { 538 struct tevent_req *req = tevent_req_callback_data( 539 subreq, struct tevent_req); 540 struct cli_pull_state *state = tevent_req_data( 541 req, struct cli_pull_state); 542 struct cli_pull_subreq *pull_subreq = NULL; 430 chunk = talloc_zero(state, struct cli_pull_chunk); 431 if (tevent_req_nomem(chunk, req)) { 432 return; 433 } 434 chunk->req = req; 435 chunk->ofs = state->next_offset; 436 chunk->total_size = MIN(state->remaining, state->chunk_size); 437 state->next_offset += chunk->total_size; 438 state->remaining -= chunk->total_size; 439 440 DLIST_ADD_END(state->chunks, chunk); 441 state->num_chunks++; 442 state->num_waiting++; 443 444 cli_pull_chunk_ship(chunk); 445 if (!tevent_req_is_in_progress(req)) { 446 return; 447 } 448 } 449 450 if (state->remaining > 0) { 451 return; 452 } 453 454 if (state->num_chunks > 0) { 455 return; 456 } 457 458 tevent_req_done(req); 459 } 460 461 static void cli_pull_chunk_ship(struct cli_pull_chunk *chunk) 462 { 463 struct tevent_req *req 
= chunk->req; 464 struct cli_pull_state *state = 465 tevent_req_data(req, 466 struct cli_pull_state); 467 bool ok; 468 off_t ofs; 469 size_t size; 470 471 if (chunk->done) { 472 NTSTATUS status; 473 474 if (chunk != state->chunks) { 475 /* 476 * this chunk is not the 477 * first one in the list. 478 * 479 * which means we should not 480 * push it into the sink yet. 481 */ 482 return; 483 } 484 485 if (chunk->tmp_size == 0) { 486 /* 487 * we got a short read, we're done 488 */ 489 tevent_req_done(req); 490 return; 491 } 492 493 status = state->sink((char *)chunk->buf, 494 chunk->tmp_size, 495 state->priv); 496 if (tevent_req_nterror(req, status)) { 497 return; 498 } 499 state->pushed += chunk->tmp_size; 500 501 if (chunk->tmp_size < chunk->total_size) { 502 /* 503 * we got a short read, we're done 504 */ 505 tevent_req_done(req); 506 return; 507 } 508 509 DLIST_REMOVE(state->chunks, chunk); 510 SMB_ASSERT(state->num_chunks > 0); 511 state->num_chunks--; 512 TALLOC_FREE(chunk); 513 514 return; 515 } 516 517 if (chunk->subreq != NULL) { 518 return; 519 } 520 521 SMB_ASSERT(state->num_waiting > 0); 522 523 ofs = chunk->ofs + chunk->tmp_size; 524 size = chunk->total_size - chunk->tmp_size; 525 526 if (smbXcli_conn_protocol(state->cli->conn) >= PROTOCOL_SMB2_02) { 527 uint32_t max_size; 528 529 ok = smb2cli_conn_req_possible(state->cli->conn, &max_size); 530 if (!ok) { 531 return; 532 } 533 534 /* 535 * downgrade depending on the available credits 536 */ 537 size = MIN(max_size, size); 538 539 chunk->subreq = cli_smb2_read_send(chunk, 540 state->ev, 541 state->cli, 542 state->fnum, 543 ofs, 544 size); 545 if (tevent_req_nomem(chunk->subreq, req)) { 546 return; 547 } 548 } else { 549 ok = smb1cli_conn_req_possible(state->cli->conn); 550 if (!ok) { 551 return; 552 } 553 554 chunk->subreq = cli_read_andx_send(chunk, 555 state->ev, 556 state->cli, 557 state->fnum, 558 ofs, 559 size); 560 if (tevent_req_nomem(chunk->subreq, req)) { 561 return; 562 } 563 } 564 
tevent_req_set_callback(chunk->subreq, 565 cli_pull_chunk_done, 566 chunk); 567 568 state->num_waiting--; 569 return; 570 } 571 572 static void cli_pull_chunk_done(struct tevent_req *subreq) 573 { 574 struct cli_pull_chunk *chunk = 575 tevent_req_callback_data(subreq, 576 struct cli_pull_chunk); 577 struct tevent_req *req = chunk->req; 578 struct cli_pull_state *state = 579 tevent_req_data(req, 580 struct cli_pull_state); 543 581 NTSTATUS status; 544 int i; 545 546 for (i = 0; i < state->num_reqs; i++) { 547 pull_subreq = &state->reqs[i]; 548 if (subreq == pull_subreq->req) { 549 break; 550 } 551 } 552 if (i == state->num_reqs) { 553 /* Huh -- received something we did not send?? */ 554 tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR); 555 return; 556 } 557 558 status = cli_readall_recv(subreq, &pull_subreq->received, 559 &pull_subreq->buf); 560 if (!NT_STATUS_IS_OK(status)) { 561 tevent_req_nterror(state->req, status); 562 return; 563 } 564 565 /* 566 * This loop is the one to take care of out-of-order replies. All 567 * pending requests are in state->reqs, state->reqs[top_req] is the 568 * one that is to be pushed next. If however a request later than 569 * top_req is replied to, then we can't push yet. If top_req is 570 * replied to at a later point then, we need to push all the finished 571 * requests. 
572 */ 573 574 while (state->reqs[state->top_req].req != NULL) { 575 struct cli_pull_subreq *top_subreq; 576 577 DEBUG(11, ("cli_pull_read_done: top_req = %d\n", 578 state->top_req)); 579 580 top_subreq = &state->reqs[state->top_req]; 581 582 if (tevent_req_is_in_progress(top_subreq->req)) { 583 DEBUG(11, ("cli_pull_read_done: top request not yet " 584 "done\n")); 585 return; 586 } 587 588 DEBUG(10, ("cli_pull_read_done: Pushing %d bytes, %d already " 589 "pushed\n", (int)top_subreq->received, 590 (int)state->pushed)); 591 592 status = state->sink((char *)top_subreq->buf, 593 top_subreq->received, state->priv); 594 if (tevent_req_nterror(state->req, status)) { 595 return; 596 } 597 state->pushed += top_subreq->received; 598 599 TALLOC_FREE(state->reqs[state->top_req].req); 600 601 if (state->requested < state->size) { 602 struct tevent_req *new_req; 603 SMB_OFF_T size_left; 604 size_t request_thistime; 605 606 size_left = state->size - state->requested; 607 request_thistime = MIN(size_left, state->chunk_size); 608 609 DEBUG(10, ("cli_pull_read_done: Requesting %d bytes " 610 "at %d, position %d\n", 611 (int)request_thistime, 612 (int)(state->start_offset 613 + state->requested), 614 state->top_req)); 615 616 new_req = cli_readall_send( 617 state->reqs, state->ev, state->cli, 618 state->fnum, 619 state->start_offset + state->requested, 620 request_thistime); 621 622 if (tevent_req_nomem(new_req, state->req)) { 623 return; 624 } 625 tevent_req_set_callback(new_req, cli_pull_read_done, 626 req); 627 628 state->reqs[state->top_req].req = new_req; 629 state->requested += request_thistime; 630 } 631 632 state->top_req = (state->top_req+1) % state->num_reqs; 633 } 634 635 tevent_req_done(req); 636 } 637 638 NTSTATUS cli_pull_recv(struct tevent_req *req, SMB_OFF_T *received) 582 size_t expected = chunk->total_size - chunk->tmp_size; 583 ssize_t received; 584 uint8_t *buf = NULL; 585 586 chunk->subreq = NULL; 587 588 if (smbXcli_conn_protocol(state->cli->conn) >= 
PROTOCOL_SMB2_02) { 589 status = cli_smb2_read_recv(subreq, &received, &buf); 590 } else { 591 status = cli_read_andx_recv(subreq, &received, &buf); 592 } 593 if (NT_STATUS_EQUAL(status, NT_STATUS_END_OF_FILE)) { 594 received = 0; 595 status = NT_STATUS_OK; 596 } 597 if (tevent_req_nterror(req, status)) { 598 return; 599 } 600 601 if (received > expected) { 602 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE); 603 return; 604 } 605 606 if (received == 0) { 607 /* 608 * We got EOF we're done 609 */ 610 chunk->done = true; 611 cli_pull_setup_chunks(req); 612 return; 613 } 614 615 if (received == chunk->total_size) { 616 /* 617 * We got it in the first run. 618 * 619 * We don't call TALLOC_FREE(subreq) 620 * here and keep the returned buffer. 621 */ 622 chunk->buf = buf; 623 } else if (chunk->buf == NULL) { 624 chunk->buf = talloc_array(chunk, uint8_t, chunk->total_size); 625 if (tevent_req_nomem(chunk->buf, req)) { 626 return; 627 } 628 } 629 630 if (received != chunk->total_size) { 631 uint8_t *p = chunk->buf + chunk->tmp_size; 632 memcpy(p, buf, received); 633 TALLOC_FREE(subreq); 634 } 635 636 chunk->tmp_size += received; 637 638 if (chunk->tmp_size == chunk->total_size) { 639 chunk->done = true; 640 } else { 641 state->num_waiting++; 642 } 643 644 cli_pull_setup_chunks(req); 645 } 646 647 NTSTATUS cli_pull_recv(struct tevent_req *req, off_t *received) 639 648 { 640 649 struct cli_pull_state *state = tevent_req_data( … … 643 652 644 653 if (tevent_req_is_nterror(req, &status)) { 654 tevent_req_received(req); 645 655 return status; 646 656 } 647 657 *received = state->pushed; 658 tevent_req_received(req); 648 659 return NT_STATUS_OK; 649 660 } 650 661 651 662 NTSTATUS cli_pull(struct cli_state *cli, uint16_t fnum, 652 off_t start_offset, SMB_OFF_Tsize, size_t window_size,663 off_t start_offset, off_t size, size_t window_size, 653 664 NTSTATUS (*sink)(char *buf, size_t n, void *priv), 654 void *priv, SMB_OFF_T*received)665 void *priv, off_t *received) 655 
666 { 656 667 TALLOC_CTX *frame = talloc_stackframe(); 657 struct event_context *ev;668 struct tevent_context *ev; 658 669 struct tevent_req *req; 659 670 NTSTATUS status = NT_STATUS_OK; 660 671 661 if ( cli_has_async_calls(cli)) {672 if (smbXcli_conn_has_async_calls(cli->conn)) { 662 673 /* 663 674 * Can't use sync call while an async call is in flight … … 667 678 } 668 679 669 ev = event_context_init(frame);680 ev = samba_tevent_context_init(frame); 670 681 if (ev == NULL) { 671 682 status = NT_STATUS_NO_MEMORY; … … 680 691 } 681 692 682 if (!tevent_req_poll(req, ev)) { 683 status = map_nt_error_from_unix(errno); 693 if (!tevent_req_poll_ntstatus(req, ev, &status)) { 684 694 goto fail; 685 695 } … … 699 709 } 700 710 701 ssize_t cli_read(struct cli_state *cli, uint16_t fnum, char *buf, 702 off_t offset, size_t size) 711 NTSTATUS cli_read(struct cli_state *cli, uint16_t fnum, 712 char *buf, off_t offset, size_t size, 713 size_t *nread) 703 714 { 704 715 NTSTATUS status; 705 SMB_OFF_Tret;716 off_t ret; 706 717 707 718 status = cli_pull(cli, fnum, offset, size, size, 708 719 cli_read_sink, &buf, &ret); 709 720 if (!NT_STATUS_IS_OK(status)) { 710 return -1; 711 } 712 return ret; 721 return status; 722 } 723 724 if (nread) { 725 *nread = ret; 726 } 727 728 return NT_STATUS_OK; 713 729 } 714 730 … … 727 743 */ 728 744 729 bytes = TALLOC_ARRAY(talloc_tos(), uint8_t, 3);745 bytes = talloc_array(talloc_tos(), uint8_t, 3); 730 746 if (bytes == NULL) { 731 747 return NT_STATUS_NO_MEMORY; … … 734 750 735 751 do { 736 size_t size = MIN(size1, cli->max_xmit - 48); 752 uint32_t usable_space = cli_state_available_size(cli, 48); 753 size_t size = MIN(size1, usable_space); 737 754 struct tevent_req *req; 738 755 uint16_t vwv[5]; … … 745 762 SSVAL(vwv+4, 0, 0); 746 763 747 bytes = TALLOC_REALLOC_ARRAY(talloc_tos(), bytes, uint8_t,764 bytes = talloc_realloc(talloc_tos(), bytes, uint8_t, 748 765 size+3); 749 766 if (bytes == NULL) { … … 795 812 796 813 struct tevent_req 
*cli_write_andx_create(TALLOC_CTX *mem_ctx, 797 struct event_context *ev,814 struct tevent_context *ev, 798 815 struct cli_state *cli, uint16_t fnum, 799 816 uint16_t mode, const uint8_t *buf, … … 805 822 struct tevent_req *req, *subreq; 806 823 struct cli_write_andx_state *state; 807 bool bigoffset = (( cli->capabilities& CAP_LARGE_FILES) != 0);824 bool bigoffset = ((smb1cli_conn_capabilities(cli->conn) & CAP_LARGE_FILES) != 0); 808 825 uint8_t wct = bigoffset ? 14 : 12; 809 826 size_t max_write = cli_write_max_bufsize(cli, mode, wct); … … 831 848 832 849 SSVAL(vwv+11, 0, 833 cli_smb_wct_ofs(reqs_before, num_reqs_before)850 smb1cli_req_wct_ofs(reqs_before, num_reqs_before) 834 851 + 1 /* the wct field */ 835 852 + wct * 2 /* vwv */ … … 844 861 state->iov[0].iov_base = (void *)&state->pad; 845 862 state->iov[0].iov_len = 1; 846 state->iov[1].iov_base = CONST_DISCARD(void *, buf);863 state->iov[1].iov_base = discard_const_p(void, buf); 847 864 state->iov[1].iov_len = state->size; 848 865 … … 858 875 859 876 struct tevent_req *cli_write_andx_send(TALLOC_CTX *mem_ctx, 860 struct event_context *ev,877 struct tevent_context *ev, 861 878 struct cli_state *cli, uint16_t fnum, 862 879 uint16_t mode, const uint8_t *buf, … … 872 889 } 873 890 874 status = cli_smb_req_send(subreq);891 status = smb1cli_req_chain_submit(&subreq, 1); 875 892 if (tevent_req_nterror(req, status)) { 876 893 return tevent_req_post(req, ev); … … 887 904 uint8_t wct; 888 905 uint16_t *vwv; 889 uint8_t *inbuf;890 906 NTSTATUS status; 891 907 892 status = cli_smb_recv(subreq, state, &inbuf, 6, &wct, &vwv,908 status = cli_smb_recv(subreq, state, NULL, 6, &wct, &vwv, 893 909 NULL, NULL); 894 910 TALLOC_FREE(subreq); … … 922 938 return status; 923 939 } 924 *pwritten = state->written; 940 if (pwritten != 0) { 941 *pwritten = state->written; 942 } 925 943 return NT_STATUS_OK; 926 944 } 927 945 928 946 struct cli_writeall_state { 929 struct event_context *ev;947 struct tevent_context *ev; 930 948 struct 
cli_state *cli; 931 949 uint16_t fnum; … … 940 958 941 959 static struct tevent_req *cli_writeall_send(TALLOC_CTX *mem_ctx, 942 struct event_context *ev,960 struct tevent_context *ev, 943 961 struct cli_state *cli, 944 962 uint16_t fnum, … … 1033 1051 { 1034 1052 TALLOC_CTX *frame = talloc_stackframe(); 1035 struct event_context *ev;1053 struct tevent_context *ev; 1036 1054 struct tevent_req *req; 1037 1055 NTSTATUS status = NT_STATUS_NO_MEMORY; 1038 1056 1039 if ( cli_has_async_calls(cli)) {1057 if (smbXcli_conn_has_async_calls(cli->conn)) { 1040 1058 /* 1041 1059 * Can't use sync call while an async call is in flight … … 1044 1062 goto fail; 1045 1063 } 1046 ev = event_context_init(frame);1064 ev = samba_tevent_context_init(frame); 1047 1065 if (ev == NULL) { 1048 1066 goto fail; 1049 1067 } 1050 req = cli_writeall_send(frame, ev, cli, fnum, mode, buf, offset, size); 1068 if (smbXcli_conn_protocol(cli->conn) >= PROTOCOL_SMB2_02) { 1069 req = cli_smb2_writeall_send(frame, ev, cli, fnum, mode, 1070 buf, offset, size); 1071 } else { 1072 req = cli_writeall_send(frame, ev, cli, fnum, mode, 1073 buf, offset, size); 1074 } 1051 1075 if (req == NULL) { 1052 1076 goto fail; 1053 1077 } 1054 if (!tevent_req_poll(req, ev)) { 1055 status = map_nt_error_from_unix(errno); 1078 if (!tevent_req_poll_ntstatus(req, ev, &status)) { 1056 1079 goto fail; 1057 1080 } 1058 status = cli_writeall_recv(req, pwritten); 1081 if (smbXcli_conn_protocol(cli->conn) >= PROTOCOL_SMB2_02) { 1082 status = cli_smb2_writeall_recv(req, pwritten); 1083 } else { 1084 status = cli_writeall_recv(req, pwritten); 1085 } 1059 1086 fail: 1060 1087 TALLOC_FREE(frame); … … 1062 1089 } 1063 1090 1064 struct cli_push_write_state { 1065 struct tevent_req *req;/* This is the main request! 
Not the subreq */ 1066 uint32_t idx; 1067 off_t ofs; 1068 uint8_t *buf; 1069 size_t size; 1070 }; 1091 struct cli_push_chunk; 1071 1092 1072 1093 struct cli_push_state { 1073 struct event_context *ev;1094 struct tevent_context *ev; 1074 1095 struct cli_state *cli; 1075 1096 uint16_t fnum; 1076 1097 uint16_t mode; 1077 1098 off_t start_offset; 1078 size_t window_size;1079 1099 1080 1100 size_t (*source)(uint8_t *buf, size_t n, void *priv); … … 1088 1108 /* 1089 1109 * Outstanding requests 1110 * 1111 * The maximum is 256: 1112 * - which would be a window of 256 MByte 1113 * for SMB2 with multi-credit 1114 * or smb1 unix extensions. 1090 1115 */ 1091 uint32_t pending; 1092 uint32_t num_reqs; 1093 struct cli_push_write_state **reqs; 1116 uint16_t max_chunks; 1117 uint16_t num_chunks; 1118 uint16_t num_waiting; 1119 struct cli_push_chunk *chunks; 1094 1120 }; 1095 1121 1096 static void cli_push_written(struct tevent_req *req); 1097 1098 static bool cli_push_write_setup(struct tevent_req *req, 1099 struct cli_push_state *state, 1100 uint32_t idx) 1101 { 1102 struct cli_push_write_state *substate; 1122 struct cli_push_chunk { 1123 struct cli_push_chunk *prev, *next; 1124 struct tevent_req *req;/* This is the main request! 
Not the subreq */ 1103 1125 struct tevent_req *subreq; 1104 1105 substate = talloc(state->reqs, struct cli_push_write_state); 1106 if (!substate) { 1107 return false; 1108 } 1109 substate->req = req; 1110 substate->idx = idx; 1111 substate->ofs = state->next_offset; 1112 substate->buf = talloc_array(substate, uint8_t, state->chunk_size); 1113 if (!substate->buf) { 1114 talloc_free(substate); 1115 return false; 1116 } 1117 substate->size = state->source(substate->buf, 1118 state->chunk_size, 1119 state->priv); 1120 if (substate->size == 0) { 1121 state->eof = true; 1122 /* nothing to send */ 1123 talloc_free(substate); 1124 return true; 1125 } 1126 1127 subreq = cli_writeall_send(substate, 1128 state->ev, state->cli, 1129 state->fnum, state->mode, 1130 substate->buf, 1131 substate->ofs, 1132 substate->size); 1133 if (!subreq) { 1134 talloc_free(substate); 1135 return false; 1136 } 1137 tevent_req_set_callback(subreq, cli_push_written, substate); 1138 1139 state->reqs[idx] = substate; 1140 state->pending += 1; 1141 state->next_offset += substate->size; 1142 1143 return true; 1144 } 1145 1146 struct tevent_req *cli_push_send(TALLOC_CTX *mem_ctx, struct event_context *ev, 1126 off_t ofs; 1127 uint8_t *buf; 1128 size_t total_size; 1129 size_t tmp_size; 1130 bool done; 1131 }; 1132 1133 static void cli_push_setup_chunks(struct tevent_req *req); 1134 static void cli_push_chunk_ship(struct cli_push_chunk *chunk); 1135 static void cli_push_chunk_done(struct tevent_req *subreq); 1136 1137 struct tevent_req *cli_push_send(TALLOC_CTX *mem_ctx, struct tevent_context *ev, 1147 1138 struct cli_state *cli, 1148 1139 uint16_t fnum, uint16_t mode, … … 1154 1145 struct tevent_req *req; 1155 1146 struct cli_push_state *state; 1156 uint32_t i; 1147 size_t page_size = 1024; 1148 uint64_t tmp64; 1157 1149 1158 1150 req = tevent_req_create(mem_ctx, &state, struct cli_push_state); … … 1167 1159 state->source = source; 1168 1160 state->priv = priv; 1169 state->eof = false;1170 
state->pending = 0;1171 1161 state->next_offset = start_offset; 1172 1162 1173 state->chunk_size = cli_write_max_bufsize(cli, mode, 14); 1163 if (smbXcli_conn_protocol(state->cli->conn) >= PROTOCOL_SMB2_02) { 1164 state->chunk_size = smb2cli_conn_max_write_size(cli->conn); 1165 } else { 1166 state->chunk_size = cli_write_max_bufsize(cli, mode, 14); 1167 } 1168 if (state->chunk_size > page_size) { 1169 state->chunk_size &= ~(page_size - 1); 1170 } 1174 1171 1175 1172 if (window_size == 0) { 1176 window_size = cli->max_mux * state->chunk_size; 1177 } 1178 state->num_reqs = window_size/state->chunk_size; 1173 /* 1174 * We use 16 MByte as default window size. 1175 */ 1176 window_size = 16 * 1024 * 1024; 1177 } 1178 1179 tmp64 = window_size/state->chunk_size; 1179 1180 if ((window_size % state->chunk_size) > 0) { 1180 state->num_reqs += 1; 1181 } 1182 state->num_reqs = MIN(state->num_reqs, cli->max_mux); 1183 state->num_reqs = MAX(state->num_reqs, 1); 1184 1185 state->reqs = TALLOC_ZERO_ARRAY(state, struct cli_push_write_state *, 1186 state->num_reqs); 1187 if (state->reqs == NULL) { 1188 goto failed; 1189 } 1190 1191 for (i=0; i<state->num_reqs; i++) { 1192 if (!cli_push_write_setup(req, state, i)) { 1193 goto failed; 1181 tmp64 += 1; 1182 } 1183 tmp64 = MAX(tmp64, 1); 1184 tmp64 = MIN(tmp64, 256); 1185 state->max_chunks = tmp64; 1186 1187 /* 1188 * We defer the callback because of the complex 1189 * substate/subfunction logic 1190 */ 1191 tevent_req_defer_callback(req, ev); 1192 1193 cli_push_setup_chunks(req); 1194 if (!tevent_req_is_in_progress(req)) { 1195 return tevent_req_post(req, ev); 1196 } 1197 1198 return req; 1199 } 1200 1201 static void cli_push_setup_chunks(struct tevent_req *req) 1202 { 1203 struct cli_push_state *state = 1204 tevent_req_data(req, 1205 struct cli_push_state); 1206 struct cli_push_chunk *chunk, *next = NULL; 1207 size_t i; 1208 1209 for (chunk = state->chunks; chunk; chunk = next) { 1210 /* 1211 * Note that chunk might be removed from 
this call. 1212 */ 1213 next = chunk->next; 1214 cli_push_chunk_ship(chunk); 1215 if (!tevent_req_is_in_progress(req)) { 1216 return; 1217 } 1218 } 1219 1220 for (i = state->num_chunks; i < state->max_chunks; i++) { 1221 1222 if (state->num_waiting > 0) { 1223 return; 1194 1224 } 1195 1225 … … 1197 1227 break; 1198 1228 } 1199 } 1200 1201 if (state->pending == 0) { 1202 tevent_req_done(req); 1203 return tevent_req_post(req, ev); 1204 } 1205 1206 return req; 1207 1208 failed: 1209 tevent_req_nterror(req, NT_STATUS_NO_MEMORY); 1210 return tevent_req_post(req, ev); 1211 } 1212 1213 static void cli_push_written(struct tevent_req *subreq) 1214 { 1215 struct cli_push_write_state *substate = tevent_req_callback_data( 1216 subreq, struct cli_push_write_state); 1217 struct tevent_req *req = substate->req; 1218 struct cli_push_state *state = tevent_req_data( 1219 req, struct cli_push_state); 1229 1230 chunk = talloc_zero(state, struct cli_push_chunk); 1231 if (tevent_req_nomem(chunk, req)) { 1232 return; 1233 } 1234 chunk->req = req; 1235 chunk->ofs = state->next_offset; 1236 chunk->buf = talloc_array(chunk, 1237 uint8_t, 1238 state->chunk_size); 1239 if (tevent_req_nomem(chunk->buf, req)) { 1240 return; 1241 } 1242 chunk->total_size = state->source(chunk->buf, 1243 state->chunk_size, 1244 state->priv); 1245 if (chunk->total_size == 0) { 1246 /* nothing to send */ 1247 talloc_free(chunk); 1248 state->eof = true; 1249 break; 1250 } 1251 state->next_offset += chunk->total_size; 1252 1253 DLIST_ADD_END(state->chunks, chunk); 1254 state->num_chunks++; 1255 state->num_waiting++; 1256 1257 cli_push_chunk_ship(chunk); 1258 if (!tevent_req_is_in_progress(req)) { 1259 return; 1260 } 1261 } 1262 1263 if (!state->eof) { 1264 return; 1265 } 1266 1267 if (state->num_chunks > 0) { 1268 return; 1269 } 1270 1271 tevent_req_done(req); 1272 } 1273 1274 static void cli_push_chunk_ship(struct cli_push_chunk *chunk) 1275 { 1276 struct tevent_req *req = chunk->req; 1277 struct cli_push_state 
*state = 1278 tevent_req_data(req, 1279 struct cli_push_state); 1280 bool ok; 1281 const uint8_t *buf; 1282 off_t ofs; 1283 size_t size; 1284 1285 if (chunk->done) { 1286 DLIST_REMOVE(state->chunks, chunk); 1287 SMB_ASSERT(state->num_chunks > 0); 1288 state->num_chunks--; 1289 TALLOC_FREE(chunk); 1290 1291 return; 1292 } 1293 1294 if (chunk->subreq != NULL) { 1295 return; 1296 } 1297 1298 SMB_ASSERT(state->num_waiting > 0); 1299 1300 buf = chunk->buf + chunk->tmp_size; 1301 ofs = chunk->ofs + chunk->tmp_size; 1302 size = chunk->total_size - chunk->tmp_size; 1303 1304 if (smbXcli_conn_protocol(state->cli->conn) >= PROTOCOL_SMB2_02) { 1305 uint32_t max_size; 1306 1307 ok = smb2cli_conn_req_possible(state->cli->conn, &max_size); 1308 if (!ok) { 1309 return; 1310 } 1311 1312 /* 1313 * downgrade depending on the available credits 1314 */ 1315 size = MIN(max_size, size); 1316 1317 chunk->subreq = cli_smb2_write_send(chunk, 1318 state->ev, 1319 state->cli, 1320 state->fnum, 1321 state->mode, 1322 buf, 1323 ofs, 1324 size); 1325 if (tevent_req_nomem(chunk->subreq, req)) { 1326 return; 1327 } 1328 } else { 1329 ok = smb1cli_conn_req_possible(state->cli->conn); 1330 if (!ok) { 1331 return; 1332 } 1333 1334 chunk->subreq = cli_write_andx_send(chunk, 1335 state->ev, 1336 state->cli, 1337 state->fnum, 1338 state->mode, 1339 buf, 1340 ofs, 1341 size); 1342 if (tevent_req_nomem(chunk->subreq, req)) { 1343 return; 1344 } 1345 } 1346 tevent_req_set_callback(chunk->subreq, 1347 cli_push_chunk_done, 1348 chunk); 1349 1350 state->num_waiting--; 1351 return; 1352 } 1353 1354 static void cli_push_chunk_done(struct tevent_req *subreq) 1355 { 1356 struct cli_push_chunk *chunk = 1357 tevent_req_callback_data(subreq, 1358 struct cli_push_chunk); 1359 struct tevent_req *req = chunk->req; 1360 struct cli_push_state *state = 1361 tevent_req_data(req, 1362 struct cli_push_state); 1220 1363 NTSTATUS status; 1221 uint32_t idx = substate->idx; 1222 1223 state->reqs[idx] = NULL; 1224 state->pending 
-= 1; 1225 1226 status = cli_writeall_recv(subreq, NULL); 1364 size_t expected = chunk->total_size - chunk->tmp_size; 1365 size_t written; 1366 1367 chunk->subreq = NULL; 1368 1369 if (smbXcli_conn_protocol(state->cli->conn) >= PROTOCOL_SMB2_02) { 1370 status = cli_smb2_write_recv(subreq, &written); 1371 } else { 1372 status = cli_write_andx_recv(subreq, &written); 1373 } 1227 1374 TALLOC_FREE(subreq); 1228 TALLOC_FREE(substate);1229 1375 if (tevent_req_nterror(req, status)) { 1230 1376 return; 1231 1377 } 1232 1378 1233 if (!state->eof) { 1234 if (!cli_push_write_setup(req, state, idx)) { 1235 tevent_req_nterror(req, NT_STATUS_NO_MEMORY); 1236 return; 1237 } 1238 } 1239 1240 if (state->pending == 0) { 1241 tevent_req_done(req); 1242 return; 1243 } 1379 if (written > expected) { 1380 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE); 1381 return; 1382 } 1383 1384 if (written == 0) { 1385 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE); 1386 return; 1387 } 1388 1389 chunk->tmp_size += written; 1390 1391 if (chunk->tmp_size == chunk->total_size) { 1392 chunk->done = true; 1393 } else { 1394 state->num_waiting++; 1395 } 1396 1397 cli_push_setup_chunks(req); 1244 1398 } 1245 1399 … … 1255 1409 { 1256 1410 TALLOC_CTX *frame = talloc_stackframe(); 1257 struct event_context *ev;1411 struct tevent_context *ev; 1258 1412 struct tevent_req *req; 1259 1413 NTSTATUS status = NT_STATUS_OK; 1260 1414 1261 if ( cli_has_async_calls(cli)) {1415 if (smbXcli_conn_has_async_calls(cli->conn)) { 1262 1416 /* 1263 1417 * Can't use sync call while an async call is in flight … … 1267 1421 } 1268 1422 1269 ev = event_context_init(frame);1423 ev = samba_tevent_context_init(frame); 1270 1424 if (ev == NULL) { 1271 1425 status = NT_STATUS_NO_MEMORY; … … 1280 1434 } 1281 1435 1282 if (!tevent_req_poll(req, ev)) { 1283 status = map_nt_error_from_unix(errno); 1436 if (!tevent_req_poll_ntstatus(req, ev, &status)) { 1284 1437 goto fail; 1285 1438 } … … 1290 1443 return status; 
1291 1444 } 1445 1446 #define SPLICE_BLOCK_SIZE 1024 * 1024 1447 1448 static NTSTATUS cli_splice_fallback(TALLOC_CTX *frame, 1449 struct cli_state *srccli, 1450 struct cli_state *dstcli, 1451 uint16_t src_fnum, uint16_t dst_fnum, 1452 off_t initial_size, 1453 off_t src_offset, off_t dst_offset, 1454 off_t *written, 1455 int (*splice_cb)(off_t n, void *priv), 1456 void *priv) 1457 { 1458 NTSTATUS status; 1459 uint8_t *buf = talloc_size(frame, SPLICE_BLOCK_SIZE); 1460 size_t nread; 1461 off_t remaining = initial_size; 1462 1463 while (remaining) { 1464 status = cli_read(srccli, src_fnum, 1465 (char *)buf, src_offset, SPLICE_BLOCK_SIZE, 1466 &nread); 1467 if (!NT_STATUS_IS_OK(status)) { 1468 return status; 1469 } 1470 1471 status = cli_writeall(dstcli, dst_fnum, 0, 1472 buf, dst_offset, nread, NULL); 1473 if (!NT_STATUS_IS_OK(status)) { 1474 return status; 1475 } 1476 1477 if ((src_offset > INT64_MAX - nread) || 1478 (dst_offset > INT64_MAX - nread)) { 1479 return NT_STATUS_FILE_TOO_LARGE; 1480 } 1481 src_offset += nread; 1482 dst_offset += nread; 1483 if (remaining < nread) { 1484 return NT_STATUS_INTERNAL_ERROR; 1485 } 1486 remaining -= nread; 1487 if (!splice_cb(initial_size - remaining, priv)) { 1488 return NT_STATUS_CANCELLED; 1489 } 1490 } 1491 1492 return NT_STATUS_OK; 1493 } 1494 1495 NTSTATUS cli_splice(struct cli_state *srccli, struct cli_state *dstcli, 1496 uint16_t src_fnum, uint16_t dst_fnum, 1497 off_t size, 1498 off_t src_offset, off_t dst_offset, 1499 off_t *written, 1500 int (*splice_cb)(off_t n, void *priv), void *priv) 1501 { 1502 TALLOC_CTX *frame = talloc_stackframe(); 1503 struct tevent_context *ev; 1504 struct tevent_req *req; 1505 NTSTATUS status = NT_STATUS_NO_MEMORY; 1506 bool retry_fallback = false; 1507 1508 if (smbXcli_conn_has_async_calls(srccli->conn) || 1509 smbXcli_conn_has_async_calls(dstcli->conn)) 1510 { 1511 /* 1512 * Can't use sync call while an async call is in flight 1513 */ 1514 status = NT_STATUS_INVALID_PARAMETER; 1515 goto 
out; 1516 } 1517 1518 do { 1519 ev = samba_tevent_context_init(frame); 1520 if (ev == NULL) { 1521 goto out; 1522 } 1523 if (srccli == dstcli && 1524 smbXcli_conn_protocol(srccli->conn) >= PROTOCOL_SMB2_02 && 1525 !retry_fallback) 1526 { 1527 req = cli_smb2_splice_send(frame, ev, 1528 srccli, src_fnum, dst_fnum, 1529 size, src_offset, dst_offset, 1530 splice_cb, priv); 1531 } else { 1532 status = cli_splice_fallback(frame, 1533 srccli, dstcli, 1534 src_fnum, dst_fnum, 1535 size, 1536 src_offset, dst_offset, 1537 written, 1538 splice_cb, priv); 1539 goto out; 1540 } 1541 if (req == NULL) { 1542 goto out; 1543 } 1544 if (!tevent_req_poll(req, ev)) { 1545 status = map_nt_error_from_unix(errno); 1546 goto out; 1547 } 1548 status = cli_smb2_splice_recv(req, written); 1549 1550 /* 1551 * Older versions of Samba don't support 1552 * FSCTL_SRV_COPYCHUNK_WRITE so use the fallback. 1553 */ 1554 retry_fallback = NT_STATUS_EQUAL(status, NT_STATUS_INVALID_DEVICE_REQUEST); 1555 } while (retry_fallback); 1556 1557 out: 1558 TALLOC_FREE(frame); 1559 return status; 1560 }
Note:
See TracChangeset
for help on using the changeset viewer.