source: trunk/server/source3/libsmb/clireadwrite.c

Last change on this file was 751, checked in by Silvan Scherrer, 13 years ago

Samba Server: updated trunk to 3.6.9

File size: 31.8 KB
1/*
2 Unix SMB/CIFS implementation.
3 client file read/write routines
4 Copyright (C) Andrew Tridgell 1994-1998
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>.
18*/
19
20#include "includes.h"
21#include "libsmb/libsmb.h"
22#include "../lib/util/tevent_ntstatus.h"
23#include "async_smb.h"
24#include "trans2.h"
25
26/****************************************************************************
27 Calculate the recommended read buffer size
28****************************************************************************/
29static size_t cli_read_max_bufsize(struct cli_state *cli)
30{
31 size_t data_offset = smb_size - 4;
32 size_t wct = 12;
33
34 size_t useable_space;
35
36 if (!client_is_signing_on(cli) && !cli_encryption_on(cli)
37 && (cli->server_posix_capabilities & CIFS_UNIX_LARGE_READ_CAP)) {
38 return CLI_SAMBA_MAX_POSIX_LARGE_READX_SIZE;
39 }
40 if (cli->capabilities & CAP_LARGE_READX) {
41 return cli->is_samba
42 ? CLI_SAMBA_MAX_LARGE_READX_SIZE
43 : CLI_WINDOWS_MAX_LARGE_READX_SIZE;
44 }
45
46 data_offset += wct * sizeof(uint16_t);
47 data_offset += 1; /* pad */
48
49 useable_space = cli->max_xmit - data_offset;
50
51 return useable_space;
52}
53
54/****************************************************************************
55 Calculate the recommended write buffer size
56****************************************************************************/
57static size_t cli_write_max_bufsize(struct cli_state *cli,
58 uint16_t write_mode,
59 uint8_t wct)
60{
61 if (write_mode == 0 &&
62 !client_is_signing_on(cli) &&
63 !cli_encryption_on(cli) &&
64 (cli->server_posix_capabilities & CIFS_UNIX_LARGE_WRITE_CAP) &&
65 (cli->capabilities & CAP_LARGE_FILES)) {
66 /* Only do massive writes if we can do them direct
67 * with no signing or encrypting - not on a pipe. */
68 return CLI_SAMBA_MAX_POSIX_LARGE_WRITEX_SIZE;
69 }
70
71 if (cli->is_samba) {
72 return CLI_SAMBA_MAX_LARGE_WRITEX_SIZE;
73 }
74
75 if (((cli->capabilities & CAP_LARGE_WRITEX) == 0)
76 || client_is_signing_on(cli)
77 || strequal(cli->dev, "LPT1:")) {
78 size_t data_offset = smb_size - 4;
79 size_t useable_space;
80
81 data_offset += wct * sizeof(uint16_t);
82 data_offset += 1; /* pad */
83
84 useable_space = cli->max_xmit - data_offset;
85
86 return useable_space;
87 }
88
89 return CLI_WINDOWS_MAX_LARGE_WRITEX_SIZE;
90}
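/*
 * Worked example of the small-transfer fallback above (illustrative only;
 * the max_xmit value is hypothetical). With smb_size being 39 in this
 * tree and wct = 12, the read-side fallback yields
 *
 *   useable_space = max_xmit - ((39 - 4) + 12 * 2 + 1)
 *                 = 16644 - 60 = 16584 bytes for max_xmit = 16644.
 *
 * The write-side fallback is the same calculation with the caller's wct
 * (12 or 14 words). The large read/write capability paths above bypass
 * this per-request limit entirely.
 */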
91
92struct cli_read_andx_state {
93 size_t size;
94 uint16_t vwv[12];
95 NTSTATUS status;
96 size_t received;
97 uint8_t *buf;
98};
99
100static void cli_read_andx_done(struct tevent_req *subreq);
101
102struct tevent_req *cli_read_andx_create(TALLOC_CTX *mem_ctx,
103 struct event_context *ev,
104 struct cli_state *cli, uint16_t fnum,
105 off_t offset, size_t size,
106 struct tevent_req **psmbreq)
107{
108 struct tevent_req *req, *subreq;
109 struct cli_read_andx_state *state;
110 uint8_t wct = 10;
111
112 if (size > cli_read_max_bufsize(cli)) {
113 DEBUG(0, ("cli_read_andx_send got size=%d, can only handle "
114 "size=%d\n", (int)size,
115 (int)cli_read_max_bufsize(cli)));
116 return NULL;
117 }
118
119 req = tevent_req_create(mem_ctx, &state, struct cli_read_andx_state);
120 if (req == NULL) {
121 return NULL;
122 }
123 state->size = size;
124
125 SCVAL(state->vwv + 0, 0, 0xFF);
126 SCVAL(state->vwv + 0, 1, 0);
127 SSVAL(state->vwv + 1, 0, 0);
128 SSVAL(state->vwv + 2, 0, fnum);
129 SIVAL(state->vwv + 3, 0, offset);
130 SSVAL(state->vwv + 5, 0, size);
131 SSVAL(state->vwv + 6, 0, size);
132 SSVAL(state->vwv + 7, 0, (size >> 16));
133 SSVAL(state->vwv + 8, 0, 0);
134 SSVAL(state->vwv + 9, 0, 0);
135
136 if (cli->capabilities & CAP_LARGE_FILES) {
137 SIVAL(state->vwv + 10, 0,
138 (((uint64_t)offset)>>32) & 0xffffffff);
139 wct = 12;
140 } else {
141 if ((((uint64_t)offset) & 0xffffffff00000000LL) != 0) {
142 DEBUG(10, ("cli_read_andx_send got large offset where "
143 "the server does not support it\n"));
144 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
145 return tevent_req_post(req, ev);
146 }
147 }
148
149 subreq = cli_smb_req_create(state, ev, cli, SMBreadX, 0, wct,
150 state->vwv, 0, NULL);
151 if (subreq == NULL) {
152 TALLOC_FREE(req);
153 return NULL;
154 }
155 tevent_req_set_callback(subreq, cli_read_andx_done, req);
156 *psmbreq = subreq;
157 return req;
158}
159
160struct tevent_req *cli_read_andx_send(TALLOC_CTX *mem_ctx,
161 struct event_context *ev,
162 struct cli_state *cli, uint16_t fnum,
163 off_t offset, size_t size)
164{
165 struct tevent_req *req, *subreq;
166 NTSTATUS status;
167
168 req = cli_read_andx_create(mem_ctx, ev, cli, fnum, offset, size,
169 &subreq);
170 if (req == NULL) {
171 return NULL;
172 }
173
174 status = cli_smb_req_send(subreq);
175 if (tevent_req_nterror(req, status)) {
176 return tevent_req_post(req, ev);
177 }
178 return req;
179}
180
181static void cli_read_andx_done(struct tevent_req *subreq)
182{
183 struct tevent_req *req = tevent_req_callback_data(
184 subreq, struct tevent_req);
185 struct cli_read_andx_state *state = tevent_req_data(
186 req, struct cli_read_andx_state);
187 uint8_t *inbuf;
188 uint8_t wct;
189 uint16_t *vwv;
190 uint32_t num_bytes;
191 uint8_t *bytes;
192
193 state->status = cli_smb_recv(subreq, state, &inbuf, 12, &wct, &vwv,
194 &num_bytes, &bytes);
195 TALLOC_FREE(subreq);
196 if (NT_STATUS_IS_ERR(state->status)) {
197 tevent_req_nterror(req, state->status);
198 return;
199 }
200
201 /* size is the number of bytes the server returned.
202 * Might be zero. */
203 state->received = SVAL(vwv + 5, 0);
204 state->received |= (((unsigned int)SVAL(vwv + 7, 0)) << 16);
205
206 if (state->received > state->size) {
207 DEBUG(5,("server returned more than we wanted!\n"));
208 tevent_req_nterror(req, NT_STATUS_UNEXPECTED_IO_ERROR);
209 return;
210 }
211
212 /*
213 * The bcc field must be valid for small reads; for large reads the
214 * 16-bit bcc field can't be correct.
215 */
216
217 if ((state->received < 0xffff) && (state->received > num_bytes)) {
218 DEBUG(5, ("server announced more bytes than sent\n"));
219 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
220 return;
221 }
222
223 state->buf = (uint8_t *)smb_base(inbuf) + SVAL(vwv+6, 0);
224
225 if (trans_oob(smb_len_large(inbuf), SVAL(vwv+6, 0), state->received)
226 || ((state->received != 0) && (state->buf < bytes))) {
227 DEBUG(5, ("server returned invalid read&x data offset\n"));
228 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
229 return;
230 }
231 tevent_req_done(req);
232}
233
234/*
235 * Pull the data out of a finished async read_and_x request. rcvbuf is
236 * talloced from the request, so better make sure that you copy it away before
237 * you talloc_free(req). "rcvbuf" is NOT a talloc_ctx of its own, so do not
238 * talloc_move it!
239 */
240
241NTSTATUS cli_read_andx_recv(struct tevent_req *req, ssize_t *received,
242 uint8_t **rcvbuf)
243{
244 struct cli_read_andx_state *state = tevent_req_data(
245 req, struct cli_read_andx_state);
246 NTSTATUS status;
247
248 if (tevent_req_is_nterror(req, &status)) {
249 return status;
250 }
251 *received = state->received;
252 *rcvbuf = state->buf;
253 return NT_STATUS_OK;
254}
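/*
 * Illustrative sketch of driving a single read&x request to completion on
 * a private event context, mirroring the synchronous wrappers further
 * down in this file. The helper name is hypothetical; it assumes "cli" is
 * a connected cli_state, "fnum" an already-open file and "out" a buffer
 * of at least "size" bytes.
 */
static NTSTATUS example_read_andx_sync(struct cli_state *cli, uint16_t fnum,
				       off_t offset, size_t size,
				       char *out, ssize_t *nread)
{
	TALLOC_CTX *frame = talloc_stackframe();
	struct event_context *ev;
	struct tevent_req *req;
	uint8_t *rcvbuf;
	NTSTATUS status = NT_STATUS_NO_MEMORY;

	ev = event_context_init(frame);
	if (ev == NULL) {
		goto fail;
	}
	req = cli_read_andx_send(frame, ev, cli, fnum, offset, size);
	if (req == NULL) {
		goto fail;
	}
	if (!tevent_req_poll(req, ev)) {
		status = map_nt_error_from_unix(errno);
		goto fail;
	}
	status = cli_read_andx_recv(req, nread, &rcvbuf);
	if (NT_STATUS_IS_OK(status)) {
		/* rcvbuf points into the request's receive buffer and is
		 * not a talloc context of its own, so copy the data out
		 * before the frame (and with it the request) is freed. */
		memcpy(out, rcvbuf, *nread);
	}
fail:
	TALLOC_FREE(frame);
	return status;
}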
255
256struct cli_readall_state {
257 struct tevent_context *ev;
258 struct cli_state *cli;
259 uint16_t fnum;
260 off_t start_offset;
261 size_t size;
262 size_t received;
263 uint8_t *buf;
264};
265
266static void cli_readall_done(struct tevent_req *subreq);
267
268static struct tevent_req *cli_readall_send(TALLOC_CTX *mem_ctx,
269 struct event_context *ev,
270 struct cli_state *cli,
271 uint16_t fnum,
272 off_t offset, size_t size)
273{
274 struct tevent_req *req, *subreq;
275 struct cli_readall_state *state;
276
277 req = tevent_req_create(mem_ctx, &state, struct cli_readall_state);
278 if (req == NULL) {
279 return NULL;
280 }
281 state->ev = ev;
282 state->cli = cli;
283 state->fnum = fnum;
284 state->start_offset = offset;
285 state->size = size;
286 state->received = 0;
287 state->buf = NULL;
288
289 subreq = cli_read_andx_send(state, ev, cli, fnum, offset, size);
290 if (tevent_req_nomem(subreq, req)) {
291 return tevent_req_post(req, ev);
292 }
293 tevent_req_set_callback(subreq, cli_readall_done, req);
294 return req;
295}
296
297static void cli_readall_done(struct tevent_req *subreq)
298{
299 struct tevent_req *req = tevent_req_callback_data(
300 subreq, struct tevent_req);
301 struct cli_readall_state *state = tevent_req_data(
302 req, struct cli_readall_state);
303 ssize_t received;
304 uint8_t *buf;
305 NTSTATUS status;
306
307 status = cli_read_andx_recv(subreq, &received, &buf);
308 if (tevent_req_nterror(req, status)) {
309 return;
310 }
311
312 if (received == 0) {
313 /* EOF */
314 tevent_req_done(req);
315 return;
316 }
317
318 if ((state->received == 0) && (received == state->size)) {
319 /* Ideal case: Got it all in one run */
320 state->buf = buf;
321 state->received += received;
322 tevent_req_done(req);
323 return;
324 }
325
326 /*
327 * We got a short read, issue a read for the
328 * rest. Unfortunately we have to allocate the buffer
329 * ourselves now, as our caller expects to receive a single
330 * buffer. cli_read_andx does it from the buffer received from
331 * the net, but with a short read we have to put it together
332 * from several reads.
333 */
334
335 if (state->buf == NULL) {
336 state->buf = talloc_array(state, uint8_t, state->size);
337 if (tevent_req_nomem(state->buf, req)) {
338 return;
339 }
340 }
341 memcpy(state->buf + state->received, buf, received);
342 state->received += received;
343
344 TALLOC_FREE(subreq);
345
346 if (state->received >= state->size) {
347 tevent_req_done(req);
348 return;
349 }
350
351 subreq = cli_read_andx_send(state, state->ev, state->cli, state->fnum,
352 state->start_offset + state->received,
353 state->size - state->received);
354 if (tevent_req_nomem(subreq, req)) {
355 return;
356 }
357 tevent_req_set_callback(subreq, cli_readall_done, req);
358}
359
360static NTSTATUS cli_readall_recv(struct tevent_req *req, ssize_t *received,
361 uint8_t **rcvbuf)
362{
363 struct cli_readall_state *state = tevent_req_data(
364 req, struct cli_readall_state);
365 NTSTATUS status;
366
367 if (tevent_req_is_nterror(req, &status)) {
368 return status;
369 }
370 *received = state->received;
371 *rcvbuf = state->buf;
372 return NT_STATUS_OK;
373}
374
375struct cli_pull_subreq {
376 struct tevent_req *req;
377 ssize_t received;
378 uint8_t *buf;
379};
380
381/*
382 * Parallel read support.
383 *
384 * cli_pull sends as many read&x requests as the server would allow via
385 * max_mux at a time. When replies flow back in, the data is written into
386 * the callback function "sink" in the right order.
387 */
388
389struct cli_pull_state {
390 struct tevent_req *req;
391
392 struct event_context *ev;
393 struct cli_state *cli;
394 uint16_t fnum;
395 off_t start_offset;
396 SMB_OFF_T size;
397
398 NTSTATUS (*sink)(char *buf, size_t n, void *priv);
399 void *priv;
400
401 size_t chunk_size;
402
403 /*
404 * Outstanding requests
405 */
406 int num_reqs;
407 struct cli_pull_subreq *reqs;
408
409 /*
410 * For how many bytes did we send requests already?
411 */
412 SMB_OFF_T requested;
413
414 /*
415 * Next request index to push into "sink". This walks around the "req"
416 * array, taking care that the requests are pushed to "sink" in the
417 * right order. If necessary (i.e. replies don't come in in the right
418 * order), replies are held back in "reqs".
419 */
420 int top_req;
421
422 /*
423 * How many bytes did we push into "sink"?
424 */
425
426 SMB_OFF_T pushed;
427};
428
429static char *cli_pull_print(struct tevent_req *req, TALLOC_CTX *mem_ctx)
430{
431 struct cli_pull_state *state = tevent_req_data(
432 req, struct cli_pull_state);
433 char *result;
434
435 result = tevent_req_default_print(req, mem_ctx);
436 if (result == NULL) {
437 return NULL;
438 }
439
440 return talloc_asprintf_append_buffer(
441 result, "num_reqs=%d, top_req=%d",
442 state->num_reqs, state->top_req);
443}
444
445static void cli_pull_read_done(struct tevent_req *read_req);
446
447/*
448 * Prepare an async pull request
449 */
450
451struct tevent_req *cli_pull_send(TALLOC_CTX *mem_ctx,
452 struct event_context *ev,
453 struct cli_state *cli,
454 uint16_t fnum, off_t start_offset,
455 SMB_OFF_T size, size_t window_size,
456 NTSTATUS (*sink)(char *buf, size_t n,
457 void *priv),
458 void *priv)
459{
460 struct tevent_req *req;
461 struct cli_pull_state *state;
462 int i;
463
464 req = tevent_req_create(mem_ctx, &state, struct cli_pull_state);
465 if (req == NULL) {
466 return NULL;
467 }
468 tevent_req_set_print_fn(req, cli_pull_print);
469 state->req = req;
470
471 state->cli = cli;
472 state->ev = ev;
473 state->fnum = fnum;
474 state->start_offset = start_offset;
475 state->size = size;
476 state->sink = sink;
477 state->priv = priv;
478
479 state->pushed = 0;
480 state->top_req = 0;
481
482 if (size == 0) {
483 tevent_req_done(req);
484 return tevent_req_post(req, ev);
485 }
486
487 state->chunk_size = cli_read_max_bufsize(cli);
488
489 state->num_reqs = MAX(window_size/state->chunk_size, 1);
490 state->num_reqs = MIN(state->num_reqs, cli->max_mux);
491
492 state->reqs = TALLOC_ZERO_ARRAY(state, struct cli_pull_subreq,
493 state->num_reqs);
494 if (state->reqs == NULL) {
495 goto failed;
496 }
497
498 state->requested = 0;
499
500 for (i=0; i<state->num_reqs; i++) {
501 struct cli_pull_subreq *subreq = &state->reqs[i];
502 SMB_OFF_T size_left;
503 size_t request_thistime;
504
505 if (state->requested >= size) {
506 state->num_reqs = i;
507 break;
508 }
509
510 size_left = size - state->requested;
511 request_thistime = MIN(size_left, state->chunk_size);
512
513 subreq->req = cli_readall_send(
514 state->reqs, ev, cli, fnum,
515 state->start_offset + state->requested,
516 request_thistime);
517
518 if (subreq->req == NULL) {
519 goto failed;
520 }
521 tevent_req_set_callback(subreq->req, cli_pull_read_done, req);
522 state->requested += request_thistime;
523 }
524 return req;
525
526failed:
527 TALLOC_FREE(req);
528 return NULL;
529}
530
531/*
532 * Handle incoming read replies, push the data into sink and send out new
533 * requests if necessary.
534 */
535
536static void cli_pull_read_done(struct tevent_req *subreq)
537{
538 struct tevent_req *req = tevent_req_callback_data(
539 subreq, struct tevent_req);
540 struct cli_pull_state *state = tevent_req_data(
541 req, struct cli_pull_state);
542 struct cli_pull_subreq *pull_subreq = NULL;
543 NTSTATUS status;
544 int i;
545
546 for (i = 0; i < state->num_reqs; i++) {
547 pull_subreq = &state->reqs[i];
548 if (subreq == pull_subreq->req) {
549 break;
550 }
551 }
552 if (i == state->num_reqs) {
553 /* Huh -- received something we did not send?? */
554 tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
555 return;
556 }
557
558 status = cli_readall_recv(subreq, &pull_subreq->received,
559 &pull_subreq->buf);
560 if (!NT_STATUS_IS_OK(status)) {
561 tevent_req_nterror(state->req, status);
562 return;
563 }
564
565 /*
566 * This loop is the one to take care of out-of-order replies. All
567 * pending requests are in state->reqs, state->reqs[top_req] is the
568 * one that is to be pushed next. If however a request later than
569 * top_req is replied to, then we can't push yet. If top_req is
570 * replied to at a later point then, we need to push all the finished
571 * requests.
572 */
573
574 while (state->reqs[state->top_req].req != NULL) {
575 struct cli_pull_subreq *top_subreq;
576
577 DEBUG(11, ("cli_pull_read_done: top_req = %d\n",
578 state->top_req));
579
580 top_subreq = &state->reqs[state->top_req];
581
582 if (tevent_req_is_in_progress(top_subreq->req)) {
583 DEBUG(11, ("cli_pull_read_done: top request not yet "
584 "done\n"));
585 return;
586 }
587
588 DEBUG(10, ("cli_pull_read_done: Pushing %d bytes, %d already "
589 "pushed\n", (int)top_subreq->received,
590 (int)state->pushed));
591
592 status = state->sink((char *)top_subreq->buf,
593 top_subreq->received, state->priv);
594 if (tevent_req_nterror(state->req, status)) {
595 return;
596 }
597 state->pushed += top_subreq->received;
598
599 TALLOC_FREE(state->reqs[state->top_req].req);
600
601 if (state->requested < state->size) {
602 struct tevent_req *new_req;
603 SMB_OFF_T size_left;
604 size_t request_thistime;
605
606 size_left = state->size - state->requested;
607 request_thistime = MIN(size_left, state->chunk_size);
608
609 DEBUG(10, ("cli_pull_read_done: Requesting %d bytes "
610 "at %d, position %d\n",
611 (int)request_thistime,
612 (int)(state->start_offset
613 + state->requested),
614 state->top_req));
615
616 new_req = cli_readall_send(
617 state->reqs, state->ev, state->cli,
618 state->fnum,
619 state->start_offset + state->requested,
620 request_thistime);
621
622 if (tevent_req_nomem(new_req, state->req)) {
623 return;
624 }
625 tevent_req_set_callback(new_req, cli_pull_read_done,
626 req);
627
628 state->reqs[state->top_req].req = new_req;
629 state->requested += request_thistime;
630 }
631
632 state->top_req = (state->top_req+1) % state->num_reqs;
633 }
634
635 tevent_req_done(req);
636}
637
638NTSTATUS cli_pull_recv(struct tevent_req *req, SMB_OFF_T *received)
639{
640 struct cli_pull_state *state = tevent_req_data(
641 req, struct cli_pull_state);
642 NTSTATUS status;
643
644 if (tevent_req_is_nterror(req, &status)) {
645 return status;
646 }
647 *received = state->pushed;
648 return NT_STATUS_OK;
649}
650
651NTSTATUS cli_pull(struct cli_state *cli, uint16_t fnum,
652 off_t start_offset, SMB_OFF_T size, size_t window_size,
653 NTSTATUS (*sink)(char *buf, size_t n, void *priv),
654 void *priv, SMB_OFF_T *received)
655{
656 TALLOC_CTX *frame = talloc_stackframe();
657 struct event_context *ev;
658 struct tevent_req *req;
659 NTSTATUS status = NT_STATUS_OK;
660
661 if (cli_has_async_calls(cli)) {
662 /*
663 * Can't use sync call while an async call is in flight
664 */
665 status = NT_STATUS_INVALID_PARAMETER;
666 goto fail;
667 }
668
669 ev = event_context_init(frame);
670 if (ev == NULL) {
671 status = NT_STATUS_NO_MEMORY;
672 goto fail;
673 }
674
675 req = cli_pull_send(frame, ev, cli, fnum, start_offset, size,
676 window_size, sink, priv);
677 if (req == NULL) {
678 status = NT_STATUS_NO_MEMORY;
679 goto fail;
680 }
681
682 if (!tevent_req_poll(req, ev)) {
683 status = map_nt_error_from_unix(errno);
684 goto fail;
685 }
686
687 status = cli_pull_recv(req, received);
688 fail:
689 TALLOC_FREE(frame);
690 return status;
691}
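/*
 * Illustrative cli_pull() consumer: a hypothetical sink callback that
 * writes incoming chunks to a local file descriptor. The callback
 * signature matches the "sink" parameter above; the helper names are
 * examples only and assume "fd" is an open, writable descriptor and
 * "fnum" an open remote file.
 */
static NTSTATUS example_fd_sink(char *buf, size_t n, void *priv)
{
	int fd = *(int *)priv;

	while (n > 0) {
		ssize_t written = write(fd, buf, n);
		if (written <= 0) {
			return map_nt_error_from_unix(errno);
		}
		buf += written;
		n -= written;
	}
	return NT_STATUS_OK;
}

static NTSTATUS example_download(struct cli_state *cli, uint16_t fnum,
				 SMB_OFF_T size, int fd, SMB_OFF_T *received)
{
	/*
	 * Ask for 16 chunks in flight; cli_pull_send clamps the number of
	 * outstanding requests to cli->max_mux anyway.
	 */
	return cli_pull(cli, fnum, 0, size, 16 * cli_read_max_bufsize(cli),
			example_fd_sink, &fd, received);
}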
692
693static NTSTATUS cli_read_sink(char *buf, size_t n, void *priv)
694{
695 char **pbuf = (char **)priv;
696 memcpy(*pbuf, buf, n);
697 *pbuf += n;
698 return NT_STATUS_OK;
699}
700
701ssize_t cli_read(struct cli_state *cli, uint16_t fnum, char *buf,
702 off_t offset, size_t size)
703{
704 NTSTATUS status;
705 SMB_OFF_T ret;
706
707 status = cli_pull(cli, fnum, offset, size, size,
708 cli_read_sink, &buf, &ret);
709 if (!NT_STATUS_IS_OK(status)) {
710 return -1;
711 }
712 return ret;
713}
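/*
 * Minimal cli_read() usage sketch (hypothetical caller; assumes "fnum" is
 * open and "buf" holds at least "len" bytes). -1 signals failure; any
 * other return is the number of bytes actually read, which may be short
 * at end of file.
 */
static bool example_read_prefix(struct cli_state *cli, uint16_t fnum,
				char *buf, size_t len)
{
	ssize_t nread = cli_read(cli, fnum, buf, 0, len);

	if (nread == -1) {
		return false;
	}
	DEBUG(10, ("example_read_prefix: got %d of %d bytes\n",
		   (int)nread, (int)len));
	return true;
}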
714
715/****************************************************************************
716 Write to a file using SMBwrite, without bypassing 0-byte writes
717****************************************************************************/
718
719NTSTATUS cli_smbwrite(struct cli_state *cli, uint16_t fnum, char *buf,
720 off_t offset, size_t size1, size_t *ptotal)
721{
722 uint8_t *bytes;
723 ssize_t total = 0;
724
725 /*
726 * 3 bytes prefix
727 */
728
729 bytes = TALLOC_ARRAY(talloc_tos(), uint8_t, 3);
730 if (bytes == NULL) {
731 return NT_STATUS_NO_MEMORY;
732 }
733 bytes[0] = 1;
734
735 do {
736 size_t size = MIN(size1, cli->max_xmit - 48);
737 struct tevent_req *req;
738 uint16_t vwv[5];
739 uint16_t *ret_vwv;
740 NTSTATUS status;
741
742 SSVAL(vwv+0, 0, fnum);
743 SSVAL(vwv+1, 0, size);
744 SIVAL(vwv+2, 0, offset);
745 SSVAL(vwv+4, 0, 0);
746
747 bytes = TALLOC_REALLOC_ARRAY(talloc_tos(), bytes, uint8_t,
748 size+3);
749 if (bytes == NULL) {
750 return NT_STATUS_NO_MEMORY;
751 }
752 SSVAL(bytes, 1, size);
753 memcpy(bytes + 3, buf + total, size);
754
755 status = cli_smb(talloc_tos(), cli, SMBwrite, 0, 5, vwv,
756 size+3, bytes, &req, 1, NULL, &ret_vwv,
757 NULL, NULL);
758 if (!NT_STATUS_IS_OK(status)) {
759 TALLOC_FREE(bytes);
760 return status;
761 }
762
763 size = SVAL(ret_vwv+0, 0);
764 TALLOC_FREE(req);
765 if (size == 0) {
766 break;
767 }
768 size1 -= size;
769 total += size;
770 offset += size;
771
772 } while (size1);
773
774 TALLOC_FREE(bytes);
775
776 if (ptotal != NULL) {
777 *ptotal = total;
778 }
779 return NT_STATUS_OK;
780}
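/*
 * Usage sketch for the legacy SMBwrite path above (hypothetical caller;
 * assumes "fnum" is open and writable). As the comment above notes,
 * zero-byte writes are not bypassed; in the core SMB protocol a
 * zero-length SMBwrite is traditionally taken as a request to truncate
 * (or extend) the file to the given offset.
 */
static NTSTATUS example_truncate_via_smbwrite(struct cli_state *cli,
					      uint16_t fnum, off_t new_size)
{
	size_t written;
	/*
	 * The buffer is never dereferenced for a zero-byte write, but pass
	 * a valid pointer to stay on the safe side of the memcpy() above.
	 */
	char dummy = 0;

	return cli_smbwrite(cli, fnum, &dummy, new_size, 0, &written);
}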
781
782/*
783 * Send a write&x request
784 */
785
786struct cli_write_andx_state {
787 size_t size;
788 uint16_t vwv[14];
789 size_t written;
790 uint8_t pad;
791 struct iovec iov[2];
792};
793
794static void cli_write_andx_done(struct tevent_req *subreq);
795
796struct tevent_req *cli_write_andx_create(TALLOC_CTX *mem_ctx,
797 struct event_context *ev,
798 struct cli_state *cli, uint16_t fnum,
799 uint16_t mode, const uint8_t *buf,
800 off_t offset, size_t size,
801 struct tevent_req **reqs_before,
802 int num_reqs_before,
803 struct tevent_req **psmbreq)
804{
805 struct tevent_req *req, *subreq;
806 struct cli_write_andx_state *state;
807 bool bigoffset = ((cli->capabilities & CAP_LARGE_FILES) != 0);
808 uint8_t wct = bigoffset ? 14 : 12;
809 size_t max_write = cli_write_max_bufsize(cli, mode, wct);
810 uint16_t *vwv;
811
812 req = tevent_req_create(mem_ctx, &state, struct cli_write_andx_state);
813 if (req == NULL) {
814 return NULL;
815 }
816
817 state->size = MIN(size, max_write);
818
819 vwv = state->vwv;
820
821 SCVAL(vwv+0, 0, 0xFF);
822 SCVAL(vwv+0, 1, 0);
823 SSVAL(vwv+1, 0, 0);
824 SSVAL(vwv+2, 0, fnum);
825 SIVAL(vwv+3, 0, offset);
826 SIVAL(vwv+5, 0, 0);
827 SSVAL(vwv+7, 0, mode);
828 SSVAL(vwv+8, 0, 0);
829 SSVAL(vwv+9, 0, (state->size>>16));
830 SSVAL(vwv+10, 0, state->size);
831
832 SSVAL(vwv+11, 0,
833 cli_smb_wct_ofs(reqs_before, num_reqs_before)
834 + 1 /* the wct field */
835 + wct * 2 /* vwv */
836 + 2 /* num_bytes field */
837 + 1 /* pad */);
838
839 if (bigoffset) {
840 SIVAL(vwv+12, 0, (((uint64_t)offset)>>32) & 0xffffffff);
841 }
842
843 state->pad = 0;
844 state->iov[0].iov_base = (void *)&state->pad;
845 state->iov[0].iov_len = 1;
846 state->iov[1].iov_base = CONST_DISCARD(void *, buf);
847 state->iov[1].iov_len = state->size;
848
849 subreq = cli_smb_req_create(state, ev, cli, SMBwriteX, 0, wct, vwv,
850 2, state->iov);
851 if (tevent_req_nomem(subreq, req)) {
852 return tevent_req_post(req, ev);
853 }
854 tevent_req_set_callback(subreq, cli_write_andx_done, req);
855 *psmbreq = subreq;
856 return req;
857}
858
859struct tevent_req *cli_write_andx_send(TALLOC_CTX *mem_ctx,
860 struct event_context *ev,
861 struct cli_state *cli, uint16_t fnum,
862 uint16_t mode, const uint8_t *buf,
863 off_t offset, size_t size)
864{
865 struct tevent_req *req, *subreq;
866 NTSTATUS status;
867
868 req = cli_write_andx_create(mem_ctx, ev, cli, fnum, mode, buf, offset,
869 size, NULL, 0, &subreq);
870 if (req == NULL) {
871 return NULL;
872 }
873
874 status = cli_smb_req_send(subreq);
875 if (tevent_req_nterror(req, status)) {
876 return tevent_req_post(req, ev);
877 }
878 return req;
879}
880
881static void cli_write_andx_done(struct tevent_req *subreq)
882{
883 struct tevent_req *req = tevent_req_callback_data(
884 subreq, struct tevent_req);
885 struct cli_write_andx_state *state = tevent_req_data(
886 req, struct cli_write_andx_state);
887 uint8_t wct;
888 uint16_t *vwv;
889 uint8_t *inbuf;
890 NTSTATUS status;
891
892 status = cli_smb_recv(subreq, state, &inbuf, 6, &wct, &vwv,
893 NULL, NULL);
894 TALLOC_FREE(subreq);
895 if (NT_STATUS_IS_ERR(status)) {
896 tevent_req_nterror(req, status);
897 return;
898 }
899 state->written = SVAL(vwv+2, 0);
900 if (state->size > UINT16_MAX) {
901 /*
902 * It is important that we set the
903 * high bits only if we asked for a large write.
904 *
905 * OS/2 print shares get this wrong and may send
906 * invalid values.
907 *
908 * See bug #5326.
909 */
910 state->written |= SVAL(vwv+4, 0)<<16;
911 }
912 tevent_req_done(req);
913}
914
915NTSTATUS cli_write_andx_recv(struct tevent_req *req, size_t *pwritten)
916{
917 struct cli_write_andx_state *state = tevent_req_data(
918 req, struct cli_write_andx_state);
919 NTSTATUS status;
920
921 if (tevent_req_is_nterror(req, &status)) {
922 return status;
923 }
924 *pwritten = state->written;
925 return NT_STATUS_OK;
926}
927
928struct cli_writeall_state {
929 struct event_context *ev;
930 struct cli_state *cli;
931 uint16_t fnum;
932 uint16_t mode;
933 const uint8_t *buf;
934 off_t offset;
935 size_t size;
936 size_t written;
937};
938
939static void cli_writeall_written(struct tevent_req *req);
940
941static struct tevent_req *cli_writeall_send(TALLOC_CTX *mem_ctx,
942 struct event_context *ev,
943 struct cli_state *cli,
944 uint16_t fnum,
945 uint16_t mode,
946 const uint8_t *buf,
947 off_t offset, size_t size)
948{
949 struct tevent_req *req, *subreq;
950 struct cli_writeall_state *state;
951
952 req = tevent_req_create(mem_ctx, &state, struct cli_writeall_state);
953 if (req == NULL) {
954 return NULL;
955 }
956 state->ev = ev;
957 state->cli = cli;
958 state->fnum = fnum;
959 state->mode = mode;
960 state->buf = buf;
961 state->offset = offset;
962 state->size = size;
963 state->written = 0;
964
965 subreq = cli_write_andx_send(state, state->ev, state->cli, state->fnum,
966 state->mode, state->buf, state->offset,
967 state->size);
968 if (tevent_req_nomem(subreq, req)) {
969 return tevent_req_post(req, ev);
970 }
971 tevent_req_set_callback(subreq, cli_writeall_written, req);
972 return req;
973}
974
975static void cli_writeall_written(struct tevent_req *subreq)
976{
977 struct tevent_req *req = tevent_req_callback_data(
978 subreq, struct tevent_req);
979 struct cli_writeall_state *state = tevent_req_data(
980 req, struct cli_writeall_state);
981 NTSTATUS status;
982 size_t written, to_write;
983
984 status = cli_write_andx_recv(subreq, &written);
985 TALLOC_FREE(subreq);
986 if (tevent_req_nterror(req, status)) {
987 return;
988 }
989
990 state->written += written;
991
992 if (state->written > state->size) {
993 tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
994 return;
995 }
996
997 to_write = state->size - state->written;
998
999 if (to_write == 0) {
1000 tevent_req_done(req);
1001 return;
1002 }
1003
1004 subreq = cli_write_andx_send(state, state->ev, state->cli, state->fnum,
1005 state->mode,
1006 state->buf + state->written,
1007 state->offset + state->written, to_write);
1008 if (tevent_req_nomem(subreq, req)) {
1009 return;
1010 }
1011 tevent_req_set_callback(subreq, cli_writeall_written, req);
1012}
1013
1014static NTSTATUS cli_writeall_recv(struct tevent_req *req,
1015 size_t *pwritten)
1016{
1017 struct cli_writeall_state *state = tevent_req_data(
1018 req, struct cli_writeall_state);
1019 NTSTATUS status;
1020
1021 if (tevent_req_is_nterror(req, &status)) {
1022 return status;
1023 }
1024 if (pwritten != NULL) {
1025 *pwritten = state->written;
1026 }
1027 return NT_STATUS_OK;
1028}
1029
1030NTSTATUS cli_writeall(struct cli_state *cli, uint16_t fnum, uint16_t mode,
1031 const uint8_t *buf, off_t offset, size_t size,
1032 size_t *pwritten)
1033{
1034 TALLOC_CTX *frame = talloc_stackframe();
1035 struct event_context *ev;
1036 struct tevent_req *req;
1037 NTSTATUS status = NT_STATUS_NO_MEMORY;
1038
1039 if (cli_has_async_calls(cli)) {
1040 /*
1041 * Can't use sync call while an async call is in flight
1042 */
1043 status = NT_STATUS_INVALID_PARAMETER;
1044 goto fail;
1045 }
1046 ev = event_context_init(frame);
1047 if (ev == NULL) {
1048 goto fail;
1049 }
1050 req = cli_writeall_send(frame, ev, cli, fnum, mode, buf, offset, size);
1051 if (req == NULL) {
1052 goto fail;
1053 }
1054 if (!tevent_req_poll(req, ev)) {
1055 status = map_nt_error_from_unix(errno);
1056 goto fail;
1057 }
1058 status = cli_writeall_recv(req, pwritten);
1059 fail:
1060 TALLOC_FREE(frame);
1061 return status;
1062}
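/*
 * Usage sketch for cli_writeall() (hypothetical caller; assumes "fnum" is
 * open and writable). Passing 0 for write_mode requests a plain write;
 * cli_writeall loops internally until the whole buffer has been accepted
 * by the server or an error occurs.
 */
static NTSTATUS example_write_blob(struct cli_state *cli, uint16_t fnum,
				   const uint8_t *blob, size_t len,
				   off_t offset)
{
	size_t written;
	NTSTATUS status;

	status = cli_writeall(cli, fnum, 0, blob, offset, len, &written);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(1, ("example_write_blob: cli_writeall failed: %s\n",
			  nt_errstr(status)));
	}
	return status;
}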
1063
1064struct cli_push_write_state {
1065 struct tevent_req *req; /* This is the main request! Not the subreq */
1066 uint32_t idx;
1067 off_t ofs;
1068 uint8_t *buf;
1069 size_t size;
1070};
1071
1072struct cli_push_state {
1073 struct event_context *ev;
1074 struct cli_state *cli;
1075 uint16_t fnum;
1076 uint16_t mode;
1077 off_t start_offset;
1078 size_t window_size;
1079
1080 size_t (*source)(uint8_t *buf, size_t n, void *priv);
1081 void *priv;
1082
1083 bool eof;
1084
1085 size_t chunk_size;
1086 off_t next_offset;
1087
1088 /*
1089 * Outstanding requests
1090 */
1091 uint32_t pending;
1092 uint32_t num_reqs;
1093 struct cli_push_write_state **reqs;
1094};
1095
1096static void cli_push_written(struct tevent_req *req);
1097
1098static bool cli_push_write_setup(struct tevent_req *req,
1099 struct cli_push_state *state,
1100 uint32_t idx)
1101{
1102 struct cli_push_write_state *substate;
1103 struct tevent_req *subreq;
1104
1105 substate = talloc(state->reqs, struct cli_push_write_state);
1106 if (!substate) {
1107 return false;
1108 }
1109 substate->req = req;
1110 substate->idx = idx;
1111 substate->ofs = state->next_offset;
1112 substate->buf = talloc_array(substate, uint8_t, state->chunk_size);
1113 if (!substate->buf) {
1114 talloc_free(substate);
1115 return false;
1116 }
1117 substate->size = state->source(substate->buf,
1118 state->chunk_size,
1119 state->priv);
1120 if (substate->size == 0) {
1121 state->eof = true;
1122 /* nothing to send */
1123 talloc_free(substate);
1124 return true;
1125 }
1126
1127 subreq = cli_writeall_send(substate,
1128 state->ev, state->cli,
1129 state->fnum, state->mode,
1130 substate->buf,
1131 substate->ofs,
1132 substate->size);
1133 if (!subreq) {
1134 talloc_free(substate);
1135 return false;
1136 }
1137 tevent_req_set_callback(subreq, cli_push_written, substate);
1138
1139 state->reqs[idx] = substate;
1140 state->pending += 1;
1141 state->next_offset += substate->size;
1142
1143 return true;
1144}
1145
1146struct tevent_req *cli_push_send(TALLOC_CTX *mem_ctx, struct event_context *ev,
1147 struct cli_state *cli,
1148 uint16_t fnum, uint16_t mode,
1149 off_t start_offset, size_t window_size,
1150 size_t (*source)(uint8_t *buf, size_t n,
1151 void *priv),
1152 void *priv)
1153{
1154 struct tevent_req *req;
1155 struct cli_push_state *state;
1156 uint32_t i;
1157
1158 req = tevent_req_create(mem_ctx, &state, struct cli_push_state);
1159 if (req == NULL) {
1160 return NULL;
1161 }
1162 state->cli = cli;
1163 state->ev = ev;
1164 state->fnum = fnum;
1165 state->start_offset = start_offset;
1166 state->mode = mode;
1167 state->source = source;
1168 state->priv = priv;
1169 state->eof = false;
1170 state->pending = 0;
1171 state->next_offset = start_offset;
1172
1173 state->chunk_size = cli_write_max_bufsize(cli, mode, 14);
1174
1175 if (window_size == 0) {
1176 window_size = cli->max_mux * state->chunk_size;
1177 }
1178 state->num_reqs = window_size/state->chunk_size;
1179 if ((window_size % state->chunk_size) > 0) {
1180 state->num_reqs += 1;
1181 }
1182 state->num_reqs = MIN(state->num_reqs, cli->max_mux);
1183 state->num_reqs = MAX(state->num_reqs, 1);
1184
1185 state->reqs = TALLOC_ZERO_ARRAY(state, struct cli_push_write_state *,
1186 state->num_reqs);
1187 if (state->reqs == NULL) {
1188 goto failed;
1189 }
1190
1191 for (i=0; i<state->num_reqs; i++) {
1192 if (!cli_push_write_setup(req, state, i)) {
1193 goto failed;
1194 }
1195
1196 if (state->eof) {
1197 break;
1198 }
1199 }
1200
1201 if (state->pending == 0) {
1202 tevent_req_done(req);
1203 return tevent_req_post(req, ev);
1204 }
1205
1206 return req;
1207
1208 failed:
1209 tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
1210 return tevent_req_post(req, ev);
1211}
1212
1213static void cli_push_written(struct tevent_req *subreq)
1214{
1215 struct cli_push_write_state *substate = tevent_req_callback_data(
1216 subreq, struct cli_push_write_state);
1217 struct tevent_req *req = substate->req;
1218 struct cli_push_state *state = tevent_req_data(
1219 req, struct cli_push_state);
1220 NTSTATUS status;
1221 uint32_t idx = substate->idx;
1222
1223 state->reqs[idx] = NULL;
1224 state->pending -= 1;
1225
1226 status = cli_writeall_recv(subreq, NULL);
1227 TALLOC_FREE(subreq);
1228 TALLOC_FREE(substate);
1229 if (tevent_req_nterror(req, status)) {
1230 return;
1231 }
1232
1233 if (!state->eof) {
1234 if (!cli_push_write_setup(req, state, idx)) {
1235 tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
1236 return;
1237 }
1238 }
1239
1240 if (state->pending == 0) {
1241 tevent_req_done(req);
1242 return;
1243 }
1244}
1245
1246NTSTATUS cli_push_recv(struct tevent_req *req)
1247{
1248 return tevent_req_simple_recv_ntstatus(req);
1249}
1250
1251NTSTATUS cli_push(struct cli_state *cli, uint16_t fnum, uint16_t mode,
1252 off_t start_offset, size_t window_size,
1253 size_t (*source)(uint8_t *buf, size_t n, void *priv),
1254 void *priv)
1255{
1256 TALLOC_CTX *frame = talloc_stackframe();
1257 struct event_context *ev;
1258 struct tevent_req *req;
1259 NTSTATUS status = NT_STATUS_OK;
1260
1261 if (cli_has_async_calls(cli)) {
1262 /*
1263 * Can't use sync call while an async call is in flight
1264 */
1265 status = NT_STATUS_INVALID_PARAMETER;
1266 goto fail;
1267 }
1268
1269 ev = event_context_init(frame);
1270 if (ev == NULL) {
1271 status = NT_STATUS_NO_MEMORY;
1272 goto fail;
1273 }
1274
1275 req = cli_push_send(frame, ev, cli, fnum, mode, start_offset,
1276 window_size, source, priv);
1277 if (req == NULL) {
1278 status = NT_STATUS_NO_MEMORY;
1279 goto fail;
1280 }
1281
1282 if (!tevent_req_poll(req, ev)) {
1283 status = map_nt_error_from_unix(errno);
1284 goto fail;
1285 }
1286
1287 status = cli_push_recv(req);
1288 fail:
1289 TALLOC_FREE(frame);
1290 return status;
1291}
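/*
 * Usage sketch for cli_push(): a hypothetical source callback that feeds
 * data from a local file descriptor. The callback signature matches the
 * "source" parameter above; returning 0 signals end of data. Names and
 * the error handling are illustrative only and assume "fd" and "fnum"
 * are already open.
 */
static size_t example_fd_source(uint8_t *buf, size_t n, void *priv)
{
	int fd = *(int *)priv;
	ssize_t nread = read(fd, buf, n);

	/* Read errors are folded into end-of-data here; a real caller
	 * would report them out of band. */
	return (nread > 0) ? (size_t)nread : 0;
}

static NTSTATUS example_upload(struct cli_state *cli, uint16_t fnum, int fd)
{
	/* window_size == 0 lets cli_push_send pick max_mux * chunk_size. */
	return cli_push(cli, fnum, 0 /* mode */, 0 /* start_offset */,
			0 /* window_size */, example_fd_source, &fd);
}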