source: trunk/server/source4/libcli/raw/clitransport.c

Last change on this file was 745, checked in by Silvan Scherrer, 13 years ago

Samba Server: updated trunk to 3.6.0

File size: 18.7 KB
/*
   Unix SMB/CIFS implementation.
   SMB client transport context management functions

   Copyright (C) Andrew Tridgell 1994-2005
   Copyright (C) James Myers 2003 <myersjj@samba.org>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

#include "includes.h"
#include "libcli/raw/libcliraw.h"
#include "libcli/raw/raw_proto.h"
#include "lib/socket/socket.h"
#include "../lib/util/dlinklist.h"
#include "lib/events/events.h"
#include "lib/stream/packet.h"
#include "librpc/gen_ndr/ndr_nbt.h"
#include "../libcli/nbt/libnbt.h"


/*
  an event has happened on the socket
*/
static void smbcli_transport_event_handler(struct tevent_context *ev,
					   struct tevent_fd *fde,
					   uint16_t flags, void *private_data)
{
	struct smbcli_transport *transport = talloc_get_type(private_data,
							     struct smbcli_transport);
	if (flags & EVENT_FD_READ) {
		packet_recv(transport->packet);
		return;
	}
	if (flags & EVENT_FD_WRITE) {
		packet_queue_run(transport->packet);
	}
}

/*
  destroy a transport
 */
static int transport_destructor(struct smbcli_transport *transport)
{
	smbcli_transport_dead(transport, NT_STATUS_LOCAL_DISCONNECT);
	return 0;
}


/*
  handle receive errors
*/
static void smbcli_transport_error(void *private_data, NTSTATUS status)
{
	struct smbcli_transport *transport = talloc_get_type(private_data, struct smbcli_transport);
	smbcli_transport_dead(transport, status);
}

static NTSTATUS smbcli_transport_finish_recv(void *private_data, DATA_BLOB blob);

/*
  create a transport structure based on an established socket
*/
struct smbcli_transport *smbcli_transport_init(struct smbcli_socket *sock,
					       TALLOC_CTX *parent_ctx,
					       bool primary,
					       struct smbcli_options *options)
{
	struct smbcli_transport *transport;

	transport = talloc_zero(parent_ctx, struct smbcli_transport);
	if (!transport) return NULL;

	if (primary) {
		transport->socket = talloc_steal(transport, sock);
	} else {
		transport->socket = talloc_reference(transport, sock);
	}
	transport->negotiate.protocol = PROTOCOL_NT1;
	transport->options = *options;
	transport->negotiate.max_xmit = transport->options.max_xmit;

	/* setup the stream -> packet parser */
	transport->packet = packet_init(transport);
	if (transport->packet == NULL) {
		talloc_free(transport);
		return NULL;
	}
	packet_set_private(transport->packet, transport);
	packet_set_socket(transport->packet, transport->socket->sock);
	packet_set_callback(transport->packet, smbcli_transport_finish_recv);
	packet_set_full_request(transport->packet, packet_full_request_nbt);
	packet_set_error_handler(transport->packet, smbcli_transport_error);
	packet_set_event_context(transport->packet, transport->socket->event.ctx);
	packet_set_nofree(transport->packet);
	packet_set_initial_read(transport->packet, 4);

	smbcli_init_signing(transport);

	ZERO_STRUCT(transport->called);

	/* take over event handling from the socket layer - it only
	   handles events up until we are connected */
	talloc_free(transport->socket->event.fde);
	transport->socket->event.fde = event_add_fd(transport->socket->event.ctx,
						    transport->socket->sock,
						    socket_get_fd(transport->socket->sock),
						    EVENT_FD_READ,
						    smbcli_transport_event_handler,
						    transport);

	packet_set_fde(transport->packet, transport->socket->event.fde);
	packet_set_serialise(transport->packet);
	talloc_set_destructor(transport, transport_destructor);

	return transport;
}

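/*
  Usage sketch (illustrative only): turning an already connected
  struct smbcli_socket into a transport. How the socket gets connected in
  the first place (the smbcli_sock_* helpers in clisocket.c) is outside
  this file, so only the call into smbcli_transport_init() is shown.

	struct smbcli_options options;        filled in by the caller
	struct smbcli_socket *sock;           an established socket
	struct smbcli_transport *transport;

	transport = smbcli_transport_init(sock, mem_ctx, true, &options);
	if (transport == NULL) {
		return NT_STATUS_NO_MEMORY;
	}

  With primary=true the transport takes ownership of the socket via
  talloc_steal(); with primary=false it only keeps a talloc_reference(),
  as the function above shows.
*/
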
/*
  mark the transport as dead
*/
void smbcli_transport_dead(struct smbcli_transport *transport, NTSTATUS status)
{
	smbcli_sock_dead(transport->socket);

	if (NT_STATUS_EQUAL(NT_STATUS_UNSUCCESSFUL, status)) {
		status = NT_STATUS_UNEXPECTED_NETWORK_ERROR;
	}

	/* kill only the first pending receive - this is so that if
	   that async function frees the connection we don't die trying
	   to use old memory. The caller has to cope with only one
	   network error */
	if (transport->pending_recv) {
		struct smbcli_request *req = transport->pending_recv;
		req->state = SMBCLI_REQUEST_ERROR;
		req->status = status;
		DLIST_REMOVE(transport->pending_recv, req);
		if (req->async.fn) {
			req->async.fn(req);
		}
	}
}


/*
  send a session request
*/
struct smbcli_request *smbcli_transport_connect_send(struct smbcli_transport *transport,
						     struct nbt_name *calling,
						     struct nbt_name *called)
{
	uint8_t *p;
	struct smbcli_request *req;
	DATA_BLOB calling_blob, called_blob;
	TALLOC_CTX *tmp_ctx = talloc_new(transport);
	NTSTATUS status;

	status = nbt_name_dup(transport, called, &transport->called);
	if (!NT_STATUS_IS_OK(status)) goto failed;

	status = nbt_name_to_blob(tmp_ctx, &calling_blob, calling);
	if (!NT_STATUS_IS_OK(status)) goto failed;

	status = nbt_name_to_blob(tmp_ctx, &called_blob, called);
	if (!NT_STATUS_IS_OK(status)) goto failed;

	/* allocate output buffer */
	req = smbcli_request_setup_nonsmb(transport,
					  NBT_HDR_SIZE +
					  calling_blob.length + called_blob.length);
	if (req == NULL) goto failed;

	/* put in the destination name */
	p = req->out.buffer + NBT_HDR_SIZE;
	memcpy(p, called_blob.data, called_blob.length);
	p += called_blob.length;

	memcpy(p, calling_blob.data, calling_blob.length);
	p += calling_blob.length;

	_smb_setlen(req->out.buffer, PTR_DIFF(p, req->out.buffer) - NBT_HDR_SIZE);
	SCVAL(req->out.buffer,0,0x81);

	if (!smbcli_request_send(req)) {
		smbcli_request_destroy(req);
		goto failed;
	}

	talloc_free(tmp_ctx);
	return req;

failed:
	talloc_free(tmp_ctx);
	return NULL;
}

/*
  map a session request error to a NTSTATUS
 */
static NTSTATUS map_session_refused_error(uint8_t error)
{
	switch (error) {
	case 0x80:
	case 0x81:
		return NT_STATUS_REMOTE_NOT_LISTENING;
	case 0x82:
		return NT_STATUS_RESOURCE_NAME_NOT_FOUND;
	case 0x83:
		return NT_STATUS_REMOTE_RESOURCES;
	}
	return NT_STATUS_UNEXPECTED_IO_ERROR;
}


/*
  finish a smbcli_transport_connect()
*/
NTSTATUS smbcli_transport_connect_recv(struct smbcli_request *req)
{
	NTSTATUS status;

	if (!smbcli_request_receive(req)) {
		smbcli_request_destroy(req);
		return NT_STATUS_UNEXPECTED_NETWORK_ERROR;
	}

	switch (CVAL(req->in.buffer,0)) {
	case 0x82:
		status = NT_STATUS_OK;
		break;
	case 0x83:
		status = map_session_refused_error(CVAL(req->in.buffer,4));
		break;
	case 0x84:
		DEBUG(1,("Warning: session retarget not supported\n"));
		status = NT_STATUS_NOT_SUPPORTED;
		break;
	default:
		status = NT_STATUS_UNEXPECTED_IO_ERROR;
		break;
	}

	smbcli_request_destroy(req);
	return status;
}


/*
  send a session request (if needed)
*/
bool smbcli_transport_connect(struct smbcli_transport *transport,
			      struct nbt_name *calling,
			      struct nbt_name *called)
{
	struct smbcli_request *req;
	NTSTATUS status;

	if (transport->socket->port == 445) {
		return true;
	}

	req = smbcli_transport_connect_send(transport,
					    calling, called);
	status = smbcli_transport_connect_recv(req);
	return NT_STATUS_IS_OK(status);
}

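/*
  Usage sketch (illustrative only): the NetBIOS session request step.
  Over port 445 smbcli_transport_connect() returns true immediately; over
  port 139 it sends the 0x81 session request built above and waits for the
  0x82 positive response. make_nbt_name_client() and make_nbt_name_server()
  are the usual libcli/nbt helpers, assumed here for illustration.

	struct nbt_name calling, called;

	make_nbt_name_client(&calling, "MYCLIENT");
	make_nbt_name_server(&called, "FILESERVER");

	if (!smbcli_transport_connect(transport, &calling, &called)) {
		talloc_free(transport);
		return NT_STATUS_REMOTE_NOT_LISTENING;
	}
*/
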
/****************************************************************************
get next mid in sequence
****************************************************************************/
uint16_t smbcli_transport_next_mid(struct smbcli_transport *transport)
{
	uint16_t mid;
	struct smbcli_request *req;

	mid = transport->next_mid;

again:
	/* now check to see if this mid is being used by one of the
	   pending requests. This is quite efficient because the list is
	   usually very short */

	/* the zero mid is reserved for requests that don't have a mid */
	if (mid == 0) mid = 1;

	for (req=transport->pending_recv; req; req=req->next) {
		if (req->mid == mid) {
			mid++;
			goto again;
		}
	}

	transport->next_mid = mid+1;
	return mid;
}

static void idle_handler(struct tevent_context *ev,
			 struct tevent_timer *te, struct timeval t, void *private_data)
{
	struct smbcli_transport *transport = talloc_get_type(private_data,
							     struct smbcli_transport);
	struct timeval next = timeval_add(&t, 0, transport->idle.period);
	transport->socket->event.te = event_add_timed(transport->socket->event.ctx,
						      transport,
						      next,
						      idle_handler, transport);
	transport->idle.func(transport, transport->idle.private_data);
}

/*
  setup the idle handler for a transport
  the period is in microseconds
*/
_PUBLIC_ void smbcli_transport_idle_handler(struct smbcli_transport *transport,
					    void (*idle_func)(struct smbcli_transport *, void *),
					    uint64_t period,
					    void *private_data)
{
	transport->idle.func = idle_func;
	transport->idle.private_data = private_data;
	transport->idle.period = period;

	if (transport->socket->event.te != NULL) {
		talloc_free(transport->socket->event.te);
	}

	transport->socket->event.te = event_add_timed(transport->socket->event.ctx,
						      transport,
						      timeval_current_ofs(0, period),
						      idle_handler, transport);
}

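/*
  Usage sketch (illustrative only): registering an idle function. The period
  is given in microseconds, so a callback that should fire roughly once a
  second is registered like this; my_idle_fn is a hypothetical
  caller-supplied function.

	static void my_idle_fn(struct smbcli_transport *transport, void *private_data)
	{
		... e.g. send a keepalive SMBecho or update a progress bar ...
	}

	smbcli_transport_idle_handler(transport, my_idle_fn, 1000000, NULL);
*/
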
/*
  we have a full request in our receive buffer - match it to a pending request
  and process
 */
static NTSTATUS smbcli_transport_finish_recv(void *private_data, DATA_BLOB blob)
{
	struct smbcli_transport *transport = talloc_get_type(private_data,
							     struct smbcli_transport);
	uint8_t *buffer, *hdr, *vwv;
	int len;
	uint16_t wct=0, mid = 0, op = 0;
	struct smbcli_request *req = NULL;

	buffer = blob.data;
	len = blob.length;

	hdr = buffer+NBT_HDR_SIZE;
	vwv = hdr + HDR_VWV;

	/* see if it could be an oplock break request */
	if (smbcli_handle_oplock_break(transport, len, hdr, vwv)) {
		talloc_free(buffer);
		return NT_STATUS_OK;
	}

	/* at this point we need to check for a readbraw reply, as
	   these can be any length */
	if (transport->readbraw_pending) {
		transport->readbraw_pending = 0;

		/* it must match the first entry in the pending queue
		   as the client is not allowed to have outstanding
		   readbraw requests */
		req = transport->pending_recv;
		if (!req) goto error;

		req->in.buffer = buffer;
		talloc_steal(req, buffer);
		req->in.size = len;
		req->in.allocated = req->in.size;
		goto async;
	}

	if (len >= MIN_SMB_SIZE) {
		/* extract the mid for matching to pending requests */
		mid = SVAL(hdr, HDR_MID);
		wct = CVAL(hdr, HDR_WCT);
		op = CVAL(hdr, HDR_COM);
	}

	/* match the incoming request against the list of pending requests */
	for (req=transport->pending_recv; req; req=req->next) {
		if (req->mid == mid) break;
	}

	/* see if it's a ntcancel reply for the current MID */
	req = smbcli_handle_ntcancel_reply(req, len, hdr);

	if (!req) {
		DEBUG(1,("Discarding unmatched reply with mid %d op %d\n", mid, op));
		goto error;
	}

	/* fill in the 'in' portion of the matching request */
	req->in.buffer = buffer;
	talloc_steal(req, buffer);
	req->in.size = len;
	req->in.allocated = req->in.size;

	/* handle NBT session replies */
	if (req->in.size >= 4 && req->in.buffer[0] != 0) {
		req->status = NT_STATUS_OK;
		goto async;
	}

	/* handle non-SMB replies */
	if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE) {
		req->state = SMBCLI_REQUEST_ERROR;
		goto error;
	}

	if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct)) {
		DEBUG(2,("bad reply size for mid %d\n", mid));
		req->status = NT_STATUS_UNSUCCESSFUL;
		req->state = SMBCLI_REQUEST_ERROR;
		goto error;
	}

	req->in.hdr = hdr;
	req->in.vwv = vwv;
	req->in.wct = wct;
	if (req->in.size >= NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct)) {
		req->in.data = req->in.vwv + VWV(wct) + 2;
		req->in.data_size = SVAL(req->in.vwv, VWV(wct));
		if (req->in.size < NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct) + req->in.data_size) {
			DEBUG(3,("bad data size for mid %d\n", mid));
			/* blergh - w2k3 gives bogus data size values in some
			   openX replies */
			req->in.data_size = req->in.size - (NBT_HDR_SIZE + MIN_SMB_SIZE + VWV(wct));
		}
	}
	req->in.ptr = req->in.data;
	req->flags2 = SVAL(req->in.hdr, HDR_FLG2);

	smb_setup_bufinfo(req);

	if (!(req->flags2 & FLAGS2_32_BIT_ERROR_CODES)) {
		int eclass = CVAL(req->in.hdr,HDR_RCLS);
		int code = SVAL(req->in.hdr,HDR_ERR);
		if (eclass == 0 && code == 0) {
			transport->error.e.nt_status = NT_STATUS_OK;
		} else {
			transport->error.e.nt_status = NT_STATUS_DOS(eclass, code);
		}
	} else {
		transport->error.e.nt_status = NT_STATUS(IVAL(req->in.hdr, HDR_RCLS));
	}

	req->status = transport->error.e.nt_status;
	if (NT_STATUS_IS_OK(req->status)) {
		transport->error.etype = ETYPE_NONE;
	} else {
		transport->error.etype = ETYPE_SMB;
	}

	if (!smbcli_request_check_sign_mac(req)) {
		transport->error.etype = ETYPE_SOCKET;
		transport->error.e.socket_error = SOCKET_READ_BAD_SIG;
		req->state = SMBCLI_REQUEST_ERROR;
		req->status = NT_STATUS_ACCESS_DENIED;
		goto error;
	};

async:
	/* if this request has an async handler then call that to
	   notify that the reply has been received. This might destroy
	   the request so it must happen last */

	req->state = SMBCLI_REQUEST_DONE;

	if (req->recv_helper.fn) {
		/*
		 * let the recv helper decide in
		 * what state the request really is
		 */
		req->state = req->recv_helper.fn(req);

		/* if more parts are needed, wait for them */
		if (req->state <= SMBCLI_REQUEST_RECV) {
			return NT_STATUS_OK;
		}
	}
	DLIST_REMOVE(transport->pending_recv, req);
	if (req->async.fn) {
		req->async.fn(req);
	}
	return NT_STATUS_OK;

error:
	if (req) {
		DLIST_REMOVE(transport->pending_recv, req);
		req->state = SMBCLI_REQUEST_ERROR;
		if (req->async.fn) {
			req->async.fn(req);
		}
	} else {
		talloc_free(buffer);
	}
	return NT_STATUS_OK;
}

/*
  process some read/write requests that are pending
  return false if the socket is dead
*/
_PUBLIC_ bool smbcli_transport_process(struct smbcli_transport *transport)
{
	NTSTATUS status;
	size_t npending;

	packet_queue_run(transport->packet);
	if (transport->socket->sock == NULL) {
		return false;
	}

	status = socket_pending(transport->socket->sock, &npending);
	if (NT_STATUS_IS_OK(status) && npending > 0) {
		packet_recv(transport->packet);
	}
	if (transport->socket->sock == NULL) {
		return false;
	}
	return true;
}

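/*
  Usage sketch (illustrative only): a caller that is not running a full
  tevent loop can poll the transport until its request completes. The state
  checks below follow the SMBCLI_REQUEST_DONE/ERROR transitions made by
  smbcli_transport_finish_recv() and the timeout handler; callers inside an
  event loop normally just rely on the fd and timer events instead.

	while (req->state != SMBCLI_REQUEST_DONE &&
	       req->state != SMBCLI_REQUEST_ERROR) {
		if (!smbcli_transport_process(transport)) {
			break;    the socket is dead
		}
	}
*/
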
/*
  handle timeouts of individual smb requests
*/
static void smbcli_timeout_handler(struct tevent_context *ev, struct tevent_timer *te,
				   struct timeval t, void *private_data)
{
	struct smbcli_request *req = talloc_get_type(private_data, struct smbcli_request);

	if (req->state == SMBCLI_REQUEST_RECV) {
		DLIST_REMOVE(req->transport->pending_recv, req);
	}
	req->status = NT_STATUS_IO_TIMEOUT;
	req->state = SMBCLI_REQUEST_ERROR;
	if (req->async.fn) {
		req->async.fn(req);
	}
}


/*
  destroy a request
*/
static int smbcli_request_destructor(struct smbcli_request *req)
{
	if (req->state == SMBCLI_REQUEST_RECV) {
		DLIST_REMOVE(req->transport->pending_recv, req);
	}
	return 0;
}


/*
  put a request into the send queue
*/
void smbcli_transport_send(struct smbcli_request *req)
{
	DATA_BLOB blob;
	NTSTATUS status;

	/* check if the transport is dead */
	if (req->transport->socket->sock == NULL) {
		req->state = SMBCLI_REQUEST_ERROR;
		req->status = NT_STATUS_NET_WRITE_FAULT;
		return;
	}

	blob = data_blob_const(req->out.buffer, req->out.size);
	status = packet_send(req->transport->packet, blob);
	if (!NT_STATUS_IS_OK(status)) {
		req->state = SMBCLI_REQUEST_ERROR;
		req->status = status;
		return;
	}

	packet_queue_run(req->transport->packet);
	if (req->transport->socket->sock == NULL) {
		req->state = SMBCLI_REQUEST_ERROR;
		req->status = NT_STATUS_NET_WRITE_FAULT;
		return;
	}

	if (req->one_way_request) {
		req->state = SMBCLI_REQUEST_DONE;
		smbcli_request_destroy(req);
		return;
	}

	req->state = SMBCLI_REQUEST_RECV;
	DLIST_ADD(req->transport->pending_recv, req);

	/* add a timeout */
	if (req->transport->options.request_timeout) {
		event_add_timed(req->transport->socket->event.ctx, req,
				timeval_current_ofs(req->transport->options.request_timeout, 0),
				smbcli_timeout_handler, req);
	}

	talloc_set_destructor(req, smbcli_request_destructor);
}

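/*
  Usage sketch (illustrative only): the asynchronous completion pattern the
  send queue feeds. Once a request has been queued, a caller can hang a
  callback on req->async.fn; smbcli_transport_finish_recv(), the timeout
  handler and the error paths above all invoke it after setting req->state
  to SMBCLI_REQUEST_DONE or SMBCLI_REQUEST_ERROR. my_reply_handler is a
  hypothetical caller-supplied function.

	static void my_reply_handler(struct smbcli_request *req)
	{
		if (req->state == SMBCLI_REQUEST_ERROR) {
			... handle req->status, e.g. NT_STATUS_IO_TIMEOUT ...
			return;
		}
		... parse req->in.vwv / req->in.data, or call the matching
		    *_recv() function ...
	}

	req = smb_raw_echo_send(transport, &echo);
	if (req != NULL) {
		req->async.fn = my_reply_handler;
	}
*/
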
/****************************************************************************
 Send an SMBecho (async send)
*****************************************************************************/
_PUBLIC_ struct smbcli_request *smb_raw_echo_send(struct smbcli_transport *transport,
						  struct smb_echo *p)
{
	struct smbcli_request *req;

	req = smbcli_request_setup_transport(transport, SMBecho, 1, p->in.size);
	if (!req) return NULL;

	SSVAL(req->out.vwv, VWV(0), p->in.repeat_count);

	memcpy(req->out.data, p->in.data, p->in.size);

	ZERO_STRUCT(p->out);

	if (!smbcli_request_send(req)) {
		smbcli_request_destroy(req);
		return NULL;
	}

	return req;
}

/****************************************************************************
 raw echo interface (async recv)
****************************************************************************/
NTSTATUS smb_raw_echo_recv(struct smbcli_request *req, TALLOC_CTX *mem_ctx,
			   struct smb_echo *p)
{
	if (!smbcli_request_receive(req) ||
	    smbcli_request_is_error(req)) {
		goto failed;
	}

	SMBCLI_CHECK_WCT(req, 1);
	p->out.count++;
	p->out.sequence_number = SVAL(req->in.vwv, VWV(0));
	p->out.size = req->in.data_size;
	talloc_free(p->out.data);
	p->out.data = talloc_array(mem_ctx, uint8_t, p->out.size);
	NT_STATUS_HAVE_NO_MEMORY(p->out.data);

	if (!smbcli_raw_pull_data(&req->in.bufinfo, req->in.data, p->out.size, p->out.data)) {
		req->status = NT_STATUS_BUFFER_TOO_SMALL;
	}

	if (p->out.count == p->in.repeat_count) {
		return smbcli_request_destroy(req);
	}

	return NT_STATUS_OK;

failed:
	return smbcli_request_destroy(req);
}

/****************************************************************************
 Send an echo (sync interface)
*****************************************************************************/
NTSTATUS smb_raw_echo(struct smbcli_transport *transport, struct smb_echo *p)
{
	struct smbcli_request *req = smb_raw_echo_send(transport, p);
	return smbcli_request_simple_recv(req);
}
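
/*
  Usage sketch (illustrative only): a minimal SMBecho round trip through the
  sync wrapper above. The payload and repeat count are arbitrary example
  values.

	struct smb_echo p;
	uint8_t payload[] = { 'p', 'i', 'n', 'g' };
	NTSTATUS status;

	p.in.repeat_count = 1;
	p.in.size = sizeof(payload);
	p.in.data = payload;

	status = smb_raw_echo(transport, &p);
	if (NT_STATUS_IS_OK(status)) {
		... p.out.count, p.out.sequence_number and p.out.data are
		    now filled in ...
	}
*/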