source: vendor/3.6.0/source4/libnet/libnet_rpc.c

Last change on this file was 740, checked in by Silvan Scherrer, 13 years ago

Samba Server: update vendor to 3.6.0

File size: 30.5 KB
/*
   Unix SMB/CIFS implementation.

   Copyright (C) Stefan Metzmacher 2004
   Copyright (C) Rafal Szczesniak 2005

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

#include "includes.h"
#include "libnet/libnet.h"
#include "libcli/libcli.h"
#include "libcli/composite/composite.h"
#include "librpc/rpc/dcerpc_proto.h"
#include "librpc/gen_ndr/ndr_lsa_c.h"
#include "librpc/gen_ndr/ndr_samr.h"


struct rpc_connect_srv_state {
	struct libnet_context *ctx;
	struct libnet_RpcConnect r;
	const char *binding;

	/* information about the progress */
	void (*monitor_fn)(struct monitor_msg*);
};


static void continue_pipe_connect(struct composite_context *ctx);


/**
 * Initiates connection to rpc pipe on remote server
 *
 * @param ctx initialised libnet context
 * @param mem_ctx memory context of this call
 * @param r data structure containing necessary parameters and return values
 * @return composite context of this call
 **/

static struct composite_context* libnet_RpcConnectSrv_send(struct libnet_context *ctx,
							    TALLOC_CTX *mem_ctx,
							    struct libnet_RpcConnect *r,
							    void (*monitor)(struct monitor_msg*))
{
	struct composite_context *c;
	struct rpc_connect_srv_state *s;
	struct dcerpc_binding *b;
	struct composite_context *pipe_connect_req;

	/* composite context allocation and setup */
	c = composite_create(ctx, ctx->event_ctx);
	if (c == NULL) return c;

	s = talloc_zero(c, struct rpc_connect_srv_state);
	if (composite_nomem(s, c)) return c;

	c->private_data = s;
	s->monitor_fn = monitor;

	s->ctx = ctx;
	s->r = *r;
	ZERO_STRUCT(s->r.out);

	/* prepare binding string */
	switch (r->level) {
	case LIBNET_RPC_CONNECT_SERVER:
		s->binding = talloc_asprintf(s, "ncacn_np:%s", r->in.name);
		break;
	case LIBNET_RPC_CONNECT_SERVER_ADDRESS:
		s->binding = talloc_asprintf(s, "ncacn_np:%s", r->in.address);
		break;

	case LIBNET_RPC_CONNECT_BINDING:
		s->binding = talloc_strdup(s, r->in.binding);
		break;

	case LIBNET_RPC_CONNECT_DC:
	case LIBNET_RPC_CONNECT_PDC:
		/* this should never happen - the DC and PDC levels have
		   a separate composite function */
	case LIBNET_RPC_CONNECT_DC_INFO:
		/* this should never happen - the DC_INFO level has
		   a separate composite function */
		composite_error(c, NT_STATUS_INVALID_LEVEL);
		return c;
	}

	/* parse binding string to the structure */
	c->status = dcerpc_parse_binding(c, s->binding, &b);
	if (!NT_STATUS_IS_OK(c->status)) {
		DEBUG(0, ("Failed to parse dcerpc binding '%s'\n", s->binding));
		composite_error(c, c->status);
		return c;
	}

	switch (r->level) {
	case LIBNET_RPC_CONNECT_SERVER:
	case LIBNET_RPC_CONNECT_SERVER_ADDRESS:
		b->flags = r->in.dcerpc_flags;
	}

	if (DEBUGLEVEL >= 10) {
		b->flags |= DCERPC_DEBUG_PRINT_BOTH;
	}

	if (r->level == LIBNET_RPC_CONNECT_SERVER_ADDRESS) {
		b->target_hostname = talloc_strdup(b, r->in.name);
		if (composite_nomem(b->target_hostname, c)) {
			return c;
		}
	}

	/* connect to remote dcerpc pipe */
	pipe_connect_req = dcerpc_pipe_connect_b_send(c, b, r->in.dcerpc_iface,
						      ctx->cred, c->event_ctx,
						      ctx->lp_ctx);
	if (composite_nomem(pipe_connect_req, c)) return c;

	composite_continue(c, pipe_connect_req, continue_pipe_connect, c);
	return c;
}


/*
  Step 2 of RpcConnectSrv - get rpc connection
*/
static void continue_pipe_connect(struct composite_context *ctx)
{
	struct composite_context *c;
	struct rpc_connect_srv_state *s;

	c = talloc_get_type(ctx->async.private_data, struct composite_context);
	s = talloc_get_type(c->private_data, struct rpc_connect_srv_state);

	/* receive result of rpc pipe connection */
	c->status = dcerpc_pipe_connect_b_recv(ctx, c, &s->r.out.dcerpc_pipe);

	/* post monitor message */
	if (s->monitor_fn) {
		struct monitor_msg msg;
		struct msg_net_rpc_connect data;
		struct dcerpc_binding *binding = s->r.out.dcerpc_pipe->binding;

		/* prepare monitor message and post it */
		data.host = binding->host;
		data.endpoint = binding->endpoint;
		data.transport = binding->transport;
		data.domain_name = binding->target_hostname;

		msg.type = mon_NetRpcConnect;
		msg.data = (void*)&data;
		msg.data_size = sizeof(data);
		s->monitor_fn(&msg);
	}

	composite_done(c);
}


/**
 * Receives result of connection to rpc pipe on remote server
 *
 * @param c composite context
 * @param ctx initialised libnet context
 * @param mem_ctx memory context of this call
 * @param r data structure containing necessary parameters and return values
 * @return nt status of rpc connection
 **/

static NTSTATUS libnet_RpcConnectSrv_recv(struct composite_context *c,
					  struct libnet_context *ctx,
					  TALLOC_CTX *mem_ctx,
					  struct libnet_RpcConnect *r)
{
	NTSTATUS status;
	struct rpc_connect_srv_state *s = talloc_get_type(c->private_data,
					  struct rpc_connect_srv_state);

	status = composite_wait(c);
	if (NT_STATUS_IS_OK(status)) {
		/* move the returned rpc pipe between memory contexts */
		s = talloc_get_type(c->private_data, struct rpc_connect_srv_state);
		r->out.dcerpc_pipe = talloc_steal(mem_ctx, s->r.out.dcerpc_pipe);

		/* reference created pipe structure to long-term libnet_context
		   so that it can be used by other api functions even after short-term
		   mem_ctx is freed */
		if (r->in.dcerpc_iface == &ndr_table_samr) {
			ctx->samr.pipe = talloc_reference(ctx, r->out.dcerpc_pipe);

		} else if (r->in.dcerpc_iface == &ndr_table_lsarpc) {
			ctx->lsa.pipe = talloc_reference(ctx, r->out.dcerpc_pipe);
		}

		r->out.error_string = talloc_strdup(mem_ctx, "Success");

	} else {
		r->out.error_string = talloc_asprintf(mem_ctx, "Error: %s", nt_errstr(status));
	}

	talloc_free(c);
	return status;
}


struct rpc_connect_dc_state {
	struct libnet_context *ctx;
	struct libnet_RpcConnect r;
	struct libnet_RpcConnect r2;
	struct libnet_LookupDCs f;
	const char *connect_name;

	/* information about the progress */
	void (*monitor_fn)(struct monitor_msg *);
};


static void continue_lookup_dc(struct tevent_req *req);
static void continue_rpc_connect(struct composite_context *ctx);


/**
 * Initiates connection to rpc pipe on domain pdc
 *
 * @param ctx initialised libnet context
 * @param mem_ctx memory context of this call
 * @param r data structure containing necessary parameters and return values
 * @return composite context of this call
 **/

static struct composite_context* libnet_RpcConnectDC_send(struct libnet_context *ctx,
							   TALLOC_CTX *mem_ctx,
							   struct libnet_RpcConnect *r,
							   void (*monitor)(struct monitor_msg *msg))
{
	struct composite_context *c;
	struct rpc_connect_dc_state *s;
	struct tevent_req *lookup_dc_req;

	/* composite context allocation and setup */
	c = composite_create(ctx, ctx->event_ctx);
	if (c == NULL) return c;

	s = talloc_zero(c, struct rpc_connect_dc_state);
	if (composite_nomem(s, c)) return c;

	c->private_data = s;
	s->monitor_fn = monitor;

	s->ctx = ctx;
	s->r = *r;
	ZERO_STRUCT(s->r.out);

	switch (r->level) {
	case LIBNET_RPC_CONNECT_PDC:
		s->f.in.name_type = NBT_NAME_PDC;
		break;

	case LIBNET_RPC_CONNECT_DC:
		s->f.in.name_type = NBT_NAME_LOGON;
		break;

	default:
		break;
	}

	s->f.in.domain_name = r->in.name;
	s->f.out.num_dcs = 0;
	s->f.out.dcs = NULL;

	/* find the domain pdc first */
	lookup_dc_req = libnet_LookupDCs_send(ctx, c, &s->f);
	if (composite_nomem(lookup_dc_req, c)) return c;

	tevent_req_set_callback(lookup_dc_req, continue_lookup_dc, c);
	return c;
}


/*
  Step 2 of RpcConnectDC: get domain controller name and
  initiate RpcConnect to it
*/
static void continue_lookup_dc(struct tevent_req *req)
{
	struct composite_context *c;
	struct rpc_connect_dc_state *s;
	struct composite_context *rpc_connect_req;
	struct monitor_msg msg;
	struct msg_net_lookup_dc data;

	c = tevent_req_callback_data(req, struct composite_context);
	s = talloc_get_type_abort(c->private_data, struct rpc_connect_dc_state);

	/* receive result of domain controller lookup */
	c->status = libnet_LookupDCs_recv(req, c, &s->f);
	if (!composite_is_ok(c)) return;

	/* decide on preferred address type depending on DC type */
	s->connect_name = s->f.out.dcs[0].name;

	/* post monitor message */
	if (s->monitor_fn) {
		/* prepare a monitor message and post it */
		data.domain_name = s->f.in.domain_name;
		data.hostname = s->f.out.dcs[0].name;
		data.address = s->f.out.dcs[0].address;

		msg.type = mon_NetLookupDc;
		msg.data = &data;
		msg.data_size = sizeof(data);
		s->monitor_fn(&msg);
	}

	/* ok, the pdc has been found, so attempt the rpc connection */
	s->r2.level = LIBNET_RPC_CONNECT_SERVER_ADDRESS;

	/* this will cause yet another name resolution, but at least
	 * we pass the right name down the stack now */
	s->r2.in.name = talloc_strdup(s, s->connect_name);
	s->r2.in.address = talloc_steal(s, s->f.out.dcs[0].address);
	s->r2.in.dcerpc_iface = s->r.in.dcerpc_iface;
	s->r2.in.dcerpc_flags = s->r.in.dcerpc_flags;

	/* send rpc connect request to the server */
	rpc_connect_req = libnet_RpcConnectSrv_send(s->ctx, c, &s->r2, s->monitor_fn);
	if (composite_nomem(rpc_connect_req, c)) return;

	composite_continue(c, rpc_connect_req, continue_rpc_connect, c);
}


/*
  Step 3 of RpcConnectDC: get rpc connection to the server
*/
static void continue_rpc_connect(struct composite_context *ctx)
{
	struct composite_context *c;
	struct rpc_connect_dc_state *s;

	c = talloc_get_type(ctx->async.private_data, struct composite_context);
	s = talloc_get_type(c->private_data, struct rpc_connect_dc_state);

	c->status = libnet_RpcConnectSrv_recv(ctx, s->ctx, c, &s->r2);

	/* error string is to be passed anyway */
	s->r.out.error_string = s->r2.out.error_string;
	if (!composite_is_ok(c)) return;

	s->r.out.dcerpc_pipe = s->r2.out.dcerpc_pipe;

	/* post monitor message */
	if (s->monitor_fn) {
		struct monitor_msg msg;
		struct msg_net_rpc_connect data;
		struct dcerpc_binding *binding = s->r.out.dcerpc_pipe->binding;

		data.host = binding->host;
		data.endpoint = binding->endpoint;
		data.transport = binding->transport;
		data.domain_name = binding->target_hostname;

		msg.type = mon_NetRpcConnect;
		msg.data = (void*)&data;
		msg.data_size = sizeof(data);
		s->monitor_fn(&msg);
	}

	composite_done(c);
}


/**
 * Receives result of connection to rpc pipe on domain pdc
 *
 * @param c composite context
 * @param ctx initialised libnet context
 * @param mem_ctx memory context of this call
 * @param r data structure containing necessary parameters and return values
 * @return nt status of rpc connection
 **/

static NTSTATUS libnet_RpcConnectDC_recv(struct composite_context *c,
					 struct libnet_context *ctx,
					 TALLOC_CTX *mem_ctx,
					 struct libnet_RpcConnect *r)
{
	NTSTATUS status;
	struct rpc_connect_dc_state *s = talloc_get_type(c->private_data,
					 struct rpc_connect_dc_state);

	status = composite_wait(c);
	if (NT_STATUS_IS_OK(status)) {
		/* move connected rpc pipe between memory contexts

		   The use of talloc_reparent(talloc_parent(), ...) is
		   bizarre, but it is needed because of the absolutely
		   atrocious use of talloc in this code. We need to
		   force the original parent to change, but finding
		   the original parent is well nigh impossible at this
		   point in the code (yes, I tried).
		*/
		r->out.dcerpc_pipe = talloc_reparent(talloc_parent(s->r.out.dcerpc_pipe),
						     mem_ctx, s->r.out.dcerpc_pipe);

		/* reference created pipe structure to long-term libnet_context
		   so that it can be used by other api functions even after short-term
		   mem_ctx is freed */
		if (r->in.dcerpc_iface == &ndr_table_samr) {
			ctx->samr.pipe = talloc_reference(ctx, r->out.dcerpc_pipe);

		} else if (r->in.dcerpc_iface == &ndr_table_lsarpc) {
			ctx->lsa.pipe = talloc_reference(ctx, r->out.dcerpc_pipe);
		}

	} else {
		r->out.error_string = talloc_asprintf(mem_ctx,
						      "Failed to rpc connect: %s",
						      nt_errstr(status));
	}

	talloc_free(c);
	return status;
}



struct rpc_connect_dci_state {
	struct libnet_context *ctx;
	struct libnet_RpcConnect r;
	struct libnet_RpcConnect rpc_conn;
	struct policy_handle lsa_handle;
	struct lsa_QosInfo qos;
	struct lsa_ObjectAttribute attr;
	struct lsa_OpenPolicy2 lsa_open_policy;
	struct dcerpc_pipe *lsa_pipe;
	struct lsa_QueryInfoPolicy2 lsa_query_info2;
	struct lsa_QueryInfoPolicy lsa_query_info;
	struct dcerpc_binding *final_binding;
	struct dcerpc_pipe *final_pipe;

	/* information about the progress */
	void (*monitor_fn)(struct monitor_msg*);
};


static void continue_dci_rpc_connect(struct composite_context *ctx);
static void continue_lsa_policy(struct tevent_req *subreq);
static void continue_lsa_query_info(struct tevent_req *subreq);
static void continue_lsa_query_info2(struct tevent_req *subreq);
static void continue_epm_map_binding(struct composite_context *ctx);
static void continue_secondary_conn(struct composite_context *ctx);
static void continue_epm_map_binding_send(struct composite_context *c);


/**
 * Initiates connection to rpc pipe on remote server or pdc. Received result
 * contains info on the domain name, domain sid and realm.
 *
 * @param ctx initialised libnet context
 * @param mem_ctx memory context of this call
 * @param r data structure containing necessary parameters and return values. Must be a talloc context
 * @return composite context of this call
 **/

static struct composite_context* libnet_RpcConnectDCInfo_send(struct libnet_context *ctx,
							       TALLOC_CTX *mem_ctx,
							       struct libnet_RpcConnect *r,
							       void (*monitor)(struct monitor_msg*))
{
	struct composite_context *c, *conn_req;
	struct rpc_connect_dci_state *s;

	/* composite context allocation and setup */
	c = composite_create(ctx, ctx->event_ctx);
	if (c == NULL) return c;

	s = talloc_zero(c, struct rpc_connect_dci_state);
	if (composite_nomem(s, c)) return c;

	c->private_data = s;
	s->monitor_fn = monitor;

	s->ctx = ctx;
	s->r = *r;
	ZERO_STRUCT(s->r.out);

	/* proceed to a pure rpc connection if the binding string is provided,
	   otherwise try to connect to a domain controller */
	if (r->in.binding == NULL) {
		/* Pass on any binding flags (such as anonymous fallback) that have been set */
		s->rpc_conn.in.dcerpc_flags = r->in.dcerpc_flags;

		s->rpc_conn.in.name = r->in.name;
		s->rpc_conn.level = LIBNET_RPC_CONNECT_DC;
	} else {
		s->rpc_conn.in.binding = r->in.binding;
		s->rpc_conn.level = LIBNET_RPC_CONNECT_BINDING;
	}

	/* we need to query information on the lsarpc interface first */
	s->rpc_conn.in.dcerpc_iface = &ndr_table_lsarpc;

	/* request connection to the lsa pipe on the pdc */
	conn_req = libnet_RpcConnect_send(ctx, c, &s->rpc_conn, s->monitor_fn);
	if (composite_nomem(conn_req, c)) return c;

	composite_continue(c, conn_req, continue_dci_rpc_connect, c);
	return c;
}


/*
  Step 2 of RpcConnectDCInfo: receive opened rpc pipe and open
  lsa policy handle
*/
static void continue_dci_rpc_connect(struct composite_context *ctx)
{
	struct composite_context *c;
	struct rpc_connect_dci_state *s;
	struct tevent_req *subreq;

	c = talloc_get_type(ctx->async.private_data, struct composite_context);
	s = talloc_get_type(c->private_data, struct rpc_connect_dci_state);

	c->status = libnet_RpcConnect_recv(ctx, s->ctx, c, &s->rpc_conn);
	if (!NT_STATUS_IS_OK(c->status)) {
		composite_error(c, c->status);
		return;
	}

	/* post monitor message */
	if (s->monitor_fn) {
		struct monitor_msg msg;
		struct msg_net_rpc_connect data;
		struct dcerpc_binding *binding = s->rpc_conn.out.dcerpc_pipe->binding;

		data.host = binding->host;
		data.endpoint = binding->endpoint;
		data.transport = binding->transport;
		data.domain_name = binding->target_hostname;

		msg.type = mon_NetRpcConnect;
		msg.data = (void*)&data;
		msg.data_size = sizeof(data);
		s->monitor_fn(&msg);
	}

	/* prepare to open a policy handle on lsa pipe */
	s->lsa_pipe = s->ctx->lsa.pipe;

	s->qos.len = 0;
	s->qos.impersonation_level = 2;
	s->qos.context_mode = 1;
	s->qos.effective_only = 0;

	s->attr.sec_qos = &s->qos;

	s->lsa_open_policy.in.attr = &s->attr;
	s->lsa_open_policy.in.system_name = talloc_asprintf(c, "\\");
	if (composite_nomem(s->lsa_open_policy.in.system_name, c)) return;

	s->lsa_open_policy.in.access_mask = SEC_FLAG_MAXIMUM_ALLOWED;
	s->lsa_open_policy.out.handle = &s->lsa_handle;

	subreq = dcerpc_lsa_OpenPolicy2_r_send(s, c->event_ctx,
					       s->lsa_pipe->binding_handle,
					       &s->lsa_open_policy);
	if (composite_nomem(subreq, c)) return;

	tevent_req_set_callback(subreq, continue_lsa_policy, c);
}


/*
  Step 3 of RpcConnectDCInfo: Get policy handle and query lsa info
  for kerberos realm (dns name) and guid. The query may fail.
*/
static void continue_lsa_policy(struct tevent_req *subreq)
{
	struct composite_context *c;
	struct rpc_connect_dci_state *s;

	c = tevent_req_callback_data(subreq, struct composite_context);
	s = talloc_get_type(c->private_data, struct rpc_connect_dci_state);

	c->status = dcerpc_lsa_OpenPolicy2_r_recv(subreq, s);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(c->status)) {
		composite_error(c, c->status);
		return;
	}

	if (NT_STATUS_EQUAL(s->lsa_open_policy.out.result, NT_STATUS_RPC_PROTSEQ_NOT_SUPPORTED)) {
		s->r.out.realm = NULL;
		s->r.out.guid = NULL;
		s->r.out.domain_name = NULL;
		s->r.out.domain_sid = NULL;

		/* Skip to creating the actual connection, no info available on this transport */
		continue_epm_map_binding_send(c);
		return;

	} else if (!NT_STATUS_IS_OK(s->lsa_open_policy.out.result)) {
		composite_error(c, s->lsa_open_policy.out.result);
		return;
	}

	/* post monitor message */
	if (s->monitor_fn) {
		struct monitor_msg msg;

		msg.type = mon_LsaOpenPolicy;
		msg.data = NULL;
		msg.data_size = 0;
		s->monitor_fn(&msg);
	}

	/* query lsa info for dns domain name and guid */
	s->lsa_query_info2.in.handle = &s->lsa_handle;
	s->lsa_query_info2.in.level = LSA_POLICY_INFO_DNS;
	s->lsa_query_info2.out.info = talloc_zero(c, union lsa_PolicyInformation *);
	if (composite_nomem(s->lsa_query_info2.out.info, c)) return;

	subreq = dcerpc_lsa_QueryInfoPolicy2_r_send(s, c->event_ctx,
						    s->lsa_pipe->binding_handle,
						    &s->lsa_query_info2);
	if (composite_nomem(subreq, c)) return;

	tevent_req_set_callback(subreq, continue_lsa_query_info2, c);
}


/*
  Step 4 of RpcConnectDCInfo: Get realm and guid if provided (rpc call
  may result in failure) and query lsa info for domain name and sid.
*/
static void continue_lsa_query_info2(struct tevent_req *subreq)
{
	struct composite_context *c;
	struct rpc_connect_dci_state *s;

	c = tevent_req_callback_data(subreq, struct composite_context);
	s = talloc_get_type(c->private_data, struct rpc_connect_dci_state);

	c->status = dcerpc_lsa_QueryInfoPolicy2_r_recv(subreq, s);
	TALLOC_FREE(subreq);

	/* In case of error just null the realm and guid and proceed
	   to the next step. After all, the server we are talking to does not
	   have to be an AD domain controller - an NT-style PDC also counts */

	if (NT_STATUS_EQUAL(c->status, NT_STATUS_RPC_PROCNUM_OUT_OF_RANGE)) {
		s->r.out.realm = NULL;
		s->r.out.guid = NULL;

	} else {
		if (!NT_STATUS_IS_OK(c->status)) {
			s->r.out.error_string = talloc_asprintf(c,
								"lsa_QueryInfoPolicy2 failed: %s",
								nt_errstr(c->status));
			composite_error(c, c->status);
			return;
		}

		if (!NT_STATUS_IS_OK(s->lsa_query_info2.out.result)) {
			s->r.out.error_string = talloc_asprintf(c,
								"lsa_QueryInfoPolicy2 failed: %s",
								nt_errstr(s->lsa_query_info2.out.result));
			composite_error(c, s->lsa_query_info2.out.result);
			return;
		}

		/* Copy the dns domain name and guid from the query result */

		/* this should actually be a conversion from lsa_StringLarge */
		s->r.out.realm = (*s->lsa_query_info2.out.info)->dns.dns_domain.string;
		s->r.out.guid = talloc(c, struct GUID);
		if (composite_nomem(s->r.out.guid, c)) {
			s->r.out.error_string = NULL;
			return;
		}
		*s->r.out.guid = (*s->lsa_query_info2.out.info)->dns.domain_guid;
	}

	/* post monitor message */
	if (s->monitor_fn) {
		struct monitor_msg msg;

		msg.type = mon_LsaQueryPolicy;
		msg.data = NULL;
		msg.data_size = 0;
		s->monitor_fn(&msg);
	}

	/* query lsa info for domain name and sid */
	s->lsa_query_info.in.handle = &s->lsa_handle;
	s->lsa_query_info.in.level = LSA_POLICY_INFO_DOMAIN;
	s->lsa_query_info.out.info = talloc_zero(c, union lsa_PolicyInformation *);
	if (composite_nomem(s->lsa_query_info.out.info, c)) return;

	subreq = dcerpc_lsa_QueryInfoPolicy_r_send(s, c->event_ctx,
						   s->lsa_pipe->binding_handle,
						   &s->lsa_query_info);
	if (composite_nomem(subreq, c)) return;

	tevent_req_set_callback(subreq, continue_lsa_query_info, c);
}


/*
  Step 5 of RpcConnectDCInfo: Get domain name and sid
*/
static void continue_lsa_query_info(struct tevent_req *subreq)
{
	struct composite_context *c;
	struct rpc_connect_dci_state *s;

	c = tevent_req_callback_data(subreq, struct composite_context);
	s = talloc_get_type(c->private_data, struct rpc_connect_dci_state);

	c->status = dcerpc_lsa_QueryInfoPolicy_r_recv(subreq, s);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(c->status)) {
		s->r.out.error_string = talloc_asprintf(c,
							"lsa_QueryInfoPolicy failed: %s",
							nt_errstr(c->status));
		composite_error(c, c->status);
		return;
	}

	/* post monitor message */
	if (s->monitor_fn) {
		struct monitor_msg msg;

		msg.type = mon_LsaQueryPolicy;
		msg.data = NULL;
		msg.data_size = 0;
		s->monitor_fn(&msg);
	}

	/* Copy the domain name and sid from the query result */
	s->r.out.domain_sid = (*s->lsa_query_info.out.info)->domain.sid;
	s->r.out.domain_name = (*s->lsa_query_info.out.info)->domain.name.string;

	continue_epm_map_binding_send(c);
}

/*
  Step 5 (continued) of RpcConnectDCInfo: request endpoint
  map binding.

  We may short-cut to this step if we don't support LSA OpenPolicy on this transport
*/
static void continue_epm_map_binding_send(struct composite_context *c)
{
	struct rpc_connect_dci_state *s;
	struct composite_context *epm_map_req;
	s = talloc_get_type(c->private_data, struct rpc_connect_dci_state);

	/* prepare to get endpoint mapping for the requested interface */
	s->final_binding = talloc_zero(s, struct dcerpc_binding);
	if (composite_nomem(s->final_binding, c)) return;

	*s->final_binding = *s->lsa_pipe->binding;
	/* Ensure we keep hold of the member elements */
	if (composite_nomem(talloc_reference(s->final_binding, s->lsa_pipe->binding), c)) return;

	epm_map_req = dcerpc_epm_map_binding_send(c, s->final_binding, s->r.in.dcerpc_iface,
						  s->lsa_pipe->conn->event_ctx, s->ctx->lp_ctx);
	if (composite_nomem(epm_map_req, c)) return;

	composite_continue(c, epm_map_req, continue_epm_map_binding, c);
}

/*
  Step 6 of RpcConnectDCInfo: Receive endpoint mapping and create secondary
  rpc connection derived from already used pipe but connected to the requested
  one (as specified in libnet_RpcConnect structure)
*/
static void continue_epm_map_binding(struct composite_context *ctx)
{
	struct composite_context *c, *sec_conn_req;
	struct rpc_connect_dci_state *s;

	c = talloc_get_type(ctx->async.private_data, struct composite_context);
	s = talloc_get_type(c->private_data, struct rpc_connect_dci_state);

	c->status = dcerpc_epm_map_binding_recv(ctx);
	if (!NT_STATUS_IS_OK(c->status)) {
		s->r.out.error_string = talloc_asprintf(c,
							"failed to map pipe with endpoint mapper - %s",
							nt_errstr(c->status));
		composite_error(c, c->status);
		return;
	}

	/* create secondary connection derived from lsa pipe */
	sec_conn_req = dcerpc_secondary_connection_send(s->lsa_pipe, s->final_binding);
	if (composite_nomem(sec_conn_req, c)) return;

	composite_continue(c, sec_conn_req, continue_secondary_conn, c);
}


/*
  Step 7 of RpcConnectDCInfo: Get actual pipe to be returned
  and complete this composite call
*/
static void continue_secondary_conn(struct composite_context *ctx)
{
	struct composite_context *c;
	struct rpc_connect_dci_state *s;

	c = talloc_get_type(ctx->async.private_data, struct composite_context);
	s = talloc_get_type(c->private_data, struct rpc_connect_dci_state);

	c->status = dcerpc_secondary_connection_recv(ctx, &s->final_pipe);
	if (!NT_STATUS_IS_OK(c->status)) {
		s->r.out.error_string = talloc_asprintf(c,
							"secondary connection failed: %s",
							nt_errstr(c->status));

		composite_error(c, c->status);
		return;
	}

	s->r.out.dcerpc_pipe = s->final_pipe;

	/* post monitor message */
	if (s->monitor_fn) {
		struct monitor_msg msg;
		struct msg_net_rpc_connect data;
		struct dcerpc_binding *binding = s->r.out.dcerpc_pipe->binding;

		/* prepare monitor message and post it */
		data.host = binding->host;
		data.endpoint = binding->endpoint;
		data.transport = binding->transport;
		data.domain_name = binding->target_hostname;

		msg.type = mon_NetRpcConnect;
		msg.data = (void*)&data;
		msg.data_size = sizeof(data);
		s->monitor_fn(&msg);
	}

	composite_done(c);
}


/**
 * Receives result of connection to rpc pipe and gets basic
 * domain info (name, sid, realm, guid)
 *
 * @param c composite context
 * @param ctx initialised libnet context
 * @param mem_ctx memory context of this call
 * @param r data structure containing return values
 * @return nt status of rpc connection
 **/

static NTSTATUS libnet_RpcConnectDCInfo_recv(struct composite_context *c, struct libnet_context *ctx,
					     TALLOC_CTX *mem_ctx, struct libnet_RpcConnect *r)
{
	NTSTATUS status;
	struct rpc_connect_dci_state *s = talloc_get_type(c->private_data,
					  struct rpc_connect_dci_state);

	status = composite_wait(c);
	if (NT_STATUS_IS_OK(status)) {
		r->out.realm = talloc_steal(mem_ctx, s->r.out.realm);
		r->out.guid = talloc_steal(mem_ctx, s->r.out.guid);
		r->out.domain_name = talloc_steal(mem_ctx, s->r.out.domain_name);
		r->out.domain_sid = talloc_steal(mem_ctx, s->r.out.domain_sid);

		r->out.dcerpc_pipe = talloc_steal(mem_ctx, s->r.out.dcerpc_pipe);

		/* reference created pipe structure to long-term libnet_context
		   so that it can be used by other api functions even after short-term
		   mem_ctx is freed */
		if (r->in.dcerpc_iface == &ndr_table_samr) {
			ctx->samr.pipe = talloc_reference(ctx, r->out.dcerpc_pipe);
			ctx->samr.samr_handle = ctx->samr.pipe->binding_handle;

		} else if (r->in.dcerpc_iface == &ndr_table_lsarpc) {
			ctx->lsa.pipe = talloc_reference(ctx, r->out.dcerpc_pipe);
			ctx->lsa.lsa_handle = ctx->lsa.pipe->binding_handle;
		}

	} else {
		if (s->r.out.error_string) {
			r->out.error_string = talloc_steal(mem_ctx, s->r.out.error_string);
		} else if (r->in.binding == NULL) {
			r->out.error_string = talloc_asprintf(mem_ctx, "Connection to DC failed: %s", nt_errstr(status));
		} else {
			r->out.error_string = talloc_asprintf(mem_ctx, "Connection to DC %s failed: %s",
							      r->in.binding, nt_errstr(status));
		}
	}

	talloc_free(c);
	return status;
}


/**
 * Initiates connection to rpc pipe on remote server or pdc, optionally
 * providing domain info
 *
 * @param ctx initialised libnet context
 * @param mem_ctx memory context of this call
 * @param r data structure containing necessary parameters and return values
 * @return composite context of this call
 **/

struct composite_context* libnet_RpcConnect_send(struct libnet_context *ctx,
						 TALLOC_CTX *mem_ctx,
						 struct libnet_RpcConnect *r,
						 void (*monitor)(struct monitor_msg*))
{
	struct composite_context *c;

	switch (r->level) {
	case LIBNET_RPC_CONNECT_SERVER:
	case LIBNET_RPC_CONNECT_SERVER_ADDRESS:
	case LIBNET_RPC_CONNECT_BINDING:
		c = libnet_RpcConnectSrv_send(ctx, mem_ctx, r, monitor);
		break;

	case LIBNET_RPC_CONNECT_PDC:
	case LIBNET_RPC_CONNECT_DC:
		c = libnet_RpcConnectDC_send(ctx, mem_ctx, r, monitor);
		break;

	case LIBNET_RPC_CONNECT_DC_INFO:
		c = libnet_RpcConnectDCInfo_send(ctx, mem_ctx, r, monitor);
		break;

	default:
		c = talloc_zero(mem_ctx, struct composite_context);
		composite_error(c, NT_STATUS_INVALID_LEVEL);
	}

	return c;
}


/**
 * Receives result of connection to rpc pipe on remote server or pdc
 *
 * @param c composite context
 * @param ctx initialised libnet context
 * @param mem_ctx memory context of this call
 * @param r data structure containing necessary parameters and return values
 * @return nt status of rpc connection
 **/

NTSTATUS libnet_RpcConnect_recv(struct composite_context *c, struct libnet_context *ctx,
				TALLOC_CTX *mem_ctx, struct libnet_RpcConnect *r)
{
	switch (r->level) {
	case LIBNET_RPC_CONNECT_SERVER:
	case LIBNET_RPC_CONNECT_SERVER_ADDRESS:
	case LIBNET_RPC_CONNECT_BINDING:
		return libnet_RpcConnectSrv_recv(c, ctx, mem_ctx, r);

	case LIBNET_RPC_CONNECT_PDC:
	case LIBNET_RPC_CONNECT_DC:
		return libnet_RpcConnectDC_recv(c, ctx, mem_ctx, r);

	case LIBNET_RPC_CONNECT_DC_INFO:
		return libnet_RpcConnectDCInfo_recv(c, ctx, mem_ctx, r);

	default:
		ZERO_STRUCT(r->out);
		return NT_STATUS_INVALID_LEVEL;
	}
}
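
/*
  Illustrative sketch (kept out of the build by the #if 0 guard): one way a
  caller might drive the asynchronous pair above at the
  LIBNET_RPC_CONNECT_DC_INFO level. The function and variable names below are
  invented for the example; only libnet_RpcConnect_send/_recv, ndr_table_samr
  and the libnet_RpcConnect fields come from this file, and the libnet context
  is assumed to have been initialised (credentials, loadparm) by the caller.
*/
#if 0
static NTSTATUS example_rpc_connect_dc_info(struct libnet_context *ctx,
					    TALLOC_CTX *mem_ctx,
					    const char *domain_name)
{
	struct composite_context *c;
	struct libnet_RpcConnect *r;
	NTSTATUS status;

	/* the DC_INFO doc comment above asks for r to be a talloc context */
	r = talloc_zero(mem_ctx, struct libnet_RpcConnect);
	if (r == NULL) return NT_STATUS_NO_MEMORY;

	r->level           = LIBNET_RPC_CONNECT_DC_INFO;
	r->in.name         = domain_name;      /* a DC is located via LookupDCs */
	r->in.dcerpc_iface = &ndr_table_samr;  /* interface wanted on the final pipe */

	/* kick off the composite request; the monitor callback may be NULL */
	c = libnet_RpcConnect_send(ctx, mem_ctx, r, NULL);

	/* _recv waits on the composite context and fills in r->out */
	status = libnet_RpcConnect_recv(c, ctx, mem_ctx, r);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(0, ("RpcConnect failed: %s\n", r->out.error_string));
		return status;
	}

	/* r->out.domain_name, r->out.domain_sid, r->out.realm, r->out.guid and
	   r->out.dcerpc_pipe are now available to the caller */
	return status;
}
#endif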


/**
 * Connect to a rpc pipe on a remote server - sync version
 *
 * @param ctx initialised libnet context
 * @param mem_ctx memory context of this call
 * @param r data structure containing necessary parameters and return values
 * @return nt status of rpc connection
 **/

NTSTATUS libnet_RpcConnect(struct libnet_context *ctx, TALLOC_CTX *mem_ctx,
			   struct libnet_RpcConnect *r)
{
	struct composite_context *c;

	c = libnet_RpcConnect_send(ctx, mem_ctx, r, NULL);
	return libnet_RpcConnect_recv(c, ctx, mem_ctx, r);
}
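
/*
  Illustrative sketch (kept out of the build by the #if 0 guard): minimal
  synchronous use of libnet_RpcConnect() against a named server. The function
  name and the choice of the lsarpc interface are only an example; error
  handling and the lifetime of mem_ctx remain the caller's responsibility.
*/
#if 0
static NTSTATUS example_rpc_connect_server(struct libnet_context *ctx,
					   TALLOC_CTX *mem_ctx,
					   const char *server_name)
{
	struct libnet_RpcConnect r;
	NTSTATUS status;

	ZERO_STRUCT(r);
	r.level           = LIBNET_RPC_CONNECT_SERVER;  /* connect by name over ncacn_np */
	r.in.name         = server_name;
	r.in.dcerpc_iface = &ndr_table_lsarpc;

	status = libnet_RpcConnect(ctx, mem_ctx, &r);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(0, ("connect to %s failed: %s\n", server_name,
			  r.out.error_string ? r.out.error_string : nt_errstr(status)));
		return status;
	}

	/* r.out.dcerpc_pipe is ready for use; for the lsarpc interface it is
	   also cached on ctx->lsa.pipe by libnet_RpcConnectSrv_recv() */
	return status;
}
#endif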