Changeset 740 for vendor/current/source4/wrepl_server
- Timestamp: Nov 14, 2012, 12:59:34 PM
- Location: vendor/current/source4/wrepl_server
- Files: 1 added, 1 deleted, 7 edited
vendor/current/source4/wrepl_server/wrepl_apply_records.c
--- r414
+++ r740

 #include "includes.h"
+#include <tevent.h>
 #include "smbd/service_task.h"
 #include "lib/messaging/irpc.h"
-#include "librpc/gen_ndr/ndr_irpc.h"
+#include "librpc/gen_ndr/ndr_irpc_c.h"
 #include "librpc/gen_ndr/ndr_winsrepl.h"
 #include "wrepl_server/wrepl_server.h"
…
 struct r_do_challenge_state {
-	struct messaging_context *msg_ctx;
+	struct dcerpc_binding_handle *irpc_handle;
 	struct wreplsrv_partner *partner;
 	struct winsdb_record *rec;
…
 	struct wrepl_name replica;
 	struct nbtd_proxy_wins_challenge r;
+	struct nbtd_proxy_wins_release_demand dr;
 };
 
-static void r_do_late_release_demand_handler(struct irpc_request *ireq)
+static void r_do_late_release_demand_handler(struct tevent_req *subreq)
 {
 	NTSTATUS status;
-	struct r_do_challenge_state *state = talloc_get_type(ireq->async.private_data,
-							     struct r_do_challenge_state);
-
-	status = irpc_call_recv(ireq);
+	struct r_do_challenge_state *state =
+		tevent_req_callback_data(subreq,
+		struct r_do_challenge_state);
+
+	status = dcerpc_nbtd_proxy_wins_release_demand_r_recv(subreq, state);
+	TALLOC_FREE(subreq);
+
 	/* don't care about the result */
 	talloc_free(state);
…
 static NTSTATUS r_do_late_release_demand(struct r_do_challenge_state *state)
 {
-	struct irpc_request *ireq;
-	struct server_id *nbt_servers;
-	struct nbtd_proxy_wins_release_demand r;
+	struct tevent_req *subreq;
 	uint32_t i;
 
…
 		 nbt_name_string(state, &state->replica.name)));
 
-	nbt_servers = irpc_servers_byname(state->msg_ctx, state, "nbt_server");
-	if ((nbt_servers == NULL) || (nbt_servers[0].id == 0)) {
-		return NT_STATUS_INTERNAL_ERROR;
-	}
-
-	r.in.name	= state->replica.name;
-	r.in.num_addrs	= state->r.out.num_addrs;
-	r.in.addrs	= talloc_array(state, struct nbtd_proxy_wins_addr, r.in.num_addrs);
-	NT_STATUS_HAVE_NO_MEMORY(r.in.addrs);
+	state->dr.in.name	= state->replica.name;
+	state->dr.in.num_addrs	= state->r.out.num_addrs;
+	state->dr.in.addrs	= talloc_array(state,
+					       struct nbtd_proxy_wins_addr,
+					       state->dr.in.num_addrs);
+	NT_STATUS_HAVE_NO_MEMORY(state->dr.in.addrs);
 	/* TODO: fix pidl to handle inline ipv4address arrays */
-	for (i=0; i < r.in.num_addrs; i++) {
-		r.in.addrs[i].addr = state->r.out.addrs[i].addr;
-	}
-
-	ireq = IRPC_CALL_SEND(state->msg_ctx, nbt_servers[0],
-			      irpc, NBTD_PROXY_WINS_RELEASE_DEMAND,
-			      &r, state);
-	NT_STATUS_HAVE_NO_MEMORY(ireq);
-
-	ireq->async.fn		= r_do_late_release_demand_handler;
-	ireq->async.private_data= state;
+	for (i=0; i < state->dr.in.num_addrs; i++) {
+		state->dr.in.addrs[i].addr = state->r.out.addrs[i].addr;
+	}
+
+	subreq = dcerpc_nbtd_proxy_wins_release_demand_r_send(state,
+			state->partner->service->task->event_ctx,
+			state->irpc_handle,
+			&state->dr);
+	NT_STATUS_HAVE_NO_MEMORY(subreq);
+
+	tevent_req_set_callback(subreq, r_do_late_release_demand_handler, state);
 
 	return NT_STATUS_OK;
…
 	_UA_MA_DI_A<00>: C:BEST vs. B:BEST2 (C:ALL) => B:MHOMED => MHOMED_MERGE
 */
-static void r_do_challenge_handler(struct irpc_request *ireq)
+static void r_do_challenge_handler(struct tevent_req *subreq)
 {
 	NTSTATUS status;
-	struct r_do_challenge_state *state = talloc_get_type(ireq->async.private_data,
-							     struct r_do_challenge_state);
+	struct r_do_challenge_state *state =
+		tevent_req_callback_data(subreq,
+		struct r_do_challenge_state);
 	bool old_is_subset = false;
 	bool new_is_subset = false;
…
 	uint32_t num_rec_addrs;
 
-	status = irpc_call_recv(ireq);
+	status = dcerpc_nbtd_proxy_wins_challenge_r_recv(subreq, state);
+	TALLOC_FREE(subreq);
 
 	DEBUG(4,("r_do_challenge_handler: %s: %s\n",
…
 			       struct wrepl_name *replica)
 {
-	struct irpc_request *ireq;
 	struct r_do_challenge_state *state;
-	struct server_id *nbt_servers;
+	struct tevent_req *subreq;
 	const char **addrs;
 	uint32_t i;
…
 	state = talloc_zero(mem_ctx, struct r_do_challenge_state);
 	NT_STATUS_HAVE_NO_MEMORY(state);
-	state->msg_ctx	= partner->service->task->msg_ctx;
 	state->partner	= partner;
 	state->rec	= talloc_steal(state, rec);
…
 	talloc_steal(state, replica->addresses);
 
-	nbt_servers = irpc_servers_byname(state->msg_ctx, state, "nbt_server");
-	if ((nbt_servers == NULL) || (nbt_servers[0].id == 0)) {
+	state->irpc_handle = irpc_binding_handle_by_name(state,
+					partner->service->task->msg_ctx,
+					"nbt_server",
+					&ndr_table_irpc);
+	if (state->irpc_handle == NULL) {
 		return NT_STATUS_INTERNAL_ERROR;
 	}
…
 	}
 
-	ireq = IRPC_CALL_SEND(state->msg_ctx, nbt_servers[0],
-			      irpc, NBTD_PROXY_WINS_CHALLENGE,
-			      &state->r, state);
-	NT_STATUS_HAVE_NO_MEMORY(ireq);
-
-	ireq->async.fn		= r_do_challenge_handler;
-	ireq->async.private_data= state;
+	subreq = dcerpc_nbtd_proxy_wins_challenge_r_send(state,
+			state->partner->service->task->event_ctx,
+			state->irpc_handle,
+			&state->r);
+	NT_STATUS_HAVE_NO_MEMORY(subreq);
+
+	tevent_req_set_callback(subreq, r_do_challenge_handler, state);
 
 	talloc_steal(partner, state);
…
 
 struct r_do_release_demand_state {
-	struct messaging_context *msg_ctx;
 	struct nbtd_proxy_wins_release_demand r;
 };
 
-static void r_do_release_demand_handler(struct irpc_request *ireq)
+static void r_do_release_demand_handler(struct tevent_req *subreq)
 {
 	NTSTATUS status;
-	struct r_do_release_demand_state *state = talloc_get_type(ireq->async.private_data,
-							           struct r_do_release_demand_state);
-
-	status = irpc_call_recv(ireq);
+	struct r_do_release_demand_state *state =
+		tevent_req_callback_data(subreq,
+		struct r_do_release_demand_state);
+
+	status = dcerpc_nbtd_proxy_wins_release_demand_r_recv(subreq, state);
+	TALLOC_FREE(subreq);
+
 	/* don't care about the result */
 	talloc_free(state);
…
 {
 	NTSTATUS status;
-	struct irpc_request *ireq;
-	struct server_id *nbt_servers;
+	struct dcerpc_binding_handle *irpc_handle;
 	const char **addrs;
 	struct winsdb_addr **addresses;
 	struct r_do_release_demand_state *state;
+	struct tevent_req *subreq;
 	uint32_t i;
 
…
 	state = talloc_zero(mem_ctx, struct r_do_release_demand_state);
 	NT_STATUS_HAVE_NO_MEMORY(state);
-	state->msg_ctx = partner->service->task->msg_ctx;
-
-	nbt_servers = irpc_servers_byname(state->msg_ctx, state, "nbt_server");
-	if ((nbt_servers == NULL) || (nbt_servers[0].id == 0)) {
+
+	irpc_handle = irpc_binding_handle_by_name(state,
+						  partner->service->task->msg_ctx,
+						  "nbt_server",
+						  &ndr_table_irpc);
+	if (irpc_handle == NULL) {
 		return NT_STATUS_INTERNAL_ERROR;
 	}
…
 	}
 
-	ireq = IRPC_CALL_SEND(state->msg_ctx, nbt_servers[0],
-			      irpc, NBTD_PROXY_WINS_RELEASE_DEMAND,
-			      &state->r, state);
-	NT_STATUS_HAVE_NO_MEMORY(ireq);
-
-	ireq->async.fn		= r_do_release_demand_handler;
-	ireq->async.private_data= state;
+	subreq = dcerpc_nbtd_proxy_wins_release_demand_r_send(state,
+			partner->service->task->event_ctx,
+			irpc_handle,
+			&state->r);
+	NT_STATUS_HAVE_NO_MEMORY(subreq);
+
+	tevent_req_set_callback(subreq, r_do_release_demand_handler, state);
 
 	talloc_steal(partner, state);
…
 	bool skip_replica_owned_by_us = false;
 	bool become_owner = true;
-	bool propagate = lp_parm_bool(partner->service->task->lp_ctx, NULL, "wreplsrv", "propagate name releases", false);
+	bool propagate = lpcfg_parm_bool(partner->service->task->lp_ctx, NULL, "wreplsrv", "propagate name releases", false);
 	const char *local_owner = partner->service->wins_db->local_owner;
 
…
 	bool replica_vs_replica = false;
 	bool local_vs_replica = false;
+
+	if (replica->name.scope) {
+		TALLOC_CTX *parent;
+		const char *scope;
+
+		/*
+		 * Windows 2008 truncates the scope to 237 bytes,
+		 * so we do...
+		 */
+		parent = talloc_parent(replica->name.scope);
+		scope = talloc_strndup(parent, replica->name.scope, 237);
+		NT_STATUS_HAVE_NO_MEMORY(scope);
+		replica->name.scope = scope;
+	}
 
 	status = winsdb_lookup(partner->service->wins_db,
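The recurring pattern in this file is the move from the old raw irpc API (irpc_servers_byname() plus IRPC_CALL_SEND(), with the callback wired up through ireq->async.fn and ireq->async.private_data) to a dcerpc binding handle and the pidl-generated tevent_req wrappers. Condensed into one place, using only the names that appear in the hunks above (error paths shortened; this is a sketch, not a function that exists in the tree):

static NTSTATUS example_challenge_send(struct r_do_challenge_state *state,
				       struct wreplsrv_partner *partner)
{
	struct tevent_req *subreq;

	/* resolve the server by name once; keep a reusable binding handle */
	state->irpc_handle = irpc_binding_handle_by_name(state,
					partner->service->task->msg_ctx,
					"nbt_server", &ndr_table_irpc);
	if (state->irpc_handle == NULL) {
		return NT_STATUS_INTERNAL_ERROR;
	}

	/* the generated _r_send() replaces IRPC_CALL_SEND() */
	subreq = dcerpc_nbtd_proxy_wins_challenge_r_send(state,
					partner->service->task->event_ctx,
					state->irpc_handle, &state->r);
	NT_STATUS_HAVE_NO_MEMORY(subreq);

	/* one call replaces the async.fn/async.private_data pair */
	tevent_req_set_callback(subreq, r_do_challenge_handler, state);
	return NT_STATUS_OK;
}

Because tevent_req_callback_data() carries the state type, the handlers no longer need the talloc_get_type() cast on a void pointer, and the matching _r_recv() plus TALLOC_FREE(subreq) replaces irpc_call_recv().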
vendor/current/source4/wrepl_server/wrepl_in_call.c
--- r414
+++ r740

 #include "includes.h"
 #include "lib/events/events.h"
-#include "lib/socket/socket.h"
+#include "lib/tsocket/tsocket.h"
+#include "smbd/service_task.h"
 #include "smbd/service_stream.h"
 #include "libcli/wrepl/winsrepl.h"
…
 #include "libcli/composite/composite.h"
 #include "nbt_server/wins/winsdb.h"
-#include "lib/ldb/include/ldb.h"
-#include "lib/ldb/include/ldb_errors.h"
+#include <ldb.h>
+#include <ldb_errors.h>
 #include "system/time.h"
+#include "lib/util/tsort.h"
+#include "param/param.h"
 
 static NTSTATUS wreplsrv_in_start_association(struct wreplsrv_in_call *call)
…
 	struct winsdb_record *rec;
 	NTSTATUS status;
-	uint32_t i, j;
+	unsigned int i, j;
 	time_t now = time(NULL);
…
 
 	/* sort the names before we send them */
-	qsort(names, j, sizeof(struct wrepl_wins_name), (comparison_fn_t)wreplsrv_in_sort_wins_name);
+	TYPESAFE_QSORT(names, j, wreplsrv_in_sort_wins_name);
 
 	DEBUG(2,("WINSREPL:reply [%u] records owner[%s] min[%llu] max[%llu] to partner[%s]\n",
…
 	struct wrepl_table *update_in = &call->req_packet.message.replication.info.table;
 	struct wreplsrv_in_update_state *update_state;
-	uint16_t fde_flags;
+	NTSTATUS status;
 
 	DEBUG(2,("WREPL_REPL_UPDATE: partner[%s] initiator[%s] num_owners[%u]\n",
 		 call->wreplconn->partner->address,
 		 update_in->initiator, update_in->partner_count));
-
-	/*
-	 * we need to flip the connection into a client connection
-	 * and do a WREPL_REPL_SEND_REQUEST's on the that connection
-	 * and then stop this connection
-	 */
-	fde_flags = event_get_fd_flags(wrepl_in->conn->event.fde);
-	talloc_free(wrepl_in->conn->event.fde);
-	wrepl_in->conn->event.fde = NULL;
 
 	update_state = talloc(wrepl_in, struct wreplsrv_in_update_state);
…
 	wrepl_out->assoc_ctx.our_ctx = wrepl_in->assoc_ctx.our_ctx;
 	wrepl_out->assoc_ctx.peer_ctx = wrepl_in->assoc_ctx.peer_ctx;
-	wrepl_out->sock = wrepl_socket_merge(wrepl_out,
-					     wrepl_in->conn->event.ctx,
-					     wrepl_in->conn->socket,
-					     wrepl_in->packet);
-	NT_STATUS_HAVE_NO_MEMORY(wrepl_out->sock);
-
-	event_set_fd_flags(wrepl_out->sock->event.fde, fde_flags);
+	wrepl_out->sock = wrepl_socket_init(wrepl_out,
+					    wrepl_in->conn->event.ctx);
+
+	NT_STATUS_HAVE_NO_MEMORY_AND_FREE(wrepl_out->sock, update_state);
+
+	TALLOC_FREE(wrepl_in->send_queue);
+
+	status = wrepl_socket_donate_stream(wrepl_out->sock, &wrepl_in->tstream);
+	NT_STATUS_NOT_OK_RETURN_AND_FREE(status, update_state);
 
 	update_state->wrepl_in = wrepl_in;
…
 	update_state->creq = wreplsrv_pull_cycle_send(update_state, &update_state->cycle_io);
 	if (!update_state->creq) {
+		talloc_free(update_state);
 		return NT_STATUS_INTERNAL_ERROR;
 	}
…
 
 	if (!call->wreplconn->partner) {
-		struct socket_address *partner_ip = socket_get_peer_addr(call->wreplconn->conn->socket, call);
-
-		call->wreplconn->partner = wreplsrv_find_partner(call->wreplconn->service, partner_ip->addr);
+		struct tsocket_address *peer_addr = call->wreplconn->conn->remote_address;
+		char *peer_ip;
+
+		if (!tsocket_address_is_inet(peer_addr, "ipv4")) {
+			DEBUG(0,("wreplsrv_in_replication: non ipv4 peer addr '%s'\n",
+				tsocket_address_string(peer_addr, call)));
+			return NT_STATUS_INTERNAL_ERROR;
+		}
+
+		peer_ip = tsocket_address_inet_addr_string(peer_addr, call);
+		if (peer_ip == NULL) {
+			return NT_STATUS_NO_MEMORY;
+		}
+
+		call->wreplconn->partner = wreplsrv_find_partner(call->wreplconn->service, peer_ip);
 		if (!call->wreplconn->partner) {
-			DEBUG(1,("Failing WINS replication from non-partner %s\n",
-				partner_ip ? partner_ip->addr : NULL));
+			DEBUG(1,("Failing WINS replication from non-partner %s\n", peer_ip));
 			return wreplsrv_in_stop_assoc_ctx(call);
 		}
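The qsort() to TYPESAFE_QSORT() change deserves a note: the old call needed a (comparison_fn_t) cast, which silences the compiler even when the comparison function takes the wrong element type. A minimal wrapper in the same spirit (illustrative only; the real macro lives in lib/util/tsort.h and may differ in detail):

#include <stdlib.h>

/*
 * Illustrative type-checked qsort wrapper. The direct cmp() call on two
 * real elements forces the compiler to type-check the comparison function
 * against the array's element type, which the cast on the qsort() line
 * silently bypasses.
 */
#define MY_TYPESAFE_QSORT(base, numel, cmp) \
do { \
	if ((numel) > 1) { \
		qsort((base), (numel), sizeof((base)[0]), \
		      (int (*)(const void *, const void *))(cmp)); \
		if ((cmp)(&(base)[0], &(base)[1]) > 0) { \
			abort(); /* comparison inconsistent after sorting */ \
		} \
	} \
} while (0)

With this shape, sorting a struct wrepl_wins_name array compiles only if wreplsrv_in_sort_wins_name really takes struct wrepl_wins_name pointers.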
vendor/current/source4/wrepl_server/wrepl_in_connection.c
--- r414
+++ r740

 #include "system/network.h"
 #include "lib/socket/netif.h"
+#include "lib/tsocket/tsocket.h"
+#include "libcli/util/tstream.h"
 #include "param/param.h"
…
 }
 
-static int terminate_after_send_destructor(struct wreplsrv_in_connection **tas)
-{
-	wreplsrv_terminate_in_connection(*tas, "wreplsrv_in_connection: terminate_after_send");
-	return 0;
-}
-
 /*
   receive some data on a WREPL connection
 */
-static NTSTATUS wreplsrv_recv_request(void *private_data, DATA_BLOB blob)
-{
-	struct wreplsrv_in_connection *wreplconn = talloc_get_type(private_data, struct wreplsrv_in_connection);
-	struct wreplsrv_in_call *call;
-	DATA_BLOB packet_in_blob;
-	DATA_BLOB packet_out_blob;
+static NTSTATUS wreplsrv_process(struct wreplsrv_in_connection *wrepl_conn,
+				 struct wreplsrv_in_call **_call)
+{
 	struct wrepl_wrap packet_out_wrap;
 	NTSTATUS status;
 	enum ndr_err_code ndr_err;
-
-	call = talloc_zero(wreplconn, struct wreplsrv_in_call);
-	NT_STATUS_HAVE_NO_MEMORY(call);
-	call->wreplconn = wreplconn;
-	talloc_steal(call, blob.data);
-
-	packet_in_blob.data = blob.data + 4;
-	packet_in_blob.length = blob.length - 4;
-
-	ndr_err = ndr_pull_struct_blob(&packet_in_blob, call,
-				       lp_iconv_convenience(wreplconn->service->task->lp_ctx),
+	struct wreplsrv_in_call *call = *_call;
+
+	ndr_err = ndr_pull_struct_blob(&call->in, call,
 				       &call->req_packet,
 				       (ndr_pull_flags_fn_t)ndr_pull_wrepl_packet);
…
 
 	if (DEBUGLVL(10)) {
-		DEBUG(10,("Received WINS-Replication packet of length %u\n",
-			  (unsigned)packet_in_blob.length + 4));
+		DEBUG(10,("Received WINS-Replication packet of length %u\n",
+			  (unsigned int) call->in.length + 4));
 		NDR_PRINT_DEBUG(wrepl_packet, &call->req_packet);
 	}
…
 		/* w2k just ignores invalid packets, so we do */
 		DEBUG(10,("Received WINS-Replication packet was invalid, we just ignore it\n"));
-		talloc_free(call);
+		TALLOC_FREE(call);
+		*_call = NULL;
 		return NT_STATUS_OK;
 	}
…
 	/* and now encode the reply */
 	packet_out_wrap.packet = call->rep_packet;
-	ndr_err = ndr_push_struct_blob(&packet_out_blob, call,
-				       lp_iconv_convenience(wreplconn->service->task->lp_ctx),
+	ndr_err = ndr_push_struct_blob(&call->out, call,
 				       &packet_out_wrap,
-				       (ndr_push_flags_fn_t)ndr_push_wrepl_wrap);
+				       (ndr_push_flags_fn_t) ndr_push_wrepl_wrap);
 	if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
 		return ndr_map_error2ntstatus(ndr_err);
…
 
 	if (DEBUGLVL(10)) {
-		DEBUG(10,("Sending WINS-Replication packet of length %d\n", (int)packet_out_blob.length));
+		DEBUG(10,("Sending WINS-Replication packet of length %u\n",
+			  (unsigned int) call->out.length));
 		NDR_PRINT_DEBUG(wrepl_packet, &call->rep_packet);
 	}
 
+	return NT_STATUS_OK;
+}
+
+static void wreplsrv_call_loop(struct tevent_req *subreq);
+
+/*
+  called when we get a new connection
+*/
+static void wreplsrv_accept(struct stream_connection *conn)
+{
+	struct wreplsrv_service *service = talloc_get_type(conn->private_data, struct wreplsrv_service);
+	struct wreplsrv_in_connection *wrepl_conn;
+	struct tsocket_address *peer_addr;
+	char *peer_ip;
+	struct tevent_req *subreq;
+	int rc;
+
+	wrepl_conn = talloc_zero(conn, struct wreplsrv_in_connection);
+	if (wrepl_conn == NULL) {
+		stream_terminate_connection(conn,
+					    "wreplsrv_accept: out of memory");
+		return;
+	}
+
+	wrepl_conn->send_queue = tevent_queue_create(conn, "wrepl_accept");
+	if (wrepl_conn->send_queue == NULL) {
+		stream_terminate_connection(conn,
+					    "wrepl_accept: out of memory");
+		return;
+	}
+
+	TALLOC_FREE(conn->event.fde);
+
+	rc = tstream_bsd_existing_socket(wrepl_conn,
+					 socket_get_fd(conn->socket),
+					 &wrepl_conn->tstream);
+	if (rc < 0) {
+		stream_terminate_connection(conn,
+					    "wrepl_accept: out of memory");
+		return;
+	}
+	socket_set_flags(conn->socket, SOCKET_FLAG_NOCLOSE);
+
+	wrepl_conn->conn = conn;
+	wrepl_conn->service = service;
+
+	peer_addr = conn->remote_address;
+
+	if (!tsocket_address_is_inet(peer_addr, "ipv4")) {
+		DEBUG(0,("wreplsrv_accept: non ipv4 peer addr '%s'\n",
+			tsocket_address_string(peer_addr, wrepl_conn)));
+		wreplsrv_terminate_in_connection(wrepl_conn, "wreplsrv_accept: "
+				"invalid peer IP");
+		return;
+	}
+
+	peer_ip = tsocket_address_inet_addr_string(peer_addr, wrepl_conn);
+	if (peer_ip == NULL) {
+		wreplsrv_terminate_in_connection(wrepl_conn, "wreplsrv_accept: "
+				"could not convert peer IP into a string");
+		return;
+	}
+
+	wrepl_conn->partner = wreplsrv_find_partner(service, peer_ip);
+	irpc_add_name(conn->msg_ctx, "wreplsrv_connection");
+
+	/*
+	 * The wrepl pdu's has the length as 4 byte (initial_read_size),
+	 * packet_full_request_u32 provides the pdu length then.
+	 */
+	subreq = tstream_read_pdu_blob_send(wrepl_conn,
+					    wrepl_conn->conn->event.ctx,
+					    wrepl_conn->tstream,
+					    4, /* initial_read_size */
+					    packet_full_request_u32,
+					    wrepl_conn);
+	if (subreq == NULL) {
+		wreplsrv_terminate_in_connection(wrepl_conn, "wrepl_accept: "
+				"no memory for tstream_read_pdu_blob_send");
+		return;
+	}
+	tevent_req_set_callback(subreq, wreplsrv_call_loop, wrepl_conn);
+}
+
+static void wreplsrv_call_writev_done(struct tevent_req *subreq);
+
+static void wreplsrv_call_loop(struct tevent_req *subreq)
+{
+	struct wreplsrv_in_connection *wrepl_conn = tevent_req_callback_data(subreq,
+			struct wreplsrv_in_connection);
+	struct wreplsrv_in_call *call;
+	NTSTATUS status;
+
+	call = talloc_zero(wrepl_conn, struct wreplsrv_in_call);
+	if (call == NULL) {
+		wreplsrv_terminate_in_connection(wrepl_conn, "wreplsrv_call_loop: "
+				"no memory for wrepl_samba3_call");
+		return;
+	}
+	call->wreplconn = wrepl_conn;
+
+	status = tstream_read_pdu_blob_recv(subreq,
+					    call,
+					    &call->in);
+	TALLOC_FREE(subreq);
+	if (!NT_STATUS_IS_OK(status)) {
+		const char *reason;
+
+		reason = talloc_asprintf(call, "wreplsrv_call_loop: "
+					 "tstream_read_pdu_blob_recv() - %s",
+					 nt_errstr(status));
+		if (!reason) {
+			reason = nt_errstr(status);
+		}
+
+		wreplsrv_terminate_in_connection(wrepl_conn, reason);
+		return;
+	}
+
+	DEBUG(10,("Received wrepl packet of length %lu from %s\n",
+		  (long) call->in.length,
+		  tsocket_address_string(wrepl_conn->conn->remote_address, call)));
+
+	/* skip length header */
+	call->in.data += 4;
+	call->in.length -= 4;
+
+	status = wreplsrv_process(wrepl_conn, &call);
+	if (!NT_STATUS_IS_OK(status)) {
+		const char *reason;
+
+		reason = talloc_asprintf(call, "wreplsrv_call_loop: "
+					 "tstream_read_pdu_blob_recv() - %s",
+					 nt_errstr(status));
+		if (reason == NULL) {
+			reason = nt_errstr(status);
+		}
+
+		wreplsrv_terminate_in_connection(wrepl_conn, reason);
+		return;
+	}
+
+	/* We handed over the connection so we're done here */
+	if (wrepl_conn->tstream == NULL) {
+		return;
+	}
+
+	/* Invalid WINS-Replication packet, we just ignore it */
+	if (call == NULL) {
+		goto noreply;
+	}
+
+	call->out_iov[0].iov_base = (char *) call->out.data;
+	call->out_iov[0].iov_len = call->out.length;
+
+	subreq = tstream_writev_queue_send(call,
+					   wrepl_conn->conn->event.ctx,
+					   wrepl_conn->tstream,
+					   wrepl_conn->send_queue,
+					   call->out_iov, 1);
+	if (subreq == NULL) {
+		wreplsrv_terminate_in_connection(wrepl_conn, "wreplsrv_call_loop: "
+				"no memory for tstream_writev_queue_send");
+		return;
+	}
+	tevent_req_set_callback(subreq, wreplsrv_call_writev_done, call);
+
+noreply:
+	/*
+	 * The wrepl pdu's has the length as 4 byte (initial_read_size),
+	 * provides the pdu length then.
+	 */
+	subreq = tstream_read_pdu_blob_send(wrepl_conn,
+					    wrepl_conn->conn->event.ctx,
+					    wrepl_conn->tstream,
+					    4, /* initial_read_size */
+					    packet_full_request_u32,
+					    wrepl_conn);
+	if (subreq == NULL) {
+		wreplsrv_terminate_in_connection(wrepl_conn, "wreplsrv_call_loop: "
+				"no memory for tstream_read_pdu_blob_send");
+		return;
+	}
+	tevent_req_set_callback(subreq, wreplsrv_call_loop, wrepl_conn);
+}
+
+static void wreplsrv_call_writev_done(struct tevent_req *subreq)
+{
+	struct wreplsrv_in_call *call = tevent_req_callback_data(subreq,
+			struct wreplsrv_in_call);
+	int sys_errno;
+	int rc;
+
+	rc = tstream_writev_queue_recv(subreq, &sys_errno);
+	TALLOC_FREE(subreq);
+	if (rc == -1) {
+		const char *reason;
+
+		reason = talloc_asprintf(call, "wreplsrv_call_writev_done: "
+					 "tstream_writev_queue_recv() - %d:%s",
+					 sys_errno, strerror(sys_errno));
+		if (reason == NULL) {
+			reason = "wreplsrv_call_writev_done: "
+				 "tstream_writev_queue_recv() failed";
+		}
+
+		wreplsrv_terminate_in_connection(call->wreplconn, reason);
+		return;
+	}
+
 	if (call->terminate_after_send) {
-		struct wreplsrv_in_connection **tas;
-		tas = talloc(packet_out_blob.data, struct wreplsrv_in_connection *);
-		NT_STATUS_HAVE_NO_MEMORY(tas);
-		*tas = wreplconn;
-		talloc_set_destructor(tas, terminate_after_send_destructor);
-	}
-
-	status = packet_send(wreplconn->packet, packet_out_blob);
-	NT_STATUS_NOT_OK_RETURN(status);
+		wreplsrv_terminate_in_connection(call->wreplconn,
+				"wreplsrv_in_connection: terminate_after_send");
+		return;
+	}
 
 	talloc_free(call);
-	return NT_STATUS_OK;
 }
 
 /*
-  called when the socket becomes readable
+  called on a tcp recv
 */
 static void wreplsrv_recv(struct stream_connection *conn, uint16_t flags)
 {
-	struct wreplsrv_in_connection *wreplconn = talloc_get_type(conn->private_data,
-							           struct wreplsrv_in_connection);
-
-	packet_recv(wreplconn->packet);
-}
-
-/*
-  called when the socket becomes writable
+	struct wreplsrv_in_connection *wrepl_conn = talloc_get_type(conn->private_data,
+			struct wreplsrv_in_connection);
+	/* this should never be triggered! */
+	DEBUG(0,("Terminating connection - '%s'\n", "wrepl_recv: called"));
+	wreplsrv_terminate_in_connection(wrepl_conn, "wrepl_recv: called");
+}
+
+/*
+  called when we can write to a connection
 */
 static void wreplsrv_send(struct stream_connection *conn, uint16_t flags)
 {
-	struct wreplsrv_in_connection *wreplconn = talloc_get_type(conn->private_data,
-							           struct wreplsrv_in_connection);
-	packet_queue_run(wreplconn->packet);
-}
-
-/*
-  handle socket recv errors
-*/
-static void wreplsrv_recv_error(void *private_data, NTSTATUS status)
-{
-	struct wreplsrv_in_connection *wreplconn = talloc_get_type(private_data,
-							           struct wreplsrv_in_connection);
-	wreplsrv_terminate_in_connection(wreplconn, nt_errstr(status));
-}
-
-/*
-  called when we get a new connection
-*/
-static void wreplsrv_accept(struct stream_connection *conn)
-{
-	struct wreplsrv_service *service = talloc_get_type(conn->private_data, struct wreplsrv_service);
-	struct wreplsrv_in_connection *wreplconn;
-	struct socket_address *peer_ip;
-
-	wreplconn = talloc_zero(conn, struct wreplsrv_in_connection);
-	if (!wreplconn) {
-		stream_terminate_connection(conn, "wreplsrv_accept: out of memory");
-		return;
-	}
-
-	wreplconn->packet = packet_init(wreplconn);
-	if (!wreplconn->packet) {
-		wreplsrv_terminate_in_connection(wreplconn, "wreplsrv_accept: out of memory");
-		return;
-	}
-	packet_set_private(wreplconn->packet, wreplconn);
-	packet_set_socket(wreplconn->packet, conn->socket);
-	packet_set_callback(wreplconn->packet, wreplsrv_recv_request);
-	packet_set_full_request(wreplconn->packet, packet_full_request_u32);
-	packet_set_error_handler(wreplconn->packet, wreplsrv_recv_error);
-	packet_set_event_context(wreplconn->packet, conn->event.ctx);
-	packet_set_fde(wreplconn->packet, conn->event.fde);
-	packet_set_serialise(wreplconn->packet);
-
-	wreplconn->conn		= conn;
-	wreplconn->service	= service;
-
-	peer_ip = socket_get_peer_addr(conn->socket, wreplconn);
-	if (!peer_ip) {
-		wreplsrv_terminate_in_connection(wreplconn, "wreplsrv_accept: could not obtain peer IP from kernel");
-		return;
-	}
-
-	wreplconn->partner	= wreplsrv_find_partner(service, peer_ip->addr);
-
-	conn->private_data	= wreplconn;
-
-	irpc_add_name(conn->msg_ctx, "wreplsrv_connection");
+	struct wreplsrv_in_connection *wrepl_conn = talloc_get_type(conn->private_data,
+			struct wreplsrv_in_connection);
+	/* this should never be triggered! */
+	DEBUG(0,("Terminating connection - '%s'\n", "wrepl_send: called"));
+	wreplsrv_terminate_in_connection(wrepl_conn, "wrepl_send: called");
 }
…
 */
 NTSTATUS wreplsrv_in_connection_merge(struct wreplsrv_partner *partner,
-				      struct socket_context *sock,
-				      struct packet_context *packet,
+				      uint32_t peer_assoc_ctx,
+				      struct tstream_context **stream,
 				      struct wreplsrv_in_connection **_wrepl_in)
 {
…
 	const struct model_ops *model_ops;
 	struct stream_connection *conn;
+	struct tevent_req *subreq;
 	NTSTATUS status;
 
…
 	   ask for the single process model ops and pass these to the
 	   stream_setup_socket() call. */
-	model_ops = process_model_startup(service->task->event_ctx, "single");
+	model_ops = process_model_startup("single");
 	if (!model_ops) {
 		DEBUG(0,("Can't find 'single' process model_ops"));
…
 	wrepl_in->service	= service;
 	wrepl_in->partner	= partner;
-
-	status = stream_new_connection_merge(service->task->event_ctx, service->task->lp_ctx, model_ops,
-					     sock, &wreplsrv_stream_ops, service->task->msg_ctx,
-					     wrepl_in, &conn);
+	wrepl_in->tstream	= talloc_move(wrepl_in, stream);
+	wrepl_in->assoc_ctx.peer_ctx = peer_assoc_ctx;
+
+	status = stream_new_connection_merge(service->task->event_ctx,
+					     service->task->lp_ctx,
+					     model_ops,
+					     &wreplsrv_stream_ops,
+					     service->task->msg_ctx,
+					     wrepl_in,
+					     &conn);
 	NT_STATUS_NOT_OK_RETURN(status);
 
…
 	talloc_steal(conn, wrepl_in);
 
+	wrepl_in->send_queue = tevent_queue_create(wrepl_in, "wreplsrv_in_connection_merge");
+	if (wrepl_in->send_queue == NULL) {
+		stream_terminate_connection(conn,
+				"wreplsrv_in_connection_merge: out of memory");
+		return NT_STATUS_NO_MEMORY;
+	}
+
 	/*
-	 * now update the packet handling callback,...
+	 * The wrepl pdu's has the length as 4 byte (initial_read_size),
+	 * packet_full_request_u32 provides the pdu length then.
 	 */
-	wrepl_in->packet = talloc_steal(wrepl_in, packet);
-	packet_set_private(wrepl_in->packet, wrepl_in);
-	packet_set_socket(wrepl_in->packet, conn->socket);
-	packet_set_callback(wrepl_in->packet, wreplsrv_recv_request);
-	packet_set_full_request(wrepl_in->packet, packet_full_request_u32);
-	packet_set_error_handler(wrepl_in->packet, wreplsrv_recv_error);
-	packet_set_event_context(wrepl_in->packet, conn->event.ctx);
-	packet_set_fde(wrepl_in->packet, conn->event.fde);
-	packet_set_serialise(wrepl_in->packet);
+	subreq = tstream_read_pdu_blob_send(wrepl_in,
+					    wrepl_in->conn->event.ctx,
+					    wrepl_in->tstream,
+					    4, /* initial_read_size */
+					    packet_full_request_u32,
+					    wrepl_in);
+	if (subreq == NULL) {
+		wreplsrv_terminate_in_connection(wrepl_in, "wreplsrv_in_connection_merge: "
+				"no memory for tstream_read_pdu_blob_send");
+		return NT_STATUS_NO_MEMORY;
+	}
+	tevent_req_set_callback(subreq, wreplsrv_call_loop, wrepl_in);
 
 	*_wrepl_in = wrepl_in;
+
 	return NT_STATUS_OK;
 }
…
 	   ask for the single process model ops and pass these to the
 	   stream_setup_socket() call. */
-	model_ops = process_model_startup(task->event_ctx, "single");
+	model_ops = process_model_startup("single");
 	if (!model_ops) {
 		DEBUG(0,("Can't find 'single' process model_ops"));
…
 	}
 
-	if (lp_interfaces(lp_ctx) && lp_bind_interfaces_only(lp_ctx)) {
+	if (lpcfg_interfaces(lp_ctx) && lpcfg_bind_interfaces_only(lp_ctx)) {
 		int num_interfaces;
 		int i;
 		struct interface *ifaces;
 
-		load_interfaces(task, lp_interfaces(lp_ctx), &ifaces);
+		load_interfaces(task, lpcfg_interfaces(lp_ctx), &ifaces);
 
 		num_interfaces = iface_count(ifaces);
…
 		for(i = 0; i < num_interfaces; i++) {
 			address = iface_n_ip(ifaces, i);
-			status = stream_setup_socket(task->event_ctx,
-						     task->lp_ctx, model_ops,
+			status = stream_setup_socket(task, task->event_ctx,
+						     task->lp_ctx, model_ops,
 						     &wreplsrv_stream_ops,
 						     "ipv4", address, &port,
-						     lp_socket_options(task->lp_ctx),
+						     lpcfg_socket_options(task->lp_ctx),
 						     service);
 			if (!NT_STATUS_IS_OK(status)) {
…
 		}
 	} else {
-		address = lp_socket_address(lp_ctx);
-		status = stream_setup_socket(task->event_ctx, task->lp_ctx,
+		address = lpcfg_socket_address(lp_ctx);
+		status = stream_setup_socket(task, task->event_ctx, task->lp_ctx,
 					     model_ops, &wreplsrv_stream_ops,
-					     "ipv4", address, &port, lp_socket_options(task->lp_ctx),
+					     "ipv4", address, &port, lpcfg_socket_options(task->lp_ctx),
 					     service);
 		if (!NT_STATUS_IS_OK(status)) {
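The new receive path is a self re-arming loop: tstream_read_pdu_blob_send() reads until packet_full_request_u32 reports a complete PDU, the callback processes the call and immediately arms the next read, and replies go out through the tevent_queue. The framing itself is a 4-byte length prefix followed by the payload. A simplified, self-contained equivalent of the helper's contract (an assumption: this mirrors what packet_full_request_u32 does, whose source is not part of this changeset):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>	/* ntohl() */

static bool wrepl_full_packet(const uint8_t *buf, size_t buflen,
			      size_t *packet_size)
{
	uint32_t payload_len;

	if (buflen < 4) {
		return false;	/* length prefix not complete yet */
	}
	memcpy(&payload_len, buf, 4);
	*packet_size = 4 + ntohl(payload_len);	/* header + payload */
	return buflen >= *packet_size;
}

Because writes are serialized by the send_queue, wreplsrv_call_loop() can re-arm the next read without waiting for the reply to hit the wire; wreplsrv_call_writev_done() only has to handle write errors and the terminate_after_send case.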
vendor/current/source4/wrepl_server/wrepl_out_helpers.c
--- r414
+++ r740

 	enum wreplsrv_out_connect_stage stage;
 	struct composite_context *c;
-	struct wrepl_request *req;
-	struct composite_context *c_req;
 	struct wrepl_associate assoc_io;
 	enum winsrepl_partner_type type;
 	struct wreplsrv_out_connection *wreplconn;
+	struct tevent_req *subreq;
 };
 
-static void wreplsrv_out_connect_handler_creq(struct composite_context *c_req);
-static void wreplsrv_out_connect_handler_req(struct wrepl_request *req);
+static void wreplsrv_out_connect_handler_treq(struct tevent_req *subreq);
 
 static NTSTATUS wreplsrv_out_connect_wait_socket(struct wreplsrv_out_connect_state *state)
…
 	NTSTATUS status;
 
-	status = wrepl_connect_recv(state->c_req);
-	NT_STATUS_NOT_OK_RETURN(status);
-
-	state->req = wrepl_associate_send(state->wreplconn->sock, &state->assoc_io);
-	NT_STATUS_HAVE_NO_MEMORY(state->req);
-
-	state->req->async.fn		= wreplsrv_out_connect_handler_req;
-	state->req->async.private_data	= state;
+	status = wrepl_connect_recv(state->subreq);
+	TALLOC_FREE(state->subreq);
+	NT_STATUS_NOT_OK_RETURN(status);
+
+	state->subreq = wrepl_associate_send(state,
+					     state->wreplconn->service->task->event_ctx,
+					     state->wreplconn->sock, &state->assoc_io);
+	NT_STATUS_HAVE_NO_MEMORY(state->subreq);
+
+	tevent_req_set_callback(state->subreq,
+				wreplsrv_out_connect_handler_treq,
+				state);
 
 	state->stage = WREPLSRV_OUT_CONNECT_STAGE_WAIT_ASSOC_CTX;
…
 	NTSTATUS status;
 
-	status = wrepl_associate_recv(state->req, &state->assoc_io);
+	status = wrepl_associate_recv(state->subreq, &state->assoc_io);
+	TALLOC_FREE(state->subreq);
 	NT_STATUS_NOT_OK_RETURN(status);
 
…
 }
 
-static void wreplsrv_out_connect_handler_creq(struct composite_context *creq)
-{
-	struct wreplsrv_out_connect_state *state = talloc_get_type(creq->async.private_data,
-					  struct wreplsrv_out_connect_state);
-	wreplsrv_out_connect_handler(state);
-	return;
-}
-
-static void wreplsrv_out_connect_handler_req(struct wrepl_request *req)
-{
-	struct wreplsrv_out_connect_state *state = talloc_get_type(req->async.private_data,
+static void wreplsrv_out_connect_handler_treq(struct tevent_req *subreq)
+{
+	struct wreplsrv_out_connect_state *state = tevent_req_callback_data(subreq,
 					  struct wreplsrv_out_connect_state);
 	wreplsrv_out_connect_handler(state);
…
 	/* we have a connection already, so use it */
 	if (wreplconn) {
-		if (!wreplconn->sock->dead) {
+		if (wrepl_socket_is_connected(wreplconn->sock)) {
 			state->stage	= WREPLSRV_OUT_CONNECT_STAGE_DONE;
 			state->wreplconn= wreplconn;
…
 	wreplconn->service	= service;
 	wreplconn->partner	= partner;
-	wreplconn->sock		= wrepl_socket_init(wreplconn, service->task->event_ctx, lp_iconv_convenience(service->task->lp_ctx));
+	wreplconn->sock		= wrepl_socket_init(wreplconn, service->task->event_ctx);
 	if (!wreplconn->sock) goto failed;
 
 	state->stage	= WREPLSRV_OUT_CONNECT_STAGE_WAIT_SOCKET;
 	state->wreplconn= wreplconn;
-	state->c_req = wrepl_connect_send(wreplconn->sock,
+	state->subreq = wrepl_connect_send(state,
+					   service->task->event_ctx,
+					   wreplconn->sock,
 					   partner->our_address?partner->our_address:wrepl_best_ip(service->task->lp_ctx, partner->address),
 					   partner->address);
-	if (!state->c_req) goto failed;
-
-	state->c_req->async.fn			= wreplsrv_out_connect_handler_creq;
-	state->c_req->async.private_data	= state;
+	if (!state->subreq) goto failed;
+
+	tevent_req_set_callback(state->subreq,
+				wreplsrv_out_connect_handler_treq,
+				state);
 
 	return c;
…
 	enum wreplsrv_pull_table_stage stage;
 	struct composite_context *c;
-	struct wrepl_request *req;
 	struct wrepl_pull_table table_io;
 	struct wreplsrv_pull_table_io *io;
 	struct composite_context *creq;
 	struct wreplsrv_out_connection *wreplconn;
+	struct tevent_req *subreq;
 };
 
-static void wreplsrv_pull_table_handler_req(struct wrepl_request *req);
+static void wreplsrv_pull_table_handler_treq(struct tevent_req *subreq);
 
 static NTSTATUS wreplsrv_pull_table_wait_connection(struct wreplsrv_pull_table_state *state)
…
 
 	state->table_io.in.assoc_ctx = state->wreplconn->assoc_ctx.peer_ctx;
-	state->req = wrepl_pull_table_send(state->wreplconn->sock, &state->table_io);
-	NT_STATUS_HAVE_NO_MEMORY(state->req);
-
-	state->req->async.fn		= wreplsrv_pull_table_handler_req;
-	state->req->async.private_data	= state;
+	state->subreq = wrepl_pull_table_send(state,
+					      state->wreplconn->service->task->event_ctx,
+					      state->wreplconn->sock, &state->table_io);
+	NT_STATUS_HAVE_NO_MEMORY(state->subreq);
+
+	tevent_req_set_callback(state->subreq,
+				wreplsrv_pull_table_handler_treq,
+				state);
 
 	state->stage = WREPLSRV_PULL_TABLE_STAGE_WAIT_TABLE_REPLY;
…
 	NTSTATUS status;
 
-	status = wrepl_pull_table_recv(state->req, state, &state->table_io);
+	status = wrepl_pull_table_recv(state->subreq, state, &state->table_io);
+	TALLOC_FREE(state->subreq);
 	NT_STATUS_NOT_OK_RETURN(status);
 
…
 }
 
-static void wreplsrv_pull_table_handler_req(struct wrepl_request *req)
-{
-	struct wreplsrv_pull_table_state *state = talloc_get_type(req->async.private_data,
+static void wreplsrv_pull_table_handler_treq(struct tevent_req *subreq)
+{
+	struct wreplsrv_pull_table_state *state = tevent_req_callback_data(subreq,
 					  struct wreplsrv_pull_table_state);
 	wreplsrv_pull_table_handler(state);
…
 	enum wreplsrv_pull_names_stage stage;
 	struct composite_context *c;
-	struct wrepl_request *req;
 	struct wrepl_pull_names pull_io;
 	struct wreplsrv_pull_names_io *io;
 	struct composite_context *creq;
 	struct wreplsrv_out_connection *wreplconn;
+	struct tevent_req *subreq;
 };
 
-static void wreplsrv_pull_names_handler_req(struct wrepl_request *req);
+static void wreplsrv_pull_names_handler_treq(struct tevent_req *subreq);
 
 static NTSTATUS wreplsrv_pull_names_wait_connection(struct wreplsrv_pull_names_state *state)
…
 	state->pull_io.in.assoc_ctx	= state->wreplconn->assoc_ctx.peer_ctx;
 	state->pull_io.in.partner	= state->io->in.owner;
-	state->req = wrepl_pull_names_send(state->wreplconn->sock, &state->pull_io);
-	NT_STATUS_HAVE_NO_MEMORY(state->req);
-
-	state->req->async.fn		= wreplsrv_pull_names_handler_req;
-	state->req->async.private_data	= state;
+	state->subreq = wrepl_pull_names_send(state,
+					      state->wreplconn->service->task->event_ctx,
+					      state->wreplconn->sock,
+					      &state->pull_io);
+	NT_STATUS_HAVE_NO_MEMORY(state->subreq);
+
+	tevent_req_set_callback(state->subreq,
+				wreplsrv_pull_names_handler_treq,
+				state);
 
 	state->stage = WREPLSRV_PULL_NAMES_STAGE_WAIT_SEND_REPLY;
…
 	NTSTATUS status;
 
-	status = wrepl_pull_names_recv(state->req, state, &state->pull_io);
+	status = wrepl_pull_names_recv(state->subreq, state, &state->pull_io);
+	TALLOC_FREE(state->subreq);
 	NT_STATUS_NOT_OK_RETURN(status);
 
…
 }
 
-static void wreplsrv_pull_names_handler_req(struct wrepl_request *req)
-{
-	struct wreplsrv_pull_names_state *state = talloc_get_type(req->async.private_data,
+static void wreplsrv_pull_names_handler_treq(struct tevent_req *subreq)
+{
+	struct wreplsrv_pull_names_state *state = tevent_req_callback_data(subreq,
 					  struct wreplsrv_pull_names_state);
 	wreplsrv_pull_names_handler(state);
…
 	struct composite_context *creq;
 	struct wrepl_associate_stop assoc_stop_io;
-	struct wrepl_request *req;
+	struct tevent_req *subreq;
 };
 
 static void wreplsrv_pull_cycle_handler_creq(struct composite_context *creq);
-static void wreplsrv_pull_cycle_handler_req(struct wrepl_request *req);
+static void wreplsrv_pull_cycle_handler_treq(struct tevent_req *subreq);
 
 static NTSTATUS wreplsrv_pull_cycle_next_owner_do_work(struct wreplsrv_pull_cycle_state *state)
…
 	state->assoc_stop_io.in.assoc_ctx	= state->io->in.wreplconn->assoc_ctx.peer_ctx;
 	state->assoc_stop_io.in.reason		= 0;
-	state->req = wrepl_associate_stop_send(state->io->in.wreplconn->sock, &state->assoc_stop_io);
-	NT_STATUS_HAVE_NO_MEMORY(state->req);
-
-	state->req->async.fn		= wreplsrv_pull_cycle_handler_req;
-	state->req->async.private_data	= state;
+	state->subreq = wrepl_associate_stop_send(state,
+						  state->io->in.wreplconn->service->task->event_ctx,
+						  state->io->in.wreplconn->sock,
+						  &state->assoc_stop_io);
+	NT_STATUS_HAVE_NO_MEMORY(state->subreq);
+
+	tevent_req_set_callback(state->subreq,
+				wreplsrv_pull_cycle_handler_treq,
+				state);
 
 	state->stage = WREPLSRV_PULL_CYCLE_STAGE_WAIT_STOP_ASSOC;
…
 	NTSTATUS status;
 
-	status = wrepl_associate_stop_recv(state->req, &state->assoc_stop_io);
+	status = wrepl_associate_stop_recv(state->subreq, &state->assoc_stop_io);
+	TALLOC_FREE(state->subreq);
 	NT_STATUS_NOT_OK_RETURN(status);
 
…
 }
 
-static void wreplsrv_pull_cycle_handler_req(struct wrepl_request *req)
-{
-	struct wreplsrv_pull_cycle_state *state = talloc_get_type(req->async.private_data,
+static void wreplsrv_pull_cycle_handler_treq(struct tevent_req *subreq)
+{
+	struct wreplsrv_pull_cycle_state *state = tevent_req_callback_data(subreq,
 					  struct wreplsrv_pull_cycle_state);
 	wreplsrv_pull_cycle_handler(state);
…
 enum wreplsrv_push_notify_stage {
 	WREPLSRV_PUSH_NOTIFY_STAGE_WAIT_CONNECT,
+	WREPLSRV_PUSH_NOTIFY_STAGE_WAIT_UPDATE,
 	WREPLSRV_PUSH_NOTIFY_STAGE_WAIT_INFORM,
 	WREPLSRV_PUSH_NOTIFY_STAGE_DONE
…
 	bool full_table;
 	struct wrepl_send_ctrl ctrl;
-	struct wrepl_request *req;
 	struct wrepl_packet req_packet;
 	struct wrepl_packet *rep_packet;
 	struct composite_context *creq;
 	struct wreplsrv_out_connection *wreplconn;
+	struct tevent_req *subreq;
 };
 
 static void wreplsrv_push_notify_handler_creq(struct composite_context *creq);
-static void wreplsrv_push_notify_handler_req(struct wrepl_request *req);
+static void wreplsrv_push_notify_handler_treq(struct tevent_req *subreq);
 
 static NTSTATUS wreplsrv_push_notify_update(struct wreplsrv_push_notify_state *state)
…
 	struct wrepl_replication *repl_out = &state->req_packet.message.replication;
 	struct wrepl_table *table_out = &state->req_packet.message.replication.info.table;
-	struct wreplsrv_in_connection *wrepl_in;
-	NTSTATUS status;
-	struct socket_context *sock;
-	struct packet_context *packet;
-	uint16_t fde_flags;
+	NTSTATUS status;
 
 	/* prepare the outgoing request */
…
 
 	/* queue the request */
-	state->req = wrepl_request_send(state->wreplconn->sock, req, NULL);
-	NT_STATUS_HAVE_NO_MEMORY(state->req);
-
-	/*
-	 * now we need to convert the wrepl_socket (client connection)
-	 * into a wreplsrv_in_connection (server connection), because
-	 * we'll act as a server on this connection after the WREPL_REPL_UPDATE*
-	 * message is received by the peer.
-	 */
-
-	/* steal the socket_context */
-	sock = state->wreplconn->sock->sock;
-	state->wreplconn->sock->sock = NULL;
-	talloc_steal(state, sock);
-
-	/*
-	 * steal the packet_context
-	 * note the request DATA_BLOB we just send on the
-	 * wrepl_socket (client connection) is still unter the
-	 * packet context and will be send to the wire
-	 */
-	packet = state->wreplconn->sock->packet;
-	state->wreplconn->sock->packet = NULL;
-	talloc_steal(state, packet);
-
-	/*
-	 * get the fde_flags of the old fde event,
-	 * so that we can later set the same flags to the new one
-	 */
-	fde_flags = event_get_fd_flags(state->wreplconn->sock->event.fde);
-
-	/*
-	 * free the wrepl_socket (client connection)
-	 */
-	talloc_free(state->wreplconn->sock);
-	state->wreplconn->sock = NULL;
-
-	/*
-	 * now create a wreplsrv_in_connection,
-	 * on which we act as server
-	 *
-	 * NOTE: sock and packet will be stolen by
-	 * wreplsrv_in_connection_merge()
-	 */
-	status = wreplsrv_in_connection_merge(state->io->in.partner,
-					      sock, packet, &wrepl_in);
-	NT_STATUS_NOT_OK_RETURN(status);
-
-	event_set_fd_flags(wrepl_in->conn->event.fde, fde_flags);
-
-	wrepl_in->assoc_ctx.peer_ctx = state->wreplconn->assoc_ctx.peer_ctx;
-	wrepl_in->assoc_ctx.our_ctx = 0;
-
-	/* now we can free the wreplsrv_out_connection */
-	talloc_free(state->wreplconn);
-	state->wreplconn = NULL;
-
-	state->stage = WREPLSRV_PUSH_NOTIFY_STAGE_DONE;
+	state->subreq = wrepl_request_send(state,
+					   state->wreplconn->service->task->event_ctx,
+					   state->wreplconn->sock, req, NULL);
+	NT_STATUS_HAVE_NO_MEMORY(state->subreq);
+
+	tevent_req_set_callback(state->subreq,
+				wreplsrv_push_notify_handler_treq,
+				state);
+
+	state->stage = WREPLSRV_PUSH_NOTIFY_STAGE_WAIT_UPDATE;
 
 	return NT_STATUS_OK;
…
 	state->ctrl.send_only = true;
 
-	state->req = wrepl_request_send(state->wreplconn->sock, req, &state->ctrl);
-	NT_STATUS_HAVE_NO_MEMORY(state->req);
-
-	state->req->async.fn		= wreplsrv_push_notify_handler_req;
-	state->req->async.private_data	= state;
+	state->subreq = wrepl_request_send(state,
+					   state->wreplconn->service->task->event_ctx,
+					   state->wreplconn->sock, req, &state->ctrl);
+	NT_STATUS_HAVE_NO_MEMORY(state->subreq);
+
+	tevent_req_set_callback(state->subreq,
+				wreplsrv_push_notify_handler_treq,
+				state);
 
 	state->stage = WREPLSRV_PUSH_NOTIFY_STAGE_WAIT_INFORM;
…
 		return NT_STATUS_INTERNAL_ERROR;
 	}
-
-	return NT_STATUS_INTERNAL_ERROR;
+}
+
+static NTSTATUS wreplsrv_push_notify_wait_update(struct wreplsrv_push_notify_state *state)
+{
+	struct wreplsrv_in_connection *wrepl_in;
+	struct tstream_context *stream;
+	NTSTATUS status;
+
+	status = wrepl_request_recv(state->subreq, state, NULL);
+	TALLOC_FREE(state->subreq);
+	NT_STATUS_NOT_OK_RETURN(status);
+
+	/*
+	 * now we need to convert the wrepl_socket (client connection)
+	 * into a wreplsrv_in_connection (server connection), because
+	 * we'll act as a server on this connection after the WREPL_REPL_UPDATE*
+	 * message is received by the peer.
+	 */
+
+	status = wrepl_socket_split_stream(state->wreplconn->sock, state, &stream);
+	NT_STATUS_NOT_OK_RETURN(status);
+
+	/*
+	 * now create a wreplsrv_in_connection,
+	 * on which we act as server
+	 *
+	 * NOTE: stream will be stolen by
+	 * wreplsrv_in_connection_merge()
+	 */
+	status = wreplsrv_in_connection_merge(state->io->in.partner,
+					      state->wreplconn->assoc_ctx.peer_ctx,
+					      &stream,
+					      &wrepl_in);
+	NT_STATUS_NOT_OK_RETURN(status);
+
+	/* now we can free the wreplsrv_out_connection */
+	TALLOC_FREE(state->wreplconn);
+
+	state->stage = WREPLSRV_PUSH_NOTIFY_STAGE_DONE;
+	return NT_STATUS_OK;
 }
…
 	NTSTATUS status;
 
-	status = wrepl_request_recv(state->req, state, NULL);
+	status = wrepl_request_recv(state->subreq, state, NULL);
+	TALLOC_FREE(state->subreq);
 	NT_STATUS_NOT_OK_RETURN(status);
 
…
 	case WREPLSRV_PUSH_NOTIFY_STAGE_WAIT_CONNECT:
 		c->status = wreplsrv_push_notify_wait_connect(state);
+		break;
+	case WREPLSRV_PUSH_NOTIFY_STAGE_WAIT_UPDATE:
+		c->status = wreplsrv_push_notify_wait_update(state);
 		break;
 	case WREPLSRV_PUSH_NOTIFY_STAGE_WAIT_INFORM:
…
 }
 
-static void wreplsrv_push_notify_handler_req(struct wrepl_request *req)
-{
-	struct wreplsrv_push_notify_state *state = talloc_get_type(req->async.private_data,
+static void wreplsrv_push_notify_handler_treq(struct tevent_req *subreq)
+{
+	struct wreplsrv_push_notify_state *state = tevent_req_callback_data(subreq,
 					  struct wreplsrv_push_notify_state);
 	wreplsrv_push_notify_handler(state);
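Every client call in this file now follows the same tevent_req triple: a _send() that takes an explicit memory context and event context and returns a struct tevent_req, a callback installed with tevent_req_set_callback() that recovers its state via tevent_req_callback_data(), and a _recv() called exactly once followed by TALLOC_FREE() of the subrequest. A minimal, self-contained request pair against the public tevent API shows the contract the wrepl_* wrappers satisfy (wait_send/wait_recv are made-up names; this is not Samba code):

#include <stdbool.h>
#include <stdint.h>
#include <talloc.h>
#include <tevent.h>

struct wait_state {
	uint8_t dummy;
};

/* completion path: mark the request done so the caller's callback runs */
static void wait_done(struct tevent_context *ev, struct tevent_timer *te,
		      struct timeval now, void *private_data)
{
	struct tevent_req *req = talloc_get_type_abort(private_data,
						       struct tevent_req);
	tevent_req_done(req);
}

static struct tevent_req *wait_send(TALLOC_CTX *mem_ctx,
				    struct tevent_context *ev,
				    uint32_t secs)
{
	struct tevent_req *req;
	struct wait_state *state;
	struct tevent_timer *te;

	req = tevent_req_create(mem_ctx, &state, struct wait_state);
	if (req == NULL) {
		return NULL;
	}
	te = tevent_add_timer(ev, req, tevent_timeval_current_ofs(secs, 0),
			      wait_done, req);
	if (tevent_req_nomem(te, req)) {
		/* deliver the error through the normal callback path */
		return tevent_req_post(req, ev);
	}
	return req;
}

static bool wait_recv(struct tevent_req *req)
{
	enum tevent_req_state state;
	uint64_t error;

	if (tevent_req_is_error(req, &state, &error)) {
		return false;
	}
	return true;
}

The caller issues wait_send() plus tevent_req_set_callback(); inside the callback it calls wait_recv(subreq) and then TALLOC_FREE(subreq), exactly the sequence the wreplsrv_* handlers above perform.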
vendor/current/source4/wrepl_server/wrepl_scavenging.c
--- r414
+++ r740

 #include "wrepl_server/wrepl_server.h"
 #include "nbt_server/wins/winsdb.h"
-#include "ldb/include/ldb.h"
-#include "ldb/include/ldb_errors.h"
+#include <ldb.h>
+#include <ldb_errors.h>
 #include "system/time.h"
 #include "smbd/service_task.h"
 #include "lib/messaging/irpc.h"
-#include "librpc/gen_ndr/ndr_irpc.h"
+#include "librpc/gen_ndr/ndr_irpc_c.h"
 #include "librpc/gen_ndr/ndr_nbt.h"
 #include "param/param.h"
…
 	const char *owner_filter;
 	const char *filter;
-	uint32_t i;
+	unsigned int i;
 	int ret;
 	time_t now = time(NULL);
…
 	struct timeval tombstone_extra_time;
 	const char *local_owner = service->wins_db->local_owner;
-	bool propagate = lp_parm_bool(service->task->lp_ctx, NULL, "wreplsrv", "propagate name releases", false);
+	bool propagate = lpcfg_parm_bool(service->task->lp_ctx, NULL, "wreplsrv", "propagate name releases", false);
 
 	now_timestr = ldb_timestring(tmp_mem, now);
…
 	const char *owner_filter;
 	const char *filter;
-	uint32_t i;
+	unsigned int i;
 	int ret;
 	time_t now = time(NULL);
…
 };
 
-static void verify_handler(struct irpc_request *ireq)
+static void verify_handler(struct tevent_req *subreq)
 {
-	struct verify_state *s = talloc_get_type(ireq->async.private_data,
-						 struct verify_state);
+	struct verify_state *s =
+		tevent_req_callback_data(subreq,
+		struct verify_state);
 	struct winsdb_record *rec = s->rec;
 	const char *action;
…
 	 * - if an error happens do nothing
 	 */
-	status = irpc_call_recv(ireq);
+	status = dcerpc_nbtd_proxy_wins_challenge_r_recv(subreq, s);
+	TALLOC_FREE(subreq);
 	if (NT_STATUS_EQUAL(NT_STATUS_OBJECT_NAME_NOT_FOUND, status)) {
 		delete_record = true;
…
 	const char *owner_filter;
 	const char *filter;
-	uint32_t i;
+	unsigned int i;
 	int ret;
 	time_t now = time(NULL);
 	const char *now_timestr;
-	struct irpc_request *ireq;
+	struct tevent_req *subreq;
 	struct verify_state *s;
-	struct server_id *nbt_servers;
-
-	nbt_servers = irpc_servers_byname(service->task->msg_ctx, tmp_mem, "nbt_server");
-	if ((nbt_servers == NULL) || (nbt_servers[0].id == 0)) {
-		return NT_STATUS_INTERNAL_ERROR;
-	}
+	struct dcerpc_binding_handle *irpc_handle;
 
 	now_timestr = ldb_timestring(tmp_mem, now);
…
 		s->r.in.addrs[0].addr	= rec->wins_owner;
 
-		ireq = IRPC_CALL_SEND(s->msg_ctx, nbt_servers[0],
-				      irpc, NBTD_PROXY_WINS_CHALLENGE,
-				      &s->r, s);
-		NT_STATUS_HAVE_NO_MEMORY(ireq);
-
-		ireq->async.fn		= verify_handler;
-		ireq->async.private_data= s;
+		irpc_handle = irpc_binding_handle_by_name(s,
+							  service->task->msg_ctx,
+							  "nbt_server",
+							  &ndr_table_irpc);
+		if (irpc_handle == NULL) {
+			return NT_STATUS_INTERNAL_ERROR;
+		}
+
+		subreq = dcerpc_nbtd_proxy_wins_challenge_r_send(s,
+								 service->task->event_ctx,
+								 irpc_handle,
+								 &s->r);
+		NT_STATUS_HAVE_NO_MEMORY(subreq);
+
+		tevent_req_set_callback(subreq, verify_handler, s);
 
 		talloc_steal(service, s);
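One detail that is easy to miss in the verify hunks above: the scavenging pass runs on a temporary context (tmp_mem), but each in-flight challenge must outlive it, so the state is reparented with talloc_steal(service, s) and the completion handler becomes the owner of its lifetime. The general shape, with hypothetical example_op_* and issue_verify names standing in for the generated dcerpc wrappers:

/*
 * Hypothetical names (example_op_*, issue_verify) illustrating the
 * lifetime rule only; verify_state and s->r come from the hunks above.
 */
static void example_op_done(struct tevent_req *subreq)
{
	struct verify_state *s =
		tevent_req_callback_data(subreq, struct verify_state);

	(void) example_op_recv(subreq, s);	/* hypothetical recv */
	TALLOC_FREE(subreq);
	/* ... decide whether to delete or keep the record ... */
	talloc_free(s);		/* the handler owns the state's lifetime */
}

static NTSTATUS issue_verify(struct wreplsrv_service *service,
			     struct verify_state *s,
			     struct dcerpc_binding_handle *irpc_handle)
{
	struct tevent_req *subreq;

	subreq = example_op_send(s, service->task->event_ctx,
				 irpc_handle, &s->r);	/* hypothetical send */
	NT_STATUS_HAVE_NO_MEMORY(subreq);
	tevent_req_set_callback(subreq, example_op_done, s);

	/* detach from the temporary context: the request must survive it */
	talloc_steal(service, s);
	return NT_STATUS_OK;
}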
vendor/current/source4/wrepl_server/wrepl_server.c
--- r414
+++ r740

 #include "wrepl_server/wrepl_server.h"
 #include "nbt_server/wins/winsdb.h"
-#include "ldb/include/ldb.h"
-#include "ldb/include/ldb_errors.h"
+#include <ldb.h>
+#include <ldb_errors.h>
 #include "auth/auth.h"
 #include "ldb_wrap.h"
…
 				      struct loadparm_context *lp_ctx)
 {
-	return ldb_wrap_connect(mem_ctx, ev_ctx, lp_ctx, private_path(mem_ctx,
-				lp_ctx, lp_wins_config_url(lp_ctx)),
-				system_session(mem_ctx, lp_ctx), NULL, 0, NULL);
+	return ldb_wrap_connect(mem_ctx, ev_ctx, lp_ctx, private_path(mem_ctx,
+				lp_ctx, lpcfg_wins_config_url(lp_ctx)),
+				system_session(lp_ctx), NULL, 0);
 }
…
 				      struct loadparm_context *lp_ctx)
 {
-	const char *owner = lp_parm_string(lp_ctx, NULL, "winsdb", "local_owner");
+	const char *owner = lpcfg_parm_string(lp_ctx, NULL, "winsdb", "local_owner");
 
 	if (owner == NULL) {
 		struct interface *ifaces;
-		load_interfaces(service, lp_interfaces(lp_ctx), &ifaces);
+		load_interfaces(service, lpcfg_interfaces(lp_ctx), &ifaces);
 		owner = iface_n_ip(ifaces, 0);
 	}
…
 
 	/* the default renew interval is 6 days */
-	service->config.renew_interval	  = lp_parm_int(lp_ctx, NULL,"wreplsrv","renew_interval", 6*24*60*60);
+	service->config.renew_interval	  = lpcfg_parm_int(lp_ctx, NULL,"wreplsrv","renew_interval", 6*24*60*60);
 
 	/* the default tombstone (extinction) interval is 6 days */
-	service->config.tombstone_interval= lp_parm_int(lp_ctx, NULL,"wreplsrv","tombstone_interval", 6*24*60*60);
+	service->config.tombstone_interval= lpcfg_parm_int(lp_ctx, NULL,"wreplsrv","tombstone_interval", 6*24*60*60);
 
 	/* the default tombstone (extinction) timeout is 1 day */
-	service->config.tombstone_timeout = lp_parm_int(lp_ctx, NULL,"wreplsrv","tombstone_timeout", 1*24*60*60);
+	service->config.tombstone_timeout = lpcfg_parm_int(lp_ctx, NULL,"wreplsrv","tombstone_timeout", 1*24*60*60);
 
 	/* the default tombstone extra timeout is 3 days */
-	service->config.tombstone_extra_timeout = lp_parm_int(lp_ctx, NULL,"wreplsrv","tombstone_extra_timeout", 3*24*60*60);
+	service->config.tombstone_extra_timeout = lpcfg_parm_int(lp_ctx, NULL,"wreplsrv","tombstone_extra_timeout", 3*24*60*60);
 
 	/* the default verify interval is 24 days */
-	service->config.verify_interval   = lp_parm_int(lp_ctx, NULL,"wreplsrv","verify_interval", 24*24*60*60);
+	service->config.verify_interval   = lpcfg_parm_int(lp_ctx, NULL,"wreplsrv","verify_interval", 24*24*60*60);
 
 	/* the default scavenging interval is 'renew_interval/2' */
-	service->config.scavenging_interval=lp_parm_int(lp_ctx, NULL,"wreplsrv","scavenging_interval",
+	service->config.scavenging_interval=lpcfg_parm_int(lp_ctx, NULL,"wreplsrv","scavenging_interval",
 							service->config.renew_interval/2);
 
 	/* the maximun interval to the next periodic processing event */
-	service->config.periodic_interval = lp_parm_int(lp_ctx, NULL,"wreplsrv","periodic_interval", 15);
+	service->config.periodic_interval = lpcfg_parm_int(lp_ctx, NULL,"wreplsrv","periodic_interval", 15);
 
 	return NT_STATUS_OK;
…
 	int ret;
 	TALLOC_CTX *tmp_ctx;
-	int i;
+	unsigned int i;
 	uint64_t new_seqnumber;
…
 	TALLOC_CTX *tmp_ctx = talloc_new(service);
 	struct ldb_context *ldb = service->wins_db->ldb;
-	int i;
+	unsigned int i;
 	struct wreplsrv_owner *local_owner;
 	const char *wins_owner;
…
 	struct wreplsrv_service *service;
 
-	if (!lp_wins_support(task->lp_ctx)) {
+	if (!lpcfg_wins_support(task->lp_ctx)) {
 		return;
 	}
…
 
 	/*
-	 * setup listen sockets, so we can anwser requests from our partners,
+	 * setup listen sockets, so we can answer requests from our partners,
 	 * which pull from us
 	 */
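The bulk of this file's changes are the loadparm rename: every lp_*() getter becomes lpcfg_*(), with the struct loadparm_context always passed explicitly. For reference, reading a parametric option under the new names looks like this (the option names below are invented for illustration; the real ones used by this service are the wreplsrv options visible above, and the sketch assumes it is compiled inside the Samba tree for param/param.h and DEBUG):

#include <stdbool.h>
#include "param/param.h"	/* lpcfg_* declarations */

static void read_example_options(struct loadparm_context *lp_ctx)
{
	/* hypothetical option names, same call shape as the real ones */
	int max_pulls = lpcfg_parm_int(lp_ctx, NULL, "wreplsrv",
				       "max example pulls", 10);
	bool verbose = lpcfg_parm_bool(lp_ctx, NULL, "wreplsrv",
				       "example verbose", false);

	DEBUG(5,("example options: max_pulls=%d verbose=%d\n",
		 max_pulls, verbose));
}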
vendor/current/source4/wrepl_server/wrepl_server.h
--- r414
+++ r740

    along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
+
+#include "system/network.h"
 
 struct wreplsrv_service;
…
 	struct wrepl_packet rep_packet;
 	bool terminate_after_send;
+
+	DATA_BLOB in;
+	DATA_BLOB out;
+	struct iovec out_iov[1];
 };
…
 	struct wreplsrv_in_connection *prev,*next;
 	struct stream_connection *conn;
-	struct packet_context *packet;
+	struct tstream_context *tstream;
+	struct tevent_queue *send_queue;
 
 	/* our global service context */
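The header change summarizes the whole changeset: a connection now carries a tstream plus a tevent_queue instead of a packet_context, and each call keeps its own wire blobs and a one-element iovec. The queue is what keeps concurrently finished replies from interleaving on the shared stream. Schematically, using the fields above (ev and the two call pointers are placeholders, not code from the tree):

/* schematic: two replies queued on one connection; the second writev
 * does not touch the wire until the first has fully gone out */
struct tevent_req *w1, *w2;

call1->out_iov[0].iov_base = (char *) call1->out.data;
call1->out_iov[0].iov_len  = call1->out.length;
call2->out_iov[0].iov_base = (char *) call2->out.data;
call2->out_iov[0].iov_len  = call2->out.length;

w1 = tstream_writev_queue_send(call1, ev, conn->tstream,
			       conn->send_queue, call1->out_iov, 1);
w2 = tstream_writev_queue_send(call2, ev, conn->tstream,
			       conn->send_queue, call2->out_iov, 1);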