Changeset 988 for vendor/current/source4/dsdb/repl
- Timestamp: Nov 24, 2016, 1:14:11 PM
- Location: vendor/current/source4/dsdb/repl
- Files: 11 edited
vendor/current/source4/dsdb/repl/drepl_extended.c
r740 r988 40 40 */ 41 41 static WERROR drepl_create_extended_source_dsa(struct dreplsrv_service *service, 42 TALLOC_CTX *mem_ctx, 42 43 struct ldb_dn *nc_dn, 43 44 struct ldb_dn *source_dsa_dn, … … 94 95 } 95 96 96 sdsa->repsFrom1->other_info->dns_name = 97 talloc_asprintf(sdsa->repsFrom1->other_info, "%s._msdcs.%s", 98 GUID_string(sdsa->repsFrom1->other_info, &sdsa->repsFrom1->source_dsa_obj_guid), 99 lpcfg_dnsdomain(service->task->lp_ctx)); 97 sdsa->repsFrom1->other_info->dns_name = samdb_ntds_msdcs_dns_name(ldb, 98 sdsa->repsFrom1->other_info, 99 &sdsa->repsFrom1->source_dsa_obj_guid); 100 100 if (!sdsa->repsFrom1->other_info->dns_name) { 101 101 talloc_free(sdsa); … … 167 167 { 168 168 struct extended_op_data *data = talloc_get_type_abort(cb_data, struct extended_op_data); 169 talloc_ free(data->sdsa);169 talloc_unlink(data, data->sdsa); 170 170 data->callback(service, err, exop_error, data->callback_data); 171 171 talloc_free(data); … … 186 186 WERROR werr; 187 187 struct extended_op_data *data; 188 struct dreplsrv_partition_source_dsa *sdsa;189 190 werr = drepl_create_extended_source_dsa(service, nc_dn, source_dsa_dn, min_usn, &sdsa);191 W_ERROR_NOT_OK_RETURN(werr);192 188 193 189 data = talloc(service, struct extended_op_data); 194 190 W_ERROR_HAVE_NO_MEMORY(data); 195 191 192 werr = drepl_create_extended_source_dsa(service, data, nc_dn, source_dsa_dn, min_usn, &data->sdsa); 193 W_ERROR_NOT_OK_RETURN(werr); 194 196 195 data->callback = callback; 197 196 data->callback_data = callback_data; 198 data->sdsa = sdsa; 199 200 werr = dreplsrv_schedule_partition_pull_source(service, sdsa, 197 198 werr = dreplsrv_schedule_partition_pull_source(service, data->sdsa, 201 199 0, extended_op, fsmo_info, 202 200 extended_op_callback, data); 203 201 if (!W_ERROR_IS_OK(werr)) { 204 talloc_free(sdsa);205 202 talloc_free(data); 206 203 } -
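The interesting part of this hunk is the switch from talloc_free(data->sdsa) to talloc_unlink(data, data->sdsa): the source dsa may have picked up a second parent, so only this callback's link should be dropped. The sketch below uses libtalloc only; "service", "data" and "sdsa" are stand-ins for the dreplsrv structures, not the real types. It shows why the unlinked object stays usable while another holder still references it.

#include <assert.h>
#include <stdio.h>
#include <talloc.h>

int main(void)
{
    TALLOC_CTX *service = talloc_new(NULL);
    TALLOC_CTX *data = talloc_new(service);   /* like extended_op_data */
    TALLOC_CTX *op = talloc_new(service);     /* like a pending pull operation */

    char *sdsa = talloc_strdup(data, "temporary source dsa");

    /* the pull operation takes an extra reference, as drepl_out_pull.c now does */
    assert(talloc_reference(op, sdsa) != NULL);

    /* drop only this callback's link; "op" still keeps sdsa alive */
    talloc_unlink(data, sdsa);
    printf("still usable: %s\n", sdsa);

    talloc_free(service);
    return 0;
}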
vendor/current/source4/dsdb/repl/drepl_fsmo.c
r740 r988 32 32 #include "param/param.h" 33 33 34 struct fsmo_role_state { 35 struct irpc_message *msg; 36 struct drepl_takeFSMORole *r; 37 }; 38 34 39 static void drepl_role_callback(struct dreplsrv_service *service, 35 40 WERROR werr, … … 37 42 void *cb_data) 38 43 { 44 struct fsmo_role_state *fsmo = talloc_get_type_abort(cb_data, struct fsmo_role_state); 39 45 if (!W_ERROR_IS_OK(werr)) { 40 DEBUG( 0,(__location__ ": Failed role transfer - %s - extended_ret[0x%X]\n",46 DEBUG(2,(__location__ ": Failed role transfer - %s - extended_ret[0x%X]\n", 41 47 win_errstr(werr), ext_err)); 42 48 } else { 43 DEBUG( 0,(__location__ ": Successful role transfer\n"));49 DEBUG(2,(__location__ ": Successful role transfer\n")); 44 50 } 45 } 46 47 static bool fsmo_master_cmp(struct ldb_dn *ntds_dn, struct ldb_dn *role_owner_dn) 48 { 49 if (ldb_dn_compare(ntds_dn, role_owner_dn) == 0) { 50 DEBUG(0,("\nWe are the FSMO master.\n")); 51 return true; 52 } 53 return false; 51 fsmo->r->out.result = werr; 52 irpc_send_reply(fsmo->msg, NT_STATUS_OK); 54 53 } 55 54 … … 57 56 see which role is we are asked to assume, initialize data and send request 58 57 */ 59 WERROR dreplsrv_fsmo_role_check(struct dreplsrv_service *service,60 enum drepl_role_master role)58 NTSTATUS drepl_take_FSMO_role(struct irpc_message *msg, 59 struct drepl_takeFSMORole *r) 61 60 { 62 struct ldb_dn *role_owner_dn, *fsmo_role_dn, *ntds_dn; 61 struct dreplsrv_service *service = talloc_get_type(msg->private_data, 62 struct dreplsrv_service); 63 struct ldb_dn *role_owner_dn, *fsmo_role_dn; 63 64 TALLOC_CTX *tmp_ctx = talloc_new(service); 64 65 uint64_t fsmo_info = 0; 65 66 enum drsuapi_DsExtendedOperation extended_op = DRSUAPI_EXOP_NONE; 66 67 WERROR werr; 67 68 ntds_dn = samdb_ntds_settings_dn(service->samdb); 69 if (!ntds_dn) { 70 return WERR_DS_DRA_INTERNAL_ERROR; 71 } 68 enum drepl_role_master role = r->in.role; 69 struct fsmo_role_state *fsmo; 70 bool is_us; 71 int ret; 72 72 73 73 werr = dsdb_get_fsmo_role_info(tmp_ctx, service->samdb, role, 74 74 &fsmo_role_dn, &role_owner_dn); 75 75 if (!W_ERROR_IS_OK(werr)) { 76 return werr; 76 talloc_free(tmp_ctx); 77 r->out.result = werr; 78 return NT_STATUS_OK; 77 79 } 78 80 … … 90 92 break; 91 93 default: 92 return WERR_DS_DRA_INTERNAL_ERROR; 94 DEBUG(0,("Unknown role %u in role transfer\n", 95 (unsigned)role)); 96 /* IRPC messages are trusted, so this really should not happen */ 97 smb_panic("Unknown role despite dsdb_get_fsmo_role_info success"); 93 98 } 94 99 95 if (fsmo_master_cmp(ntds_dn, role_owner_dn) ||96 (extended_op == DRSUAPI_EXOP_NONE)) {97 DEBUG(0,("FSMO role check failed for DN %s and owner %s",100 ret = samdb_dn_is_our_ntdsa(service->samdb, role_owner_dn, &is_us); 101 if (ret != LDB_SUCCESS) { 102 DEBUG(0,("FSMO role check failed (failed to confirm if our ntdsDsa) for DN %s and owner %s \n", 98 103 ldb_dn_get_linearized(fsmo_role_dn), 99 104 ldb_dn_get_linearized(role_owner_dn))); 100 return WERR_OK; 105 talloc_free(tmp_ctx); 106 r->out.result = WERR_DS_DRA_INTERNAL_ERROR; 107 return NT_STATUS_OK; 101 108 } 109 110 if (is_us) { 111 DEBUG(5,("FSMO role check failed, we already own DN %s with %s\n", 112 ldb_dn_get_linearized(fsmo_role_dn), 113 ldb_dn_get_linearized(role_owner_dn))); 114 r->out.result = WERR_OK; 115 talloc_free(tmp_ctx); 116 return NT_STATUS_OK; 117 } 118 119 fsmo = talloc(msg, struct fsmo_role_state); 120 NT_STATUS_HAVE_NO_MEMORY(fsmo); 121 122 fsmo->msg = msg; 123 fsmo->r = r; 102 124 103 125 werr = drepl_request_extended_op(service, … … 108 130 0, 109 131 
drepl_role_callback, 110 NULL); 111 if (W_ERROR_IS_OK(werr)) { 112 dreplsrv_run_pending_ops(service); 113 } else { 114 DEBUG(0,("%s: drepl_request_extended_op() failed with %s", 115 __FUNCTION__, win_errstr(werr))); 132 fsmo); 133 if (!W_ERROR_IS_OK(werr)) { 134 r->out.result = werr; 135 talloc_free(tmp_ctx); 136 return NT_STATUS_OK; 116 137 } 117 return werr; 138 139 /* mark this message to be answered later */ 140 msg->defer_reply = true; 141 dreplsrv_run_pending_ops(service); 142 talloc_free(tmp_ctx); 143 return NT_STATUS_OK; 118 144 } -
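This hunk turns the FSMO check into an IRPC handler that parks the incoming message in a fsmo_role_state, sets msg->defer_reply = true and only answers from drepl_role_callback(). Below is a minimal libtevent sketch of that "park the request, answer from a callback" shape; it assumes nothing about the Samba IRPC API, and the state struct, the timer and the printf standing in for irpc_send_reply() are all illustrative.

#include <stdbool.h>
#include <stdio.h>
#include <talloc.h>
#include <tevent.h>

struct role_state {
    const char *role;    /* what we were asked to transfer */
    bool *replied;       /* where completion is recorded */
};

static void role_transfer_done(struct tevent_context *ev,
                               struct tevent_timer *te,
                               struct timeval now, void *private_data)
{
    struct role_state *state = talloc_get_type_abort(private_data,
                                                     struct role_state);

    /* this is the point where the deferred reply would be sent */
    printf("role transfer for %s finished, replying now\n", state->role);
    *state->replied = true;
}

int main(void)
{
    TALLOC_CTX *mem_ctx = talloc_new(NULL);
    struct tevent_context *ev = tevent_context_init(mem_ctx);
    bool replied = false;

    struct role_state *state = talloc_zero(mem_ctx, struct role_state);
    state->role = "RID manager";
    state->replied = &replied;

    /* nothing is answered here; the work is only scheduled */
    tevent_add_timer(ev, state, tevent_timeval_current_ofs(0, 1000),
                     role_transfer_done, state);

    while (!replied) {
        tevent_loop_once(ev);
    }
    talloc_free(mem_ctx);
    return 0;
}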
vendor/current/source4/dsdb/repl/drepl_notify.c
r740 r988 196 196 nt_errstr(status), win_errstr(werr))); 197 197 } else { 198 DEBUG(2,("dreplsrv_notify: DsReplicaSync OK for%s\n",198 DEBUG(2,("dreplsrv_notify: DsReplicaSync successfuly sent to %s\n", 199 199 op->source_dsa->repsFrom1->other_info->dns_name)); 200 200 op->source_dsa->notify_uSN = op->uSN; … … 256 256 /* first check the sources list */ 257 257 for (s=p->sources; s; s=s->next) { 258 if (GUID_ compare(&s->repsFrom1->source_dsa_obj_guid, guid) == 0) {258 if (GUID_equal(&s->repsFrom1->source_dsa_obj_guid, guid)) { 259 259 return s; 260 260 } … … 263 263 /* then the notifies list */ 264 264 for (s=p->notifies; s; s=s->next) { 265 if (GUID_ compare(&s->repsFrom1->source_dsa_obj_guid, guid) == 0) {265 if (GUID_equal(&s->repsFrom1->source_dsa_obj_guid, guid)) { 266 266 return s; 267 267 } … … 324 324 op->schedule_time = time(NULL); 325 325 326 DLIST_ADD_END(service->ops.notifies, op , struct dreplsrv_notify_operation *);326 DLIST_ADD_END(service->ops.notifies, op); 327 327 talloc_steal(service, op); 328 328 return WERR_OK; … … 452 452 service->notify.next_event = next_time; 453 453 454 new_te = event_add_timed(service->task->event_ctx, service,454 new_te = tevent_add_timer(service->task->event_ctx, service, 455 455 service->notify.next_event, 456 456 dreplsrv_notify_handler_te, service); -
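Several call sites here move from GUID_compare(...) == 0 to GUID_equal(...), which reads as a predicate rather than a three-way comparison. A tiny stand-alone illustration of the two styles, with a simplified struct guid standing in for Samba's struct GUID:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct guid {
    unsigned char v[16];
};

/* three-way compare, useful for sorting */
static int guid_compare(const struct guid *a, const struct guid *b)
{
    return memcmp(a->v, b->v, sizeof(a->v));
}

/* boolean equality, clearer at call sites that only test for a match */
static bool guid_equal(const struct guid *a, const struct guid *b)
{
    return guid_compare(a, b) == 0;
}

int main(void)
{
    struct guid a = { { 1, 2, 3 } };
    struct guid b = { { 1, 2, 3 } };

    if (guid_equal(&a, &b)) {          /* new style: reads as a predicate */
        printf("same source dsa\n");
    }
    if (guid_compare(&a, &b) == 0) {   /* older style being replaced */
        printf("same source dsa (compare style)\n");
    }
    return 0;
}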
vendor/current/source4/dsdb/repl/drepl_out_helpers.c
r740 r988 68 68 state->drsuapi = conn->drsuapi; 69 69 70 if (state->drsuapi && !state->drsuapi->pipe->conn->dead) { 71 tevent_req_done(req); 72 return tevent_req_post(req, ev); 73 } 74 75 if (state->drsuapi && state->drsuapi->pipe->conn->dead) { 76 talloc_free(state->drsuapi); 77 conn->drsuapi = NULL; 70 if (state->drsuapi != NULL) { 71 struct dcerpc_binding_handle *b = 72 state->drsuapi->pipe->binding_handle; 73 bool is_connected = dcerpc_binding_handle_is_connected(b); 74 75 if (is_connected) { 76 tevent_req_done(req); 77 return tevent_req_post(req, ev); 78 } 79 80 TALLOC_FREE(conn->drsuapi); 78 81 } 79 82 … … 115 118 116 119 status = gensec_session_key(state->drsuapi->pipe->conn->security_state.generic_state, 120 state->drsuapi, 117 121 &state->drsuapi->gensec_skey); 118 122 if (tevent_req_nterror(req, status)) { … … 173 177 break; 174 178 } 179 case 28: { 180 *info28 = state->bind_r.out.bind_info->info.info28; 181 break; 182 } 183 case 32: { 184 struct drsuapi_DsBindInfo32 *info32; 185 info32 = &state->bind_r.out.bind_info->info.info32; 186 187 info28->supported_extensions = info32->supported_extensions; 188 info28->site_guid = info32->site_guid; 189 info28->pid = info32->pid; 190 info28->repl_epoch = info32->repl_epoch; 191 break; 192 } 175 193 case 48: { 176 194 struct drsuapi_DsBindInfo48 *info48; … … 183 201 break; 184 202 } 185 case 28: 186 *info28 = state->bind_r.out.bind_info->info.info28; 203 case 52: { 204 struct drsuapi_DsBindInfo52 *info52; 205 info52 = &state->bind_r.out.bind_info->info.info52; 206 207 info28->supported_extensions = info52->supported_extensions; 208 info28->site_guid = info52->site_guid; 209 info28->pid = info52->pid; 210 info28->repl_epoch = info52->repl_epoch; 211 break; 212 } 213 default: 214 DEBUG(1, ("Warning: invalid info length in bind info: %d\n", 215 state->bind_r.out.bind_info->length)); 187 216 break; 188 217 } … … 262 291 263 292 /* 264 get a partial attribute set for a replication call293 get a RODC partial attribute set for a replication call 265 294 */ 266 295 static NTSTATUS dreplsrv_get_rodc_partial_attribute_set(struct dreplsrv_service *service, … … 280 309 pas->version = 1; 281 310 pas->attids = talloc_array(pas, enum drsuapi_DsAttributeId, schema->num_attributes); 282 NT_STATUS_HAVE_NO_MEMORY_AND_FREE(pas->attids, pas); 311 if (pas->attids == NULL) { 312 TALLOC_FREE(pas); 313 return NT_STATUS_NO_MEMORY; 314 } 283 315 284 316 for (i=0; i<schema->num_attributes; i++) { … … 294 326 pas->num_attids++; 295 327 } 328 329 pas->attids = talloc_realloc(pas, pas->attids, enum drsuapi_DsAttributeId, pas->num_attids); 330 if (pas->attids == NULL) { 331 TALLOC_FREE(pas); 332 return NT_STATUS_NO_MEMORY; 333 } 334 335 *_pas = pas; 336 return NT_STATUS_OK; 337 } 338 339 340 /* 341 get a GC partial attribute set for a replication call 342 */ 343 static NTSTATUS dreplsrv_get_gc_partial_attribute_set(struct dreplsrv_service *service, 344 TALLOC_CTX *mem_ctx, 345 struct drsuapi_DsPartialAttributeSet **_pas) 346 { 347 struct drsuapi_DsPartialAttributeSet *pas; 348 struct dsdb_schema *schema; 349 uint32_t i; 350 351 pas = talloc_zero(mem_ctx, struct drsuapi_DsPartialAttributeSet); 352 NT_STATUS_HAVE_NO_MEMORY(pas); 353 354 schema = dsdb_get_schema(service->samdb, NULL); 355 356 pas->version = 1; 357 pas->attids = talloc_array(pas, enum drsuapi_DsAttributeId, schema->num_attributes); 358 if (pas->attids == NULL) { 359 TALLOC_FREE(pas); 360 return NT_STATUS_NO_MEMORY; 361 } 362 363 for (i=0; i<schema->num_attributes; i++) { 364 struct dsdb_attribute *a; 365 a = 
schema->attributes_by_attributeID_id[i]; 366 if (a->isMemberOfPartialAttributeSet) { 367 pas->attids[pas->num_attids] = dsdb_attribute_get_attid(a, false); 368 pas->num_attids++; 369 } 370 } 371 372 pas->attids = talloc_realloc(pas, pas->attids, enum drsuapi_DsAttributeId, pas->num_attids); 373 if (pas->attids == NULL) { 374 TALLOC_FREE(pas); 375 return NT_STATUS_NO_MEMORY; 376 } 377 296 378 *_pas = pas; 297 379 return NT_STATUS_OK; … … 337 419 NTSTATUS status; 338 420 uint32_t replica_flags; 421 struct drsuapi_DsReplicaHighWaterMark highwatermark; 422 struct ldb_dn *schema_dn = ldb_get_schema_basedn(service->samdb); 339 423 340 424 r = talloc(state, struct drsuapi_DsGetNCChanges); … … 373 457 374 458 replica_flags = rf1->replica_flags; 375 376 if (service->am_rodc) { 459 highwatermark = rf1->highwatermark; 460 461 if (partition->partial_replica) { 462 status = dreplsrv_get_gc_partial_attribute_set(service, r, &pas); 463 if (!NT_STATUS_IS_OK(status)) { 464 DEBUG(0,(__location__ ": Failed to construct GC partial attribute set : %s\n", nt_errstr(status))); 465 return; 466 } 467 replica_flags &= ~DRSUAPI_DRS_WRIT_REP; 468 } else if (partition->rodc_replica) { 377 469 bool for_schema = false; 378 if (ldb_dn_compare_base( ldb_get_schema_basedn(service->samdb), partition->dn) == 0) {470 if (ldb_dn_compare_base(schema_dn, partition->dn) == 0) { 379 471 for_schema = true; 380 472 } … … 382 474 status = dreplsrv_get_rodc_partial_attribute_set(service, r, &pas, for_schema); 383 475 if (!NT_STATUS_IS_OK(status)) { 384 DEBUG(0,(__location__ ": Failed to construct partial attribute set : %s\n", nt_errstr(status)));476 DEBUG(0,(__location__ ": Failed to construct RODC partial attribute set : %s\n", nt_errstr(status))); 385 477 return; 386 478 } 479 replica_flags &= ~DRSUAPI_DRS_WRIT_REP; 387 480 if (state->op->extended_op == DRSUAPI_EXOP_REPL_SECRET) { 388 481 replica_flags &= ~DRSUAPI_DRS_SPECIAL_SECRET_PROCESSING; 389 } 482 } else { 483 replica_flags |= DRSUAPI_DRS_SPECIAL_SECRET_PROCESSING; 484 } 485 } 486 if (state->op->extended_op != DRSUAPI_EXOP_NONE) { 487 /* 488 * If it's an exop never set the ADD_REF even if it's in 489 * repsFrom flags. 490 */ 491 replica_flags &= ~DRSUAPI_DRS_ADD_REF; 492 } 493 494 /* is this a full resync of all objects? 
*/ 495 if (state->op->options & DRSUAPI_DRS_FULL_SYNC_NOW) { 496 ZERO_STRUCT(highwatermark); 497 /* clear the FULL_SYNC_NOW option for subsequent 498 stages of the replication cycle */ 499 state->op->options &= ~DRSUAPI_DRS_FULL_SYNC_NOW; 500 state->op->options |= DRSUAPI_DRS_FULL_SYNC_IN_PROGRESS; 501 replica_flags |= DRSUAPI_DRS_NEVER_SYNCED; 502 } 503 if (state->op->options & DRSUAPI_DRS_FULL_SYNC_IN_PROGRESS) { 504 uptodateness_vector = NULL; 390 505 } 391 506 … … 396 511 r->in.req->req8.source_dsa_invocation_id= rf1->source_dsa_invocation_id; 397 512 r->in.req->req8.naming_context = &partition->nc; 398 r->in.req->req8.highwatermark = rf1->highwatermark;513 r->in.req->req8.highwatermark = highwatermark; 399 514 r->in.req->req8.uptodateness_vector = uptodateness_vector; 400 515 r->in.req->req8.replica_flags = replica_flags; … … 412 527 r->in.req->req5.source_dsa_invocation_id= rf1->source_dsa_invocation_id; 413 528 r->in.req->req5.naming_context = &partition->nc; 414 r->in.req->req5.highwatermark = rf1->highwatermark;529 r->in.req->req5.highwatermark = highwatermark; 415 530 r->in.req->req5.uptodateness_vector = uptodateness_vector; 416 531 r->in.req->req5.replica_flags = replica_flags; … … 454 569 struct drsuapi_DsGetNCChangesCtr1 *ctr1 = NULL; 455 570 struct drsuapi_DsGetNCChangesCtr6 *ctr6 = NULL; 456 enum drsuapi_DsExtendedError extended_ret ;571 enum drsuapi_DsExtendedError extended_ret = DRSUAPI_EXOP_ERR_NONE; 457 572 state->ndr_struct_ptr = NULL; 458 573 … … 543 658 struct dreplsrv_partition *partition = state->op->source_dsa->partition; 544 659 struct dreplsrv_drsuapi_connection *drsuapi = state->op->source_dsa->conn->drsuapi; 660 struct ldb_dn *schema_dn = ldb_get_schema_basedn(service->samdb); 545 661 struct dsdb_schema *schema; 546 662 struct dsdb_schema *working_schema = NULL; … … 555 671 WERROR status; 556 672 NTSTATUS nt_status; 673 uint32_t dsdb_repl_flags = 0; 557 674 558 675 switch (ctr_level) { … … 563 680 linked_attributes_count = 0; 564 681 linked_attributes = NULL; 682 rf1.source_dsa_obj_guid = ctr1->source_dsa_guid; 683 rf1.source_dsa_invocation_id = ctr1->source_dsa_invocation_id; 565 684 rf1.highwatermark = ctr1->new_highwatermark; 566 685 uptodateness_vector = NULL; /* TODO: map it */ … … 573 692 linked_attributes_count = ctr6->linked_attributes_count; 574 693 linked_attributes = ctr6->linked_attributes; 694 rf1.source_dsa_obj_guid = ctr6->source_dsa_guid; 695 rf1.source_dsa_invocation_id = ctr6->source_dsa_invocation_id; 575 696 rf1.highwatermark = ctr6->new_highwatermark; 576 697 uptodateness_vector = ctr6->uptodateness_vector; … … 594 715 * We won't need a working schema for empty replicas sent. 
595 716 */ 596 if (first_object && ldb_dn_compare(partition->dn, schema->base_dn) == 0) { 597 /* create working schema to convert objects with */ 598 status = dsdb_repl_make_working_schema(service->samdb, 599 schema, 600 mapping_ctr, 601 object_count, 602 first_object, 603 &drsuapi->gensec_skey, 604 state, &working_schema); 605 if (!W_ERROR_IS_OK(status)) { 606 DEBUG(0,("Failed to create working schema: %s\n", 607 win_errstr(status))); 608 tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR); 609 return; 610 } 717 if (first_object) { 718 bool is_schema = ldb_dn_compare(partition->dn, schema_dn) == 0; 719 if (is_schema) { 720 /* create working schema to convert objects with */ 721 status = dsdb_repl_make_working_schema(service->samdb, 722 schema, 723 mapping_ctr, 724 object_count, 725 first_object, 726 &drsuapi->gensec_skey, 727 state, &working_schema); 728 if (!W_ERROR_IS_OK(status)) { 729 DEBUG(0,("Failed to create working schema: %s\n", 730 win_errstr(status))); 731 tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR); 732 return; 733 } 734 } 735 } 736 737 if (partition->partial_replica || partition->rodc_replica) { 738 dsdb_repl_flags |= DSDB_REPL_FLAG_PARTIAL_REPLICA; 739 } 740 if (state->op->options & DRSUAPI_DRS_FULL_SYNC_IN_PROGRESS) { 741 dsdb_repl_flags |= DSDB_REPL_FLAG_PRIORITISE_INCOMING; 742 } 743 if (state->op->options & DRSUAPI_DRS_SPECIAL_SECRET_PROCESSING) { 744 dsdb_repl_flags |= DSDB_REPL_FLAG_EXPECT_NO_SECRETS; 611 745 } 612 746 … … 622 756 uptodateness_vector, 623 757 &drsuapi->gensec_skey, 758 dsdb_repl_flags, 624 759 state, &objects); 625 760 if (!W_ERROR_IS_OK(status)) { … … 648 783 *state->op->source_dsa->repsFrom1 = rf1; 649 784 } 650 /*651 * TODO: update our uptodatevector!652 */653 785 654 786 /* we don't need this maybe very large structure anymore */ … … 691 823 struct dreplsrv_drsuapi_connection *drsuapi = state->op->source_dsa->conn->drsuapi; 692 824 struct drsuapi_DsReplicaUpdateRefs *r; 693 char *ntds_guid_str;694 825 char *ntds_dns_name; 695 826 struct tevent_req *subreq; … … 700 831 } 701 832 702 ntds_guid_str = GUID_string(r, &service->ntds_guid); 703 if (tevent_req_nomem(ntds_guid_str, req)) { 704 return; 705 } 706 707 ntds_dns_name = talloc_asprintf(r, "%s._msdcs.%s", 708 ntds_guid_str, 709 lpcfg_dnsdomain(service->task->lp_ctx)); 833 ntds_dns_name = samdb_ntds_msdcs_dns_name(service->samdb, r, &service->ntds_guid); 710 834 if (tevent_req_nomem(ntds_dns_name, req)) { 835 talloc_free(r); 711 836 return; 712 837 } … … 728 853 r); 729 854 if (tevent_req_nomem(subreq, req)) { 855 talloc_free(r); 730 856 return; 731 857 } … … 764 890 r->in.req.req1.dest_dsa_dns_name, 765 891 r->in.req.req1.naming_context->dn)); 766 tevent_req_nterror(req, status); 767 return; 892 /* 893 * TODO we are currently not sending the 894 * DsReplicaUpdateRefs at the correct moment, 895 * we do it just after a GetNcChanges which is 896 * not always correct. 897 * Especially when another DC is trying to demote 898 * it will sends us a DsReplicaSync that will trigger a getNcChanges 899 * this call will succeed but the DsRecplicaUpdateRefs that we send 900 * just after will not because the DC is in a demote state and 901 * will reply us a WERR_DS_DRA_BUSY, this error will cause us to 902 * answer to the DsReplicaSync with a non OK status, the other DC 903 * will stop the demote due to this error. 904 * In order to cope with this we will for the moment concider 905 * a DS_DRA_BUSY not as an error. 
906 * It's not ideal but it should not have a too huge impact for 907 * running production as this error otherwise never happen and 908 * due to the fact the send a DsReplicaUpdateRefs after each getNcChanges 909 */ 910 if (!W_ERROR_EQUAL(r->out.result, WERR_DS_DRA_BUSY)) { 911 tevent_req_nterror(req, status); 912 return; 913 } 768 914 } 769 915 -
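The new dreplsrv_get_gc_partial_attribute_set() (and the reworked RODC variant) builds its attid array by reserving one slot per schema attribute, filling in only the qualifying ones, and then shrinking with talloc_realloc(). The sketch below shows just that allocate-fill-shrink idiom with libtalloc; the uint32_t ids stand in for enum drsuapi_DsAttributeId.

#include <stdint.h>
#include <stdio.h>
#include <talloc.h>

int main(void)
{
    TALLOC_CTX *mem_ctx = talloc_new(NULL);
    const uint32_t all_attrs[] = { 3, 0, 9, 0, 0, 21, 7, 0 }; /* 0 = not in the set */
    const size_t num_attributes = sizeof(all_attrs) / sizeof(all_attrs[0]);

    uint32_t *attids = talloc_array(mem_ctx, uint32_t, num_attributes);
    size_t num_attids = 0;
    size_t i;

    if (attids == NULL) {
        talloc_free(mem_ctx);
        return 1;
    }

    for (i = 0; i < num_attributes; i++) {
        if (all_attrs[i] != 0) {
            attids[num_attids++] = all_attrs[i];
        }
    }

    /* shrink the array to what was actually filled in */
    attids = talloc_realloc(mem_ctx, attids, uint32_t, num_attids);
    if (attids == NULL && num_attids != 0) {
        talloc_free(mem_ctx);
        return 1;
    }

    printf("kept %zu of %zu attributes\n", num_attids, num_attributes);
    talloc_free(mem_ctx);
    return 0;
}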
vendor/current/source4/dsdb/repl/drepl_out_pull.c
r740 r988 58 58 59 59 for (i=0; i<count; i++) { 60 if (GUID_ compare(source_dsa_obj_guid,61 &reps[i].ctr.ctr1.source_dsa_obj_guid) == 0) {60 if (GUID_equal(source_dsa_obj_guid, 61 &reps[i].ctr.ctr1.source_dsa_obj_guid)) { 62 62 break; 63 63 } … … 102 102 103 103 op->service = s; 104 op->source_dsa = source; 104 /* 105 * source may either be the long-term list of partners, or 106 * from dreplsrv_partition_source_dsa_temporary(). Because it 107 * can be either, we can't talloc_steal() it here, so we 108 * instead we reference it. 109 * 110 * We never talloc_free() the p->sources pointers - indeed we 111 * never remove them - and the temp source will otherwise go 112 * away with the msg it is allocated on. 113 * 114 * Finally the pointer created in drepl_request_extended_op() 115 * is removed with talloc_unlink(). 116 * 117 */ 118 op->source_dsa = talloc_reference(op, source); 119 if (!op->source_dsa) { 120 return WERR_NOMEM; 121 } 122 105 123 op->options = options; 106 124 op->extended_op = extended_op; … … 110 128 op->schedule_time = time(NULL); 111 129 112 DLIST_ADD_END(s->ops.pending, op , struct dreplsrv_out_operation *);130 DLIST_ADD_END(s->ops.pending, op); 113 131 114 132 return WERR_OK; -
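The long comment added here explains that op->source_dsa may come from a short-lived temporary allocation, so the operation takes a talloc_reference() instead of stealing it. The libtalloc sketch below demonstrates the effect: freeing the message that originally owned the object does not invalidate it while the operation still holds its reference. The names are illustrative, not the dreplsrv types.

#include <stdio.h>
#include <talloc.h>

int main(void)
{
    TALLOC_CTX *service = talloc_new(NULL);
    TALLOC_CTX *msg = talloc_new(service);   /* short-lived incoming message */
    TALLOC_CTX *op = talloc_new(service);    /* long-lived pending operation */

    char *temp_source = talloc_strdup(msg, "temporary source dsa");

    if (talloc_reference(op, temp_source) == NULL) {
        talloc_free(service);
        return 1;
    }

    /* the message goes away, as it would after the request has been handled */
    talloc_free(msg);

    /* the operation's reference keeps the object valid */
    printf("still valid after the msg is gone: %s\n", temp_source);

    talloc_free(service);
    return 0;
}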
vendor/current/source4/dsdb/repl/drepl_partitions.c
r740 r988 33 33 #include "libcli/security/security.h" 34 34 #include "param/param.h" 35 35 #include "dsdb/common/util.h" 36 37 /* 38 load the partitions list based on replicated NC attributes in our 39 NTDSDSA object 40 */ 36 41 WERROR dreplsrv_load_partitions(struct dreplsrv_service *s) 37 42 { 38 43 WERROR status; 39 static const char *attrs[] = { " namingContexts", NULL };40 unsigned int i;44 static const char *attrs[] = { "hasMasterNCs", "msDS-hasMasterNCs", "hasPartialReplicaNCs", "msDS-HasFullReplicaNCs", NULL }; 45 unsigned int a; 41 46 int ret; 42 47 TALLOC_CTX *tmp_ctx; 43 48 struct ldb_result *res; 44 49 struct ldb_message_element *el; 50 struct ldb_dn *ntds_dn; 45 51 46 52 tmp_ctx = talloc_new(s); 47 53 W_ERROR_HAVE_NO_MEMORY(tmp_ctx); 48 54 49 ret = ldb_search(s->samdb, tmp_ctx, &res, 50 ldb_dn_new(tmp_ctx, s->samdb, ""), LDB_SCOPE_BASE, attrs, NULL); 55 ntds_dn = samdb_ntds_settings_dn(s->samdb, tmp_ctx); 56 if (!ntds_dn) { 57 DEBUG(1,(__location__ ": Unable to find ntds_dn: %s\n", ldb_errstring(s->samdb))); 58 talloc_free(tmp_ctx); 59 return WERR_DS_DRA_INTERNAL_ERROR; 60 } 61 62 ret = dsdb_search_dn(s->samdb, tmp_ctx, &res, ntds_dn, attrs, DSDB_SEARCH_SHOW_EXTENDED_DN); 51 63 if (ret != LDB_SUCCESS) { 52 DEBUG(1,("Searching for namingContexts in rootDSEfailed: %s\n", ldb_errstring(s->samdb)));64 DEBUG(1,("Searching for hasMasterNCs in NTDS DN failed: %s\n", ldb_errstring(s->samdb))); 53 65 talloc_free(tmp_ctx); 54 66 return WERR_DS_DRA_INTERNAL_ERROR; 55 } 56 57 el = ldb_msg_find_element(res->msgs[0], "namingContexts"); 58 if (!el) { 59 DEBUG(1,("Finding namingContexts element in root_res failed: %s\n", 60 ldb_errstring(s->samdb))); 61 talloc_free(tmp_ctx); 62 return WERR_DS_DRA_INTERNAL_ERROR; 63 } 64 65 for (i=0; i<el->num_values; i++) { 66 struct ldb_dn *pdn; 67 struct dreplsrv_partition *p; 68 69 pdn = ldb_dn_from_ldb_val(tmp_ctx, s->samdb, &el->values[i]); 70 if (pdn == NULL) { 71 talloc_free(tmp_ctx); 72 return WERR_DS_DRA_INTERNAL_ERROR; 73 } 74 if (!ldb_dn_validate(pdn)) { 75 return WERR_DS_DRA_INTERNAL_ERROR; 76 } 77 78 p = talloc_zero(s, struct dreplsrv_partition); 79 W_ERROR_HAVE_NO_MEMORY(p); 80 81 p->dn = talloc_steal(p, pdn); 82 83 DLIST_ADD(s->partitions, p); 84 85 DEBUG(2, ("dreplsrv_partition[%s] loaded\n", ldb_dn_get_linearized(p->dn))); 67 } 68 69 for (a=0; attrs[a]; a++) { 70 int i; 71 72 el = ldb_msg_find_element(res->msgs[0], attrs[a]); 73 if (el == NULL) { 74 continue; 75 } 76 for (i=0; i<el->num_values; i++) { 77 struct ldb_dn *pdn; 78 struct dreplsrv_partition *p, *tp; 79 bool found; 80 81 pdn = ldb_dn_from_ldb_val(tmp_ctx, s->samdb, &el->values[i]); 82 if (pdn == NULL) { 83 talloc_free(tmp_ctx); 84 return WERR_DS_DRA_INTERNAL_ERROR; 85 } 86 if (!ldb_dn_validate(pdn)) { 87 return WERR_DS_DRA_INTERNAL_ERROR; 88 } 89 90 p = talloc_zero(s, struct dreplsrv_partition); 91 W_ERROR_HAVE_NO_MEMORY(p); 92 93 p->dn = talloc_steal(p, pdn); 94 p->service = s; 95 96 if (strcasecmp(attrs[a], "hasPartialReplicaNCs") == 0) { 97 p->partial_replica = true; 98 } else if (strcasecmp(attrs[a], "msDS-HasFullReplicaNCs") == 0) { 99 p->rodc_replica = true; 100 } 101 102 /* Do not add partitions more than once */ 103 found = false; 104 for (tp = s->partitions; tp; tp = tp->next) { 105 if (ldb_dn_compare(tp->dn, p->dn) == 0) { 106 found = true; 107 break; 108 } 109 } 110 if (found) { 111 talloc_free(p); 112 continue; 113 } 114 115 DLIST_ADD(s->partitions, p); 116 DEBUG(2, ("dreplsrv_partition[%s] loaded\n", ldb_dn_get_linearized(p->dn))); 117 } 86 118 } 87 119 … … 92 124 93 
125 return WERR_OK; 126 } 127 128 /* 129 Check if particular SPN exists for an account 130 */ 131 static bool dreplsrv_spn_exists(struct ldb_context *samdb, struct ldb_dn *account_dn, 132 const char *principal_name) 133 { 134 TALLOC_CTX *tmp_ctx; 135 const char *attrs_empty[] = { NULL }; 136 int ret; 137 struct ldb_result *res; 138 139 tmp_ctx = talloc_new(samdb); 140 141 ret = dsdb_search(samdb, tmp_ctx, &res, account_dn, LDB_SCOPE_BASE, attrs_empty, 142 0, "servicePrincipalName=%s", 143 ldb_binary_encode_string(tmp_ctx, principal_name)); 144 if (ret != LDB_SUCCESS || res->count != 1) { 145 talloc_free(tmp_ctx); 146 return false; 147 } 148 149 talloc_free(tmp_ctx); 150 return true; 94 151 } 95 152 … … 97 154 work out the principal to use for DRS replication connections 98 155 */ 99 NTSTATUS dreplsrv_get_target_principal(struct dreplsrv_service *s,100 101 102 constchar **target_principal)156 static NTSTATUS dreplsrv_get_target_principal(struct dreplsrv_service *s, 157 TALLOC_CTX *mem_ctx, 158 const struct repsFromTo1 *rft, 159 char **target_principal) 103 160 { 104 161 TALLOC_CTX *tmp_ctx; 105 162 struct ldb_result *res; 106 const char *attrs[] = { "dNSHostName", NULL }; 163 const char *attrs_server[] = { "dNSHostName", "serverReference", NULL }; 164 const char *attrs_ntds[] = { "msDS-HasDomainNCs", "hasMasterNCs", NULL }; 107 165 int ret; 108 const char *hostname; 109 struct ldb_dn *dn; 166 const char *hostname, *dnsdomain=NULL; 167 struct ldb_dn *ntds_dn, *server_dn, *computer_dn; 168 struct ldb_dn *forest_dn, *nc_dn; 110 169 111 170 *target_principal = NULL; … … 114 173 115 174 /* we need to find their hostname */ 116 ret = dsdb_find_dn_by_guid(s->samdb, tmp_ctx, &rft->source_dsa_obj_guid, &dn);175 ret = dsdb_find_dn_by_guid(s->samdb, tmp_ctx, &rft->source_dsa_obj_guid, 0, &ntds_dn); 117 176 if (ret != LDB_SUCCESS) { 118 177 talloc_free(tmp_ctx); … … 121 180 } 122 181 182 server_dn = ldb_dn_copy(tmp_ctx, ntds_dn); 183 if (server_dn == NULL) { 184 talloc_free(tmp_ctx); 185 return NT_STATUS_OK; 186 } 187 123 188 /* strip off the NTDS Settings */ 124 if (!ldb_dn_remove_child_components( dn, 1)) {189 if (!ldb_dn_remove_child_components(server_dn, 1)) { 125 190 talloc_free(tmp_ctx); 126 191 return NT_STATUS_OK; 127 192 } 128 193 129 ret = dsdb_search_dn(s->samdb, tmp_ctx, &res, dn, attrs, 0);194 ret = dsdb_search_dn(s->samdb, tmp_ctx, &res, server_dn, attrs_server, 0); 130 195 if (ret != LDB_SUCCESS) { 131 196 talloc_free(tmp_ctx); 132 /* its OK for their accountDN not to be in our database */197 /* its OK for their server DN not to be in our database */ 133 198 return NT_STATUS_OK; 134 199 } 135 200 201 forest_dn = ldb_get_root_basedn(s->samdb); 202 if (forest_dn == NULL) { 203 talloc_free(tmp_ctx); 204 return NT_STATUS_OK; 205 } 206 136 207 hostname = ldb_msg_find_attr_as_string(res->msgs[0], "dNSHostName", NULL); 137 if (hostname == NULL) { 138 talloc_free(tmp_ctx); 139 /* its OK to not have a dnshostname */ 208 computer_dn = ldb_msg_find_attr_as_dn(s->samdb, tmp_ctx, res->msgs[0], "serverReference"); 209 if (hostname != NULL && computer_dn != NULL) { 210 char *local_principal; 211 212 /* 213 if we have the dNSHostName attribute then we can use 214 the GC/hostname/realm SPN. All DCs should have this SPN 215 216 Windows DC may set up it's dNSHostName before setting up 217 GC/xx/xx SPN. So make sure it exists, before using it. 
218 */ 219 local_principal = talloc_asprintf(mem_ctx, "GC/%s/%s", 220 hostname, 221 samdb_dn_to_dns_domain(tmp_ctx, forest_dn)); 222 if (dreplsrv_spn_exists(s->samdb, computer_dn, local_principal)) { 223 *target_principal = local_principal; 224 talloc_free(tmp_ctx); 225 return NT_STATUS_OK; 226 } 227 228 talloc_free(local_principal); 229 } 230 231 /* 232 if we can't find the dNSHostName then we will try for the 233 E3514235-4B06-11D1-AB04-00C04FC2DCD2/${NTDSGUID}/${DNSDOMAIN} 234 SPN. To use that we need the DNS domain name of the target 235 DC. We find that by first looking for the msDS-HasDomainNCs 236 in the NTDSDSA object of the DC, and if we don't find that, 237 then we look for the hasMasterNCs attribute, and eliminate 238 the known schema and configuruation DNs. Despite how 239 bizarre this seems, Hongwei tells us that this is in fact 240 what windows does to find the SPN!! 241 */ 242 ret = dsdb_search_dn(s->samdb, tmp_ctx, &res, ntds_dn, attrs_ntds, 0); 243 if (ret != LDB_SUCCESS) { 244 talloc_free(tmp_ctx); 140 245 return NT_STATUS_OK; 141 246 } 142 247 143 /* All DCs have the GC/hostname/realm name, but if some of the 144 * preconditions are not satisfied, then we will fall back to 145 * the 146 * E3514235-4B06-11D1-AB04-00C04FC2DCD2/${NTDSGUID}/${DNSDOMAIN} 147 * name. This means that if a AD server has a dnsHostName set 148 * on it's record, it must also have GC/hostname/realm 149 * servicePrincipalName */ 150 151 *target_principal = talloc_asprintf(mem_ctx, "GC/%s/%s", 152 hostname, 153 lpcfg_dnsdomain(s->task->lp_ctx)); 248 nc_dn = ldb_msg_find_attr_as_dn(s->samdb, tmp_ctx, res->msgs[0], "msDS-HasDomainNCs"); 249 if (nc_dn != NULL) { 250 dnsdomain = samdb_dn_to_dns_domain(tmp_ctx, nc_dn); 251 } 252 253 if (dnsdomain == NULL) { 254 struct ldb_message_element *el; 255 int i; 256 el = ldb_msg_find_element(res->msgs[0], "hasMasterNCs"); 257 for (i=0; el && i<el->num_values; i++) { 258 nc_dn = ldb_dn_from_ldb_val(tmp_ctx, s->samdb, &el->values[i]); 259 if (nc_dn == NULL || 260 ldb_dn_compare(ldb_get_config_basedn(s->samdb), nc_dn) == 0 || 261 ldb_dn_compare(ldb_get_schema_basedn(s->samdb), nc_dn) == 0) { 262 continue; 263 } 264 /* it must be a domain DN, get the equivalent 265 DNS domain name */ 266 dnsdomain = samdb_dn_to_dns_domain(tmp_ctx, nc_dn); 267 break; 268 } 269 } 270 271 if (dnsdomain != NULL) { 272 *target_principal = talloc_asprintf(mem_ctx, 273 "E3514235-4B06-11D1-AB04-00C04FC2DCD2/%s/%s@%s", 274 GUID_string(tmp_ctx, &rft->source_dsa_obj_guid), 275 dnsdomain, dnsdomain); 276 } 277 154 278 talloc_free(tmp_ctx); 155 279 return NT_STATUS_OK; … … 174 298 hostname = rft->other_info->dns_name; 175 299 176 for (cur = s->connections; cur; cur = cur->next) { 177 if (strcmp(cur->binding->host, hostname) == 0) { 300 for (cur = s->connections; cur; cur = cur->next) { 301 const char *host; 302 303 host = dcerpc_binding_get_string_option(cur->binding, "host"); 304 if (host == NULL) { 305 continue; 306 } 307 308 if (strcmp(host, hostname) == 0) { 178 309 conn = cur; 179 310 break; … … 184 315 NTSTATUS nt_status; 185 316 char *binding_str; 317 char *target_principal = NULL; 186 318 187 319 conn = talloc_zero(s, struct dreplsrv_out_connection); … … 201 333 /* use the GC principal for DRS replication */ 202 334 nt_status = dreplsrv_get_target_principal(s, conn->binding, 203 rft, & conn->binding->target_principal);335 rft, &target_principal); 204 336 if (!NT_STATUS_IS_OK(nt_status)) { 205 337 return ntstatus_to_werror(nt_status); 206 338 } 207 339 208 DLIST_ADD_END(s->connections, conn, 
struct dreplsrv_out_connection *); 209 210 DEBUG(4,("dreplsrv_out_connection_attach(%s): create\n", conn->binding->host)); 340 nt_status = dcerpc_binding_set_string_option(conn->binding, 341 "target_principal", 342 target_principal); 343 TALLOC_FREE(target_principal); 344 if (!NT_STATUS_IS_OK(nt_status)) { 345 return ntstatus_to_werror(nt_status); 346 } 347 348 DLIST_ADD_END(s->connections, conn); 349 350 DEBUG(4,("dreplsrv_out_connection_attach(%s): create\n", hostname)); 211 351 } else { 212 DEBUG(4,("dreplsrv_out_connection_attach(%s): attach\n", conn->binding->host));352 DEBUG(4,("dreplsrv_out_connection_attach(%s): attach\n", hostname)); 213 353 } 214 354 … … 225 365 struct dreplsrv_partition_source_dsa *s; 226 366 for (s=list; s; s=s->next) { 227 if (GUID_ compare(&s->repsFrom1->source_dsa_obj_guid, guid) == 0) {367 if (GUID_equal(&s->repsFrom1->source_dsa_obj_guid, guid)) { 228 368 return s; 229 369 } … … 276 416 /* re-use an existing source if found */ 277 417 for (s2=*listp; s2; s2=s2->next) { 278 if (GUID_ compare(&s2->repsFrom1->source_dsa_obj_guid,279 &source->repsFrom1->source_dsa_obj_guid) == 0) {418 if (GUID_equal(&s2->repsFrom1->source_dsa_obj_guid, 419 &source->repsFrom1->source_dsa_obj_guid)) { 280 420 talloc_free(s2->repsFrom1->other_info); 281 421 *s2->repsFrom1 = *source->repsFrom1; … … 286 426 } 287 427 288 DLIST_ADD_END(*listp, source , struct dreplsrv_partition_source_dsa *);428 DLIST_ADD_END(*listp, source); 289 429 return WERR_OK; 290 430 } 291 431 432 /** 433 * Find a partition when given a NC 434 * If the NC can't be found it will return BAD_NC 435 * Initial checks for invalid parameters have to be done beforehand 436 */ 292 437 WERROR dreplsrv_partition_find_for_nc(struct dreplsrv_service *s, 293 conststruct GUID *nc_guid,294 conststruct dom_sid *nc_sid,438 struct GUID *nc_guid, 439 struct dom_sid *nc_sid, 295 440 const char *nc_dn_str, 296 441 struct dreplsrv_partition **_p) … … 306 451 valid_guid = nc_guid && !GUID_all_zero(nc_guid); 307 452 308 if (!valid_sid && !valid_guid && !nc_dn_str) {309 return WERR_DS_DRA_ INVALID_PARAMETER;453 if (!valid_sid && !valid_guid && (!nc_dn_str)) { 454 return WERR_DS_DRA_BAD_NC; 310 455 } 311 456 … … 315 460 || (valid_sid && dom_sid_equal(&p->nc.sid, nc_sid))) 316 461 { 462 /* fill in he right guid and sid if possible */ 463 if (nc_guid && !valid_guid) { 464 dsdb_get_extended_dn_guid(p->dn, nc_guid, "GUID"); 465 } 466 if (nc_sid && !valid_sid) { 467 dsdb_get_extended_dn_sid(p->dn, nc_sid, "SID"); 468 } 317 469 *_p = p; 318 470 return WERR_OK; … … 363 515 364 516 517 /* 518 create a temporary dsa structure for a replication. 
This is needed 519 for the initial replication of a new partition, such as when a new 520 domain NC is created and we are a global catalog server 521 */ 522 WERROR dreplsrv_partition_source_dsa_temporary(struct dreplsrv_partition *p, 523 TALLOC_CTX *mem_ctx, 524 const struct GUID *dsa_guid, 525 struct dreplsrv_partition_source_dsa **_dsa) 526 { 527 struct dreplsrv_partition_source_dsa *dsa; 528 WERROR werr; 529 530 dsa = talloc_zero(mem_ctx, struct dreplsrv_partition_source_dsa); 531 W_ERROR_HAVE_NO_MEMORY(dsa); 532 533 dsa->partition = p; 534 dsa->repsFrom1 = &dsa->_repsFromBlob.ctr.ctr1; 535 dsa->repsFrom1->replica_flags = 0; 536 dsa->repsFrom1->source_dsa_obj_guid = *dsa_guid; 537 538 dsa->repsFrom1->other_info = talloc_zero(dsa, struct repsFromTo1OtherInfo); 539 W_ERROR_HAVE_NO_MEMORY(dsa->repsFrom1->other_info); 540 541 dsa->repsFrom1->other_info->dns_name = samdb_ntds_msdcs_dns_name(p->service->samdb, 542 dsa->repsFrom1->other_info, dsa_guid); 543 W_ERROR_HAVE_NO_MEMORY(dsa->repsFrom1->other_info->dns_name); 544 545 werr = dreplsrv_out_connection_attach(p->service, dsa->repsFrom1, &dsa->conn); 546 if (!W_ERROR_IS_OK(werr)) { 547 DEBUG(0,(__location__ ": Failed to attach connection to %s\n", 548 ldb_dn_get_linearized(p->dn))); 549 talloc_free(dsa); 550 return werr; 551 } 552 553 *_dsa = dsa; 554 555 return WERR_OK; 556 } 557 558 365 559 static WERROR dreplsrv_refresh_partition(struct dreplsrv_service *s, 366 560 struct dreplsrv_partition *p) 367 561 { 368 562 WERROR status; 369 struct dom_sid *nc_sid;563 NTSTATUS ntstatus; 370 564 struct ldb_message_element *orf_el = NULL; 371 struct ldb_result *r ;565 struct ldb_result *r = NULL; 372 566 unsigned int i; 373 567 int ret; 374 568 TALLOC_CTX *mem_ctx = talloc_new(p); 375 569 static const char *attrs[] = { 376 "objectSid",377 "objectGUID",378 570 "repsFrom", 379 571 "repsTo", 380 572 NULL 381 573 }; 574 struct ldb_dn *dn; 382 575 383 576 DEBUG(4, ("dreplsrv_refresh_partition(%s)\n", 384 577 ldb_dn_get_linearized(p->dn))); 385 578 386 ret = ldb_search(s->samdb, mem_ctx, &r, p->dn, LDB_SCOPE_BASE, attrs, 387 "(objectClass=*)"); 388 if (ret != LDB_SUCCESS) { 579 ret = dsdb_search_dn(s->samdb, mem_ctx, &r, p->dn, attrs, DSDB_SEARCH_SHOW_EXTENDED_DN); 580 if (ret == LDB_ERR_NO_SUCH_OBJECT) { 581 /* we haven't replicated the partition yet, but we 582 * can fill in the guid, sid etc from the partition DN */ 583 dn = p->dn; 584 } else if (ret != LDB_SUCCESS) { 389 585 talloc_free(mem_ctx); 390 586 return WERR_FOOBAR; 587 } else { 588 dn = r->msgs[0]->dn; 391 589 } 392 590 393 591 talloc_free(discard_const(p->nc.dn)); 394 592 ZERO_STRUCT(p->nc); 395 p->nc.dn = ldb_dn_alloc_linearized(p, p->dn);593 p->nc.dn = ldb_dn_alloc_linearized(p, dn); 396 594 W_ERROR_HAVE_NO_MEMORY(p->nc.dn); 397 p->nc.guid = samdb_result_guid(r->msgs[0], "objectGUID"); 398 nc_sid = samdb_result_dom_sid(p, r->msgs[0], "objectSid"); 399 if (nc_sid) { 400 p->nc.sid = *nc_sid; 401 talloc_free(nc_sid); 402 } 595 ntstatus = dsdb_get_extended_dn_guid(dn, &p->nc.guid, "GUID"); 596 if (!NT_STATUS_IS_OK(ntstatus)) { 597 DEBUG(0,(__location__ ": unable to get GUID for %s: %s\n", 598 p->nc.dn, nt_errstr(ntstatus))); 599 talloc_free(mem_ctx); 600 return WERR_DS_DRA_INTERNAL_ERROR; 601 } 602 dsdb_get_extended_dn_sid(dn, &p->nc.sid, "SID"); 403 603 404 604 talloc_free(p->uptodatevector.cursors); … … 412 612 } 413 613 414 orf_el = ldb_msg_find_element(r->msgs[0], "repsFrom"); 415 if (orf_el) { 614 status = WERR_OK; 615 616 if (r != NULL && (orf_el = ldb_msg_find_element(r->msgs[0], 
"repsFrom"))) { 416 617 for (i=0; i < orf_el->num_values; i++) { 417 status = dreplsrv_partition_add_source_dsa(s, p, &p->sources, 618 status = dreplsrv_partition_add_source_dsa(s, p, &p->sources, 418 619 NULL, &orf_el->values[i]); 419 W_ERROR_NOT_OK_RETURN(status); 420 } 421 } 422 423 orf_el = ldb_msg_find_element(r->msgs[0], "repsTo"); 424 if (orf_el) { 620 W_ERROR_NOT_OK_GOTO_DONE(status); 621 } 622 } 623 624 if (r != NULL && (orf_el = ldb_msg_find_element(r->msgs[0], "repsTo"))) { 425 625 for (i=0; i < orf_el->num_values; i++) { 426 status = dreplsrv_partition_add_source_dsa(s, p, &p->notifies, 626 status = dreplsrv_partition_add_source_dsa(s, p, &p->notifies, 427 627 p->sources, &orf_el->values[i]); 428 W_ERROR_NOT_OK_RETURN(status); 429 } 430 } 431 628 W_ERROR_NOT_OK_GOTO_DONE(status); 629 } 630 } 631 632 done: 432 633 talloc_free(mem_ctx); 433 434 return WERR_OK; 634 return status; 435 635 } 436 636 -
vendor/current/source4/dsdb/repl/drepl_periodic.c
r740 r988 75 75 service->periodic.next_event = next_time; 76 76 77 new_te = event_add_timed(service->task->event_ctx, service,77 new_te = tevent_add_timer(service->task->event_ctx, service, 78 78 service->periodic.next_event, 79 79 dreplsrv_periodic_handler_te, service); … … 134 134 } 135 135 } 136 137 static void dreplsrv_pending_run(struct dreplsrv_service *service); 138 139 static void dreplsrv_pending_handler_te(struct tevent_context *ev, struct tevent_timer *te, 140 struct timeval t, void *ptr) 141 { 142 struct dreplsrv_service *service = talloc_get_type(ptr, struct dreplsrv_service); 143 144 service->pending.te = NULL; 145 146 dreplsrv_pending_run(service); 147 } 148 149 WERROR dreplsrv_pendingops_schedule(struct dreplsrv_service *service, uint32_t next_interval) 150 { 151 TALLOC_CTX *tmp_mem; 152 struct tevent_timer *new_te; 153 struct timeval next_time; 154 155 /* prevent looping */ 156 if (next_interval == 0) { 157 next_interval = 1; 158 } 159 160 next_time = timeval_current_ofs(next_interval, 50); 161 162 if (service->pending.te) { 163 /* 164 * if the timestamp of the new event is higher, 165 * as current next we don't need to reschedule 166 */ 167 if (timeval_compare(&next_time, &service->pending.next_event) > 0) { 168 return WERR_OK; 169 } 170 } 171 172 /* reset the next scheduled timestamp */ 173 service->pending.next_event = next_time; 174 175 new_te = tevent_add_timer(service->task->event_ctx, service, 176 service->pending.next_event, 177 dreplsrv_pending_handler_te, service); 178 W_ERROR_HAVE_NO_MEMORY(new_te); 179 180 tmp_mem = talloc_new(service); 181 DEBUG(4,("dreplsrv_pending_schedule(%u) %sscheduled for: %s\n", 182 next_interval, 183 (service->pending.te?"re":""), 184 nt_time_string(tmp_mem, timeval_to_nttime(&next_time)))); 185 talloc_free(tmp_mem); 186 187 talloc_free(service->pending.te); 188 service->pending.te = new_te; 189 190 return WERR_OK; 191 } 192 193 static void dreplsrv_pending_run(struct dreplsrv_service *service) 194 { 195 dreplsrv_run_pending_ops(service); 196 } -
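The new dreplsrv_pendingops_schedule() keeps a single timer for pending operations and only replaces it when the requested time is earlier than what is already queued. The libtevent sketch below mirrors that rule; the pending_state struct and the timeval_earlier() helper are illustrative stand-ins for the service state and Samba's timeval_compare().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <talloc.h>
#include <tevent.h>

struct pending_state {
    struct tevent_timer *te;
    struct timeval next_event;
};

static bool timeval_earlier(const struct timeval *a, const struct timeval *b)
{
    if (a->tv_sec != b->tv_sec) {
        return a->tv_sec < b->tv_sec;
    }
    return a->tv_usec < b->tv_usec;
}

static void pending_handler(struct tevent_context *ev, struct tevent_timer *te,
                            struct timeval now, void *private_data)
{
    struct pending_state *pending = talloc_get_type_abort(private_data,
                                                          struct pending_state);
    pending->te = NULL;
    printf("running pending ops\n");
}

static int pendingops_schedule(struct tevent_context *ev,
                               struct pending_state *pending,
                               uint32_t next_interval)
{
    struct timeval next_time;

    if (next_interval == 0) {
        next_interval = 1;      /* prevent looping */
    }
    next_time = tevent_timeval_current_ofs(next_interval, 0);

    /* an earlier event is already queued: nothing to do */
    if (pending->te != NULL &&
        timeval_earlier(&pending->next_event, &next_time)) {
        return 0;
    }

    pending->next_event = next_time;

    /* freeing the old timer cancels it; then arm the new one */
    TALLOC_FREE(pending->te);
    pending->te = tevent_add_timer(ev, pending, next_time,
                                   pending_handler, pending);
    return (pending->te != NULL) ? 0 : -1;
}

int main(void)
{
    TALLOC_CTX *mem_ctx = talloc_new(NULL);
    struct tevent_context *ev = tevent_context_init(mem_ctx);
    struct pending_state *pending = talloc_zero(mem_ctx, struct pending_state);

    pendingops_schedule(ev, pending, 5);
    pendingops_schedule(ev, pending, 1);    /* earlier: reschedules */
    pendingops_schedule(ev, pending, 10);   /* later: keeps the 1s timer */

    tevent_loop_once(ev);                   /* fires the pending handler */
    talloc_free(mem_ctx);
    return 0;
}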
vendor/current/source4/dsdb/repl/drepl_ridalloc.c
r740 r988 96 96 *_alloc_pool = UINT64_MAX; 97 97 98 server_dn = ldb_dn_get_parent(tmp_ctx, samdb_ntds_settings_dn(ldb ));98 server_dn = ldb_dn_get_parent(tmp_ctx, samdb_ntds_settings_dn(ldb, tmp_ctx)); 99 99 if (!server_dn) { 100 100 talloc_free(tmp_ctx); … … 104 104 ret = samdb_reference_dn(ldb, tmp_ctx, server_dn, "serverReference", &machine_dn); 105 105 if (ret != LDB_SUCCESS) { 106 DEBUG(0,(__location__ ": Failed to find serverReference in %s - %s ",106 DEBUG(0,(__location__ ": Failed to find serverReference in %s - %s\n", 107 107 ldb_dn_get_linearized(server_dn), ldb_errstring(ldb))); 108 108 talloc_free(tmp_ctx); … … 118 118 } 119 119 if (ret != LDB_SUCCESS) { 120 DEBUG(0,(__location__ ": Failed to find rIDSetReferences in %s - %s ",120 DEBUG(0,(__location__ ": Failed to find rIDSetReferences in %s - %s\n", 121 121 ldb_dn_get_linearized(machine_dn), ldb_errstring(ldb))); 122 122 talloc_free(tmp_ctx); … … 126 126 ret = ldb_search(ldb, tmp_ctx, &res, rid_set_dn, LDB_SCOPE_BASE, attrs, NULL); 127 127 if (ret != LDB_SUCCESS) { 128 DEBUG(0,(__location__ ": Failed to load RID Set attrs from %s - %s ",128 DEBUG(0,(__location__ ": Failed to load RID Set attrs from %s - %s\n", 129 129 ldb_dn_get_linearized(rid_set_dn), ldb_errstring(ldb))); 130 130 talloc_free(tmp_ctx); … … 169 169 int ret; 170 170 uint64_t alloc_pool; 171 bool is_us; 171 172 172 173 if (service->am_rodc) { … … 195 196 ret = samdb_rid_manager_dn(ldb, tmp_ctx, &rid_manager_dn); 196 197 if (ret != LDB_SUCCESS) { 197 DEBUG(0, (__location__ ": Failed to find RID Manager object - %s ", ldb_errstring(ldb)));198 DEBUG(0, (__location__ ": Failed to find RID Manager object - %s\n", ldb_errstring(ldb))); 198 199 talloc_free(tmp_ctx); 199 200 return WERR_DS_DRA_INTERNAL_ERROR; … … 203 204 ret = samdb_reference_dn(ldb, tmp_ctx, rid_manager_dn, "fSMORoleOwner", &fsmo_role_dn); 204 205 if (ret != LDB_SUCCESS) { 205 DEBUG(0,(__location__ ": Failed to find fSMORoleOwner in RID Manager object - %s ",206 DEBUG(0,(__location__ ": Failed to find fSMORoleOwner in RID Manager object - %s\n", 206 207 ldb_errstring(ldb))); 207 208 talloc_free(tmp_ctx); … … 209 210 } 210 211 211 if (ldb_dn_compare(samdb_ntds_settings_dn(ldb), fsmo_role_dn) == 0) { 212 ret = samdb_dn_is_our_ntdsa(ldb, fsmo_role_dn, &is_us); 213 if (ret != LDB_SUCCESS) { 214 DEBUG(0,(__location__ ": Failed to find detrmine if %s is our ntdsDsa object - %s\n", 215 ldb_dn_get_linearized(fsmo_role_dn), ldb_errstring(ldb))); 216 talloc_free(tmp_ctx); 217 return WERR_DS_DRA_INTERNAL_ERROR; 218 } 219 220 if (is_us) { 212 221 /* we are the RID Manager - no need to do a 213 222 DRSUAPI_EXOP_FSMO_RID_ALLOC */ … … 237 246 /* called by the samldb ldb module to tell us to ask for a new RID 238 247 pool */ 239 void dreplsrv_allocate_rid(struct messaging_context *msg, void *private_data,248 void dreplsrv_allocate_rid(struct imessaging_context *msg, void *private_data, 240 249 uint32_t msg_type, 241 250 struct server_id server_id, DATA_BLOB *data) -
vendor/current/source4/dsdb/repl/drepl_service.c
r740 r988 34 34 #include "librpc/gen_ndr/ndr_irpc.h" 35 35 #include "param/param.h" 36 #include "libds/common/roles.h" 36 37 37 38 /** … … 307 308 &req1->source_dsa_guid, 308 309 &dsa); 310 if (W_ERROR_EQUAL(werr, WERR_DS_DRA_NO_REPLICA)) { 311 /* we don't have this source setup as 312 a replication partner. Create a 313 temporary dsa structure for this 314 replication */ 315 werr = dreplsrv_partition_source_dsa_temporary(p, 316 msg, 317 &req1->source_dsa_guid, 318 &dsa); 319 } 309 320 } 310 321 if (!W_ERROR_IS_OK(werr)) { … … 329 340 * replication as soon as possible 330 341 */ 331 dreplsrv_pe riodic_schedule(service, 0);342 dreplsrv_pendingops_schedule(service, 0); 332 343 333 344 done: … … 353 364 } 354 365 355 static NTSTATUS drepl_take_FSMO_role(struct irpc_message *msg,356 struct drepl_takeFSMORole *r)357 {358 struct dreplsrv_service *service = talloc_get_type(msg->private_data,359 struct dreplsrv_service);360 r->out.result = dreplsrv_fsmo_role_check(service, r->in.role);361 return NT_STATUS_OK;362 }363 364 366 /** 365 367 * Called when the auth code wants us to try and replicate … … 434 436 false); 435 437 return; 436 case ROLE_ DOMAIN_CONTROLLER:438 case ROLE_ACTIVE_DIRECTORY_DC: 437 439 /* Yes, we want DSDB replication */ 438 440 break; … … 507 509 IRPC_REGISTER(task->msg_ctx, irpc, DREPL_TAKEFSMOROLE, drepl_take_FSMO_role, service); 508 510 IRPC_REGISTER(task->msg_ctx, irpc, DREPL_TRIGGER_REPL_SECRET, drepl_trigger_repl_secret, service); 509 messaging_register(task->msg_ctx, service, MSG_DREPL_ALLOCATE_RID, dreplsrv_allocate_rid);511 imessaging_register(task->msg_ctx, service, MSG_DREPL_ALLOCATE_RID, dreplsrv_allocate_rid); 510 512 } 511 513 -
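The DsReplicaSync path now tolerates partners that are not in the configured sources list: a WERR_DS_DRA_NO_REPLICA lookup result triggers dreplsrv_partition_source_dsa_temporary() instead of failing. A minimal sketch of that fall-back control flow; the enum, lookup and constructor below are illustrative stand-ins, not the Samba API.

#include <stdio.h>

enum repl_status { REPL_OK, REPL_NO_REPLICA, REPL_INTERNAL_ERROR };

struct source_dsa { const char *name; };

static enum repl_status source_dsa_by_guid(const char *guid, struct source_dsa *out)
{
    (void)guid;
    (void)out;
    return REPL_NO_REPLICA;           /* pretend the partner is unknown */
}

static enum repl_status source_dsa_temporary(const char *guid, struct source_dsa *out)
{
    out->name = guid;                 /* build a one-off entry for this sync */
    return REPL_OK;
}

int main(void)
{
    struct source_dsa dsa = { NULL };
    enum repl_status status;

    status = source_dsa_by_guid("partner-dsa-guid", &dsa);
    if (status == REPL_NO_REPLICA) {
        /* not a configured partner: create a temporary dsa instead */
        status = source_dsa_temporary("partner-dsa-guid", &dsa);
    }
    if (status != REPL_OK) {
        return 1;
    }
    printf("replicating from %s\n", dsa.name);
    return 0;
}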
vendor/current/source4/dsdb/repl/drepl_service.h
r740 r988 53 53 /* the out going connection to the source dsa */ 54 54 struct dreplsrv_drsuapi_connection *drsuapi; 55 56 /* used to force the GC principal name */57 const char *principal_name;58 55 }; 59 56 … … 110 107 struct dreplsrv_partition_source_dsa *notifies; 111 108 112 bool incoming_only; 109 bool partial_replica; 110 bool rodc_replica; 113 111 }; 114 112 … … 190 188 } periodic; 191 189 190 /* some stuff for running only the pendings ops */ 191 struct { 192 /* 193 * the interval between notify runs 194 */ 195 uint32_t interval; 196 197 /* 198 * the timestamp for the next event, 199 * this is the timstamp passed to event_add_timed() 200 */ 201 struct timeval next_event; 202 203 /* here we have a reference to the timed event the schedules the notifies */ 204 struct tevent_timer *te; 205 } pending; 206 192 207 /* some stuff for notify processing */ 193 208 struct { -
vendor/current/source4/dsdb/repl/replicated_objects.c
r740 r988 32 32 #include "param/param.h" 33 33 34 static WERROR dsdb_repl_merge_working_schema(struct ldb_context *ldb, 35 struct dsdb_schema *dest_schema, 36 const struct dsdb_schema *ref_schema) 37 { 38 const struct dsdb_class *cur_class = NULL; 39 const struct dsdb_attribute *cur_attr = NULL; 40 int ret; 41 42 for (cur_class = ref_schema->classes; 43 cur_class; 44 cur_class = cur_class->next) 45 { 46 const struct dsdb_class *tmp1; 47 struct dsdb_class *tmp2; 48 49 tmp1 = dsdb_class_by_governsID_id(dest_schema, 50 cur_class->governsID_id); 51 if (tmp1 != NULL) { 52 continue; 53 } 54 55 /* 56 * Do a shallow copy so that original next and prev are 57 * not modified, we don't need to do a deep copy 58 * as the rest won't be modified and this is for 59 * a short lived object. 60 */ 61 tmp2 = talloc(dest_schema, struct dsdb_class); 62 if (tmp2 == NULL) { 63 return WERR_NOMEM; 64 } 65 *tmp2 = *cur_class; 66 DLIST_ADD(dest_schema->classes, tmp2); 67 } 68 69 for (cur_attr = ref_schema->attributes; 70 cur_attr; 71 cur_attr = cur_attr->next) 72 { 73 const struct dsdb_attribute *tmp1; 74 struct dsdb_attribute *tmp2; 75 76 tmp1 = dsdb_attribute_by_attributeID_id(dest_schema, 77 cur_attr->attributeID_id); 78 if (tmp1 != NULL) { 79 continue; 80 } 81 82 /* 83 * Do a shallow copy so that original next and prev are 84 * not modified, we don't need to do a deep copy 85 * as the rest won't be modified and this is for 86 * a short lived object. 87 */ 88 tmp2 = talloc(dest_schema, struct dsdb_attribute); 89 if (tmp2 == NULL) { 90 return WERR_NOMEM; 91 } 92 *tmp2 = *cur_attr; 93 DLIST_ADD(dest_schema->attributes, tmp2); 94 } 95 96 ret = dsdb_setup_sorted_accessors(ldb, dest_schema); 97 if (LDB_SUCCESS != ret) { 98 DEBUG(0,("Failed to add new attribute to reference schema!\n")); 99 return WERR_INTERNAL_ERROR; 100 } 101 102 return WERR_OK; 103 } 104 105 WERROR dsdb_repl_resolve_working_schema(struct ldb_context *ldb, 106 TALLOC_CTX *mem_ctx, 107 struct dsdb_schema_prefixmap *pfm_remote, 108 uint32_t cycle_before_switching, 109 struct dsdb_schema *initial_schema, 110 struct dsdb_schema *resulting_schema, 111 uint32_t object_count, 112 const struct drsuapi_DsReplicaObjectListItemEx *first_object) 113 { 114 struct schema_list { 115 struct schema_list *next, *prev; 116 const struct drsuapi_DsReplicaObjectListItemEx *obj; 117 }; 118 struct schema_list *schema_list = NULL, *schema_list_item, *schema_list_next_item; 119 WERROR werr; 120 struct dsdb_schema *working_schema; 121 const struct drsuapi_DsReplicaObjectListItemEx *cur; 122 DATA_BLOB empty_key = data_blob_null; 123 int ret, pass_no; 124 uint32_t ignore_attids[] = { 125 DRSUAPI_ATTID_auxiliaryClass, 126 DRSUAPI_ATTID_mayContain, 127 DRSUAPI_ATTID_mustContain, 128 DRSUAPI_ATTID_possSuperiors, 129 DRSUAPI_ATTID_systemPossSuperiors, 130 DRSUAPI_ATTID_INVALID 131 }; 132 133 /* create a list of objects yet to be converted */ 134 for (cur = first_object; cur; cur = cur->next_object) { 135 schema_list_item = talloc(mem_ctx, struct schema_list); 136 if (schema_list_item == NULL) { 137 return WERR_NOMEM; 138 } 139 140 schema_list_item->obj = cur; 141 DLIST_ADD_END(schema_list, schema_list_item); 142 } 143 144 /* resolve objects until all are resolved and in local schema */ 145 pass_no = 1; 146 working_schema = initial_schema; 147 148 while (schema_list) { 149 uint32_t converted_obj_count = 0; 150 uint32_t failed_obj_count = 0; 151 152 if (resulting_schema != working_schema) { 153 /* 154 * If the selfmade schema is not the schema used to 155 * translate and validate 
replicated object, 156 * Which means that we are using the bootstrap schema 157 * Then we add attributes and classes that were already 158 * translated to the working schema, the idea is that 159 * we might need to add new attributes and classes 160 * to be able to translate critical replicated objects 161 * and without that we wouldn't be able to translate them 162 */ 163 werr = dsdb_repl_merge_working_schema(ldb, 164 working_schema, 165 resulting_schema); 166 if (!W_ERROR_IS_OK(werr)) { 167 return werr; 168 } 169 } 170 171 for (schema_list_item = schema_list; 172 schema_list_item; 173 schema_list_item=schema_list_next_item) { 174 struct dsdb_extended_replicated_object object; 175 176 cur = schema_list_item->obj; 177 178 /* 179 * Save the next item, now we have saved out 180 * the current one, so we can DLIST_REMOVE it 181 * safely 182 */ 183 schema_list_next_item = schema_list_item->next; 184 185 /* 186 * Convert the objects into LDB messages using the 187 * schema we have so far. It's ok if we fail to convert 188 * an object. We should convert more objects on next pass. 189 */ 190 werr = dsdb_convert_object_ex(ldb, working_schema, 191 NULL, 192 pfm_remote, 193 cur, &empty_key, 194 ignore_attids, 195 0, 196 schema_list_item, &object); 197 if (!W_ERROR_IS_OK(werr)) { 198 DEBUG(4,("debug: Failed to convert schema " 199 "object %s into ldb msg, " 200 "will try during next loop\n", 201 cur->object.identifier->dn)); 202 203 failed_obj_count++; 204 } else { 205 /* 206 * Convert the schema from ldb_message format 207 * (OIDs as OID strings) into schema, using 208 * the remote prefixMap 209 * 210 * It's not likely, but possible to get the 211 * same object twice and we should keep 212 * the last instance. 213 */ 214 werr = dsdb_schema_set_el_from_ldb_msg_dups(ldb, 215 resulting_schema, 216 object.msg, 217 true); 218 if (!W_ERROR_IS_OK(werr)) { 219 DEBUG(4,("debug: failed to convert " 220 "object %s into a schema element, " 221 "will try during next loop: %s\n", 222 ldb_dn_get_linearized(object.msg->dn), 223 win_errstr(werr))); 224 failed_obj_count++; 225 } else { 226 DEBUG(8,("Converted object %s into a schema element\n", 227 ldb_dn_get_linearized(object.msg->dn))); 228 DLIST_REMOVE(schema_list, schema_list_item); 229 TALLOC_FREE(schema_list_item); 230 converted_obj_count++; 231 } 232 } 233 } 234 235 DEBUG(4,("Schema load pass %d: converted %d, %d of %d objects left to be converted.\n", 236 pass_no, converted_obj_count, failed_obj_count, object_count)); 237 238 /* check if we converted any objects in this pass */ 239 if (converted_obj_count == 0) { 240 DEBUG(0,("Can't continue Schema load: " 241 "didn't manage to convert any objects: " 242 "all %d remaining of %d objects " 243 "failed to convert\n", 244 failed_obj_count, object_count)); 245 return WERR_INTERNAL_ERROR; 246 } 247 248 /* 249 * Don't try to load the schema if there is missing object 250 * _and_ we are on the first pass as some critical objects 251 * might be missing. 
252 */ 253 if (failed_obj_count == 0 || pass_no > cycle_before_switching) { 254 /* prepare for another cycle */ 255 working_schema = resulting_schema; 256 257 ret = dsdb_setup_sorted_accessors(ldb, working_schema); 258 if (LDB_SUCCESS != ret) { 259 DEBUG(0,("Failed to create schema-cache indexes!\n")); 260 return WERR_INTERNAL_ERROR; 261 } 262 } 263 pass_no++; 264 } 265 266 return WERR_OK; 267 } 268 34 269 /** 35 270 * Multi-pass working schema creation … … 51 286 struct dsdb_schema **_schema_out) 52 287 { 53 struct schema_list {54 struct schema_list *next, *prev;55 const struct drsuapi_DsReplicaObjectListItemEx *obj;56 };57 58 288 WERROR werr; 59 289 struct dsdb_schema_prefixmap *pfm_remote; 60 struct schema_list *schema_list = NULL, *schema_list_item, *schema_list_next_item;61 290 struct dsdb_schema *working_schema; 62 const struct drsuapi_DsReplicaObjectListItemEx *cur;63 int ret, pass_no;64 uint32_t ignore_attids[] = {65 DRSUAPI_ATTID_auxiliaryClass,66 DRSUAPI_ATTID_mayContain,67 DRSUAPI_ATTID_mustContain,68 DRSUAPI_ATTID_possSuperiors,69 DRSUAPI_ATTID_systemPossSuperiors,70 DRSUAPI_ATTID_INVALID71 };72 291 73 292 /* make a copy of the iniatial_scheam so we don't mess with it */ … … 87 306 } 88 307 89 /* create a list of objects yet to be converted */ 90 for (cur = first_object; cur; cur = cur->next_object) { 91 schema_list_item = talloc(mem_ctx, struct schema_list); 92 schema_list_item->obj = cur; 93 DLIST_ADD_END(schema_list, schema_list_item, struct schema_list); 94 } 95 96 /* resolve objects until all are resolved and in local schema */ 97 pass_no = 1; 98 99 while (schema_list) { 100 uint32_t converted_obj_count = 0; 101 uint32_t failed_obj_count = 0; 102 TALLOC_CTX *tmp_ctx = talloc_new(mem_ctx); 103 W_ERROR_HAVE_NO_MEMORY(tmp_ctx); 104 105 for (schema_list_item = schema_list; schema_list_item; schema_list_item=schema_list_next_item) { 106 struct dsdb_extended_replicated_object object; 107 108 cur = schema_list_item->obj; 109 110 /* Save the next item, now we have saved out 111 * the current one, so we can DLIST_REMOVE it 112 * safely */ 113 schema_list_next_item = schema_list_item->next; 114 115 /* 116 * Convert the objects into LDB messages using the 117 * schema we have so far. It's ok if we fail to convert 118 * an object. We should convert more objects on next pass. 
119 */ 120 werr = dsdb_convert_object_ex(ldb, working_schema, pfm_remote, 121 cur, gensec_skey, 122 ignore_attids, 123 tmp_ctx, &object); 124 if (!W_ERROR_IS_OK(werr)) { 125 DEBUG(1,("Warning: Failed to convert schema object %s into ldb msg\n", 126 cur->object.identifier->dn)); 127 128 failed_obj_count++; 129 } else { 130 /* 131 * Convert the schema from ldb_message format 132 * (OIDs as OID strings) into schema, using 133 * the remote prefixMap 134 */ 135 werr = dsdb_schema_set_el_from_ldb_msg(ldb, 136 working_schema, 137 object.msg); 138 if (!W_ERROR_IS_OK(werr)) { 139 DEBUG(1,("Warning: failed to convert object %s into a schema element: %s\n", 140 ldb_dn_get_linearized(object.msg->dn), 141 win_errstr(werr))); 142 failed_obj_count++; 143 } else { 144 DLIST_REMOVE(schema_list, schema_list_item); 145 talloc_free(schema_list_item); 146 converted_obj_count++; 147 } 148 } 149 } 150 talloc_free(tmp_ctx); 151 152 DEBUG(4,("Schema load pass %d: %d/%d of %d objects left to be converted.\n", 153 pass_no, failed_obj_count, converted_obj_count, object_count)); 154 pass_no++; 155 156 /* check if we converted any objects in this pass */ 157 if (converted_obj_count == 0) { 158 DEBUG(0,("Can't continue Schema load: didn't manage to convert any objects: all %d remaining of %d objects failed to convert\n", failed_obj_count, object_count)); 159 return WERR_INTERNAL_ERROR; 160 } 161 162 /* rebuild indexes */ 163 ret = dsdb_setup_sorted_accessors(ldb, working_schema); 164 if (LDB_SUCCESS != ret) { 165 DEBUG(0,("Failed to create schema-cache indexes!\n")); 166 return WERR_INTERNAL_ERROR; 167 } 168 }; 308 werr = dsdb_repl_resolve_working_schema(ldb, mem_ctx, 309 pfm_remote, 310 0, /* cycle_before_switching */ 311 working_schema, 312 working_schema, 313 object_count, 314 first_object); 315 if (!W_ERROR_IS_OK(werr)) { 316 DEBUG(0, ("%s: dsdb_repl_resolve_working_schema() failed: %s", 317 __location__, win_errstr(werr))); 318 return werr; 319 } 169 320 170 321 *_schema_out = working_schema; … … 189 340 WERROR dsdb_convert_object_ex(struct ldb_context *ldb, 190 341 const struct dsdb_schema *schema, 342 struct ldb_dn *partition_dn, 191 343 const struct dsdb_schema_prefixmap *pfm_remote, 192 344 const struct drsuapi_DsReplicaObjectListItemEx *in, 193 345 const DATA_BLOB *gensec_skey, 194 346 const uint32_t *ignore_attids, 347 uint32_t dsdb_repl_flags, 195 348 TALLOC_CTX *mem_ctx, 196 349 struct dsdb_extended_replicated_object *out) 197 350 { 198 351 NTSTATUS nt_status; 199 WERROR status ;352 WERROR status = WERR_OK; 200 353 uint32_t i; 201 354 struct ldb_message *msg; 202 355 struct replPropertyMetaDataBlob *md; 356 int instanceType; 357 struct ldb_message_element *instanceType_e = NULL; 203 358 struct ldb_val guid_value; 359 struct ldb_val parent_guid_value; 204 360 NTTIME whenChanged = 0; 205 361 time_t whenChanged_t; 206 362 const char *whenChanged_s; 207 const char *rdn_name = NULL;208 const struct ldb_val *rdn_value = NULL;209 const struct dsdb_attribute *rdn_attr = NULL;210 uint32_t rdn_attid;211 363 struct drsuapi_DsReplicaAttribute *name_a = NULL; 212 364 struct drsuapi_DsReplicaMetaData *name_d = NULL; … … 244 396 W_ERROR_HAVE_NO_MEMORY(msg->dn); 245 397 246 rdn_name = ldb_dn_get_rdn_name(msg->dn);247 rdn_attr = dsdb_attribute_by_lDAPDisplayName(schema, rdn_name);248 if (!rdn_attr) {249 return WERR_FOOBAR;250 }251 rdn_attid = rdn_attr->attributeID_id;252 rdn_value = ldb_dn_get_rdn_val(msg->dn);253 254 398 msg->num_elements = in->object.attribute_ctr.num_attributes; 255 399 msg->elements = talloc_array(msg, 
struct ldb_message_element, 256 msg->num_elements );400 msg->num_elements + 1); /* +1 because of the RDN attribute */ 257 401 W_ERROR_HAVE_NO_MEMORY(msg->elements); 258 402 … … 286 430 } 287 431 432 if (GUID_all_zero(&d->originating_invocation_id)) { 433 status = WERR_DS_SRC_GUID_MISMATCH; 434 DEBUG(0, ("Refusing replication of object containing invalid zero invocationID on attribute %d of %s: %s\n", 435 a->attid, 436 ldb_dn_get_linearized(msg->dn), 437 win_errstr(status))); 438 return status; 439 } 440 441 if (a->attid == DRSUAPI_ATTID_instanceType) { 442 if (instanceType_e != NULL) { 443 return WERR_FOOBAR; 444 } 445 instanceType_e = e; 446 } 447 288 448 for (j=0; j<a->value_ctr.num_values; j++) { 289 status = drsuapi_decrypt_attribute(a->value_ctr.values[j].blob, gensec_skey, rid, a); 290 W_ERROR_NOT_OK_RETURN(status); 291 } 292 449 status = drsuapi_decrypt_attribute(a->value_ctr.values[j].blob, 450 gensec_skey, rid, 451 dsdb_repl_flags, a); 452 if (!W_ERROR_IS_OK(status)) { 453 break; 454 } 455 } 456 if (W_ERROR_EQUAL(status, WERR_TOO_MANY_SECRETS)) { 457 WERROR get_name_status = dsdb_attribute_drsuapi_to_ldb(ldb, schema, pfm_remote, 458 a, msg->elements, e, NULL); 459 if (W_ERROR_IS_OK(get_name_status)) { 460 DEBUG(0, ("Unxpectedly got secret value %s on %s from DRS server\n", 461 e->name, ldb_dn_get_linearized(msg->dn))); 462 } else { 463 DEBUG(0, ("Unxpectedly got secret value on %s from DRS server", 464 ldb_dn_get_linearized(msg->dn))); 465 } 466 } else if (!W_ERROR_IS_OK(status)) { 467 return status; 468 } 469 470 /* 471 * This function also fills in the local attid value, 472 * based on comparing the remote and local prefixMap 473 * tables. If we don't convert the value, then we can 474 * have invalid values in the replPropertyMetaData we 475 * store on disk, as the prefixMap is per host, not 476 * per-domain. This may be why Microsoft added the 477 * msDS-IntID feature, however this is not used for 478 * extra attributes in the schema partition itself. 479 */ 293 480 status = dsdb_attribute_drsuapi_to_ldb(ldb, schema, pfm_remote, 294 a, msg->elements, e); 481 a, msg->elements, e, 482 &m->attid); 295 483 W_ERROR_NOT_OK_RETURN(status); 296 484 297 m->attid = a->attid;298 485 m->version = d->version; 299 486 m->originating_change_time = d->originating_change_time; … … 320 507 if (rdn_m) { 321 508 struct ldb_message_element *el; 509 const char *rdn_name = NULL; 510 const struct ldb_val *rdn_value = NULL; 511 const struct dsdb_attribute *rdn_attr = NULL; 512 uint32_t rdn_attid; 513 514 /* 515 * We only need the schema calls for the RDN in this 516 * codepath, and by doing this we avoid needing to 517 * have the dsdb_attribute_by_lDAPDisplayName accessor 518 * working during the schema load. 
519 */ 520 rdn_name = ldb_dn_get_rdn_name(msg->dn); 521 rdn_attr = dsdb_attribute_by_lDAPDisplayName(schema, rdn_name); 522 if (!rdn_attr) { 523 return WERR_FOOBAR; 524 } 525 rdn_attid = rdn_attr->attributeID_id; 526 rdn_value = ldb_dn_get_rdn_val(msg->dn); 527 322 528 el = ldb_msg_find_element(msg, rdn_attr->lDAPDisplayName); 323 529 if (!el) { … … 350 556 } 351 557 558 if (instanceType_e == NULL) { 559 return WERR_FOOBAR; 560 } 561 562 instanceType = ldb_msg_find_attr_as_int(msg, "instanceType", 0); 563 564 if (instanceType & INSTANCE_TYPE_IS_NC_HEAD && partition_dn) { 565 int partition_dn_cmp = ldb_dn_compare(partition_dn, msg->dn); 566 if (partition_dn_cmp != 0) { 567 DEBUG(4, ("Remote server advised us of a new partition %s while processing %s, ignoring\n", 568 ldb_dn_get_linearized(msg->dn), 569 ldb_dn_get_linearized(partition_dn))); 570 return WERR_DS_ADD_REPLICA_INHIBITED; 571 } 572 } 573 574 if (dsdb_repl_flags & DSDB_REPL_FLAG_PARTIAL_REPLICA) { 575 /* the instanceType type for partial_replica 576 replication is sent via DRS with TYPE_WRITE set, but 577 must be used on the client with TYPE_WRITE removed 578 */ 579 if (instanceType & INSTANCE_TYPE_WRITE) { 580 /* 581 * Make sure we do not change the order 582 * of msg->elements! 583 * 584 * That's why we use 585 * instanceType_e->num_values = 0 586 * instead of 587 * ldb_msg_remove_attr(msg, "instanceType"); 588 */ 589 struct ldb_message_element *e; 590 591 e = ldb_msg_find_element(msg, "instanceType"); 592 if (e != instanceType_e) { 593 DEBUG(0,("instanceType_e[%p] changed to e[%p]\n", 594 instanceType_e, e)); 595 return WERR_FOOBAR; 596 } 597 598 instanceType_e->num_values = 0; 599 600 instanceType &= ~INSTANCE_TYPE_WRITE; 601 if (ldb_msg_add_fmt(msg, "instanceType", "%d", instanceType) != LDB_SUCCESS) { 602 return WERR_INTERNAL_ERROR; 603 } 604 } 605 } else { 606 if (!(instanceType & INSTANCE_TYPE_WRITE)) { 607 DEBUG(0, ("Refusing to replicate %s from a read-only repilca into a read-write replica!\n", 608 ldb_dn_get_linearized(msg->dn))); 609 return WERR_DS_DRA_SOURCE_IS_PARTIAL_REPLICA; 610 } 611 } 612 352 613 whenChanged_t = nt_time_to_unix(whenChanged); 353 614 whenChanged_s = ldb_timestring(msg, whenChanged_t); … … 359 620 } 360 621 622 if (in->parent_object_guid) { 623 nt_status = GUID_to_ndr_blob(in->parent_object_guid, msg, &parent_guid_value); 624 if (!NT_STATUS_IS_OK(nt_status)) { 625 return ntstatus_to_werror(nt_status); 626 } 627 } else { 628 parent_guid_value = data_blob_null; 629 } 630 361 631 out->msg = msg; 362 632 out->guid_value = guid_value; 633 out->parent_guid_value = parent_guid_value; 363 634 out->when_changed = whenChanged_s; 364 635 out->meta_data = md; … … 377 648 const struct drsuapi_DsReplicaCursor2CtrEx *uptodateness_vector, 378 649 const DATA_BLOB *gensec_skey, 650 uint32_t dsdb_repl_flags, 379 651 TALLOC_CTX *mem_ctx, 380 652 struct dsdb_extended_replicated_objects **objects) … … 390 662 W_ERROR_HAVE_NO_MEMORY(out); 391 663 out->version = DSDB_EXTENDED_REPLICATED_OBJECTS_VERSION; 664 out->dsdb_repl_flags = dsdb_repl_flags; 392 665 393 666 /* … … 429 702 out->uptodateness_vector= uptodateness_vector; 430 703 431 out->num_objects = object_count;704 out->num_objects = 0; 432 705 out->objects = talloc_array(out, 433 706 struct dsdb_extended_replicated_object, 434 o ut->num_objects);707 object_count); 435 708 W_ERROR_HAVE_NO_MEMORY_AND_FREE(out->objects, out); 436 709 … … 441 714 442 715 for (i=0, cur = first_object; cur; cur = cur->next_object, i++) { 443 if (i == o ut->num_objects) {716 if (i == 
object_count) { 444 717 talloc_free(out); 445 718 return WERR_FOOBAR; 446 719 } 447 720 448 status = dsdb_convert_object_ex(ldb, schema, pfm_remote, 721 status = dsdb_convert_object_ex(ldb, schema, out->partition_dn, 722 pfm_remote, 449 723 cur, gensec_skey, 450 724 NULL, 451 out->objects, &out->objects[i]); 725 dsdb_repl_flags, 726 out->objects, 727 &out->objects[out->num_objects]); 728 729 /* 730 * Check to see if we have been advised of a 731 * subdomain or new application partition. We don't 732 * want to start on that here, instead the caller 733 * should consider if it would like to replicate it 734 * based on the cross-ref object. 735 */ 736 if (W_ERROR_EQUAL(status, WERR_DS_ADD_REPLICA_INHIBITED)) { 737 continue; 738 } 739 452 740 if (!W_ERROR_IS_OK(status)) { 453 741 talloc_free(out); … … 457 745 return status; 458 746 } 459 } 460 if (i != out->num_objects) { 747 748 /* Assuming we didn't skip or error, increment the number of objects */ 749 out->num_objects++; 750 } 751 out->objects = talloc_realloc(out, out->objects, 752 struct dsdb_extended_replicated_object, 753 out->num_objects); 754 if (out->num_objects != 0 && out->objects == NULL) { 755 talloc_free(out); 756 return WERR_FOOBAR; 757 } 758 if (i != object_count) { 461 759 talloc_free(out); 462 760 return WERR_FOOBAR; … … 485 783 struct ldb_result *ext_res; 486 784 struct dsdb_schema *cur_schema = NULL; 785 struct dsdb_schema *new_schema = NULL; 487 786 int ret; 488 787 uint64_t seq_num1, seq_num2; 788 bool used_global_schema = false; 789 790 TALLOC_CTX *tmp_ctx = talloc_new(objects); 791 if (!tmp_ctx) { 792 DEBUG(0,("Failed to start talloc\n")); 793 return WERR_NOMEM; 794 } 489 795 490 796 /* TODO: handle linked attributes */ … … 503 809 DEBUG(0,(__location__ " Failed to load partition uSN\n")); 504 810 ldb_transaction_cancel(ldb); 811 TALLOC_FREE(tmp_ctx); 505 812 return WERR_FOOBAR; 506 813 } … … 514 821 if (working_schema) { 515 822 /* store current schema so we can fall back in case of failure */ 516 cur_schema = dsdb_get_schema(ldb, working_schema); 823 cur_schema = dsdb_get_schema(ldb, tmp_ctx); 824 used_global_schema = dsdb_uses_global_schema(ldb); 517 825 518 826 ret = dsdb_reference_schema(ldb, working_schema, false); … … 522 830 /* TODO: Map LDB Error to NTSTATUS? 
*/ 523 831 ldb_transaction_cancel(ldb); 832 TALLOC_FREE(tmp_ctx); 524 833 return WERR_INTERNAL_ERROR; 525 834 } … … 529 838 if (ret != LDB_SUCCESS) { 530 839 /* restore previous schema */ 531 if (cur_schema ) { 840 if (used_global_schema) { 841 dsdb_set_global_schema(ldb); 842 } else if (cur_schema) { 532 843 dsdb_reference_schema(ldb, cur_schema, false); 533 dsdb_make_schema_global(ldb, cur_schema);534 844 } 535 845 … … 537 847 ldb_errstring(ldb), ldb_strerror(ret))); 538 848 ldb_transaction_cancel(ldb); 849 TALLOC_FREE(tmp_ctx); 539 850 return WERR_FOOBAR; 540 851 } … … 548 859 if (!W_ERROR_IS_OK(werr)) { 549 860 /* restore previous schema */ 550 if (cur_schema ) { 861 if (used_global_schema) { 862 dsdb_set_global_schema(ldb); 863 } else if (cur_schema ) { 551 864 dsdb_reference_schema(ldb, cur_schema, false); 552 dsdb_make_schema_global(ldb, cur_schema);553 865 } 554 866 DEBUG(0,("Failed to save updated prefixMap: %s\n", 555 867 win_errstr(werr))); 868 TALLOC_FREE(tmp_ctx); 556 869 return werr; 557 870 } … … 561 874 if (ret != LDB_SUCCESS) { 562 875 /* restore previous schema */ 563 if (cur_schema ) { 876 if (used_global_schema) { 877 dsdb_set_global_schema(ldb); 878 } else if (cur_schema ) { 564 879 dsdb_reference_schema(ldb, cur_schema, false); 565 dsdb_make_schema_global(ldb, cur_schema);566 880 } 567 881 DEBUG(0,(__location__ " Failed to prepare commit of transaction: %s\n", 568 882 ldb_errstring(ldb))); 883 TALLOC_FREE(tmp_ctx); 569 884 return WERR_FOOBAR; 570 885 } … … 573 888 if (ret != LDB_SUCCESS) { 574 889 /* restore previous schema */ 575 if (cur_schema ) { 890 if (used_global_schema) { 891 dsdb_set_global_schema(ldb); 892 } else if (cur_schema ) { 576 893 dsdb_reference_schema(ldb, cur_schema, false); 577 dsdb_make_schema_global(ldb, cur_schema);578 894 } 579 895 DEBUG(0,(__location__ " Failed to load partition uSN\n")); 580 896 ldb_transaction_cancel(ldb); 897 TALLOC_FREE(tmp_ctx); 581 898 return WERR_FOOBAR; 582 }583 584 /* if this replication partner didn't need to be notified585 before this transaction then it still doesn't need to be586 notified, as the changes came from this server */587 if (seq_num2 > seq_num1 && seq_num1 <= *notify_uSN) {588 *notify_uSN = seq_num2;589 899 } 590 900 … … 592 902 if (ret != LDB_SUCCESS) { 593 903 /* restore previous schema */ 594 if (cur_schema ) { 904 if (used_global_schema) { 905 dsdb_set_global_schema(ldb); 906 } else if (cur_schema ) { 595 907 dsdb_reference_schema(ldb, cur_schema, false); 596 dsdb_make_schema_global(ldb, cur_schema);597 908 } 598 909 DEBUG(0,(__location__ " Failed to commit transaction\n")); 910 TALLOC_FREE(tmp_ctx); 599 911 return WERR_FOOBAR; 912 } 913 914 /* if this replication partner didn't need to be notified 915 before this transaction then it still doesn't need to be 916 notified, as the changes came from this server */ 917 if (seq_num2 > seq_num1 && seq_num1 <= *notify_uSN) { 918 *notify_uSN = seq_num2; 600 919 } 601 920 … … 605 924 */ 606 925 if (working_schema) { 607 cur_schema = dsdb_get_schema(ldb, NULL); 608 /* TODO: What we do in case dsdb_get_schema() fail? 609 * We can't fallback at this point anymore */ 610 if (cur_schema) { 611 dsdb_make_schema_global(ldb, cur_schema); 926 struct ldb_message *msg; 927 struct ldb_request *req; 928 929 /* Force a reload */ 930 working_schema->last_refresh = 0; 931 new_schema = dsdb_get_schema(ldb, tmp_ctx); 932 /* TODO: 933 * If dsdb_get_schema() fails, we just fall back 934 * to what we had. 
However, the database is probably 935 * unable to operate for other users from this 936 * point... */ 937 if (new_schema && used_global_schema) { 938 dsdb_make_schema_global(ldb, new_schema); 939 } else if (used_global_schema) { 940 DEBUG(0,("Failed to re-load schema after commit of transaction\n")); 941 dsdb_set_global_schema(ldb); 942 TALLOC_FREE(tmp_ctx); 943 return WERR_INTERNAL_ERROR; 944 } else { 945 DEBUG(0,("Failed to re-load schema after commit of transaction\n")); 946 dsdb_reference_schema(ldb, cur_schema, false); 947 TALLOC_FREE(tmp_ctx); 948 return WERR_INTERNAL_ERROR; 949 } 950 msg = ldb_msg_new(tmp_ctx); 951 if (msg == NULL) { 952 TALLOC_FREE(tmp_ctx); 953 return WERR_NOMEM; 954 } 955 msg->dn = ldb_dn_new(msg, ldb, ""); 956 if (msg->dn == NULL) { 957 TALLOC_FREE(tmp_ctx); 958 return WERR_NOMEM; 959 } 960 961 ret = ldb_msg_add_string(msg, "schemaUpdateNow", "1"); 962 if (ret != LDB_SUCCESS) { 963 TALLOC_FREE(tmp_ctx); 964 return WERR_INTERNAL_ERROR; 965 } 966 967 ret = ldb_build_mod_req(&req, ldb, objects, 968 msg, 969 NULL, 970 NULL, 971 ldb_op_default_callback, 972 NULL); 973 974 if (ret != LDB_SUCCESS) { 975 TALLOC_FREE(tmp_ctx); 976 return WERR_DS_DRA_INTERNAL_ERROR; 977 } 978 979 ret = ldb_transaction_start(ldb); 980 if (ret != LDB_SUCCESS) { 981 TALLOC_FREE(tmp_ctx); 982 DEBUG(0, ("Autotransaction start failed\n")); 983 return WERR_DS_DRA_INTERNAL_ERROR; 984 } 985 986 ret = ldb_request(ldb, req); 987 if (ret == LDB_SUCCESS) { 988 ret = ldb_wait(req->handle, LDB_WAIT_ALL); 989 } 990 991 if (ret == LDB_SUCCESS) { 992 ret = ldb_transaction_commit(ldb); 993 } else { 994 DEBUG(0, ("Schema update now failed: %s\n", 995 ldb_errstring(ldb))); 996 ldb_transaction_cancel(ldb); 997 } 998 999 if (ret != LDB_SUCCESS) { 1000 DEBUG(0, ("Commit failed: %s\n", ldb_errstring(ldb))); 1001 TALLOC_FREE(tmp_ctx); 1002 return WERR_DS_INTERNAL_FAILURE; 612 1003 } 613 1004 } … … 617 1008 ldb_dn_get_linearized(objects->partition_dn))); 618 1009 1010 TALLOC_FREE(tmp_ctx); 619 1011 return WERR_OK; 620 1012 } … … 657 1049 658 1050 status = dsdb_attribute_drsuapi_to_ldb(ldb, schema, schema->prefixmap, 659 a, msg->elements, e );1051 a, msg->elements, e, NULL); 660 1052 W_ERROR_NOT_OK_RETURN(status); 661 1053 } … … 671 1063 const struct drsuapi_DsReplicaObjectListItem *first_object, 672 1064 uint32_t *_num, 1065 uint32_t dsdb_repl_flags, 673 1066 struct drsuapi_DsReplicaObjectIdentifier2 **_ids) 674 1067 { … … 729 1122 } 730 1123 1124 if (dsdb_repl_flags & DSDB_REPL_FLAG_ADD_NCNAME) { 1125 /* check for possible NC creation */ 1126 for (i=0; i < num_objects; i++) { 1127 struct ldb_message *msg = objects[i]; 1128 struct ldb_message_element *el; 1129 struct ldb_dn *nc_dn; 1130 1131 if (ldb_msg_check_string_attribute(msg, "objectClass", "crossRef") == 0) { 1132 continue; 1133 } 1134 el = ldb_msg_find_element(msg, "nCName"); 1135 if (el == NULL || el->num_values != 1) { 1136 continue; 1137 } 1138 nc_dn = ldb_dn_from_ldb_val(objects, ldb, &el->values[0]); 1139 if (!ldb_dn_validate(nc_dn)) { 1140 continue; 1141 } 1142 ret = dsdb_create_partial_replica_NC(ldb, nc_dn); 1143 if (ret != LDB_SUCCESS) { 1144 status = WERR_DS_INTERNAL_FAILURE; 1145 goto cancel; 1146 } 1147 } 1148 } 1149 731 1150 for (i=0; i < num_objects; i++) { 732 1151 struct dom_sid *sid = NULL;
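The instanceType handling in the hunks above goes out of its way to keep msg->elements and the replPropertyMetaData array index-aligned: the element is emptied in place (num_values = 0) rather than removed, so later elements never shift position. The following is a minimal standalone sketch of that ordering argument only, using simplified stand-in structs rather than the real ldb types:

#include <stdio.h>
#include <string.h>

/* Hypothetical, simplified stand-ins for ldb_message / ldb_message_element,
 * used only to illustrate the ordering point made in the diff above. */
struct element {
        const char *name;
        unsigned num_values;    /* 0 == element kept, but carries no values */
};

struct message {
        unsigned num_elements;
        struct element elements[8];
};

/* Metadata entries are matched to elements purely by array position, so a
 * removal that shifts later elements would break the pairing.  Clearing the
 * values in place keeps every index stable, which is the point of the
 * num_values = 0 trick used for instanceType. */
static void clear_element_in_place(struct message *msg, const char *name)
{
        unsigned i;
        for (i = 0; i < msg->num_elements; i++) {
                if (strcmp(msg->elements[i].name, name) == 0) {
                        msg->elements[i].num_values = 0;
                        return;
                }
        }
}

int main(void)
{
        struct message msg = {
                .num_elements = 3,
                .elements = {
                        { "objectClass",  1 },
                        { "instanceType", 1 },
                        { "whenChanged",  1 },
                },
        };

        clear_element_in_place(&msg, "instanceType");

        /* "instanceType" is still at index 1, just empty. */
        printf("index 1: %s, num_values=%u\n",
               msg.elements[1].name, msg.elements[1].num_values);
        return 0;
}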
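The convert loop in dsdb_replicated_objects_convert only advances num_objects when an object actually converts, skips entries that return WERR_DS_ADD_REPLICA_INHIBITED, and then trims the array with talloc_realloc(). A sketch of that skip-and-compact idiom follows, with plain malloc/realloc and a hypothetical convert_one() standing in for dsdb_convert_object_ex():

#include <stdio.h>
#include <stdlib.h>

struct converted {
        int id;
};

/* A hypothetical "convert" that skips odd ids, standing in for the
 * WERR_DS_ADD_REPLICA_INHIBITED case (a new partition we ignore here). */
static int convert_one(int id, struct converted *out)
{
        if (id % 2 != 0) {
                return -1;      /* skipped */
        }
        out->id = id;
        return 0;
}

int main(void)
{
        const int object_count = 5;
        struct converted *objs;
        int num_objects = 0;
        int i;

        objs = calloc(object_count, sizeof(*objs));
        if (objs == NULL) {
                return 1;
        }

        for (i = 0; i < object_count; i++) {
                /* Write into the next free slot, not slot i, so skipped
                 * inputs leave no holes in the output array. */
                if (convert_one(i, &objs[num_objects]) != 0) {
                        continue;
                }
                num_objects++;
        }

        /* Trim the allocation down to what was actually converted,
         * mirroring the talloc_realloc() call in the diff. */
        if (num_objects > 0) {
                struct converted *tmp =
                        realloc(objs, num_objects * sizeof(*objs));
                if (tmp != NULL) {
                        objs = tmp;
                }
        }

        printf("converted %d of %d objects\n", num_objects, object_count);
        free(objs);
        return 0;
}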
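Finally, the uSN bookkeeping moved after the commit: the partner's notify marker is only advanced when the partner was already up to date before the transaction, because in that case the changes just applied came from that partner and need no notification back. A small sketch of that rule under those assumptions (descriptive names, not Samba's):

#include <stdint.h>
#include <assert.h>

/* seq_before/seq_after: the partition uSN read just before and just after
 * the transaction; *notify_usn: the highest uSN the replication partner is
 * already known to have. */
static void maybe_advance_notify_usn(uint64_t seq_before, uint64_t seq_after,
                                     uint64_t *notify_usn)
{
        /* If the partner was up to date before this transaction, the
         * committed changes came from that partner, so advancing the marker
         * past them avoids sending a pointless notification back. */
        if (seq_after > seq_before && seq_before <= *notify_usn) {
                *notify_usn = seq_after;
        }
}

int main(void)
{
        uint64_t notify_usn = 100;

        /* Partner up to date (100 <= 100): its own changes move 100 -> 120. */
        maybe_advance_notify_usn(100, 120, &notify_usn);
        assert(notify_usn == 120);

        /* Partner behind (100 > 50): keep the marker, the partner still
         * needs to hear about the older changes. */
        notify_usn = 50;
        maybe_advance_notify_usn(100, 120, &notify_usn);
        assert(notify_usn == 50);

        return 0;
}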