Changeset 988 for vendor/current/source3/locking/brlock.c
- Timestamp: Nov 24, 2016, 1:14:11 PM
- File: vendor/current/source3/locking/brlock.c (1 edited)
Legend:
- Unmodified: lines prefixed with a space
- Added: lines prefixed with `+`
- Removed: lines prefixed with `-`
vendor/current/source3/locking/brlock.c
```diff
--- vendor/current/source3/locking/brlock.c	(r740)
+++ vendor/current/source3/locking/brlock.c	(r988)
@@ -1 +1 @@
 /*
    Unix SMB/CIFS implementation.
    byte range locking code
@@ -6 +6 @@
    Copyright (C) Andrew Tridgell 1992-2000
    Copyright (C) Jeremy Allison 1992-2000
 
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 3 of the License, or
    (at your option) any later version.
 
    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.
 
    You should have received a copy of the GNU General Public License
    along with this program. If not, see <http://www.gnu.org/licenses/>.
@@ -29 +29 @@
 #include "locking/proto.h"
 #include "smbd/globals.h"
-#include "dbwrap.h"
+#include "dbwrap/dbwrap.h"
+#include "dbwrap/dbwrap_open.h"
 #include "serverid.h"
 #include "messages.h"
+#include "util_tdb.h"
 
 #undef DBGC_CLASS
@@ -41 +43 @@
 
 static struct db_context *brlock_db;
+
+struct byte_range_lock {
+	struct files_struct *fsp;
+	unsigned int num_locks;
+	bool modified;
+	uint32_t num_read_oplocks;
+	struct lock_struct *lock_data;
+	struct db_record *record;
+};
 
 /****************************************************************************
@@ -46 +57 @@
 ****************************************************************************/
 
-static void print_lock_struct(unsigned int i, struct lock_struct *pls)
-{
+static void print_lock_struct(unsigned int i, const struct lock_struct *pls)
+{
+	struct server_id_buf tmp;
+
 	DEBUG(10,("[%u]: smblctx = %llu, tid = %u, pid = %s, ",
 			i,
 			(unsigned long long)pls->context.smblctx,
 			(unsigned int)pls->context.tid,
-			procid_str(talloc_tos(), &pls->context.pid) ));
-
-	DEBUG(10,("start = %.0f, size = %.0f, fnum = %d, %s %s\n",
-		(double)pls->start,
-		(double)pls->size,
-		pls->fnum,
-		lock_type_name(pls->lock_type),
-		lock_flav_name(pls->lock_flav) ));
+			server_id_str_buf(pls->context.pid, &tmp) ));
+
+	DEBUG(10, ("start = %ju, size = %ju, fnum = %ju, %s %s\n",
+		   (uintmax_t)pls->start,
+		   (uintmax_t)pls->size,
+		   (uintmax_t)pls->fnum,
+		   lock_type_name(pls->lock_type),
+		   lock_flav_name(pls->lock_flav)));
+}
+
+unsigned int brl_num_locks(const struct byte_range_lock *brl)
+{
+	return brl->num_locks;
+}
+
+struct files_struct *brl_fsp(struct byte_range_lock *brl)
+{
+	return brl->fsp;
+}
+
+uint32_t brl_num_read_oplocks(const struct byte_range_lock *brl)
+{
+	return brl->num_read_oplocks;
+}
+
+void brl_set_num_read_oplocks(struct byte_range_lock *brl,
+			      uint32_t num_read_oplocks)
+{
+	DEBUG(10, ("Setting num_read_oplocks to %"PRIu32"\n",
+		   num_read_oplocks));
+	SMB_ASSERT(brl->record != NULL); /* otherwise we're readonly */
+	brl->num_read_oplocks = num_read_oplocks;
+	brl->modified = true;
 }
 
@@ -66 +104 @@
 ****************************************************************************/
 
-bool brl_same_context(const struct lock_context *ctx1,
+static bool brl_same_context(const struct lock_context *ctx1,
 			     const struct lock_context *ctx2)
 {
-	return (procid_equal(&ctx1->pid, &ctx2->pid) &&
+	return (serverid_equal(&ctx1->pid, &ctx2->pid) &&
 		(ctx1->smblctx == ctx2->smblctx) &&
 		(ctx1->tid == ctx2->tid));
@@ -82 +120 @@
 {
 	/* XXX Remove for Win7 compatibility. */
-	/* this extra check is not redundent - it copes with locks
+	/* this extra check is not redundant - it copes with locks
 	   that go beyond the end of 64 bit file space */
 	if (lck1->size != 0 &&
@@ -101 +139 @@
 ****************************************************************************/
 
-static bool brl_conflict(const struct lock_struct *lck1, 
+static bool brl_conflict(const struct lock_struct *lck1,
 			 const struct lock_struct *lck2)
 {
@@ -122 +160 @@
 
 	return brl_overlap(lck1, lck2);
-} 
+}
 
@@ -130 +168 @@
 ****************************************************************************/
 
-static bool brl_conflict_posix(const struct lock_struct *lck1, 
+static bool brl_conflict_posix(const struct lock_struct *lck1,
 			       const struct lock_struct *lck2)
 {
@@ -147 +185 @@
 	}
 
-	/* Locks on the same context con't conflict. Ignore fnum. */
+	/* Locks on the same context don't conflict. Ignore fnum. */
 	if (brl_same_context(&lck1->context, &lck2->context)) {
 		return False;
@@ -155 +193 @@
 	   do they overlap ? */
 	return brl_overlap(lck1, lck2);
-} 
+}
 
 #if ZERO_ZERO
-static bool brl_conflict1(const struct lock_struct *lck1, 
+static bool brl_conflict1(const struct lock_struct *lck1,
 			  const struct lock_struct *lck2)
 {
@@ -181 +219 @@
 		return False;
 	}
 
 	return True;
-} 
+}
 #endif
 
@@ -192 +230 @@
 ****************************************************************************/
 
-static bool brl_conflict_other(const struct lock_struct *lck1, const struct lock_struct *lck2)
-{
-	if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
+static bool brl_conflict_other(const struct lock_struct *lock,
+			       const struct lock_struct *rw_probe)
+{
+	if (IS_PENDING_LOCK(lock->lock_type) ||
+	    IS_PENDING_LOCK(rw_probe->lock_type)) {
 		return False;
-
-	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK)
+	}
+
+	if (lock->lock_type == READ_LOCK && rw_probe->lock_type == READ_LOCK) {
 		return False;
-
-	/* POSIX flavour locks never conflict here - this is only called
-	   in the read/write path. */
-
-	if (lck1->lock_flav == POSIX_LOCK && lck2->lock_flav == POSIX_LOCK)
+	}
+
+	if (lock->lock_flav == POSIX_LOCK &&
+	    rw_probe->lock_flav == POSIX_LOCK) {
+		/*
+		 * POSIX flavour locks never conflict here - this is only called
+		 * in the read/write path.
+		 */
 		return False;
+	}
+
+	if (!brl_overlap(lock, rw_probe)) {
+		/*
+		 * I/O can only conflict when overlapping a lock, thus let it
+		 * pass
+		 */
+		return false;
+	}
+
+	if (!brl_same_context(&lock->context, &rw_probe->context)) {
+		/*
+		 * Different process, conflict
+		 */
+		return true;
+	}
+
+	if (lock->fnum != rw_probe->fnum) {
+		/*
+		 * Different file handle, conflict
+		 */
+		return true;
+	}
+
+	if ((lock->lock_type == READ_LOCK) &&
+	    (rw_probe->lock_type == WRITE_LOCK)) {
+		/*
+		 * Incoming WRITE locks conflict with existing READ locks even
+		 * if the context is the same. JRA. See LOCKTEST7 in
+		 * smbtorture.
+		 */
+		return true;
+	}
 
 	/*
-	 * Incoming WRITE locks conflict with existing READ locks even
-	 * if the context is the same. JRA. See LOCKTEST7 in smbtorture.
+	 * I/O request compatible with existing lock, let it pass without
+	 * conflict
 	 */
 
-	if (!(lck2->lock_type == WRITE_LOCK && lck1->lock_type == READ_LOCK)) {
-		if (brl_same_context(&lck1->context, &lck2->context) &&
-		    lck1->fnum == lck2->fnum)
-			return False;
-	}
-
-	return brl_overlap(lck1, lck2);
-}
+	return false;
+}
 
@@ -228 +299 @@
 	if ((lock->start <= pend_lock->start) && (lock->start + lock->size > pend_lock->start))
 		return True;
-	if ((lock->start >= pend_lock->start) && (lock->start <= pend_lock->start + pend_lock->size))
+	if ((lock->start >= pend_lock->start) && (lock->start < pend_lock->start + pend_lock->size))
 		return True;
 	return False;
@@ -239 +310 @@
 ****************************************************************************/
 
-NTSTATUS brl_lock_failed(files_struct *fsp, const struct lock_struct *lock, bool blocking_lock)
+static NTSTATUS brl_lock_failed(files_struct *fsp,
+				const struct lock_struct *lock,
+				bool blocking_lock)
 {
 	if (lock->start >= 0xEF000000 && (lock->start >> 63) == 0) {
@@ -252 +325 @@
 	}
 
-	if (procid_equal(&lock->context.pid, &fsp->last_lock_failure.context.pid) &&
+	if (serverid_equal(&lock->context.pid, &fsp->last_lock_failure.context.pid) &&
 	    lock->context.tid == fsp->last_lock_failure.context.tid &&
 	    lock->fnum == fsp->last_lock_failure.fnum &&
@@ -272 +345 @@
 {
 	int tdb_flags;
+	char *db_path;
 
 	if (brlock_db) {
@@ -288 +362 @@
 	}
 
-	brlock_db = db_open(NULL, lock_path("brlock.tdb"),
-			    lp_open_files_db_hash_size(), tdb_flags,
-			    read_only?O_RDONLY:(O_RDWR|O_CREAT), 0644 );
+	db_path = lock_path("brlock.tdb");
+	if (db_path == NULL) {
+		DEBUG(0, ("out of memory!\n"));
+		return;
+	}
+
+	brlock_db = db_open(NULL, db_path,
+			    SMB_OPEN_DATABASE_TDB_HASH_SIZE, tdb_flags,
+			    read_only?O_RDONLY:(O_RDWR|O_CREAT), 0644,
+			    DBWRAP_LOCK_ORDER_2, DBWRAP_FLAG_NONE);
 	if (!brlock_db) {
 		DEBUG(0,("Failed to open byte range locking database %s\n",
-			 lock_path("brlock.tdb")));
+			 db_path));
+		TALLOC_FREE(db_path);
 		return;
 	}
+	TALLOC_FREE(db_path);
 }
@@ -312 +395 @@
 ****************************************************************************/
 
-static int lock_compare(const struct lock_struct *lck1, 
+static int lock_compare(const struct lock_struct *lck1,
 			const struct lock_struct *lck2)
 {
@@ -347 +430 @@
 		/* Do any Windows or POSIX locks conflict ? */
 		if (brl_conflict(&locks[i], plock)) {
+			if (!serverid_exists(&locks[i].context.pid)) {
+				locks[i].context.pid.pid = 0;
+				br_lck->modified = true;
+				continue;
+			}
 			/* Remember who blocked us. */
 			plock->context.smblctx = locks[i].context.smblctx;
@@ -352 +440 @@
 		}
 #if ZERO_ZERO
-		if (plock->start == 0 && plock->size == 0 && 
+		if (plock->start == 0 && plock->size == 0 &&
 		    locks[i].size == 0) {
 			break;
@@ -392 +480 @@
 
 	/* no conflicts - add it to the list of locks */
-	locks = (struct lock_struct *)SMB_REALLOC(locks, (br_lck->num_locks + 1) * sizeof(*locks));
+	locks = talloc_realloc(br_lck, locks, struct lock_struct,
+			       (br_lck->num_locks + 1));
 	if (!locks) {
 		status = NT_STATUS_NO_MEMORY;
@@ -730 +819 @@
 	   so we need at most 2 more entries. */
 
-	tp = SMB_MALLOC_ARRAY(struct lock_struct, (br_lck->num_locks + 2));
+	tp = talloc_array(br_lck, struct lock_struct, br_lck->num_locks + 2);
 	if (!tp) {
 		return NT_STATUS_NO_MEMORY;
@@ -750 +839 @@
 			/* Do any Windows flavour locks conflict ? */
 			if (brl_conflict(curr_lock, plock)) {
+				if (!serverid_exists(&curr_lock->context.pid)) {
+					curr_lock->context.pid.pid = 0;
+					br_lck->modified = true;
+					continue;
+				}
 				/* No games with error messages. */
-				SAFE_FREE(tp);
+				TALLOC_FREE(tp);
 				/* Remember who blocked us. */
 				plock->context.smblctx = curr_lock->context.smblctx;
@@ -764 +858 @@
 			/* POSIX conflict semantics are different. */
 			if (brl_conflict_posix(curr_lock, plock)) {
+				if (!serverid_exists(&curr_lock->context.pid)) {
+					curr_lock->context.pid.pid = 0;
+					br_lck->modified = true;
+					continue;
+				}
 				/* Can't block ourselves with POSIX locks. */
 				/* No games with error messages. */
-				SAFE_FREE(tp);
+				TALLOC_FREE(tp);
 				/* Remember who blocked us. */
 				plock->context.smblctx = curr_lock->context.smblctx;
@@ -831 +930 @@
 
 			if (errno_ret == EACCES || errno_ret == EAGAIN) {
-				SAFE_FREE(tp);
+				TALLOC_FREE(tp);
 				status = NT_STATUS_FILE_LOCK_CONFLICT;
 				goto fail;
 			} else {
-				SAFE_FREE(tp);
+				TALLOC_FREE(tp);
 				status = map_nt_error_from_unix(errno);
 				goto fail;
@@ -845 +944 @@
 	 * Realloc so we don't leak entries per lock call. */
 	if (count < br_lck->num_locks + 2) {
-		tp = (struct lock_struct *)SMB_REALLOC(tp, count * sizeof(*locks));
+		tp = talloc_realloc(br_lck, tp, struct lock_struct, count);
 		if (!tp) {
 			status = NT_STATUS_NO_MEMORY;
@@ -853 +952 @@
 
 	br_lck->num_locks = count;
-	SAFE_FREE(br_lck->lock_data);
+	TALLOC_FREE(br_lck->lock_data);
 	br_lck->lock_data = tp;
 	locks = tp;
@@ -873 +972 @@
 		if (pend_lock->lock_type == PENDING_READ_LOCK &&
 		    brl_pending_overlap(plock, pend_lock)) {
-			DEBUG(10,("brl_lock_posix: sending unlock message to pid %s\n",
-				  procid_str_static(&pend_lock->context.pid )));
+			struct server_id_buf tmp;
+
+			DEBUG(10, ("brl_lock_posix: sending unlock "
+				   "message to pid %s\n",
+				   server_id_str_buf(pend_lock->context.pid,
+						     &tmp)));
 
 			messaging_send(msg_ctx, pend_lock->context.pid,
@@ -894 +997 @@
 			struct byte_range_lock *br_lck,
 			struct lock_struct *plock,
-			bool blocking_lock,
-			struct blocking_lock_record *blr)
+			bool blocking_lock)
 {
 	VFS_FIND(brl_lock_windows);
-	return handle->fns->brl_lock_windows(handle, br_lck, plock,
-					     blocking_lock, blr);
+	return handle->fns->brl_lock_windows_fn(handle, br_lck, plock,
+						blocking_lock);
 }
 
@@ -911 +1013 @@
 		struct server_id pid,
 		br_off start,
-		br_off size, 
+		br_off size,
 		enum brl_type lock_type,
 		enum brl_flavour lock_flav,
 		bool blocking_lock,
-		uint64_t *psmblctx,
-		struct blocking_lock_record *blr)
+		uint64_t *psmblctx)
 {
 	NTSTATUS ret;
 	struct lock_struct lock;
+
+	ZERO_STRUCT(lock);
 
 #if !ZERO_ZERO
@@ -927 +1030 @@
 #endif
 
-#ifdef DEVELOPER
-	/* Quieten valgrind on test. */
-	memset(&lock, '\0', sizeof(lock));
-#endif
-
-	lock.context.smblctx = smblctx;
-	lock.context.pid = pid;
-	lock.context.tid = br_lck->fsp->conn->cnum;
-	lock.start = start;
-	lock.size = size;
-	lock.fnum = br_lck->fsp->fnum;
-	lock.lock_type = lock_type;
-	lock.lock_flav = lock_flav;
+	lock = (struct lock_struct) {
+		.context.smblctx = smblctx,
+		.context.pid = pid,
+		.context.tid = br_lck->fsp->conn->cnum,
+		.start = start,
+		.size = size,
+		.fnum = br_lck->fsp->fnum,
+		.lock_type = lock_type,
+		.lock_flav = lock_flav
+	};
 
 	if (lock_flav == WINDOWS_LOCK) {
 		ret = SMB_VFS_BRL_LOCK_WINDOWS(br_lck->fsp->conn, br_lck,
-					       &lock, blocking_lock, blr);
+					       &lock, blocking_lock);
 	} else {
 		ret = brl_lock_posix(msg_ctx, br_lck, &lock);
@@ -958 +1058 @@
 	}
 	return ret;
+}
+
+static void brl_delete_lock_struct(struct lock_struct *locks,
+				   unsigned num_locks,
+				   unsigned del_idx)
+{
+	if (del_idx >= num_locks) {
+		return;
+	}
+	memmove(&locks[del_idx], &locks[del_idx+1],
+		sizeof(*locks) * (num_locks - del_idx - 1));
 }
 
@@ -1027 +1138 @@
 #endif
 
-	/* Actually delete the lock. */
-	if (i < br_lck->num_locks - 1) {
-		memmove(&locks[i], &locks[i+1],
-			sizeof(*locks)*((br_lck->num_locks-1) - i));
-	}
-
+	brl_delete_lock_struct(locks, br_lck->num_locks, i);
 	br_lck->num_locks -= 1;
 	br_lck->modified = True;
@@ -1058 +1164 @@
 		/* We could send specific lock info here... */
 		if (brl_pending_overlap(plock, pend_lock)) {
-			DEBUG(10,("brl_unlock: sending unlock message to pid %s\n",
-				  procid_str_static(&pend_lock->context.pid )));
+			struct server_id_buf tmp;
+
+			DEBUG(10, ("brl_unlock: sending unlock message to "
+				   "pid %s\n",
+				   server_id_str_buf(pend_lock->context.pid,
+						     &tmp)));
 
 			messaging_send(msg_ctx, pend_lock->context.pid,
@@ -1099 +1209 @@
 	   1 more entry. */
 
-	tp = SMB_MALLOC_ARRAY(struct lock_struct, (br_lck->num_locks + 1));
+	tp = talloc_array(br_lck, struct lock_struct, br_lck->num_locks + 1);
 	if (!tp) {
 		DEBUG(10,("brl_unlock_posix: malloc fail\n"));
@@ -1121 +1231 @@
 			/* Do any Windows flavour locks conflict ? */
 			if (brl_conflict(lock, plock)) {
-				SAFE_FREE(tp);
+				TALLOC_FREE(tp);
 				return false;
 			}
@@ -1166 +1276 @@
 	if (!overlap_found) {
 		/* Just ignore - no change. */
-		SAFE_FREE(tp);
+		TALLOC_FREE(tp);
 		DEBUG(10,("brl_unlock_posix: No overlap - unlocked.\n"));
 		return True;
@@ -1183 +1293 @@
 	/* Realloc so we don't leak entries per unlock call. */
 	if (count) {
-		tp = (struct lock_struct *)SMB_REALLOC(tp, count * sizeof(*locks));
+		tp = talloc_realloc(br_lck, tp, struct lock_struct, count);
 		if (!tp) {
 			DEBUG(10,("brl_unlock_posix: realloc fail\n"));
@@ -1190 +1300 @@
 	} else {
 		/* We deleted the last lock. */
-		SAFE_FREE(tp);
+		TALLOC_FREE(tp);
 		tp = NULL;
 	}
@@ -1198 +1308 @@
 
 	br_lck->num_locks = count;
-	SAFE_FREE(br_lck->lock_data);
+	TALLOC_FREE(br_lck->lock_data);
 	locks = tp;
 	br_lck->lock_data = tp;
@@ -1215 +1325 @@
 		/* We could send specific lock info here... */
 		if (brl_pending_overlap(plock, pend_lock)) {
-			DEBUG(10,("brl_unlock: sending unlock message to pid %s\n",
-				  procid_str_static(&pend_lock->context.pid )));
+			struct server_id_buf tmp;
+
+			DEBUG(10, ("brl_unlock: sending unlock message to "
+				   "pid %s\n",
+				   server_id_str_buf(pend_lock->context.pid,
+						     &tmp)));
 
 			messaging_send(msg_ctx, pend_lock->context.pid,
@@ -1232 +1346 @@
 {
 	VFS_FIND(brl_unlock_windows);
-	return handle->fns->brl_unlock_windows(handle, msg_ctx, br_lck, plock);
+	return handle->fns->brl_unlock_windows_fn(handle, msg_ctx, br_lck,
+						  plock);
 }
 
@@ -1272 +1387 @@
 
 bool brl_locktest(struct byte_range_lock *br_lck,
-		  uint64_t smblctx,
-		  struct server_id pid,
-		  br_off start,
-		  br_off size, 
-		  enum brl_type lock_type,
-		  enum brl_flavour lock_flav)
+		  const struct lock_struct *rw_probe)
 {
 	bool ret = True;
 	unsigned int i;
-	struct lock_struct lock;
-	const struct lock_struct *locks = br_lck->lock_data;
+	struct lock_struct *locks = br_lck->lock_data;
 	files_struct *fsp = br_lck->fsp;
-
-	lock.context.smblctx = smblctx;
-	lock.context.pid = pid;
-	lock.context.tid = br_lck->fsp->conn->cnum;
-	lock.start = start;
-	lock.size = size;
-	lock.fnum = fsp->fnum;
-	lock.lock_type = lock_type;
-	lock.lock_flav = lock_flav;
 
 	/* Make sure existing locks don't conflict */
@@ -1299 +1399 @@
 		 * Our own locks don't conflict.
 		 */
-		if (brl_conflict_other(&locks[i], &lock)) {
+		if (brl_conflict_other(&locks[i], rw_probe)) {
+			if (br_lck->record == NULL) {
+				/* readonly */
+				return false;
+			}
+
+			if (!serverid_exists(&locks[i].context.pid)) {
+				locks[i].context.pid.pid = 0;
+				br_lck->modified = true;
+				continue;
+			}
+
 			return False;
 		}
@@ -1310 +1421 @@
 	 */
 
-	if(lp_posix_locking(fsp->conn->params) && (lock_flav == WINDOWS_LOCK)) {
+	if(lp_posix_locking(fsp->conn->params) &&
+	   (rw_probe->lock_flav == WINDOWS_LOCK)) {
+		/*
+		 * Make copies -- is_posix_locked might modify the values
+		 */
+
+		br_off start = rw_probe->start;
+		br_off size = rw_probe->size;
+		enum brl_type lock_type = rw_probe->lock_type;
+
 		ret = is_posix_locked(fsp, &start, &size, &lock_type, WINDOWS_LOCK);
 
-		DEBUG(10,("brl_locktest: posix start=%.0f len=%.0f %s for fnum %d file %s\n",
-			  (double)start, (double)size, ret ? "locked" : "unlocked",
-			  fsp->fnum, fsp_str_dbg(fsp)));
+		DEBUG(10, ("brl_locktest: posix start=%ju len=%ju %s for %s "
+			   "file %s\n", (uintmax_t)start, (uintmax_t)size,
+			   ret ? "locked" : "unlocked",
+			   fsp_fnum_dbg(fsp), fsp_str_dbg(fsp)));
 
 		/* We need to return the inverse of is_posix_locked. */
@@ -1333 +1454 @@
 		   struct server_id pid,
 		   br_off *pstart,
-		   br_off *psize, 
+		   br_off *psize,
 		   enum brl_type *plock_type,
 		   enum brl_flavour lock_flav)
@@ -1358 +1479 @@
 		if (exlock->lock_flav == WINDOWS_LOCK) {
 			conflict = brl_conflict(exlock, &lock);
-		} else {	
+		} else {
 			conflict = brl_conflict_posix(exlock, &lock);
 		}
@@ -1379 +1500 @@
 		bool ret = is_posix_locked(fsp, pstart, psize, plock_type, POSIX_LOCK);
 
-		DEBUG(10,("brl_lockquery: posix start=%.0f len=%.0f %s for fnum %d file %s\n",
-			  (double)*pstart, (double)*psize, ret ? "locked" : "unlocked",
-			  fsp->fnum, fsp_str_dbg(fsp)));
+		DEBUG(10, ("brl_lockquery: posix start=%ju len=%ju %s for %s "
+			   "file %s\n", (uintmax_t)*pstart,
+			   (uintmax_t)*psize, ret ? "locked" : "unlocked",
+			   fsp_fnum_dbg(fsp), fsp_str_dbg(fsp)));
 
 		if (ret) {
@@ -1396 +1518 @@
 bool smb_vfs_call_brl_cancel_windows(struct vfs_handle_struct *handle,
 				     struct byte_range_lock *br_lck,
-				     struct lock_struct *plock,
-				     struct blocking_lock_record *blr)
+				     struct lock_struct *plock)
 {
 	VFS_FIND(brl_cancel_windows);
-	return handle->fns->brl_cancel_windows(handle, br_lck, plock, blr);
+	return handle->fns->brl_cancel_windows_fn(handle, br_lck, plock);
 }
 
@@ -1411 +1532 @@
 		br_off start,
 		br_off size,
-		enum brl_flavour lock_flav,
-		struct blocking_lock_record *blr)
+		enum brl_flavour lock_flav)
 {
 	bool ret;
@@ -1428 +1548 @@
 	if (lock_flav == WINDOWS_LOCK) {
 		ret = SMB_VFS_BRL_CANCEL_WINDOWS(br_lck->fsp->conn, br_lck,
-						 &lock, blr);
+						 &lock);
 	} else {
 		ret = brl_lock_cancel_default(br_lck, &lock);
@@ -1463 +1583 @@
 	}
 
-	if (i < br_lck->num_locks - 1) {
-		/* Found this particular pending lock - delete it */
-		memmove(&locks[i], &locks[i+1],
-			sizeof(*locks)*((br_lck->num_locks-1) - i));
-	}
-
+	brl_delete_lock_struct(locks, br_lck->num_locks, i);
 	br_lck->num_locks -= 1;
 	br_lck->modified = True;
@@ -1484 +1599 @@
 {
 	files_struct *fsp = br_lck->fsp;
-	uint16 tid = fsp->conn->cnum;
-	int fnum = fsp->fnum;
+	uint32_t tid = fsp->conn->cnum;
+	uint64_t fnum = fsp->fnum;
 	unsigned int i;
 	struct lock_struct *locks = br_lck->lock_data;
-	struct server_id pid = sconn_server_id(fsp->conn->sconn);
+	struct server_id pid = messaging_server_id(fsp->conn->sconn->msg_ctx);
 	struct lock_struct *locks_copy;
 	unsigned int num_locks_copy;
@@ -1494 +1609 @@
 	/* Copy the current lock array. */
 	if (br_lck->num_locks) {
-		locks_copy = (struct lock_struct *)TALLOC_MEMDUP(br_lck, locks, br_lck->num_locks * sizeof(struct lock_struct));
+		locks_copy = (struct lock_struct *)talloc_memdup(br_lck, locks, br_lck->num_locks * sizeof(struct lock_struct));
 		if (!locks_copy) {
 			smb_panic("brl_close_fnum: talloc failed");
@@ -1507 +1622 @@
 		struct lock_struct *lock = &locks_copy[i];
 
-		if (lock->context.tid == tid && procid_equal(&lock->context.pid, &pid) &&
+		if (lock->context.tid == tid && serverid_equal(&lock->context.pid, &pid) &&
 		    (lock->fnum == fnum)) {
 			brl_unlock(msg_ctx,
@@ -1520 +1635 @@
 	}
 
-/****************************************************************************
- Ensure this set of lock entries is valid.
-****************************************************************************/
-static bool validate_lock_entries(unsigned int *pnum_entries, struct lock_struct **pplocks)
-{
+bool brl_mark_disconnected(struct files_struct *fsp)
+{
+	uint32_t tid = fsp->conn->cnum;
+	uint64_t smblctx;
+	uint64_t fnum = fsp->fnum;
 	unsigned int i;
-	unsigned int num_valid_entries = 0;
-	struct lock_struct *locks = *pplocks;
-
-	for (i = 0; i < *pnum_entries; i++) {
-		struct lock_struct *lock_data = &locks[i];
-		if (!serverid_exists(&lock_data->context.pid)) {
-			/* This process no longer exists - mark this
-			   entry as invalid by zeroing it. */
-			ZERO_STRUCTP(lock_data);
-		} else {
-			num_valid_entries++;
-		}
-	}
-
-	if (num_valid_entries != *pnum_entries) {
-		struct lock_struct *new_lock_data = NULL;
-
-		if (num_valid_entries) {
-			new_lock_data = SMB_MALLOC_ARRAY(struct lock_struct, num_valid_entries);
-			if (!new_lock_data) {
-				DEBUG(3, ("malloc fail\n"));
-				return False;
-			}
-
-			num_valid_entries = 0;
-			for (i = 0; i < *pnum_entries; i++) {
-				struct lock_struct *lock_data = &locks[i];
-				if (lock_data->context.smblctx &&
-				    lock_data->context.tid) {
-					/* Valid (nonzero) entry - copy it. */
-					memcpy(&new_lock_data[num_valid_entries],
-					       lock_data, sizeof(struct lock_struct));
-					num_valid_entries++;
-				}
-			}
-		}
-
-		SAFE_FREE(*pplocks);
-		*pplocks = new_lock_data;
-		*pnum_entries = num_valid_entries;
-	}
-
-	return True;
+	struct server_id self = messaging_server_id(fsp->conn->sconn->msg_ctx);
+	struct byte_range_lock *br_lck = NULL;
+
+	if (fsp->op == NULL) {
+		return false;
+	}
+
+	smblctx = fsp->op->global->open_persistent_id;
+
+	if (!fsp->op->global->durable) {
+		return false;
+	}
+
+	if (fsp->current_lock_count == 0) {
+		return true;
+	}
+
+	br_lck = brl_get_locks(talloc_tos(), fsp);
+	if (br_lck == NULL) {
+		return false;
+	}
+
+	for (i=0; i < br_lck->num_locks; i++) {
+		struct lock_struct *lock = &br_lck->lock_data[i];
+
+		/*
+		 * as this is a durable handle, we only expect locks
+		 * of the current file handle!
+		 */
+
+		if (lock->context.smblctx != smblctx) {
+			TALLOC_FREE(br_lck);
+			return false;
+		}
+
+		if (lock->context.tid != tid) {
+			TALLOC_FREE(br_lck);
+			return false;
+		}
+
+		if (!serverid_equal(&lock->context.pid, &self)) {
+			TALLOC_FREE(br_lck);
+			return false;
+		}
+
+		if (lock->fnum != fnum) {
+			TALLOC_FREE(br_lck);
+			return false;
+		}
+
+		server_id_set_disconnected(&lock->context.pid);
+		lock->context.tid = TID_FIELD_INVALID;
+		lock->fnum = FNUM_FIELD_INVALID;
+	}
+
+	br_lck->modified = true;
+	TALLOC_FREE(br_lck);
+	return true;
+}
+
+bool brl_reconnect_disconnected(struct files_struct *fsp)
+{
+	uint32_t tid = fsp->conn->cnum;
+	uint64_t smblctx;
+	uint64_t fnum = fsp->fnum;
+	unsigned int i;
+	struct server_id self = messaging_server_id(fsp->conn->sconn->msg_ctx);
+	struct byte_range_lock *br_lck = NULL;
+
+	if (fsp->op == NULL) {
+		return false;
+	}
+
+	smblctx = fsp->op->global->open_persistent_id;
+
+	if (!fsp->op->global->durable) {
+		return false;
+	}
+
+	/*
+	 * When reconnecting, we do not want to validate the brlock entries
+	 * and thereby remove our own (disconnected) entries but reactivate
+	 * them instead.
+	 */
+
+	br_lck = brl_get_locks(talloc_tos(), fsp);
+	if (br_lck == NULL) {
+		return false;
+	}
+
+	if (br_lck->num_locks == 0) {
+		TALLOC_FREE(br_lck);
+		return true;
+	}
+
+	for (i=0; i < br_lck->num_locks; i++) {
+		struct lock_struct *lock = &br_lck->lock_data[i];
+
+		/*
+		 * as this is a durable handle we only expect locks
+		 * of the current file handle!
+		 */
+
+		if (lock->context.smblctx != smblctx) {
+			TALLOC_FREE(br_lck);
+			return false;
+		}
+
+		if (lock->context.tid != TID_FIELD_INVALID) {
+			TALLOC_FREE(br_lck);
+			return false;
+		}
+
+		if (!server_id_is_disconnected(&lock->context.pid)) {
+			TALLOC_FREE(br_lck);
+			return false;
+		}
+
+		if (lock->fnum != FNUM_FIELD_INVALID) {
+			TALLOC_FREE(br_lck);
+			return false;
+		}
+
+		lock->context.pid = self;
+		lock->context.tid = tid;
+		lock->fnum = fnum;
+	}
+
+	fsp->current_lock_count = br_lck->num_locks;
+	br_lck->modified = true;
+	TALLOC_FREE(br_lck);
+	return true;
 }
@@ -1585 +1789 @@
 ****************************************************************************/
 
-static int traverse_fn(struct db_record *rec, void *state)
+static int brl_traverse_fn(struct db_record *rec, void *state)
 {
 	struct brl_forall_cb *cb = (struct brl_forall_cb *)state;
@@ -1592 +1796 @@
 	unsigned int i;
 	unsigned int num_locks = 0;
-	unsigned int orig_num_locks = 0;
+	TDB_DATA dbkey;
+	TDB_DATA value;
+
+	dbkey = dbwrap_record_get_key(rec);
+	value = dbwrap_record_get_value(rec);
 
 	/* In a traverse function we must make a copy of
 	   dbuf before modifying it. */
 
-	locks = (struct lock_struct *)memdup(rec->value.dptr,
-					     rec->value.dsize);
+	locks = (struct lock_struct *)talloc_memdup(
+		talloc_tos(), value.dptr, value.dsize);
 	if (!locks) {
 		return -1; /* Terminate traversal. */
 	}
 
-	key = (struct file_id *)rec->key.dptr;
-	orig_num_locks = num_locks = rec->value.dsize/sizeof(*locks);
-
-	/* Ensure the lock db is clean of entries from invalid processes. */
-
-	if (!validate_lock_entries(&num_locks, &locks)) {
-		SAFE_FREE(locks);
-		return -1; /* Terminate traversal */
-	}
-
-	if (orig_num_locks != num_locks) {
-		if (num_locks) {
-			TDB_DATA data;
-			data.dptr = (uint8_t *)locks;
-			data.dsize = num_locks*sizeof(struct lock_struct);
-			rec->store(rec, data, TDB_REPLACE);
-		} else {
-			rec->delete_rec(rec);
-		}
-	}
+	key = (struct file_id *)dbkey.dptr;
+	num_locks = value.dsize/sizeof(*locks);
 
 	if (cb->fn) {
@@ -1636 +1826 @@
 	}
 
-	SAFE_FREE(locks);
+	TALLOC_FREE(locks);
 	return 0;
 }
@@ -1652 +1842 @@
 {
 	struct brl_forall_cb cb;
+	NTSTATUS status;
+	int count = 0;
 
 	if (!brlock_db) {
@@ -1658 +1850 @@
 	cb.fn = fn;
 	cb.private_data = private_data;
-	return brlock_db->traverse(brlock_db, traverse_fn, &cb);
+	status = dbwrap_traverse(brlock_db, brl_traverse_fn, &cb, &count);
+
+	if (!NT_STATUS_IS_OK(status)) {
+		return -1;
+	} else {
+		return count;
+	}
 }
 
@@ -1669 +1867 @@
 static void byte_range_lock_flush(struct byte_range_lock *br_lck)
 {
-	if (br_lck->read_only) {
-		SMB_ASSERT(!br_lck->modified);
-	}
+	unsigned i;
+	struct lock_struct *locks = br_lck->lock_data;
 
 	if (!br_lck->modified) {
+		DEBUG(10, ("br_lck not modified\n"));
 		goto done;
 	}
 
-	if (br_lck->num_locks == 0) {
+	i = 0;
+
+	while (i < br_lck->num_locks) {
+		if (locks[i].context.pid.pid == 0) {
+			/*
+			 * Autocleanup, the process conflicted and does not
+			 * exist anymore.
+			 */
+			locks[i] = locks[br_lck->num_locks-1];
+			br_lck->num_locks -= 1;
+		} else {
+			i += 1;
+		}
+	}
+
+	if ((br_lck->num_locks == 0) && (br_lck->num_read_oplocks == 0)) {
 		/* No locks - delete this entry. */
-		NTSTATUS status = br_lck->record->delete_rec(br_lck->record);
+		NTSTATUS status = dbwrap_record_delete(br_lck->record);
 		if (!NT_STATUS_IS_OK(status)) {
 			DEBUG(0, ("delete_rec returned %s\n",
@@ -1686 +1899 @@
 		}
 	} else {
+		size_t lock_len, data_len;
 		TDB_DATA data;
 		NTSTATUS status;
 
-		data.dptr = (uint8 *)br_lck->lock_data;
-		data.dsize = br_lck->num_locks * sizeof(struct lock_struct);
-
-		status = br_lck->record->store(br_lck->record, data,
-					       TDB_REPLACE);
+		lock_len = br_lck->num_locks * sizeof(struct lock_struct);
+		data_len = lock_len + sizeof(br_lck->num_read_oplocks);
+
+		data.dsize = data_len;
+		data.dptr = talloc_array(talloc_tos(), uint8_t, data_len);
+		SMB_ASSERT(data.dptr != NULL);
+
+		memcpy(data.dptr, br_lck->lock_data, lock_len);
+		memcpy(data.dptr + lock_len, &br_lck->num_read_oplocks,
+		       sizeof(br_lck->num_read_oplocks));
+
+		status = dbwrap_record_store(br_lck->record, data, TDB_REPLACE);
+		TALLOC_FREE(data.dptr);
 		if (!NT_STATUS_IS_OK(status)) {
 			DEBUG(0, ("store returned %s\n", nt_errstr(status)));
@@ -1700 +1922 @@
 	}
 
+	DEBUG(10, ("seqnum=%d\n", dbwrap_get_seqnum(brlock_db)));
+
 done:
-
-	br_lck->read_only = true;
 	br_lck->modified = false;
-
 	TALLOC_FREE(br_lck->record);
 }
@@ -1711 +1932 @@
 {
 	byte_range_lock_flush(br_lck);
-	SAFE_FREE(br_lck->lock_data);
 	return 0;
+}
+
+static bool brl_parse_data(struct byte_range_lock *br_lck, TDB_DATA data)
+{
+	size_t data_len;
+
+	if (data.dsize == 0) {
+		return true;
+	}
+	if (data.dsize % sizeof(struct lock_struct) !=
+	    sizeof(br_lck->num_read_oplocks)) {
+		DEBUG(1, ("Invalid data size: %u\n", (unsigned)data.dsize));
+		return false;
+	}
+
+	br_lck->num_locks = data.dsize / sizeof(struct lock_struct);
+	data_len = br_lck->num_locks * sizeof(struct lock_struct);
+
+	br_lck->lock_data = talloc_memdup(br_lck, data.dptr, data_len);
+	if (br_lck->lock_data == NULL) {
+		DEBUG(1, ("talloc_memdup failed\n"));
+		return false;
+	}
+	memcpy(&br_lck->num_read_oplocks, data.dptr + data_len,
+	       sizeof(br_lck->num_read_oplocks));
+	return true;
 }
@@ -1721 +1967 @@
 ********************************************************************/
 
-static struct byte_range_lock *brl_get_locks_internal(TALLOC_CTX *mem_ctx,
-					files_struct *fsp, bool read_only)
+struct byte_range_lock *brl_get_locks(TALLOC_CTX *mem_ctx, files_struct *fsp)
 {
 	TDB_DATA key, data;
-	struct byte_range_lock *br_lck = TALLOC_P(mem_ctx, struct byte_range_lock);
-	bool do_read_only = read_only;
-
+	struct byte_range_lock *br_lck;
+
+	br_lck = talloc_zero(mem_ctx, struct byte_range_lock);
 	if (br_lck == NULL) {
 		return NULL;
@@ -1733 +1978 @@
 
 	br_lck->fsp = fsp;
-	br_lck->num_locks = 0;
-	br_lck->modified = False;
-	br_lck->key = fsp->file_id;
-
-	key.dptr = (uint8 *)&br_lck->key;
+
+	key.dptr = (uint8_t *)&fsp->file_id;
 	key.dsize = sizeof(struct file_id);
 
-	if (!fsp->lockdb_clean) {
-		/* We must be read/write to clean
-		   the dead entries. */
-		do_read_only = false;
-	}
-
-	if (do_read_only) {
-		if (brlock_db->fetch(brlock_db, br_lck, key, &data) == -1) {
-			DEBUG(3, ("Could not fetch byte range lock record\n"));
-			TALLOC_FREE(br_lck);
-			return NULL;
-		}
-		br_lck->record = NULL;
-	} else {
-		br_lck->record = brlock_db->fetch_locked(brlock_db, br_lck, key);
-
-		if (br_lck->record == NULL) {
-			DEBUG(3, ("Could not lock byte range lock entry\n"));
-			TALLOC_FREE(br_lck);
-			return NULL;
-		}
-
-		data = br_lck->record->value;
-	}
-
-	br_lck->read_only = do_read_only;
-	br_lck->lock_data = NULL;
+	br_lck->record = dbwrap_fetch_locked(brlock_db, br_lck, key);
+
+	if (br_lck->record == NULL) {
+		DEBUG(3, ("Could not lock byte range lock entry\n"));
+		TALLOC_FREE(br_lck);
+		return NULL;
+	}
+
+	data = dbwrap_record_get_value(br_lck->record);
+
+	if (!brl_parse_data(br_lck, data)) {
+		TALLOC_FREE(br_lck);
+		return NULL;
+	}
 
 	talloc_set_destructor(br_lck, byte_range_lock_destructor);
-
-	br_lck->num_locks = data.dsize / sizeof(struct lock_struct);
-
-	if (br_lck->num_locks != 0) {
-		br_lck->lock_data = SMB_MALLOC_ARRAY(struct lock_struct,
-						     br_lck->num_locks);
-		if (br_lck->lock_data == NULL) {
-			DEBUG(0, ("malloc failed\n"));
-			TALLOC_FREE(br_lck);
-			return NULL;
-		}
-
-		memcpy(br_lck->lock_data, data.dptr, data.dsize);
-	}
-
-	if (!fsp->lockdb_clean) {
-		int orig_num_locks = br_lck->num_locks;
-
-		/* This is the first time we've accessed this. */
-		/* Go through and ensure all entries exist - remove any that don't. */
-		/* Makes the lockdb self cleaning at low cost. */
-
-		if (!validate_lock_entries(&br_lck->num_locks,
-					   &br_lck->lock_data)) {
-			SAFE_FREE(br_lck->lock_data);
-			TALLOC_FREE(br_lck);
-			return NULL;
-		}
-
-		/* Ensure invalid locks are cleaned up in the destructor. */
-		if (orig_num_locks != br_lck->num_locks) {
-			br_lck->modified = True;
-		}
-
-		/* Mark the lockdb as "clean" as seen from this open file. */
-		fsp->lockdb_clean = True;
-	}
 
 	if (DEBUGLEVEL >= 10) {
@@ -1818 +2010 @@
 	}
 
-	if (do_read_only != read_only) {
-		/*
-		 * this stores the record and gets rid of
-		 * the write lock that is needed for a cleanup
-		 */
-		byte_range_lock_flush(br_lck);
-	}
-
 	return br_lck;
 }
 
-struct byte_range_lock *brl_get_locks(TALLOC_CTX *mem_ctx,
-					files_struct *fsp)
-{
-	return brl_get_locks_internal(mem_ctx, fsp, False);
-}
-
+struct brl_get_locks_readonly_state {
+	TALLOC_CTX *mem_ctx;
+	struct byte_range_lock **br_lock;
+};
+
+static void brl_get_locks_readonly_parser(TDB_DATA key, TDB_DATA data,
+					  void *private_data)
+{
+	struct brl_get_locks_readonly_state *state =
+		(struct brl_get_locks_readonly_state *)private_data;
+	struct byte_range_lock *br_lck;
+
+	br_lck = talloc_pooled_object(
+		state->mem_ctx, struct byte_range_lock, 1, data.dsize);
+	if (br_lck == NULL) {
+		*state->br_lock = NULL;
+		return;
+	}
+	*br_lck = (struct byte_range_lock) { 0 };
+	if (!brl_parse_data(br_lck, data)) {
+		*state->br_lock = NULL;
+		return;
+	}
+	*state->br_lock = br_lck;
+}
+
 struct byte_range_lock *brl_get_locks_readonly(files_struct *fsp)
 {
-	struct byte_range_lock *br_lock;
-
-	if (lp_clustering()) {
-		return brl_get_locks_internal(talloc_tos(), fsp, true);
-	}
+	struct byte_range_lock *br_lock = NULL;
+	struct brl_get_locks_readonly_state state;
+	NTSTATUS status;
+
+	DEBUG(10, ("seqnum=%d, fsp->brlock_seqnum=%d\n",
+		   dbwrap_get_seqnum(brlock_db), fsp->brlock_seqnum));
 
 	if ((fsp->brlock_rec != NULL)
-	    && (brlock_db->get_seqnum(brlock_db) == fsp->brlock_seqnum)) {
+	    && (dbwrap_get_seqnum(brlock_db) == fsp->brlock_seqnum)) {
+		/*
+		 * We have cached the brlock_rec and the database did not
+		 * change.
+		 */
 		return fsp->brlock_rec;
 	}
 
-	TALLOC_FREE(fsp->brlock_rec);
-
-	br_lock = brl_get_locks_internal(talloc_tos(), fsp, true);
+	/*
+	 * Parse the record fresh from the database
+	 */
+
+	state.mem_ctx = fsp;
+	state.br_lock = &br_lock;
+
+	status = dbwrap_parse_record(
+		brlock_db,
+		make_tdb_data((uint8_t *)&fsp->file_id,
+			      sizeof(fsp->file_id)),
+		brl_get_locks_readonly_parser, &state);
+
+	if (NT_STATUS_EQUAL(status,NT_STATUS_NOT_FOUND)) {
+		/*
+		 * No locks on this file. Return an empty br_lock.
+		 */
+		br_lock = talloc(fsp, struct byte_range_lock);
+		if (br_lock == NULL) {
+			return NULL;
+		}
+
+		br_lock->num_read_oplocks = 0;
+		br_lock->num_locks = 0;
+		br_lock->lock_data = NULL;
+
+	} else if (!NT_STATUS_IS_OK(status)) {
+		DEBUG(3, ("Could not parse byte range lock record: "
+			  "%s\n", nt_errstr(status)));
+		return NULL;
+	}
 	if (br_lock == NULL) {
 		return NULL;
 	}
-	fsp->brlock_seqnum = brlock_db->get_seqnum(brlock_db);
-
-	fsp->brlock_rec = talloc_move(fsp, &br_lock);
-
-	return fsp->brlock_rec;
+
+	br_lock->fsp = fsp;
+	br_lock->modified = false;
+	br_lock->record = NULL;
+
+	if (lp_clustering()) {
+		/*
+		 * In the cluster case we can't cache the brlock struct
+		 * because dbwrap_get_seqnum does not work reliably over
+		 * ctdb. Thus we have to throw away the brlock struct soon.
+		 */
+		talloc_steal(talloc_tos(), br_lock);
+	} else {
+		/*
+		 * Cache the brlock struct, invalidated when the dbwrap_seqnum
+		 * changes. See beginning of this routine.
+		 */
+		TALLOC_FREE(fsp->brlock_rec);
+		fsp->brlock_rec = br_lock;
+		fsp->brlock_seqnum = dbwrap_get_seqnum(brlock_db);
+	}
+
+	return br_lock;
 }
@@ -1863 +2118 @@
 struct brl_revalidate_state {
 	ssize_t array_size;
-	uint32 num_pids;
+	uint32_t num_pids;
 	struct server_id *pids;
 };
@@ -1895 +2150 @@
 static int compare_procids(const void *p1, const void *p2)
 {
-	const struct server_id *i1 = (struct server_id *)p1;
-	const struct server_id *i2 = (struct server_id *)p2;
+	const struct server_id *i1 = (const struct server_id *)p1;
+	const struct server_id *i2 = (const struct server_id *)p2;
 
 	if (i1->pid < i2->pid) return -1;
-	if (i2->pid > i2->pid) return 1;
+	if (i1->pid > i2->pid) return 1;
 	return 0;
 }
@@ -1912 +2167 @@
 */
 
-static void brl_revalidate(struct messaging_context *msg_ctx,
-			   void *private_data,
-			   uint32_t msg_type,
-			   struct server_id server_id,
-			   DATA_BLOB *data)
+void brl_revalidate(struct messaging_context *msg_ctx,
+		    void *private_data,
+		    uint32_t msg_type,
+		    struct server_id server_id,
+		    DATA_BLOB *data)
 {
 	struct brl_revalidate_state *state;
-	uint32 i;
+	uint32_t i;
 	struct server_id last_pid;
 
-	if (!(state = TALLOC_ZERO_P(NULL, struct brl_revalidate_state))) {
+	if (!(state = talloc_zero(NULL, struct brl_revalidate_state))) {
 		DEBUG(0, ("talloc failed\n"));
 		return;
@@ -1943 +2198 @@
 
 	for (i=0; i<state->num_pids; i++) {
-		if (procid_equal(&last_pid, &state->pids[i])) {
+		if (serverid_equal(&last_pid, &state->pids[i])) {
 			/*
 			 * We've seen that one already
@@ -1960 +2215 @@
 }
 
-void brl_register_msgs(struct messaging_context *msg_ctx)
-{
-	messaging_register(msg_ctx, NULL, MSG_SMB_BRL_VALIDATE,
-			   brl_revalidate);
-}
+bool brl_cleanup_disconnected(struct file_id fid, uint64_t open_persistent_id)
+{
+	bool ret = false;
+	TALLOC_CTX *frame = talloc_stackframe();
+	TDB_DATA key, val;
+	struct db_record *rec;
+	struct lock_struct *lock;
+	unsigned n, num;
+	NTSTATUS status;
+
+	key = make_tdb_data((void*)&fid, sizeof(fid));
+
+	rec = dbwrap_fetch_locked(brlock_db, frame, key);
+	if (rec == NULL) {
+		DEBUG(5, ("brl_cleanup_disconnected: failed to fetch record "
+			  "for file %s\n", file_id_string(frame, &fid)));
+		goto done;
+	}
+
+	val = dbwrap_record_get_value(rec);
+	lock = (struct lock_struct*)val.dptr;
+	num = val.dsize / sizeof(struct lock_struct);
+	if (lock == NULL) {
+		DEBUG(10, ("brl_cleanup_disconnected: no byte range locks for "
+			   "file %s\n", file_id_string(frame, &fid)));
+		ret = true;
+		goto done;
+	}
+
+	for (n=0; n<num; n++) {
+		struct lock_context *ctx = &lock[n].context;
+
+		if (!server_id_is_disconnected(&ctx->pid)) {
+			struct server_id_buf tmp;
+			DEBUG(5, ("brl_cleanup_disconnected: byte range lock "
+				  "%s used by server %s, do not cleanup\n",
+				  file_id_string(frame, &fid),
+				  server_id_str_buf(ctx->pid, &tmp)));
+			goto done;
+		}
+
+		if (ctx->smblctx != open_persistent_id) {
+			DEBUG(5, ("brl_cleanup_disconnected: byte range lock "
+				  "%s expected smblctx %llu but found %llu"
+				  ", do not cleanup\n",
+				  file_id_string(frame, &fid),
+				  (unsigned long long)open_persistent_id,
+				  (unsigned long long)ctx->smblctx));
+			goto done;
+		}
+	}
+
+	status = dbwrap_record_delete(rec);
+	if (!NT_STATUS_IS_OK(status)) {
+		DEBUG(5, ("brl_cleanup_disconnected: failed to delete record "
+			  "for file %s from %s, open %llu: %s\n",
+			  file_id_string(frame, &fid), dbwrap_name(brlock_db),
+			  (unsigned long long)open_persistent_id,
+			  nt_errstr(status)));
+		goto done;
+	}
+
+	DEBUG(10, ("brl_cleanup_disconnected: "
+		   "file %s cleaned up %u entries from open %llu\n",
+		   file_id_string(frame, &fid), num,
+		   (unsigned long long)open_persistent_id));
+
+	ret = true;
+done:
+	talloc_free(frame);
+	return ret;
+}
```
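A note on the record format this changeset introduces: r988 appends the `num_read_oplocks` counter to each `brlock.tdb` record, after the array of `lock_struct` entries, and the new `brl_parse_data()` accepts a record only if its size is an exact multiple of `sizeof(struct lock_struct)` plus the size of that trailing counter (an empty record is also valid). This is also why `byte_range_lock_flush()` deletes a record only when both the lock count and `num_read_oplocks` are zero: the trailing counter alone still encodes state. The following is a minimal standalone sketch of that pack/parse round-trip, not Samba code: it uses a simplified lock struct, plain `malloc` instead of talloc, and hypothetical `toy_*` names.

```c
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Simplified stand-ins for Samba's struct lock_struct and TDB_DATA. */
struct toy_lock { uint64_t start; uint64_t size; };
struct toy_blob { uint8_t *dptr; size_t dsize; };

/* Pack like byte_range_lock_flush() in r988: the lock array first,
 * then the 32-bit read-oplock counter appended at the end. */
static struct toy_blob toy_pack(const struct toy_lock *locks, size_t num_locks,
				uint32_t num_read_oplocks)
{
	size_t lock_len = num_locks * sizeof(struct toy_lock);
	struct toy_blob b;

	b.dsize = lock_len + sizeof(num_read_oplocks);
	b.dptr = malloc(b.dsize);
	assert(b.dptr != NULL);

	memcpy(b.dptr, locks, lock_len);
	memcpy(b.dptr + lock_len, &num_read_oplocks, sizeof(num_read_oplocks));
	return b;
}

/* Parse like brl_parse_data(): an empty record is valid, and anything
 * else must be n * sizeof(lock) + sizeof(uint32_t) bytes. */
static bool toy_parse(struct toy_blob b, size_t *num_locks,
		      uint32_t *num_read_oplocks)
{
	size_t lock_len;

	if (b.dsize == 0) {
		*num_locks = 0;
		*num_read_oplocks = 0;
		return true;
	}
	if (b.dsize % sizeof(struct toy_lock) != sizeof(uint32_t)) {
		return false; /* corrupt record */
	}
	*num_locks = b.dsize / sizeof(struct toy_lock);
	lock_len = *num_locks * sizeof(struct toy_lock);
	memcpy(num_read_oplocks, b.dptr + lock_len, sizeof(uint32_t));
	return true;
}

int main(void)
{
	struct toy_lock locks[2] = { { 0, 100 }, { 200, 50 } };
	struct toy_blob b = toy_pack(locks, 2, 1);
	size_t n;
	uint32_t oplocks;

	if (toy_parse(b, &n, &oplocks)) {
		printf("%zu locks, %u read oplocks\n", n, oplocks);
	}
	free(b.dptr);
	return 0;
}
```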