Changeset 988 for vendor/current/source3/locking
- Timestamp: Nov 24, 2016, 1:14:11 PM
- Location: vendor/current/source3/locking
- Files: 3 added, 4 edited
vendor/current/source3/locking/brlock.c
r740 r988 1 /* 1 /* 2 2 Unix SMB/CIFS implementation. 3 3 byte range locking code … … 6 6 Copyright (C) Andrew Tridgell 1992-2000 7 7 Copyright (C) Jeremy Allison 1992-2000 8 8 9 9 This program is free software; you can redistribute it and/or modify 10 10 it under the terms of the GNU General Public License as published by 11 11 the Free Software Foundation; either version 3 of the License, or 12 12 (at your option) any later version. 13 13 14 14 This program is distributed in the hope that it will be useful, 15 15 but WITHOUT ANY WARRANTY; without even the implied warranty of 16 16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 17 GNU General Public License for more details. 18 18 19 19 You should have received a copy of the GNU General Public License 20 20 along with this program. If not, see <http://www.gnu.org/licenses/>. … … 29 29 #include "locking/proto.h" 30 30 #include "smbd/globals.h" 31 #include "dbwrap.h" 31 #include "dbwrap/dbwrap.h" 32 #include "dbwrap/dbwrap_open.h" 32 33 #include "serverid.h" 33 34 #include "messages.h" 35 #include "util_tdb.h" 34 36 35 37 #undef DBGC_CLASS … … 41 43 42 44 static struct db_context *brlock_db; 45 46 struct byte_range_lock { 47 struct files_struct *fsp; 48 unsigned int num_locks; 49 bool modified; 50 uint32_t num_read_oplocks; 51 struct lock_struct *lock_data; 52 struct db_record *record; 53 }; 43 54 44 55 /**************************************************************************** … … 46 57 ****************************************************************************/ 47 58 48 static void print_lock_struct(unsigned int i, struct lock_struct *pls) 49 { 59 static void print_lock_struct(unsigned int i, const struct lock_struct *pls) 60 { 61 struct server_id_buf tmp; 62 50 63 DEBUG(10,("[%u]: smblctx = %llu, tid = %u, pid = %s, ", 51 64 i, 52 65 (unsigned long long)pls->context.smblctx, 53 66 (unsigned int)pls->context.tid, 54 procid_str(talloc_tos(), &pls->context.pid) )); 55 56 DEBUG(10,("start = %.0f, size = %.0f, fnum = %d, %s %s\n", 57 (double)pls->start, 58 (double)pls->size, 59 pls->fnum, 60 lock_type_name(pls->lock_type), 61 lock_flav_name(pls->lock_flav) )); 67 server_id_str_buf(pls->context.pid, &tmp) )); 68 69 DEBUG(10, ("start = %ju, size = %ju, fnum = %ju, %s %s\n", 70 (uintmax_t)pls->start, 71 (uintmax_t)pls->size, 72 (uintmax_t)pls->fnum, 73 lock_type_name(pls->lock_type), 74 lock_flav_name(pls->lock_flav))); 75 } 76 77 unsigned int brl_num_locks(const struct byte_range_lock *brl) 78 { 79 return brl->num_locks; 80 } 81 82 struct files_struct *brl_fsp(struct byte_range_lock *brl) 83 { 84 return brl->fsp; 85 } 86 87 uint32_t brl_num_read_oplocks(const struct byte_range_lock *brl) 88 { 89 return brl->num_read_oplocks; 90 } 91 92 void brl_set_num_read_oplocks(struct byte_range_lock *brl, 93 uint32_t num_read_oplocks) 94 { 95 DEBUG(10, ("Setting num_read_oplocks to %"PRIu32"\n", 96 num_read_oplocks)); 97 SMB_ASSERT(brl->record != NULL); /* otherwise we're readonly */ 98 brl->num_read_oplocks = num_read_oplocks; 99 brl->modified = true; 62 100 } 63 101 … … 66 104 ****************************************************************************/ 67 105 68 bool brl_same_context(const struct lock_context *ctx1, 106 static bool brl_same_context(const struct lock_context *ctx1, 69 107 const struct lock_context *ctx2) 70 108 { 71 return ( procid_equal(&ctx1->pid, &ctx2->pid) &&109 return (serverid_equal(&ctx1->pid, &ctx2->pid) && 72 110 (ctx1->smblctx == ctx2->smblctx) && 73 111 (ctx1->tid == ctx2->tid)); … … 82 120 { 83 121 /* XXX 
Remove for Win7 compatibility. */ 84 /* this extra check is not redund ent - it copes with locks122 /* this extra check is not redundant - it copes with locks 85 123 that go beyond the end of 64 bit file space */ 86 124 if (lck1->size != 0 && … … 101 139 ****************************************************************************/ 102 140 103 static bool brl_conflict(const struct lock_struct *lck1, 141 static bool brl_conflict(const struct lock_struct *lck1, 104 142 const struct lock_struct *lck2) 105 143 { … … 122 160 123 161 return brl_overlap(lck1, lck2); 124 } 162 } 125 163 126 164 /**************************************************************************** … … 130 168 ****************************************************************************/ 131 169 132 static bool brl_conflict_posix(const struct lock_struct *lck1, 170 static bool brl_conflict_posix(const struct lock_struct *lck1, 133 171 const struct lock_struct *lck2) 134 172 { … … 147 185 } 148 186 149 /* Locks on the same context con't conflict. Ignore fnum. */187 /* Locks on the same context don't conflict. Ignore fnum. */ 150 188 if (brl_same_context(&lck1->context, &lck2->context)) { 151 189 return False; … … 155 193 do they overlap ? */ 156 194 return brl_overlap(lck1, lck2); 157 } 195 } 158 196 159 197 #if ZERO_ZERO 160 static bool brl_conflict1(const struct lock_struct *lck1, 198 static bool brl_conflict1(const struct lock_struct *lck1, 161 199 const struct lock_struct *lck2) 162 200 { … … 181 219 return False; 182 220 } 183 221 184 222 return True; 185 } 223 } 186 224 #endif 187 225 … … 192 230 ****************************************************************************/ 193 231 194 static bool brl_conflict_other(const struct lock_struct *lck1, const struct lock_struct *lck2) 195 { 196 if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type)) 232 static bool brl_conflict_other(const struct lock_struct *lock, 233 const struct lock_struct *rw_probe) 234 { 235 if (IS_PENDING_LOCK(lock->lock_type) || 236 IS_PENDING_LOCK(rw_probe->lock_type)) { 197 237 return False; 198 199 if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) 238 } 239 240 if (lock->lock_type == READ_LOCK && rw_probe->lock_type == READ_LOCK) { 200 241 return False; 201 202 /* POSIX flavour locks never conflict here - this is only called 203 in the read/write path. */ 204 205 if (lck1->lock_flav == POSIX_LOCK && lck2->lock_flav == POSIX_LOCK) 242 } 243 244 if (lock->lock_flav == POSIX_LOCK && 245 rw_probe->lock_flav == POSIX_LOCK) { 246 /* 247 * POSIX flavour locks never conflict here - this is only called 248 * in the read/write path. 249 */ 206 250 return False; 251 } 252 253 if (!brl_overlap(lock, rw_probe)) { 254 /* 255 * I/O can only conflict when overlapping a lock, thus let it 256 * pass 257 */ 258 return false; 259 } 260 261 if (!brl_same_context(&lock->context, &rw_probe->context)) { 262 /* 263 * Different process, conflict 264 */ 265 return true; 266 } 267 268 if (lock->fnum != rw_probe->fnum) { 269 /* 270 * Different file handle, conflict 271 */ 272 return true; 273 } 274 275 if ((lock->lock_type == READ_LOCK) && 276 (rw_probe->lock_type == WRITE_LOCK)) { 277 /* 278 * Incoming WRITE locks conflict with existing READ locks even 279 * if the context is the same. JRA. See LOCKTEST7 in 280 * smbtorture. 281 */ 282 return true; 283 } 207 284 208 285 /* 209 * I ncoming WRITE locks conflict with existing READ locks even210 * if the context is the same. JRA. 
See LOCKTEST7 in smbtorture.286 * I/O request compatible with existing lock, let it pass without 287 * conflict 211 288 */ 212 289 213 if (!(lck2->lock_type == WRITE_LOCK && lck1->lock_type == READ_LOCK)) { 214 if (brl_same_context(&lck1->context, &lck2->context) && 215 lck1->fnum == lck2->fnum) 216 return False; 217 } 218 219 return brl_overlap(lck1, lck2); 220 } 290 return false; 291 } 221 292 222 293 /**************************************************************************** … … 228 299 if ((lock->start <= pend_lock->start) && (lock->start + lock->size > pend_lock->start)) 229 300 return True; 230 if ((lock->start >= pend_lock->start) && (lock->start < =pend_lock->start + pend_lock->size))301 if ((lock->start >= pend_lock->start) && (lock->start < pend_lock->start + pend_lock->size)) 231 302 return True; 232 303 return False; … … 239 310 ****************************************************************************/ 240 311 241 NTSTATUS brl_lock_failed(files_struct *fsp, const struct lock_struct *lock, bool blocking_lock) 312 static NTSTATUS brl_lock_failed(files_struct *fsp, 313 const struct lock_struct *lock, 314 bool blocking_lock) 242 315 { 243 316 if (lock->start >= 0xEF000000 && (lock->start >> 63) == 0) { … … 252 325 } 253 326 254 if ( procid_equal(&lock->context.pid, &fsp->last_lock_failure.context.pid) &&327 if (serverid_equal(&lock->context.pid, &fsp->last_lock_failure.context.pid) && 255 328 lock->context.tid == fsp->last_lock_failure.context.tid && 256 329 lock->fnum == fsp->last_lock_failure.fnum && … … 272 345 { 273 346 int tdb_flags; 347 char *db_path; 274 348 275 349 if (brlock_db) { … … 288 362 } 289 363 290 brlock_db = db_open(NULL, lock_path("brlock.tdb"), 291 lp_open_files_db_hash_size(), tdb_flags, 292 read_only?O_RDONLY:(O_RDWR|O_CREAT), 0644 ); 364 db_path = lock_path("brlock.tdb"); 365 if (db_path == NULL) { 366 DEBUG(0, ("out of memory!\n")); 367 return; 368 } 369 370 brlock_db = db_open(NULL, db_path, 371 SMB_OPEN_DATABASE_TDB_HASH_SIZE, tdb_flags, 372 read_only?O_RDONLY:(O_RDWR|O_CREAT), 0644, 373 DBWRAP_LOCK_ORDER_2, DBWRAP_FLAG_NONE); 293 374 if (!brlock_db) { 294 375 DEBUG(0,("Failed to open byte range locking database %s\n", 295 lock_path("brlock.tdb"))); 376 db_path)); 377 TALLOC_FREE(db_path); 296 378 return; 297 379 } 380 TALLOC_FREE(db_path); 298 381 } 299 382 … … 312 395 ****************************************************************************/ 313 396 314 static int lock_compare(const struct lock_struct *lck1, 397 static int lock_compare(const struct lock_struct *lck1, 315 398 const struct lock_struct *lck2) 316 399 { … … 347 430 /* Do any Windows or POSIX locks conflict ? */ 348 431 if (brl_conflict(&locks[i], plock)) { 432 if (!serverid_exists(&locks[i].context.pid)) { 433 locks[i].context.pid.pid = 0; 434 br_lck->modified = true; 435 continue; 436 } 349 437 /* Remember who blocked us. */ 350 438 plock->context.smblctx = locks[i].context.smblctx; … … 352 440 } 353 441 #if ZERO_ZERO 354 if (plock->start == 0 && plock->size == 0 && 442 if (plock->start == 0 && plock->size == 0 && 355 443 locks[i].size == 0) { 356 444 break; … … 392 480 393 481 /* no conflicts - add it to the list of locks */ 394 locks = (struct lock_struct *)SMB_REALLOC(locks, (br_lck->num_locks + 1) * sizeof(*locks)); 482 locks = talloc_realloc(br_lck, locks, struct lock_struct, 483 (br_lck->num_locks + 1)); 395 484 if (!locks) { 396 485 status = NT_STATUS_NO_MEMORY; … … 730 819 so we need at most 2 more entries. 
*/ 731 820 732 tp = SMB_MALLOC_ARRAY(struct lock_struct, (br_lck->num_locks + 2));821 tp = talloc_array(br_lck, struct lock_struct, br_lck->num_locks + 2); 733 822 if (!tp) { 734 823 return NT_STATUS_NO_MEMORY; … … 750 839 /* Do any Windows flavour locks conflict ? */ 751 840 if (brl_conflict(curr_lock, plock)) { 841 if (!serverid_exists(&curr_lock->context.pid)) { 842 curr_lock->context.pid.pid = 0; 843 br_lck->modified = true; 844 continue; 845 } 752 846 /* No games with error messages. */ 753 SAFE_FREE(tp);847 TALLOC_FREE(tp); 754 848 /* Remember who blocked us. */ 755 849 plock->context.smblctx = curr_lock->context.smblctx; … … 764 858 /* POSIX conflict semantics are different. */ 765 859 if (brl_conflict_posix(curr_lock, plock)) { 860 if (!serverid_exists(&curr_lock->context.pid)) { 861 curr_lock->context.pid.pid = 0; 862 br_lck->modified = true; 863 continue; 864 } 766 865 /* Can't block ourselves with POSIX locks. */ 767 866 /* No games with error messages. */ 768 SAFE_FREE(tp);867 TALLOC_FREE(tp); 769 868 /* Remember who blocked us. */ 770 869 plock->context.smblctx = curr_lock->context.smblctx; … … 831 930 832 931 if (errno_ret == EACCES || errno_ret == EAGAIN) { 833 SAFE_FREE(tp);932 TALLOC_FREE(tp); 834 933 status = NT_STATUS_FILE_LOCK_CONFLICT; 835 934 goto fail; 836 935 } else { 837 SAFE_FREE(tp);936 TALLOC_FREE(tp); 838 937 status = map_nt_error_from_unix(errno); 839 938 goto fail; … … 845 944 * Realloc so we don't leak entries per lock call. */ 846 945 if (count < br_lck->num_locks + 2) { 847 tp = (struct lock_struct *)SMB_REALLOC(tp, count * sizeof(*locks));946 tp = talloc_realloc(br_lck, tp, struct lock_struct, count); 848 947 if (!tp) { 849 948 status = NT_STATUS_NO_MEMORY; … … 853 952 854 953 br_lck->num_locks = count; 855 SAFE_FREE(br_lck->lock_data);954 TALLOC_FREE(br_lck->lock_data); 856 955 br_lck->lock_data = tp; 857 956 locks = tp; … … 873 972 if (pend_lock->lock_type == PENDING_READ_LOCK && 874 973 brl_pending_overlap(plock, pend_lock)) { 875 DEBUG(10,("brl_lock_posix: sending unlock message to pid %s\n", 876 procid_str_static(&pend_lock->context.pid ))); 974 struct server_id_buf tmp; 975 976 DEBUG(10, ("brl_lock_posix: sending unlock " 977 "message to pid %s\n", 978 server_id_str_buf(pend_lock->context.pid, 979 &tmp))); 877 980 878 981 messaging_send(msg_ctx, pend_lock->context.pid, … … 894 997 struct byte_range_lock *br_lck, 895 998 struct lock_struct *plock, 896 bool blocking_lock, 897 struct blocking_lock_record *blr) 999 bool blocking_lock) 898 1000 { 899 1001 VFS_FIND(brl_lock_windows); 900 return handle->fns->brl_lock_windows (handle, br_lck, plock,901 blocking_lock, blr);1002 return handle->fns->brl_lock_windows_fn(handle, br_lck, plock, 1003 blocking_lock); 902 1004 } 903 1005 … … 911 1013 struct server_id pid, 912 1014 br_off start, 913 br_off size, 1015 br_off size, 914 1016 enum brl_type lock_type, 915 1017 enum brl_flavour lock_flav, 916 1018 bool blocking_lock, 917 uint64_t *psmblctx, 918 struct blocking_lock_record *blr) 1019 uint64_t *psmblctx) 919 1020 { 920 1021 NTSTATUS ret; 921 1022 struct lock_struct lock; 1023 1024 ZERO_STRUCT(lock); 922 1025 923 1026 #if !ZERO_ZERO … … 927 1030 #endif 928 1031 929 #ifdef DEVELOPER 930 /* Quieten valgrind on test. 
*/ 931 memset(&lock, '\0', sizeof(lock)); 932 #endif 933 934 lock.context.smblctx = smblctx; 935 lock.context.pid = pid; 936 lock.context.tid = br_lck->fsp->conn->cnum; 937 lock.start = start; 938 lock.size = size; 939 lock.fnum = br_lck->fsp->fnum; 940 lock.lock_type = lock_type; 941 lock.lock_flav = lock_flav; 1032 lock = (struct lock_struct) { 1033 .context.smblctx = smblctx, 1034 .context.pid = pid, 1035 .context.tid = br_lck->fsp->conn->cnum, 1036 .start = start, 1037 .size = size, 1038 .fnum = br_lck->fsp->fnum, 1039 .lock_type = lock_type, 1040 .lock_flav = lock_flav 1041 }; 942 1042 943 1043 if (lock_flav == WINDOWS_LOCK) { 944 1044 ret = SMB_VFS_BRL_LOCK_WINDOWS(br_lck->fsp->conn, br_lck, 945 &lock, blocking_lock, blr);1045 &lock, blocking_lock); 946 1046 } else { 947 1047 ret = brl_lock_posix(msg_ctx, br_lck, &lock); … … 958 1058 } 959 1059 return ret; 1060 } 1061 1062 static void brl_delete_lock_struct(struct lock_struct *locks, 1063 unsigned num_locks, 1064 unsigned del_idx) 1065 { 1066 if (del_idx >= num_locks) { 1067 return; 1068 } 1069 memmove(&locks[del_idx], &locks[del_idx+1], 1070 sizeof(*locks) * (num_locks - del_idx - 1)); 960 1071 } 961 1072 … … 1027 1138 #endif 1028 1139 1029 /* Actually delete the lock. */ 1030 if (i < br_lck->num_locks - 1) { 1031 memmove(&locks[i], &locks[i+1], 1032 sizeof(*locks)*((br_lck->num_locks-1) - i)); 1033 } 1034 1140 brl_delete_lock_struct(locks, br_lck->num_locks, i); 1035 1141 br_lck->num_locks -= 1; 1036 1142 br_lck->modified = True; … … 1058 1164 /* We could send specific lock info here... */ 1059 1165 if (brl_pending_overlap(plock, pend_lock)) { 1060 DEBUG(10,("brl_unlock: sending unlock message to pid %s\n", 1061 procid_str_static(&pend_lock->context.pid ))); 1166 struct server_id_buf tmp; 1167 1168 DEBUG(10, ("brl_unlock: sending unlock message to " 1169 "pid %s\n", 1170 server_id_str_buf(pend_lock->context.pid, 1171 &tmp))); 1062 1172 1063 1173 messaging_send(msg_ctx, pend_lock->context.pid, … … 1099 1209 1 more entry. */ 1100 1210 1101 tp = SMB_MALLOC_ARRAY(struct lock_struct, (br_lck->num_locks + 1));1211 tp = talloc_array(br_lck, struct lock_struct, br_lck->num_locks + 1); 1102 1212 if (!tp) { 1103 1213 DEBUG(10,("brl_unlock_posix: malloc fail\n")); … … 1121 1231 /* Do any Windows flavour locks conflict ? */ 1122 1232 if (brl_conflict(lock, plock)) { 1123 SAFE_FREE(tp);1233 TALLOC_FREE(tp); 1124 1234 return false; 1125 1235 } … … 1166 1276 if (!overlap_found) { 1167 1277 /* Just ignore - no change. */ 1168 SAFE_FREE(tp);1278 TALLOC_FREE(tp); 1169 1279 DEBUG(10,("brl_unlock_posix: No overlap - unlocked.\n")); 1170 1280 return True; … … 1183 1293 /* Realloc so we don't leak entries per unlock call. */ 1184 1294 if (count) { 1185 tp = (struct lock_struct *)SMB_REALLOC(tp, count * sizeof(*locks));1295 tp = talloc_realloc(br_lck, tp, struct lock_struct, count); 1186 1296 if (!tp) { 1187 1297 DEBUG(10,("brl_unlock_posix: realloc fail\n")); … … 1190 1300 } else { 1191 1301 /* We deleted the last lock. */ 1192 SAFE_FREE(tp);1302 TALLOC_FREE(tp); 1193 1303 tp = NULL; 1194 1304 } … … 1198 1308 1199 1309 br_lck->num_locks = count; 1200 SAFE_FREE(br_lck->lock_data);1310 TALLOC_FREE(br_lck->lock_data); 1201 1311 locks = tp; 1202 1312 br_lck->lock_data = tp; … … 1215 1325 /* We could send specific lock info here... 
*/ 1216 1326 if (brl_pending_overlap(plock, pend_lock)) { 1217 DEBUG(10,("brl_unlock: sending unlock message to pid %s\n", 1218 procid_str_static(&pend_lock->context.pid ))); 1327 struct server_id_buf tmp; 1328 1329 DEBUG(10, ("brl_unlock: sending unlock message to " 1330 "pid %s\n", 1331 server_id_str_buf(pend_lock->context.pid, 1332 &tmp))); 1219 1333 1220 1334 messaging_send(msg_ctx, pend_lock->context.pid, … … 1232 1346 { 1233 1347 VFS_FIND(brl_unlock_windows); 1234 return handle->fns->brl_unlock_windows(handle, msg_ctx, br_lck, plock); 1348 return handle->fns->brl_unlock_windows_fn(handle, msg_ctx, br_lck, 1349 plock); 1235 1350 } 1236 1351 … … 1272 1387 1273 1388 bool brl_locktest(struct byte_range_lock *br_lck, 1274 uint64_t smblctx, 1275 struct server_id pid, 1276 br_off start, 1277 br_off size, 1278 enum brl_type lock_type, 1279 enum brl_flavour lock_flav) 1389 const struct lock_struct *rw_probe) 1280 1390 { 1281 1391 bool ret = True; 1282 1392 unsigned int i; 1283 struct lock_struct lock; 1284 const struct lock_struct *locks = br_lck->lock_data; 1393 struct lock_struct *locks = br_lck->lock_data; 1285 1394 files_struct *fsp = br_lck->fsp; 1286 1287 lock.context.smblctx = smblctx;1288 lock.context.pid = pid;1289 lock.context.tid = br_lck->fsp->conn->cnum;1290 lock.start = start;1291 lock.size = size;1292 lock.fnum = fsp->fnum;1293 lock.lock_type = lock_type;1294 lock.lock_flav = lock_flav;1295 1395 1296 1396 /* Make sure existing locks don't conflict */ … … 1299 1399 * Our own locks don't conflict. 1300 1400 */ 1301 if (brl_conflict_other(&locks[i], &lock)) { 1401 if (brl_conflict_other(&locks[i], rw_probe)) { 1402 if (br_lck->record == NULL) { 1403 /* readonly */ 1404 return false; 1405 } 1406 1407 if (!serverid_exists(&locks[i].context.pid)) { 1408 locks[i].context.pid.pid = 0; 1409 br_lck->modified = true; 1410 continue; 1411 } 1412 1302 1413 return False; 1303 1414 } … … 1310 1421 */ 1311 1422 1312 if(lp_posix_locking(fsp->conn->params) && (lock_flav == WINDOWS_LOCK)) { 1423 if(lp_posix_locking(fsp->conn->params) && 1424 (rw_probe->lock_flav == WINDOWS_LOCK)) { 1425 /* 1426 * Make copies -- is_posix_locked might modify the values 1427 */ 1428 1429 br_off start = rw_probe->start; 1430 br_off size = rw_probe->size; 1431 enum brl_type lock_type = rw_probe->lock_type; 1432 1313 1433 ret = is_posix_locked(fsp, &start, &size, &lock_type, WINDOWS_LOCK); 1314 1434 1315 DEBUG(10,("brl_locktest: posix start=%.0f len=%.0f %s for fnum %d file %s\n", 1316 (double)start, (double)size, ret ? "locked" : "unlocked", 1317 fsp->fnum, fsp_str_dbg(fsp))); 1435 DEBUG(10, ("brl_locktest: posix start=%ju len=%ju %s for %s " 1436 "file %s\n", (uintmax_t)start, (uintmax_t)size, 1437 ret ? "locked" : "unlocked", 1438 fsp_fnum_dbg(fsp), fsp_str_dbg(fsp))); 1318 1439 1319 1440 /* We need to return the inverse of is_posix_locked. */ … … 1333 1454 struct server_id pid, 1334 1455 br_off *pstart, 1335 br_off *psize, 1456 br_off *psize, 1336 1457 enum brl_type *plock_type, 1337 1458 enum brl_flavour lock_flav) … … 1358 1479 if (exlock->lock_flav == WINDOWS_LOCK) { 1359 1480 conflict = brl_conflict(exlock, &lock); 1360 } else { 1481 } else { 1361 1482 conflict = brl_conflict_posix(exlock, &lock); 1362 1483 } … … 1379 1500 bool ret = is_posix_locked(fsp, pstart, psize, plock_type, POSIX_LOCK); 1380 1501 1381 DEBUG(10,("brl_lockquery: posix start=%.0f len=%.0f %s for fnum %d file %s\n", 1382 (double)*pstart, (double)*psize, ret ? 
"locked" : "unlocked", 1383 fsp->fnum, fsp_str_dbg(fsp))); 1502 DEBUG(10, ("brl_lockquery: posix start=%ju len=%ju %s for %s " 1503 "file %s\n", (uintmax_t)*pstart, 1504 (uintmax_t)*psize, ret ? "locked" : "unlocked", 1505 fsp_fnum_dbg(fsp), fsp_str_dbg(fsp))); 1384 1506 1385 1507 if (ret) { … … 1396 1518 bool smb_vfs_call_brl_cancel_windows(struct vfs_handle_struct *handle, 1397 1519 struct byte_range_lock *br_lck, 1398 struct lock_struct *plock, 1399 struct blocking_lock_record *blr) 1520 struct lock_struct *plock) 1400 1521 { 1401 1522 VFS_FIND(brl_cancel_windows); 1402 return handle->fns->brl_cancel_windows (handle, br_lck, plock, blr);1523 return handle->fns->brl_cancel_windows_fn(handle, br_lck, plock); 1403 1524 } 1404 1525 … … 1411 1532 br_off start, 1412 1533 br_off size, 1413 enum brl_flavour lock_flav, 1414 struct blocking_lock_record *blr) 1534 enum brl_flavour lock_flav) 1415 1535 { 1416 1536 bool ret; … … 1428 1548 if (lock_flav == WINDOWS_LOCK) { 1429 1549 ret = SMB_VFS_BRL_CANCEL_WINDOWS(br_lck->fsp->conn, br_lck, 1430 &lock, blr);1550 &lock); 1431 1551 } else { 1432 1552 ret = brl_lock_cancel_default(br_lck, &lock); … … 1463 1583 } 1464 1584 1465 if (i < br_lck->num_locks - 1) { 1466 /* Found this particular pending lock - delete it */ 1467 memmove(&locks[i], &locks[i+1], 1468 sizeof(*locks)*((br_lck->num_locks-1) - i)); 1469 } 1470 1585 brl_delete_lock_struct(locks, br_lck->num_locks, i); 1471 1586 br_lck->num_locks -= 1; 1472 1587 br_lck->modified = True; … … 1484 1599 { 1485 1600 files_struct *fsp = br_lck->fsp; 1486 uint 16tid = fsp->conn->cnum;1487 int fnum = fsp->fnum;1601 uint32_t tid = fsp->conn->cnum; 1602 uint64_t fnum = fsp->fnum; 1488 1603 unsigned int i; 1489 1604 struct lock_struct *locks = br_lck->lock_data; 1490 struct server_id pid = sconn_server_id(fsp->conn->sconn);1605 struct server_id pid = messaging_server_id(fsp->conn->sconn->msg_ctx); 1491 1606 struct lock_struct *locks_copy; 1492 1607 unsigned int num_locks_copy; … … 1494 1609 /* Copy the current lock array. */ 1495 1610 if (br_lck->num_locks) { 1496 locks_copy = (struct lock_struct *) TALLOC_MEMDUP(br_lck, locks, br_lck->num_locks * sizeof(struct lock_struct));1611 locks_copy = (struct lock_struct *)talloc_memdup(br_lck, locks, br_lck->num_locks * sizeof(struct lock_struct)); 1497 1612 if (!locks_copy) { 1498 1613 smb_panic("brl_close_fnum: talloc failed"); … … 1507 1622 struct lock_struct *lock = &locks_copy[i]; 1508 1623 1509 if (lock->context.tid == tid && procid_equal(&lock->context.pid, &pid) &&1624 if (lock->context.tid == tid && serverid_equal(&lock->context.pid, &pid) && 1510 1625 (lock->fnum == fnum)) { 1511 1626 brl_unlock(msg_ctx, … … 1520 1635 } 1521 1636 1522 /**************************************************************************** 1523 Ensure this set of lock entries is valid. 1524 ****************************************************************************/ 1525 static bool validate_lock_entries(unsigned int *pnum_entries, struct lock_struct **pplocks) 1526 { 1637 bool brl_mark_disconnected(struct files_struct *fsp) 1638 { 1639 uint32_t tid = fsp->conn->cnum; 1640 uint64_t smblctx; 1641 uint64_t fnum = fsp->fnum; 1527 1642 unsigned int i; 1528 unsigned int num_valid_entries = 0; 1529 struct lock_struct *locks = *pplocks; 1530 1531 for (i = 0; i < *pnum_entries; i++) { 1532 struct lock_struct *lock_data = &locks[i]; 1533 if (!serverid_exists(&lock_data->context.pid)) { 1534 /* This process no longer exists - mark this 1535 entry as invalid by zeroing it. 
*/ 1536 ZERO_STRUCTP(lock_data); 1537 } else { 1538 num_valid_entries++; 1539 } 1540 } 1541 1542 if (num_valid_entries != *pnum_entries) { 1543 struct lock_struct *new_lock_data = NULL; 1544 1545 if (num_valid_entries) { 1546 new_lock_data = SMB_MALLOC_ARRAY(struct lock_struct, num_valid_entries); 1547 if (!new_lock_data) { 1548 DEBUG(3, ("malloc fail\n")); 1549 return False; 1550 } 1551 1552 num_valid_entries = 0; 1553 for (i = 0; i < *pnum_entries; i++) { 1554 struct lock_struct *lock_data = &locks[i]; 1555 if (lock_data->context.smblctx && 1556 lock_data->context.tid) { 1557 /* Valid (nonzero) entry - copy it. */ 1558 memcpy(&new_lock_data[num_valid_entries], 1559 lock_data, sizeof(struct lock_struct)); 1560 num_valid_entries++; 1561 } 1562 } 1563 } 1564 1565 SAFE_FREE(*pplocks); 1566 *pplocks = new_lock_data; 1567 *pnum_entries = num_valid_entries; 1568 } 1569 1570 return True; 1643 struct server_id self = messaging_server_id(fsp->conn->sconn->msg_ctx); 1644 struct byte_range_lock *br_lck = NULL; 1645 1646 if (fsp->op == NULL) { 1647 return false; 1648 } 1649 1650 smblctx = fsp->op->global->open_persistent_id; 1651 1652 if (!fsp->op->global->durable) { 1653 return false; 1654 } 1655 1656 if (fsp->current_lock_count == 0) { 1657 return true; 1658 } 1659 1660 br_lck = brl_get_locks(talloc_tos(), fsp); 1661 if (br_lck == NULL) { 1662 return false; 1663 } 1664 1665 for (i=0; i < br_lck->num_locks; i++) { 1666 struct lock_struct *lock = &br_lck->lock_data[i]; 1667 1668 /* 1669 * as this is a durable handle, we only expect locks 1670 * of the current file handle! 1671 */ 1672 1673 if (lock->context.smblctx != smblctx) { 1674 TALLOC_FREE(br_lck); 1675 return false; 1676 } 1677 1678 if (lock->context.tid != tid) { 1679 TALLOC_FREE(br_lck); 1680 return false; 1681 } 1682 1683 if (!serverid_equal(&lock->context.pid, &self)) { 1684 TALLOC_FREE(br_lck); 1685 return false; 1686 } 1687 1688 if (lock->fnum != fnum) { 1689 TALLOC_FREE(br_lck); 1690 return false; 1691 } 1692 1693 server_id_set_disconnected(&lock->context.pid); 1694 lock->context.tid = TID_FIELD_INVALID; 1695 lock->fnum = FNUM_FIELD_INVALID; 1696 } 1697 1698 br_lck->modified = true; 1699 TALLOC_FREE(br_lck); 1700 return true; 1701 } 1702 1703 bool brl_reconnect_disconnected(struct files_struct *fsp) 1704 { 1705 uint32_t tid = fsp->conn->cnum; 1706 uint64_t smblctx; 1707 uint64_t fnum = fsp->fnum; 1708 unsigned int i; 1709 struct server_id self = messaging_server_id(fsp->conn->sconn->msg_ctx); 1710 struct byte_range_lock *br_lck = NULL; 1711 1712 if (fsp->op == NULL) { 1713 return false; 1714 } 1715 1716 smblctx = fsp->op->global->open_persistent_id; 1717 1718 if (!fsp->op->global->durable) { 1719 return false; 1720 } 1721 1722 /* 1723 * When reconnecting, we do not want to validate the brlock entries 1724 * and thereby remove our own (disconnected) entries but reactivate 1725 * them instead. 1726 */ 1727 1728 br_lck = brl_get_locks(talloc_tos(), fsp); 1729 if (br_lck == NULL) { 1730 return false; 1731 } 1732 1733 if (br_lck->num_locks == 0) { 1734 TALLOC_FREE(br_lck); 1735 return true; 1736 } 1737 1738 for (i=0; i < br_lck->num_locks; i++) { 1739 struct lock_struct *lock = &br_lck->lock_data[i]; 1740 1741 /* 1742 * as this is a durable handle we only expect locks 1743 * of the current file handle! 
1744 */ 1745 1746 if (lock->context.smblctx != smblctx) { 1747 TALLOC_FREE(br_lck); 1748 return false; 1749 } 1750 1751 if (lock->context.tid != TID_FIELD_INVALID) { 1752 TALLOC_FREE(br_lck); 1753 return false; 1754 } 1755 1756 if (!server_id_is_disconnected(&lock->context.pid)) { 1757 TALLOC_FREE(br_lck); 1758 return false; 1759 } 1760 1761 if (lock->fnum != FNUM_FIELD_INVALID) { 1762 TALLOC_FREE(br_lck); 1763 return false; 1764 } 1765 1766 lock->context.pid = self; 1767 lock->context.tid = tid; 1768 lock->fnum = fnum; 1769 } 1770 1771 fsp->current_lock_count = br_lck->num_locks; 1772 br_lck->modified = true; 1773 TALLOC_FREE(br_lck); 1774 return true; 1571 1775 } 1572 1776 … … 1585 1789 ****************************************************************************/ 1586 1790 1587 static int traverse_fn(struct db_record *rec, void *state)1791 static int brl_traverse_fn(struct db_record *rec, void *state) 1588 1792 { 1589 1793 struct brl_forall_cb *cb = (struct brl_forall_cb *)state; … … 1592 1796 unsigned int i; 1593 1797 unsigned int num_locks = 0; 1594 unsigned int orig_num_locks = 0; 1798 TDB_DATA dbkey; 1799 TDB_DATA value; 1800 1801 dbkey = dbwrap_record_get_key(rec); 1802 value = dbwrap_record_get_value(rec); 1595 1803 1596 1804 /* In a traverse function we must make a copy of 1597 1805 dbuf before modifying it. */ 1598 1806 1599 locks = (struct lock_struct *) memdup(rec->value.dptr,1600 rec->value.dsize);1807 locks = (struct lock_struct *)talloc_memdup( 1808 talloc_tos(), value.dptr, value.dsize); 1601 1809 if (!locks) { 1602 1810 return -1; /* Terminate traversal. */ 1603 1811 } 1604 1812 1605 key = (struct file_id *)rec->key.dptr; 1606 orig_num_locks = num_locks = rec->value.dsize/sizeof(*locks); 1607 1608 /* Ensure the lock db is clean of entries from invalid processes. */ 1609 1610 if (!validate_lock_entries(&num_locks, &locks)) { 1611 SAFE_FREE(locks); 1612 return -1; /* Terminate traversal */ 1613 } 1614 1615 if (orig_num_locks != num_locks) { 1616 if (num_locks) { 1617 TDB_DATA data; 1618 data.dptr = (uint8_t *)locks; 1619 data.dsize = num_locks*sizeof(struct lock_struct); 1620 rec->store(rec, data, TDB_REPLACE); 1621 } else { 1622 rec->delete_rec(rec); 1623 } 1624 } 1813 key = (struct file_id *)dbkey.dptr; 1814 num_locks = value.dsize/sizeof(*locks); 1625 1815 1626 1816 if (cb->fn) { … … 1636 1826 } 1637 1827 1638 SAFE_FREE(locks);1828 TALLOC_FREE(locks); 1639 1829 return 0; 1640 1830 } … … 1652 1842 { 1653 1843 struct brl_forall_cb cb; 1844 NTSTATUS status; 1845 int count = 0; 1654 1846 1655 1847 if (!brlock_db) { … … 1658 1850 cb.fn = fn; 1659 1851 cb.private_data = private_data; 1660 return brlock_db->traverse(brlock_db, traverse_fn, &cb); 1852 status = dbwrap_traverse(brlock_db, brl_traverse_fn, &cb, &count); 1853 1854 if (!NT_STATUS_IS_OK(status)) { 1855 return -1; 1856 } else { 1857 return count; 1858 } 1661 1859 } 1662 1860 … … 1669 1867 static void byte_range_lock_flush(struct byte_range_lock *br_lck) 1670 1868 { 1671 if (br_lck->read_only) { 1672 SMB_ASSERT(!br_lck->modified); 1673 } 1869 unsigned i; 1870 struct lock_struct *locks = br_lck->lock_data; 1674 1871 1675 1872 if (!br_lck->modified) { 1873 DEBUG(10, ("br_lck not modified\n")); 1676 1874 goto done; 1677 1875 } 1678 1876 1679 if (br_lck->num_locks == 0) { 1877 i = 0; 1878 1879 while (i < br_lck->num_locks) { 1880 if (locks[i].context.pid.pid == 0) { 1881 /* 1882 * Autocleanup, the process conflicted and does not 1883 * exist anymore. 
1884 */ 1885 locks[i] = locks[br_lck->num_locks-1]; 1886 br_lck->num_locks -= 1; 1887 } else { 1888 i += 1; 1889 } 1890 } 1891 1892 if ((br_lck->num_locks == 0) && (br_lck->num_read_oplocks == 0)) { 1680 1893 /* No locks - delete this entry. */ 1681 NTSTATUS status = br_lck->record->delete_rec(br_lck->record);1894 NTSTATUS status = dbwrap_record_delete(br_lck->record); 1682 1895 if (!NT_STATUS_IS_OK(status)) { 1683 1896 DEBUG(0, ("delete_rec returned %s\n", … … 1686 1899 } 1687 1900 } else { 1901 size_t lock_len, data_len; 1688 1902 TDB_DATA data; 1689 1903 NTSTATUS status; 1690 1904 1691 data.dptr = (uint8 *)br_lck->lock_data; 1692 data.dsize = br_lck->num_locks * sizeof(struct lock_struct); 1693 1694 status = br_lck->record->store(br_lck->record, data, 1695 TDB_REPLACE); 1905 lock_len = br_lck->num_locks * sizeof(struct lock_struct); 1906 data_len = lock_len + sizeof(br_lck->num_read_oplocks); 1907 1908 data.dsize = data_len; 1909 data.dptr = talloc_array(talloc_tos(), uint8_t, data_len); 1910 SMB_ASSERT(data.dptr != NULL); 1911 1912 memcpy(data.dptr, br_lck->lock_data, lock_len); 1913 memcpy(data.dptr + lock_len, &br_lck->num_read_oplocks, 1914 sizeof(br_lck->num_read_oplocks)); 1915 1916 status = dbwrap_record_store(br_lck->record, data, TDB_REPLACE); 1917 TALLOC_FREE(data.dptr); 1696 1918 if (!NT_STATUS_IS_OK(status)) { 1697 1919 DEBUG(0, ("store returned %s\n", nt_errstr(status))); … … 1700 1922 } 1701 1923 1924 DEBUG(10, ("seqnum=%d\n", dbwrap_get_seqnum(brlock_db))); 1925 1702 1926 done: 1703 1704 br_lck->read_only = true;1705 1927 br_lck->modified = false; 1706 1707 1928 TALLOC_FREE(br_lck->record); 1708 1929 } … … 1711 1932 { 1712 1933 byte_range_lock_flush(br_lck); 1713 SAFE_FREE(br_lck->lock_data);1714 1934 return 0; 1935 } 1936 1937 static bool brl_parse_data(struct byte_range_lock *br_lck, TDB_DATA data) 1938 { 1939 size_t data_len; 1940 1941 if (data.dsize == 0) { 1942 return true; 1943 } 1944 if (data.dsize % sizeof(struct lock_struct) != 1945 sizeof(br_lck->num_read_oplocks)) { 1946 DEBUG(1, ("Invalid data size: %u\n", (unsigned)data.dsize)); 1947 return false; 1948 } 1949 1950 br_lck->num_locks = data.dsize / sizeof(struct lock_struct); 1951 data_len = br_lck->num_locks * sizeof(struct lock_struct); 1952 1953 br_lck->lock_data = talloc_memdup(br_lck, data.dptr, data_len); 1954 if (br_lck->lock_data == NULL) { 1955 DEBUG(1, ("talloc_memdup failed\n")); 1956 return false; 1957 } 1958 memcpy(&br_lck->num_read_oplocks, data.dptr + data_len, 1959 sizeof(br_lck->num_read_oplocks)); 1960 return true; 1715 1961 } 1716 1962 … … 1721 1967 ********************************************************************/ 1722 1968 1723 static struct byte_range_lock *brl_get_locks_internal(TALLOC_CTX *mem_ctx, 1724 files_struct *fsp, bool read_only) 1969 struct byte_range_lock *brl_get_locks(TALLOC_CTX *mem_ctx, files_struct *fsp) 1725 1970 { 1726 1971 TDB_DATA key, data; 1727 struct byte_range_lock *br_lck = TALLOC_P(mem_ctx, struct byte_range_lock);1728 bool do_read_only = read_only; 1729 1972 struct byte_range_lock *br_lck; 1973 1974 br_lck = talloc_zero(mem_ctx, struct byte_range_lock); 1730 1975 if (br_lck == NULL) { 1731 1976 return NULL; … … 1733 1978 1734 1979 br_lck->fsp = fsp; 1735 br_lck->num_locks = 0; 1736 br_lck->modified = False; 1737 br_lck->key = fsp->file_id; 1738 1739 key.dptr = (uint8 *)&br_lck->key; 1980 1981 key.dptr = (uint8_t *)&fsp->file_id; 1740 1982 key.dsize = sizeof(struct file_id); 1741 1983 1742 if (!fsp->lockdb_clean) { 1743 /* We must be read/write to clean 1744 
the dead entries. */ 1745 do_read_only = false; 1746 } 1747 1748 if (do_read_only) { 1749 if (brlock_db->fetch(brlock_db, br_lck, key, &data) == -1) { 1750 DEBUG(3, ("Could not fetch byte range lock record\n")); 1751 TALLOC_FREE(br_lck); 1752 return NULL; 1753 } 1754 br_lck->record = NULL; 1755 } else { 1756 br_lck->record = brlock_db->fetch_locked(brlock_db, br_lck, key); 1757 1758 if (br_lck->record == NULL) { 1759 DEBUG(3, ("Could not lock byte range lock entry\n")); 1760 TALLOC_FREE(br_lck); 1761 return NULL; 1762 } 1763 1764 data = br_lck->record->value; 1765 } 1766 1767 br_lck->read_only = do_read_only; 1768 br_lck->lock_data = NULL; 1984 br_lck->record = dbwrap_fetch_locked(brlock_db, br_lck, key); 1985 1986 if (br_lck->record == NULL) { 1987 DEBUG(3, ("Could not lock byte range lock entry\n")); 1988 TALLOC_FREE(br_lck); 1989 return NULL; 1990 } 1991 1992 data = dbwrap_record_get_value(br_lck->record); 1993 1994 if (!brl_parse_data(br_lck, data)) { 1995 TALLOC_FREE(br_lck); 1996 return NULL; 1997 } 1769 1998 1770 1999 talloc_set_destructor(br_lck, byte_range_lock_destructor); 1771 1772 br_lck->num_locks = data.dsize / sizeof(struct lock_struct);1773 1774 if (br_lck->num_locks != 0) {1775 br_lck->lock_data = SMB_MALLOC_ARRAY(struct lock_struct,1776 br_lck->num_locks);1777 if (br_lck->lock_data == NULL) {1778 DEBUG(0, ("malloc failed\n"));1779 TALLOC_FREE(br_lck);1780 return NULL;1781 }1782 1783 memcpy(br_lck->lock_data, data.dptr, data.dsize);1784 }1785 1786 if (!fsp->lockdb_clean) {1787 int orig_num_locks = br_lck->num_locks;1788 1789 /* This is the first time we've accessed this. */1790 /* Go through and ensure all entries exist - remove any that don't. */1791 /* Makes the lockdb self cleaning at low cost. */1792 1793 if (!validate_lock_entries(&br_lck->num_locks,1794 &br_lck->lock_data)) {1795 SAFE_FREE(br_lck->lock_data);1796 TALLOC_FREE(br_lck);1797 return NULL;1798 }1799 1800 /* Ensure invalid locks are cleaned up in the destructor. */1801 if (orig_num_locks != br_lck->num_locks) {1802 br_lck->modified = True;1803 }1804 1805 /* Mark the lockdb as "clean" as seen from this open file. 
*/1806 fsp->lockdb_clean = True;1807 }1808 2000 1809 2001 if (DEBUGLEVEL >= 10) { … … 1818 2010 } 1819 2011 1820 if (do_read_only != read_only) { 2012 return br_lck; 2013 } 2014 2015 struct brl_get_locks_readonly_state { 2016 TALLOC_CTX *mem_ctx; 2017 struct byte_range_lock **br_lock; 2018 }; 2019 2020 static void brl_get_locks_readonly_parser(TDB_DATA key, TDB_DATA data, 2021 void *private_data) 2022 { 2023 struct brl_get_locks_readonly_state *state = 2024 (struct brl_get_locks_readonly_state *)private_data; 2025 struct byte_range_lock *br_lck; 2026 2027 br_lck = talloc_pooled_object( 2028 state->mem_ctx, struct byte_range_lock, 1, data.dsize); 2029 if (br_lck == NULL) { 2030 *state->br_lock = NULL; 2031 return; 2032 } 2033 *br_lck = (struct byte_range_lock) { 0 }; 2034 if (!brl_parse_data(br_lck, data)) { 2035 *state->br_lock = NULL; 2036 return; 2037 } 2038 *state->br_lock = br_lck; 2039 } 2040 2041 struct byte_range_lock *brl_get_locks_readonly(files_struct *fsp) 2042 { 2043 struct byte_range_lock *br_lock = NULL; 2044 struct brl_get_locks_readonly_state state; 2045 NTSTATUS status; 2046 2047 DEBUG(10, ("seqnum=%d, fsp->brlock_seqnum=%d\n", 2048 dbwrap_get_seqnum(brlock_db), fsp->brlock_seqnum)); 2049 2050 if ((fsp->brlock_rec != NULL) 2051 && (dbwrap_get_seqnum(brlock_db) == fsp->brlock_seqnum)) { 1821 2052 /* 1822 * this stores the record and gets rid of1823 * the write lock that is needed for a cleanup2053 * We have cached the brlock_rec and the database did not 2054 * change. 1824 2055 */ 1825 byte_range_lock_flush(br_lck);1826 }1827 1828 return br_lck;1829 }1830 1831 struct byte_range_lock *brl_get_locks(TALLOC_CTX *mem_ctx,1832 files_struct *fsp)1833 {1834 return brl_get_locks_internal(mem_ctx, fsp, False);1835 }1836 1837 struct byte_range_lock *brl_get_locks_readonly(files_struct *fsp)1838 {1839 struct byte_range_lock *br_lock;1840 1841 if (lp_clustering()) {1842 return brl_get_locks_internal(talloc_tos(), fsp, true);1843 }1844 1845 if ((fsp->brlock_rec != NULL)1846 && (brlock_db->get_seqnum(brlock_db) == fsp->brlock_seqnum)) {1847 2056 return fsp->brlock_rec; 1848 2057 } 1849 2058 1850 TALLOC_FREE(fsp->brlock_rec); 1851 1852 br_lock = brl_get_locks_internal(talloc_tos(), fsp, true); 2059 /* 2060 * Parse the record fresh from the database 2061 */ 2062 2063 state.mem_ctx = fsp; 2064 state.br_lock = &br_lock; 2065 2066 status = dbwrap_parse_record( 2067 brlock_db, 2068 make_tdb_data((uint8_t *)&fsp->file_id, 2069 sizeof(fsp->file_id)), 2070 brl_get_locks_readonly_parser, &state); 2071 2072 if (NT_STATUS_EQUAL(status,NT_STATUS_NOT_FOUND)) { 2073 /* 2074 * No locks on this file. Return an empty br_lock. 2075 */ 2076 br_lock = talloc(fsp, struct byte_range_lock); 2077 if (br_lock == NULL) { 2078 return NULL; 2079 } 2080 2081 br_lock->num_read_oplocks = 0; 2082 br_lock->num_locks = 0; 2083 br_lock->lock_data = NULL; 2084 2085 } else if (!NT_STATUS_IS_OK(status)) { 2086 DEBUG(3, ("Could not parse byte range lock record: " 2087 "%s\n", nt_errstr(status))); 2088 return NULL; 2089 } 1853 2090 if (br_lock == NULL) { 1854 2091 return NULL; 1855 2092 } 1856 fsp->brlock_seqnum = brlock_db->get_seqnum(brlock_db); 1857 1858 fsp->brlock_rec = talloc_move(fsp, &br_lock); 1859 1860 return fsp->brlock_rec; 2093 2094 br_lock->fsp = fsp; 2095 br_lock->modified = false; 2096 br_lock->record = NULL; 2097 2098 if (lp_clustering()) { 2099 /* 2100 * In the cluster case we can't cache the brlock struct 2101 * because dbwrap_get_seqnum does not work reliably over 2102 * ctdb. 
Thus we have to throw away the brlock struct soon. 2103 */ 2104 talloc_steal(talloc_tos(), br_lock); 2105 } else { 2106 /* 2107 * Cache the brlock struct, invalidated when the dbwrap_seqnum 2108 * changes. See beginning of this routine. 2109 */ 2110 TALLOC_FREE(fsp->brlock_rec); 2111 fsp->brlock_rec = br_lock; 2112 fsp->brlock_seqnum = dbwrap_get_seqnum(brlock_db); 2113 } 2114 2115 return br_lock; 1861 2116 } 1862 2117 1863 2118 struct brl_revalidate_state { 1864 2119 ssize_t array_size; 1865 uint32 num_pids;2120 uint32_t num_pids; 1866 2121 struct server_id *pids; 1867 2122 }; … … 1895 2150 static int compare_procids(const void *p1, const void *p2) 1896 2151 { 1897 const struct server_id *i1 = ( struct server_id *)p1;1898 const struct server_id *i2 = ( struct server_id *)p2;2152 const struct server_id *i1 = (const struct server_id *)p1; 2153 const struct server_id *i2 = (const struct server_id *)p2; 1899 2154 1900 2155 if (i1->pid < i2->pid) return -1; 1901 if (i 2->pid > i2->pid) return 1;2156 if (i1->pid > i2->pid) return 1; 1902 2157 return 0; 1903 2158 } … … 1912 2167 */ 1913 2168 1914 staticvoid brl_revalidate(struct messaging_context *msg_ctx,1915 1916 1917 1918 2169 void brl_revalidate(struct messaging_context *msg_ctx, 2170 void *private_data, 2171 uint32_t msg_type, 2172 struct server_id server_id, 2173 DATA_BLOB *data) 1919 2174 { 1920 2175 struct brl_revalidate_state *state; 1921 uint32 i;2176 uint32_t i; 1922 2177 struct server_id last_pid; 1923 2178 1924 if (!(state = TALLOC_ZERO_P(NULL, struct brl_revalidate_state))) {2179 if (!(state = talloc_zero(NULL, struct brl_revalidate_state))) { 1925 2180 DEBUG(0, ("talloc failed\n")); 1926 2181 return; … … 1943 2198 1944 2199 for (i=0; i<state->num_pids; i++) { 1945 if ( procid_equal(&last_pid, &state->pids[i])) {2200 if (serverid_equal(&last_pid, &state->pids[i])) { 1946 2201 /* 1947 2202 * We've seen that one already … … 1960 2215 } 1961 2216 1962 void brl_register_msgs(struct messaging_context *msg_ctx) 1963 { 1964 messaging_register(msg_ctx, NULL, MSG_SMB_BRL_VALIDATE, 1965 brl_revalidate); 1966 } 2217 bool brl_cleanup_disconnected(struct file_id fid, uint64_t open_persistent_id) 2218 { 2219 bool ret = false; 2220 TALLOC_CTX *frame = talloc_stackframe(); 2221 TDB_DATA key, val; 2222 struct db_record *rec; 2223 struct lock_struct *lock; 2224 unsigned n, num; 2225 NTSTATUS status; 2226 2227 key = make_tdb_data((void*)&fid, sizeof(fid)); 2228 2229 rec = dbwrap_fetch_locked(brlock_db, frame, key); 2230 if (rec == NULL) { 2231 DEBUG(5, ("brl_cleanup_disconnected: failed to fetch record " 2232 "for file %s\n", file_id_string(frame, &fid))); 2233 goto done; 2234 } 2235 2236 val = dbwrap_record_get_value(rec); 2237 lock = (struct lock_struct*)val.dptr; 2238 num = val.dsize / sizeof(struct lock_struct); 2239 if (lock == NULL) { 2240 DEBUG(10, ("brl_cleanup_disconnected: no byte range locks for " 2241 "file %s\n", file_id_string(frame, &fid))); 2242 ret = true; 2243 goto done; 2244 } 2245 2246 for (n=0; n<num; n++) { 2247 struct lock_context *ctx = &lock[n].context; 2248 2249 if (!server_id_is_disconnected(&ctx->pid)) { 2250 struct server_id_buf tmp; 2251 DEBUG(5, ("brl_cleanup_disconnected: byte range lock " 2252 "%s used by server %s, do not cleanup\n", 2253 file_id_string(frame, &fid), 2254 server_id_str_buf(ctx->pid, &tmp))); 2255 goto done; 2256 } 2257 2258 if (ctx->smblctx != open_persistent_id) { 2259 DEBUG(5, ("brl_cleanup_disconnected: byte range lock " 2260 "%s expected smblctx %llu but found %llu" 2261 ", do not cleanup\n", 
2262 file_id_string(frame, &fid), 2263 (unsigned long long)open_persistent_id, 2264 (unsigned long long)ctx->smblctx)); 2265 goto done; 2266 } 2267 } 2268 2269 status = dbwrap_record_delete(rec); 2270 if (!NT_STATUS_IS_OK(status)) { 2271 DEBUG(5, ("brl_cleanup_disconnected: failed to delete record " 2272 "for file %s from %s, open %llu: %s\n", 2273 file_id_string(frame, &fid), dbwrap_name(brlock_db), 2274 (unsigned long long)open_persistent_id, 2275 nt_errstr(status))); 2276 goto done; 2277 } 2278 2279 DEBUG(10, ("brl_cleanup_disconnected: " 2280 "file %s cleaned up %u entries from open %llu\n", 2281 file_id_string(frame, &fid), num, 2282 (unsigned long long)open_persistent_id)); 2283 2284 ret = true; 2285 done: 2286 talloc_free(frame); 2287 return ret; 2288 } -
vendor/current/source3/locking/locking.c
r860 r988 5 5 Copyright (C) Jeremy Allison 1992-2006 6 6 Copyright (C) Volker Lendecke 2005 7 7 8 8 This program is free software; you can redistribute it and/or modify 9 9 it under the terms of the GNU General Public License as published by 10 10 the Free Software Foundation; either version 3 of the License, or 11 11 (at your option) any later version. 12 12 13 13 This program is distributed in the hope that it will be useful, 14 14 but WITHOUT ANY WARRANTY; without even the implied warranty of 15 15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 16 GNU General Public License for more details. 17 17 18 18 You should have received a copy of the GNU General Public License 19 19 along with this program. If not, see <http://www.gnu.org/licenses/>. … … 30 30 support. 31 31 32 rewr tten completely to use new tdb code. Tridge, Dec '9932 rewritten completely to use new tdb code. Tridge, Dec '99 33 33 34 34 Added POSIX locking support. Jeremy Allison (jeremy@valinux.com), Apr. 2000. … … 40 40 #include "locking/proto.h" 41 41 #include "smbd/globals.h" 42 #include "dbwrap.h" 42 #include "dbwrap/dbwrap.h" 43 #include "dbwrap/dbwrap_open.h" 43 44 #include "../libcli/security/security.h" 44 45 #include "serverid.h" 45 46 #include "messages.h" 46 47 #include "util_tdb.h" 47 #include "../librpc/gen_ndr/ndr_security.h" 48 #include "../librpc/gen_ndr/ndr_open_files.h" 49 #include "librpc/gen_ndr/ndr_file_id.h" 50 #include "locking/leases_db.h" 48 51 49 52 #undef DBGC_CLASS … … 51 54 52 55 #define NO_LOCKING_COUNT (-1) 53 54 /* the locking database handle */55 static struct db_context *lock_db;56 56 57 57 /**************************************************************************** … … 96 96 plock->context.smblctx = smblctx; 97 97 plock->context.tid = fsp->conn->cnum; 98 plock->context.pid = sconn_server_id(fsp->conn->sconn);98 plock->context.pid = messaging_server_id(fsp->conn->sconn->msg_ctx); 99 99 plock->start = start; 100 100 plock->size = size; … … 106 106 bool strict_lock_default(files_struct *fsp, struct lock_struct *plock) 107 107 { 108 struct byte_range_lock *br_lck; 108 109 int strict_locking = lp_strict_locking(fsp->conn->params); 109 110 bool ret = False; … … 118 119 119 120 if (strict_locking == Auto) { 120 if (EXCLUSIVE_OPLOCK_TYPE(fsp->oplock_type) && (plock->lock_type == READ_LOCK || plock->lock_type == WRITE_LOCK)) { 121 DEBUG(10,("is_locked: optimisation - exclusive oplock on file %s\n", fsp_str_dbg(fsp))); 122 ret = True; 123 } else if ((fsp->oplock_type == LEVEL_II_OPLOCK) && 124 (plock->lock_type == READ_LOCK)) { 125 DEBUG(10,("is_locked: optimisation - level II oplock on file %s\n", fsp_str_dbg(fsp))); 126 ret = True; 127 } else { 128 struct byte_range_lock *br_lck; 129 130 br_lck = brl_get_locks_readonly(fsp); 131 if (!br_lck) { 132 return True; 133 } 134 ret = brl_locktest(br_lck, 135 plock->context.smblctx, 136 plock->context.pid, 137 plock->start, 138 plock->size, 139 plock->lock_type, 140 plock->lock_flav); 141 } 142 } else { 143 struct byte_range_lock *br_lck; 144 145 br_lck = brl_get_locks_readonly(fsp); 146 if (!br_lck) { 147 return True; 148 } 149 ret = brl_locktest(br_lck, 150 plock->context.smblctx, 151 plock->context.pid, 152 plock->start, 153 plock->size, 154 plock->lock_type, 155 plock->lock_flav); 156 } 157 158 DEBUG(10,("strict_lock_default: flavour = %s brl start=%.0f " 159 "len=%.0f %s for fnum %d file %s\n", 160 lock_flav_name(plock->lock_flav), 161 (double)plock->start, (double)plock->size, 162 ret ? 
"unlocked" : "locked", 163 plock->fnum, fsp_str_dbg(fsp))); 121 if (EXCLUSIVE_OPLOCK_TYPE(fsp->oplock_type) && 122 (plock->lock_type == READ_LOCK || 123 plock->lock_type == WRITE_LOCK)) { 124 DEBUG(10, ("is_locked: optimisation - exclusive oplock " 125 "on file %s\n", fsp_str_dbg(fsp))); 126 return true; 127 } 128 if ((fsp->oplock_type == LEVEL_II_OPLOCK) && 129 (plock->lock_type == READ_LOCK)) { 130 DEBUG(10, ("is_locked: optimisation - level II oplock " 131 "on file %s\n", fsp_str_dbg(fsp))); 132 return true; 133 } 134 } 135 136 br_lck = brl_get_locks_readonly(fsp); 137 if (!br_lck) { 138 return true; 139 } 140 ret = brl_locktest(br_lck, plock); 141 142 if (!ret) { 143 /* 144 * We got a lock conflict. Retry with rw locks to enable 145 * autocleanup. This is the slow path anyway. 146 */ 147 br_lck = brl_get_locks(talloc_tos(), fsp); 148 ret = brl_locktest(br_lck, plock); 149 TALLOC_FREE(br_lck); 150 } 151 152 DEBUG(10, ("strict_lock_default: flavour = %s brl start=%ju " 153 "len=%ju %s for fnum %ju file %s\n", 154 lock_flav_name(plock->lock_flav), 155 (uintmax_t)plock->start, (uintmax_t)plock->size, 156 ret ? "unlocked" : "locked", 157 (uintmax_t)plock->fnum, fsp_str_dbg(fsp))); 164 158 165 159 return ret; … … 198 192 return brl_lockquery(br_lck, 199 193 psmblctx, 200 sconn_server_id(fsp->conn->sconn),194 messaging_server_id(fsp->conn->sconn->msg_ctx), 201 195 poffset, 202 196 pcount, … … 246 240 bool blocking_lock, 247 241 NTSTATUS *perr, 248 uint64_t *psmblctx, 249 struct blocking_lock_record *blr) 242 uint64_t *psmblctx) 250 243 { 251 244 struct byte_range_lock *br_lck = NULL; … … 269 262 /* NOTE! 0 byte long ranges ARE allowed and should be stored */ 270 263 271 DEBUG(10,("do_lock: lock flavour %s lock type %s start=% .0f len=%.0f"272 "blocking_lock=%s requested for fnum %dfile %s\n",264 DEBUG(10,("do_lock: lock flavour %s lock type %s start=%ju len=%ju " 265 "blocking_lock=%s requested for %s file %s\n", 273 266 lock_flav_name(lock_flav), lock_type_name(lock_type), 274 ( double)offset, (double)count, blocking_lock ? "true" :275 "false", fsp ->fnum, fsp_str_dbg(fsp)));267 (uintmax_t)offset, (uintmax_t)count, blocking_lock ? "true" : 268 "false", fsp_fnum_dbg(fsp), fsp_str_dbg(fsp))); 276 269 277 270 br_lck = brl_get_locks(talloc_tos(), fsp); … … 284 277 br_lck, 285 278 smblctx, 286 sconn_server_id(fsp->conn->sconn),279 messaging_server_id(fsp->conn->sconn->msg_ctx), 287 280 offset, 288 281 count, … … 290 283 lock_flav, 291 284 blocking_lock, 292 psmblctx, 293 blr); 285 psmblctx); 294 286 295 287 DEBUG(10, ("do_lock: returning status=%s\n", nt_errstr(*perr))); … … 312 304 bool ok = False; 313 305 struct byte_range_lock *br_lck = NULL; 314 306 315 307 if (!fsp->can_lock) { 316 308 return fsp->is_directory ? 
NT_STATUS_INVALID_DEVICE_REQUEST : NT_STATUS_INVALID_HANDLE; 317 309 } 318 310 319 311 if (!lp_locking(fsp->conn->params)) { 320 312 return NT_STATUS_OK; 321 313 } 322 323 DEBUG(10, ("do_unlock: unlock start=%.0f len=%.0f requested for fnum %d file %s\n",324 (double)offset, (double)count, fsp->fnum,325 fsp_str_dbg(fsp)));314 315 DEBUG(10, ("do_unlock: unlock start=%ju len=%ju requested for %s file " 316 "%s\n", (uintmax_t)offset, (uintmax_t)count, 317 fsp_fnum_dbg(fsp), fsp_str_dbg(fsp))); 326 318 327 319 br_lck = brl_get_locks(talloc_tos(), fsp); … … 333 325 br_lck, 334 326 smblctx, 335 sconn_server_id(fsp->conn->sconn),327 messaging_server_id(fsp->conn->sconn->msg_ctx), 336 328 offset, 337 329 count, 338 330 lock_flav); 339 331 340 332 TALLOC_FREE(br_lck); 341 333 … … 354 346 355 347 NTSTATUS do_lock_cancel(files_struct *fsp, 356 uint64 smblctx,348 uint64_t smblctx, 357 349 uint64_t count, 358 350 uint64_t offset, 359 enum brl_flavour lock_flav, 360 struct blocking_lock_record *blr) 351 enum brl_flavour lock_flav) 361 352 { 362 353 bool ok = False; … … 367 358 NT_STATUS_INVALID_DEVICE_REQUEST : NT_STATUS_INVALID_HANDLE; 368 359 } 369 360 370 361 if (!lp_locking(fsp->conn->params)) { 371 362 return NT_STATUS_DOS(ERRDOS, ERRcancelviolation); 372 363 } 373 364 374 DEBUG(10, ("do_lock_cancel: cancel start=%.0f len=%.0f requested for fnum %d file %s\n",375 (double)offset, (double)count, fsp->fnum,376 fsp_str_dbg(fsp)));365 DEBUG(10, ("do_lock_cancel: cancel start=%ju len=%ju requested for " 366 "%s file %s\n", (uintmax_t)offset, (uintmax_t)count, 367 fsp_fnum_dbg(fsp), fsp_str_dbg(fsp))); 377 368 378 369 br_lck = brl_get_locks(talloc_tos(), fsp); … … 383 374 ok = brl_lock_cancel(br_lck, 384 375 smblctx, 385 sconn_server_id(fsp->conn->sconn),376 messaging_server_id(fsp->conn->sconn->msg_ctx), 386 377 offset, 387 378 count, 388 lock_flav, 389 blr); 379 lock_flav); 390 380 391 381 TALLOC_FREE(br_lck); … … 414 404 } 415 405 416 /* If we have no toutstanding locks or pending406 /* If we have no outstanding locks or pending 417 407 * locks then we don't need to look in the lock db. 
418 408 */ … … 431 421 } 432 422 433 /****************************************************************************434 Initialise the locking functions.435 ****************************************************************************/436 437 static bool locking_init_internal(bool read_only)438 {439 brl_init(read_only);440 441 if (lock_db)442 return True;443 444 lock_db = db_open(NULL, lock_path("locking.tdb"),445 lp_open_files_db_hash_size(),446 TDB_DEFAULT|TDB_VOLATILE|TDB_CLEAR_IF_FIRST|TDB_INCOMPATIBLE_HASH,447 read_only?O_RDONLY:O_RDWR|O_CREAT, 0644);448 449 if (!lock_db) {450 DEBUG(0,("ERROR: Failed to initialise locking database\n"));451 return False;452 }453 454 if (!posix_locking_init(read_only))455 return False;456 457 return True;458 }459 460 bool locking_init(void)461 {462 return locking_init_internal(false);463 }464 465 bool locking_init_readonly(void)466 {467 return locking_init_internal(true);468 }469 470 /*******************************************************************471 Deinitialize the share_mode management.472 ******************************************************************/473 474 bool locking_end(void)475 {476 brl_shutdown();477 TALLOC_FREE(lock_db);478 return true;479 }480 481 /*******************************************************************482 Form a static locking key for a dev/inode pair.483 ******************************************************************/484 485 static TDB_DATA locking_key(const struct file_id *id, struct file_id *tmp)486 {487 *tmp = *id;488 return make_tdb_data((const uint8_t *)tmp, sizeof(*tmp));489 }490 491 423 /******************************************************************* 492 424 Print out a share mode. … … 495 427 char *share_mode_str(TALLOC_CTX *ctx, int num, const struct share_mode_entry *e) 496 428 { 497 return talloc_asprintf(ctx, "share_mode_entry[%d]: %s " 429 struct server_id_buf tmp; 430 431 return talloc_asprintf(ctx, "share_mode_entry[%d]: " 498 432 "pid = %s, share_access = 0x%x, private_options = 0x%x, " 499 "access_mask = 0x%x, mid = 0x%llx, type= 0x%x, gen_id = %l u, "433 "access_mask = 0x%x, mid = 0x%llx, type= 0x%x, gen_id = %llu, " 500 434 "uid = %u, flags = %u, file_id %s, name_hash = 0x%x", 501 435 num, 502 e->op_type == UNUSED_SHARE_MODE_ENTRY ? "UNUSED" : "", 503 procid_str_static(&e->pid), 436 server_id_str_buf(e->pid, &tmp), 504 437 e->share_access, e->private_options, 505 438 e->access_mask, (unsigned long long)e->op_mid, 506 e->op_type, e->share_file_id,439 e->op_type, (unsigned long long)e->share_file_id, 507 440 (unsigned int)e->uid, (unsigned int)e->flags, 508 441 file_id_string_tos(&e->id), … … 511 444 512 445 /******************************************************************* 513 Print out a share mode table. 446 Fetch a share mode where we know one MUST exist. This call reference 447 counts it internally to allow for nested lock fetches. 
514 448 ********************************************************************/ 515 449 516 static void print_share_mode_table(struct locking_data *data) 517 { 518 int num_share_modes = data->u.s.num_share_mode_entries; 519 struct share_mode_entry *shares = 520 (struct share_mode_entry *)(data + 1); 521 int i; 522 523 for (i = 0; i < num_share_modes; i++) { 524 struct share_mode_entry entry; 525 char *str; 526 527 /* 528 * We need to memcpy the entry here due to alignment 529 * restrictions that are not met when directly accessing 530 * shares[i] 531 */ 532 533 memcpy(&entry, &shares[i], sizeof(struct share_mode_entry)); 534 str = share_mode_str(talloc_tos(), i, &entry); 535 536 DEBUG(10,("print_share_mode_table: %s\n", str ? str : "")); 537 TALLOC_FREE(str); 538 } 539 } 540 541 static int parse_delete_tokens_list(struct share_mode_lock *lck, 542 struct locking_data *pdata, 543 const TDB_DATA dbuf) 544 { 545 uint8_t *p = dbuf.dptr + sizeof(struct locking_data) + 546 (lck->num_share_modes * 547 sizeof(struct share_mode_entry)); 548 uint8_t *end_ptr = dbuf.dptr + (dbuf.dsize - 2); 549 int delete_tokens_size = 0; 550 int i; 551 552 lck->delete_tokens = NULL; 553 554 for (i = 0; i < pdata->u.s.num_delete_token_entries; i++) { 555 DATA_BLOB blob; 556 enum ndr_err_code ndr_err; 557 struct delete_token_list *pdtl; 558 size_t token_len = 0; 559 560 pdtl = TALLOC_ZERO_P(lck, struct delete_token_list); 561 if (pdtl == NULL) { 562 DEBUG(0,("parse_delete_tokens_list: talloc failed")); 563 return -1; 564 } 565 /* Copy out the name_hash. */ 566 memcpy(&pdtl->name_hash, p, sizeof(pdtl->name_hash)); 567 p += sizeof(pdtl->name_hash); 568 delete_tokens_size += sizeof(pdtl->name_hash); 569 570 pdtl->delete_token = TALLOC_ZERO_P(pdtl, struct security_unix_token); 571 if (pdtl->delete_token == NULL) { 572 DEBUG(0,("parse_delete_tokens_list: talloc failed")); 573 return -1; 574 } 575 576 if (p >= end_ptr) { 577 DEBUG(0,("parse_delete_tokens_list: corrupt data")); 578 return -1; 579 } 580 581 blob.data = p; 582 blob.length = end_ptr - p; 583 584 ndr_err = ndr_pull_struct_blob(&blob, 585 pdtl, 586 pdtl->delete_token, 587 (ndr_pull_flags_fn_t)ndr_pull_security_unix_token); 588 if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) { 589 DEBUG(1, ("parse_delete_tokens_list: " 590 "ndr_pull_security_unix_token failed\n")); 591 return -1; 592 } 593 594 token_len = ndr_size_security_unix_token(pdtl->delete_token, 0); 595 596 p += token_len; 597 delete_tokens_size += token_len; 598 599 if (p >= end_ptr) { 600 DEBUG(0,("parse_delete_tokens_list: corrupt data")); 601 return -1; 602 } 603 604 pdtl->delete_nt_token = TALLOC_ZERO_P(pdtl, struct security_token); 605 if (pdtl->delete_nt_token == NULL) { 606 DEBUG(0,("parse_delete_tokens_list: talloc failed")); 607 return -1; 608 } 609 610 blob.data = p; 611 blob.length = end_ptr - p; 612 613 ndr_err = ndr_pull_struct_blob(&blob, 614 pdtl, 615 pdtl->delete_nt_token, 616 (ndr_pull_flags_fn_t)ndr_pull_security_token); 617 if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) { 618 DEBUG(1, ("parse_delete_tokens_list: " 619 "ndr_pull_security_token failed\n")); 620 return -1; 621 } 622 623 token_len = ndr_size_security_token(pdtl->delete_nt_token, 0); 624 625 p += token_len; 626 delete_tokens_size += token_len; 627 628 /* Add to the list. */ 629 DLIST_ADD(lck->delete_tokens, pdtl); 630 } 631 632 return delete_tokens_size; 633 } 634 635 /******************************************************************* 636 Get all share mode entries for a dev/inode pair. 
637 ********************************************************************/ 638 639 static bool parse_share_modes(const TDB_DATA dbuf, struct share_mode_lock *lck) 640 { 641 struct locking_data data; 642 int delete_tokens_size; 643 int i; 644 645 if (dbuf.dsize < sizeof(struct locking_data)) { 646 smb_panic("parse_share_modes: buffer too short"); 647 } 648 649 memcpy(&data, dbuf.dptr, sizeof(data)); 650 651 lck->old_write_time = data.u.s.old_write_time; 652 lck->changed_write_time = data.u.s.changed_write_time; 653 lck->num_share_modes = data.u.s.num_share_mode_entries; 654 655 DEBUG(10, ("parse_share_modes: owrt: %s, " 656 "cwrt: %s, ntok: %u, num_share_modes: %d\n", 657 timestring(talloc_tos(), 658 convert_timespec_to_time_t(lck->old_write_time)), 659 timestring(talloc_tos(), 660 convert_timespec_to_time_t( 661 lck->changed_write_time)), 662 (unsigned int)data.u.s.num_delete_token_entries, 663 lck->num_share_modes)); 664 665 if ((lck->num_share_modes < 0) || (lck->num_share_modes > 1000000)) { 666 DEBUG(0, ("invalid number of share modes: %d\n", 667 lck->num_share_modes)); 668 smb_panic("parse_share_modes: invalid number of share modes"); 669 } 670 671 lck->share_modes = NULL; 672 673 if (lck->num_share_modes != 0) { 674 675 if (dbuf.dsize < (sizeof(struct locking_data) + 676 (lck->num_share_modes * 677 sizeof(struct share_mode_entry)))) { 678 smb_panic("parse_share_modes: buffer too short"); 679 } 680 681 lck->share_modes = (struct share_mode_entry *) 682 TALLOC_MEMDUP(lck, 683 dbuf.dptr+sizeof(struct locking_data), 684 lck->num_share_modes * 685 sizeof(struct share_mode_entry)); 686 687 if (lck->share_modes == NULL) { 688 smb_panic("parse_share_modes: talloc failed"); 689 } 690 } 691 692 /* Get any delete tokens. */ 693 delete_tokens_size = parse_delete_tokens_list(lck, &data, dbuf); 694 if (delete_tokens_size < 0) { 695 smb_panic("parse_share_modes: parse_delete_tokens_list failed"); 696 } 697 698 /* Save off the associated service path and filename. */ 699 lck->servicepath = (const char *)dbuf.dptr + sizeof(struct locking_data) + 700 (lck->num_share_modes * sizeof(struct share_mode_entry)) + 701 delete_tokens_size; 702 703 lck->base_name = (const char *)dbuf.dptr + sizeof(struct locking_data) + 704 (lck->num_share_modes * sizeof(struct share_mode_entry)) + 705 delete_tokens_size + 706 strlen(lck->servicepath) + 1; 707 708 lck->stream_name = (const char *)dbuf.dptr + sizeof(struct locking_data) + 709 (lck->num_share_modes * sizeof(struct share_mode_entry)) + 710 delete_tokens_size + 711 strlen(lck->servicepath) + 1 + 712 strlen(lck->base_name) + 1; 713 714 /* 715 * Ensure that each entry has a real process attached. 716 */ 717 718 for (i = 0; i < lck->num_share_modes; i++) { 719 struct share_mode_entry *entry_p = &lck->share_modes[i]; 720 char *str = NULL; 721 if (DEBUGLEVEL >= 10) { 722 str = share_mode_str(NULL, i, entry_p); 723 } 724 DEBUG(10,("parse_share_modes: %s\n", 725 str ? str : "")); 726 if (!serverid_exists(&entry_p->pid)) { 727 DEBUG(10,("parse_share_modes: deleted %s\n", 728 str ? 
str : "")); 729 entry_p->op_type = UNUSED_SHARE_MODE_ENTRY; 730 lck->modified = True; 731 } 732 TALLOC_FREE(str); 733 } 734 735 return True; 736 } 737 738 static TDB_DATA unparse_share_modes(const struct share_mode_lock *lck) 739 { 740 TDB_DATA result; 741 int num_valid = 0; 742 int i; 743 struct locking_data *data; 744 ssize_t offset; 745 ssize_t sp_len, bn_len, sn_len; 746 uint32_t delete_tokens_size = 0; 747 struct delete_token_list *pdtl = NULL; 748 uint32_t num_delete_token_entries = 0; 749 750 result.dptr = NULL; 751 result.dsize = 0; 752 753 for (i=0; i<lck->num_share_modes; i++) { 754 if (!is_unused_share_mode_entry(&lck->share_modes[i])) { 755 num_valid += 1; 756 } 757 } 758 759 if (num_valid == 0) { 760 return result; 761 } 762 763 sp_len = strlen(lck->servicepath); 764 bn_len = strlen(lck->base_name); 765 sn_len = lck->stream_name != NULL ? strlen(lck->stream_name) : 0; 766 767 for (pdtl = lck->delete_tokens; pdtl; pdtl = pdtl->next) { 768 num_delete_token_entries++; 769 delete_tokens_size += sizeof(uint32_t) + 770 ndr_size_security_unix_token(pdtl->delete_token, 0) + 771 ndr_size_security_token(pdtl->delete_nt_token, 0); 772 } 773 774 result.dsize = sizeof(*data) + 775 lck->num_share_modes * sizeof(struct share_mode_entry) + 776 delete_tokens_size + 777 sp_len + 1 + 778 bn_len + 1 + 779 sn_len + 1; 780 result.dptr = TALLOC_ARRAY(lck, uint8, result.dsize); 781 782 if (result.dptr == NULL) { 783 smb_panic("talloc failed"); 784 } 785 786 data = (struct locking_data *)result.dptr; 787 ZERO_STRUCTP(data); 788 data->u.s.num_share_mode_entries = lck->num_share_modes; 789 data->u.s.old_write_time = lck->old_write_time; 790 data->u.s.changed_write_time = lck->changed_write_time; 791 data->u.s.num_delete_token_entries = num_delete_token_entries; 792 793 DEBUG(10,("unparse_share_modes: owrt: %s cwrt: %s, ntok: %u, " 794 "num: %d\n", 795 timestring(talloc_tos(), 796 convert_timespec_to_time_t(lck->old_write_time)), 797 timestring(talloc_tos(), 798 convert_timespec_to_time_t( 799 lck->changed_write_time)), 800 (unsigned int)data->u.s.num_delete_token_entries, 801 data->u.s.num_share_mode_entries)); 802 803 memcpy(result.dptr + sizeof(*data), lck->share_modes, 804 sizeof(struct share_mode_entry)*lck->num_share_modes); 805 offset = sizeof(*data) + 806 sizeof(struct share_mode_entry)*lck->num_share_modes; 807 808 /* Store any delete on close tokens. */ 809 for (pdtl = lck->delete_tokens; pdtl; pdtl = pdtl->next) { 810 struct security_unix_token *pdt = pdtl->delete_token; 811 struct security_token *pdt_nt = pdtl->delete_nt_token; 812 uint8_t *p = result.dptr + offset; 813 DATA_BLOB blob; 814 enum ndr_err_code ndr_err; 815 816 memcpy(p, &pdtl->name_hash, sizeof(uint32_t)); 817 p += sizeof(uint32_t); 818 offset += sizeof(uint32_t); 819 820 ndr_err = ndr_push_struct_blob(&blob, 821 talloc_tos(), 822 pdt, 823 (ndr_push_flags_fn_t)ndr_push_security_unix_token); 824 825 if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) { 826 smb_panic("ndr_push_security_unix_token failed"); 827 } 828 829 /* We know we have space here as we counted above. */ 830 memcpy(p, blob.data, blob.length); 831 p += blob.length; 832 offset += blob.length; 833 TALLOC_FREE(blob.data); 834 835 ndr_err = ndr_push_struct_blob(&blob, 836 talloc_tos(), 837 pdt_nt, 838 (ndr_push_flags_fn_t)ndr_push_security_token); 839 840 if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) { 841 smb_panic("ndr_push_security_token failed"); 842 } 843 844 /* We know we have space here as we counted above. 
*/ 845 memcpy(p, blob.data, blob.length); 846 p += blob.length; 847 offset += blob.length; 848 TALLOC_FREE(blob.data); 849 } 850 851 safe_strcpy((char *)result.dptr + offset, lck->servicepath, 852 result.dsize - offset - 1); 853 offset += sp_len + 1; 854 safe_strcpy((char *)result.dptr + offset, lck->base_name, 855 result.dsize - offset - 1); 856 offset += bn_len + 1; 857 safe_strcpy((char *)result.dptr + offset, lck->stream_name, 858 result.dsize - offset - 1); 859 860 if (DEBUGLEVEL >= 10) { 861 print_share_mode_table(data); 862 } 863 864 return result; 865 } 866 867 static int share_mode_lock_destructor(struct share_mode_lock *lck) 868 { 869 NTSTATUS status; 870 TDB_DATA data; 871 872 if (!lck->modified) { 873 return 0; 874 } 875 876 data = unparse_share_modes(lck); 877 878 if (data.dptr == NULL) { 879 if (!lck->fresh) { 880 /* There has been an entry before, delete it */ 881 882 status = lck->record->delete_rec(lck->record); 883 if (!NT_STATUS_IS_OK(status)) { 884 char *errmsg; 885 886 DEBUG(0, ("delete_rec returned %s\n", 887 nt_errstr(status))); 888 889 if (asprintf(&errmsg, "could not delete share " 890 "entry: %s\n", 891 nt_errstr(status)) == -1) { 892 smb_panic("could not delete share" 893 "entry"); 894 } 895 smb_panic(errmsg); 896 } 897 } 898 goto done; 899 } 900 901 status = lck->record->store(lck->record, data, TDB_REPLACE); 902 if (!NT_STATUS_IS_OK(status)) { 903 char *errmsg; 904 905 DEBUG(0, ("store returned %s\n", nt_errstr(status))); 906 907 if (asprintf(&errmsg, "could not store share mode entry: %s", 908 nt_errstr(status)) == -1) { 909 smb_panic("could not store share mode entry"); 910 } 911 smb_panic(errmsg); 912 } 913 914 done: 915 916 return 0; 917 } 918 919 static bool fill_share_mode_lock(struct share_mode_lock *lck, 920 struct file_id id, 921 const char *servicepath, 922 const struct smb_filename *smb_fname, 923 TDB_DATA share_mode_data, 924 const struct timespec *old_write_time) 925 { 926 /* Ensure we set every field here as the destructor must be 927 valid even if parse_share_modes fails. 
*/ 928 929 lck->servicepath = NULL; 930 lck->base_name = NULL; 931 lck->stream_name = NULL; 932 lck->id = id; 933 lck->num_share_modes = 0; 934 lck->share_modes = NULL; 935 lck->delete_tokens = NULL; 936 ZERO_STRUCT(lck->old_write_time); 937 ZERO_STRUCT(lck->changed_write_time); 938 lck->fresh = False; 939 lck->modified = False; 940 941 lck->fresh = (share_mode_data.dptr == NULL); 942 943 if (lck->fresh) { 944 bool has_stream; 945 if (smb_fname == NULL || servicepath == NULL 946 || old_write_time == NULL) { 947 return False; 948 } 949 950 has_stream = smb_fname->stream_name != NULL; 951 952 lck->base_name = talloc_strdup(lck, smb_fname->base_name); 953 lck->stream_name = talloc_strdup(lck, smb_fname->stream_name); 954 lck->servicepath = talloc_strdup(lck, servicepath); 955 if (lck->base_name == NULL || 956 (has_stream && lck->stream_name == NULL) || 957 lck->servicepath == NULL) { 958 DEBUG(0, ("talloc failed\n")); 959 return False; 960 } 961 lck->old_write_time = *old_write_time; 962 } else { 963 if (!parse_share_modes(share_mode_data, lck)) { 964 DEBUG(0, ("Could not parse share modes\n")); 965 return False; 966 } 967 } 968 969 return True; 970 } 971 972 struct share_mode_lock *get_share_mode_lock(TALLOC_CTX *mem_ctx, 973 const struct file_id id, 974 const char *servicepath, 975 const struct smb_filename *smb_fname, 976 const struct timespec *old_write_time) 977 { 978 struct share_mode_lock *lck; 979 struct file_id tmp; 980 TDB_DATA key = locking_key(&id, &tmp); 981 982 if (!(lck = TALLOC_P(mem_ctx, struct share_mode_lock))) { 983 DEBUG(0, ("talloc failed\n")); 984 return NULL; 985 } 986 987 if (!(lck->record = lock_db->fetch_locked(lock_db, lck, key))) { 988 DEBUG(3, ("Could not lock share entry\n")); 989 TALLOC_FREE(lck); 990 return NULL; 991 } 992 993 if (!fill_share_mode_lock(lck, id, servicepath, smb_fname, 994 lck->record->value, old_write_time)) { 995 DEBUG(3, ("fill_share_mode_lock failed\n")); 996 TALLOC_FREE(lck); 997 return NULL; 998 } 999 1000 talloc_set_destructor(lck, share_mode_lock_destructor); 1001 1002 return lck; 1003 } 1004 1005 struct share_mode_lock *fetch_share_mode_unlocked(TALLOC_CTX *mem_ctx, 1006 const struct file_id id) 1007 { 1008 struct share_mode_lock *lck; 1009 struct file_id tmp; 1010 TDB_DATA key = locking_key(&id, &tmp); 1011 TDB_DATA data; 1012 1013 if (!(lck = TALLOC_P(mem_ctx, struct share_mode_lock))) { 1014 DEBUG(0, ("talloc failed\n")); 1015 return NULL; 1016 } 1017 1018 if (lock_db->fetch(lock_db, lck, key, &data) == -1) { 1019 DEBUG(3, ("Could not fetch share entry\n")); 1020 TALLOC_FREE(lck); 1021 return NULL; 1022 } 1023 1024 if (!fill_share_mode_lock(lck, id, NULL, NULL, data, NULL)) { 1025 DEBUG(10, ("fetch_share_mode_unlocked: no share_mode record " 1026 "around (file not open)\n")); 1027 TALLOC_FREE(lck); 1028 return NULL; 1029 } 1030 1031 return lck; 450 struct share_mode_lock *get_existing_share_mode_lock(TALLOC_CTX *mem_ctx, 451 const struct file_id id) 452 { 453 return get_share_mode_lock(mem_ctx, id, NULL, NULL, NULL); 1032 454 } 1033 455 … … 1041 463 bool rename_share_filename(struct messaging_context *msg_ctx, 1042 464 struct share_mode_lock *lck, 465 struct file_id id, 1043 466 const char *servicepath, 1044 467 uint32_t orig_name_hash, … … 1046 469 const struct smb_filename *smb_fname_dst) 1047 470 { 471 struct share_mode_data *d = lck->data; 1048 472 size_t sp_len; 1049 473 size_t bn_len; … … 1051 475 size_t msg_len; 1052 476 char *frm = NULL; 1053 int i;477 uint32_t i; 1054 478 bool strip_two_chars = false; 1055 479 bool 
has_stream = smb_fname_dst->stream_name != NULL; 480 struct server_id self_pid = messaging_server_id(msg_ctx); 1056 481 1057 482 DEBUG(10, ("rename_share_filename: servicepath %s newname %s\n", … … 1069 494 } 1070 495 1071 lck->servicepath = talloc_strdup(lck, servicepath);1072 lck->base_name = talloc_strdup(lck, smb_fname_dst->base_name +496 d->servicepath = talloc_strdup(d, servicepath); 497 d->base_name = talloc_strdup(d, smb_fname_dst->base_name + 1073 498 (strip_two_chars ? 2 : 0)); 1074 lck->stream_name = talloc_strdup(lck, smb_fname_dst->stream_name);1075 if ( lck->base_name == NULL ||1076 (has_stream && lck->stream_name == NULL) ||1077 lck->servicepath == NULL) {499 d->stream_name = talloc_strdup(d, smb_fname_dst->stream_name); 500 if (d->base_name == NULL || 501 (has_stream && d->stream_name == NULL) || 502 d->servicepath == NULL) { 1078 503 DEBUG(0, ("rename_share_filename: talloc failed\n")); 1079 504 return False; 1080 505 } 1081 lck->modified = True;1082 1083 sp_len = strlen( lck->servicepath);1084 bn_len = strlen( lck->base_name);1085 sn_len = has_stream ? strlen( lck->stream_name) : 0;506 d->modified = True; 507 508 sp_len = strlen(d->servicepath); 509 bn_len = strlen(d->base_name); 510 sn_len = has_stream ? strlen(d->stream_name) : 0; 1086 511 1087 512 msg_len = MSG_FILE_RENAMED_MIN_SIZE + sp_len + 1 + bn_len + 1 + … … 1089 514 1090 515 /* Set up the name changed message. */ 1091 frm = TALLOC_ARRAY(lck, char, msg_len);516 frm = talloc_array(d, char, msg_len); 1092 517 if (!frm) { 1093 518 return False; 1094 519 } 1095 520 1096 push_file_id_24(frm, & lck->id);521 push_file_id_24(frm, &id); 1097 522 1098 523 DEBUG(10,("rename_share_filename: msg_len = %u\n", (unsigned int)msg_len )); 1099 524 1100 safe_strcpy(&frm[24], lck->servicepath, sp_len); 1101 safe_strcpy(&frm[24 + sp_len + 1], lck->base_name, bn_len); 1102 safe_strcpy(&frm[24 + sp_len + 1 + bn_len + 1], lck->stream_name, 1103 sn_len); 525 strlcpy(&frm[24], 526 d->servicepath ? d->servicepath : "", 527 sp_len+1); 528 strlcpy(&frm[24 + sp_len + 1], 529 d->base_name ? d->base_name : "", 530 bn_len+1); 531 strlcpy(&frm[24 + sp_len + 1 + bn_len + 1], 532 d->stream_name ? d->stream_name : "", 533 sn_len+1); 1104 534 1105 535 /* Send the messages. */ 1106 for (i=0; i<lck->num_share_modes; i++) { 1107 struct share_mode_entry *se = &lck->share_modes[i]; 536 for (i=0; i<d->num_share_modes; i++) { 537 struct share_mode_entry *se = &d->share_modes[i]; 538 struct server_id_buf tmp; 539 1108 540 if (!is_valid_share_mode_entry(se)) { 1109 541 continue; … … 1119 551 1120 552 /* But not to ourselves... */ 1121 if (procid_is_me(&se->pid)) { 553 if (serverid_equal(&se->pid, &self_pid)) { 554 continue; 555 } 556 557 if (share_mode_stale_pid(d, i)) { 1122 558 continue; 1123 559 } … … 1126 562 "pid %s file_id %s sharepath %s base_name %s " 1127 563 "stream_name %s\n", 1128 procid_str_static(&se->pid),1129 file_id_string_tos(& lck->id),1130 lck->servicepath, lck->base_name,1131 has_stream ? lck->stream_name : ""));564 server_id_str_buf(se->pid, &tmp), 565 file_id_string_tos(&id), 566 d->servicepath, d->base_name, 567 has_stream ? d->stream_name : "")); 1132 568 1133 569 messaging_send_buf(msg_ctx, se->pid, MSG_SMB_FILE_RENAME, 1134 (uint8 *)frm, msg_len); 570 (uint8_t *)frm, msg_len); 571 } 572 573 for (i=0; i<d->num_leases; i++) { 574 /* Update the filename in leases_db. 
*/ 575 NTSTATUS status; 576 struct share_mode_lease *l; 577 578 l = &d->leases[i]; 579 580 status = leases_db_rename(&l->client_guid, 581 &l->lease_key, 582 &id, 583 d->servicepath, 584 d->base_name, 585 d->stream_name); 586 if (!NT_STATUS_IS_OK(status)) { 587 /* Any error recovery possible here ? */ 588 DEBUG(1,("Failed to rename lease key for " 589 "renamed file %s:%s. %s\n", 590 d->base_name, 591 d->stream_name, 592 nt_errstr(status))); 593 continue; 594 } 1135 595 } 1136 596 … … 1162 622 1163 623 if (write_time) { 1164 struct timespec wt; 1165 1166 wt = lck->changed_write_time; 1167 if (null_timespec(wt)) { 1168 wt = lck->old_write_time; 1169 } 1170 1171 *write_time = wt; 624 *write_time = get_share_mode_write_time(lck); 1172 625 } 1173 626 … … 1179 632 int num_props = 0; 1180 633 1181 if (e->op_type == UNUSED_SHARE_MODE_ENTRY) { 1182 /* cope with dead entries from the process not 1183 existing. These should not be considered valid, 1184 otherwise we end up doing zero timeout sharing 1185 violation */ 1186 return False; 634 if (e->stale) { 635 return false; 1187 636 } 1188 637 … … 1190 639 num_props += (EXCLUSIVE_OPLOCK_TYPE(e->op_type) ? 1 : 0); 1191 640 num_props += (LEVEL_II_OPLOCK_TYPE(e->op_type) ? 1 : 0); 1192 1193 SMB_ASSERT(num_props <= 1); 641 num_props += (e->op_type == LEASE_OPLOCK); 642 643 if ((num_props > 1) && serverid_exists(&e->pid)) { 644 smb_panic("Invalid share mode entry"); 645 } 1194 646 return (num_props != 0); 1195 647 } 1196 648 1197 bool is_deferred_open_entry(const struct share_mode_entry *e) 1198 { 1199 return (e->op_type == DEFERRED_OPEN_ENTRY); 1200 } 1201 1202 bool is_unused_share_mode_entry(const struct share_mode_entry *e) 1203 { 1204 return (e->op_type == UNUSED_SHARE_MODE_ENTRY); 1205 } 1206 1207 /******************************************************************* 1208 Fill a share mode entry. 1209 ********************************************************************/ 1210 1211 static void fill_share_mode_entry(struct share_mode_entry *e, 1212 files_struct *fsp, 1213 uid_t uid, uint64_t mid, uint16 op_type) 1214 { 649 /* 650 * See if we need to remove a lease being referred to by a 651 * share mode that is being marked stale or deleted. 652 */ 653 654 static void remove_share_mode_lease(struct share_mode_data *d, 655 struct share_mode_entry *e) 656 { 657 struct GUID client_guid; 658 struct smb2_lease_key lease_key; 659 uint16_t op_type; 660 uint32_t lease_idx; 661 uint32_t i; 662 663 op_type = e->op_type; 664 e->op_type = NO_OPLOCK; 665 666 d->modified = true; 667 668 if (op_type != LEASE_OPLOCK) { 669 return; 670 } 671 672 /* 673 * This used to reference a lease. If there's no other one referencing 674 * it, remove it. 675 */ 676 677 lease_idx = e->lease_idx; 678 e->lease_idx = UINT32_MAX; 679 680 for (i=0; i<d->num_share_modes; i++) { 681 if (d->share_modes[i].stale) { 682 continue; 683 } 684 if (e == &d->share_modes[i]) { 685 /* Not ourselves. */ 686 continue; 687 } 688 if (d->share_modes[i].lease_idx == lease_idx) { 689 break; 690 } 691 } 692 if (i < d->num_share_modes) { 693 /* 694 * Found another one 695 */ 696 return; 697 } 698 699 memcpy(&client_guid, 700 &d->leases[lease_idx].client_guid, 701 sizeof(client_guid)); 702 lease_key = d->leases[lease_idx].lease_key; 703 704 d->num_leases -= 1; 705 d->leases[lease_idx] = d->leases[d->num_leases]; 706 707 /* 708 * We changed the lease array. Fix all references to it. 
709 */ 710 for (i=0; i<d->num_share_modes; i++) { 711 if (d->share_modes[i].lease_idx == d->num_leases) { 712 d->share_modes[i].lease_idx = lease_idx; 713 d->share_modes[i].lease = &d->leases[lease_idx]; 714 } 715 } 716 717 { 718 NTSTATUS status; 719 720 status = leases_db_del(&client_guid, 721 &lease_key, 722 &e->id); 723 724 DEBUG(10, ("%s: leases_db_del returned %s\n", __func__, 725 nt_errstr(status))); 726 } 727 } 728 729 /* 730 * In case d->share_modes[i] conflicts with something or otherwise is 731 * being used, we need to make sure the corresponding process still 732 * exists. 733 */ 734 bool share_mode_stale_pid(struct share_mode_data *d, uint32_t idx) 735 { 736 struct server_id_buf tmp; 737 struct share_mode_entry *e; 738 739 if (idx > d->num_share_modes) { 740 DEBUG(1, ("Asking for index %u, only %u around\n", 741 idx, (unsigned)d->num_share_modes)); 742 return false; 743 } 744 e = &d->share_modes[idx]; 745 if (e->stale) { 746 /* 747 * Checked before 748 */ 749 return true; 750 } 751 if (serverid_exists(&e->pid)) { 752 DEBUG(10, ("PID %s (index %u out of %u) still exists\n", 753 server_id_str_buf(e->pid, &tmp), idx, 754 (unsigned)d->num_share_modes)); 755 return false; 756 } 757 DEBUG(10, ("PID %s (index %u out of %u) does not exist anymore\n", 758 server_id_str_buf(e->pid, &tmp), idx, 759 (unsigned)d->num_share_modes)); 760 761 e->stale = true; 762 763 if (d->num_delete_tokens != 0) { 764 uint32_t i, num_stale; 765 766 /* 767 * We cannot have any delete tokens 768 * if there are no valid share modes. 769 */ 770 771 num_stale = 0; 772 773 for (i=0; i<d->num_share_modes; i++) { 774 if (d->share_modes[i].stale) { 775 num_stale += 1; 776 } 777 } 778 779 if (num_stale == d->num_share_modes) { 780 /* 781 * No non-stale share mode found 782 */ 783 TALLOC_FREE(d->delete_tokens); 784 d->num_delete_tokens = 0; 785 } 786 } 787 788 remove_share_mode_lease(d, e); 789 790 d->modified = true; 791 return true; 792 } 793 794 void remove_stale_share_mode_entries(struct share_mode_data *d) 795 { 796 uint32_t i; 797 798 i = 0; 799 while (i < d->num_share_modes) { 800 if (d->share_modes[i].stale) { 801 struct share_mode_entry *m = d->share_modes; 802 m[i] = m[d->num_share_modes-1]; 803 d->num_share_modes -= 1; 804 } else { 805 i += 1; 806 } 807 } 808 } 809 810 bool set_share_mode(struct share_mode_lock *lck, struct files_struct *fsp, 811 uid_t uid, uint64_t mid, uint16_t op_type, 812 uint32_t lease_idx) 813 { 814 struct share_mode_data *d = lck->data; 815 struct share_mode_entry *tmp, *e; 816 struct share_mode_lease *lease = NULL; 817 818 if (lease_idx == UINT32_MAX) { 819 lease = NULL; 820 } else if (lease_idx >= d->num_leases) { 821 return false; 822 } else { 823 lease = &d->leases[lease_idx]; 824 } 825 826 tmp = talloc_realloc(d, d->share_modes, struct share_mode_entry, 827 d->num_share_modes+1); 828 if (tmp == NULL) { 829 return false; 830 } 831 d->share_modes = tmp; 832 e = &d->share_modes[d->num_share_modes]; 833 d->num_share_modes += 1; 834 d->modified = true; 835 1215 836 ZERO_STRUCTP(e); 1216 e->pid = sconn_server_id(fsp->conn->sconn);837 e->pid = messaging_server_id(fsp->conn->sconn->msg_ctx); 1217 838 e->share_access = fsp->share_access; 1218 839 e->private_options = fsp->fh->private_options; … … 1220 841 e->op_mid = mid; 1221 842 e->op_type = op_type; 843 e->lease_idx = lease_idx; 844 e->lease = lease; 1222 845 e->time.tv_sec = fsp->open_time.tv_sec; 1223 846 e->time.tv_usec = fsp->open_time.tv_usec; 1224 847 e->id = fsp->file_id; 1225 848 e->share_file_id = fsp->fh->gen_id; 1226 e->uid 
= (uint32)uid; 1227 e->flags = fsp->posix_open ? SHARE_MODE_FLAG_POSIX_OPEN : 0; 849 e->uid = (uint32_t)uid; 850 e->flags = (fsp->posix_flags & FSP_POSIX_FLAGS_OPEN) ? 851 SHARE_MODE_FLAG_POSIX_OPEN : 0; 1228 852 e->name_hash = fsp->name_hash; 1229 } 1230 1231 static void fill_deferred_open_entry(struct share_mode_entry *e, 1232 const struct timeval request_time, 1233 struct file_id id, 1234 struct server_id pid, 1235 uint64_t mid) 1236 { 1237 ZERO_STRUCTP(e); 1238 e->pid = pid; 1239 e->op_mid = mid; 1240 e->op_type = DEFERRED_OPEN_ENTRY; 1241 e->time.tv_sec = request_time.tv_sec; 1242 e->time.tv_usec = request_time.tv_usec; 1243 e->id = id; 1244 e->uid = (uint32)-1; 1245 e->flags = 0; 1246 } 1247 1248 static void add_share_mode_entry(struct share_mode_lock *lck, 1249 const struct share_mode_entry *entry) 1250 { 853 854 return true; 855 } 856 857 static struct share_mode_entry *find_share_mode_entry( 858 struct share_mode_lock *lck, files_struct *fsp) 859 { 860 struct share_mode_data *d = lck->data; 861 struct server_id pid; 1251 862 int i; 1252 863 1253 for (i=0; i<lck->num_share_modes; i++) { 1254 struct share_mode_entry *e = &lck->share_modes[i]; 1255 if (is_unused_share_mode_entry(e)) { 1256 *e = *entry; 1257 break; 1258 } 1259 } 1260 1261 if (i == lck->num_share_modes) { 1262 /* No unused entry found */ 1263 ADD_TO_ARRAY(lck, struct share_mode_entry, *entry, 1264 &lck->share_modes, &lck->num_share_modes); 1265 } 1266 lck->modified = True; 1267 } 1268 1269 void set_share_mode(struct share_mode_lock *lck, files_struct *fsp, 1270 uid_t uid, uint64_t mid, uint16 op_type) 1271 { 1272 struct share_mode_entry entry; 1273 fill_share_mode_entry(&entry, fsp, uid, mid, op_type); 1274 add_share_mode_entry(lck, &entry); 1275 } 1276 1277 void add_deferred_open(struct share_mode_lock *lck, uint64_t mid, 1278 struct timeval request_time, 1279 struct server_id pid, struct file_id id) 1280 { 1281 struct share_mode_entry entry; 1282 fill_deferred_open_entry(&entry, request_time, id, pid, mid); 1283 add_share_mode_entry(lck, &entry); 1284 } 1285 1286 /******************************************************************* 1287 Check if two share mode entries are identical, ignoring oplock 1288 and mid info and desired_access. (Removed paranoia test - it's 1289 not automatically a logic error if they are identical. JRA.) 1290 ********************************************************************/ 1291 1292 static bool share_modes_identical(struct share_mode_entry *e1, 1293 struct share_mode_entry *e2) 1294 { 1295 /* We used to check for e1->share_access == e2->share_access here 1296 as well as the other fields but 2 different DOS or FCB opens 1297 sharing the same share mode entry may validly differ in 1298 fsp->share_access field. 
*/ 1299 1300 return (procid_equal(&e1->pid, &e2->pid) && 1301 file_id_equal(&e1->id, &e2->id) && 1302 e1->share_file_id == e2->share_file_id ); 1303 } 1304 1305 static bool deferred_open_identical(struct share_mode_entry *e1, 1306 struct share_mode_entry *e2) 1307 { 1308 return (procid_equal(&e1->pid, &e2->pid) && 1309 (e1->op_mid == e2->op_mid) && 1310 file_id_equal(&e1->id, &e2->id)); 1311 } 1312 1313 static struct share_mode_entry *find_share_mode_entry(struct share_mode_lock *lck, 1314 struct share_mode_entry *entry) 1315 { 1316 int i; 1317 1318 for (i=0; i<lck->num_share_modes; i++) { 1319 struct share_mode_entry *e = &lck->share_modes[i]; 1320 if (is_valid_share_mode_entry(entry) && 1321 is_valid_share_mode_entry(e) && 1322 share_modes_identical(e, entry)) { 1323 return e; 1324 } 1325 if (is_deferred_open_entry(entry) && 1326 is_deferred_open_entry(e) && 1327 deferred_open_identical(e, entry)) { 1328 return e; 1329 } 864 pid = messaging_server_id(fsp->conn->sconn->msg_ctx); 865 866 for (i=0; i<d->num_share_modes; i++) { 867 struct share_mode_entry *e = &d->share_modes[i]; 868 869 if (!is_valid_share_mode_entry(e)) { 870 continue; 871 } 872 if (!serverid_equal(&pid, &e->pid)) { 873 continue; 874 } 875 if (!file_id_equal(&fsp->file_id, &e->id)) { 876 continue; 877 } 878 if (fsp->fh->gen_id != e->share_file_id) { 879 continue; 880 } 881 return e; 1330 882 } 1331 883 return NULL; … … 1339 891 bool del_share_mode(struct share_mode_lock *lck, files_struct *fsp) 1340 892 { 1341 struct share_mode_entry entry, *e; 1342 1343 /* Don't care about the pid owner being correct here - just a search. */ 1344 fill_share_mode_entry(&entry, fsp, (uid_t)-1, 0, NO_OPLOCK); 1345 1346 e = find_share_mode_entry(lck, &entry); 893 struct share_mode_entry *e; 894 895 e = find_share_mode_entry(lck, fsp); 1347 896 if (e == NULL) { 1348 897 return False; 1349 898 } 1350 1351 e->op_type = UNUSED_SHARE_MODE_ENTRY; 1352 lck->modified = True; 899 remove_share_mode_lease(lck->data, e); 900 *e = lck->data->share_modes[lck->data->num_share_modes-1]; 901 lck->data->num_share_modes -= 1; 902 lck->data->modified = True; 1353 903 return True; 1354 904 } 1355 905 1356 void del_deferred_open_entry(struct share_mode_lock *lck, uint64_t mid, 1357 struct server_id pid) 1358 { 1359 struct share_mode_entry entry, *e; 1360 1361 fill_deferred_open_entry(&entry, timeval_zero(), 1362 lck->id, pid, mid); 1363 1364 e = find_share_mode_entry(lck, &entry); 906 bool mark_share_mode_disconnected(struct share_mode_lock *lck, 907 struct files_struct *fsp) 908 { 909 struct share_mode_entry *e; 910 911 if (lck->data->num_share_modes != 1) { 912 return false; 913 } 914 915 if (fsp->op == NULL) { 916 return false; 917 } 918 if (!fsp->op->global->durable) { 919 return false; 920 } 921 922 e = find_share_mode_entry(lck, fsp); 1365 923 if (e == NULL) { 1366 return; 1367 } 1368 1369 e->op_type = UNUSED_SHARE_MODE_ENTRY; 1370 lck->modified = True; 924 return false; 925 } 926 927 DEBUG(10, ("Marking share mode entry disconnected for durable handle\n")); 928 929 server_id_set_disconnected(&e->pid); 930 931 /* 932 * On reopen the caller needs to check that 933 * the client comes with the correct handle. 934 */ 935 e->share_file_id = fsp->op->global->open_persistent_id; 936 937 lck->data->modified = true; 938 return true; 1371 939 } 1372 940 … … 1377 945 bool remove_share_oplock(struct share_mode_lock *lck, files_struct *fsp) 1378 946 { 1379 struct share_mode_entry entry, *e; 1380 1381 /* Don't care about the pid owner being correct here - just a search. 
*/ 1382 fill_share_mode_entry(&entry, fsp, (uid_t)-1, 0, NO_OPLOCK); 1383 1384 e = find_share_mode_entry(lck, &entry); 947 struct share_mode_data *d = lck->data; 948 struct share_mode_entry *e; 949 950 e = find_share_mode_entry(lck, fsp); 1385 951 if (e == NULL) { 1386 952 return False; 1387 953 } 1388 954 1389 if (EXCLUSIVE_OPLOCK_TYPE(e->op_type)) { 1390 /* 1391 * Going from exclusive or batch, 1392 * we always go through FAKE_LEVEL_II 1393 * first. 1394 */ 1395 if (!EXCLUSIVE_OPLOCK_TYPE(fsp->oplock_type)) { 1396 smb_panic("remove_share_oplock: logic error"); 1397 } 1398 e->op_type = FAKE_LEVEL_II_OPLOCK; 1399 } else { 1400 e->op_type = NO_OPLOCK; 1401 } 1402 lck->modified = True; 1403 return True; 955 remove_share_mode_lease(d, e); 956 d->modified = True; 957 return true; 1404 958 } 1405 959 … … 1410 964 bool downgrade_share_oplock(struct share_mode_lock *lck, files_struct *fsp) 1411 965 { 1412 struct share_mode_entry entry, *e; 1413 1414 /* Don't care about the pid owner being correct here - just a search. */ 1415 fill_share_mode_entry(&entry, fsp, (uid_t)-1, 0, NO_OPLOCK); 1416 1417 e = find_share_mode_entry(lck, &entry); 966 struct share_mode_entry *e; 967 968 e = find_share_mode_entry(lck, fsp); 1418 969 if (e == NULL) { 1419 970 return False; … … 1421 972 1422 973 e->op_type = LEVEL_II_OPLOCK; 1423 lck-> modified = True;974 lck->data->modified = True; 1424 975 return True; 1425 976 } 1426 977 1427 /**************************************************************************** 1428 Check if setting delete on close is allowed on this fsp. 1429 ****************************************************************************/ 1430 1431 NTSTATUS can_set_delete_on_close(files_struct *fsp, uint32 dosmode) 1432 { 978 NTSTATUS downgrade_share_lease(struct smbd_server_connection *sconn, 979 struct share_mode_lock *lck, 980 const struct smb2_lease_key *key, 981 uint32_t new_lease_state, 982 struct share_mode_lease **_l) 983 { 984 struct share_mode_data *d = lck->data; 985 struct share_mode_lease *l; 986 uint32_t i; 987 988 *_l = NULL; 989 990 for (i=0; i<d->num_leases; i++) { 991 if (smb2_lease_equal(&sconn->client->connections->smb2.client.guid, 992 key, 993 &d->leases[i].client_guid, 994 &d->leases[i].lease_key)) { 995 break; 996 } 997 } 998 if (i == d->num_leases) { 999 DEBUG(10, ("lease not found\n")); 1000 return NT_STATUS_INVALID_PARAMETER; 1001 } 1002 1003 l = &d->leases[i]; 1004 1005 if (!l->breaking) { 1006 DEBUG(1, ("Attempt to break from %d to %d - but we're not in breaking state\n", 1007 (int)l->current_state, (int)new_lease_state)); 1008 return NT_STATUS_UNSUCCESSFUL; 1009 } 1010 1433 1011 /* 1434 * Only allow delete on close for writable files. 1012 * Can't upgrade anything: l->breaking_to_requested (and l->current_state) 1013 * must be a strict bitwise superset of new_lease_state 1435 1014 */ 1436 1437 if ((dosmode & FILE_ATTRIBUTE_READONLY) && 1438 !lp_delete_readonly(SNUM(fsp->conn))) { 1439 DEBUG(10,("can_set_delete_on_close: file %s delete on close " 1440 "flag set but file attribute is readonly.\n", 1441 fsp_str_dbg(fsp))); 1442 return NT_STATUS_CANNOT_DELETE; 1443 } 1444 1445 /* 1446 * Only allow delete on close for writable shares. 1447 */ 1448 1449 if (!CAN_WRITE(fsp->conn)) { 1450 DEBUG(10,("can_set_delete_on_close: file %s delete on " 1451 "close flag set but write access denied on share.\n", 1452 fsp_str_dbg(fsp))); 1453 return NT_STATUS_ACCESS_DENIED; 1454 } 1455 1456 /* 1457 * Only allow delete on close for files/directories opened with delete 1458 * intent. 
1459 */ 1460 1461 if (!(fsp->access_mask & DELETE_ACCESS)) { 1462 DEBUG(10,("can_set_delete_on_close: file %s delete on " 1463 "close flag set but delete access denied.\n", 1464 fsp_str_dbg(fsp))); 1465 return NT_STATUS_ACCESS_DENIED; 1466 } 1467 1468 /* Don't allow delete on close for non-empty directories. */ 1469 if (fsp->is_directory) { 1470 SMB_ASSERT(!is_ntfs_stream_smb_fname(fsp->fsp_name)); 1471 1472 /* Or the root of a share. */ 1473 if (ISDOT(fsp->fsp_name->base_name)) { 1474 DEBUG(10,("can_set_delete_on_close: can't set delete on " 1475 "close for the root of a share.\n")); 1476 return NT_STATUS_ACCESS_DENIED; 1477 } 1478 1479 return can_delete_directory_fsp(fsp); 1480 } 1015 if ((new_lease_state & l->breaking_to_requested) != new_lease_state) { 1016 DEBUG(1, ("Attempt to upgrade from %d to %d - expected %d\n", 1017 (int)l->current_state, (int)new_lease_state, 1018 (int)l->breaking_to_requested)); 1019 return NT_STATUS_REQUEST_NOT_ACCEPTED; 1020 } 1021 1022 if (l->current_state != new_lease_state) { 1023 l->current_state = new_lease_state; 1024 d->modified = true; 1025 } 1026 1027 if ((new_lease_state & ~l->breaking_to_required) != 0) { 1028 DEBUG(5, ("lease state %d not fully broken from %d to %d\n", 1029 (int)new_lease_state, 1030 (int)l->current_state, 1031 (int)l->breaking_to_required)); 1032 l->breaking_to_requested = l->breaking_to_required; 1033 if (l->current_state & (~SMB2_LEASE_READ)) { 1034 /* 1035 * Here we break in steps, as windows does 1036 * see the breaking3 and v2_breaking3 tests. 1037 */ 1038 l->breaking_to_requested |= SMB2_LEASE_READ; 1039 } 1040 d->modified = true; 1041 *_l = l; 1042 return NT_STATUS_OPLOCK_BREAK_IN_PROGRESS; 1043 } 1044 1045 DEBUG(10, ("breaking from %d to %d - expected %d\n", 1046 (int)l->current_state, (int)new_lease_state, 1047 (int)l->breaking_to_requested)); 1048 1049 l->breaking_to_requested = 0; 1050 l->breaking_to_required = 0; 1051 l->breaking = false; 1052 1053 d->modified = true; 1481 1054 1482 1055 return NT_STATUS_OK; 1483 }1484 1485 /*************************************************************************1486 Return a talloced copy of a struct security_unix_token. NULL on fail.1487 (Should this be in locking.c.... ?).1488 *************************************************************************/1489 1490 static struct security_unix_token *copy_unix_token(TALLOC_CTX *ctx, const struct security_unix_token *tok)1491 {1492 struct security_unix_token *cpy;1493 1494 cpy = TALLOC_P(ctx, struct security_unix_token);1495 if (!cpy) {1496 return NULL;1497 }1498 1499 cpy->uid = tok->uid;1500 cpy->gid = tok->gid;1501 cpy->ngroups = tok->ngroups;1502 if (tok->ngroups) {1503 /* Make this a talloc child of cpy. 
*/1504 cpy->groups = TALLOC_ARRAY(cpy, gid_t, tok->ngroups);1505 if (!cpy->groups) {1506 return NULL;1507 }1508 memcpy(cpy->groups, tok->groups, tok->ngroups * sizeof(gid_t));1509 }1510 return cpy;1511 1056 } 1512 1057 … … 1515 1060 ****************************************************************************/ 1516 1061 1517 static bool add_delete_on_close_token(struct share_mode_ lock *lck,1062 static bool add_delete_on_close_token(struct share_mode_data *d, 1518 1063 uint32_t name_hash, 1519 1064 const struct security_token *nt_tok, 1520 1065 const struct security_unix_token *tok) 1521 1066 { 1522 struct delete_token_list *dtl; 1523 1524 dtl = TALLOC_ZERO_P(lck, struct delete_token_list); 1525 if (dtl == NULL) { 1067 struct delete_token *tmp, *dtl; 1068 1069 tmp = talloc_realloc(d, d->delete_tokens, struct delete_token, 1070 d->num_delete_tokens+1); 1071 if (tmp == NULL) { 1526 1072 return false; 1527 1073 } 1074 d->delete_tokens = tmp; 1075 dtl = &d->delete_tokens[d->num_delete_tokens]; 1528 1076 1529 1077 dtl->name_hash = name_hash; 1530 dtl->delete_token = copy_unix_token(dtl, tok); 1078 dtl->delete_nt_token = dup_nt_token(d->delete_tokens, nt_tok); 1079 if (dtl->delete_nt_token == NULL) { 1080 return false; 1081 } 1082 dtl->delete_token = copy_unix_token(d->delete_tokens, tok); 1531 1083 if (dtl->delete_token == NULL) { 1532 TALLOC_FREE(dtl);1533 1084 return false; 1534 1085 } 1535 dtl->delete_nt_token = dup_nt_token(dtl, nt_tok); 1536 if (dtl->delete_nt_token == NULL) { 1537 TALLOC_FREE(dtl); 1538 return false; 1539 } 1540 DLIST_ADD(lck->delete_tokens, dtl); 1541 lck->modified = true; 1086 d->num_delete_tokens += 1; 1087 d->modified = true; 1542 1088 return true; 1089 } 1090 1091 void reset_delete_on_close_lck(files_struct *fsp, 1092 struct share_mode_lock *lck) 1093 { 1094 struct share_mode_data *d = lck->data; 1095 uint32_t i; 1096 1097 for (i=0; i<d->num_delete_tokens; i++) { 1098 struct delete_token *dt = &d->delete_tokens[i]; 1099 1100 if (dt->name_hash == fsp->name_hash) { 1101 d->modified = true; 1102 1103 /* Delete this entry. */ 1104 TALLOC_FREE(dt->delete_nt_token); 1105 TALLOC_FREE(dt->delete_token); 1106 *dt = d->delete_tokens[d->num_delete_tokens-1]; 1107 d->num_delete_tokens -= 1; 1108 } 1109 } 1543 1110 } 1544 1111 … … 1556 1123 void set_delete_on_close_lck(files_struct *fsp, 1557 1124 struct share_mode_lock *lck, 1558 bool delete_on_close,1559 1125 const struct security_token *nt_tok, 1560 1126 const struct security_unix_token *tok) 1561 1127 { 1562 struct delete_token_list *dtl; 1128 struct messaging_context *msg_ctx = fsp->conn->sconn->msg_ctx; 1129 struct share_mode_data *d = lck->data; 1130 uint32_t i; 1563 1131 bool ret; 1564 1565 if (delete_on_close) { 1566 SMB_ASSERT(nt_tok != NULL); 1567 SMB_ASSERT(tok != NULL); 1568 } else { 1569 SMB_ASSERT(nt_tok == NULL); 1570 SMB_ASSERT(tok == NULL); 1571 } 1572 1573 for (dtl = lck->delete_tokens; dtl; dtl = dtl->next) { 1574 if (dtl->name_hash == fsp->name_hash) { 1575 lck->modified = true; 1576 if (delete_on_close == false) { 1577 /* Delete this entry. */ 1578 DLIST_REMOVE(lck->delete_tokens, dtl); 1579 TALLOC_FREE(dtl); 1580 } else { 1581 /* Replace this token with the 1582 given tok. 
*/ 1583 TALLOC_FREE(dtl->delete_token); 1584 dtl->delete_token = copy_unix_token(dtl, tok); 1585 SMB_ASSERT(dtl->delete_token != NULL); 1586 TALLOC_FREE(dtl->delete_nt_token); 1587 dtl->delete_nt_token = dup_nt_token(dtl, nt_tok); 1588 SMB_ASSERT(dtl->delete_nt_token != NULL); 1589 } 1132 DATA_BLOB fid_blob = {}; 1133 enum ndr_err_code ndr_err; 1134 1135 SMB_ASSERT(nt_tok != NULL); 1136 SMB_ASSERT(tok != NULL); 1137 1138 for (i=0; i<d->num_delete_tokens; i++) { 1139 struct delete_token *dt = &d->delete_tokens[i]; 1140 if (dt->name_hash == fsp->name_hash) { 1141 d->modified = true; 1142 1143 /* Replace this token with the given tok. */ 1144 TALLOC_FREE(dt->delete_nt_token); 1145 dt->delete_nt_token = dup_nt_token(dt, nt_tok); 1146 SMB_ASSERT(dt->delete_nt_token != NULL); 1147 TALLOC_FREE(dt->delete_token); 1148 dt->delete_token = copy_unix_token(dt, tok); 1149 SMB_ASSERT(dt->delete_token != NULL); 1150 1590 1151 return; 1591 1152 } 1592 1153 } 1593 1154 1594 if (!delete_on_close) { 1595 /* Nothing to delete - not found. */ 1596 return; 1597 } 1598 1599 ret = add_delete_on_close_token(lck, fsp->name_hash, nt_tok, tok); 1155 ret = add_delete_on_close_token(lck->data, fsp->name_hash, nt_tok, tok); 1600 1156 SMB_ASSERT(ret); 1157 1158 ndr_err = ndr_push_struct_blob(&fid_blob, talloc_tos(), &fsp->file_id, 1159 (ndr_push_flags_fn_t)ndr_push_file_id); 1160 if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) { 1161 DEBUG(10, ("ndr_push_file_id failed: %s\n", 1162 ndr_errstr(ndr_err))); 1163 } 1164 1165 for (i=0; i<d->num_share_modes; i++) { 1166 struct share_mode_entry *e = &d->share_modes[i]; 1167 NTSTATUS status; 1168 1169 status = messaging_send( 1170 msg_ctx, e->pid, MSG_SMB_NOTIFY_CANCEL_DELETED, 1171 &fid_blob); 1172 1173 if (!NT_STATUS_IS_OK(status)) { 1174 struct server_id_buf tmp; 1175 DEBUG(10, ("%s: messaging_send to %s returned %s\n", 1176 __func__, server_id_str_buf(e->pid, &tmp), 1177 nt_errstr(status))); 1178 } 1179 } 1180 1181 TALLOC_FREE(fid_blob.data); 1601 1182 } 1602 1183 … … 1606 1187 { 1607 1188 struct share_mode_lock *lck; 1608 1189 1609 1190 DEBUG(10,("set_delete_on_close: %s delete on close flag for " 1610 " fnum = %d, file %s\n",1611 delete_on_close ? "Adding" : "Removing", fsp ->fnum,1191 "%s, file %s\n", 1192 delete_on_close ? 
"Adding" : "Removing", fsp_fnum_dbg(fsp), 1612 1193 fsp_str_dbg(fsp))); 1613 1194 1614 lck = get_share_mode_lock(talloc_tos(), fsp->file_id, NULL, NULL, 1615 NULL); 1195 lck = get_existing_share_mode_lock(talloc_tos(), fsp->file_id); 1616 1196 if (lck == NULL) { 1617 1197 return False; … … 1619 1199 1620 1200 if (delete_on_close) { 1621 set_delete_on_close_lck(fsp, lck, true, 1622 nt_tok, 1623 tok); 1201 set_delete_on_close_lck(fsp, lck, nt_tok, tok); 1624 1202 } else { 1625 set_delete_on_close_lck(fsp, lck, false, 1626 NULL, 1627 NULL); 1203 reset_delete_on_close_lck(fsp, lck); 1628 1204 } 1629 1205 … … 1641 1217 } 1642 1218 1219 static struct delete_token *find_delete_on_close_token( 1220 struct share_mode_data *d, uint32_t name_hash) 1221 { 1222 uint32_t i; 1223 1224 DEBUG(10, ("find_delete_on_close_token: name_hash = 0x%x\n", 1225 (unsigned int)name_hash)); 1226 1227 for (i=0; i<d->num_delete_tokens; i++) { 1228 struct delete_token *dt = &d->delete_tokens[i]; 1229 1230 DEBUG(10, ("find__delete_on_close_token: dt->name_hash = 0x%x\n", 1231 (unsigned int)dt->name_hash )); 1232 if (dt->name_hash == name_hash) { 1233 return dt; 1234 } 1235 } 1236 return NULL; 1237 } 1238 1643 1239 /**************************************************************************** 1644 1240 Return the NT token and UNIX token if there's a match. Return true if … … 1647 1243 1648 1244 bool get_delete_on_close_token(struct share_mode_lock *lck, 1649 uint32_t name_hash, 1650 const struct security_token **pp_nt_tok, 1651 const struct security_unix_token **pp_tok) 1652 { 1653 struct delete_token_list *dtl; 1654 1655 DEBUG(10,("get_delete_on_close_token: name_hash = 0x%x\n", 1656 (unsigned int)name_hash )); 1657 1658 for (dtl = lck->delete_tokens; dtl; dtl = dtl->next) { 1659 DEBUG(10,("get_delete_on_close_token: dtl->name_hash = 0x%x\n", 1660 (unsigned int)dtl->name_hash )); 1661 if (dtl->name_hash == name_hash) { 1662 if (pp_nt_tok) { 1663 *pp_nt_tok = dtl->delete_nt_token; 1664 } 1665 if (pp_tok) { 1666 *pp_tok = dtl->delete_token; 1667 } 1668 return true; 1669 } 1670 } 1671 return false; 1245 uint32_t name_hash, 1246 const struct security_token **pp_nt_tok, 1247 const struct security_unix_token **pp_tok) 1248 { 1249 struct delete_token *dt; 1250 1251 dt = find_delete_on_close_token(lck->data, name_hash); 1252 if (dt == NULL) { 1253 return false; 1254 } 1255 *pp_nt_tok = dt->delete_nt_token; 1256 *pp_tok = dt->delete_token; 1257 return true; 1672 1258 } 1673 1259 1674 1260 bool is_delete_on_close_set(struct share_mode_lock *lck, uint32_t name_hash) 1675 1261 { 1676 return get_delete_on_close_token(lck, name_hash, NULL, NULL);1262 return find_delete_on_close_token(lck->data, name_hash) != NULL; 1677 1263 } 1678 1264 … … 1686 1272 file_id_string_tos(&fileid))); 1687 1273 1688 lck = get_ share_mode_lock(NULL, fileid, NULL, NULL, NULL);1274 lck = get_existing_share_mode_lock(talloc_tos(), fileid); 1689 1275 if (lck == NULL) { 1690 1276 return False; 1691 1277 } 1692 1278 1693 if (timespec_compare(&lck-> changed_write_time, &write_time) != 0) {1694 lck-> modified = True;1695 lck-> changed_write_time = write_time;1279 if (timespec_compare(&lck->data->changed_write_time, &write_time) != 0) { 1280 lck->data->modified = True; 1281 lck->data->changed_write_time = write_time; 1696 1282 } 1697 1283 … … 1709 1295 file_id_string_tos(&fileid))); 1710 1296 1711 lck = get_ share_mode_lock(NULL, fileid, NULL, NULL, NULL);1297 lck = get_existing_share_mode_lock(talloc_tos(), fileid); 1712 1298 if (lck == NULL) { 1713 1299 return False; 
1714 1300 } 1715 1301 1716 if (timespec_compare(&lck-> old_write_time, &write_time) != 0) {1717 lck-> modified = True;1718 lck-> old_write_time = write_time;1302 if (timespec_compare(&lck->data->old_write_time, &write_time) != 0) { 1303 lck->data->modified = True; 1304 lck->data->old_write_time = write_time; 1719 1305 } 1720 1306 … … 1723 1309 } 1724 1310 1725 1726 struct forall_state { 1727 void (*fn)(const struct share_mode_entry *entry, 1728 const char *sharepath, 1729 const char *fname, 1730 void *private_data); 1731 void *private_data; 1732 }; 1733 1734 static int traverse_fn(struct db_record *rec, void *_state) 1735 { 1736 struct forall_state *state = (struct forall_state *)_state; 1737 int i; 1738 struct share_mode_lock *lck; 1739 1740 /* Ensure this is a locking_key record. */ 1741 if (rec->key.dsize != sizeof(struct file_id)) 1742 return 0; 1743 1744 lck = TALLOC_ZERO_P(talloc_tos(), struct share_mode_lock); 1745 if (lck == NULL) { 1746 return 0; 1747 } 1748 1749 if (!parse_share_modes(rec->value, lck)) { 1750 TALLOC_FREE(lck); 1751 DEBUG(1, ("parse_share_modes failed\n")); 1752 return 0; 1753 } 1754 1755 for (i=0; i<lck->num_share_modes; i++) { 1756 struct share_mode_entry *se = &lck->share_modes[i]; 1757 state->fn(se, 1758 lck->servicepath, 1759 lck->base_name, 1760 state->private_data); 1761 } 1762 TALLOC_FREE(lck); 1763 return 0; 1764 } 1765 1766 /******************************************************************* 1767 Call the specified function on each entry under management by the 1768 share mode system. 1769 ********************************************************************/ 1770 1771 int share_mode_forall(void (*fn)(const struct share_mode_entry *, const char *, 1772 const char *, void *), 1773 void *private_data) 1774 { 1775 struct forall_state state; 1776 1777 if (lock_db == NULL) 1778 return 0; 1779 1780 state.fn = fn; 1781 state.private_data = private_data; 1782 1783 return lock_db->traverse_read(lock_db, traverse_fn, (void *)&state); 1784 } 1311 struct timespec get_share_mode_write_time(struct share_mode_lock *lck) 1312 { 1313 struct share_mode_data *d = lck->data; 1314 1315 if (!null_timespec(d->changed_write_time)) { 1316 return d->changed_write_time; 1317 } 1318 return d->old_write_time; 1319 } -
vendor/current/source3/locking/posix.c
r740 r988 25 25 #include "system/filesys.h" 26 26 #include "locking/proto.h" 27 #include "dbwrap.h" 27 #include "dbwrap/dbwrap.h" 28 #include "dbwrap/dbwrap_rbt.h" 28 29 #include "util_tdb.h" 29 30 … … 83 84 ****************************************************************************/ 84 85 85 static bool posix_lock_in_range( SMB_OFF_T *offset_out, SMB_OFF_T*count_out,86 static bool posix_lock_in_range(off_t *offset_out, off_t *count_out, 86 87 uint64_t u_offset, uint64_t u_count) 87 88 { 88 SMB_OFF_T offset = (SMB_OFF_T)u_offset;89 SMB_OFF_T count = (SMB_OFF_T)u_count;89 off_t offset = (off_t)u_offset; 90 off_t count = (off_t)u_count; 90 91 91 92 /* 92 93 * For the type of system we are, attempt to 93 * find the maximum positive lock offset as an SMB_OFF_T.94 * find the maximum positive lock offset as an off_t. 94 95 */ 95 96 96 97 #if defined(MAX_POSITIVE_LOCK_OFFSET) /* Some systems have arbitrary limits. */ 97 98 98 SMB_OFF_T max_positive_lock_offset = (MAX_POSITIVE_LOCK_OFFSET); 99 100 #elif defined(LARGE_SMB_OFF_T) && !defined(HAVE_BROKEN_FCNTL64_LOCKS) 101 102 /* 103 * In this case SMB_OFF_T is 64 bits, 99 off_t max_positive_lock_offset = (MAX_POSITIVE_LOCK_OFFSET); 100 #else 101 /* 102 * In this case off_t is 64 bits, 104 103 * and the underlying system can handle 64 bit signed locks. 105 104 */ 106 105 107 SMB_OFF_T mask2 = ((SMB_OFF_T)0x4) << (SMB_OFF_T_BITS-4); 108 SMB_OFF_T mask = (mask2<<1); 109 SMB_OFF_T max_positive_lock_offset = ~mask; 110 111 #else /* !LARGE_SMB_OFF_T || HAVE_BROKEN_FCNTL64_LOCKS */ 112 113 /* 114 * In this case either SMB_OFF_T is 32 bits, 115 * or the underlying system cannot handle 64 bit signed locks. 116 * All offsets & counts must be 2^31 or less. 117 */ 118 119 SMB_OFF_T max_positive_lock_offset = 0x7FFFFFFF; 120 121 #endif /* !LARGE_SMB_OFF_T || HAVE_BROKEN_FCNTL64_LOCKS */ 122 106 off_t mask2 = ((off_t)0x4) << (SMB_OFF_T_BITS-4); 107 off_t mask = (mask2<<1); 108 off_t max_positive_lock_offset = ~mask; 109 110 #endif 123 111 /* 124 112 * POSIX locks of length zero mean lock to end-of-file. … … 127 115 */ 128 116 129 if (count == (SMB_OFF_T)0) {117 if (count == 0) { 130 118 DEBUG(10,("posix_lock_in_range: count = 0, ignoring.\n")); 131 119 return False; … … 138 126 139 127 if (u_offset & ~((uint64_t)max_positive_lock_offset)) { 140 DEBUG(10,("posix_lock_in_range: (offset = %.0f) offset > %.0f and we cannot handle this. Ignoring lock.\n", 141 (double)u_offset, (double)((uint64_t)max_positive_lock_offset) )); 128 DEBUG(10, ("posix_lock_in_range: (offset = %ju) offset > %ju " 129 "and we cannot handle this. Ignoring lock.\n", 130 (uintmax_t)u_offset, 131 (uintmax_t)max_positive_lock_offset)); 142 132 return False; 143 133 } … … 164 154 165 155 if (count == 0) { 166 DEBUG(10,("posix_lock_in_range: Count = 0. Ignoring lock u_offset = %.0f, u_count = %.0f\n", 167 (double)u_offset, (double)u_count )); 156 DEBUG(10, ("posix_lock_in_range: Count = 0. 
Ignoring lock " 157 "u_offset = %ju, u_count = %ju\n", 158 (uintmax_t)u_offset, 159 (uintmax_t)u_count)); 168 160 return False; 169 161 } … … 173 165 */ 174 166 175 DEBUG(10,("posix_lock_in_range: offset_out = %.0f, count_out = %.0f\n", 176 (double)offset, (double)count )); 167 DEBUG(10, ("posix_lock_in_range: offset_out = %ju, " 168 "count_out = %ju\n", 169 (uintmax_t)offset, (uintmax_t)count)); 177 170 178 171 *offset_out = offset; … … 183 176 184 177 bool smb_vfs_call_lock(struct vfs_handle_struct *handle, 185 struct files_struct *fsp, int op, SMB_OFF_Toffset,186 SMB_OFF_Tcount, int type)178 struct files_struct *fsp, int op, off_t offset, 179 off_t count, int type) 187 180 { 188 181 VFS_FIND(lock); 189 return handle->fns->lock (handle, fsp, op, offset, count, type);182 return handle->fns->lock_fn(handle, fsp, op, offset, count, type); 190 183 } 191 184 … … 195 188 ****************************************************************************/ 196 189 197 static bool posix_fcntl_lock(files_struct *fsp, int op, SMB_OFF_T offset, SMB_OFF_Tcount, int type)190 static bool posix_fcntl_lock(files_struct *fsp, int op, off_t offset, off_t count, int type) 198 191 { 199 192 bool ret; 200 193 201 DEBUG(8,("posix_fcntl_lock %d %d %.0f %.0f %d\n",fsp->fh->fd,op,(double)offset,(double)count,type)); 194 DEBUG(8,("posix_fcntl_lock %d %d %jd %jd %d\n", 195 fsp->fh->fd,op,(intmax_t)offset,(intmax_t)count,type)); 202 196 203 197 ret = SMB_VFS_LOCK(fsp, op, offset, count, type); … … 205 199 if (!ret && ((errno == EFBIG) || (errno == ENOLCK) || (errno == EINVAL))) { 206 200 207 DEBUG(0,("posix_fcntl_lock: WARNING: lock request at offset %.0f, length %.0f returned\n", 208 (double)offset,(double)count)); 209 DEBUGADD(0,("an %s error. This can happen when using 64 bit lock offsets\n", strerror(errno))); 210 DEBUGADD(0,("on 32 bit NFS mounted file systems.\n")); 201 DEBUG(0, ("posix_fcntl_lock: WARNING: lock request at offset " 202 "%ju, length %ju returned\n", 203 (uintmax_t)offset, (uintmax_t)count)); 204 DEBUGADD(0, ("an %s error. This can happen when using 64 bit " 205 "lock offsets\n", strerror(errno))); 206 DEBUGADD(0, ("on 32 bit NFS mounted file systems.\n")); 211 207 212 208 /* … … 215 211 */ 216 212 217 if (offset & ~(( SMB_OFF_T)0x7fffffff)) {213 if (offset & ~((off_t)0x7fffffff)) { 218 214 DEBUG(0,("Offset greater than 31 bits. 
Returning success.\n")); 219 215 return True; 220 216 } 221 217 222 if (count & ~(( SMB_OFF_T)0x7fffffff)) {218 if (count & ~((off_t)0x7fffffff)) { 223 219 /* 32 bit NFS file system, retry with smaller offset */ 224 220 DEBUG(0,("Count greater than 31 bits - retrying with 31 bit truncated length.\n")); … … 234 230 235 231 bool smb_vfs_call_getlock(struct vfs_handle_struct *handle, 236 struct files_struct *fsp, SMB_OFF_T*poffset,237 SMB_OFF_T*pcount, int *ptype, pid_t *ppid)232 struct files_struct *fsp, off_t *poffset, 233 off_t *pcount, int *ptype, pid_t *ppid) 238 234 { 239 235 VFS_FIND(getlock); 240 return handle->fns->getlock(handle, fsp, poffset, pcount, ptype, ppid); 236 return handle->fns->getlock_fn(handle, fsp, poffset, pcount, ptype, 237 ppid); 241 238 } 242 239 … … 246 243 ****************************************************************************/ 247 244 248 static bool posix_fcntl_getlock(files_struct *fsp, SMB_OFF_T *poffset, SMB_OFF_T*pcount, int *ptype)245 static bool posix_fcntl_getlock(files_struct *fsp, off_t *poffset, off_t *pcount, int *ptype) 249 246 { 250 247 pid_t pid; 251 248 bool ret; 252 249 253 DEBUG(8,("posix_fcntl_getlock %d %.0f %.0f %d\n", 254 fsp->fh->fd,(double)*poffset,(double)*pcount,*ptype)); 250 DEBUG(8, ("posix_fcntl_getlock %d %ju %ju %d\n", 251 fsp->fh->fd, (uintmax_t)*poffset, (uintmax_t)*pcount, 252 *ptype)); 255 253 256 254 ret = SMB_VFS_GETLOCK(fsp, poffset, pcount, ptype, &pid); … … 258 256 if (!ret && ((errno == EFBIG) || (errno == ENOLCK) || (errno == EINVAL))) { 259 257 260 DEBUG(0,("posix_fcntl_getlock: WARNING: lock request at offset %.0f, length %.0f returned\n", 261 (double)*poffset,(double)*pcount)); 262 DEBUGADD(0,("an %s error. This can happen when using 64 bit lock offsets\n", strerror(errno))); 263 DEBUGADD(0,("on 32 bit NFS mounted file systems.\n")); 258 DEBUG(0, ("posix_fcntl_getlock: WARNING: lock request at " 259 "offset %ju, length %ju returned\n", 260 (uintmax_t)*poffset, (uintmax_t)*pcount)); 261 DEBUGADD(0, ("an %s error. This can happen when using 64 bit " 262 "lock offsets\n", strerror(errno))); 263 DEBUGADD(0, ("on 32 bit NFS mounted file systems.\n")); 264 264 265 265 /* … … 268 268 */ 269 269 270 if (*poffset & ~(( SMB_OFF_T)0x7fffffff)) {270 if (*poffset & ~((off_t)0x7fffffff)) { 271 271 DEBUG(0,("Offset greater than 31 bits. 
Returning success.\n")); 272 272 return True; 273 273 } 274 274 275 if (*pcount & ~(( SMB_OFF_T)0x7fffffff)) {275 if (*pcount & ~((off_t)0x7fffffff)) { 276 276 /* 32 bit NFS file system, retry with smaller offset */ 277 277 DEBUG(0,("Count greater than 31 bits - retrying with 31 bit truncated length.\n")); … … 297 297 enum brl_flavour lock_flav) 298 298 { 299 SMB_OFF_Toffset;300 SMB_OFF_Tcount;299 off_t offset; 300 off_t count; 301 301 int posix_lock_type = map_posix_lock_type(fsp,*plock_type); 302 302 303 DEBUG(10, ("is_posix_locked: File %s, offset = %.0f, count = %.0f, "304 "type = %s\n", fsp_str_dbg(fsp), (double)*pu_offset,305 (double)*pu_count, posix_lock_type_name(*plock_type)));303 DEBUG(10, ("is_posix_locked: File %s, offset = %ju, count = %ju, " 304 "type = %s\n", fsp_str_dbg(fsp), (uintmax_t)*pu_offset, 305 (uintmax_t)*pu_count, posix_lock_type_name(*plock_type))); 306 306 307 307 /* … … 362 362 static TDB_DATA fd_array_key_fsp(files_struct *fsp) 363 363 { 364 return make_tdb_data((uint8 *)&fsp->file_id, sizeof(fsp->file_id));364 return make_tdb_data((uint8_t *)&fsp->file_id, sizeof(fsp->file_id)); 365 365 } 366 366 … … 404 404 405 405 /**************************************************************************** 406 The records in posix_pending_close_tdb are composed of an array of ints 407 keyed by dev/ino pair. 408 The first int is a reference count of the number of outstanding locks on 409 all open fd's on this dev/ino pair. Any subsequent ints are the fd's that 410 were open on this dev/ino pair that should have been closed, but can't as 411 the lock ref count is non zero. 406 The records in posix_pending_close_db are composed of an array of 407 ints keyed by dev/ino pair. Those ints are the fd's that were open on 408 this dev/ino pair that should have been closed, but can't as the lock 409 ref count is non zero. 
412 410 ****************************************************************************/ 413 411 … … 420 418 { 421 419 struct lock_ref_count_key tmp; 422 struct db_record *rec; 423 int lock_ref_count = 0; 420 int32_t lock_ref_count = 0; 424 421 NTSTATUS status; 425 422 426 rec = posix_pending_close_db->fetch_locked( 427 posix_pending_close_db, talloc_tos(), 428 locking_ref_count_key_fsp(fsp, &tmp)); 429 430 SMB_ASSERT(rec != NULL); 431 432 if (rec->value.dptr != NULL) { 433 SMB_ASSERT(rec->value.dsize == sizeof(lock_ref_count)); 434 memcpy(&lock_ref_count, rec->value.dptr, 435 sizeof(lock_ref_count)); 436 } 437 438 lock_ref_count++; 439 440 status = rec->store(rec, make_tdb_data((uint8 *)&lock_ref_count, 441 sizeof(lock_ref_count)), 0); 423 status = dbwrap_change_int32_atomic( 424 posix_pending_close_db, locking_ref_count_key_fsp(fsp, &tmp), 425 &lock_ref_count, 1); 442 426 443 427 SMB_ASSERT(NT_STATUS_IS_OK(status)); 444 445 TALLOC_FREE(rec); 428 SMB_ASSERT(lock_ref_count < INT32_MAX); 446 429 447 430 DEBUG(10,("increment_windows_lock_ref_count for file now %s = %d\n", 448 fsp_str_dbg(fsp), lock_ref_count));431 fsp_str_dbg(fsp), (int)lock_ref_count)); 449 432 } 450 433 … … 453 436 ****************************************************************************/ 454 437 455 void reduce_windows_lock_ref_count(files_struct *fsp, unsigned int dcount)438 static void decrement_windows_lock_ref_count(files_struct *fsp) 456 439 { 457 440 struct lock_ref_count_key tmp; 458 struct db_record *rec; 459 int lock_ref_count = 0; 441 int32_t lock_ref_count = 0; 460 442 NTSTATUS status; 461 443 462 rec = posix_pending_close_db->fetch_locked( 463 posix_pending_close_db, talloc_tos(), 464 locking_ref_count_key_fsp(fsp, &tmp)); 465 466 SMB_ASSERT((rec != NULL) 467 && (rec->value.dptr != NULL) 468 && (rec->value.dsize == sizeof(lock_ref_count))); 469 470 memcpy(&lock_ref_count, rec->value.dptr, sizeof(lock_ref_count)); 471 472 SMB_ASSERT(lock_ref_count > 0); 473 474 lock_ref_count -= dcount; 475 476 status = rec->store(rec, make_tdb_data((uint8 *)&lock_ref_count, 477 sizeof(lock_ref_count)), 0); 444 status = dbwrap_change_int32_atomic( 445 posix_pending_close_db, locking_ref_count_key_fsp(fsp, &tmp), 446 &lock_ref_count, -1); 478 447 479 448 SMB_ASSERT(NT_STATUS_IS_OK(status)); 480 481 TALLOC_FREE(rec); 449 SMB_ASSERT(lock_ref_count >= 0); 482 450 483 451 DEBUG(10,("reduce_windows_lock_ref_count for file now %s = %d\n", 484 fsp_str_dbg(fsp), lock_ref_count)); 485 } 486 487 static void decrement_windows_lock_ref_count(files_struct *fsp) 488 { 489 reduce_windows_lock_ref_count(fsp, 1); 452 fsp_str_dbg(fsp), (int)lock_ref_count)); 490 453 } 491 454 … … 494 457 ****************************************************************************/ 495 458 496 static int get_windows_lock_ref_count(files_struct *fsp)459 static int32_t get_windows_lock_ref_count(files_struct *fsp) 497 460 { 498 461 struct lock_ref_count_key tmp; 499 TDB_DATA dbuf; 500 int res; 501 int lock_ref_count = 0; 502 503 res = posix_pending_close_db->fetch( 504 posix_pending_close_db, talloc_tos(), 505 locking_ref_count_key_fsp(fsp, &tmp), &dbuf); 506 507 SMB_ASSERT(res == 0); 508 509 if (dbuf.dsize != 0) { 510 SMB_ASSERT(dbuf.dsize == sizeof(lock_ref_count)); 511 memcpy(&lock_ref_count, dbuf.dptr, sizeof(lock_ref_count)); 512 TALLOC_FREE(dbuf.dptr); 513 } 514 515 DEBUG(10,("get_windows_lock_count for file %s = %d\n", 516 fsp_str_dbg(fsp), lock_ref_count)); 517 462 NTSTATUS status; 463 int32_t lock_ref_count = 0; 464 465 status = dbwrap_fetch_int32( 466 
posix_pending_close_db, locking_ref_count_key_fsp(fsp, &tmp), 467 &lock_ref_count); 468 469 if (!NT_STATUS_IS_OK(status) && 470 !NT_STATUS_EQUAL(status, NT_STATUS_NOT_FOUND)) { 471 DEBUG(0, ("get_windows_lock_ref_count: Error fetching " 472 "lock ref count for file %s: %s\n", 473 fsp_str_dbg(fsp), nt_errstr(status))); 474 } 518 475 return lock_ref_count; 519 476 } … … 526 483 { 527 484 struct lock_ref_count_key tmp; 528 struct db_record *rec;529 530 rec = posix_pending_close_db->fetch_locked(531 posix_pending_close_db, talloc_tos(),532 locking_ref_count_key_fsp(fsp, &tmp));533 534 SMB_ASSERT(rec != NULL);535 485 536 486 /* Not a bug if it doesn't exist - no locks were ever granted. */ 537 487 538 rec->delete_rec(rec);539 TALLOC_FREE(rec);488 dbwrap_delete(posix_pending_close_db, 489 locking_ref_count_key_fsp(fsp, &tmp)); 540 490 541 491 DEBUG(10,("delete_windows_lock_ref_count for file %s\n", … … 550 500 { 551 501 struct db_record *rec; 552 uint8_t *new_data; 502 int *fds; 503 size_t num_fds; 553 504 NTSTATUS status; 554 555 rec = posix_pending_close_db->fetch_locked( 505 TDB_DATA value; 506 507 rec = dbwrap_fetch_locked( 556 508 posix_pending_close_db, talloc_tos(), 557 509 fd_array_key_fsp(fsp)); … … 559 511 SMB_ASSERT(rec != NULL); 560 512 561 new_data = TALLOC_ARRAY( 562 rec, uint8_t, rec->value.dsize + sizeof(fsp->fh->fd)); 563 564 SMB_ASSERT(new_data != NULL); 565 566 memcpy(new_data, rec->value.dptr, rec->value.dsize); 567 memcpy(new_data + rec->value.dsize, 568 &fsp->fh->fd, sizeof(fsp->fh->fd)); 569 570 status = rec->store( 571 rec, make_tdb_data(new_data, 572 rec->value.dsize + sizeof(fsp->fh->fd)), 0); 513 value = dbwrap_record_get_value(rec); 514 SMB_ASSERT((value.dsize % sizeof(int)) == 0); 515 516 num_fds = value.dsize / sizeof(int); 517 fds = talloc_array(rec, int, num_fds+1); 518 519 SMB_ASSERT(fds != NULL); 520 521 memcpy(fds, value.dptr, value.dsize); 522 fds[num_fds] = fsp->fh->fd; 523 524 status = dbwrap_record_store( 525 rec, make_tdb_data((uint8_t *)fds, talloc_get_size(fds)), 0); 573 526 574 527 SMB_ASSERT(NT_STATUS_IS_OK(status)); … … 588 541 struct db_record *rec; 589 542 590 rec = posix_pending_close_db->fetch_locked(543 rec = dbwrap_fetch_locked( 591 544 posix_pending_close_db, talloc_tos(), 592 545 fd_array_key_fsp(fsp)); 593 546 594 547 SMB_ASSERT(rec != NULL); 595 rec->delete_rec(rec);548 dbwrap_record_delete(rec); 596 549 TALLOC_FREE(rec); 597 550 } … … 606 559 { 607 560 TDB_DATA dbuf; 608 int res;609 610 res = posix_pending_close_db->fetch(561 NTSTATUS status; 562 563 status = dbwrap_fetch( 611 564 posix_pending_close_db, mem_ctx, fd_array_key_fsp(fsp), 612 565 &dbuf); 613 566 614 SMB_ASSERT(res == 0); 567 if (NT_STATUS_EQUAL(status, NT_STATUS_NOT_FOUND)) { 568 *entries = NULL; 569 return 0; 570 } 571 572 SMB_ASSERT(NT_STATUS_IS_OK(status)); 615 573 616 574 if (dbuf.dsize == 0) { … … 716 674 struct lock_list *next; 717 675 struct lock_list *prev; 718 SMB_OFF_Tstart;719 SMB_OFF_Tsize;676 off_t start; 677 off_t size; 720 678 }; 721 679 … … 729 687 struct lock_list *lhead, 730 688 const struct lock_context *lock_ctx, /* Lock context lhead belongs to. 
*/ 731 files_struct *fsp,732 689 const struct lock_struct *plocks, 733 690 int num_locks) … … 740 697 */ 741 698 742 DEBUG(10, ("posix_lock_list: curr: start=%.0f,size=%.0f\n",743 (double)lhead->start, (double)lhead->size ));699 DEBUG(10, ("posix_lock_list: curr: start=%ju,size=%ju\n", 700 (uintmax_t)lhead->start, (uintmax_t)lhead->size )); 744 701 745 702 for (i=0; i<num_locks && lhead; i++) { … … 753 710 754 711 /* Ignore locks not owned by this process. */ 755 if (! procid_equal(&lock->context.pid, &lock_ctx->pid)) {712 if (!serverid_equal(&lock->context.pid, &lock_ctx->pid)) { 756 713 continue; 757 714 } … … 765 722 for (l_curr = lhead; l_curr;) { 766 723 767 DEBUG(10,("posix_lock_list: lock: fnum=%d: start=%.0f,size=%.0f:type=%s", lock->fnum, 768 (double)lock->start, (double)lock->size, posix_lock_type_name(lock->lock_type) )); 724 DEBUG(10, ("posix_lock_list: lock: fnum=%ju: " 725 "start=%ju,size=%ju:type=%s", 726 (uintmax_t)lock->fnum, 727 (uintmax_t)lock->start, 728 (uintmax_t)lock->size, 729 posix_lock_type_name(lock->lock_type) )); 769 730 770 731 if ( (l_curr->start >= (lock->start + lock->size)) || … … 840 801 l_curr->start = lock->start + lock->size; 841 802 842 DEBUG(10,(" truncate high case: start=%.0f,size=%.0f\n", 843 (double)l_curr->start, (double)l_curr->size )); 803 DEBUG(10, (" truncate high case: start=%ju," 804 "size=%ju\n", 805 (uintmax_t)l_curr->start, 806 (uintmax_t)l_curr->size )); 844 807 845 808 l_curr = l_curr->next; … … 868 831 l_curr->size = lock->start - l_curr->start; 869 832 870 DEBUG(10,(" truncate low case: start=%.0f,size=%.0f\n", 871 (double)l_curr->start, (double)l_curr->size )); 833 DEBUG(10, (" truncate low case: start=%ju," 834 "size=%ju\n", 835 (uintmax_t)l_curr->start, 836 (uintmax_t)l_curr->size )); 872 837 873 838 l_curr = l_curr->next; … … 893 858 +-------+ +---------+ 894 859 **********************************************/ 895 struct lock_list *l_new = TALLOC_P(ctx, struct lock_list);860 struct lock_list *l_new = talloc(ctx, struct lock_list); 896 861 897 862 if(l_new == NULL) { … … 907 872 l_curr->size = lock->start - l_curr->start; 908 873 909 DEBUG(10,(" split case: curr: start=%.0f,size=%.0f \ 910 new: start=%.0f,size=%.0f\n", (double)l_curr->start, (double)l_curr->size, 911 (double)l_new->start, (double)l_new->size )); 874 DEBUG(10, (" split case: curr: start=%ju," 875 "size=%ju new: start=%ju," 876 "size=%ju\n", 877 (uintmax_t)l_curr->start, 878 (uintmax_t)l_curr->size, 879 (uintmax_t)l_new->start, 880 (uintmax_t)l_new->size )); 912 881 913 882 /* … … 927 896 char *msg = NULL; 928 897 929 if (asprintf(&msg, "logic flaw in cases: l_curr: start = %.0f, size = %.0f : \ 930 lock: start = %.0f, size = %.0f", (double)l_curr->start, (double)l_curr->size, (double)lock->start, (double)lock->size ) != -1) { 898 if (asprintf(&msg, "logic flaw in cases: " 899 "l_curr: start = %ju, " 900 "size = %ju : lock: " 901 "start = %ju, size = %ju", 902 (uintmax_t)l_curr->start, 903 (uintmax_t)l_curr->size, 904 (uintmax_t)lock->start, 905 (uintmax_t)lock->size ) != -1) { 931 906 smb_panic(msg); 932 907 } else { … … 954 929 int *errno_ret) 955 930 { 956 SMB_OFF_Toffset;957 SMB_OFF_Tcount;931 off_t offset; 932 off_t count; 958 933 int posix_lock_type = map_posix_lock_type(fsp,lock_type); 959 934 bool ret = True; … … 963 938 struct lock_list *ll = NULL; 964 939 965 DEBUG(5, ("set_posix_lock_windows_flavour: File %s, offset = %.0f, "966 "count = %.0f, type = %s\n", fsp_str_dbg(fsp),967 (double)u_offset, (double)u_count,968 posix_lock_type_name(lock_type)));940 
DEBUG(5, ("set_posix_lock_windows_flavour: File %s, offset = %ju, " 941 "count = %ju, type = %s\n", fsp_str_dbg(fsp), 942 (uintmax_t)u_offset, (uintmax_t)u_count, 943 posix_lock_type_name(lock_type))); 969 944 970 945 /* … … 1001 976 } 1002 977 1003 if ((ll = TALLOC_P(l_ctx, struct lock_list)) == NULL) {978 if ((ll = talloc(l_ctx, struct lock_list)) == NULL) { 1004 979 DEBUG(0,("set_posix_lock_windows_flavour: unable to talloc unlock list.\n")); 1005 980 talloc_destroy(l_ctx); … … 1029 1004 llist, 1030 1005 lock_ctx, /* Lock context llist belongs to. */ 1031 fsp,1032 1006 plocks, 1033 1007 num_locks); … … 1043 1017 count = ll->size; 1044 1018 1045 DEBUG(5,("set_posix_lock_windows_flavour: Real lock: Type = %s: offset = %.0f, count = %.0f\n", 1046 posix_lock_type_name(posix_lock_type), (double)offset, (double)count )); 1047 1048 if (!posix_fcntl_lock(fsp,SMB_F_SETLK,offset,count,posix_lock_type)) { 1019 DEBUG(5, ("set_posix_lock_windows_flavour: Real lock: " 1020 "Type = %s: offset = %ju, count = %ju\n", 1021 posix_lock_type_name(posix_lock_type), 1022 (uintmax_t)offset, (uintmax_t)count )); 1023 1024 if (!posix_fcntl_lock(fsp,F_SETLK,offset,count,posix_lock_type)) { 1049 1025 *errno_ret = errno; 1050 DEBUG(5,("set_posix_lock_windows_flavour: Lock fail !: Type = %s: offset = %.0f, count = %.0f. Errno = %s\n", 1051 posix_lock_type_name(posix_lock_type), (double)offset, (double)count, strerror(errno) )); 1026 DEBUG(5, ("set_posix_lock_windows_flavour: Lock " 1027 "fail !: Type = %s: offset = %ju, " 1028 "count = %ju. Errno = %s\n", 1029 posix_lock_type_name(posix_lock_type), 1030 (uintmax_t)offset, (uintmax_t)count, 1031 strerror(errno) )); 1052 1032 ret = False; 1053 1033 break; … … 1065 1045 count = ll->size; 1066 1046 1067 DEBUG(5,("set_posix_lock_windows_flavour: Backing out locks: Type = %s: offset = %.0f, count = %.0f\n", 1068 posix_lock_type_name(posix_lock_type), (double)offset, (double)count )); 1069 1070 posix_fcntl_lock(fsp,SMB_F_SETLK,offset,count,F_UNLCK); 1047 DEBUG(5, ("set_posix_lock_windows_flavour: Backing " 1048 "out locks: Type = %s: offset = %ju, " 1049 "count = %ju\n", 1050 posix_lock_type_name(posix_lock_type), 1051 (uintmax_t)offset, (uintmax_t)count )); 1052 1053 posix_fcntl_lock(fsp,F_SETLK,offset,count,F_UNLCK); 1071 1054 } 1072 1055 } else { … … 1092 1075 int num_locks) 1093 1076 { 1094 SMB_OFF_Toffset;1095 SMB_OFF_Tcount;1077 off_t offset; 1078 off_t count; 1096 1079 bool ret = True; 1097 1080 TALLOC_CTX *ul_ctx = NULL; … … 1099 1082 struct lock_list *ul = NULL; 1100 1083 1101 DEBUG(5, ("release_posix_lock_windows_flavour: File %s, offset = %.0f, "1102 "count = %.0f\n", fsp_str_dbg(fsp),1103 (double)u_offset, (double)u_count));1084 DEBUG(5, ("release_posix_lock_windows_flavour: File %s, offset = %ju, " 1085 "count = %ju\n", fsp_str_dbg(fsp), 1086 (uintmax_t)u_offset, (uintmax_t)u_count)); 1104 1087 1105 1088 /* Remember the number of Windows locks we have on this dev/ino pair. */ … … 1120 1103 } 1121 1104 1122 if ((ul = TALLOC_P(ul_ctx, struct lock_list)) == NULL) {1105 if ((ul = talloc(ul_ctx, struct lock_list)) == NULL) { 1123 1106 DEBUG(0,("release_posix_lock_windows_flavour: unable to talloc unlock list.\n")); 1124 1107 talloc_destroy(ul_ctx); … … 1149 1132 ulist, 1150 1133 lock_ctx, /* Lock context ulist belongs to. 
*/ 1151 fsp,1152 1134 plocks, 1153 1135 num_locks); … … 1164 1146 (!ulist || ulist->next != NULL || ulist->start != offset || ulist->size != count)) { 1165 1147 1166 DEBUG(5,("release_posix_lock_windows_flavour: downgrading lock to READ: offset = %.0f, count = %.0f\n", 1167 (double)offset, (double)count )); 1168 1169 if (!posix_fcntl_lock(fsp,SMB_F_SETLK,offset,count,F_RDLCK)) { 1148 DEBUG(5, ("release_posix_lock_windows_flavour: downgrading " 1149 "lock to READ: offset = %ju, count = %ju\n", 1150 (uintmax_t)offset, (uintmax_t)count )); 1151 1152 if (!posix_fcntl_lock(fsp,F_SETLK,offset,count,F_RDLCK)) { 1170 1153 DEBUG(0,("release_posix_lock_windows_flavour: downgrade of lock failed with error %s !\n", strerror(errno) )); 1171 1154 talloc_destroy(ul_ctx); … … 1182 1165 count = ulist->size; 1183 1166 1184 DEBUG(5,("release_posix_lock_windows_flavour: Real unlock: offset = %.0f, count = %.0f\n", 1185 (double)offset, (double)count )); 1186 1187 if (!posix_fcntl_lock(fsp,SMB_F_SETLK,offset,count,F_UNLCK)) { 1167 DEBUG(5, ("release_posix_lock_windows_flavour: Real unlock: " 1168 "offset = %ju, count = %ju\n", 1169 (uintmax_t)offset, (uintmax_t)count )); 1170 1171 if (!posix_fcntl_lock(fsp,F_SETLK,offset,count,F_UNLCK)) { 1188 1172 ret = False; 1189 1173 } … … 1214 1198 int *errno_ret) 1215 1199 { 1216 SMB_OFF_Toffset;1217 SMB_OFF_Tcount;1200 off_t offset; 1201 off_t count; 1218 1202 int posix_lock_type = map_posix_lock_type(fsp,lock_type); 1219 1203 1220 DEBUG(5,("set_posix_lock_posix_flavour: File %s, offset = % .0f, count "1221 "= % .0f, type = %s\n", fsp_str_dbg(fsp),1222 ( double)u_offset, (double)u_count,1204 DEBUG(5,("set_posix_lock_posix_flavour: File %s, offset = %ju, count " 1205 "= %ju, type = %s\n", fsp_str_dbg(fsp), 1206 (uintmax_t)u_offset, (uintmax_t)u_count, 1223 1207 posix_lock_type_name(lock_type))); 1224 1208 … … 1232 1216 } 1233 1217 1234 if (!posix_fcntl_lock(fsp, SMB_F_SETLK,offset,count,posix_lock_type)) {1218 if (!posix_fcntl_lock(fsp,F_SETLK,offset,count,posix_lock_type)) { 1235 1219 *errno_ret = errno; 1236 DEBUG(5,("set_posix_lock_posix_flavour: Lock fail !: Type = %s: offset = % .0f, count = %.0f. Errno = %s\n",1237 posix_lock_type_name(posix_lock_type), ( double)offset, (double)count, strerror(errno) ));1220 DEBUG(5,("set_posix_lock_posix_flavour: Lock fail !: Type = %s: offset = %ju, count = %ju. Errno = %s\n", 1221 posix_lock_type_name(posix_lock_type), (intmax_t)offset, (intmax_t)count, strerror(errno) )); 1238 1222 return False; 1239 1223 } … … 1258 1242 { 1259 1243 bool ret = True; 1260 SMB_OFF_Toffset;1261 SMB_OFF_Tcount;1244 off_t offset; 1245 off_t count; 1262 1246 TALLOC_CTX *ul_ctx = NULL; 1263 1247 struct lock_list *ulist = NULL; 1264 1248 struct lock_list *ul = NULL; 1265 1249 1266 DEBUG(5, ("release_posix_lock_posix_flavour: File %s, offset = %.0f, "1267 "count = %.0f\n", fsp_str_dbg(fsp),1268 (double)u_offset, (double)u_count));1250 DEBUG(5, ("release_posix_lock_posix_flavour: File %s, offset = %ju, " 1251 "count = %ju\n", fsp_str_dbg(fsp), 1252 (uintmax_t)u_offset, (uintmax_t)u_count)); 1269 1253 1270 1254 /* … … 1282 1266 } 1283 1267 1284 if ((ul = TALLOC_P(ul_ctx, struct lock_list)) == NULL) {1268 if ((ul = talloc(ul_ctx, struct lock_list)) == NULL) { 1285 1269 DEBUG(0,("release_posix_lock_windows_flavour: unable to talloc unlock list.\n")); 1286 1270 talloc_destroy(ul_ctx); … … 1307 1291 ulist, 1308 1292 lock_ctx, /* Lock context ulist belongs to. 
*/ 1309 fsp,1310 1293 plocks, 1311 1294 num_locks); … … 1319 1302 count = ulist->size; 1320 1303 1321 DEBUG(5,("release_posix_lock_posix_flavour: Real unlock: offset = %.0f, count = %.0f\n", 1322 (double)offset, (double)count )); 1323 1324 if (!posix_fcntl_lock(fsp,SMB_F_SETLK,offset,count,F_UNLCK)) { 1304 DEBUG(5, ("release_posix_lock_posix_flavour: Real unlock: " 1305 "offset = %ju, count = %ju\n", 1306 (uintmax_t)offset, (uintmax_t)count )); 1307 1308 if (!posix_fcntl_lock(fsp,F_SETLK,offset,count,F_UNLCK)) { 1325 1309 ret = False; 1326 1310 } -
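Aside: the posix.c hunks above repeatedly switch from the private SMB_F_SETLK constant to the plain F_SETLK command and from SMB_OFF_T to off_t, so the flavour helpers now call straight into the standard POSIX record-locking API. The following is a minimal, self-contained sketch of that underlying primitive, assuming only standard headers; lock_range() and its parameters are invented for illustration and are not Samba's posix_fcntl_lock() wrapper.

/* Minimal sketch: apply or remove a POSIX byte-range lock with fcntl().
 * Mirrors the F_SETLK / F_RDLCK / F_WRLCK / F_UNLCK calls seen in the
 * flavour helpers above; lock_range() is a hypothetical helper name. */
#include <errno.h>
#include <fcntl.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

static bool lock_range(int fd, short lock_type, /* F_RDLCK, F_WRLCK or F_UNLCK */
                       off_t offset, off_t count)
{
        struct flock fl;

        memset(&fl, 0, sizeof(fl));
        fl.l_type = lock_type;
        fl.l_whence = SEEK_SET;   /* offsets are absolute file offsets */
        fl.l_start = offset;
        fl.l_len = count;         /* 0 would mean "lock to end of file" */

        if (fcntl(fd, F_SETLK, &fl) == -1) {
                /* F_SETLK is non-blocking: EACCES or EAGAIN means a
                 * conflicting lock is held by another process. */
                fprintf(stderr, "lock failed at %ju/%ju: %s\n",
                        (uintmax_t)offset, (uintmax_t)count, strerror(errno));
                return false;
        }
        return true;
}

A caller would take the write path with F_WRLCK, downgrade with F_RDLCK and release with F_UNLCK, which are exactly the three lock types the windows-flavour and posix-flavour helpers pass through. The (uintmax_t)/%ju casts in the sketch follow the same portable-printing idiom this changeset introduces in the DEBUG statements.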
vendor/current/source3/locking/proto.h
r746 r988 26 26 /* The following definitions come from locking/brlock.c */ 27 27 28 bool brl_same_context(const struct lock_context *ctx1,29 const struct lock_context *ctx2);30 NTSTATUS brl_lock_failed(files_struct *fsp, const struct lock_struct *lock, bool blocking_lock);31 28 void brl_init(bool read_only); 32 29 void brl_shutdown(void); 30 31 unsigned int brl_num_locks(const struct byte_range_lock *brl); 32 struct files_struct *brl_fsp(struct byte_range_lock *brl); 33 uint32_t brl_num_read_oplocks(const struct byte_range_lock *brl); 34 void brl_set_num_read_oplocks(struct byte_range_lock *brl, 35 uint32_t num_read_oplocks); 33 36 34 37 NTSTATUS brl_lock_windows_default(struct byte_range_lock *br_lck, … … 45 48 enum brl_flavour lock_flav, 46 49 bool blocking_lock, 47 uint64_t *psmblctx, 48 struct blocking_lock_record *blr); 50 uint64_t *psmblctx); 49 51 bool brl_unlock(struct messaging_context *msg_ctx, 50 52 struct byte_range_lock *br_lck, … … 58 60 const struct lock_struct *plock); 59 61 bool brl_locktest(struct byte_range_lock *br_lck, 60 uint64_t smblctx, 61 struct server_id pid, 62 br_off start, 63 br_off size, 64 enum brl_type lock_type, 65 enum brl_flavour lock_flav); 62 const struct lock_struct *rw_probe); 66 63 NTSTATUS brl_lockquery(struct byte_range_lock *br_lck, 67 64 uint64_t *psmblctx, … … 76 73 br_off start, 77 74 br_off size, 78 enum brl_flavour lock_flav, 79 struct blocking_lock_record *blr); 75 enum brl_flavour lock_flav); 80 76 bool brl_lock_cancel_default(struct byte_range_lock *br_lck, 81 77 struct lock_struct *plock); 78 bool brl_mark_disconnected(struct files_struct *fsp); 79 bool brl_reconnect_disconnected(struct files_struct *fsp); 82 80 void brl_close_fnum(struct messaging_context *msg_ctx, 83 81 struct byte_range_lock *br_lck); … … 91 89 files_struct *fsp); 92 90 struct byte_range_lock *brl_get_locks_readonly(files_struct *fsp); 93 void brl_register_msgs(struct messaging_context *msg_ctx); 91 void brl_revalidate(struct messaging_context *msg_ctx, 92 void *private_data, 93 uint32_t msg_type, 94 struct server_id server_id, 95 DATA_BLOB *data); 96 bool brl_cleanup_disconnected(struct file_id fid, uint64_t open_persistent_id); 94 97 95 98 /* The following definitions come from locking/locking.c */ … … 122 125 bool blocking_lock, 123 126 NTSTATUS *perr, 124 uint64_t *psmblctx, 125 struct blocking_lock_record *blr); 127 uint64_t *psmblctx); 126 128 NTSTATUS do_unlock(struct messaging_context *msg_ctx, 127 129 files_struct *fsp, … … 131 133 enum brl_flavour lock_flav); 132 134 NTSTATUS do_lock_cancel(files_struct *fsp, 133 uint64 smblctx,135 uint64_t smblctx, 134 136 uint64_t count, 135 137 uint64_t offset, 136 enum brl_flavour lock_flav, 137 struct blocking_lock_record *blr); 138 enum brl_flavour lock_flav); 138 139 void locking_close_file(struct messaging_context *msg_ctx, 139 140 files_struct *fsp, … … 143 144 bool locking_end(void); 144 145 char *share_mode_str(TALLOC_CTX *ctx, int num, const struct share_mode_entry *e); 145 struct share_mode_lock *get_share_mode_lock(TALLOC_CTX *mem_ctx, 146 const struct file_id id, 147 const char *servicepath, 148 const struct smb_filename *smb_fname, 149 const struct timespec *old_write_time); 146 struct share_mode_lock *get_existing_share_mode_lock(TALLOC_CTX *mem_ctx, 147 struct file_id id); 148 struct share_mode_lock *get_share_mode_lock( 149 TALLOC_CTX *mem_ctx, 150 struct file_id id, 151 const char *servicepath, 152 const struct smb_filename *smb_fname, 153 const struct timespec *old_write_time); 150 154 struct 
share_mode_lock *fetch_share_mode_unlocked(TALLOC_CTX *mem_ctx, 151 conststruct file_id id);155 struct file_id id); 152 156 bool rename_share_filename(struct messaging_context *msg_ctx, 153 157 struct share_mode_lock *lck, 158 struct file_id id, 154 159 const char *servicepath, 155 160 uint32_t orig_name_hash, … … 161 166 struct timespec *write_time); 162 167 bool is_valid_share_mode_entry(const struct share_mode_entry *e); 163 bool is_deferred_open_entry(const struct share_mode_entry *e); 164 bool is_unused_share_mode_entry(const struct share_mode_entry *e); 165 void set_share_mode(struct share_mode_lock *lck, files_struct *fsp, 166 uid_t uid, uint64_t mid, uint16 op_type); 167 void add_deferred_open(struct share_mode_lock *lck, uint64_t mid, 168 struct timeval request_time, 169 struct server_id pid, struct file_id id); 168 bool share_mode_stale_pid(struct share_mode_data *d, uint32_t idx); 169 bool set_share_mode(struct share_mode_lock *lck, struct files_struct *fsp, 170 uid_t uid, uint64_t mid, uint16_t op_type, 171 uint32_t lease_idx); 172 void remove_stale_share_mode_entries(struct share_mode_data *d); 170 173 bool del_share_mode(struct share_mode_lock *lck, files_struct *fsp); 171 void del_deferred_open_entry(struct share_mode_lock *lck, uint64_t mid,172 struct server_id pid);174 bool mark_share_mode_disconnected(struct share_mode_lock *lck, 175 struct files_struct *fsp); 173 176 bool remove_share_oplock(struct share_mode_lock *lck, files_struct *fsp); 174 177 bool downgrade_share_oplock(struct share_mode_lock *lck, files_struct *fsp); 175 NTSTATUS can_set_delete_on_close(files_struct *fsp, uint32 dosmode); 178 struct share_mode_lease; 179 NTSTATUS downgrade_share_lease(struct smbd_server_connection *sconn, 180 struct share_mode_lock *lck, 181 const struct smb2_lease_key *key, 182 uint32_t new_lease_state, 183 struct share_mode_lease **_l); 176 184 bool get_delete_on_close_token(struct share_mode_lock *lck, 177 uint32_t name_hash, 178 const struct security_token **pp_nt_tok, 179 const struct security_unix_token **pp_tok); 185 uint32_t name_hash, 186 const struct security_token **pp_nt_tok, 187 const struct security_unix_token **pp_tok); 188 void reset_delete_on_close_lck(files_struct *fsp, 189 struct share_mode_lock *lck); 180 190 void set_delete_on_close_lck(files_struct *fsp, 181 191 struct share_mode_lock *lck, 182 bool delete_on_close,183 192 const struct security_token *nt_tok, 184 193 const struct security_unix_token *tok); … … 189 198 bool set_sticky_write_time(struct file_id fileid, struct timespec write_time); 190 199 bool set_write_time(struct file_id fileid, struct timespec write_time); 191 int share_mode_forall(void (*fn)(const struct share_mode_entry *, const char *, 200 struct timespec get_share_mode_write_time(struct share_mode_lock *lck); 201 int share_mode_forall(int (*fn)(struct file_id fid, 202 const struct share_mode_data *data, 203 void *private_data), 204 void *private_data); 205 int share_entry_forall(int (*fn)(const struct share_mode_entry *, 206 const char *, const char *, 192 207 const char *, void *), 193 208 void *private_data); 209 bool share_mode_cleanup_disconnected(struct file_id id, 210 uint64_t open_persistent_id); 211 194 212 195 213 /* The following definitions come from locking/posix.c */ … … 202 220 bool posix_locking_init(bool read_only); 203 221 bool posix_locking_end(void); 204 void reduce_windows_lock_ref_count(files_struct *fsp, unsigned int dcount);205 222 int fd_close_posix(struct files_struct *fsp); 206 223 bool 
set_posix_lock_windows_flavour(files_struct *fsp,
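Aside: the new brl_num_locks(), brl_fsp(), brl_num_read_oplocks() and brl_set_num_read_oplocks() prototypes added to proto.h follow the common C encapsulation pattern in which a header exposes only an incomplete type plus accessor functions, while a single .c file owns the struct layout. A hypothetical miniature of the same pattern is sketched below; the widget names are invented for illustration and are not Samba code.

/* widget.h -- callers see only a forward declaration plus accessors. */
struct widget;                                   /* opaque to callers */
unsigned int widget_count(const struct widget *w);
void widget_set_count(struct widget *w, unsigned int count);

/* widget.c -- the only translation unit that knows the layout. */
#include <stdbool.h>

struct widget {
        unsigned int count;
        bool modified;
};

unsigned int widget_count(const struct widget *w)
{
        return w->count;
}

void widget_set_count(struct widget *w, unsigned int count)
{
        w->count = count;
        w->modified = true;   /* mark dirty so a later flush writes it back */
}

Keeping the layout private in one translation unit is what lets accessors like the ones added here shield callers from future changes to the structure's fields.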