Changeset 740 for vendor/current/source3/locking/brlock.c
- Timestamp: Nov 14, 2012, 12:59:34 PM (13 years ago)
- File: vendor/current/source3/locking/brlock.c (1 edited)
vendor/current/source3/locking/brlock.c
--- vendor/current/source3/locking/brlock.c	(r414)
+++ vendor/current/source3/locking/brlock.c	(r740)
@@ -26,4 +26,10 @@
 
 #include "includes.h"
+#include "system/filesys.h"
+#include "locking/proto.h"
+#include "smbd/globals.h"
+#include "dbwrap.h"
+#include "serverid.h"
+#include "messages.h"
 
 #undef DBGC_CLASS
@@ -42,7 +48,7 @@
 static void print_lock_struct(unsigned int i, struct lock_struct *pls)
 {
-	DEBUG(10,("[%u]: smbpid = %u, tid = %u, pid = %s, ",
+	DEBUG(10,("[%u]: smblctx = %llu, tid = %u, pid = %s, ",
 			i,
-			(unsigned int)pls->context.smbpid,
+			(unsigned long long)pls->context.smblctx,
 			(unsigned int)pls->context.tid,
 			procid_str(talloc_tos(), &pls->context.pid) ));
@@ -64,5 +70,5 @@
 {
 	return (procid_equal(&ctx1->pid, &ctx2->pid) &&
-		(ctx1->smbpid == ctx2->smbpid) &&
+		(ctx1->smblctx == ctx2->smblctx) &&
 		(ctx1->tid == ctx2->tid));
 }
@@ -271,5 +277,5 @@
 	}
 
-	tdb_flags = TDB_DEFAULT|TDB_VOLATILE|TDB_CLEAR_IF_FIRST;
+	tdb_flags = TDB_DEFAULT|TDB_VOLATILE|TDB_CLEAR_IF_FIRST|TDB_INCOMPATIBLE_HASH;
 
 	if (!lp_clustering()) {
@@ -333,14 +339,14 @@
 	SMB_ASSERT(plock->lock_type != UNLOCK_LOCK);
 
+	if ((plock->start + plock->size - 1 < plock->start) &&
+	    plock->size != 0) {
+		return NT_STATUS_INVALID_LOCK_RANGE;
+	}
+
 	for (i=0; i < br_lck->num_locks; i++) {
-		if (locks[i].start + locks[i].size < locks[i].start) {
-			/* 64-bit wrap. Error. */
-			return NT_STATUS_INVALID_LOCK_RANGE;
-		}
-
 		/* Do any Windows or POSIX locks conflict ? */
 		if (brl_conflict(&locks[i], plock)) {
 			/* Remember who blocked us. */
-			plock->context.smbpid = locks[i].context.smbpid;
+			plock->context.smblctx = locks[i].context.smblctx;
 			return brl_lock_failed(fsp,plock,blocking_lock);
 		}
@@ -373,5 +379,5 @@
 
 		/* We don't know who blocked us. */
-		plock->context.smbpid = 0xFFFFFFFF;
+		plock->context.smblctx = 0xFFFFFFFFFFFFFFFFLL;
 
 		if (errno_ret == EACCES || errno_ret == EAGAIN) {
@@ -716,6 +722,5 @@
 
 	/* Don't allow 64-bit lock wrap. */
-	if (plock->start + plock->size < plock->start ||
-			plock->start + plock->size < plock->size) {
+	if (plock->start + plock->size - 1 < plock->start) {
 		return NT_STATUS_INVALID_PARAMETER;
 	}
@@ -748,5 +753,5 @@
 				SAFE_FREE(tp);
 				/* Remember who blocked us. */
-				plock->context.smbpid = curr_lock->context.smbpid;
+				plock->context.smblctx = curr_lock->context.smblctx;
 				return NT_STATUS_FILE_LOCK_CONFLICT;
 			}
@@ -763,5 +768,5 @@
 				SAFE_FREE(tp);
 				/* Remember who blocked us. */
-				plock->context.smbpid = curr_lock->context.smbpid;
+				plock->context.smblctx = curr_lock->context.smblctx;
 				return NT_STATUS_FILE_LOCK_CONFLICT;
 			}
@@ -823,5 +828,5 @@
 
 		/* We don't know who blocked us. */
-		plock->context.smbpid = 0xFFFFFFFF;
+		plock->context.smblctx = 0xFFFFFFFFFFFFFFFFLL;
 
 		if (errno_ret == EACCES || errno_ret == EAGAIN) {
@@ -903,5 +908,5 @@
 NTSTATUS brl_lock(struct messaging_context *msg_ctx,
 		struct byte_range_lock *br_lck,
-		uint32 smbpid,
+		uint64_t smblctx,
 		struct server_id pid,
 		br_off start,
@@ -910,5 +915,5 @@
 		enum brl_flavour lock_flav,
 		bool blocking_lock,
-		uint32 *psmbpid,
+		uint64_t *psmblctx,
 		struct blocking_lock_record *blr)
 {
@@ -927,5 +932,5 @@
 #endif
 
-	lock.context.smbpid = smbpid;
+	lock.context.smblctx = smblctx;
 	lock.context.pid = pid;
 	lock.context.tid = br_lck->fsp->conn->cnum;
@@ -945,10 +950,10 @@
 #if ZERO_ZERO
 	/* sort the lock list */
-	qsort(br_lck->lock_data, (size_t)br_lck->num_locks, sizeof(lock), lock_compare);
+	TYPESAFE_QSORT(br_lck->lock_data, (size_t)br_lck->num_locks, lock_compare);
 #endif
 
 	/* If we're returning an error, return who blocked us. */
-	if (!NT_STATUS_IS_OK(ret) && psmbpid) {
-		*psmbpid = lock.context.smbpid;
+	if (!NT_STATUS_IS_OK(ret) && psmblctx) {
+		*psmblctx = lock.context.smblctx;
 	}
 	return ret;
@@ -997,4 +1002,8 @@
 	for (i = 0; i < br_lck->num_locks; i++) {
 		struct lock_struct *lock = &locks[i];
+
+		if (IS_PENDING_LOCK(lock->lock_type)) {
+			continue;
+		}
 
 		/* Only remove our own locks that match in start, size, and flavour. */
@@ -1232,5 +1241,5 @@
 bool brl_unlock(struct messaging_context *msg_ctx,
 		struct byte_range_lock *br_lck,
-		uint32 smbpid,
+		uint64_t smblctx,
 		struct server_id pid,
 		br_off start,
@@ -1240,5 +1249,5 @@
 	struct lock_struct lock;
 
-	lock.context.smbpid = smbpid;
+	lock.context.smblctx = smblctx;
 	lock.context.pid = pid;
 	lock.context.tid = br_lck->fsp->conn->cnum;
@@ -1263,5 +1272,5 @@
 
 bool brl_locktest(struct byte_range_lock *br_lck,
-		uint32 smbpid,
+		uint64_t smblctx,
 		struct server_id pid,
 		br_off start,
@@ -1276,5 +1285,5 @@
 	files_struct *fsp = br_lck->fsp;
 
-	lock.context.smbpid = smbpid;
+	lock.context.smblctx = smblctx;
 	lock.context.pid = pid;
 	lock.context.tid = br_lck->fsp->conn->cnum;
@@ -1321,5 +1330,5 @@
 
 NTSTATUS brl_lockquery(struct byte_range_lock *br_lck,
-		uint32 *psmbpid,
+		uint64_t *psmblctx,
 		struct server_id pid,
 		br_off *pstart,
@@ -1333,5 +1342,5 @@
 	files_struct *fsp = br_lck->fsp;
 
-	lock.context.smbpid = *psmbpid;
+	lock.context.smblctx = *psmblctx;
 	lock.context.pid = pid;
 	lock.context.tid = br_lck->fsp->conn->cnum;
@@ -1354,5 +1363,5 @@
 
 	if (conflict) {
-		*psmbpid = exlock->context.smbpid;
+		*psmblctx = exlock->context.smblctx;
 		*pstart = exlock->start;
 		*psize = exlock->size;
@@ -1375,6 +1384,6 @@
 
 	if (ret) {
-		/* Hmmm. No clue what to set smbpid to - use -1. */
-		*psmbpid = 0xFFFF;
+		/* Hmmm. No clue what to set smblctx to - use -1. */
+		*psmblctx = 0xFFFFFFFFFFFFFFFFLL;
 		return NT_STATUS_LOCK_NOT_GRANTED;
 	}
@@ -1398,5 +1407,5 @@
 ****************************************************************************/
 bool brl_lock_cancel(struct byte_range_lock *br_lck,
-		uint32 smbpid,
+		uint64_t smblctx,
 		struct server_id pid,
 		br_off start,
@@ -1408,5 +1417,5 @@
 	struct lock_struct lock;
 
-	lock.context.smbpid = smbpid;
+	lock.context.smblctx = smblctx;
 	lock.context.pid = pid;
 	lock.context.tid = br_lck->fsp->conn->cnum;
@@ -1477,133 +1486,34 @@
 	uint16 tid = fsp->conn->cnum;
 	int fnum = fsp->fnum;
-	unsigned int i, j, dcount=0;
-	int num_deleted_windows_locks = 0;
+	unsigned int i;
 	struct lock_struct *locks = br_lck->lock_data;
-	struct server_id pid = procid_self();
-	bool unlock_individually = False;
-	bool posix_level2_contention_ended = false;
-
-	if(lp_posix_locking(fsp->conn->params)) {
-
-		/* Check if there are any Windows locks associated with this dev/ino
-		   pair that are not this fnum. If so we need to call unlock on each
-		   one in order to release the system POSIX locks correctly. */
-
-		for (i=0; i < br_lck->num_locks; i++) {
-			struct lock_struct *lock = &locks[i];
-
-			if (!procid_equal(&lock->context.pid, &pid)) {
-				continue;
-			}
-
-			if (lock->lock_type != READ_LOCK && lock->lock_type != WRITE_LOCK) {
-				continue; /* Ignore pending. */
-			}
-
-			if (lock->context.tid != tid || lock->fnum != fnum) {
-				unlock_individually = True;
-				break;
-			}
-		}
-
-		if (unlock_individually) {
-			struct lock_struct *locks_copy;
-			unsigned int num_locks_copy;
-
-			/* Copy the current lock array. */
-			if (br_lck->num_locks) {
-				locks_copy = (struct lock_struct *)TALLOC_MEMDUP(br_lck, locks, br_lck->num_locks * sizeof(struct lock_struct));
-				if (!locks_copy) {
-					smb_panic("brl_close_fnum: talloc failed");
-				}
-			} else {
-				locks_copy = NULL;
-			}
-
-			num_locks_copy = br_lck->num_locks;
-
-			for (i=0; i < num_locks_copy; i++) {
-				struct lock_struct *lock = &locks_copy[i];
-
-				if (lock->context.tid == tid && procid_equal(&lock->context.pid, &pid) &&
-						(lock->fnum == fnum)) {
-					brl_unlock(msg_ctx,
-						br_lck,
-						lock->context.smbpid,
-						pid,
-						lock->start,
-						lock->size,
-						lock->lock_flav);
-				}
-			}
-			return;
-		}
-	}
-
-	/* We can bulk delete - any POSIX locks will be removed when the fd closes. */
-
-	/* Remove any existing locks for this fnum (or any fnum if they're POSIX). */
-
-	for (i=0; i < br_lck->num_locks; i++) {
-		struct lock_struct *lock = &locks[i];
-		bool del_this_lock = False;
-
-		if (lock->context.tid == tid && procid_equal(&lock->context.pid, &pid)) {
-			if ((lock->lock_flav == WINDOWS_LOCK) && (lock->fnum == fnum)) {
-				del_this_lock = True;
-				num_deleted_windows_locks++;
-				contend_level2_oplocks_end(br_lck->fsp,
-					LEVEL2_CONTEND_WINDOWS_BRL);
-			} else if (lock->lock_flav == POSIX_LOCK) {
-				del_this_lock = True;
-
-				/* Only end level2 contention once for posix */
-				if (!posix_level2_contention_ended) {
-					posix_level2_contention_ended = true;
-					contend_level2_oplocks_end(br_lck->fsp,
-						LEVEL2_CONTEND_POSIX_BRL);
-				}
-			}
-		}
-
-		if (del_this_lock) {
-			/* Send unlock messages to any pending waiters that overlap. */
-			for (j=0; j < br_lck->num_locks; j++) {
-				struct lock_struct *pend_lock = &locks[j];
-
-				/* Ignore our own or non-pending locks. */
-				if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
-					continue;
-				}
-
-				/* Optimisation - don't send to this fnum as we're
-				   closing it. */
-				if (pend_lock->context.tid == tid &&
-						procid_equal(&pend_lock->context.pid, &pid) &&
-						pend_lock->fnum == fnum) {
-					continue;
-				}
-
-				/* We could send specific lock info here... */
-				if (brl_pending_overlap(lock, pend_lock)) {
-					messaging_send(msg_ctx, pend_lock->context.pid,
-							MSG_SMB_UNLOCK, &data_blob_null);
-				}
-			}
-
-			/* found it - delete it */
-			if (br_lck->num_locks > 1 && i < br_lck->num_locks - 1) {
-				memmove(&locks[i], &locks[i+1],
-					sizeof(*locks)*((br_lck->num_locks-1) - i));
-			}
-			br_lck->num_locks--;
-			br_lck->modified = True;
-			i--;
-			dcount++;
-		}
-	}
-
-	if(lp_posix_locking(fsp->conn->params) && num_deleted_windows_locks) {
-		/* Reduce the Windows lock POSIX reference count on this dev/ino pair. */
-		reduce_windows_lock_ref_count(fsp, num_deleted_windows_locks);
+	struct server_id pid = sconn_server_id(fsp->conn->sconn);
+	struct lock_struct *locks_copy;
+	unsigned int num_locks_copy;
+
+	/* Copy the current lock array. */
+	if (br_lck->num_locks) {
+		locks_copy = (struct lock_struct *)TALLOC_MEMDUP(br_lck, locks, br_lck->num_locks * sizeof(struct lock_struct));
+		if (!locks_copy) {
+			smb_panic("brl_close_fnum: talloc failed");
+		}
+	} else {
+		locks_copy = NULL;
+	}
+
+	num_locks_copy = br_lck->num_locks;
+
+	for (i=0; i < num_locks_copy; i++) {
+		struct lock_struct *lock = &locks_copy[i];
+
+		if (lock->context.tid == tid && procid_equal(&lock->context.pid, &pid) &&
+				(lock->fnum == fnum)) {
+			brl_unlock(msg_ctx,
+				br_lck,
+				lock->context.smblctx,
+				pid,
				lock->start,
+				lock->size,
+				lock->lock_flav);
+		}
 	}
@@ -1621,5 +1531,5 @@
 	for (i = 0; i < *pnum_entries; i++) {
 		struct lock_struct *lock_data = &locks[i];
-		if (!process_exists(lock_data->context.pid)) {
+		if (!serverid_exists(&lock_data->context.pid)) {
 			/* This process no longer exists - mark this
 			   entry as invalid by zeroing it. */
@@ -1643,5 +1553,5 @@
 	for (i = 0; i < *pnum_entries; i++) {
 		struct lock_struct *lock_data = &locks[i];
-		if (lock_data->context.smbpid &&
+		if (lock_data->context.smblctx &&
 				lock_data->context.tid) {
 			/* Valid (nonzero) entry - copy it. */
@@ -1757,5 +1667,5 @@
 ********************************************************************/
 
-static int byte_range_lock_destructor(struct byte_range_lock *br_lck)
+static void byte_range_lock_flush(struct byte_range_lock *br_lck)
 {
 	if (br_lck->read_only) {
@@ -1792,6 +1702,14 @@
  done:
 
+	br_lck->read_only = true;
+	br_lck->modified = false;
+
+	TALLOC_FREE(br_lck->record);
+}
+
+static int byte_range_lock_destructor(struct byte_range_lock *br_lck)
+{
+	byte_range_lock_flush(br_lck);
 	SAFE_FREE(br_lck->lock_data);
-	TALLOC_FREE(br_lck->record);
 	return 0;
 }
@@ -1808,4 +1726,5 @@
 	TDB_DATA key, data;
 	struct byte_range_lock *br_lck = TALLOC_P(mem_ctx, struct byte_range_lock);
+	bool do_read_only = read_only;
 
 	if (br_lck == NULL) {
@@ -1824,8 +1743,8 @@
 		/* We must be read/write to clean
 		   the dead entries. */
-		read_only = False;
-	}
-
-	if (read_only) {
+		do_read_only = false;
+	}
+
+	if (do_read_only) {
 		if (brlock_db->fetch(brlock_db, br_lck, key, &data) == -1) {
 			DEBUG(3, ("Could not fetch byte range lock record\n"));
@@ -1834,5 +1753,4 @@
 		}
 		br_lck->record = NULL;
-	}
-	else {
+	} else {
 		br_lck->record = brlock_db->fetch_locked(brlock_db, br_lck, key);
@@ -1847,5 +1765,5 @@
 	}
 
-	br_lck->read_only = read_only;
+	br_lck->read_only = do_read_only;
 	br_lck->lock_data = NULL;
 
@@ -1899,4 +1817,13 @@
 		}
 	}
+
+	if (do_read_only != read_only) {
+		/*
+		 * this stores the record and gets rid of
+		 * the write lock that is needed for a cleanup
+		 */
+		byte_range_lock_flush(br_lck);
+	}
+
 	return br_lck;
 }
@@ -1923,5 +1850,5 @@
 	TALLOC_FREE(fsp->brlock_rec);
 
-	br_lock = brl_get_locks_internal(talloc_tos(), fsp, false);
+	br_lock = brl_get_locks_internal(talloc_tos(), fsp, true);
 	if (br_lock == NULL) {
 		return NULL;
@@ -1929,25 +1856,6 @@
 	fsp->brlock_seqnum = brlock_db->get_seqnum(brlock_db);
 
-	fsp->brlock_rec = talloc_zero(fsp, struct byte_range_lock);
-	if (fsp->brlock_rec == NULL) {
-		goto fail;
-	}
-	fsp->brlock_rec->fsp = fsp;
-	fsp->brlock_rec->num_locks = br_lock->num_locks;
-	fsp->brlock_rec->read_only = true;
-	fsp->brlock_rec->key = br_lock->key;
-
-	fsp->brlock_rec->lock_data = (struct lock_struct *)
-		talloc_memdup(fsp->brlock_rec, br_lock->lock_data,
-			sizeof(struct lock_struct) * br_lock->num_locks);
-	if (fsp->brlock_rec->lock_data == NULL) {
-		goto fail;
-	}
-
-	TALLOC_FREE(br_lock);
+	fsp->brlock_rec = talloc_move(fsp, &br_lock);
+
 	return fsp->brlock_rec;
- fail:
-	TALLOC_FREE(br_lock);
-	TALLOC_FREE(fsp->brlock_rec);
-	return NULL;
 }
@@ -2030,6 +1938,5 @@
 	}
 
-	qsort(state->pids, state->num_pids, sizeof(state->pids[0]),
-	      compare_procids);
+	TYPESAFE_QSORT(state->pids, state->num_pids, compare_procids);
 
 	ZERO_STRUCT(last_pid);
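The subtlest edit in this changeset is the 64-bit wrap test on lock ranges: r414 flagged a range when start + size overflowed, while r740 instead asks whether the range's last byte, start + size - 1, wraps past the top of the 64-bit space, with the Windows-lock path additionally exempting zero-length ranges. The following is a minimal standalone sketch of the new condition, not part of the changeset; brl_range_wraps and the br_off typedef are illustrative stand-ins only.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

typedef uint64_t br_off;	/* assumption: an unsigned 64-bit byte offset, as used by brlock.c */

/* Hypothetical helper expressing the r740 check: a range wraps when its
 * last byte, start + size - 1, overflows past 2^64 - 1.  A zero-length
 * range never wraps, hence the size != 0 term. */
static bool brl_range_wraps(br_off start, br_off size)
{
	return (start + size - 1 < start) && (size != 0);
}

int main(void)
{
	assert(!brl_range_wraps(0, 0));			/* zero-length lock: the size != 0 term keeps it valid */
	assert(!brl_range_wraps(UINT64_MAX, 1));	/* lock on the very last byte: no wrap */
	assert(!brl_range_wraps(0, UINT64_MAX));	/* covers bytes 0 .. 2^64-2: no wrap */
	assert(brl_range_wraps(UINT64_MAX, 2));		/* last byte would be 2^64: wraps */
	assert(brl_range_wraps(2, UINT64_MAX));		/* last byte would be 2^64: wraps */
	return 0;
}

Under these assumptions, the boundary cases show why the test changed: a one-byte lock at offset 2^64-1 passes the new check, whereas the r414 form (start + size < start) rejected it because start + size wraps to zero.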