26 #define LOCK_MODULE_IMPLEMENTATION
32 #include "lock0lock.ic"
33 #include "lock0priv.ic"
/* Upper bound on the number of lock structs visited while searching the
transaction waits-for graph; exceeding it aborts the deadlock search with
LOCK_EXCEED_MAX_DEPTH (compared against *cost in lock_deadlock_recursive()). */
44 #define LOCK_MAX_N_STEPS_IN_DEADLOCK_CHECK 1000000
/* Upper bound on the recursion depth of the deadlock search; exceeding it
likewise aborts the search with LOCK_EXCEED_MAX_DEPTH. */
48 #define LOCK_MAX_DEPTH_IN_DEADLOCK_CHECK 200
/* When releasing a transaction's locks, temporarily release and re-acquire
the kernel mutex after this many lock structs, to bound mutex hold time. */
53 #define LOCK_RELEASE_KERNEL_INTERVAL 1000
/* Safety margin (in record slots) when sizing a record lock bitmap.
NOTE(review): its use site is not visible in this excerpt — presumably it
pads n_bits when creating a record lock; confirm against lock_rec_create(). */
59 #define LOCK_PAGE_BITMAP_MARGIN 64
/* Bit for the ordered pair (mode a, mode b) in a lock-mode bit matrix:
each source mode owns LOCK_NUM consecutive bits, one per target mode. */
310 #define LK(a,b) (1 << ((a) * LOCK_NUM + (b)))
/* Symmetric pair of matrix bits: both (a,b) and (b,a).  The expansion is
now fully parenthesized (CERT PRE02-C): previously LKS(a,b) expanded to
LK(a,b) | LK(b,a) bare, so an adjacent higher-precedence operator (e.g.
LKS(x,y) & z) would have bound to LK(b,a) alone.  All current uses are
inside pure '|' chains, so behavior is unchanged. */
311 #define LKS(a,b) (LK(a,b) | LK(b,a))
/* Lock mode compatibility matrix, encoded as a bitmap through LK()/LKS():
a set bit means the two modes are compatible (no wait needed).  Queried in
lock_mode_compatible() as (LOCK_MODE_COMPATIBILITY) & LK(mode1, mode2).
Pairs listed: IS-IS, IX-IX, S-S, IX-IS (both orders), IS-AUTO_INC,
S-IS, AUTO_INC-IS, AUTO_INC-IX.  LOCK_X appears nowhere, i.e. X is
compatible with no other mode. */
317 #define LOCK_MODE_COMPATIBILITY 0 \
318 | LK(LOCK_IS, LOCK_IS) | LK(LOCK_IX, LOCK_IX) | LK(LOCK_S, LOCK_S) \
319 | LKS(LOCK_IX, LOCK_IS) | LKS(LOCK_IS, LOCK_AUTO_INC) \
320 | LKS(LOCK_S, LOCK_IS) \
321 | LKS(LOCK_AUTO_INC, LOCK_IS) | LKS(LOCK_AUTO_INC, LOCK_IX)
/* "Stronger than or equal" matrix for lock modes, encoded as a bitmap
through LK(): a set bit at (mode1, mode2) means mode1 covers mode2.
Queried in lock_mode_stronger_or_eq() as
(LOCK_MODE_STRONGER_OR_EQ) & LK(mode1, mode2).  Per the table: every mode
covers itself, IX and S cover IS, AUTO_INC covers only itself, and X
covers every mode. */
336 #define LOCK_MODE_STRONGER_OR_EQ 0 \
337 | LK(LOCK_IS, LOCK_IS) \
338 | LK(LOCK_IX, LOCK_IS) | LK(LOCK_IX, LOCK_IX) \
339 | LK(LOCK_S, LOCK_IS) | LK(LOCK_S, LOCK_S) \
340 | LK(LOCK_AUTO_INC, LOCK_AUTO_INC) \
341 | LK(LOCK_X, LOCK_IS) | LK(LOCK_X, LOCK_IX) | LK(LOCK_X, LOCK_S) \
342 | LK(LOCK_X, LOCK_AUTO_INC) | LK(LOCK_X, LOCK_X)
/* Debug flag: when TRUE, lock waits, wait ends, deadlock detections and
search-depth overflows are traced to stderr (see the lock_print_waits
guards in lock_rec_enqueue_waiting() and lock_deadlock_recursive()). */
345 UNIV_INTERN ibool lock_print_waits = FALSE;
360 lock_rec_validate_page(
/* Set to TRUE whenever a deadlock (or a too-deep waits-for search) is
detected; read by the lock monitor output to decide whether to print the
"LATEST DETECTED DEADLOCK" section. */
371 UNIV_INTERN ibool lock_deadlock_found = FALSE;
/* Stream holding the report of the latest deadlock; rewound and rewritten
on each detection, closed (and NULLed) at lock system shutdown. */
372 UNIV_INTERN FILE* lock_latest_err_file;
/* Return codes of lock_deadlock_recursive(), consumed by the switch in
lock_deadlock_occurs(): */
/* A deadlock was found and the starting transaction was chosen as the
victim ("WE ROLL BACK TRANSACTION (2)"). */
375 #define LOCK_VICTIM_IS_START 1
/* A deadlock was found and some other transaction in the cycle was chosen
as the victim; the search is retried for the starter. */
376 #define LOCK_VICTIM_IS_OTHER 2
/* The waits-for search exceeded LOCK_MAX_DEPTH_IN_DEADLOCK_CHECK or
LOCK_MAX_N_STEPS_IN_DEADLOCK_CHECK; treated like a deadlock with the
starter rolled back ("TOO DEEP OR LONG SEARCH"). */
377 #define LOCK_EXCEED_MAX_DEPTH 3
386 lock_deadlock_occurs(
400 lock_deadlock_recursive(
417 lock_rec_get_nth_bit(
436 return(1 & ((
const byte*) &lock[1])[byte_index] >> bit_index);
/* Convenience wrappers: the lock system is protected by the single global
kernel_mutex (every internal routine asserts mutex_own(&kernel_mutex)). */
441 #define lock_mutex_enter_kernel() mutex_enter(&kernel_mutex)
442 #define lock_mutex_exit_kernel() mutex_exit(&kernel_mutex)
454 const ulint* offsets,
455 ibool has_kernel_mutex)
462 if (!has_kernel_mutex) {
463 mutex_enter(&kernel_mutex);
471 fputs(
" InnoDB: Error: transaction id associated"
475 fputs(
"InnoDB: in ", stderr);
476 dict_index_name_print(stderr, NULL, index);
478 "InnoDB: is " TRX_ID_FMT " which is higher than the"
480 "InnoDB: The table is corrupt. You have to do"
481 " dump + drop + reimport.\n",
487 if (!has_kernel_mutex) {
488 mutex_exit(&kernel_mutex);
505 const ulint* offsets,
558 return(max_trx_id < view->up_limit_id);
576 ut_a(lock_latest_err_file);
586 if (lock_latest_err_file != NULL) {
587 fclose(lock_latest_err_file);
588 lock_latest_err_file = NULL;
604 return((ulint)
sizeof(
lock_t));
653 enum lock_mode* mode)
665 enum lock_mode lock_mode;
671 if (dest == tab_lock->
table) {
676 src = tab_lock->
table;
683 }
else if (src != tab_lock->
table) {
691 lock_mode = lock_get_mode(lock);
692 if (lock_mode == LOCK_IX || lock_mode == LOCK_IS) {
693 if (*mode != LOCK_NONE && *mode != lock_mode) {
728 lock_mutex_enter_kernel();
733 if (lock->
trx != trx) {
744 switch (lock_get_mode(lock)) {
761 lock_mutex_exit_kernel();
770 lock_set_lock_and_trx_wait(
787 lock_reset_lock_and_trx_wait(
791 ut_ad((lock->
trx)->wait_lock == lock);
792 ut_ad(lock_get_wait(lock));
796 (lock->
trx)->wait_lock = NULL;
825 lock_rec_get_rec_not_gap(
845 lock_rec_get_insert_intention(
865 lock_mode_stronger_or_eq(
867 enum lock_mode mode1,
868 enum lock_mode mode2)
870 ut_ad(mode1 == LOCK_X || mode1 == LOCK_S || mode1 == LOCK_IX
871 || mode1 == LOCK_IS || mode1 == LOCK_AUTO_INC);
872 ut_ad(mode2 == LOCK_X || mode2 == LOCK_S || mode2 == LOCK_IX
873 || mode2 == LOCK_IS || mode2 == LOCK_AUTO_INC);
875 return((LOCK_MODE_STRONGER_OR_EQ) & LK(mode1, mode2));
883 lock_mode_compatible(
885 enum lock_mode mode1,
886 enum lock_mode mode2)
888 ut_ad(mode1 == LOCK_X || mode1 == LOCK_S || mode1 == LOCK_IX
889 || mode1 == LOCK_IS || mode1 == LOCK_AUTO_INC);
890 ut_ad(mode2 == LOCK_X || mode2 == LOCK_S || mode2 == LOCK_IX
891 || mode2 == LOCK_IS || mode2 == LOCK_AUTO_INC);
893 return((LOCK_MODE_COMPATIBILITY) & LK(mode1, mode2));
901 lock_rec_has_to_wait(
912 ibool lock_is_on_supremum)
920 if (trx != lock2->
trx
921 && !lock_mode_compatible(static_cast<lock_mode>(
LOCK_MODE_MASK & type_mode),
922 lock_get_mode(lock2))) {
927 if ((lock_is_on_supremum || (type_mode &
LOCK_GAP))
938 if (!(type_mode & LOCK_INSERT_INTENTION)
939 && lock_rec_get_gap(lock2)) {
947 if ((type_mode & LOCK_GAP)
948 && lock_rec_get_rec_not_gap(lock2)) {
956 if (lock_rec_get_insert_intention(lock2)) {
991 ut_ad(lock1 && lock2);
993 if (lock1->
trx != lock2->
trx
994 && !lock_mode_compatible(lock_get_mode(lock1),
995 lock_get_mode(lock2))) {
1002 return(lock_rec_has_to_wait(lock1->
trx,
1004 lock_rec_get_nth_bit(
1021 lock_rec_get_n_bits(
1032 lock_rec_set_nth_bit(
1042 ut_ad(i < lock->un_member.rec_lock.n_bits);
1047 ((byte*) &lock[1])[byte_index] |= 1 << bit_index;
1063 for (i = 0; i < lock_rec_get_n_bits(lock); i++) {
1065 if (lock_rec_get_nth_bit(lock, i)) {
1071 return(ULINT_UNDEFINED);
1078 lock_rec_reset_nth_bit(
1089 ut_ad(i < lock->un_member.rec_lock.n_bits);
1094 ((byte*) &lock[1])[byte_index] &= ~(1 << bit_index);
1102 lock_rec_get_next_on_page(
1109 ut_ad(mutex_own(&kernel_mutex));
1139 lock_rec_get_first_on_page_addr(
1146 ut_ad(mutex_own(&kernel_mutex));
1175 mutex_enter(&kernel_mutex);
1177 if (lock_rec_get_first_on_page_addr(space, page_no)) {
1183 mutex_exit(&kernel_mutex);
1194 lock_rec_get_first_on_page(
1203 ut_ad(mutex_own(&kernel_mutex));
1232 ut_ad(mutex_own(&kernel_mutex));
1236 lock = lock_rec_get_next_on_page(lock);
1237 }
while (lock && !lock_rec_get_nth_bit(lock, heap_no));
1254 ut_ad(mutex_own(&kernel_mutex));
1256 for (lock = lock_rec_get_first_on_page(block); lock;
1257 lock = lock_rec_get_next_on_page(lock)) {
1258 if (lock_rec_get_nth_bit(lock, heap_no)) {
1272 lock_rec_bitmap_reset(
1283 n_bytes = lock_rec_get_n_bits(lock) / 8;
1285 ut_ad((lock_rec_get_n_bits(lock) % 8) == 0);
1287 memset(&lock[1], 0, n_bytes);
1304 size =
sizeof(
lock_t) + lock_rec_get_n_bits(lock) / 8;
1322 lock_t* found_lock = NULL;
1324 ut_ad(mutex_own(&kernel_mutex));
1330 lock = lock_rec_get_first_on_page_addr(space, page_no);
1335 if (lock == in_lock) {
1340 if (lock_rec_get_nth_bit(lock, heap_no)) {
1345 lock = lock_rec_get_next_on_page(lock);
1360 enum lock_mode mode)
1364 ut_ad(mutex_own(&kernel_mutex));
1370 while (lock != NULL) {
1372 if (lock->
trx == trx
1373 && lock_mode_stronger_or_eq(lock_get_mode(lock), mode)) {
1378 ut_ad(!lock_get_wait(lock));
1411 ut_ad(mutex_own(&kernel_mutex));
1413 || (precise_mode & LOCK_MODE_MASK) == LOCK_X);
1414 ut_ad(!(precise_mode & LOCK_INSERT_INTENTION));
1416 lock = lock_rec_get_first(block, heap_no);
1419 if (lock->
trx == trx
1420 && lock_mode_stronger_or_eq(lock_get_mode(lock),
1421 static_cast<lock_mode>(precise_mode & LOCK_MODE_MASK))
1422 && !lock_get_wait(lock)
1423 && (!lock_rec_get_rec_not_gap(lock)
1425 || heap_no == PAGE_HEAP_NO_SUPREMUM)
1426 && (!lock_rec_get_gap(lock)
1427 || (precise_mode & LOCK_GAP)
1428 || heap_no == PAGE_HEAP_NO_SUPREMUM)
1429 && (!lock_rec_get_insert_intention(lock))) {
1434 lock = lock_rec_get_next(heap_no, lock);
1446 lock_rec_other_has_expl_req(
1448 enum lock_mode mode,
1464 ut_ad(mutex_own(&kernel_mutex));
1465 ut_ad(mode == LOCK_X || mode == LOCK_S);
1466 ut_ad(gap == 0 || gap == LOCK_GAP);
1469 lock = lock_rec_get_first(block, heap_no);
1472 if (lock->
trx != trx
1474 || !(lock_rec_get_gap(lock)
1475 || heap_no == PAGE_HEAP_NO_SUPREMUM))
1476 && (wait || !lock_get_wait(lock))
1477 && lock_mode_stronger_or_eq(lock_get_mode(lock), mode)) {
1482 lock = lock_rec_get_next(heap_no, lock);
1495 lock_rec_other_has_conflicting(
1497 enum lock_mode mode,
1508 ut_ad(mutex_own(&kernel_mutex));
1510 lock = lock_rec_get_first(block, heap_no);
1512 if (UNIV_LIKELY_NULL(lock)) {
1513 if (UNIV_UNLIKELY(heap_no == PAGE_HEAP_NO_SUPREMUM)) {
1516 if (lock_rec_has_to_wait(trx, mode, lock,
1521 lock = lock_rec_get_next(heap_no, lock);
1526 if (lock_rec_has_to_wait(trx, mode, lock,
1531 lock = lock_rec_get_next(heap_no, lock);
1546 lock_rec_find_similar_on_page(
1553 ut_ad(mutex_own(&kernel_mutex));
1555 while (lock != NULL) {
1556 if (lock->
trx == trx
1558 && lock_rec_get_n_bits(lock) > heap_no) {
1563 lock = lock_rec_get_next_on_page(lock);
1575 lock_sec_rec_some_has_impl_off_kernel(
1579 const ulint* offsets)
1583 ut_ad(mutex_own(&kernel_mutex));
1604 rec, index, offsets, TRUE)) {
1605 buf_page_print(page, 0);
1626 ulint n_records = 0;
1634 n_bits = lock_rec_get_n_bits(lock);
1636 for (n_bit = 0; n_bit < n_bits; n_bit++) {
1637 if (lock_rec_get_nth_bit(lock, n_bit)) {
1675 ut_ad(mutex_own(&kernel_mutex));
1679 page = block->
frame;
1687 if (UNIV_UNLIKELY(heap_no == PAGE_HEAP_NO_SUPREMUM)) {
1688 ut_ad(!(type_mode & LOCK_REC_NOT_GAP));
1695 n_bytes = 1 + n_bits / 8;
1704 lock->
index = index;
1713 lock_rec_bitmap_reset(lock);
1716 lock_rec_set_nth_bit(lock, heap_no);
1720 if (UNIV_UNLIKELY(type_mode &
LOCK_WAIT)) {
1722 lock_set_lock_and_trx_wait(lock, trx);
1737 lock_rec_enqueue_waiting(
1757 ut_ad(mutex_own(&kernel_mutex));
1767 return(DB_QUE_THR_SUSPENDED);
1778 fputs(
" InnoDB: Error: a record lock wait happens"
1779 " in a dictionary operation!\n"
1780 "InnoDB: ", stderr);
1781 dict_index_name_print(stderr, trx, index);
1783 "InnoDB: Submit a detailed bug report"
1784 " to http://bugs.mysql.com\n",
1789 lock = lock_rec_create(type_mode | LOCK_WAIT,
1790 block, heap_no, index, trx);
1795 if (UNIV_UNLIKELY(lock_deadlock_occurs(lock, trx))) {
1797 lock_reset_lock_and_trx_wait(lock);
1798 lock_rec_reset_nth_bit(lock, heap_no);
1800 return(DB_DEADLOCK);
1812 trx->was_chosen_as_deadlock_victim = FALSE;
1818 if (lock_print_waits) {
1819 fprintf(stderr,
"Lock wait for trx " TRX_ID_FMT " in index ",
1825 return(DB_LOCK_WAIT);
1838 lock_rec_add_to_queue(
1851 ut_ad(mutex_own(&kernel_mutex));
1853 switch (type_mode & LOCK_MODE_MASK) {
1861 if (!(type_mode & (LOCK_WAIT | LOCK_GAP))) {
1866 = lock_rec_other_has_expl_req(mode, 0, LOCK_WAIT,
1867 block, heap_no, trx);
1879 if (UNIV_UNLIKELY(heap_no == PAGE_HEAP_NO_SUPREMUM)) {
1880 ut_ad(!(type_mode & LOCK_REC_NOT_GAP));
1890 lock = lock_rec_get_first_on_page(block);
1892 while (lock != NULL) {
1893 if (lock_get_wait(lock)
1894 && (lock_rec_get_nth_bit(lock, heap_no))) {
1896 goto somebody_waits;
1899 lock = lock_rec_get_next_on_page(lock);
1902 if (UNIV_LIKELY(!(type_mode & LOCK_WAIT))) {
1908 lock = lock_rec_find_similar_on_page(
1910 lock_rec_get_first_on_page(block), trx);
1914 lock_rec_set_nth_bit(lock, heap_no);
1921 return(lock_rec_create(type_mode, block, heap_no, index, trx));
1925 enum lock_rec_req_status {
1931 LOCK_REC_SUCCESS_CREATED
1943 enum lock_rec_req_status
1962 ut_ad(mutex_own(&kernel_mutex));
1963 ut_ad((LOCK_MODE_MASK & mode) != LOCK_S
1965 ut_ad((LOCK_MODE_MASK & mode) != LOCK_X
1967 ut_ad((LOCK_MODE_MASK & mode) == LOCK_S
1968 || (LOCK_MODE_MASK & mode) == LOCK_X);
1969 ut_ad(mode - (LOCK_MODE_MASK & mode) == LOCK_GAP
1970 || mode - (LOCK_MODE_MASK & mode) == 0
1971 || mode - (LOCK_MODE_MASK & mode) == LOCK_REC_NOT_GAP);
1973 lock = lock_rec_get_first_on_page(block);
1979 lock_rec_create(mode, block, heap_no, index, trx);
1982 return(LOCK_REC_SUCCESS_CREATED);
1985 if (lock_rec_get_next_on_page(lock)) {
1987 return(LOCK_REC_FAIL);
1990 if (lock->
trx != trx
1992 || lock_rec_get_n_bits(lock) <= heap_no) {
1994 return(LOCK_REC_FAIL);
2001 if (!lock_rec_get_nth_bit(lock, heap_no)) {
2002 lock_rec_set_nth_bit(lock, heap_no);
2003 return(LOCK_REC_SUCCESS_CREATED);
2007 return(LOCK_REC_SUCCESS);
2036 ut_ad(mutex_own(&kernel_mutex));
2037 ut_ad((LOCK_MODE_MASK & mode) != LOCK_S
2039 ut_ad((LOCK_MODE_MASK & mode) != LOCK_X
2041 ut_ad((LOCK_MODE_MASK & mode) == LOCK_S
2042 || (LOCK_MODE_MASK & mode) == LOCK_X);
2043 ut_ad(mode - (LOCK_MODE_MASK & mode) == LOCK_GAP
2044 || mode - (LOCK_MODE_MASK & mode) == 0
2045 || mode - (LOCK_MODE_MASK & mode) == LOCK_REC_NOT_GAP);
2049 if (lock_rec_has_expl(mode, block, heap_no, trx)) {
2053 }
else if (lock_rec_other_has_conflicting(static_cast<lock_mode>(mode), block, heap_no, trx)) {
2059 return(lock_rec_enqueue_waiting(mode, block, heap_no,
2064 lock_rec_add_to_queue(
LOCK_REC | mode, block,
2065 heap_no, index, trx);
2097 ut_ad(mutex_own(&kernel_mutex));
2098 ut_ad((LOCK_MODE_MASK & mode) != LOCK_S
2100 ut_ad((LOCK_MODE_MASK & mode) != LOCK_X
2102 ut_ad((LOCK_MODE_MASK & mode) == LOCK_S
2103 || (LOCK_MODE_MASK & mode) == LOCK_X);
2104 ut_ad(mode - (LOCK_MODE_MASK & mode) == LOCK_GAP
2105 || mode - (LOCK_MODE_MASK & mode) == LOCK_REC_NOT_GAP
2106 || mode - (LOCK_MODE_MASK & mode) == 0);
2110 switch (lock_rec_lock_fast(impl, mode, block, heap_no, index, thr)) {
2111 case LOCK_REC_SUCCESS:
2113 case LOCK_REC_SUCCESS_CREATED:
2116 return(lock_rec_lock_slow(impl, mode, block,
2117 heap_no, index, thr));
2129 lock_rec_has_to_wait_in_queue(
2138 ut_ad(mutex_own(&kernel_mutex));
2139 ut_ad(lock_get_wait(wait_lock));
2146 lock = lock_rec_get_first_on_page_addr(space, page_no);
2148 while (lock != wait_lock) {
2150 if (lock_rec_get_nth_bit(lock, heap_no)
2156 lock = lock_rec_get_next_on_page(lock);
2171 ut_ad(mutex_own(&kernel_mutex));
2173 lock_reset_lock_and_trx_wait(lock);
2175 if (lock_get_mode(lock) == LOCK_AUTO_INC) {
2181 "InnoDB: Error: trx already had"
2182 " an AUTO-INC lock!\n");
2191 if (lock_print_waits) {
2192 fprintf(stderr,
"Lock wait for trx " TRX_ID_FMT " ends\n",
2217 ut_ad(mutex_own(&kernel_mutex));
2225 lock_reset_lock_and_trx_wait(lock);
2238 lock_rec_dequeue_from_page(
2250 ut_ad(mutex_own(&kernel_mutex));
2266 lock = lock_rec_get_first_on_page_addr(space, page_no);
2268 while (lock != NULL) {
2269 if (lock_get_wait(lock)
2270 && !lock_rec_has_to_wait_in_queue(lock)) {
2276 lock = lock_rec_get_next_on_page(lock);
2293 ut_ad(mutex_own(&kernel_mutex));
2313 lock_rec_free_all_from_discard_page(
2322 ut_ad(mutex_own(&kernel_mutex));
2327 lock = lock_rec_get_first_on_page_addr(space, page_no);
2329 while (lock != NULL) {
2331 ut_ad(!lock_get_wait(lock));
2333 next_lock = lock_rec_get_next_on_page(lock);
2335 lock_rec_discard(lock);
2348 lock_rec_reset_and_release_wait(
2356 ut_ad(mutex_own(&kernel_mutex));
2358 lock = lock_rec_get_first(block, heap_no);
2360 while (lock != NULL) {
2361 if (lock_get_wait(lock)) {
2362 lock_rec_cancel(lock);
2364 lock_rec_reset_nth_bit(lock, heap_no);
2367 lock = lock_rec_get_next(heap_no, lock);
2378 lock_rec_inherit_to_gap(
2393 ut_ad(mutex_own(&kernel_mutex));
2395 lock = lock_rec_get_first(block, heap_no);
2403 while (lock != NULL) {
2404 if (!lock_rec_get_insert_intention(lock)
2405 && !((srv_locks_unsafe_for_binlog
2406 || lock->
trx->isolation_level
2407 <= TRX_ISO_READ_COMMITTED)
2408 && lock_get_mode(lock) == LOCK_X)) {
2410 lock_rec_add_to_queue(
LOCK_REC | LOCK_GAP
2411 | lock_get_mode(lock),
2412 heir_block, heir_heap_no,
2416 lock = lock_rec_get_next(heap_no, lock);
2426 lock_rec_inherit_to_gap_if_gap_lock(
2438 ut_ad(mutex_own(&kernel_mutex));
2440 lock = lock_rec_get_first(block, heap_no);
2442 while (lock != NULL) {
2443 if (!lock_rec_get_insert_intention(lock)
2444 && (heap_no == PAGE_HEAP_NO_SUPREMUM
2445 || !lock_rec_get_rec_not_gap(lock))) {
2447 lock_rec_add_to_queue(
LOCK_REC | LOCK_GAP
2448 | lock_get_mode(lock),
2449 block, heir_heap_no,
2453 lock = lock_rec_get_next(heap_no, lock);
2468 ulint receiver_heap_no,
2472 ulint donator_heap_no)
2477 ut_ad(mutex_own(&kernel_mutex));
2479 lock = lock_rec_get_first(donator, donator_heap_no);
2481 ut_ad(lock_rec_get_first(receiver, receiver_heap_no) == NULL);
2483 while (lock != NULL) {
2484 const ulint type_mode = lock->
type_mode;
2486 lock_rec_reset_nth_bit(lock, donator_heap_no);
2488 if (UNIV_UNLIKELY(type_mode & LOCK_WAIT)) {
2489 lock_reset_lock_and_trx_wait(lock);
2495 lock_rec_add_to_queue(type_mode, receiver, receiver_heap_no,
2497 lock = lock_rec_get_next(donator_heap_no, lock);
2500 ut_ad(lock_rec_get_first(donator, donator_heap_no) == NULL);
2522 lock_mutex_enter_kernel();
2524 lock = lock_rec_get_first_on_page(block);
2527 lock_mutex_exit_kernel();
2542 lock_t* old_lock = lock_rec_copy(lock, heap);
2547 lock_rec_bitmap_reset(lock);
2549 if (lock_get_wait(lock)) {
2550 lock_reset_lock_and_trx_wait(lock);
2553 lock = lock_rec_get_next_on_page(lock);
2554 }
while (lock != NULL);
2576 ut_ad(comp || !memcmp(page_cur_get_rec(&cur1),
2577 page_cur_get_rec(&cur2),
2581 if (UNIV_LIKELY(comp)) {
2583 page_cur_get_rec(&cur2));
2585 page_cur_get_rec(&cur1));
2588 page_cur_get_rec(&cur2));
2590 page_cur_get_rec(&cur1));
2593 if (lock_rec_get_nth_bit(lock, old_heap_no)) {
2596 ut_d(lock_rec_reset_nth_bit(lock,
2602 lock_rec_add_to_queue(lock->
type_mode, block,
2615 (new_heap_no == PAGE_HEAP_NO_SUPREMUM)) {
2617 ut_ad(old_heap_no == PAGE_HEAP_NO_SUPREMUM);
2630 if (UNIV_UNLIKELY(i != ULINT_UNDEFINED)) {
2632 "lock_move_reorganize_page():"
2633 " %lu not moved in %p\n",
2634 (ulong) i, (
void*) lock);
2641 lock_mutex_exit_kernel();
2645 #ifdef UNIV_DEBUG_LOCK_VALIDATE
2666 lock_mutex_enter_kernel();
2674 for (lock = lock_rec_get_first_on_page(block); lock;
2675 lock = lock_rec_get_next_on_page(lock)) {
2678 const ulint type_mode = lock->
type_mode;
2697 page_cur_get_rec(&cur1));
2700 page_cur_get_rec(&cur1));
2701 ut_ad(!memcmp(page_cur_get_rec(&cur1),
2702 page_cur_get_rec(&cur2),
2704 page_cur_get_rec(&cur2))));
2707 if (lock_rec_get_nth_bit(lock, heap_no)) {
2708 lock_rec_reset_nth_bit(lock, heap_no);
2710 if (UNIV_UNLIKELY(type_mode & LOCK_WAIT)) {
2711 lock_reset_lock_and_trx_wait(lock);
2716 page_cur_get_rec(&cur2));
2719 page_cur_get_rec(&cur2));
2722 lock_rec_add_to_queue(type_mode,
2732 lock_mutex_exit_kernel();
2734 #ifdef UNIV_DEBUG_LOCK_VALIDATE
2754 const rec_t* old_end)
2766 lock_mutex_enter_kernel();
2768 for (lock = lock_rec_get_first_on_page(block); lock;
2769 lock = lock_rec_get_next_on_page(lock)) {
2772 const ulint type_mode = lock->
type_mode;
2783 while (page_cur_get_rec(&cur1) != rec) {
2788 page_cur_get_rec(&cur1));
2791 page_cur_get_rec(&cur1));
2792 ut_ad(!memcmp(page_cur_get_rec(&cur1),
2793 page_cur_get_rec(&cur2),
2799 if (lock_rec_get_nth_bit(lock, heap_no)) {
2800 lock_rec_reset_nth_bit(lock, heap_no);
2802 if (UNIV_UNLIKELY(type_mode & LOCK_WAIT)) {
2803 lock_reset_lock_and_trx_wait(lock);
2808 page_cur_get_rec(&cur2));
2811 page_cur_get_rec(&cur2));
2814 lock_rec_add_to_queue(type_mode,
2827 for (i = PAGE_HEAP_NO_USER_LOW;
2828 i < lock_rec_get_n_bits(lock); i++) {
2830 (lock_rec_get_nth_bit(lock, i))) {
2833 "lock_move_rec_list_start():"
2834 " %lu not moved in %p\n",
2835 (ulong) i, (
void*) lock);
2843 lock_mutex_exit_kernel();
2845 #ifdef UNIV_DEBUG_LOCK_VALIDATE
2862 lock_mutex_enter_kernel();
2867 lock_rec_move(right_block, left_block,
2868 PAGE_HEAP_NO_SUPREMUM, PAGE_HEAP_NO_SUPREMUM);
2873 lock_rec_inherit_to_gap(left_block, right_block,
2874 PAGE_HEAP_NO_SUPREMUM, heap_no);
2876 lock_mutex_exit_kernel();
2887 const rec_t* orig_succ,
2895 lock_mutex_enter_kernel();
2901 lock_rec_inherit_to_gap(right_block, left_block,
2903 PAGE_HEAP_NO_SUPREMUM);
2908 lock_rec_reset_and_release_wait(left_block,
2909 PAGE_HEAP_NO_SUPREMUM);
2911 lock_rec_free_all_from_discard_page(left_block);
2913 lock_mutex_exit_kernel();
2930 lock_mutex_enter_kernel();
2935 lock_rec_move(block, root,
2936 PAGE_HEAP_NO_SUPREMUM, PAGE_HEAP_NO_SUPREMUM);
2937 lock_mutex_exit_kernel();
2952 lock_mutex_enter_kernel();
2957 lock_rec_move(new_block, block,
2958 PAGE_HEAP_NO_SUPREMUM, PAGE_HEAP_NO_SUPREMUM);
2959 lock_rec_free_all_from_discard_page(block);
2961 lock_mutex_exit_kernel();
2975 lock_mutex_enter_kernel();
2980 lock_rec_inherit_to_gap(left_block, right_block,
2981 PAGE_HEAP_NO_SUPREMUM, heap_no);
2983 lock_mutex_exit_kernel();
2994 const rec_t* orig_pred,
3000 const rec_t* left_next_rec;
3004 lock_mutex_enter_kernel();
3013 lock_rec_inherit_to_gap(left_block, left_block,
3015 PAGE_HEAP_NO_SUPREMUM);
3020 lock_rec_reset_and_release_wait(left_block,
3021 PAGE_HEAP_NO_SUPREMUM);
3027 lock_rec_move(left_block, right_block,
3028 PAGE_HEAP_NO_SUPREMUM, PAGE_HEAP_NO_SUPREMUM);
3030 lock_rec_free_all_from_discard_page(right_block);
3032 lock_mutex_exit_kernel();
3053 mutex_enter(&kernel_mutex);
3055 lock_rec_reset_and_release_wait(heir_block, heir_heap_no);
3057 lock_rec_inherit_to_gap(heir_block, block, heir_heap_no, heap_no);
3059 mutex_exit(&kernel_mutex);
3079 lock_mutex_enter_kernel();
3081 if (!lock_rec_get_first_on_page(block)) {
3084 lock_mutex_exit_kernel();
3093 rec = page + PAGE_NEW_INFIMUM;
3098 lock_rec_inherit_to_gap(heir_block, block,
3099 heir_heap_no, heap_no);
3101 lock_rec_reset_and_release_wait(block, heap_no);
3104 }
while (heap_no != PAGE_HEAP_NO_SUPREMUM);
3106 rec = page + PAGE_OLD_INFIMUM;
3111 lock_rec_inherit_to_gap(heir_block, block,
3112 heir_heap_no, heap_no);
3114 lock_rec_reset_and_release_wait(block, heap_no);
3117 }
while (heap_no != PAGE_HEAP_NO_SUPREMUM);
3120 lock_rec_free_all_from_discard_page(block);
3122 lock_mutex_exit_kernel();
3134 ulint receiver_heap_no;
3135 ulint donator_heap_no;
3152 lock_mutex_enter_kernel();
3153 lock_rec_inherit_to_gap_if_gap_lock(block,
3154 receiver_heap_no, donator_heap_no);
3155 lock_mutex_exit_kernel();
3185 lock_mutex_enter_kernel();
3189 lock_rec_inherit_to_gap(block, block, next_heap_no, heap_no);
3193 lock_rec_reset_and_release_wait(block, heap_no);
3195 lock_mutex_exit_kernel();
3220 lock_mutex_enter_kernel();
3222 lock_rec_move(block, block, PAGE_HEAP_NO_INFIMUM, heap_no);
3224 lock_mutex_exit_kernel();
3245 lock_mutex_enter_kernel();
3247 lock_rec_move(block, donator, heap_no, PAGE_HEAP_NO_INFIMUM);
3249 lock_mutex_exit_kernel();
3261 lock_deadlock_occurs(
3272 ut_ad(mutex_own(&kernel_mutex));
3285 ret = lock_deadlock_recursive(trx, trx, lock, &cost, 0);
3288 case LOCK_VICTIM_IS_OTHER:
3293 case LOCK_EXCEED_MAX_DEPTH:
3297 rewind(lock_latest_err_file);
3300 fputs(
"TOO DEEP OR LONG SEARCH IN THE LOCK TABLE"
3301 " WAITS-FOR GRAPH, WE WILL ROLL BACK"
3302 " FOLLOWING TRANSACTION \n",
3303 lock_latest_err_file);
3305 fputs(
"\n*** TRANSACTION:\n", lock_latest_err_file);
3306 trx_print(lock_latest_err_file, trx, 3000);
3308 fputs(
"*** WAITING FOR THIS LOCK TO BE GRANTED:\n",
3309 lock_latest_err_file);
3318 case LOCK_VICTIM_IS_START:
3319 fputs(
"*** WE ROLL BACK TRANSACTION (2)\n",
3320 lock_latest_err_file);
3328 lock_deadlock_found = TRUE;
3343 lock_deadlock_recursive(
3358 ulint heap_no = ULINT_UNDEFINED;
3363 ut_ad(mutex_own(&kernel_mutex));
3379 ut_a(heap_no != ULINT_UNDEFINED);
3384 lock = lock_rec_get_first_on_page_addr(space, page_no);
3388 && lock != wait_lock
3389 && !lock_rec_get_nth_bit(lock, heap_no)) {
3391 lock = lock_rec_get_next_on_page(lock);
3394 if (lock == wait_lock) {
3398 ut_ad(lock == NULL || lock_rec_get_nth_bit(lock, heap_no));
3408 if (heap_no == ULINT_UNDEFINED) {
3411 un_member.tab_lock.locks, lock);
3424 = depth > LOCK_MAX_DEPTH_IN_DEADLOCK_CHECK
3425 || *cost > LOCK_MAX_N_STEPS_IN_DEADLOCK_CHECK;
3427 lock_trx = lock->
trx;
3429 if (lock_trx == start) {
3435 FILE* ef = lock_latest_err_file;
3440 fputs(
"\n*** (1) TRANSACTION:\n", ef);
3444 fputs(
"*** (1) WAITING FOR THIS LOCK"
3445 " TO BE GRANTED:\n", ef);
3453 fputs(
"*** (2) TRANSACTION:\n", ef);
3457 fputs(
"*** (2) HOLDS THE LOCK(S):\n", ef);
3465 fputs(
"*** (2) WAITING FOR THIS LOCK"
3466 " TO BE GRANTED:\n", ef);
3475 if (lock_print_waits) {
3476 fputs(
"Deadlock detected\n",
3487 return(LOCK_VICTIM_IS_START);
3490 lock_deadlock_found = TRUE;
3496 fputs(
"*** WE ROLL BACK TRANSACTION (1)\n",
3499 wait_lock->
trx->was_chosen_as_deadlock_victim
3511 return(LOCK_VICTIM_IS_OTHER);
3517 if (lock_print_waits) {
3518 fputs(
"Deadlock search exceeds"
3519 " max steps or depth.\n",
3526 return(LOCK_EXCEED_MAX_DEPTH);
3529 if (lock_trx->
que_state == TRX_QUE_LOCK_WAIT) {
3535 ret = lock_deadlock_recursive(
3546 if (heap_no != ULINT_UNDEFINED) {
3551 lock = lock_rec_get_next_on_page(lock);
3552 }
while (lock != NULL
3553 && lock != wait_lock
3554 && !lock_rec_get_nth_bit(lock, heap_no));
3556 if (lock == wait_lock) {
3580 ut_ad(table && trx);
3581 ut_ad(mutex_own(&kernel_mutex));
3583 if ((type_mode & LOCK_MODE_MASK) == LOCK_AUTO_INC) {
3590 if (type_mode == LOCK_AUTO_INC) {
3610 if (UNIV_UNLIKELY(type_mode & LOCK_WAIT)) {
3612 lock_set_lock_and_trx_wait(lock, trx);
3624 lock_table_remove_low(
3631 ut_ad(mutex_own(&kernel_mutex));
3638 if (lock_get_mode(lock) == LOCK_AUTO_INC) {
3655 if (!lock_get_wait(lock)
3660 ut_a(autoinc_lock == lock);
3680 lock_table_enqueue_waiting(
3690 ut_ad(mutex_own(&kernel_mutex));
3699 return(DB_QUE_THR_SUSPENDED);
3710 fputs(
" InnoDB: Error: a table lock wait happens"
3711 " in a dictionary operation!\n"
3712 "InnoDB: Table name ", stderr);
3715 "InnoDB: Submit a detailed bug report"
3716 " to http://bugs.mysql.com\n",
3722 lock = lock_table_create(table, mode | LOCK_WAIT, trx);
3727 if (lock_deadlock_occurs(lock, trx)) {
3731 lock_table_remove_low(lock);
3732 lock_reset_lock_and_trx_wait(lock);
3734 return(DB_DEADLOCK);
3745 trx->was_chosen_as_deadlock_victim = FALSE;
3750 return(DB_LOCK_WAIT);
3759 lock_table_other_has_incompatible(
3766 enum lock_mode mode)
3770 ut_ad(mutex_own(&kernel_mutex));
3774 while (lock != NULL) {
3776 if ((lock->
trx != trx)
3777 && (!lock_mode_compatible(lock_get_mode(lock), mode))
3778 && (wait || !(lock_get_wait(lock)))) {
3800 enum lock_mode mode,
3806 ut_ad(table && thr);
3808 if (flags & BTR_NO_LOCKING_FLAG) {
3817 lock_mutex_enter_kernel();
3821 if (lock_table_has(trx, table, mode)) {
3823 lock_mutex_exit_kernel();
3831 if (lock_table_other_has_incompatible(trx, LOCK_WAIT, table, mode)) {
3836 err = lock_table_enqueue_waiting(mode | flags, table, thr);
3838 lock_mutex_exit_kernel();
3843 lock_table_create(table, mode | flags, trx);
3845 ut_a(!flags || mode == LOCK_S || mode == LOCK_X);
3847 lock_mutex_exit_kernel();
3857 lock_table_has_to_wait_in_queue(
3864 ut_ad(mutex_own(&kernel_mutex));
3865 ut_ad(lock_get_wait(wait_lock));
3871 while (lock != wait_lock) {
3898 ut_ad(mutex_own(&kernel_mutex));
3903 lock_table_remove_low(in_lock);
3908 while (lock != NULL) {
3910 if (lock_get_wait(lock)
3911 && !lock_table_has_to_wait_in_queue(lock)) {
3935 enum lock_mode lock_mode)
3946 mutex_enter(&kernel_mutex);
3948 first_lock = lock_rec_get_first(block, heap_no);
3953 for (lock = first_lock; lock != NULL;
3954 lock = lock_rec_get_next(heap_no, lock)) {
3955 if (lock->
trx == trx && lock_get_mode(lock) == lock_mode) {
3956 ut_a(!lock_get_wait(lock));
3957 lock_rec_reset_nth_bit(lock, heap_no);
3962 mutex_exit(&kernel_mutex);
3965 " InnoDB: Error: unlock row could not"
3966 " find a %lu mode lock on the record\n",
3974 for (lock = first_lock; lock != NULL;
3975 lock = lock_rec_get_next(heap_no, lock)) {
3976 if (lock_get_wait(lock)
3977 && !lock_rec_has_to_wait_in_queue(lock)) {
3984 mutex_exit(&kernel_mutex);
4000 ut_ad(mutex_own(&kernel_mutex));
4006 while (lock != NULL) {
4012 lock_rec_dequeue_from_page(lock);
4016 if (lock_get_mode(lock) != LOCK_IS
4029 lock_table_dequeue(lock);
4032 if (count == LOCK_RELEASE_KERNEL_INTERVAL) {
4036 lock_mutex_exit_kernel();
4038 lock_mutex_enter_kernel();
4060 ut_ad(mutex_own(&kernel_mutex));
4064 lock_rec_dequeue_from_page(lock);
4068 if (lock->
trx->autoinc_locks != NULL) {
4073 lock_table_dequeue(lock);
4078 lock_reset_lock_and_trx_wait(lock);
/* Predicate: the lock's mode is table-level S or X (as opposed to the
intention modes or AUTO-INC).  Used when removing all locks on a table to
decide whether explicit S/X table locks should also be dropped
(remove_also_table_sx_locks).  NOTE: evaluates its argument twice — only
pass side-effect-free expressions. */
4086 #define IS_LOCK_S_OR_X(lock) \
4087 (lock_get_mode(lock) == LOCK_S \
4088 || lock_get_mode(lock) == LOCK_X)
4098 lock_remove_all_on_table_for_trx(
4102 ibool remove_also_table_sx_locks)
4108 ut_ad(mutex_own(&kernel_mutex));
4112 while (lock != NULL) {
4117 ut_a(!lock_get_wait(lock));
4119 lock_rec_discard(lock);
4122 && (remove_also_table_sx_locks
4123 || !IS_LOCK_S_OR_X(lock))) {
4125 ut_a(!lock_get_wait(lock));
4127 lock_table_remove_low(lock);
4145 ibool remove_also_table_sx_locks)
4151 mutex_enter(&kernel_mutex);
4155 while (lock != NULL) {
4163 if (remove_also_table_sx_locks
4165 && IS_LOCK_S_OR_X(lock))) {
4167 ut_a(!lock_get_wait(lock));
4170 lock_remove_all_on_table_for_trx(table, lock->
trx,
4171 remove_also_table_sx_locks);
4173 if (prev_lock == NULL) {
4177 un_member.tab_lock.locks, lock);
4183 prev_lock) != lock) {
4188 un_member.tab_lock.locks, prev_lock);
4192 un_member.tab_lock.locks, lock);
4196 mutex_exit(&kernel_mutex);
4210 ut_ad(mutex_own(&kernel_mutex));
4213 fputs(
"TABLE LOCK table ", file);
4218 if (lock_get_mode(lock) == LOCK_S) {
4219 fputs(
" lock mode S", file);
4220 }
else if (lock_get_mode(lock) == LOCK_X) {
4221 fputs(
" lock mode X", file);
4222 }
else if (lock_get_mode(lock) == LOCK_IS) {
4223 fputs(
" lock mode IS", file);
4224 }
else if (lock_get_mode(lock) == LOCK_IX) {
4225 fputs(
" lock mode IX", file);
4226 }
else if (lock_get_mode(lock) == LOCK_AUTO_INC) {
4227 fputs(
" lock mode AUTO-INC", file);
4229 fprintf(file,
" unknown lock mode %lu",
4230 (ulong) lock_get_mode(lock));
4233 if (lock_get_wait(lock)) {
4234 fputs(
" waiting", file);
4255 ulint offsets_[REC_OFFS_NORMAL_SIZE];
4256 ulint* offsets = offsets_;
4257 rec_offs_init(offsets_);
4259 ut_ad(mutex_own(&kernel_mutex));
4265 fprintf(file,
"RECORD LOCKS space id %lu page no %lu n bits %lu ",
4266 (ulong) space, (ulong) page_no,
4267 (ulong) lock_rec_get_n_bits(lock));
4268 dict_index_name_print(file, lock->
trx, lock->
index);
4271 if (lock_get_mode(lock) == LOCK_S) {
4272 fputs(
" lock mode S", file);
4273 }
else if (lock_get_mode(lock) == LOCK_X) {
4274 fputs(
" lock_mode X", file);
4279 if (lock_rec_get_gap(lock)) {
4280 fputs(
" locks gap before rec", file);
4283 if (lock_rec_get_rec_not_gap(lock)) {
4284 fputs(
" locks rec but not gap", file);
4287 if (lock_rec_get_insert_intention(lock)) {
4288 fputs(
" insert intention", file);
4291 if (lock_get_wait(lock)) {
4292 fputs(
" waiting", file);
4301 for (i = 0; i < lock_rec_get_n_bits(lock); ++i) {
4303 if (!lock_rec_get_nth_bit(lock, i)) {
4307 fprintf(file,
"Record lock, heap no %lu", (ulong) i);
4313 buf_block_get_frame(block), i);
4315 offsets = rec_get_offsets(
4316 rec, lock->
index, offsets,
4317 ULINT_UNDEFINED, &heap);
4327 if (UNIV_LIKELY_NULL(heap)) {
4336 #define PRINT_NUM_OF_LOCK_STRUCTS
4339 #ifdef PRINT_NUM_OF_LOCK_STRUCTS
4345 lock_get_n_rec_locks(
void)
4352 ut_ad(mutex_own(&kernel_mutex));
4384 lock_mutex_enter_kernel();
4385 }
else if (mutex_enter_nowait(&kernel_mutex)) {
4386 fputs(
"FAIL TO OBTAIN KERNEL MUTEX, "
4387 "SKIP LOCK INFO PRINTING\n", file);
4391 if (lock_deadlock_found) {
4392 fputs(
"------------------------\n"
4393 "LATEST DETECTED DEADLOCK\n"
4394 "------------------------\n", file);
4399 fputs(
"------------\n"
4401 "------------\n", file);
4403 fprintf(file,
"Trx id counter " TRX_ID_FMT "\n",
4413 "History list length %lu\n",
4416 #ifdef PRINT_NUM_OF_LOCK_STRUCTS
4418 "Total number of lock structs in row lock hash table %lu\n",
4419 (ulong) lock_get_n_rec_locks());
4433 ibool load_page_first = TRUE;
4440 fprintf(file,
"LIST OF TRANSACTIONS FOR EACH SESSION:\n");
4465 while (trx && (i < nth_trx)) {
4471 lock_mutex_exit_kernel();
4473 ut_ad(lock_validate());
4478 if (nth_lock == 0) {
4484 "Trx read view will not see trx with"
4491 if (trx->
que_state == TRX_QUE_LOCK_WAIT) {
4493 "------- TRX HAS BEEN WAITING %lu SEC"
4494 " FOR THIS LOCK TO BE GRANTED:\n",
4495 (ulong) difftime(time(NULL),
4504 fputs(
"------------------\n", file);
4508 if (!srv_print_innodb_lock_monitor) {
4520 while (lock && (i < nth_lock)) {
4533 if (load_page_first) {
4535 ulint zip_size= fil_space_get_zip_size(space);
4538 if (UNIV_UNLIKELY(zip_size == ULINT_UNDEFINED)) {
4546 fprintf(file,
"RECORD LOCKS on"
4547 " non-existing space %lu\n",
4552 lock_mutex_exit_kernel();
4561 load_page_first = FALSE;
4563 lock_mutex_enter_kernel();
4576 load_page_first = TRUE;
4580 if (nth_lock >= 10) {
4581 fputs(
"10 LOCKS PRINTED FOR THIS TRX:"
4582 " SUPPRESSING FURTHER PRINTS\n",
4600 lock_table_queue_validate(
4606 ut_ad(mutex_own(&kernel_mutex));
4611 ut_a(((lock->
trx)->conc_state == TRX_ACTIVE)
4612 || ((lock->
trx)->conc_state == TRX_PREPARED)
4613 || ((lock->
trx)->conc_state == TRX_COMMITTED_IN_MEMORY));
4615 if (!lock_get_wait(lock)) {
4617 ut_a(!lock_table_other_has_incompatible(
4618 lock->
trx, 0, table,
4619 lock_get_mode(lock)));
4622 ut_a(lock_table_has_to_wait_in_queue(lock));
/* NOTE(review): extraction fragment -- large interior holes (embedded
numbers jump 4698 -> 4732).  Comments only; code left byte-identical. */
/* Validates the lock queue on a single record (block, heap_no):
checks waiting locks, implicit-to-explicit lock consistency, and
that granted non-gap locks do not coexist with conflicting requests. */
4636 lock_rec_queue_validate(
4641 const ulint* offsets)
4654 lock_mutex_enter_kernel();
/* First pass over the queue: waiting locks must be justified. */
4658 lock = lock_rec_get_first(block, heap_no);
4664 case TRX_COMMITTED_IN_MEMORY:
4672 if (lock_get_wait(lock)) {
4673 ut_a(lock_rec_has_to_wait_in_queue(lock));
4680 lock = lock_rec_get_next(heap_no, lock);
4683 lock_mutex_exit_kernel();
/* If some transaction holds an implicit x-lock on the record and
another transaction has an explicit S request (possibly waiting),
the implicit lock must already have been converted to an explicit
LOCK_X | LOCK_REC_NOT_GAP.  Clustered-index branch; the missing
condition head presumably tests impl_trx != NULL -- TODO confirm. */
4694 && lock_rec_other_has_expl_req(LOCK_S, 0, LOCK_WAIT,
4695 block, heap_no, impl_trx)) {
4697 ut_a(lock_rec_has_expl(LOCK_X | LOCK_REC_NOT_GAP,
4698 block, heap_no, impl_trx));
/* Same check for a secondary index: the implicit-lock owner is found
via the clustered index (off-kernel lookup helper). */
4732 impl_trx = lock_sec_rec_some_has_impl_off_kernel(
4733 rec, index, offsets);
4736 && lock_rec_other_has_expl_req(LOCK_S, 0, LOCK_WAIT,
4737 block, heap_no, impl_trx)) {
4739 ut_a(lock_rec_has_expl(LOCK_X | LOCK_REC_NOT_GAP,
4740 block, heap_no, impl_trx));
/* Second pass: a granted record (non-gap) lock implies no other
transaction has a stronger-or-equal explicit request; S-mode locks
check against X requests (and vice versa -- mode selection partly
missing from this fragment). */
4745 lock = lock_rec_get_first(block, heap_no);
4757 if (!lock_rec_get_gap(lock) && !lock_get_wait(lock)) {
4759 enum lock_mode mode;
4761 if (lock_get_mode(lock) == LOCK_S) {
4766 ut_a(!lock_rec_other_has_expl_req(
4767 mode, 0, 0, block, heap_no, lock->
trx));
4769 }
else if (lock_get_wait(lock) && !lock_rec_get_gap(lock)) {
4771 ut_a(lock_rec_has_to_wait_in_queue(lock));
4774 lock = lock_rec_get_next(heap_no, lock);
4777 lock_mutex_exit_kernel();
/* NOTE(review): extraction fragment with interior holes (embedded
numbers jump, e.g. 4828 -> 4840).  Comments only; code byte-identical. */
/* Validates all record lock queues on one page (space, page_no):
fetches the page with an x-latch, then walks every lock struct on
the page and every bit set in each lock's record bitmap. */
4787 lock_rec_validate_page(
4803 ulint offsets_[REC_OFFS_NORMAL_SIZE];
4804 ulint* offsets = offsets_;
4805 rec_offs_init(offsets_);
/* Must NOT hold the kernel mutex on entry: buf_page_get() below can
do I/O, which is forbidden while holding it. */
4807 ut_ad(!mutex_own(&kernel_mutex));
4811 zip_size = fil_space_get_zip_size(space);
4812 ut_ad(zip_size != ULINT_UNDEFINED);
4813 block =
buf_page_get(space, zip_size, page_no, RW_X_LATCH, &mtr);
4814 buf_block_dbg_add_level(block, SYNC_NO_ORDER_CHECK);
4816 page = block->
frame;
4818 lock_mutex_enter_kernel();
/* Resume at the nth_lock'th lock on the page: the kernel mutex is
released around lock_rec_queue_validate(), so the walk restarts
from the head each time. */
4820 lock = lock_rec_get_first_on_page_addr(space, page_no);
4826 for (i = 0; i < nth_lock; i++) {
4828 lock = lock_rec_get_next_on_page(lock);
4840 # ifdef UNIV_SYNC_DEBUG
4845 if (!sync_thread_levels_contains(SYNC_FSP))
/* Scan the lock's bitmap from the resume bit; heap_no 1 (infimum --
TODO confirm semantics) appears to be validated unconditionally. */
4847 for (i = nth_bit; i < lock_rec_get_n_bits(lock); i++) {
4849 if (i == 1 || lock_rec_get_nth_bit(lock, i)) {
4851 index = lock->
index;
4854 offsets = rec_get_offsets(rec, index, offsets,
4855 ULINT_UNDEFINED, &heap);
4858 "Validating %lu %lu\n",
4859 (ulong) space, (ulong) page_no);
/* Kernel mutex released around the recursive queue validation,
then re-acquired to continue the scan. */
4861 lock_mutex_exit_kernel();
4868 lock_rec_queue_validate(block, rec, index, offsets);
4870 lock_mutex_enter_kernel();
4884 lock_mutex_exit_kernel();
/* Free the offsets heap if rec_get_offsets() had to allocate one. */
4888 if (UNIV_LIKELY_NULL(heap)) {
/* NOTE(review): extraction fragment of the global lock-validation driver
(function header not visible).  Comments only; code byte-identical. */
4909 lock_mutex_enter_kernel();
/* Validate every table lock queue ... */
4919 lock_table_queue_validate(
/* ... then every record lock page, ordered by a combined
(space, page_no) 64-bit key so pages already validated
(below 'limit') are skipped on each rescan. */
4937 ib_uint64_t space_page;
4945 if (space_page >= limit) {
/* Kernel mutex dropped around the per-page validation, which does
buffer-pool I/O, then re-taken. */
4957 lock_mutex_exit_kernel();
4959 lock_rec_validate_page(space, page_no);
4961 lock_mutex_enter_kernel();
4967 lock_mutex_exit_kernel();
/* NOTE(review): extraction fragment of the insert check-and-lock routine
(header and several interior lines missing).  Comments only. */
/* Checks whether an insert at this position must wait: an insert
conflicts only with gap-type locks held by others on the record
FOLLOWING the insert point (next_rec). */
4997 const rec_t* next_rec;
5001 ulint next_rec_heap_no;
/* Locking disabled for this operation (e.g. internal/system work). */
5005 if (flags & BTR_NO_LOCKING_FLAG) {
5014 lock_mutex_enter_kernel();
/* The inserting transaction is expected to already hold a table-level
intention lock (IX; condition partly missing from this fragment). */
5019 ut_ad(lock_table_has(trx, index->
table, LOCK_IX)
5021 && lock_table_has(trx, index->
table, LOCK_S)));
/* Fast path: no explicit locks at all on the successor record means
nobody can block the insert. */
5023 lock = lock_rec_get_first(block, next_rec_heap_no);
5025 if (UNIV_LIKELY(lock == NULL)) {
5028 lock_mutex_exit_kernel();
/* Otherwise, if another transaction holds a conflicting lock on the
gap, enqueue a waiting insert-intention gap X-lock.
NOTE(review): static_cast<> is C++ syntax -- this file is apparently
compiled as C++ despite the .c hint; confirm against the build. */
5054 if (lock_rec_other_has_conflicting(
5055 static_cast<lock_mode>(LOCK_X | LOCK_GAP | LOCK_INSERT_INTENTION),
5056 block, next_rec_heap_no, trx)) {
5059 err = lock_rec_enqueue_waiting(LOCK_X | LOCK_GAP
5060 | LOCK_INSERT_INTENTION,
5061 block, next_rec_heap_no,
5067 lock_mutex_exit_kernel();
/* Debug-only: validate the successor record's lock queue. */
5086 ulint offsets_[REC_OFFS_NORMAL_SIZE];
5087 const ulint* offsets;
5088 rec_offs_init(offsets_);
5090 offsets = rec_get_offsets(next_rec, index, offsets_,
5091 ULINT_UNDEFINED, &heap);
5092 ut_ad(lock_rec_queue_validate(block,
5093 next_rec, index, offsets));
5094 if (UNIV_LIKELY_NULL(heap)) {
/* NOTE(review): extraction fragment (parameter list and branch structure
partly missing).  Comments only; code byte-identical. */
/* If some active transaction holds an implicit x-lock on the record,
materialize it as an explicit LOCK_X | LOCK_REC_NOT_GAP in the lock
queue, unless an equivalent explicit lock is already present. */
5109 lock_rec_convert_impl_to_expl(
5114 const ulint* offsets)
/* Lock queues may only be modified under the kernel mutex. */
5118 ut_ad(mutex_own(&kernel_mutex));
/* Secondary-index branch: the implicit-lock owner must be looked up
via the clustered index record. */
5126 impl_trx = lock_sec_rec_some_has_impl_off_kernel(
5127 rec, index, offsets);
/* Add the explicit lock only if the owner does not already have one
on this record. */
5136 if (!lock_rec_has_expl(LOCK_X | LOCK_REC_NOT_GAP, block,
5137 heap_no, impl_trx)) {
5139 lock_rec_add_to_queue(
5140 LOCK_REC | LOCK_X | LOCK_REC_NOT_GAP,
5141 block, heap_no, index, impl_trx);
/* NOTE(review): extraction fragment of the clustered-record modify
check-and-lock routine (header missing).  Comments only. */
5164 const ulint* offsets,
/* Locking suppressed for this operation. */
5174 if (flags & BTR_NO_LOCKING_FLAG) {
5183 lock_mutex_enter_kernel();
/* A modify must first surface any implicit x-lock as an explicit one,
then request an explicit record-only (non-gap) X lock; the first
TRUE argument presumably means "implicit allowed/impl" -- see
lock_rec_lock's declaration to confirm. */
5190 lock_rec_convert_impl_to_expl(block, rec, index, offsets);
5192 err = lock_rec_lock(TRUE, LOCK_X | LOCK_REC_NOT_GAP,
5193 block, heap_no, index, thr);
5195 lock_mutex_exit_kernel();
/* Debug-only consistency check of the record's lock queue. */
5197 ut_ad(lock_rec_queue_validate(block, rec, index, offsets));
/* NOTE(review): extraction fragment of the secondary-index modify
check-and-lock routine (header missing).  Comments only.
Unlike the clustered variant above in the original file, no
impl-to-expl conversion is visible here before lock_rec_lock(). */
5232 if (flags & BTR_NO_LOCKING_FLAG) {
5244 lock_mutex_enter_kernel();
/* Request an explicit non-gap X lock on the record. */
5248 err = lock_rec_lock(TRUE, LOCK_X | LOCK_REC_NOT_GAP,
5249 block, heap_no, index, thr);
5251 lock_mutex_exit_kernel();
/* Debug-only: recompute offsets and validate the lock queue. */
5256 ulint offsets_[REC_OFFS_NORMAL_SIZE];
5257 const ulint* offsets;
5258 rec_offs_init(offsets_);
5260 offsets = rec_get_offsets(rec, index, offsets_,
5261 ULINT_UNDEFINED, &heap);
5262 ut_ad(lock_rec_queue_validate(block, rec, index, offsets));
5263 if (UNIV_LIKELY_NULL(heap)) {
/* NOTE(review): extraction fragment of a read check-and-lock routine
(header missing; interior holes).  Comments only. */
5300 const ulint* offsets,
5301 enum lock_mode mode,
/* Reads lock in either shared or exclusive mode only. */
5317 ut_ad(mode == LOCK_X || mode == LOCK_S);
5319 if (flags & BTR_NO_LOCKING_FLAG) {
5326 lock_mutex_enter_kernel();
/* Two ut_ad's whose right-hand conditions are missing from this
fragment; presumably they tie the lock mode to the transaction's
isolation/table-lock state -- confirm against the full source. */
5328 ut_ad(mode != LOCK_X
5330 ut_ad(mode != LOCK_S
/* Surface any implicit x-lock before queuing the read lock. */
5341 lock_rec_convert_impl_to_expl(block, rec, index, offsets);
5344 err = lock_rec_lock(FALSE, mode | gap_mode,
5345 block, heap_no, index, thr);
5347 lock_mutex_exit_kernel();
5349 ut_ad(lock_rec_queue_validate(block, rec, index, offsets));
/* NOTE(review): extraction fragment of the clustered-index read
check-and-lock routine (header missing).  Comments only. */
5379 const ulint* offsets,
5380 enum lock_mode mode,
/* gap_mode is constrained to the record-only variant here (the other
accepted values are missing from this fragment). */
5396 || gap_mode == LOCK_REC_NOT_GAP);
5399 if (flags & BTR_NO_LOCKING_FLAG) {
5406 lock_mutex_enter_kernel();
5408 ut_ad(mode != LOCK_X
5410 ut_ad(mode != LOCK_S
/* The supremum pseudo-record can carry no implicit lock, so the
impl-to-expl conversion is skipped for it. */
5413 if (UNIV_LIKELY(heap_no != PAGE_HEAP_NO_SUPREMUM)) {
5415 lock_rec_convert_impl_to_expl(block, rec, index, offsets);
5418 err = lock_rec_lock(FALSE, mode | gap_mode,
5419 block, heap_no, index, thr);
5421 lock_mutex_exit_kernel();
5423 ut_ad(lock_rec_queue_validate(block, rec, index, offsets));
/* NOTE(review): extraction fragment of a convenience wrapper that
computes record offsets itself (using a temporary heap) and then
delegates to the offsets-taking read check-and-lock routine. */
5449 enum lock_mode mode,
5459 ulint offsets_[REC_OFFS_NORMAL_SIZE];
5460 ulint* offsets = offsets_;
5462 rec_offs_init(offsets_);
5464 offsets = rec_get_offsets(rec, index, offsets,
5465 ULINT_UNDEFINED, &tmp_heap);
/* Trailing arguments of the delegated call; the call head is missing
from this fragment. */
5467 offsets, mode, gap_mode, thr);
/* NOTE(review): extraction fragment covering two routines: releasing the
most recently acquired AUTO-INC table lock, and (below) its caller
that drains the transaction's autoinc_locks stack.  Comments only. */
/* Pop and release the last AUTO-INC lock from the vector. */
5483 lock_release_autoinc_last_lock(
/* Lock queues are modified only under the kernel mutex. */
5490 ut_ad(mutex_own(&kernel_mutex));
/* Only AUTO-INC mode locks may live in this vector. */
5498 ut_a(lock_get_mode(lock) == LOCK_AUTO_INC);
5504 lock_table_dequeue(lock);
5516 ut_a(trx->autoinc_locks != NULL);
/* Caller: releases every AUTO-INC lock the transaction holds,
newest-first (releasing the last element repeatedly). */
5529 ut_ad(mutex_own(&kernel_mutex));
5531 ut_a(trx->autoinc_locks != NULL);
5540 lock_release_autoinc_last_lock(trx->autoinc_locks);
/* NOTE(review): extraction fragments of several small lock accessors;
every function header is missing, so only the return expressions
identify them.  Comments only; code byte-identical. */
/* Accessor: id of the transaction owning the lock. */
5569 return(lock->
trx->
id);
/* Fragment of a predicate combining lock mode with the gap flag. */
5585 && lock_rec_get_gap(lock);
5587 switch (lock_get_mode(lock)) {
/* Accessors resolving the table a (table) lock refers to ... */
5670 table = lock_get_table(lock);
5687 table = lock_get_table(lock);
/* ... and returning its name ... */
5689 return(table->
name);
/* ... and the index a record lock refers to. */
5703 return(lock->
index);