lock0lock.c
- rec_t* sup;
- lock_mutex_enter_kernel();
- lock = lock_rec_get_first_on_page(page);
- if (lock == NULL) {
- lock_mutex_exit_kernel();
- return;
- }
- heap = mem_heap_create(256);
-
- /* Copy first all the locks on the page to heap and reset the
- bitmaps in the original locks; chain the copies of the locks
- using the trx_locks field in them. */
- UT_LIST_INIT(old_locks);
-
- while (lock != NULL) {
- /* Make a copy of the lock */
- old_lock = lock_rec_copy(lock, heap);
- UT_LIST_ADD_LAST(trx_locks, old_locks, old_lock);
- /* Reset bitmap of lock */
- lock_rec_bitmap_reset(lock);
- if (lock_get_wait(lock)) {
- lock_reset_lock_and_trx_wait(lock);
- }
- lock = lock_rec_get_next_on_page(lock);
- }
- sup = page_get_supremum_rec(page);
-
- lock = UT_LIST_GET_FIRST(old_locks);
- while (lock) {
- /* NOTE: we copy also the locks set on the infimum and
- supremum of the page; the infimum may carry locks if an
- update of a record is occurring on the page, and its locks
- were temporarily stored on the infimum */
-
- page_cur_set_before_first(page, &cur1);
- page_cur_set_before_first(old_page, &cur2);
- /* Set locks according to old locks */
- for (;;) {
- ut_ad(0 == ut_memcmp(page_cur_get_rec(&cur1),
- page_cur_get_rec(&cur2),
- rec_get_data_size(
- page_cur_get_rec(&cur2))));
-
- old_heap_no = rec_get_heap_no(page_cur_get_rec(&cur2));
- if (lock_rec_get_nth_bit(lock, old_heap_no)) {
- /* NOTE that the old lock bitmap could be too
- small for the new heap number! */
- lock_rec_add_to_queue(lock->type_mode,
- page_cur_get_rec(&cur1),
- lock->index, lock->trx);
- /* if ((page_cur_get_rec(&cur1) == sup)
- && lock_get_wait(lock)) {
- fprintf(stderr,
- "---n--n!!!Lock reorg: supr type %lun",
- lock->type_mode);
- } */
- }
- if (page_cur_get_rec(&cur1) == sup) {
- break;
- }
- page_cur_move_to_next(&cur1);
- page_cur_move_to_next(&cur2);
- }
- /* Remember that we chained old locks on the trx_locks field: */
- lock = UT_LIST_GET_NEXT(trx_locks, lock);
- }
- lock_mutex_exit_kernel();
- mem_heap_free(heap);
- /* ut_ad(lock_rec_validate_page(buf_frame_get_space_id(page),
- buf_frame_get_page_no(page))); */
- }
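- /* Illustrative sketch (not part of lock0lock.c): the reorganize path above
- copies every lock on the page, clears the original bitmaps, and then re-sets
- bits while walking the old and the new page in lockstep. The standalone code
- below mirrors only that copy-reset-replay idea on a fixed-size bitmap; the
- struct and helper names are invented for the sketch and are not InnoDB
- APIs. */
- #include <string.h>
-
- enum { SKETCH_N_BITS = 64 };
-
- struct sketch_rec_lock {
- 	unsigned char	bitmap[SKETCH_N_BITS / 8];	/* one bit per heap no */
- };
-
- static int
- sketch_bit_is_set(const struct sketch_rec_lock* l, unsigned heap_no)
- {
- 	return((l->bitmap[heap_no / 8] >> (heap_no % 8)) & 1);
- }
-
- static void
- sketch_set_bit(struct sketch_rec_lock* l, unsigned heap_no)
- {
- 	l->bitmap[heap_no / 8] |= (unsigned char) (1 << (heap_no % 8));
- }
-
- /* Copy the old lock, wipe the original, and replay the copied bits under a
- mapping from old heap numbers to new ones; the caller must map every value
- into [0, SKETCH_N_BITS), unlike the real code, whose comment notes that the
- old bitmap may even be too small for the new heap number. */
- static void
- sketch_replay(struct sketch_rec_lock* lock, const unsigned* new_heap_no)
- {
- 	struct sketch_rec_lock	old_lock = *lock;	/* like lock_rec_copy() */
- 	unsigned		i;
-
- 	memset(lock->bitmap, 0, sizeof lock->bitmap);	/* like lock_rec_bitmap_reset() */
-
- 	for (i = 0; i < SKETCH_N_BITS; i++) {
- 		if (sketch_bit_is_set(&old_lock, i)) {
- 			sketch_set_bit(lock, new_heap_no[i]);	/* like lock_rec_add_to_queue() */
- 		}
- 	}
- }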
- /*****************************************************************
- Moves the explicit locks on user records to another page if a record
- list end is moved to another page. */
- void
- lock_move_rec_list_end(
- /*===================*/
- page_t* new_page, /* in: index page to move to */
- page_t* page, /* in: index page */
- rec_t* rec) /* in: record on page: this is the
- first record moved */
- {
- lock_t* lock;
- page_cur_t cur1;
- page_cur_t cur2;
- ulint heap_no;
- rec_t* sup;
- ulint type_mode;
-
- lock_mutex_enter_kernel();
- /* Note: when we move locks from record to record, waiting locks
- and possible granted gap type locks behind them are enqueued in
- the original order, because new elements are inserted to a hash
- table to the end of the hash chain, and lock_rec_add_to_queue
- does not reuse locks if there are waiters in the queue. */
- sup = page_get_supremum_rec(page);
-
- lock = lock_rec_get_first_on_page(page);
- while (lock != NULL) {
-
- page_cur_position(rec, &cur1);
- if (page_cur_is_before_first(&cur1)) {
- page_cur_move_to_next(&cur1);
- }
- page_cur_set_before_first(new_page, &cur2);
- page_cur_move_to_next(&cur2);
-
- /* Copy lock requests on user records to new page and
- reset the lock bits on the old */
- while (page_cur_get_rec(&cur1) != sup) {
- ut_ad(0 == ut_memcmp(page_cur_get_rec(&cur1),
- page_cur_get_rec(&cur2),
- rec_get_data_size(
- page_cur_get_rec(&cur2))));
-
- heap_no = rec_get_heap_no(page_cur_get_rec(&cur1));
- if (lock_rec_get_nth_bit(lock, heap_no)) {
- type_mode = lock->type_mode;
- lock_rec_reset_nth_bit(lock, heap_no);
- if (lock_get_wait(lock)) {
- lock_reset_lock_and_trx_wait(lock);
- }
- lock_rec_add_to_queue(type_mode,
- page_cur_get_rec(&cur2),
- lock->index, lock->trx);
- }
- page_cur_move_to_next(&cur1);
- page_cur_move_to_next(&cur2);
- }
- lock = lock_rec_get_next_on_page(lock);
- }
-
- lock_mutex_exit_kernel();
- /* ut_ad(lock_rec_validate_page(buf_frame_get_space_id(page),
- buf_frame_get_page_no(page)));
- ut_ad(lock_rec_validate_page(buf_frame_get_space_id(new_page),
- buf_frame_get_page_no(new_page))); */
- }
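- /* Illustrative sketch (not part of lock0lock.c): the note at the top of the
- function above relies on new lock structs always being appended to the end
- of their hash chain, so waiting requests keep their original relative order
- when they are re-enqueued on the new page. The standalone append-to-tail
- chain below shows why arrival order is preserved; the type and function
- names are invented for the sketch. */
- #include <stddef.h>
-
- struct sketch_node {
- 	int			arrival_no;
- 	struct sketch_node*	next;
- };
-
- /* Append at the tail: after any sequence of appends, walking the chain from
- the head visits the nodes in exactly the order they were inserted. */
- static void
- sketch_chain_append(struct sketch_node** head, struct sketch_node* node)
- {
- 	struct sketch_node**	p = head;
-
- 	node->next = NULL;
-
- 	while (*p != NULL) {
- 		p = &(*p)->next;
- 	}
-
- 	*p = node;
- }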
- /*****************************************************************
- Moves the explicit locks on user records to another page if a record
- list start is moved to another page. */
- void
- lock_move_rec_list_start(
- /*=====================*/
- page_t* new_page, /* in: index page to move to */
- page_t* page, /* in: index page */
- rec_t* rec, /* in: record on page: this is the
- first record NOT copied */
- rec_t* old_end) /* in: old previous-to-last record on
- new_page before the records were copied */
- {
- lock_t* lock;
- page_cur_t cur1;
- page_cur_t cur2;
- ulint heap_no;
- ulint type_mode;
- ut_a(new_page);
- lock_mutex_enter_kernel();
- lock = lock_rec_get_first_on_page(page);
- while (lock != NULL) {
-
- page_cur_set_before_first(page, &cur1);
- page_cur_move_to_next(&cur1);
- page_cur_position(old_end, &cur2);
- page_cur_move_to_next(&cur2);
- /* Copy lock requests on user records to new page and
- reset the lock bits on the old */
- while (page_cur_get_rec(&cur1) != rec) {
- ut_ad(0 == ut_memcmp(page_cur_get_rec(&cur1),
- page_cur_get_rec(&cur2),
- rec_get_data_size(
- page_cur_get_rec(&cur2))));
-
- heap_no = rec_get_heap_no(page_cur_get_rec(&cur1));
- if (lock_rec_get_nth_bit(lock, heap_no)) {
- type_mode = lock->type_mode;
- lock_rec_reset_nth_bit(lock, heap_no);
- if (lock_get_wait(lock)) {
- lock_reset_lock_and_trx_wait(lock);
- }
- lock_rec_add_to_queue(type_mode,
- page_cur_get_rec(&cur2),
- lock->index, lock->trx);
- }
- page_cur_move_to_next(&cur1);
- page_cur_move_to_next(&cur2);
- }
- lock = lock_rec_get_next_on_page(lock);
- }
-
- lock_mutex_exit_kernel();
- /* ut_ad(lock_rec_validate_page(buf_frame_get_space_id(page),
- buf_frame_get_page_no(page)));
- ut_ad(lock_rec_validate_page(buf_frame_get_space_id(new_page),
- buf_frame_get_page_no(new_page))); */
- }
- /*****************************************************************
- Updates the lock table when a page is split to the right. */
- void
- lock_update_split_right(
- /*====================*/
- page_t* right_page, /* in: right page */
- page_t* left_page) /* in: left page */
- {
- lock_mutex_enter_kernel();
-
- /* Move the locks on the supremum of the left page to the supremum
- of the right page */
- lock_rec_move(page_get_supremum_rec(right_page),
- page_get_supremum_rec(left_page));
-
- /* Inherit the locks to the supremum of left page from the successor
- of the infimum on right page */
- lock_rec_inherit_to_gap(page_get_supremum_rec(left_page),
- page_rec_get_next(page_get_infimum_rec(right_page)));
- lock_mutex_exit_kernel();
- }
- /*****************************************************************
- Updates the lock table when a page is merged to the right. */
- void
- lock_update_merge_right(
- /*====================*/
- rec_t* orig_succ, /* in: original successor of infimum
- on the right page before merge */
- page_t* left_page) /* in: merged index page which will be
- discarded */
- {
- lock_mutex_enter_kernel();
-
- /* Inherit the locks from the supremum of the left page to the
- original successor of infimum on the right page, to which the left
- page was merged */
- lock_rec_inherit_to_gap(orig_succ, page_get_supremum_rec(left_page));
- /* Reset the locks on the supremum of the left page, releasing
- waiting transactions */
- lock_rec_reset_and_release_wait(page_get_supremum_rec(left_page));
-
- lock_rec_free_all_from_discard_page(left_page);
- lock_mutex_exit_kernel();
- }
- /*****************************************************************
- Updates the lock table when the root page is copied to another in
- btr_root_raise_and_insert. Note that we leave lock structs on the
- root page, even though they do not make sense on other than leaf
- pages: the reason is that in a pessimistic update the infimum record
- of the root page will act as a dummy carrier of the locks of the record
- to be updated. */
- void
- lock_update_root_raise(
- /*===================*/
- page_t* new_page, /* in: index page to which copied */
- page_t* root) /* in: root page */
- {
- lock_mutex_enter_kernel();
-
- /* Move the locks on the supremum of the root to the supremum
- of new_page */
- lock_rec_move(page_get_supremum_rec(new_page),
- page_get_supremum_rec(root));
- lock_mutex_exit_kernel();
- }
- /*****************************************************************
- Updates the lock table when a page is copied to another and the original page
- is removed from the chain of leaf pages, except if page is the root! */
- void
- lock_update_copy_and_discard(
- /*=========================*/
- page_t* new_page, /* in: index page to which copied */
- page_t* page) /* in: index page; NOT the root! */
- {
- lock_mutex_enter_kernel();
-
- /* Move the locks on the supremum of the old page to the supremum
- of new_page */
- lock_rec_move(page_get_supremum_rec(new_page),
- page_get_supremum_rec(page));
- lock_rec_free_all_from_discard_page(page);
- lock_mutex_exit_kernel();
- }
- /*****************************************************************
- Updates the lock table when a page is split to the left. */
- void
- lock_update_split_left(
- /*===================*/
- page_t* right_page, /* in: right page */
- page_t* left_page) /* in: left page */
- {
- lock_mutex_enter_kernel();
-
- /* Inherit the locks to the supremum of the left page from the
- successor of the infimum on the right page */
- lock_rec_inherit_to_gap(page_get_supremum_rec(left_page),
- page_rec_get_next(page_get_infimum_rec(right_page)));
- lock_mutex_exit_kernel();
- }
- /*****************************************************************
- Updates the lock table when a page is merged to the left. */
- void
- lock_update_merge_left(
- /*===================*/
- page_t* left_page, /* in: left page to which merged */
- rec_t* orig_pred, /* in: original predecessor of supremum
- on the left page before merge */
- page_t* right_page) /* in: merged index page which will be
- discarded */
- {
- lock_mutex_enter_kernel();
-
- if (page_rec_get_next(orig_pred) != page_get_supremum_rec(left_page)) {
- /* Inherit the locks on the supremum of the left page to the
- first record which was moved from the right page */
- lock_rec_inherit_to_gap(page_rec_get_next(orig_pred),
- page_get_supremum_rec(left_page));
- /* Reset the locks on the supremum of the left page,
- releasing waiting transactions */
- lock_rec_reset_and_release_wait(page_get_supremum_rec(
- left_page));
- }
- /* Move the locks from the supremum of right page to the supremum
- of the left page */
-
- lock_rec_move(page_get_supremum_rec(left_page),
- page_get_supremum_rec(right_page));
- lock_rec_free_all_from_discard_page(right_page);
- lock_mutex_exit_kernel();
- }
- /*****************************************************************
- Resets the original locks on heir and replaces them with gap type locks
- inherited from rec. */
- void
- lock_rec_reset_and_inherit_gap_locks(
- /*=================================*/
- rec_t* heir, /* in: heir record */
- rec_t* rec) /* in: record */
- {
- mutex_enter(&kernel_mutex);
- lock_rec_reset_and_release_wait(heir);
-
- lock_rec_inherit_to_gap(heir, rec);
- mutex_exit(&kernel_mutex);
- }
- /*****************************************************************
- Updates the lock table when a page is discarded. */
- void
- lock_update_discard(
- /*================*/
- rec_t* heir, /* in: record which will inherit the locks */
- page_t* page) /* in: index page which will be discarded */
- {
- rec_t* rec;
- lock_mutex_enter_kernel();
-
- if (NULL == lock_rec_get_first_on_page(page)) {
- /* No locks exist on page, nothing to do */
- lock_mutex_exit_kernel();
- return;
- }
-
- /* Inherit all the locks on the page to the record and reset all
- the locks on the page */
- rec = page_get_infimum_rec(page);
- for (;;) {
- lock_rec_inherit_to_gap(heir, rec);
- /* Reset the locks on rec, releasing waiting transactions */
- lock_rec_reset_and_release_wait(rec);
- if (rec == page_get_supremum_rec(page)) {
- break;
- }
-
- rec = page_rec_get_next(rec);
- }
- lock_rec_free_all_from_discard_page(page);
- lock_mutex_exit_kernel();
- }
- /*****************************************************************
- Updates the lock table when a new user record is inserted. */
- void
- lock_update_insert(
- /*===============*/
- rec_t* rec) /* in: the inserted record */
- {
- lock_mutex_enter_kernel();
- /* Inherit the gap-locking locks for rec, in gap mode, from the next
- record */
- lock_rec_inherit_to_gap_if_gap_lock(rec, page_rec_get_next(rec));
- lock_mutex_exit_kernel();
- }
- /*****************************************************************
- Updates the lock table when a record is removed. */
- void
- lock_update_delete(
- /*===============*/
- rec_t* rec) /* in: the record to be removed */
- {
- lock_mutex_enter_kernel();
- /* Let the next record inherit the locks from rec, in gap mode */
- lock_rec_inherit_to_gap(page_rec_get_next(rec), rec);
- /* Reset the lock bits on rec and release waiting transactions */
- lock_rec_reset_and_release_wait(rec);
- lock_mutex_exit_kernel();
- }
-
- /*************************************************************************
- Stores on the page infimum record the explicit locks of another record.
- This function is used to store the lock state of a record when it is
- updated and the size of the record changes in the update. The record
- is moved in such an update, perhaps to another page. The infimum record
- acts as a dummy carrier record, taking care of lock releases while the
- actual record is being moved. */
- void
- lock_rec_store_on_page_infimum(
- /*===========================*/
- rec_t* rec) /* in: record whose lock state is stored
- on the infimum record of the same page; lock
- bits are reset on the record */
- {
- page_t* page;
- page = buf_frame_align(rec);
- lock_mutex_enter_kernel();
-
- lock_rec_move(page_get_infimum_rec(page), rec);
- lock_mutex_exit_kernel();
- }
- /*************************************************************************
- Restores the state of explicit lock requests on a single record, where the
- state was stored on the infimum of the page. */
- void
- lock_rec_restore_from_page_infimum(
- /*===============================*/
- rec_t* rec, /* in: record whose lock state is restored */
- page_t* page) /* in: page (rec is not necessarily on this page)
- whose infimum stored the lock state; lock bits are
- reset on the infimum */
- {
- lock_mutex_enter_kernel();
-
- lock_rec_move(rec, page_get_infimum_rec(page));
-
- lock_mutex_exit_kernel();
- }
- /*=========== DEADLOCK CHECKING ======================================*/
- /************************************************************************
- Checks if a lock request results in a deadlock. */
- static
- ibool
- lock_deadlock_occurs(
- /*=================*/
- /* out: TRUE if a deadlock was detected and we
- chose trx as a victim; FALSE if no deadlock, or
- there was a deadlock, but we chose other
- transaction(s) as victim(s) */
- lock_t* lock, /* in: lock the transaction is requesting */
- trx_t* trx) /* in: transaction */
- {
- dict_table_t* table;
- dict_index_t* index;
- trx_t* mark_trx;
- ulint ret;
- ulint cost = 0;
- ut_ad(trx && lock);
- #ifdef UNIV_SYNC_DEBUG
- ut_ad(mutex_own(&kernel_mutex));
- #endif /* UNIV_SYNC_DEBUG */
- retry:
- /* We check that adding this trx to the waits-for graph
- does not produce a cycle. First mark all active transactions
- with 0: */
- mark_trx = UT_LIST_GET_FIRST(trx_sys->trx_list);
- while (mark_trx) {
- mark_trx->deadlock_mark = 0;
- mark_trx = UT_LIST_GET_NEXT(trx_list, mark_trx);
- }
- ret = lock_deadlock_recursive(trx, trx, lock, &cost);
- if (ret == LOCK_VICTIM_IS_OTHER) {
- /* We chose some other trx as a victim: retry if there still
- is a deadlock */
- goto retry;
- }
- if (ret == LOCK_VICTIM_IS_START) {
- if (lock_get_type(lock) & LOCK_TABLE) {
- table = lock->un_member.tab_lock.table;
- index = NULL;
- } else {
- index = lock->index;
- table = index->table;
- }
- lock_deadlock_found = TRUE;
- fputs("*** WE ROLL BACK TRANSACTION (2)n",
- lock_latest_err_file);
- return(TRUE);
- }
-
- return(FALSE);
- }
- /************************************************************************
- Looks recursively for a deadlock. */
- static
- ulint
- lock_deadlock_recursive(
- /*====================*/
- /* out: 0 if no deadlock found,
- LOCK_VICTIM_IS_START if there was a deadlock
- and we chose 'start' as the victim,
- LOCK_VICTIM_IS_OTHER if a deadlock
- was found and we chose some other trx as a
- victim: we must do the search again in this
- last case because there may be another
- deadlock! */
- trx_t* start, /* in: recursion starting point */
- trx_t* trx, /* in: a transaction waiting for a lock */
- lock_t* wait_lock, /* in: the lock trx is waiting to be granted */
- ulint* cost) /* in/out: number of calculation steps thus
- far: if this exceeds LOCK_MAX_N_STEPS_...
- we return LOCK_VICTIM_IS_START */
- {
- lock_t* lock;
- ulint bit_no = ULINT_UNDEFINED;
- trx_t* lock_trx;
- ulint ret;
-
- ut_a(trx && start && wait_lock);
- #ifdef UNIV_SYNC_DEBUG
- ut_ad(mutex_own(&kernel_mutex));
- #endif /* UNIV_SYNC_DEBUG */
-
- if (trx->deadlock_mark == 1) {
- /* We have already exhaustively searched the subtree starting
- from this trx */
- return(0);
- }
- *cost = *cost + 1;
- if (*cost > LOCK_MAX_N_STEPS_IN_DEADLOCK_CHECK) {
- return(LOCK_VICTIM_IS_START);
- }
- lock = wait_lock;
- if (lock_get_type(wait_lock) == LOCK_REC) {
- bit_no = lock_rec_find_set_bit(wait_lock);
- ut_a(bit_no != ULINT_UNDEFINED);
- }
- /* Look at the locks ahead of wait_lock in the lock queue */
- for (;;) {
- if (lock_get_type(lock) & LOCK_TABLE) {
- lock = UT_LIST_GET_PREV(un_member.tab_lock.locks, lock);
- } else {
- ut_ad(lock_get_type(lock) == LOCK_REC);
- ut_a(bit_no != ULINT_UNDEFINED);
- lock = lock_rec_get_prev(lock, bit_no);
- }
- if (lock == NULL) {
- /* We can mark this subtree as searched */
- trx->deadlock_mark = 1;
- return(FALSE);
- }
- if (lock_has_to_wait(wait_lock, lock)) {
- lock_trx = lock->trx;
- if (lock_trx == start) {
- /* We came back to the recursion starting
- point: a deadlock detected */
- FILE* ef = lock_latest_err_file;
-
- rewind(ef);
- ut_print_timestamp(ef);
- fputs("n*** (1) TRANSACTION:n", ef);
- trx_print(ef, wait_lock->trx);
- fputs(
- "*** (1) WAITING FOR THIS LOCK TO BE GRANTED:n", ef);
-
- if (lock_get_type(wait_lock) == LOCK_REC) {
- lock_rec_print(ef, wait_lock);
- } else {
- lock_table_print(ef, wait_lock);
- }
-
- fputs("*** (2) TRANSACTION:n", ef);
- trx_print(ef, lock->trx);
- fputs("*** (2) HOLDS THE LOCK(S):n", ef);
-
- if (lock_get_type(lock) == LOCK_REC) {
- lock_rec_print(ef, lock);
- } else {
- lock_table_print(ef, lock);
- }
-
- fputs(
- "*** (2) WAITING FOR THIS LOCK TO BE GRANTED:n", ef);
-
- if (lock_get_type(start->wait_lock)
- == LOCK_REC) {
- lock_rec_print(ef, start->wait_lock);
- } else {
- lock_table_print(ef, start->wait_lock);
- }
- if (lock_print_waits) {
- fputs("Deadlock detectedn", stderr);
- }
- if (ut_dulint_cmp(wait_lock->trx->undo_no,
- start->undo_no) >= 0) {
- /* Our recursion starting point
- transaction is 'smaller', let us
- choose 'start' as the victim and roll
- back it */
- return(LOCK_VICTIM_IS_START);
- }
- lock_deadlock_found = TRUE;
- /* Let us choose the transaction of wait_lock
- as a victim to try to avoid deadlocking our
- recursion starting point transaction */
-
- fputs("*** WE ROLL BACK TRANSACTION (1)n",
- ef);
-
- wait_lock->trx->was_chosen_as_deadlock_victim
- = TRUE;
-
- lock_cancel_waiting_and_release(wait_lock);
- /* Since trx and wait_lock are no longer
- in the waits-for graph, we can return FALSE;
- note that our selective algorithm can choose
- several transactions as victims, but still
- we may end up rolling back also the recursion
- starting point transaction! */
- return(LOCK_VICTIM_IS_OTHER);
- }
-
- if (lock_trx->que_state == TRX_QUE_LOCK_WAIT) {
- /* Another trx ahead has requested lock in an
- incompatible mode, and is itself waiting for
- a lock */
- ret = lock_deadlock_recursive(start, lock_trx,
- lock_trx->wait_lock, cost);
- if (ret != 0) {
- return(ret);
- }
- }
- }
- }/* end of the 'for (;;)'-loop */
- }
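- /* Illustrative sketch (not part of lock0lock.c): lock_deadlock_recursive()
- above is a depth-first search of the waits-for graph with a per-trx
- "already searched" mark, a global step budget, and victim selection when a
- cycle back to the starting transaction is closed. The standalone code below
- keeps only that search skeleton on a plain adjacency-list graph and always
- blames the starting transaction, omitting the undo-number comparison that
- lets the real code pick the other transaction instead; all names and the
- step limit are invented for the sketch. */
- #include <stddef.h>
-
- #define SKETCH_MAX_STEPS	10000	/* arbitrary budget, like LOCK_MAX_N_STEPS_... */
-
- struct sketch_trx {
- 	struct sketch_trx**	waits_for;	/* trxs holding conflicting locks ahead */
- 	size_t			n_waits_for;
- 	int			searched;	/* like trx->deadlock_mark */
- };
-
- enum { SKETCH_NO_DEADLOCK = 0, SKETCH_VICTIM_IS_START = 1 };
-
- static int
- sketch_deadlock_search(struct sketch_trx* start, struct sketch_trx* trx,
- 		       size_t* cost)
- {
- 	size_t	i;
-
- 	if (trx->searched) {
- 		return(SKETCH_NO_DEADLOCK);	/* subtree already exhausted */
- 	}
-
- 	if (++*cost > SKETCH_MAX_STEPS) {
- 		/* Too expensive: give up and treat the starter as the victim;
- 		as in the real code, this budget is also what guarantees
- 		termination for cycles that do not pass through 'start' */
- 		return(SKETCH_VICTIM_IS_START);
- 	}
-
- 	for (i = 0; i < trx->n_waits_for; i++) {
- 		struct sketch_trx*	ahead = trx->waits_for[i];
- 		int			ret;
-
- 		if (ahead == start) {
- 			return(SKETCH_VICTIM_IS_START);	/* cycle closed */
- 		}
-
- 		ret = sketch_deadlock_search(start, ahead, cost);
-
- 		if (ret != SKETCH_NO_DEADLOCK) {
- 			return(ret);
- 		}
- 	}
-
- 	trx->searched = 1;	/* every path out of this trx was explored */
-
- 	return(SKETCH_NO_DEADLOCK);
- }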
- /*========================= TABLE LOCKS ==============================*/
- /*************************************************************************
- Creates a table lock object and adds it as the last in the lock queue
- of the table. Does NOT check for deadlocks or lock compatibility. */
- UNIV_INLINE
- lock_t*
- lock_table_create(
- /*==============*/
- /* out, own: new lock object, or NULL if
- out of memory */
- dict_table_t* table, /* in: database table in dictionary cache */
- ulint type_mode,/* in: lock mode possibly ORed with
- LOCK_WAIT */
- trx_t* trx) /* in: trx */
- {
- lock_t* lock;
- ut_ad(table && trx);
- #ifdef UNIV_SYNC_DEBUG
- ut_ad(mutex_own(&kernel_mutex));
- #endif /* UNIV_SYNC_DEBUG */
- if (type_mode == LOCK_AUTO_INC) {
- /* Only one trx can have the lock on the table
- at a time: we may use the memory preallocated
- to the table object */
- lock = table->auto_inc_lock;
- ut_a(trx->auto_inc_lock == NULL);
- trx->auto_inc_lock = lock;
- } else {
- lock = mem_heap_alloc(trx->lock_heap, sizeof(lock_t));
- }
- if (lock == NULL) {
- return(NULL);
- }
- UT_LIST_ADD_LAST(trx_locks, trx->trx_locks, lock);
- lock->type_mode = type_mode | LOCK_TABLE;
- lock->trx = trx;
- if (lock_get_type(lock) == LOCK_TABLE_EXP) {
- lock->trx->n_lock_table_exp++;
- }
- lock->un_member.tab_lock.table = table;
- UT_LIST_ADD_LAST(un_member.tab_lock.locks, table->locks, lock);
- if (type_mode & LOCK_WAIT) {
- lock_set_lock_and_trx_wait(lock, trx);
- }
- return(lock);
- }
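- /* Illustrative sketch (not part of lock0lock.c): a single lock struct above
- is linked into two lists at once, the owning trx's list through its
- trx_locks node and the table's lock queue through un_member.tab_lock.locks;
- the UT_LIST_* macros pick the embedded node by naming the member. The
- standalone sketch below shows the same intrusive two-node layout with a
- plain function instead of the macros; all names are invented for the
- sketch, and the analogous helper for the table queue would use the
- table_locks member instead. */
- #include <stddef.h>
-
- struct sketch_lock;
-
- struct sketch_list_node {
- 	struct sketch_lock*	prev;
- 	struct sketch_lock*	next;
- };
-
- struct sketch_list_base {
- 	struct sketch_lock*	first;
- 	struct sketch_lock*	last;
- };
-
- struct sketch_lock {
- 	struct sketch_list_node	trx_locks;	/* node for the trx's lock list */
- 	struct sketch_list_node	table_locks;	/* node for the table lock queue */
- };
-
- /* Append to a trx's lock list using the lock's trx_locks node, the way
- UT_LIST_ADD_LAST(trx_locks, trx->trx_locks, lock) is used above. */
- static void
- sketch_trx_list_add_last(struct sketch_list_base* base, struct sketch_lock* lock)
- {
- 	lock->trx_locks.prev = base->last;
- 	lock->trx_locks.next = NULL;
-
- 	if (base->last != NULL) {
- 		base->last->trx_locks.next = lock;
- 	} else {
- 		base->first = lock;
- 	}
-
- 	base->last = lock;
- }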
- /*****************************************************************
- Removes a table lock request from the queue and the trx list of locks;
- this is a low-level function which does NOT check if waiting requests
- can now be granted. */
- UNIV_INLINE
- void
- lock_table_remove_low(
- /*==================*/
- lock_t* lock) /* in: table lock */
- {
- dict_table_t* table;
- trx_t* trx;
- #ifdef UNIV_SYNC_DEBUG
- ut_ad(mutex_own(&kernel_mutex));
- #endif /* UNIV_SYNC_DEBUG */
- table = lock->un_member.tab_lock.table;
- trx = lock->trx;
- if (lock == trx->auto_inc_lock) {
- trx->auto_inc_lock = NULL;
- }
- if (lock_get_type(lock) == LOCK_TABLE_EXP) {
- lock->trx->n_lock_table_exp--;
- }
- UT_LIST_REMOVE(trx_locks, trx->trx_locks, lock);
- UT_LIST_REMOVE(un_member.tab_lock.locks, table->locks, lock);
- }
- /*************************************************************************
- Enqueues a waiting request for a table lock which cannot be granted
- immediately. Checks for deadlocks. */
- static
- ulint
- lock_table_enqueue_waiting(
- /*=======================*/
- /* out: DB_LOCK_WAIT, DB_DEADLOCK, or
- DB_QUE_THR_SUSPENDED, or DB_SUCCESS;
- DB_SUCCESS means that there was a deadlock,
- but another transaction was chosen as a
- victim, and we got the lock immediately:
- no need to wait then */
- ulint mode, /* in: lock mode this transaction is
- requesting */
- dict_table_t* table, /* in: table */
- que_thr_t* thr) /* in: query thread */
- {
- lock_t* lock;
- trx_t* trx;
-
- #ifdef UNIV_SYNC_DEBUG
- ut_ad(mutex_own(&kernel_mutex));
- #endif /* UNIV_SYNC_DEBUG */
-
- /* Test if there already is some other reason to suspend thread:
- we do not enqueue a lock request if the query thread should be
- stopped anyway */
- if (que_thr_stop(thr)) {
- ut_error;
- return(DB_QUE_THR_SUSPENDED);
- }
- trx = thr_get_trx(thr);
- if (trx->dict_operation) {
- ut_print_timestamp(stderr);
- fputs(
- " InnoDB: Error: a table lock wait happens in a dictionary operation!\n"
- "InnoDB: Table name ", stderr);
- ut_print_name(stderr, trx, table->name);
- fputs(".\n"
- "InnoDB: Submit a detailed bug report to http://bugs.mysql.com\n",
- stderr);
- }
-
- /* Enqueue the lock request that will wait to be granted */
- lock = lock_table_create(table, mode | LOCK_WAIT, trx);
- /* Check if a deadlock occurs: if yes, remove the lock request and
- return an error code */
- if (lock_deadlock_occurs(lock, trx)) {
- lock_reset_lock_and_trx_wait(lock);
- lock_table_remove_low(lock);
- return(DB_DEADLOCK);
- }
- if (trx->wait_lock == NULL) {
- /* Deadlock resolution chose another transaction as a victim,
- and we accidentally got our lock granted! */
-
- return(DB_SUCCESS);
- }
-
- trx->que_state = TRX_QUE_LOCK_WAIT;
- trx->was_chosen_as_deadlock_victim = FALSE;
- trx->wait_started = time(NULL);
- ut_a(que_thr_stop(thr));
- return(DB_LOCK_WAIT);
- }
- /*************************************************************************
- Checks if other transactions have an incompatible mode lock request in
- the lock queue. */
- UNIV_INLINE
- ibool
- lock_table_other_has_incompatible(
- /*==============================*/
- trx_t* trx, /* in: transaction, or NULL if all
- transactions should be included */
- ulint wait, /* in: LOCK_WAIT if also waiting locks are
- taken into account, or 0 if not */
- dict_table_t* table, /* in: table */
- ulint mode) /* in: lock mode */
- {
- lock_t* lock;
- #ifdef UNIV_SYNC_DEBUG
- ut_ad(mutex_own(&kernel_mutex));
- #endif /* UNIV_SYNC_DEBUG */
- lock = UT_LIST_GET_LAST(table->locks);
- while (lock != NULL) {
- if ((lock->trx != trx)
- && (!lock_mode_compatible(lock_get_mode(lock), mode))
- && (wait || !(lock_get_wait(lock)))) {
- return(TRUE);
- }
- lock = UT_LIST_GET_PREV(un_member.tab_lock.locks, lock);
- }
- return(FALSE);
- }
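- /* Illustrative sketch (not part of lock0lock.c): the queue scan above defers
- the actual mode test to lock_mode_compatible(). The matrix below is a
- compact restatement of the usual InnoDB table lock compatibility rules for
- IS, IX, S, X and AUTO-INC; the enum values and helper are invented for the
- sketch and do not reuse this file's LOCK_* constants. */
- enum sketch_mode { SK_IS = 0, SK_IX, SK_S, SK_X, SK_AI, SK_N_MODES };
-
- static const int sketch_compat[SK_N_MODES][SK_N_MODES] = {
- 		/*	IS IX  S  X AI */
- 	/* IS */	{ 1, 1, 1, 0, 1 },
- 	/* IX */	{ 1, 1, 0, 0, 1 },
- 	/* S  */	{ 1, 0, 1, 0, 0 },
- 	/* X  */	{ 0, 0, 0, 0, 0 },
- 	/* AI */	{ 1, 1, 0, 0, 0 }
- };
-
- /* Nonzero if a lock in mode 'a' can be granted while another transaction
- already holds a lock in mode 'b' on the same table. */
- static int
- sketch_mode_compatible(enum sketch_mode a, enum sketch_mode b)
- {
- 	return(sketch_compat[a][b]);
- }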
- /*************************************************************************
- Locks the specified database table in the mode given. If the lock cannot
- be granted immediately, the query thread is put to wait. */
- ulint
- lock_table(
- /*=======*/
- /* out: DB_SUCCESS, DB_LOCK_WAIT,
- DB_DEADLOCK, or DB_QUE_THR_SUSPENDED */
- ulint flags, /* in: if BTR_NO_LOCKING_FLAG bit is set,
- does nothing;
- if LOCK_TABLE_EXP bits are set,
- creates an explicit table lock */
- dict_table_t* table, /* in: database table in dictionary cache */
- ulint mode, /* in: lock mode */
- que_thr_t* thr) /* in: query thread */
- {
- trx_t* trx;
- ulint err;
-
- ut_ad(table && thr);
- if (flags & BTR_NO_LOCKING_FLAG) {
- return(DB_SUCCESS);
- }
- ut_a(flags == 0 || flags == LOCK_TABLE_EXP);
- trx = thr_get_trx(thr);
- lock_mutex_enter_kernel();
- /* Look for stronger locks the same trx already has on the table */
- if (lock_table_has(trx, table, mode)) {
- lock_mutex_exit_kernel();
- return(DB_SUCCESS);
- }
- /* We have to check if the new lock is compatible with any locks
- other transactions have in the table lock queue. */
- if (lock_table_other_has_incompatible(trx, LOCK_WAIT, table, mode)) {
-
- /* Another trx has a request on the table in an incompatible
- mode: this trx may have to wait */
- err = lock_table_enqueue_waiting(mode, table, thr);
-
- lock_mutex_exit_kernel();
- return(err);
- }
- lock_table_create(table, mode | flags, trx);
- ut_a(!flags || mode == LOCK_S || mode == LOCK_X);
- lock_mutex_exit_kernel();
- return(DB_SUCCESS);
- }
- /*************************************************************************
- Checks if there are any locks set on the table. */
- ibool
- lock_is_on_table(
- /*=============*/
- /* out: TRUE if there are lock(s) */
- dict_table_t* table) /* in: database table in dictionary cache */
- {
- ibool ret;
- ut_ad(table);
- lock_mutex_enter_kernel();
- if (UT_LIST_GET_LAST(table->locks)) {
- ret = TRUE;
- } else {
- ret = FALSE;
- }
- lock_mutex_exit_kernel();
- return(ret);
- }
- /*************************************************************************
- Checks if a waiting table lock request still has to wait in a queue. */
- static
- ibool
- lock_table_has_to_wait_in_queue(
- /*============================*/
- /* out: TRUE if still has to wait */
- lock_t* wait_lock) /* in: waiting table lock */
- {
- dict_table_t* table;
- lock_t* lock;
- ut_ad(lock_get_wait(wait_lock));
-
- table = wait_lock->un_member.tab_lock.table;
- lock = UT_LIST_GET_FIRST(table->locks);
- while (lock != wait_lock) {
- if (lock_has_to_wait(wait_lock, lock)) {
- return(TRUE);
- }
- lock = UT_LIST_GET_NEXT(un_member.tab_lock.locks, lock);
- }
- return(FALSE);
- }
- /*****************************************************************
- Removes a table lock request, waiting or granted, from the queue and grants
- locks to other transactions in the queue, if they now are entitled to a
- lock. */
- static
- void
- lock_table_dequeue(
- /*===============*/
- lock_t* in_lock)/* in: table lock object; transactions waiting
- behind will get their lock requests granted, if
- they are now qualified to it */
- {
- lock_t* lock;
-
- #ifdef UNIV_SYNC_DEBUG
- ut_ad(mutex_own(&kernel_mutex));
- #endif /* UNIV_SYNC_DEBUG */
- ut_a(lock_get_type(in_lock) == LOCK_TABLE ||
- lock_get_type(in_lock) == LOCK_TABLE_EXP);
- lock = UT_LIST_GET_NEXT(un_member.tab_lock.locks, in_lock);
- lock_table_remove_low(in_lock);
- /* Check if waiting locks in the queue can now be granted: grant
- locks if there are no conflicting locks ahead. */
- while (lock != NULL) {
- if (lock_get_wait(lock)
- && !lock_table_has_to_wait_in_queue(lock)) {
- /* Grant the lock */
- lock_grant(lock);
- }
- lock = UT_LIST_GET_NEXT(un_member.tab_lock.locks, lock);
- }
- }
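- /* Illustrative sketch (not part of lock0lock.c): lock_table_dequeue() above
- follows a common queue pattern: remove one request, then walk the requests
- that were behind it and grant every waiting request that no longer has a
- conflicting request ahead of it. The standalone code below shows that
- pattern on a plain array; the request type and the conflict test are
- invented for the sketch and only distinguish shared from exclusive. */
- #include <stddef.h>
-
- struct sketch_req {
- 	int	granted;	/* nonzero once the request holds the lock */
- 	int	exclusive;	/* nonzero for an exclusive request */
- };
-
- /* Minimal stand-in for lock_has_to_wait(): shared requests are mutually
- compatible, anything involving an exclusive request conflicts. */
- static int
- sketch_conflicts(const struct sketch_req* a, const struct sketch_req* b)
- {
- 	return(a->exclusive || b->exclusive);
- }
-
- /* Nonzero if queue[i] still has a conflicting request ahead of it, the same
- front-to-'i' scan as lock_table_has_to_wait_in_queue() above. */
- static int
- sketch_has_to_wait(const struct sketch_req* queue, size_t i)
- {
- 	size_t	j;
-
- 	for (j = 0; j < i; j++) {
- 		if (sketch_conflicts(&queue[i], &queue[j])) {
- 			return(1);
- 		}
- 	}
-
- 	return(0);
- }
-
- /* After one request has been removed from queue[0..n), grant every waiting
- request that is no longer blocked, scanning front to back. */
- static void
- sketch_grant_after_dequeue(struct sketch_req* queue, size_t n)
- {
- 	size_t	i;
-
- 	for (i = 0; i < n; i++) {
- 		if (!queue[i].granted && !sketch_has_to_wait(queue, i)) {
- 			queue[i].granted = 1;
- 		}
- 	}
- }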
- /*=========================== LOCK RELEASE ==============================*/
- /*************************************************************************
- Releases a table lock.
- Releases possible other transactions waiting for this lock. */
- void
- lock_table_unlock(
- /*==============*/
- lock_t* lock) /* in: lock */
- {
- mutex_enter(&kernel_mutex);
- lock_table_dequeue(lock);
- mutex_exit(&kernel_mutex);
- }
- /*************************************************************************
- Releases an auto-inc lock a transaction possibly has on a table.
- Releases possible other transactions waiting for this lock. */
- void
- lock_table_unlock_auto_inc(
- /*=======================*/
- trx_t* trx) /* in: transaction */
- {
- if (trx->auto_inc_lock) {
- mutex_enter(&kernel_mutex);
- lock_table_dequeue(trx->auto_inc_lock);
- mutex_exit(&kernel_mutex);
- }
- }
- /*************************************************************************
- Releases transaction locks, and releases possible other transactions waiting
- because of these locks. */
- void
- lock_release_off_kernel(
- /*====================*/
- trx_t* trx) /* in: transaction */
- {
- dict_table_t* table;
- ulint count;
- lock_t* lock;
- #ifdef UNIV_SYNC_DEBUG
- ut_ad(mutex_own(&kernel_mutex));
- #endif /* UNIV_SYNC_DEBUG */
- lock = UT_LIST_GET_LAST(trx->trx_locks);
-
- count = 0;
- while (lock != NULL) {
- count++;
- if (lock_get_type(lock) == LOCK_REC) {
-
- lock_rec_dequeue_from_page(lock);
- } else {
- ut_ad(lock_get_type(lock) & LOCK_TABLE);
- if (lock_get_mode(lock) != LOCK_IS
- && 0 != ut_dulint_cmp(trx->undo_no,
- ut_dulint_zero)) {
- /* The trx may have modified the table.
- We block the use of the MySQL query cache
- for all currently active transactions. */
- table = lock->un_member.tab_lock.table;
-
- table->query_cache_inv_trx_id =
- trx_sys->max_trx_id;
- }
- lock_table_dequeue(lock);
- if (lock_get_type(lock) == LOCK_TABLE_EXP) {
- ut_a(lock_get_mode(lock) == LOCK_S
- || lock_get_mode(lock) == LOCK_X);
- }
- }
- if (count == LOCK_RELEASE_KERNEL_INTERVAL) {
- /* Release the kernel mutex for a while, so that we
- do not monopolize it */
- lock_mutex_exit_kernel();
- lock_mutex_enter_kernel();
- count = 0;
- }
- lock = UT_LIST_GET_LAST(trx->trx_locks);
- }
- mem_heap_empty(trx->lock_heap);
- ut_a(trx->auto_inc_lock == NULL);
- ut_a(trx->n_lock_table_exp == 0);
- }
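- /* Illustrative sketch (not part of lock0lock.c): the loop above drops and
- re-takes the kernel mutex every LOCK_RELEASE_KERNEL_INTERVAL released locks
- so that a transaction with a very long lock list does not monopolize the
- mutex. The standalone pthread loop below shows the same "periodically yield
- the mutex" pattern; the interval, the callback, and the assumption that the
- items array is private to the caller (the real code must re-read the trx's
- lock list after re-acquiring) are all specific to the sketch. */
- #include <pthread.h>
-
- #define SKETCH_YIELD_INTERVAL	1000
-
- static void
- sketch_release_all(pthread_mutex_t* mutex, void (*release_one)(void*),
- 		   void** items, unsigned long n_items)
- {
- 	unsigned long	i;
- 	unsigned long	count = 0;
-
- 	pthread_mutex_lock(mutex);
-
- 	for (i = 0; i < n_items; i++) {
- 		release_one(items[i]);
-
- 		if (++count == SKETCH_YIELD_INTERVAL) {
- 			/* Let other threads in briefly, then continue */
- 			pthread_mutex_unlock(mutex);
- 			pthread_mutex_lock(mutex);
- 			count = 0;
- 		}
- 	}
-
- 	pthread_mutex_unlock(mutex);
- }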
- /*************************************************************************
- Releases table locks explicitly requested with LOCK TABLES (indicated by
- lock type LOCK_TABLE_EXP), and releases possible other transactions waiting
- because of these locks. */
- void
- lock_release_tables_off_kernel(
- /*===========================*/
- trx_t* trx) /* in: transaction */
- {
- dict_table_t* table;
- ulint count;
- lock_t* lock;
- #ifdef UNIV_SYNC_DEBUG
- ut_ad(mutex_own(&kernel_mutex));
- #endif /* UNIV_SYNC_DEBUG */
- lock = UT_LIST_GET_LAST(trx->trx_locks);
- count = 0;
- while (lock != NULL) {
- count++;
- if (lock_get_type(lock) == LOCK_TABLE_EXP) {
- ut_a(lock_get_mode(lock) == LOCK_S
- || lock_get_mode(lock) == LOCK_X);
- if (trx->insert_undo || trx->update_undo) {
- /* The trx may have modified the table.
- We block the use of the MySQL query
- cache for all currently active
- transactions. */
- table = lock->un_member.tab_lock.table;
- table->query_cache_inv_trx_id =
- trx_sys->max_trx_id;
- }
- lock_table_dequeue(lock);
- lock = UT_LIST_GET_LAST(trx->trx_locks);
- continue;
- }
- if (count == LOCK_RELEASE_KERNEL_INTERVAL) {
- /* Release the kernel mutex for a while, so that we
- do not monopolize it */
- lock_mutex_exit_kernel();
- lock_mutex_enter_kernel();
- count = 0;
- }
- lock = UT_LIST_GET_PREV(trx_locks, lock);
- }
- ut_a(trx->n_lock_table_exp == 0);
- }
- /*************************************************************************
- Cancels a waiting lock request and releases possible other transactions
- waiting behind it. */
- void
- lock_cancel_waiting_and_release(
- /*============================*/
- lock_t* lock) /* in: waiting lock request */
- {
- #ifdef UNIV_SYNC_DEBUG
- ut_ad(mutex_own(&kernel_mutex));
- #endif /* UNIV_SYNC_DEBUG */
- if (lock_get_type(lock) == LOCK_REC) {
-
- lock_rec_dequeue_from_page(lock);
- } else {
- ut_ad(lock_get_type(lock) & LOCK_TABLE);
- lock_table_dequeue(lock);
- }
- /* Reset the wait flag and the back pointer to lock in trx */
- lock_reset_lock_and_trx_wait(lock);
- /* The following function releases the trx from lock wait */
- trx_end_lock_wait(lock->trx);
- }
- /*************************************************************************
- Resets all record and table locks of a transaction on a table to be dropped.
- No lock is allowed to be a wait lock. */
- static
- void
- lock_reset_all_on_table_for_trx(
- /*============================*/
- dict_table_t* table, /* in: table to be dropped */
- trx_t* trx) /* in: a transaction */
- {
- lock_t* lock;
- lock_t* prev_lock;
- #ifdef UNIV_SYNC_DEBUG
- ut_ad(mutex_own(&kernel_mutex));
- #endif /* UNIV_SYNC_DEBUG */
- lock = UT_LIST_GET_LAST(trx->trx_locks);
-
- while (lock != NULL) {
- prev_lock = UT_LIST_GET_PREV(trx_locks, lock);
-
- if (lock_get_type(lock) == LOCK_REC
- && lock->index->table == table) {
- ut_a(!lock_get_wait(lock));
-
- lock_rec_discard(lock);
- } else if (lock_get_type(lock) & LOCK_TABLE
- && lock->un_member.tab_lock.table == table) {
- ut_a(!lock_get_wait(lock));
-
- lock_table_remove_low(lock);
- }
- lock = prev_lock;
- }
- }
- /*************************************************************************
- Resets all locks, both table and record locks, on a table to be dropped.
- No lock is allowed to be a wait lock. */
- void
- lock_reset_all_on_table(
- /*====================*/
- dict_table_t* table) /* in: table to be dropped */
- {
- lock_t* lock;
- mutex_enter(&kernel_mutex);
- lock = UT_LIST_GET_FIRST(table->locks);
- while (lock) {
- ut_a(!lock_get_wait(lock));
- lock_reset_all_on_table_for_trx(table, lock->trx);
- lock = UT_LIST_GET_FIRST(table->locks);
- }
- mutex_exit(&kernel_mutex);
- }
- /*===================== VALIDATION AND DEBUGGING ====================*/
- /*************************************************************************
- Prints info of a table lock. */
- void
- lock_table_print(
- /*=============*/
- FILE* file, /* in: file where to print */
- lock_t* lock) /* in: table type lock */
- {
- #ifdef UNIV_SYNC_DEBUG
- ut_ad(mutex_own(&kernel_mutex));
- #endif /* UNIV_SYNC_DEBUG */
- ut_a(lock_get_type(lock) == LOCK_TABLE ||
- lock_get_type(lock) == LOCK_TABLE_EXP);
- if (lock_get_type(lock) == LOCK_TABLE_EXP) {
- fputs("EXPLICIT ", file);
- }
- fputs("TABLE LOCK table ", file);
- ut_print_name(file, lock->trx, lock->un_member.tab_lock.table->name);
- fprintf(file, " trx id %lu %lu",
- (ulong) (lock->trx)->id.high, (ulong) (lock->trx)->id.low);
- if (lock_get_mode(lock) == LOCK_S) {
- fputs(" lock mode S", file);
- } else if (lock_get_mode(lock) == LOCK_X) {
- fputs(" lock mode X", file);
- } else if (lock_get_mode(lock) == LOCK_IS) {
- fputs(" lock mode IS", file);
- } else if (lock_get_mode(lock) == LOCK_IX) {
- fputs(" lock mode IX", file);
- } else if (lock_get_mode(lock) == LOCK_AUTO_INC) {
- fputs(" lock mode AUTO-INC", file);
- } else {
- fprintf(file, " unknown lock mode %lu", (ulong) lock_get_mode(lock));
- }
- if (lock_get_wait(lock)) {
- fputs(" waiting", file);
- }
- putc('\n', file);
- }
-
- /*************************************************************************
- Prints info of a record lock. */
- void
- lock_rec_print(
- /*===========*/
- FILE* file, /* in: file where to print */
- lock_t* lock) /* in: record type lock */
- {
- page_t* page;
- ulint space;
- ulint page_no;
- ulint i;
- mtr_t mtr;
- #ifdef UNIV_SYNC_DEBUG
- ut_ad(mutex_own(&kernel_mutex));
- #endif /* UNIV_SYNC_DEBUG */
- ut_a(lock_get_type(lock) == LOCK_REC);
- space = lock->un_member.rec_lock.space;
- page_no = lock->un_member.rec_lock.page_no;
- fprintf(file, "RECORD LOCKS space id %lu page no %lu n bits %lu ",
- (ulong) space, (ulong) page_no,
- (ulong) lock_rec_get_n_bits(lock));
- dict_index_name_print(file, lock->trx, lock->index);
- fprintf(file, " trx id %lu %lu",
- (ulong) (lock->trx)->id.high,
- (ulong) (lock->trx)->id.low);
- if (lock_get_mode(lock) == LOCK_S) {
- fputs(" lock mode S", file);
- } else if (lock_get_mode(lock) == LOCK_X) {
- fputs(" lock_mode X", file);
- } else {
- ut_error;
- }
- if (lock_rec_get_gap(lock)) {
- fputs(" locks gap before rec", file);
- }
- if (lock_rec_get_rec_not_gap(lock)) {
- fputs(" locks rec but not gap", file);
- }
- if (lock_rec_get_insert_intention(lock)) {
- fputs(" insert intention", file);
- }
- if (lock_get_wait(lock)) {
- fputs(" waiting", file);
- }
- mtr_start(&mtr);
- putc('\n', file);
- /* If the page is not in the buffer pool, we cannot load it
- because we have the kernel mutex and ibuf operations would
- break the latching order */
-
- page = buf_page_get_gen(space, page_no, RW_NO_LATCH,
- NULL, BUF_GET_IF_IN_POOL,
- __FILE__, __LINE__, &mtr);
- if (page) {
- page = buf_page_get_nowait(space, page_no, RW_S_LATCH, &mtr);
- if (!page) {
- /* Let us try to get an X-latch. If the current thread
- is holding an X-latch on the page, we cannot get an
- S-latch. */
-
- page = buf_page_get_nowait(space, page_no, RW_X_LATCH,
- &mtr);
- }
- }
-
- if (page) {
- #ifdef UNIV_SYNC_DEBUG
- buf_page_dbg_add_level(page, SYNC_NO_ORDER_CHECK);
- #endif /* UNIV_SYNC_DEBUG */
- }
- for (i = 0; i < lock_rec_get_n_bits(lock); i++) {
- if (lock_rec_get_nth_bit(lock, i)) {
- fprintf(file, "Record lock, heap no %lu ", (ulong) i);
- if (page) {
- rec_print(file,
- page_find_rec_with_heap_no(page, i));
- }
- putc('\n', file);
- }
- }
- mtr_commit(&mtr);
- }
-
- /*************************************************************************
- Calculates the number of record lock structs in the record lock hash table. */
- static
- ulint
- lock_get_n_rec_locks(void)
- /*======================*/
- {
- lock_t* lock;
- ulint n_locks = 0;
- ulint i;
- #ifdef UNIV_SYNC_DEBUG
- ut_ad(mutex_own(&kernel_mutex));
- #endif /* UNIV_SYNC_DEBUG */
- for (i = 0; i < hash_get_n_cells(lock_sys->rec_hash); i++) {
- lock = HASH_GET_FIRST(lock_sys->rec_hash, i);
- while (lock) {
- n_locks++;
- lock = HASH_GET_NEXT(hash, lock);
- }
- }
- return(n_locks);
- }
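- /* Illustrative sketch (not part of lock0lock.c): lock_sys->rec_hash is a
- chained hash table, and the function above counts its entries by walking
- every cell and every chain. The standalone code below performs the same
- traversal over a plain array-of-heads representation; the types are invented
- for the sketch. */
- #include <stddef.h>
-
- struct sketch_hash_node {
- 	struct sketch_hash_node*	hash;	/* next node in the same cell */
- };
-
- static size_t
- sketch_hash_count(struct sketch_hash_node* const* cells, size_t n_cells)
- {
- 	size_t	i;
- 	size_t	n = 0;
-
- 	for (i = 0; i < n_cells; i++) {
- 		const struct sketch_hash_node*	node = cells[i];
-
- 		while (node != NULL) {
- 			n++;
- 			node = node->hash;
- 		}
- 	}
-
- 	return(n);
- }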
-
- /*************************************************************************
- Prints info of locks for all transactions. */
- void
- lock_print_info_summary(
- /*====================*/
- FILE* file) /* in: file where to print */
- {
- /* We must protect the MySQL thd->query field with a MySQL mutex, and
- because the MySQL mutex must be reserved before the kernel_mutex of
- InnoDB, we call innobase_mysql_prepare_print_arbitrary_thd() here. */
- innobase_mysql_prepare_print_arbitrary_thd();
- lock_mutex_enter_kernel();
- if (lock_deadlock_found) {
- fputs(
- "------------------------\n"
- "LATEST DETECTED DEADLOCK\n"
- "------------------------\n", file);
- ut_copy_file(file, lock_latest_err_file);
- }
- fputs(
- "------------\n"
- "TRANSACTIONS\n"
- "------------\n", file);
- fprintf(file, "Trx id counter %lu %lu\n",
- (ulong) ut_dulint_get_high(trx_sys->max_trx_id),
- (ulong) ut_dulint_get_low(trx_sys->max_trx_id));
- fprintf(file,
- "Purge done for trx's n:o < %lu %lu undo n:o < %lu %lun",
- (ulong) ut_dulint_get_high(purge_sys->purge_trx_no),
- (ulong) ut_dulint_get_low(purge_sys->purge_trx_no),
- (ulong) ut_dulint_get_high(purge_sys->purge_undo_no),
- (ulong) ut_dulint_get_low(purge_sys->purge_undo_no));
- fprintf(file,
- "History list length %lun", (ulong) trx_sys->rseg_history_len);
- fprintf(file,
- "Total number of lock structs in row lock hash table %lun",
- (ulong) lock_get_n_rec_locks());
- }
- /*************************************************************************
- Prints info of locks for each transaction. */
- void
- lock_print_info_all_transactions(
- /*=============================*/
- FILE* file) /* in: file where to print */
- {
- lock_t* lock;
- ulint space;
- ulint page_no;
- page_t* page;
- ibool load_page_first = TRUE;
- ulint nth_trx = 0;
- ulint nth_lock = 0;
- ulint i;
- mtr_t mtr;
- trx_t* trx;
- fprintf(file, "LIST OF TRANSACTIONS FOR EACH SESSION:n");
- /* First print info on non-active transactions */
- trx = UT_LIST_GET_FIRST(trx_sys->mysql_trx_list);
- while (trx) {
- if (trx->conc_state == TRX_NOT_STARTED) {
- fputs("---", file);
- trx_print(file, trx);
- }
-
- trx = UT_LIST_GET_NEXT(mysql_trx_list, trx);
- }
- loop:
- trx = UT_LIST_GET_FIRST(trx_sys->trx_list);
- i = 0;
- /* Since we temporarily release the kernel mutex when
- reading a database page in below, variable trx may be
- obsolete now and we must loop through the trx list to
- get probably the same trx, or some other trx. */
-
- while (trx && (i < nth_trx)) {
- trx = UT_LIST_GET_NEXT(trx_list, trx);
- i++;
- }
- if (trx == NULL) {
- lock_mutex_exit_kernel();
- innobase_mysql_end_print_arbitrary_thd();
- ut_ad(lock_validate());
- return;
- }
- if (nth_lock == 0) {
- fputs("---", file);
- trx_print(file, trx);
-
- if (trx->read_view) {
- fprintf(file,
- "Trx read view will not see trx with id >= %lu %lu, sees < %lu %lun",
- (ulong) ut_dulint_get_high(trx->read_view->low_limit_id),
- (ulong) ut_dulint_get_low(trx->read_view->low_limit_id),
- (ulong) ut_dulint_get_high(trx->read_view->up_limit_id),
- (ulong) ut_dulint_get_low(trx->read_view->up_limit_id));
- }
- if (trx->que_state == TRX_QUE_LOCK_WAIT) {
- fprintf(file,
- "------- TRX HAS BEEN WAITING %lu SEC FOR THIS LOCK TO BE GRANTED:n",
- (ulong)difftime(time(NULL), trx->wait_started));
- if (lock_get_type(trx->wait_lock) == LOCK_REC) {
- lock_rec_print(file, trx->wait_lock);
- } else {
- lock_table_print(file, trx->wait_lock);
- }
- fputs("------------------n", file);
- }
- }
- if (!srv_print_innodb_lock_monitor) {
- nth_trx++;
- goto loop;
- }
- i = 0;
- /* Look at the note about the trx loop above why we loop here:
- lock may be an obsolete pointer now. */
-
- lock = UT_LIST_GET_FIRST(trx->trx_locks);
-
- while (lock && (i < nth_lock)) {
- lock = UT_LIST_GET_NEXT(trx_locks, lock);
- i++;
- }
- if (lock == NULL) {
- nth_trx++;
- nth_lock = 0;
- goto loop;
- }
- if (lock_get_type(lock) == LOCK_REC) {
- space = lock->un_member.rec_lock.space;
- page_no = lock->un_member.rec_lock.page_no;
- if (load_page_first) {
- lock_mutex_exit_kernel();
- innobase_mysql_end_print_arbitrary_thd();
- mtr_start(&mtr);
-
- page = buf_page_get_with_no_latch(space, page_no, &mtr);
- mtr_commit(&mtr);
- load_page_first = FALSE;
- innobase_mysql_prepare_print_arbitrary_thd();
- lock_mutex_enter_kernel();
- goto loop;
- }
-
- lock_rec_print(file, lock);
- } else {
- ut_ad(lock_get_type(lock) & LOCK_TABLE);
-
- lock_table_print(file, lock);
- }
- load_page_first = TRUE;
- nth_lock++;
- if (nth_lock >= 10) {
- fputs(
- "10 LOCKS PRINTED FOR THIS TRX: SUPPRESSING FURTHER PRINTSn",
- file);
-
- nth_trx++;
- nth_lock = 0;
- goto loop;
- }
- goto loop;
- }
- /*************************************************************************
- Validates the lock queue on a table. */
- ibool
- lock_table_queue_validate(
- /*======================*/
- /* out: TRUE if ok */
- dict_table_t* table) /* in: table */
- {
- lock_t* lock;
- ibool is_waiting;
- #ifdef UNIV_SYNC_DEBUG
- ut_ad(mutex_own(&kernel_mutex));
- #endif /* UNIV_SYNC_DEBUG */
- is_waiting = FALSE;
- lock = UT_LIST_GET_FIRST(table->locks);
- while (lock) {
- ut_a(((lock->trx)->conc_state == TRX_ACTIVE)
- || ((lock->trx)->conc_state == TRX_COMMITTED_IN_MEMORY));
-
- if (!lock_get_wait(lock)) {
- ut_a(!is_waiting);
-
- ut_a(!lock_table_other_has_incompatible(lock->trx, 0,
- table, lock_get_mode(lock)));
- } else {
- is_waiting = TRUE;
- ut_a(lock_table_has_to_wait_in_queue(lock));
- }
- lock = UT_LIST_GET_NEXT(un_member.tab_lock.locks, lock);
- }
- return(TRUE);
- }
- /*************************************************************************
- Validates the lock queue on a single record. */
- ibool
- lock_rec_queue_validate(
- /*====================*/
- /* out: TRUE if ok */
- rec_t* rec, /* in: record to look at */
- dict_index_t* index) /* in: index, or NULL if not known */
- {
- trx_t* impl_trx;
- lock_t* lock;
-
- ut_a(rec);
- lock_mutex_enter_kernel();
- if (page_rec_is_supremum(rec) || page_rec_is_infimum(rec)) {
- lock = lock_rec_get_first(rec);
- while (lock) {
- ut_a(lock->trx->conc_state == TRX_ACTIVE
- || lock->trx->conc_state
- == TRX_COMMITTED_IN_MEMORY);
-
- ut_a(trx_in_trx_list(lock->trx));
-
- if (lock_get_wait(lock)) {
- ut_a(lock_rec_has_to_wait_in_queue(lock));
- }
- if (index) {
- ut_a(lock->index == index);
- }
- lock = lock_rec_get_next(rec, lock);
- }
- lock_mutex_exit_kernel();
- return(TRUE);
- }
- if (index && (index->type & DICT_CLUSTERED)) {
-
- impl_trx = lock_clust_rec_some_has_impl(rec, index);
- if (impl_trx && lock_rec_other_has_expl_req(LOCK_S, 0,
- LOCK_WAIT, rec, impl_trx)) {
- ut_a(lock_rec_has_expl(LOCK_X | LOCK_REC_NOT_GAP, rec,
- impl_trx));
- }
- }
- if (index && !(index->type & DICT_CLUSTERED)) {
-
- /* The kernel mutex may get released temporarily in the
- next function call: we have to release lock table mutex
- to obey the latching order */
-
- impl_trx = lock_sec_rec_some_has_impl_off_kernel(rec, index);
- if (impl_trx && lock_rec_other_has_expl_req(LOCK_S, 0,
- LOCK_WAIT, rec, impl_trx)) {
- ut_a(lock_rec_has_expl(LOCK_X | LOCK_REC_NOT_GAP, rec,
- impl_trx));
- }
- }
- lock = lock_rec_get_first(rec);
- while (lock) {
- ut_a(lock->trx->conc_state == TRX_ACTIVE
- || lock->trx->conc_state == TRX_COMMITTED_IN_MEMORY);
- ut_a(trx_in_trx_list(lock->trx));
-
- if (index) {
- ut_a(lock->index == index);
- }
- if (!lock_rec_get_gap(lock) && !lock_get_wait(lock)) {
-
- if (lock_get_mode(lock) == LOCK_S) {
- ut_a(!lock_rec_other_has_expl_req(LOCK_X,
- 0, 0, rec, lock->trx));
- } else {
- ut_a(!lock_rec_other_has_expl_req(LOCK_S,
- 0, 0, rec, lock->trx));
- }
- } else if (lock_get_wait(lock) && !lock_rec_get_gap(lock)) {
- ut_a(lock_rec_has_to_wait_in_queue(lock));
- }
- lock = lock_rec_get_next(rec, lock);
- }
- lock_mutex_exit_kernel();
- return(TRUE);
- }
- /*************************************************************************
- Validates the record lock queues on a page. */
- ibool
- lock_rec_validate_page(
- /*===================*/
- /* out: TRUE if ok */
- ulint space, /* in: space id */
- ulint page_no)/* in: page number */
- {
- dict_index_t* index;
- page_t* page;
- lock_t* lock;
- rec_t* rec;
- ulint nth_lock = 0;
- ulint nth_bit = 0;
- ulint i;
- mtr_t mtr;
- #ifdef UNIV_SYNC_DEBUG
- ut_ad(!mutex_own(&kernel_mutex));
- #endif /* UNIV_SYNC_DEBUG */
- mtr_start(&mtr);
-
- page = buf_page_get(space, page_no, RW_X_LATCH, &mtr);
- #ifdef UNIV_SYNC_DEBUG
- buf_page_dbg_add_level(page, SYNC_NO_ORDER_CHECK);
- #endif /* UNIV_SYNC_DEBUG */
- lock_mutex_enter_kernel();
- loop:
- lock = lock_rec_get_first_on_page_addr(space, page_no);
- if (!lock) {
- goto function_exit;
- }
- for (i = 0; i < nth_lock; i++) {
- lock = lock_rec_get_next_on_page(lock);
- if (!lock) {
- goto function_exit;
- }
- }
- ut_a(trx_in_trx_list(lock->trx));
- ut_a(lock->trx->conc_state == TRX_ACTIVE
- || lock->trx->conc_state == TRX_COMMITTED_IN_MEMORY);
-
- for (i = nth_bit; i < lock_rec_get_n_bits(lock); i++) {
- if (i == 1 || lock_rec_get_nth_bit(lock, i)) {
- index = lock->index;
- rec = page_find_rec_with_heap_no(page, i);
- fprintf(stderr,
- "Validating %lu %lun", (ulong) space, (ulong) page_no);
- lock_mutex_exit_kernel();
- lock_rec_queue_validate(rec, index);
- lock_mutex_enter_kernel();
- nth_bit = i + 1;
- goto loop;
- }
- }
- nth_bit = 0;
- nth_lock++;
- goto loop;
- function_exit:
- lock_mutex_exit_kernel();
- mtr_commit(&mtr);
- return(TRUE);
- }
-
- /*************************************************************************
- Validates the lock system. */
- ibool
- lock_validate(void)
- /*===============*/
- /* out: TRUE if ok */
- {
- lock_t* lock;
- trx_t* trx;
- dulint limit;
- ulint space;
- ulint page_no;
- ulint i;
- lock_mutex_enter_kernel();
-
- trx = UT_LIST_GET_FIRST(trx_sys->trx_list);
- while (trx) {
- lock = UT_LIST_GET_FIRST(trx->trx_locks);
-
- while (lock) {
- if (lock_get_type(lock) & LOCK_TABLE) {
-
- lock_table_queue_validate(
- lock->un_member.tab_lock.table);
- }
-
- lock = UT_LIST_GET_NEXT(trx_locks, lock);
- }
-
- trx = UT_LIST_GET_NEXT(trx_list, trx);
- }
- for (i = 0; i < hash_get_n_cells(lock_sys->rec_hash); i++) {
- limit = ut_dulint_zero;
- for (;;) {
- lock = HASH_GET_FIRST(lock_sys->rec_hash, i);
- while (lock) {
- ut_a(trx_in_trx_list(lock->trx));
- space = lock->un_member.rec_lock.space;
- page_no = lock->un_member.rec_lock.page_no;
-
- if (ut_dulint_cmp(
- ut_dulint_create(space, page_no),
- limit) >= 0) {
- break;
- }
- lock = HASH_GET_NEXT(hash, lock);
- }
- if (!lock) {
- break;
- }
-
- lock_mutex_exit_kernel();
- lock_rec_validate_page(space, page_no);
- lock_mutex_enter_kernel();
- limit = ut_dulint_create(space, page_no + 1);
- }
- }
- lock_mutex_exit_kernel();
- return(TRUE);
- }
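- /* Illustrative sketch (not part of lock0lock.c): lock_validate() above must
- drop the kernel mutex while it validates one page, so it cannot keep a
- pointer into the hash table across that gap. Instead it remembers a resume
- key built with ut_dulint_create(space, page_no) and, on the next pass, skips
- every lock whose page address is below that key. The standalone code below
- restates the key packing and the "not yet visited" test with a plain 64-bit
- integer; the names are invented for the sketch. */
- #include <stdint.h>
-
- /* Pack a (space, page_no) pair into one ordered 64-bit key, space id in the
- high word and page number in the low word. */
- static uint64_t
- sketch_page_key(uint32_t space, uint32_t page_no)
- {
- 	return(((uint64_t) space << 32) | page_no);
- }
-
- /* Nonzero if the page has not been validated yet, i.e. its key is at or
- above the resume limit; after validating it, the caller advances the limit
- to sketch_page_key(space, page_no + 1). */
- static int
- sketch_page_not_yet_visited(uint32_t space, uint32_t page_no, uint64_t limit)
- {
- 	return(sketch_page_key(space, page_no) >= limit);
- }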
- /*============ RECORD LOCK CHECKS FOR ROW OPERATIONS ====================*/
- /*************************************************************************
- Checks if locks of other transactions prevent an immediate insert of
- a record. If they do, first tests if the query thread should anyway
- be suspended for some reason; if not, then puts the transaction and
- the query thread to the lock wait state and inserts a waiting request
- for a gap x-lock to the lock queue. */
- ulint
- lock_rec_insert_check_and_lock(
- /*===========================*/
- /* out: DB_SUCCESS, DB_LOCK_WAIT,
- DB_DEADLOCK, or DB_QUE_THR_SUSPENDED */
- ulint flags, /* in: if BTR_NO_LOCKING_FLAG bit is set,
- does nothing */
- rec_t* rec, /* in: record after which to insert */
- dict_index_t* index, /* in: index */
- que_thr_t* thr, /* in: query thread */
- ibool* inherit)/* out: set to TRUE if the new inserted
- record maybe should inherit LOCK_GAP type
- locks from the successor record */
- {
- rec_t* next_rec;
- trx_t* trx;
- lock_t* lock;
- ulint err;
- if (flags & BTR_NO_LOCKING_FLAG) {
- return(DB_SUCCESS);
- }
- ut_ad(rec);
- trx = thr_get_trx(thr);
- next_rec = page_rec_get_next(rec);
- *inherit = FALSE;
- lock_mutex_enter_kernel();
- ut_ad(lock_table_has(thr_get_trx(thr), index->table, LOCK_IX));
- lock = lock_rec_get_first(next_rec);
- if (lock == NULL) {
- /* We optimize CPU time usage in the simplest case */
- lock_mutex_exit_kernel();
- if (!(index->type & DICT_CLUSTERED)) {
- /* Update the page max trx id field */
- page_update_max_trx_id(buf_frame_align(rec),
- thr_get_trx(thr)->id);
- }
-
- return(DB_SUCCESS);
- }
- *inherit = TRUE;
- /* If another transaction has an explicit lock request which locks
- the gap, waiting or granted, on the successor, the insert has to wait.
- An exception is the case where the lock by the another transaction
- is a gap type lock which it placed to wait for its turn to insert. We
- do not consider that kind of a lock conflicting with our insert. This
- eliminates an unnecessary deadlock which resulted when 2 transactions
- had to wait for their insert. Both had waiting gap type lock requests
- on the successor, which produced an unnecessary deadlock. */
- if (lock_rec_other_has_conflicting(LOCK_X | LOCK_GAP
- | LOCK_INSERT_INTENTION, next_rec, trx)) {
- /* Note that we may get DB_SUCCESS also here! */
- err = lock_rec_enqueue_waiting(LOCK_X | LOCK_GAP
- | LOCK_INSERT_INTENTION,
- next_rec, index, thr);
- } else {
- err = DB_SUCCESS;
- }
- lock_mutex_exit_kernel();
- if (!(index->type & DICT_CLUSTERED) && (err == DB_SUCCESS)) {
- /* Update the page max trx id field */
- page_update_max_trx_id(buf_frame_align(rec),
- thr_get_trx(thr)->id);
- }
-
- ut_ad(lock_rec_queue_validate(next_rec, index));
- return(err);
- }
- /*************************************************************************
- If a transaction has an implicit x-lock on a record, but no explicit x-lock
- set on the record, sets one for it. NOTE that in the case of a secondary
- index, the kernel mutex may get temporarily released. */
- static
- void
- lock_rec_convert_impl_to_expl(
- /*==========================*/
- rec_t* rec, /* in: user record on page */
- dict_index_t* index) /* in: index of record */
- {
- trx_t* impl_trx;
- #ifdef UNIV_SYNC_DEBUG
- ut_ad(mutex_own(&kernel_mutex));
- #endif /* UNIV_SYNC_DEBUG */
- ut_ad(page_rec_is_user_rec(rec));
- if (index->type & DICT_CLUSTERED) {
- impl_trx = lock_clust_rec_some_has_impl(rec, index);
- } else {
- impl_trx = lock_sec_rec_some_has_impl_off_kernel(rec, index);
- }
- if (impl_trx) {
- /* If the transaction has no explicit x-lock set on the
- record, set one for it */
- if (!lock_rec_has_expl(LOCK_X | LOCK_REC_NOT_GAP, rec,
- impl_trx)) {
- lock_rec_add_to_queue(LOCK_REC | LOCK_X
- | LOCK_REC_NOT_GAP, rec, index,
- impl_trx);
- }
- }
- }
- /*************************************************************************
- Checks if locks of other transactions prevent an immediate modify (update,
- delete mark, or delete unmark) of a clustered index record. If they do,
- first tests if the query thread should anyway be suspended for some
- reason; if not, then puts the transaction and the query thread to the
- lock wait state and inserts a waiting request for a record x-lock to the
- lock queue. */
- ulint
- lock_clust_rec_modify_check_and_lock(
- /*=================================*/
- /* out: DB_SUCCESS, DB_LOCK_WAIT,
- DB_DEADLOCK, or DB_QUE_THR_SUSPENDED */
- ulint flags, /* in: if BTR_NO_LOCKING_FLAG bit is set,
- does nothing */
- rec_t* rec, /* in: record which should be modified */
- dict_index_t* index, /* in: clustered index */
- que_thr_t* thr) /* in: query thread */
- {
- ulint err;
-
- if (flags & BTR_NO_LOCKING_FLAG) {
- return(DB_SUCCESS);
- }
- ut_ad(index->type & DICT_CLUSTERED);
- lock_mutex_enter_kernel();
- ut_ad(lock_table_has(thr_get_trx(thr), index->table, LOCK_IX));
- /* If a transaction has no explicit x-lock set on the record, set one
- for it */
- lock_rec_convert_impl_to_expl(rec, index);
- err = lock_rec_lock(TRUE, LOCK_X | LOCK_REC_NOT_GAP, rec, index, thr);
- lock_mutex_exit_kernel();
- ut_ad(lock_rec_queue_validate(rec, index));
- return(err);
- }
- /*************************************************************************
- Checks if locks of other transactions prevent an immediate modify (delete
- mark or delete unmark) of a secondary index record. */
- ulint
- lock_sec_rec_modify_check_and_lock(
- /*===============================*/
- /* out: DB_SUCCESS, DB_LOCK_WAIT,
- DB_DEADLOCK, or DB_QUE_THR_SUSPENDED */
- ulint flags, /* in: if BTR_NO_LOCKING_FLAG bit is set,
- does nothing */
- rec_t* rec, /* in: record which should be modified;
- NOTE: as this is a secondary index, we
- always have to modify the clustered index
- record first: see the comment below */
- dict_index_t* index, /* in: secondary index */
- que_thr_t* thr) /* in: query thread */
- {
- ulint err;
-
- if (flags & BTR_NO_LOCKING_FLAG) {
- return(DB_SUCCESS);
- }
- ut_ad(!(index->type & DICT_CLUSTERED));
- /* Another transaction cannot have an implicit lock on the record,
- because when we come here, we already have modified the clustered
- index record, and this would not have been possible if another active
- transaction had modified this secondary index record. */
- lock_mutex_enter_kernel();
- ut_ad(lock_table_has(thr_get_trx(thr), index->table, LOCK_IX));
- err = lock_rec_lock(TRUE, LOCK_X | LOCK_REC_NOT_GAP, rec, index, thr);
- lock_mutex_exit_kernel();
-
- ut_ad(lock_rec_queue_validate(rec, index));
- if (err == DB_SUCCESS) {
- /* Update the page max trx id field */
- page_update_max_trx_id(buf_frame_align(rec),
- thr_get_trx(thr)->id);
- }
- return(err);
- }
- /*************************************************************************
- Like the counterpart for a clustered index below, but now we read a
- secondary index record. */
- ulint
- lock_sec_rec_read_check_and_lock(
- /*=============================*/
- /* out: DB_SUCCESS, DB_LOCK_WAIT,
- DB_DEADLOCK, or DB_QUE_THR_SUSPENDED */
- ulint flags, /* in: if BTR_NO_LOCKING_FLAG bit is set,
- does nothing */
- rec_t* rec, /* in: user record or page supremum record
- which should be read or passed over by a read
- cursor */
- dict_index_t* index, /* in: secondary index */
- ulint mode, /* in: mode of the lock which the read cursor
- should set on records: LOCK_S or LOCK_X; the
- latter is possible in SELECT FOR UPDATE */
- ulint gap_mode,/* in: LOCK_ORDINARY, LOCK_GAP, or
- LOCK_REC_NOT_GAP */
- que_thr_t* thr) /* in: query thread */
- {
- ulint err;
- ut_ad(!(index->type & DICT_CLUSTERED));
- ut_ad(page_rec_is_user_rec(rec) || page_rec_is_supremum(rec));
- if (flags & BTR_NO_LOCKING_FLAG) {
- return(DB_SUCCESS);
- }
- lock_mutex_enter_kernel();
- ut_ad(mode != LOCK_X
- || lock_table_has(thr_get_trx(thr), index->table, LOCK_IX));
- ut_ad(mode != LOCK_S
- || lock_table_has(thr_get_trx(thr), index->table, LOCK_IS));
- /* Some transaction may have an implicit x-lock on the record only
- if the max trx id for the page >= min trx id for the trx list or a
- database recovery is running. */
- if (((ut_dulint_cmp(page_get_max_trx_id(buf_frame_align(rec)),
- trx_list_get_min_trx_id()) >= 0)
- || recv_recovery_is_on())
- && !page_rec_is_supremum(rec)) {
- lock_rec_convert_impl_to_expl(rec, index);
- }
- err = lock_rec_lock(FALSE, mode | gap_mode, rec, index, thr);
- lock_mutex_exit_kernel();
- ut_ad(lock_rec_queue_validate(rec, index));
- return(err);
- }
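- /* Illustrative sketch (not part of lock0lock.c): the check above avoids the
- implicit-to-explicit conversion when the page cannot hold changes made by a
- still-active transaction: if the page's max trx id is below the smallest id
- in the trx list, every writer of the page has already committed, so no
- implicit x-lock can exist (unless crash recovery is running). The standalone
- predicate below restates that test with plain 64-bit ids; the names are
- invented for the sketch. */
- #include <stdint.h>
-
- static int
- sketch_may_have_implicit_lock(uint64_t page_max_trx_id,
- 			      uint64_t min_active_trx_id,
- 			      int recovery_is_running)
- {
- 	return(page_max_trx_id >= min_active_trx_id || recovery_is_running);
- }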
- /*************************************************************************
- Checks if locks of other transactions prevent an immediate read, or passing
- over by a read cursor, of a clustered index record. If they do, first tests
- if the query thread should anyway be suspended for some reason; if not, then
- puts the transaction and the query thread to the lock wait state and inserts a
- waiting request for a record lock to the lock queue. Sets the requested mode
- lock on the record. */
- ulint
- lock_clust_rec_read_check_and_lock(
- /*===============================*/
- /* out: DB_SUCCESS, DB_LOCK_WAIT,
- DB_DEADLOCK, or DB_QUE_THR_SUSPENDED */
- ulint flags, /* in: if BTR_NO_LOCKING_FLAG bit is set,
- does nothing */
- rec_t* rec, /* in: user record or page supremum record
- which should be read or passed over by a read
- cursor */
- dict_index_t* index, /* in: clustered index */
- ulint mode, /* in: mode of the lock which the read cursor
- should set on records: LOCK_S or LOCK_X; the
- latter is possible in SELECT FOR UPDATE */
- ulint gap_mode,/* in: LOCK_ORDINARY, LOCK_GAP, or
- LOCK_REC_NOT_GAP */
- que_thr_t* thr) /* in: query thread */
- {
- ulint err;
- ut_ad(index->type & DICT_CLUSTERED);
- ut_ad(page_rec_is_user_rec(rec) || page_rec_is_supremum(rec));
- ut_ad(gap_mode == LOCK_ORDINARY || gap_mode == LOCK_GAP
- || gap_mode == LOCK_REC_NOT_GAP);
- if (flags & BTR_NO_LOCKING_FLAG) {
- return(DB_SUCCESS);
- }
- lock_mutex_enter_kernel();
- ut_ad(mode != LOCK_X
- || lock_table_has(thr_get_trx(thr), index->table, LOCK_IX));
- ut_ad(mode != LOCK_S
- || lock_table_has(thr_get_trx(thr), index->table, LOCK_IS));
-
- if (!page_rec_is_supremum(rec)) {
-
- lock_rec_convert_impl_to_expl(rec, index);
- }
- err = lock_rec_lock(FALSE, mode | gap_mode, rec, index, thr);
- lock_mutex_exit_kernel();
- ut_ad(lock_rec_queue_validate(rec, index));
-
- return(err);
- }