Drizzled Public API Documentation

lock0lock.cc
00001 /*****************************************************************************
00002 
00003 Copyright (C) 1996, 2010, Innobase Oy. All Rights Reserved.
00004 
00005 This program is free software; you can redistribute it and/or modify it under
00006 the terms of the GNU General Public License as published by the Free Software
00007 Foundation; version 2 of the License.
00008 
00009 This program is distributed in the hope that it will be useful, but WITHOUT
00010 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
00011 FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
00012 
00013 You should have received a copy of the GNU General Public License along with
00014 this program; if not, write to the Free Software Foundation, Inc., 51 Franklin
00015 St, Fifth Floor, Boston, MA 02110-1301 USA
00016 
00017 *****************************************************************************/
00018 
00019 /**************************************************/
00026 #define LOCK_MODULE_IMPLEMENTATION
00027 
00028 #include "lock0lock.h"
00029 #include "lock0priv.h"
00030 
00031 #ifdef UNIV_NONINL
00032 #include "lock0lock.ic"
00033 #include "lock0priv.ic"
00034 #endif
00035 
00036 #include "ha_prototypes.h"
00037 #include "usr0sess.h"
00038 #include "trx0purge.h"
00039 #include "dict0mem.h"
00040 #include "trx0sys.h"
00041 
00042 /* Restricts the length of search we will do in the waits-for
00043 graph of transactions */
00044 #define LOCK_MAX_N_STEPS_IN_DEADLOCK_CHECK 1000000
00045 
00046 /* Restricts the recursion depth of the search we will do in the waits-for
00047 graph of transactions */
00048 #define LOCK_MAX_DEPTH_IN_DEADLOCK_CHECK 200
00049 
00050 /* When releasing transaction locks, this specifies how often we release
00051 the kernel mutex for a moment to give also others access to it */
00052 
00053 #define LOCK_RELEASE_KERNEL_INTERVAL  1000
00054 
00055 /* Safety margin when creating a new record lock: this many extra records
00056 can be inserted to the page without need to create a lock with a bigger
00057 bitmap */
00058 
00059 #define LOCK_PAGE_BITMAP_MARGIN   64
00060 
00061 /* An explicit record lock affects both the record and the gap before it.
00062 An implicit x-lock does not affect the gap, it only locks the index
00063 record from read or update.
00064 
00065 If a transaction has modified or inserted an index record, then
00066 it owns an implicit x-lock on the record. On a secondary index record,
00067 a transaction has an implicit x-lock also if it has modified the
00068 clustered index record, the max trx id of the page where the secondary
00069 index record resides is >= trx id of the transaction (or database recovery
00070 is running), and there are no explicit non-gap lock requests on the
00071 secondary index record.
00072 
00073 This complicated definition for a secondary index comes from the
00074 implementation: we want to be able to determine if a secondary index
00075 record has an implicit x-lock, just by looking at the present clustered
00076 index record, not at the historical versions of the record. The
00077 complicated definition can be explained to the user so that there is
00078 nondeterminism in the access path when a query is answered: we may,
00079 or may not, access the clustered index record and thus may, or may not,
00080 bump into an x-lock set there.
00081 
00082 Different transactions can have conflicting locks set on the gap at the
00083 same time. The locks on the gap are purely inhibitive: an insert cannot
00084 be made, or a select cursor may have to wait if a different transaction
00085 has a conflicting lock on the gap. An x-lock on the gap does not give
00086 the right to insert into the gap.
00087 
00088 An explicit lock can be placed on a user record or the supremum record of
00089 a page. The locks on the supremum record are always thought to be of the gap
00090 type, though the gap bit is not set. When we perform an update of a record
00091 where the size of the record changes, we may temporarily store its explicit
00092 locks on the infimum record of the page, though the infimum otherwise never
00093 carries locks.
00094 
00095 A waiting record lock can also be of the gap type. A waiting lock request
00096 can be granted when there is no conflicting mode lock request by another
00097 transaction ahead of it in the explicit lock queue.
00098 
00099 In version 4.0.5 we added yet another explicit lock type: LOCK_REC_NOT_GAP.
00100 It only locks the record it is placed on, not the gap before the record.
00101 This lock type is necessary to emulate an Oracle-like READ COMMITTED isolation
00102 level.
00103 
00104 -------------------------------------------------------------------------
00105 RULE 1: If there is an implicit x-lock on a record, and there are non-gap
00106 -------
00107 lock requests waiting in the queue, then the transaction holding the implicit
00108 x-lock also has an explicit non-gap record x-lock. Therefore, as locks are
00109 released, we can grant locks to waiting lock requests purely by looking at
00110 the explicit lock requests in the queue.
00111 
00112 RULE 3: Different transactions cannot have conflicting granted non-gap locks
00113 -------
00114 on a record at the same time. However, they can have conflicting granted gap
00115 locks.
00116 RULE 4: If there is a waiting lock request in a queue, no lock request,
00117 -------
00118 gap or not, can be inserted ahead of it in the queue. In record deletes
00119 and page splits new gap type locks can be created by the database manager
00120 for a transaction, and without rule 4, the waits-for graph of transactions
00121 might become cyclic without the database noticing it, as the deadlock check
00122 is only performed when a transaction itself requests a lock!
00123 -------------------------------------------------------------------------
00124 
00125 An insert is allowed to a gap if there are no explicit lock requests by
00126 other transactions on the next record. It does not matter if these lock
00127 requests are granted or waiting, gap bit set or not, with the exception
00128 that a gap type request set by another transaction to wait for
00129 its turn to do an insert is ignored. On the other hand, an
00130 implicit x-lock by another transaction does not prevent an insert, which
00131 allows for more concurrency when using an Oracle-style sequence number
00132 generator for the primary key with many transactions doing inserts
00133 concurrently.
00134 
00135 A modify of a record is allowed if the transaction has an x-lock on the
00136 record, or if other transactions do not have any non-gap lock requests on the
00137 record.
00138 
00139 A read of a single user record with a cursor is allowed if the transaction
00140 has a non-gap explicit, or an implicit lock on the record, or if the other
00141 transactions have no x-lock requests on the record. At a page supremum a
00142 read is always allowed.
00143 
00144 In summary, an implicit lock is seen as a granted x-lock only on the
00145 record, not on the gap. An explicit lock with no gap bit set is a lock
00146 both on the record and the gap. If the gap bit is set, the lock is only
00147 on the gap. Different transactions cannot own conflicting locks on the
00148 record at the same time, but they may own conflicting locks on the gap.
00149 Granted locks on a record give an access right to the record, but gap type
00150 locks just inhibit operations.
00151 
00152 NOTE: Finding out if some transaction has an implicit x-lock on a secondary
00153 index record can be cumbersome. We may have to look at previous versions of
00154 the corresponding clustered index record to find out if a delete marked
00155 secondary index record was delete marked by an active transaction, not by
00156 a committed one.
00157 
00158 FACT A: If a transaction has inserted a row, it can delete it any time
00159 without need to wait for locks.
00160 
00161 PROOF: The transaction has an implicit x-lock on every index record inserted
00162 for the row, and can thus modify each record without the need to wait. Q.E.D.
00163 
00164 FACT B: If a transaction has read some result set with a cursor, it can read
00165 it again, and retrieves the same result set, if it has not modified the
00166 result set in the meantime. Hence, there is no phantom problem. If the
00167 biggest record, in the alphabetical order, touched by the cursor is removed,
00168 a lock wait may occur, otherwise not.
00169 
00170 PROOF: When a read cursor proceeds, it sets an s-lock on each user record
00171 it passes, and a gap type s-lock on each page supremum. The cursor must
00172 wait until it has these locks granted. Then no other transaction can
00173 have a granted x-lock on any of the user records, and therefore cannot
00174 modify the user records. Neither can any other transaction insert into
00175 the gaps which were passed over by the cursor. Page splits and merges,
00176 and removal of obsolete versions of records do not affect this, because
00177 when a user record or a page supremum is removed, the next record inherits
00178 its locks as gap type locks, and therefore blocks inserts to the same gap.
00179 Also, if a page supremum is inserted, it inherits its locks from the successor
00180 record. When the cursor is positioned again at the start of the result set,
00181 the records it will touch on its course are either records it touched
00182 during the last pass or new inserted page supremums. It can immediately
00183 access all these records, and when it arrives at the biggest record, it
00184 notices that the result set is complete. If the biggest record was removed,
00185 lock wait can occur because the next record only inherits a gap type lock,
00186 and a wait may be needed. Q.E.D. */
00187 
00188 /* If an index record should be changed or a new inserted, we must check
00189 the lock on the record or the next. When a read cursor starts reading,
00190 we will set a record level s-lock on each record it passes, except on the
00191 initial record on which the cursor is positioned before we start to fetch
00192 records. Our index tree search has the convention that the B-tree
00193 cursor is positioned BEFORE the first possibly matching record in
00194 the search. Optimizations are possible here: if the record is searched
00195 on an equality condition to a unique key, we could actually set a special
00196 lock on the record, a lock which would not prevent any insert before
00197 this record. In the next key locking an x-lock set on a record also
00198 prevents inserts just before that record.
00199   There are special infimum and supremum records on each page.
00200 A supremum record can be locked by a read cursor. This records cannot be
00201 updated but the lock prevents insert of a user record to the end of
00202 the page.
00203   Next key locks will prevent the phantom problem where new rows
00204 could appear to SELECT result sets after the select operation has been
00205 performed. Prevention of phantoms ensures the serializability of
00206 transactions.
00207   What should we check if an insert of a new record is wanted?
00208 Only the lock on the next record on the same page, because also the
00209 supremum record can carry a lock. An s-lock prevents insertion, but
00210 what about an x-lock? If it was set by a searched update, then there
00211 is implicitly an s-lock, too, and the insert should be prevented.
00212 What if our transaction owns an x-lock to the next record, but there is
00213 a waiting s-lock request on the next record? If this s-lock was placed
00214 by a read cursor moving in the ascending order in the index, we cannot
00215 do the insert immediately, because when we finally commit our transaction,
00216 the read cursor should see also the new inserted record. So we should
00217 move the read cursor backward from the next record for it to pass over
00218 the new inserted record. This move backward may be too cumbersome to
00219 implement. If we in this situation just enqueue a second x-lock request
00220 for our transaction on the next record, then the deadlock mechanism
00221 notices a deadlock between our transaction and the s-lock request
00222 transaction. This seems to be an ok solution.
00223   We could have the convention that granted explicit record locks,
00224 lock the corresponding records from changing, and also lock the gaps
00225 before them from inserting. A waiting explicit lock request locks the gap
00226 before from inserting. Implicit record x-locks, which we derive from the
00227 transaction id in the clustered index record, only lock the record itself
00228 from modification, not the gap before it from inserting.
00229   How should we store update locks? If the search is done by a unique
00230 key, we could just modify the record trx id. Otherwise, we could put a record
00231 x-lock on the record. If the update changes ordering fields of the
00232 clustered index record, the inserted new record needs no record lock in
00233 lock table, the trx id is enough. The same holds for a secondary index
00234 record. Searched delete is similar to update.
00235 
00236 PROBLEM:
00237 What about waiting lock requests? If a transaction is waiting to make an
00238 update to a record which another modified, how does the other transaction
00239 know to send the end-lock-wait signal to the waiting transaction? If we have
00240 the convention that a transaction may wait for just one lock at a time, how
00241 do we preserve it if lock wait ends?
00242 
00243 PROBLEM:
00244 Checking the trx id label of a secondary index record. In the case of a
00245 modification, not an insert, is this necessary? A secondary index record
00246 is modified only by setting or resetting its deleted flag. A secondary index
00247 record contains fields to uniquely determine the corresponding clustered
00248 index record. A secondary index record is therefore only modified if we
00249 also modify the clustered index record, and the trx id checking is done
00250 on the clustered index record, before we come to modify the secondary index
00251 record. So, in the case of delete marking or unmarking a secondary index
00252 record, we do not have to care about trx ids, only the locks in the lock
00253 table must be checked. In the case of a select from a secondary index, the
00254 trx id is relevant, and in this case we may have to search the clustered
00255 index record.
00256 
00257 PROBLEM: How to update record locks when page is split or merged, or
00258 --------------------------------------------------------------------
00259 a record is deleted or updated?
00260 If the size of fields in a record changes, we perform the update by
00261 a delete followed by an insert. How can we retain the locks set or
00262 waiting on the record? Because a record lock is indexed in the bitmap
00263 by the heap number of the record, when we remove the record from the
00264 record list, it is possible still to keep the lock bits. If the page
00265 is reorganized, we could make a table of old and new heap numbers,
00266 and permute the bitmaps in the locks accordingly. We can add to the
00267 table a row telling where the updated record ended. If the update does
00268 not require a reorganization of the page, we can simply move the lock
00269 bits for the updated record to the position determined by its new heap
00270 number (we may have to allocate a new lock, if we run out of the bitmap
00271 in the old one).
00272   A more complicated case is the one where the reinsertion of the
00273 updated record is done pessimistically, because the structure of the
00274 tree may change.
00275 
00276 PROBLEM: If a supremum record is removed in a page merge, or a record
00277 ---------------------------------------------------------------------
00278 removed in a purge, what to do to the waiting lock requests? In a split to
00279 the right, we just move the lock requests to the new supremum. If a record
00280 is removed, we could move the waiting lock request to its inheritor, the
00281 next record in the index. But, the next record may already have lock
00282 requests on its own queue. A new deadlock check should be made then. Maybe
00283 it is easier just to release the waiting transactions. They can then enqueue
00284 new lock requests on appropriate records.
00285 
00286 PROBLEM: When a record is inserted, what locks should it inherit from the
00287 -------------------------------------------------------------------------
00288 upper neighbor? An insert of a new supremum record in a page split is
00289 always possible, but an insert of a new user record requires that the upper
00290 neighbor does not have any lock requests by other transactions, granted or
00291 waiting, in its lock queue. Solution: We can copy the locks as gap type
00292 locks, so that also the waiting locks are transformed to granted gap type
00293 locks on the inserted record. */
00294 
00295 /* LOCK COMPATIBILITY MATRIX
00296  *    IS IX S  X  AI
00297  * IS +  +  +  -  +
00298  * IX +  +  -  -  +
00299  * S  +  -  +  -  -
00300  * X  -  -  -  -  -
00301  * AI +  +  -  -  -
00302  *
00303  * Note that for rows, InnoDB only acquires S or X locks.
00304  * For tables, InnoDB normally acquires IS or IX locks.
00305  * S or X table locks are only acquired for LOCK TABLES.
00306  * Auto-increment (AI) locks are needed because of
00307  * statement-level MySQL binlog.
00308  * See also lock_mode_compatible().
00309  */
00310 #define LK(a,b) (1 << ((a) * LOCK_NUM + (b)))
00311 #define LKS(a,b) LK(a,b) | LK(b,a)
00312 
00313 /* Define the lock compatibility matrix in a ulint.  The first line below
00314 defines the diagonal entries.  The following lines define the compatibility
00315 for LOCK_IX, LOCK_S, and LOCK_AUTO_INC using LKS(), since the matrix
00316 is symmetric. */
00317 #define LOCK_MODE_COMPATIBILITY 0         \
00318  | LK(LOCK_IS, LOCK_IS) | LK(LOCK_IX, LOCK_IX) | LK(LOCK_S, LOCK_S) \
00319  | LKS(LOCK_IX, LOCK_IS) | LKS(LOCK_IS, LOCK_AUTO_INC)      \
00320  | LKS(LOCK_S, LOCK_IS)             \
00321  | LKS(LOCK_AUTO_INC, LOCK_IS) | LKS(LOCK_AUTO_INC, LOCK_IX)
00322 
00323 /* STRONGER-OR-EQUAL RELATION (mode1=row, mode2=column)
00324  *    IS IX S  X  AI
00325  * IS +  -  -  -  -
00326  * IX +  +  -  -  -
00327  * S  +  -  +  -  -
00328  * X  +  +  +  +  +
00329  * AI -  -  -  -  +
00330  * See lock_mode_stronger_or_eq().
00331  */
00332 
00333 /* Define the stronger-or-equal lock relation in a ulint.  This relation
00334 contains all pairs LK(mode1, mode2) where mode1 is stronger than or
00335 equal to mode2. */
00336 #define LOCK_MODE_STRONGER_OR_EQ 0          \
00337  | LK(LOCK_IS, LOCK_IS)             \
00338  | LK(LOCK_IX, LOCK_IS) | LK(LOCK_IX, LOCK_IX)        \
00339  | LK(LOCK_S, LOCK_IS) | LK(LOCK_S, LOCK_S)       \
00340  | LK(LOCK_AUTO_INC, LOCK_AUTO_INC)         \
00341  | LK(LOCK_X, LOCK_IS) | LK(LOCK_X, LOCK_IX) | LK(LOCK_X, LOCK_S) \
00342  | LK(LOCK_X, LOCK_AUTO_INC) | LK(LOCK_X, LOCK_X)
00343 
#ifdef UNIV_DEBUG
/* When TRUE, diagnostic output about lock waits is printed
(debug builds only). */
UNIV_INTERN ibool	lock_print_waits	= FALSE;

/*********************************************************************/
/** Validates the consistency of the lock system's data structures.
Forward declaration; the definition appears later in this file.
@return TRUE if the lock system is found consistent */
static
ibool
lock_validate(void);
/*===============*/

/*********************************************************************/
/** Validates the record lock queues of one page.
Forward declaration; the definition appears later in this file.
@return TRUE if the page's lock queues are found consistent */
static
ibool
lock_rec_validate_page(
/*===================*/
	ulint	space,		/*!< in: tablespace id of the page */
	ulint	page_no);	/*!< in: page number within the space */
#endif /* UNIV_DEBUG */
00365 
/* The lock system: a single global instance, allocated in
lock_sys_create() and released in lock_sys_close(). */
UNIV_INTERN lock_sys_t*	lock_sys	= NULL;

/* We store info on the latest deadlock error to this buffer. InnoDB
Monitor will then fetch it and print */
UNIV_INTERN ibool	lock_deadlock_found = FALSE;
UNIV_INTERN FILE*	lock_latest_err_file;

/* Result flags for the recursive deadlock search
(see lock_deadlock_recursive()); presumably VICTIM_IS_START means the
search-starting transaction is chosen as the victim, VICTIM_IS_OTHER
that some other transaction is, and EXCEED_MAX_DEPTH that the search
hit its depth/step limit — confirm at the definition. */
#define LOCK_VICTIM_IS_START	1
#define LOCK_VICTIM_IS_OTHER	2
#define LOCK_EXCEED_MAX_DEPTH	3
00378 
00379 /********************************************************************/
00384 static
00385 ibool
00386 lock_deadlock_occurs(
00387 /*=================*/
00388   lock_t* lock, 
00389   trx_t*  trx); 
00390 /********************************************************************/
00398 static
00399 ulint
00400 lock_deadlock_recursive(
00401 /*====================*/
00402   trx_t*  start,    
00403   trx_t*  trx,    
00404   lock_t* wait_lock,  
00405   ulint*  cost,   
00408   ulint depth);   
00412 /*********************************************************************/
00415 UNIV_INLINE
00416 ibool
00417 lock_rec_get_nth_bit(
00418 /*=================*/
00419   const lock_t* lock, 
00420   ulint   i)  
00421 {
00422   ulint byte_index;
00423   ulint bit_index;
00424 
00425   ut_ad(lock);
00426   ut_ad(lock_get_type_low(lock) == LOCK_REC);
00427 
00428   if (i >= lock->un_member.rec_lock.n_bits) {
00429 
00430     return(FALSE);
00431   }
00432 
00433   byte_index = i / 8;
00434   bit_index = i % 8;
00435 
00436   return(1 & ((const byte*) &lock[1])[byte_index] >> bit_index);
00437 }
00438 
/*************************************************************************/

/* Shorthand wrappers for acquiring and releasing the global kernel
mutex, which serializes access to the lock system's structures. */
#define lock_mutex_enter_kernel()	mutex_enter(&kernel_mutex)
#define lock_mutex_exit_kernel()	mutex_exit(&kernel_mutex)
00443 
00444 /*********************************************************************/
00447 UNIV_INTERN
00448 ibool
00449 lock_check_trx_id_sanity(
00450 /*=====================*/
00451   trx_id_t  trx_id,   
00452   const rec_t*  rec,    
00453   dict_index_t* index,    
00454   const ulint*  offsets,  
00455   ibool   has_kernel_mutex)
00457 {
00458   ibool is_ok   = TRUE;
00459 
00460   ut_ad(rec_offs_validate(rec, index, offsets));
00461 
00462   if (!has_kernel_mutex) {
00463     mutex_enter(&kernel_mutex);
00464   }
00465 
00466   /* A sanity check: the trx_id in rec must be smaller than the global
00467   trx id counter */
00468 
00469   if (UNIV_UNLIKELY(trx_id >= trx_sys->max_trx_id)) {
00470     ut_print_timestamp(stderr);
00471     fputs("  InnoDB: Error: transaction id associated"
00472           " with record\n",
00473           stderr);
00474     rec_print_new(stderr, rec, offsets);
00475     fputs("InnoDB: in ", stderr);
00476     dict_index_name_print(stderr, NULL, index);
00477     fprintf(stderr, "\n"
00478       "InnoDB: is " TRX_ID_FMT " which is higher than the"
00479       " global trx id counter " TRX_ID_FMT "!\n"
00480       "InnoDB: The table is corrupt. You have to do"
00481       " dump + drop + reimport.\n",
00482       trx_id, trx_sys->max_trx_id);
00483 
00484     is_ok = FALSE;
00485   }
00486 
00487   if (!has_kernel_mutex) {
00488     mutex_exit(&kernel_mutex);
00489   }
00490 
00491   return(is_ok);
00492 }
00493 
00494 /*********************************************************************/
00498 UNIV_INTERN
00499 ibool
00500 lock_clust_rec_cons_read_sees(
00501 /*==========================*/
00502   const rec_t*  rec,  
00504   dict_index_t* index,  
00505   const ulint*  offsets,
00506   read_view_t*  view) 
00507 {
00508   trx_id_t  trx_id;
00509 
00510   ut_ad(dict_index_is_clust(index));
00511   ut_ad(page_rec_is_user_rec(rec));
00512   ut_ad(rec_offs_validate(rec, index, offsets));
00513 
00514   /* NOTE that we call this function while holding the search
00515   system latch. To obey the latching order we must NOT reserve the
00516   kernel mutex here! */
00517 
00518   trx_id = row_get_rec_trx_id(rec, index, offsets);
00519 
00520   return(read_view_sees_trx_id(view, trx_id));
00521 }
00522 
00523 /*********************************************************************/
00533 UNIV_INTERN
00534 ulint
00535 lock_sec_rec_cons_read_sees(
00536 /*========================*/
00537   const rec_t*    rec,  
00540   const read_view_t*  view) 
00541 {
00542   trx_id_t  max_trx_id;
00543 
00544   ut_ad(page_rec_is_user_rec(rec));
00545 
00546   /* NOTE that we might call this function while holding the search
00547   system latch. To obey the latching order we must NOT reserve the
00548   kernel mutex here! */
00549 
00550   if (recv_recovery_is_on()) {
00551 
00552     return(FALSE);
00553   }
00554 
00555   max_trx_id = page_get_max_trx_id(page_align(rec));
00556   ut_ad(max_trx_id);
00557 
00558   return(max_trx_id < view->up_limit_id);
00559 }
00560 
00561 /*********************************************************************/
00563 UNIV_INTERN
00564 void
00565 lock_sys_create(
00566 /*============*/
00567   ulint n_cells)  
00568 {
00569   lock_sys = static_cast<lock_sys_t *>(mem_alloc(sizeof(lock_sys_t)));
00570 
00571   lock_sys->rec_hash = hash_create(n_cells);
00572 
00573   /* hash_create_mutexes(lock_sys->rec_hash, 2, SYNC_REC_LOCK); */
00574 
00575   lock_latest_err_file = os_file_create_tmpfile();
00576   ut_a(lock_latest_err_file);
00577 }
00578 
00579 /*********************************************************************/
00581 UNIV_INTERN
00582 void
00583 lock_sys_close(void)
00584 /*================*/
00585 {
00586   if (lock_latest_err_file != NULL) {
00587     fclose(lock_latest_err_file);
00588     lock_latest_err_file = NULL;
00589   }
00590 
00591   hash_table_free(lock_sys->rec_hash);
00592   mem_free(lock_sys);
00593   lock_sys = NULL;
00594 }
00595 
00596 /*********************************************************************/
00599 UNIV_INTERN
00600 ulint
00601 lock_get_size(void)
00602 /*===============*/
00603 {
00604   return((ulint)sizeof(lock_t));
00605 }
00606 
00607 /*********************************************************************/
00610 UNIV_INLINE
00611 enum lock_mode
00612 lock_get_mode(
00613 /*==========*/
00614   const lock_t* lock) 
00615 {
00616   ut_ad(lock);
00617 
00618   return static_cast<lock_mode>(lock->type_mode & LOCK_MODE_MASK);
00619 }
00620 
00621 /*********************************************************************/
00624 UNIV_INLINE
00625 ibool
00626 lock_get_wait(
00627 /*==========*/
00628   const lock_t* lock) 
00629 {
00630   ut_ad(lock);
00631 
00632   if (UNIV_UNLIKELY(lock->type_mode & LOCK_WAIT)) {
00633 
00634     return(TRUE);
00635   }
00636 
00637   return(FALSE);
00638 }
00639 
00640 /*********************************************************************/
00647 UNIV_INTERN
00648 dict_table_t*
00649 lock_get_src_table(
00650 /*===============*/
00651   trx_t*    trx,  
00652   dict_table_t* dest, 
00653   enum lock_mode* mode) 
00654 {
00655   dict_table_t* src;
00656   lock_t*   lock;
00657 
00658   src = NULL;
00659   *mode = LOCK_NONE;
00660 
00661   for (lock = UT_LIST_GET_FIRST(trx->trx_locks);
00662        lock;
00663        lock = UT_LIST_GET_NEXT(trx_locks, lock)) {
00664     lock_table_t* tab_lock;
00665     enum lock_mode  lock_mode;
00666     if (!(lock_get_type_low(lock) & LOCK_TABLE)) {
00667       /* We are only interested in table locks. */
00668       continue;
00669     }
00670     tab_lock = &lock->un_member.tab_lock;
00671     if (dest == tab_lock->table) {
00672       /* We are not interested in the destination table. */
00673       continue;
00674     } else if (!src) {
00675       /* This presumably is the source table. */
00676       src = tab_lock->table;
00677       if (UT_LIST_GET_LEN(src->locks) != 1
00678           || UT_LIST_GET_FIRST(src->locks) != lock) {
00679         /* We only support the case when
00680         there is only one lock on this table. */
00681         return(NULL);
00682       }
00683     } else if (src != tab_lock->table) {
00684       /* The transaction is locking more than
00685       two tables (src and dest): abort */
00686       return(NULL);
00687     }
00688 
00689     /* Check that the source table is locked by
00690     LOCK_IX or LOCK_IS. */
00691     lock_mode = lock_get_mode(lock);
00692     if (lock_mode == LOCK_IX || lock_mode == LOCK_IS) {
00693       if (*mode != LOCK_NONE && *mode != lock_mode) {
00694         /* There are multiple locks on src. */
00695         return(NULL);
00696       }
00697       *mode = lock_mode;
00698     }
00699   }
00700 
00701   if (!src) {
00702     /* No source table lock found: flag the situation to caller */
00703     src = dest;
00704   }
00705 
00706   return(src);
00707 }
00708 
00709 /*********************************************************************/
00715 UNIV_INTERN
00716 ibool
00717 lock_is_table_exclusive(
00718 /*====================*/
00719   dict_table_t* table,  
00720   trx_t*    trx)  
00721 {
00722   const lock_t* lock;
00723   ibool   ok  = FALSE;
00724 
00725   ut_ad(table);
00726   ut_ad(trx);
00727 
00728   lock_mutex_enter_kernel();
00729 
00730   for (lock = UT_LIST_GET_FIRST(table->locks);
00731        lock;
00732        lock = UT_LIST_GET_NEXT(locks, &lock->un_member.tab_lock)) {
00733     if (lock->trx != trx) {
00734       /* A lock on the table is held
00735       by some other transaction. */
00736       goto not_ok;
00737     }
00738 
00739     if (!(lock_get_type_low(lock) & LOCK_TABLE)) {
00740       /* We are interested in table locks only. */
00741       continue;
00742     }
00743 
00744     switch (lock_get_mode(lock)) {
00745     case LOCK_IX:
00746       ok = TRUE;
00747       break;
00748     case LOCK_AUTO_INC:
00749       /* It is allowed for trx to hold an
00750       auto_increment lock. */
00751       break;
00752     default:
00753 not_ok:
00754       /* Other table locks than LOCK_IX are not allowed. */
00755       ok = FALSE;
00756       goto func_exit;
00757     }
00758   }
00759 
00760 func_exit:
00761   lock_mutex_exit_kernel();
00762 
00763   return(ok);
00764 }
00765 
00766 /*********************************************************************/
00768 UNIV_INLINE
00769 void
00770 lock_set_lock_and_trx_wait(
00771 /*=======================*/
00772   lock_t* lock, 
00773   trx_t*  trx)  
00774 {
00775   ut_ad(lock);
00776   ut_ad(trx->wait_lock == NULL);
00777 
00778   trx->wait_lock = lock;
00779   lock->type_mode |= LOCK_WAIT;
00780 }
00781 
00782 /**********************************************************************/
00785 UNIV_INLINE
00786 void
00787 lock_reset_lock_and_trx_wait(
00788 /*=========================*/
00789   lock_t* lock) 
00790 {
00791   ut_ad((lock->trx)->wait_lock == lock);
00792   ut_ad(lock_get_wait(lock));
00793 
00794   /* Reset the back pointer in trx to this waiting lock request */
00795 
00796   (lock->trx)->wait_lock = NULL;
00797   lock->type_mode &= ~LOCK_WAIT;
00798 }
00799 
00800 /*********************************************************************/
00803 UNIV_INLINE
00804 ibool
00805 lock_rec_get_gap(
00806 /*=============*/
00807   const lock_t* lock) 
00808 {
00809   ut_ad(lock);
00810   ut_ad(lock_get_type_low(lock) == LOCK_REC);
00811 
00812   if (lock->type_mode & LOCK_GAP) {
00813 
00814     return(TRUE);
00815   }
00816 
00817   return(FALSE);
00818 }
00819 
00820 /*********************************************************************/
00823 UNIV_INLINE
00824 ibool
00825 lock_rec_get_rec_not_gap(
00826 /*=====================*/
00827   const lock_t* lock) 
00828 {
00829   ut_ad(lock);
00830   ut_ad(lock_get_type_low(lock) == LOCK_REC);
00831 
00832   if (lock->type_mode & LOCK_REC_NOT_GAP) {
00833 
00834     return(TRUE);
00835   }
00836 
00837   return(FALSE);
00838 }
00839 
00840 /*********************************************************************/
00843 UNIV_INLINE
00844 ibool
00845 lock_rec_get_insert_intention(
00846 /*==========================*/
00847   const lock_t* lock) 
00848 {
00849   ut_ad(lock);
00850   ut_ad(lock_get_type_low(lock) == LOCK_REC);
00851 
00852   if (lock->type_mode & LOCK_INSERT_INTENTION) {
00853 
00854     return(TRUE);
00855   }
00856 
00857   return(FALSE);
00858 }
00859 
00860 /*********************************************************************/
/** Checks whether mode1 is stronger than or equal to mode2, i.e. whether
a lock in mode1 also covers everything a lock in mode2 would cover.
Only the basic modes asserted below are valid inputs.
@return nonzero if mode1 is stronger than or equal to mode2 */
UNIV_INLINE
ulint
lock_mode_stronger_or_eq(
/*=====================*/
  enum lock_mode  mode1,  /*!< in: lock mode to test */
  enum lock_mode  mode2)  /*!< in: lock mode compared against */
{
  ut_ad(mode1 == LOCK_X || mode1 == LOCK_S || mode1 == LOCK_IX
        || mode1 == LOCK_IS || mode1 == LOCK_AUTO_INC);
  ut_ad(mode2 == LOCK_X || mode2 == LOCK_S || mode2 == LOCK_IX
        || mode2 == LOCK_IS || mode2 == LOCK_AUTO_INC);

  /* NOTE(review): LK() appears to select the bit for this (mode1, mode2)
  pair in the LOCK_MODE_STRONGER_OR_EQ bit matrix; the macros are defined
  elsewhere (lock0priv.h) -- confirm there. */
  return((LOCK_MODE_STRONGER_OR_EQ) & LK(mode1, mode2));
}
00877 
00878 /*********************************************************************/
/** Checks whether two lock modes are compatible, i.e. whether locks in
these modes can be held on the same object by different transactions at
the same time. Only the basic modes asserted below are valid inputs.
@return nonzero if the modes are compatible */
UNIV_INLINE
ulint
lock_mode_compatible(
/*=================*/
  enum lock_mode  mode1,  /*!< in: lock mode */
  enum lock_mode  mode2)  /*!< in: lock mode */
{
  ut_ad(mode1 == LOCK_X || mode1 == LOCK_S || mode1 == LOCK_IX
        || mode1 == LOCK_IS || mode1 == LOCK_AUTO_INC);
  ut_ad(mode2 == LOCK_X || mode2 == LOCK_S || mode2 == LOCK_IX
        || mode2 == LOCK_IS || mode2 == LOCK_AUTO_INC);

  /* NOTE(review): LK() appears to select the bit for this (mode1, mode2)
  pair in the LOCK_MODE_COMPATIBILITY bit matrix; defined elsewhere
  (lock0priv.h) -- confirm there. */
  return((LOCK_MODE_COMPATIBILITY) & LK(mode1, mode2));
}
00895 
00896 /*********************************************************************/
/** Checks whether a new record lock request of the given trx and mode
would have to wait for the already-queued record lock lock2. Applies the
special gap / insert-intention compatibility rules on top of the basic
lock mode compatibility matrix.
@return TRUE if the new lock request has to wait for lock2 to be removed */
UNIV_INLINE
ibool
lock_rec_has_to_wait(
/*=================*/
  const trx_t*  trx,  /*!< in: trx of the new lock request */
  ulint   type_mode,  /*!< in: precise mode of the new lock: basic
        mode possibly ORed with LOCK_GAP, LOCK_REC_NOT_GAP or
        LOCK_INSERT_INTENTION */
  const lock_t* lock2,  /*!< in: another record lock; it is assumed that
        this has a lock bit set on the same record */
  ibool lock_is_on_supremum)  /*!< in: TRUE if the new lock is requested
        on the supremum record of a page */
{
  ut_ad(trx && lock2);
  ut_ad(lock_get_type_low(lock2) == LOCK_REC);

  /* A trx never waits for its own locks; compatible basic modes never
  wait either. */
  if (trx != lock2->trx
      && !lock_mode_compatible(static_cast<lock_mode>(LOCK_MODE_MASK & type_mode),
             lock_get_mode(lock2))) {

    /* We have somewhat complex rules when gap type record locks
    cause waits */

    if ((lock_is_on_supremum || (type_mode & LOCK_GAP))
        && !(type_mode & LOCK_INSERT_INTENTION)) {

      /* Gap type locks without LOCK_INSERT_INTENTION flag
      do not need to wait for anything. This is because
      different users can have conflicting lock types
      on gaps. */

      return(FALSE);
    }

    if (!(type_mode & LOCK_INSERT_INTENTION)
        && lock_rec_get_gap(lock2)) {

      /* Record lock (LOCK_ORDINARY or LOCK_REC_NOT_GAP)
      does not need to wait for a gap type lock */

      return(FALSE);
    }

    if ((type_mode & LOCK_GAP)
        && lock_rec_get_rec_not_gap(lock2)) {

      /* Lock on gap does not need to wait for
      a LOCK_REC_NOT_GAP type lock */

      return(FALSE);
    }

    if (lock_rec_get_insert_intention(lock2)) {

      /* No lock request needs to wait for an insert
      intention lock to be removed. This is ok since our
      rules allow conflicting locks on gaps. This eliminates
      a spurious deadlock caused by a next-key lock waiting
      for an insert intention lock; when the insert
      intention lock was granted, the insert deadlocked on
      the waiting next-key lock.

      Also, insert intention locks do not disturb each
      other. */

      return(FALSE);
    }

    return(TRUE);
  }

  return(FALSE);
}
00977 
00978 /*********************************************************************/
00981 UNIV_INTERN
00982 ibool
00983 lock_has_to_wait(
00984 /*=============*/
00985   const lock_t* lock1,  
00986   const lock_t* lock2)  
00990 {
00991   ut_ad(lock1 && lock2);
00992 
00993   if (lock1->trx != lock2->trx
00994       && !lock_mode_compatible(lock_get_mode(lock1),
00995              lock_get_mode(lock2))) {
00996     if (lock_get_type_low(lock1) == LOCK_REC) {
00997       ut_ad(lock_get_type_low(lock2) == LOCK_REC);
00998 
00999       /* If this lock request is for a supremum record
01000       then the second bit on the lock bitmap is set */
01001 
01002       return(lock_rec_has_to_wait(lock1->trx,
01003                 lock1->type_mode, lock2,
01004                 lock_rec_get_nth_bit(
01005                   lock1, 1)));
01006     }
01007 
01008     return(TRUE);
01009   }
01010 
01011   return(FALSE);
01012 }
01013 
01014 /*============== RECORD LOCK BASIC FUNCTIONS ============================*/
01015 
01016 /*********************************************************************/
/** Gets the number of bits in a record lock bitmap.
@return number of bits */
UNIV_INLINE
ulint
lock_rec_get_n_bits(
/*================*/
  const lock_t* lock) /*!< in: record lock */
{
  return(lock->un_member.rec_lock.n_bits);
}
01027 
01028 /**********************************************************************/
01030 UNIV_INLINE
01031 void
01032 lock_rec_set_nth_bit(
01033 /*=================*/
01034   lock_t* lock, 
01035   ulint i)  
01036 {
01037   ulint byte_index;
01038   ulint bit_index;
01039 
01040   ut_ad(lock);
01041   ut_ad(lock_get_type_low(lock) == LOCK_REC);
01042   ut_ad(i < lock->un_member.rec_lock.n_bits);
01043 
01044   byte_index = i / 8;
01045   bit_index = i % 8;
01046 
01047   ((byte*) &lock[1])[byte_index] |= 1 << bit_index;
01048 }
01049 
01050 /**********************************************************************/
01055 UNIV_INTERN
01056 ulint
01057 lock_rec_find_set_bit(
01058 /*==================*/
01059   const lock_t* lock) 
01060 {
01061   ulint i;
01062 
01063   for (i = 0; i < lock_rec_get_n_bits(lock); i++) {
01064 
01065     if (lock_rec_get_nth_bit(lock, i)) {
01066 
01067       return(i);
01068     }
01069   }
01070 
01071   return(ULINT_UNDEFINED);
01072 }
01073 
01074 /**********************************************************************/
01076 UNIV_INLINE
01077 void
01078 lock_rec_reset_nth_bit(
01079 /*===================*/
01080   lock_t* lock, 
01081   ulint i)  
01083 {
01084   ulint byte_index;
01085   ulint bit_index;
01086 
01087   ut_ad(lock);
01088   ut_ad(lock_get_type_low(lock) == LOCK_REC);
01089   ut_ad(i < lock->un_member.rec_lock.n_bits);
01090 
01091   byte_index = i / 8;
01092   bit_index = i % 8;
01093 
01094   ((byte*) &lock[1])[byte_index] &= ~(1 << bit_index);
01095 }
01096 
01097 /*********************************************************************/
01100 UNIV_INLINE
01101 lock_t*
01102 lock_rec_get_next_on_page(
01103 /*======================*/
01104   lock_t* lock) 
01105 {
01106   ulint space;
01107   ulint page_no;
01108 
01109   ut_ad(mutex_own(&kernel_mutex));
01110   ut_ad(lock_get_type_low(lock) == LOCK_REC);
01111 
01112   space = lock->un_member.rec_lock.space;
01113   page_no = lock->un_member.rec_lock.page_no;
01114 
01115   for (;;) {
01116     lock = static_cast<lock_t *>(HASH_GET_NEXT(hash, lock));
01117 
01118     if (!lock) {
01119 
01120       break;
01121     }
01122 
01123     if ((lock->un_member.rec_lock.space == space)
01124         && (lock->un_member.rec_lock.page_no == page_no)) {
01125 
01126       break;
01127     }
01128   }
01129 
01130   return(lock);
01131 }
01132 
01133 /*********************************************************************/
01137 UNIV_INLINE
01138 lock_t*
01139 lock_rec_get_first_on_page_addr(
01140 /*============================*/
01141   ulint space,  
01142   ulint page_no)
01143 {
01144   lock_t* lock;
01145 
01146   ut_ad(mutex_own(&kernel_mutex));
01147 
01148   lock = static_cast<lock_t *>(HASH_GET_FIRST(lock_sys->rec_hash,
01149             lock_rec_hash(space, page_no)));
01150   while (lock) {
01151     if ((lock->un_member.rec_lock.space == space)
01152         && (lock->un_member.rec_lock.page_no == page_no)) {
01153 
01154       break;
01155     }
01156 
01157     lock = static_cast<lock_t *>(HASH_GET_NEXT(hash, lock));
01158   }
01159 
01160   return(lock);
01161 }
01162 
01163 /*********************************************************************/
01166 UNIV_INTERN
01167 ibool
01168 lock_rec_expl_exist_on_page(
01169 /*========================*/
01170   ulint space,  
01171   ulint page_no)
01172 {
01173   ibool ret;
01174 
01175   mutex_enter(&kernel_mutex);
01176 
01177   if (lock_rec_get_first_on_page_addr(space, page_no)) {
01178     ret = TRUE;
01179   } else {
01180     ret = FALSE;
01181   }
01182 
01183   mutex_exit(&kernel_mutex);
01184 
01185   return(ret);
01186 }
01187 
01188 /*********************************************************************/
01192 UNIV_INLINE
01193 lock_t*
01194 lock_rec_get_first_on_page(
01195 /*=======================*/
01196   const buf_block_t*  block)  
01197 {
01198   ulint hash;
01199   lock_t* lock;
01200   ulint space = buf_block_get_space(block);
01201   ulint page_no = buf_block_get_page_no(block);
01202 
01203   ut_ad(mutex_own(&kernel_mutex));
01204 
01205   hash = buf_block_get_lock_hash_val(block);
01206 
01207   lock = static_cast<lock_t *>(HASH_GET_FIRST(lock_sys->rec_hash, hash));
01208 
01209   while (lock) {
01210     if ((lock->un_member.rec_lock.space == space)
01211         && (lock->un_member.rec_lock.page_no == page_no)) {
01212 
01213       break;
01214     }
01215 
01216     lock = static_cast<lock_t *>(HASH_GET_NEXT(hash, lock));
01217   }
01218 
01219   return(lock);
01220 }
01221 
01222 /*********************************************************************/
01225 UNIV_INLINE
01226 lock_t*
01227 lock_rec_get_next(
01228 /*==============*/
01229   ulint heap_no,
01230   lock_t* lock) 
01231 {
01232   ut_ad(mutex_own(&kernel_mutex));
01233 
01234   do {
01235     ut_ad(lock_get_type_low(lock) == LOCK_REC);
01236     lock = lock_rec_get_next_on_page(lock);
01237   } while (lock && !lock_rec_get_nth_bit(lock, heap_no));
01238 
01239   return(lock);
01240 }
01241 
01242 /*********************************************************************/
01245 UNIV_INLINE
01246 lock_t*
01247 lock_rec_get_first(
01248 /*===============*/
01249   const buf_block_t*  block,  
01250   ulint     heap_no)
01251 {
01252   lock_t* lock;
01253 
01254   ut_ad(mutex_own(&kernel_mutex));
01255 
01256   for (lock = lock_rec_get_first_on_page(block); lock;
01257        lock = lock_rec_get_next_on_page(lock)) {
01258     if (lock_rec_get_nth_bit(lock, heap_no)) {
01259       break;
01260     }
01261   }
01262 
01263   return(lock);
01264 }
01265 
01266 /*********************************************************************/
01270 static
01271 void
01272 lock_rec_bitmap_reset(
01273 /*==================*/
01274   lock_t* lock) 
01275 {
01276   ulint n_bytes;
01277 
01278   ut_ad(lock_get_type_low(lock) == LOCK_REC);
01279 
01280   /* Reset to zero the bitmap which resides immediately after the lock
01281   struct */
01282 
01283   n_bytes = lock_rec_get_n_bits(lock) / 8;
01284 
01285   ut_ad((lock_rec_get_n_bits(lock) % 8) == 0);
01286 
01287   memset(&lock[1], 0, n_bytes);
01288 }
01289 
01290 /*********************************************************************/
01293 static
01294 lock_t*
01295 lock_rec_copy(
01296 /*==========*/
01297   const lock_t* lock, 
01298   mem_heap_t* heap) 
01299 {
01300   ulint size;
01301 
01302   ut_ad(lock_get_type_low(lock) == LOCK_REC);
01303 
01304   size = sizeof(lock_t) + lock_rec_get_n_bits(lock) / 8;
01305 
01306   return static_cast<lock_t *>(mem_heap_dup(heap, lock, size));
01307 }
01308 
01309 /*********************************************************************/
01312 UNIV_INTERN
01313 const lock_t*
01314 lock_rec_get_prev(
01315 /*==============*/
01316   const lock_t* in_lock,
01317   ulint   heap_no)
01318 {
01319   lock_t* lock;
01320   ulint space;
01321   ulint page_no;
01322   lock_t* found_lock  = NULL;
01323 
01324   ut_ad(mutex_own(&kernel_mutex));
01325   ut_ad(lock_get_type_low(in_lock) == LOCK_REC);
01326 
01327   space = in_lock->un_member.rec_lock.space;
01328   page_no = in_lock->un_member.rec_lock.page_no;
01329 
01330   lock = lock_rec_get_first_on_page_addr(space, page_no);
01331 
01332   for (;;) {
01333     ut_ad(lock);
01334 
01335     if (lock == in_lock) {
01336 
01337       return(found_lock);
01338     }
01339 
01340     if (lock_rec_get_nth_bit(lock, heap_no)) {
01341 
01342       found_lock = lock;
01343     }
01344 
01345     lock = lock_rec_get_next_on_page(lock);
01346   }
01347 }
01348 
01349 /*============= FUNCTIONS FOR ANALYZING TABLE LOCK QUEUE ================*/
01350 
01351 /*********************************************************************/
01354 UNIV_INLINE
01355 lock_t*
01356 lock_table_has(
01357 /*===========*/
01358   trx_t*    trx,  
01359   dict_table_t* table,  
01360   enum lock_mode  mode) 
01361 {
01362   lock_t* lock;
01363 
01364   ut_ad(mutex_own(&kernel_mutex));
01365 
01366   /* Look for stronger locks the same trx already has on the table */
01367 
01368   lock = UT_LIST_GET_LAST(table->locks);
01369 
01370   while (lock != NULL) {
01371 
01372     if (lock->trx == trx
01373         && lock_mode_stronger_or_eq(lock_get_mode(lock), mode)) {
01374 
01375       /* The same trx already has locked the table in
01376       a mode stronger or equal to the mode given */
01377 
01378       ut_ad(!lock_get_wait(lock));
01379 
01380       return(lock);
01381     }
01382 
01383     lock = UT_LIST_GET_PREV(un_member.tab_lock.locks, lock);
01384   }
01385 
01386   return(NULL);
01387 }
01388 
01389 /*============= FUNCTIONS FOR ANALYZING RECORD LOCK QUEUE ================*/
01390 
01391 /*********************************************************************/
/** Checks if the transaction holds a granted explicit lock on the record
that is at least as strong as the requested precise mode. Gap and
not-gap sublocks only satisfy a request of the same kind, except on the
supremum record where all locks are gap-type anyway.
@return lock if the trx holds a covering granted lock, NULL otherwise */
UNIV_INLINE
lock_t*
lock_rec_has_expl(
/*==============*/
  ulint     precise_mode,/*!< in: LOCK_S or LOCK_X possibly
        ORed with LOCK_GAP or LOCK_REC_NOT_GAP; must not contain
        LOCK_INSERT_INTENTION (asserted below) */
  const buf_block_t*  block,  /*!< in: block containing the record */
  ulint     heap_no,/*!< in: heap number of the record */
  trx_t*      trx)  /*!< in: transaction */
{
  lock_t* lock;

  ut_ad(mutex_own(&kernel_mutex));
  ut_ad((precise_mode & LOCK_MODE_MASK) == LOCK_S
        || (precise_mode & LOCK_MODE_MASK) == LOCK_X);
  ut_ad(!(precise_mode & LOCK_INSERT_INTENTION));

  lock = lock_rec_get_first(block, heap_no);

  while (lock) {
    /* The candidate must: belong to this trx; be at least as strong;
    be granted (not waiting); not be a narrower sublock than requested
    (rec-not-gap vs gap), unless on the supremum; and not be an insert
    intention lock. */
    if (lock->trx == trx
        && lock_mode_stronger_or_eq(lock_get_mode(lock),
            static_cast<lock_mode>(precise_mode & LOCK_MODE_MASK))
        && !lock_get_wait(lock)
        && (!lock_rec_get_rec_not_gap(lock)
      || (precise_mode & LOCK_REC_NOT_GAP)
      || heap_no == PAGE_HEAP_NO_SUPREMUM)
        && (!lock_rec_get_gap(lock)
      || (precise_mode & LOCK_GAP)
      || heap_no == PAGE_HEAP_NO_SUPREMUM)
        && (!lock_rec_get_insert_intention(lock))) {

      return(lock);
    }

    lock = lock_rec_get_next(heap_no, lock);
  }

  return(NULL);
}
01439 
01440 #ifdef UNIV_DEBUG
01441 /*********************************************************************/
/** Checks if some other transaction has an explicit lock request of the
given mode (or stronger) on the record. Used only in debug assertions.
@return lock if found, NULL otherwise */
static
lock_t*
lock_rec_other_has_expl_req(
/*========================*/
  enum lock_mode    mode, /*!< in: LOCK_S or LOCK_X */
  ulint     gap,  /*!< in: LOCK_GAP if gap-type locks should
        also be taken into account, 0 if only non-gap locks count */
  ulint     wait, /*!< in: LOCK_WAIT if waiting requests should
        also be taken into account, 0 if only granted locks count */
  const buf_block_t*  block,  /*!< in: block containing the record */
  ulint     heap_no,/*!< in: heap number of the record */
  const trx_t*    trx)  /*!< in: transaction whose own locks are
        ignored */
{
  lock_t* lock;

  ut_ad(mutex_own(&kernel_mutex));
  ut_ad(mode == LOCK_X || mode == LOCK_S);
  ut_ad(gap == 0 || gap == LOCK_GAP);
  ut_ad(wait == 0 || wait == LOCK_WAIT);

  lock = lock_rec_get_first(block, heap_no);

  while (lock) {
    /* Note that on the supremum record every lock counts as a
    gap-type lock, hence the heap_no check below. */
    if (lock->trx != trx
        && (gap
      || !(lock_rec_get_gap(lock)
           || heap_no == PAGE_HEAP_NO_SUPREMUM))
        && (wait || !lock_get_wait(lock))
        && lock_mode_stronger_or_eq(lock_get_mode(lock), mode)) {

      return(lock);
    }

    lock = lock_rec_get_next(heap_no, lock);
  }

  return(NULL);
}
01487 #endif /* UNIV_DEBUG */
01488 
01489 /*********************************************************************/
01493 static
01494 lock_t*
01495 lock_rec_other_has_conflicting(
01496 /*===========================*/
01497   enum lock_mode    mode, 
01501   const buf_block_t*  block,  
01503   ulint     heap_no,
01504   trx_t*      trx)  
01505 {
01506   lock_t* lock;
01507 
01508   ut_ad(mutex_own(&kernel_mutex));
01509 
01510   lock = lock_rec_get_first(block, heap_no);
01511 
01512   if (UNIV_LIKELY_NULL(lock)) {
01513     if (UNIV_UNLIKELY(heap_no == PAGE_HEAP_NO_SUPREMUM)) {
01514 
01515       do {
01516         if (lock_rec_has_to_wait(trx, mode, lock,
01517                TRUE)) {
01518           return(lock);
01519         }
01520 
01521         lock = lock_rec_get_next(heap_no, lock);
01522       } while (lock);
01523     } else {
01524 
01525       do {
01526         if (lock_rec_has_to_wait(trx, mode, lock,
01527                FALSE)) {
01528           return(lock);
01529         }
01530 
01531         lock = lock_rec_get_next(heap_no, lock);
01532       } while (lock);
01533     }
01534   }
01535 
01536   return(NULL);
01537 }
01538 
01539 /*********************************************************************/
01544 UNIV_INLINE
01545 lock_t*
01546 lock_rec_find_similar_on_page(
01547 /*==========================*/
01548   ulint   type_mode,  
01549   ulint   heap_no,  
01550   lock_t*   lock,   
01551   const trx_t*  trx)    
01552 {
01553   ut_ad(mutex_own(&kernel_mutex));
01554 
01555   while (lock != NULL) {
01556     if (lock->trx == trx
01557         && lock->type_mode == type_mode
01558         && lock_rec_get_n_bits(lock) > heap_no) {
01559 
01560       return(lock);
01561     }
01562 
01563     lock = lock_rec_get_next_on_page(lock);
01564   }
01565 
01566   return(NULL);
01567 }
01568 
01569 /*********************************************************************/
/** Checks if some transaction has an implicit x-lock on a record in a
secondary index. Because secondary index records carry no trx id, this
may require a look-up in the clustered index.
@return transaction which has the x-lock, or NULL */
static
trx_t*
lock_sec_rec_some_has_impl_off_kernel(
/*==================================*/
  const rec_t*  rec,  /*!< in: user record in the secondary index */
  dict_index_t* index,  /*!< in: secondary index */
  const ulint*  offsets)/*!< in: rec_get_offsets(rec, index) */
{
  const page_t* page = page_align(rec);

  ut_ad(mutex_own(&kernel_mutex));
  ut_ad(!dict_index_is_clust(index));
  ut_ad(page_rec_is_user_rec(rec));
  ut_ad(rec_offs_validate(rec, index, offsets));

  /* Some transaction may have an implicit x-lock on the record only
  if the max trx id for the page >= min trx id for the trx list, or
  database recovery is running. We do not write the changes of a page
  max trx id to the log, and therefore during recovery, this value
  for a page may be incorrect. */

  if (page_get_max_trx_id(page) < trx_list_get_min_trx_id()
      && !recv_recovery_is_on()) {

    return(NULL);
  }

  /* Ok, in this case it is possible that some transaction has an
  implicit x-lock. We have to look in the clustered index. */

  if (!lock_check_trx_id_sanity(page_get_max_trx_id(page),
              rec, index, offsets, TRUE)) {
    buf_page_print(page, 0);

    /* The page is corrupt: try to avoid a crash by returning
    NULL */
    return(NULL);
  }

  return(row_vers_impl_x_locked_off_kernel(rec, index, offsets));
}
01614 
01615 /*********************************************************************/
01619 UNIV_INTERN
01620 ulint
01621 lock_number_of_rows_locked(
01622 /*=======================*/
01623   const trx_t*  trx)  
01624 {
01625   lock_t* lock;
01626   ulint   n_records = 0;
01627   ulint n_bits;
01628   ulint n_bit;
01629 
01630   lock = UT_LIST_GET_FIRST(trx->trx_locks);
01631 
01632   while (lock) {
01633     if (lock_get_type_low(lock) == LOCK_REC) {
01634       n_bits = lock_rec_get_n_bits(lock);
01635 
01636       for (n_bit = 0; n_bit < n_bits; n_bit++) {
01637         if (lock_rec_get_nth_bit(lock, n_bit)) {
01638           n_records++;
01639         }
01640       }
01641     }
01642 
01643     lock = UT_LIST_GET_NEXT(trx_locks, lock);
01644   }
01645 
01646   return (n_records);
01647 }
01648 
01649 /*============== RECORD LOCK CREATION AND QUEUE MANAGEMENT =============*/
01650 
01651 /*********************************************************************/
/** Creates a new record lock struct and inserts it to the lock queue
(page hash chain) and the trx's lock list. If LOCK_WAIT is set in
type_mode, also registers the lock as the trx's wait lock.
@return the created lock */
static
lock_t*
lock_rec_create(
/*============*/
  ulint     type_mode,/*!< in: lock mode and wait flag, type
        is ignored and replaced by LOCK_REC */
  const buf_block_t*  block,  /*!< in: buffer block containing the record */
  ulint     heap_no,/*!< in: heap number of the record */
  dict_index_t*   index,  /*!< in: index of record */
  trx_t*      trx)  /*!< in: transaction */
{
  lock_t*   lock;
  ulint   page_no;
  ulint   space;
  ulint   n_bits;
  ulint   n_bytes;
  const page_t* page;

  ut_ad(mutex_own(&kernel_mutex));

  space = buf_block_get_space(block);
  page_no = buf_block_get_page_no(block);
  page = block->frame;

  ut_ad(!!page_is_comp(page) == dict_table_is_comp(index->table));

  /* If rec is the supremum record, then we reset the gap and
  LOCK_REC_NOT_GAP bits, as all locks on the supremum are
  automatically of the gap type */

  if (UNIV_UNLIKELY(heap_no == PAGE_HEAP_NO_SUPREMUM)) {
    ut_ad(!(type_mode & LOCK_REC_NOT_GAP));

    type_mode = type_mode & ~(LOCK_GAP | LOCK_REC_NOT_GAP);
  }

  /* Make lock bitmap bigger by a safety margin, so that later
  inserted records on the page can reuse this struct. */
  n_bits = page_dir_get_n_heap(page) + LOCK_PAGE_BITMAP_MARGIN;
  n_bytes = 1 + n_bits / 8;

  /* The bitmap is allocated contiguously after the struct; both
  live in the trx's lock heap. */
  lock = static_cast<lock_t *>(mem_heap_alloc(trx->lock_heap, sizeof(lock_t) + n_bytes));

  UT_LIST_ADD_LAST(trx_locks, trx->trx_locks, lock);

  lock->trx = trx;

  lock->type_mode = (type_mode & ~LOCK_TYPE_MASK) | LOCK_REC;
  lock->index = index;

  lock->un_member.rec_lock.space = space;
  lock->un_member.rec_lock.page_no = page_no;
  lock->un_member.rec_lock.n_bits = n_bytes * 8;

  /* Reset to zero the bitmap which resides immediately after the
  lock struct */

  lock_rec_bitmap_reset(lock);

  /* Set the bit corresponding to rec */
  lock_rec_set_nth_bit(lock, heap_no);

  HASH_INSERT(lock_t, hash, lock_sys->rec_hash,
        lock_rec_fold(space, page_no), lock);
  if (UNIV_UNLIKELY(type_mode & LOCK_WAIT)) {

    lock_set_lock_and_trx_wait(lock, trx);
  }

  return(lock);
}
01727 
01728 /*********************************************************************/
/** Enqueues a waiting request for a record lock, runs the deadlock
check, and suspends the requesting query thread if the wait is needed.
@return DB_LOCK_WAIT if the thread was suspended to wait; DB_DEADLOCK
if a deadlock was detected and this trx was chosen as the victim;
DB_SUCCESS_LOCKED_REC if another trx was chosen as the victim and the
lock was already granted to us; DB_QUE_THR_SUSPENDED on an internal
error (thread was already stopped) */
static
enum db_err
lock_rec_enqueue_waiting(
/*=====================*/
  ulint     type_mode,/*!< in: lock mode this trx is requesting,
        ORed with gap/rec-not-gap/insert-intention flags */
  const buf_block_t*  block,  /*!< in: buffer block containing the record */
  ulint     heap_no,/*!< in: heap number of the record */
  dict_index_t*   index,  /*!< in: index of record */
  que_thr_t*    thr)  /*!< in: query thread */
{
  lock_t* lock;
  trx_t*  trx;

  ut_ad(mutex_own(&kernel_mutex));

  /* Test if there already is some other reason to suspend thread:
  we do not enqueue a lock request if the query thread should be
  stopped anyway */

  if (UNIV_UNLIKELY(que_thr_stop(thr))) {

    /* This should never happen; crash in debug builds. */
    ut_error;

    return(DB_QUE_THR_SUSPENDED);
  }

  trx = thr_get_trx(thr);

  /* A lock wait during a dictionary operation must not happen;
  report it but fall through and wait anyway. */
  switch (trx_get_dict_operation(trx)) {
  case TRX_DICT_OP_NONE:
    break;
  case TRX_DICT_OP_TABLE:
  case TRX_DICT_OP_INDEX:
    ut_print_timestamp(stderr);
    fputs("  InnoDB: Error: a record lock wait happens"
          " in a dictionary operation!\n"
          "InnoDB: ", stderr);
    dict_index_name_print(stderr, trx, index);
    fputs(".\n"
          "InnoDB: Submit a detailed bug report"
          " to http://bugs.mysql.com\n",
          stderr);
  }

  /* Enqueue the lock request that will wait to be granted */
  lock = lock_rec_create(type_mode | LOCK_WAIT,
             block, heap_no, index, trx);

  /* Check if a deadlock occurs: if yes, remove the lock request and
  return an error code */

  if (UNIV_UNLIKELY(lock_deadlock_occurs(lock, trx))) {

    /* Undo the enqueue: clear the wait linkage and the record
    bit that lock_rec_create() set for us. */
    lock_reset_lock_and_trx_wait(lock);
    lock_rec_reset_nth_bit(lock, heap_no);

    return(DB_DEADLOCK);
  }

  /* If there was a deadlock but we chose another transaction as a
  victim, it is possible that we already have the lock now granted! */

  if (trx->wait_lock == NULL) {

    return(DB_SUCCESS_LOCKED_REC);
  }

  trx->que_state = TRX_QUE_LOCK_WAIT;
  trx->was_chosen_as_deadlock_victim = FALSE;
  trx->wait_started = time(NULL);

  ut_a(que_thr_stop(thr));

#ifdef UNIV_DEBUG
  if (lock_print_waits) {
    fprintf(stderr, "Lock wait for trx " TRX_ID_FMT " in index ",
      trx->id);
    ut_print_name(stderr, trx, FALSE, index->name);
  }
#endif /* UNIV_DEBUG */

  return(DB_LOCK_WAIT);
}
01827 
01828 /*********************************************************************/
/** Adds a record lock request to the record lock queue. The request may
be satisfied by setting a bit in an existing compatible lock struct of
the same trx (to save memory); otherwise a new lock struct is created.
@return lock where the bit was set, or the newly created lock */
static
lock_t*
lock_rec_add_to_queue(
/*==================*/
  ulint     type_mode,/*!< in: lock mode, wait flag, gap and
        rec-not-gap flags; type is replaced by LOCK_REC below */
  const buf_block_t*  block,  /*!< in: buffer block containing the record */
  ulint     heap_no,/*!< in: heap number of the record */
  dict_index_t*   index,  /*!< in: index of record */
  trx_t*      trx)  /*!< in: transaction */
{
  lock_t* lock;

  ut_ad(mutex_own(&kernel_mutex));
#ifdef UNIV_DEBUG
  switch (type_mode & LOCK_MODE_MASK) {
  case LOCK_X:
  case LOCK_S:
    break;
  default:
    ut_error;
  }

  /* Sanity check: when adding a granted non-gap lock, no other trx
  may have an explicit request for the conflicting basic mode. */
  if (!(type_mode & (LOCK_WAIT | LOCK_GAP))) {
    enum lock_mode  mode = (type_mode & LOCK_MODE_MASK) == LOCK_S
      ? LOCK_X
      : LOCK_S;
    lock_t*   other_lock
      = lock_rec_other_has_expl_req(mode, 0, LOCK_WAIT,
                  block, heap_no, trx);
    ut_a(!other_lock);
  }
#endif /* UNIV_DEBUG */

  type_mode |= LOCK_REC;

  /* If rec is the supremum record, then we can reset the gap bit, as
  all locks on the supremum are automatically of the gap type, and we
  try to avoid unnecessary memory consumption of a new record lock
  struct for a gap type lock */

  if (UNIV_UNLIKELY(heap_no == PAGE_HEAP_NO_SUPREMUM)) {
    ut_ad(!(type_mode & LOCK_REC_NOT_GAP));

    /* There should never be LOCK_REC_NOT_GAP on a supremum
    record, but let us play safe */

    type_mode = type_mode & ~(LOCK_GAP | LOCK_REC_NOT_GAP);
  }

  /* Look for a waiting lock request on the same record or on a gap */

  lock = lock_rec_get_first_on_page(block);

  while (lock != NULL) {
    if (lock_get_wait(lock)
        && (lock_rec_get_nth_bit(lock, heap_no))) {

      /* A waiting request exists on this record: we must
      not jump the queue by piggybacking on an existing
      granted lock struct. */
      goto somebody_waits;
    }

    lock = lock_rec_get_next_on_page(lock);
  }

  if (UNIV_LIKELY(!(type_mode & LOCK_WAIT))) {

    /* Look for a similar record lock on the same page:
    if one is found and there are no waiting lock requests,
    we can just set the bit */

    lock = lock_rec_find_similar_on_page(
      type_mode, heap_no,
      lock_rec_get_first_on_page(block), trx);

    if (lock) {

      lock_rec_set_nth_bit(lock, heap_no);

      return(lock);
    }
  }

somebody_waits:
  return(lock_rec_create(type_mode, block, heap_no, index, trx));
}
01923 
/** Status codes returned by lock_rec_lock_fast() */
enum lock_rec_req_status {
  /** Failed to acquire the lock via the fast path; the caller must
  retry with the slow path */
  LOCK_REC_FAIL,
  /** Succeeded: an existing lock bit already covered the record */
  LOCK_REC_SUCCESS,
  /** Succeeded: a new lock struct was created or a new bit was set */
  LOCK_REC_SUCCESS_CREATED
};
01933 
01934 /*********************************************************************/
/** Tries to lock the specified record in the mode requested, using a
simplified fast path that only applies when this trx's lock is the sole
lock struct on the page. Does not consider implicit locks.
@return LOCK_REC_SUCCESS, LOCK_REC_SUCCESS_CREATED, or LOCK_REC_FAIL
(in which case the caller must fall back to lock_rec_lock_slow()) */
UNIV_INLINE
enum lock_rec_req_status
lock_rec_lock_fast(
/*===============*/
  ibool     impl, /*!< in: if TRUE, no explicit lock struct is
        created or bit set; the caller relies on an implicit lock */
  ulint     mode, /*!< in: LOCK_S or LOCK_X, possibly ORed with
        LOCK_GAP or LOCK_REC_NOT_GAP */
  const buf_block_t*  block,  /*!< in: buffer block containing the record */
  ulint     heap_no,/*!< in: heap number of the record */
  dict_index_t*   index,  /*!< in: index of record */
  que_thr_t*    thr)  /*!< in: query thread */
{
  lock_t* lock;
  trx_t*  trx;

  ut_ad(mutex_own(&kernel_mutex));
  ut_ad((LOCK_MODE_MASK & mode) != LOCK_S
        || lock_table_has(thr_get_trx(thr), index->table, LOCK_IS));
  ut_ad((LOCK_MODE_MASK & mode) != LOCK_X
        || lock_table_has(thr_get_trx(thr), index->table, LOCK_IX));
  ut_ad((LOCK_MODE_MASK & mode) == LOCK_S
        || (LOCK_MODE_MASK & mode) == LOCK_X);
  ut_ad(mode - (LOCK_MODE_MASK & mode) == LOCK_GAP
        || mode - (LOCK_MODE_MASK & mode) == 0
        || mode - (LOCK_MODE_MASK & mode) == LOCK_REC_NOT_GAP);

  lock = lock_rec_get_first_on_page(block);

  trx = thr_get_trx(thr);

  /* Fast path 1: no locks at all on the page -- create ours. */
  if (lock == NULL) {
    if (!impl) {
      lock_rec_create(mode, block, heap_no, index, trx);
    }

    return(LOCK_REC_SUCCESS_CREATED);
  }

  /* More than one lock struct on the page: too complex for the
  fast path. */
  if (lock_rec_get_next_on_page(lock)) {

    return(LOCK_REC_FAIL);
  }

  /* The single lock must be ours, of exactly the requested
  type_mode, and its bitmap must cover this heap_no. */
  if (lock->trx != trx
      || lock->type_mode != (mode | LOCK_REC)
      || lock_rec_get_n_bits(lock) <= heap_no) {

    return(LOCK_REC_FAIL);
  }

  if (!impl) {
    /* If the nth bit of the record lock is already set then we
    do not set a new lock bit, otherwise we do set */

    if (!lock_rec_get_nth_bit(lock, heap_no)) {
      lock_rec_set_nth_bit(lock, heap_no);
      return(LOCK_REC_SUCCESS_CREATED);
    }
  }

  return(LOCK_REC_SUCCESS);
}
02009 
02010 /*********************************************************************/
/** Tries to lock the specified record in the mode requested, handling
the general case: the trx may already hold a covering lock, another trx
may hold a conflicting one (wait needed), or a new lock is enqueued.
@return DB_SUCCESS, DB_SUCCESS_LOCKED_REC, DB_LOCK_WAIT, DB_DEADLOCK,
or DB_QUE_THR_SUSPENDED (the latter three from enqueue_waiting) */
static
enum db_err
lock_rec_lock_slow(
/*===============*/
  ibool     impl, /*!< in: if TRUE, no explicit lock struct is
        created or bit set; the caller relies on an implicit lock */
  ulint     mode, /*!< in: LOCK_S or LOCK_X, possibly ORed with
        LOCK_GAP or LOCK_REC_NOT_GAP */
  const buf_block_t*  block,  /*!< in: buffer block containing the record */
  ulint     heap_no,/*!< in: heap number of the record */
  dict_index_t*   index,  /*!< in: index of record */
  que_thr_t*    thr)  /*!< in: query thread */
{
  trx_t*  trx;

  ut_ad(mutex_own(&kernel_mutex));
  ut_ad((LOCK_MODE_MASK & mode) != LOCK_S
        || lock_table_has(thr_get_trx(thr), index->table, LOCK_IS));
  ut_ad((LOCK_MODE_MASK & mode) != LOCK_X
        || lock_table_has(thr_get_trx(thr), index->table, LOCK_IX));
  ut_ad((LOCK_MODE_MASK & mode) == LOCK_S
        || (LOCK_MODE_MASK & mode) == LOCK_X);
  ut_ad(mode - (LOCK_MODE_MASK & mode) == LOCK_GAP
        || mode - (LOCK_MODE_MASK & mode) == 0
        || mode - (LOCK_MODE_MASK & mode) == LOCK_REC_NOT_GAP);

  trx = thr_get_trx(thr);

  if (lock_rec_has_expl(mode, block, heap_no, trx)) {
    /* The trx already has a strong enough lock on rec: do
    nothing */

  } else if (lock_rec_other_has_conflicting(static_cast<lock_mode>(mode), block, heap_no, trx)) {

    /* If another transaction has a non-gap conflicting request in
    the queue, as this transaction does not have a lock strong
    enough already granted on the record, we have to wait. */

    return(lock_rec_enqueue_waiting(mode, block, heap_no,
            index, thr));
  } else if (!impl) {
    /* Set the requested lock on the record */

    lock_rec_add_to_queue(LOCK_REC | mode, block,
              heap_no, index, trx);
    return(DB_SUCCESS_LOCKED_REC);
  }

  return(DB_SUCCESS);
}
02071 
02072 /*********************************************************************/
/** Tries to lock the specified record in the mode requested. First tries
the fast path (lock_rec_lock_fast); on LOCK_REC_FAIL falls back to the
general routine (lock_rec_lock_slow). Does not consider implicit locks.
@return DB_SUCCESS, DB_SUCCESS_LOCKED_REC, DB_LOCK_WAIT, DB_DEADLOCK,
or DB_QUE_THR_SUSPENDED */
static
enum db_err
lock_rec_lock(
/*==========*/
  ibool     impl, /*!< in: if TRUE, no explicit lock struct is
        created or bit set; the caller relies on an implicit lock */
  ulint     mode, /*!< in: LOCK_S or LOCK_X, possibly ORed with
        LOCK_GAP or LOCK_REC_NOT_GAP */
  const buf_block_t*  block,  /*!< in: buffer block containing the record */
  ulint     heap_no,/*!< in: heap number of the record */
  dict_index_t*   index,  /*!< in: index of record */
  que_thr_t*    thr)  /*!< in: query thread */
{
  ut_ad(mutex_own(&kernel_mutex));
  ut_ad((LOCK_MODE_MASK & mode) != LOCK_S
        || lock_table_has(thr_get_trx(thr), index->table, LOCK_IS));
  ut_ad((LOCK_MODE_MASK & mode) != LOCK_X
        || lock_table_has(thr_get_trx(thr), index->table, LOCK_IX));
  ut_ad((LOCK_MODE_MASK & mode) == LOCK_S
        || (LOCK_MODE_MASK & mode) == LOCK_X);
  ut_ad(mode - (LOCK_MODE_MASK & mode) == LOCK_GAP
        || mode - (LOCK_MODE_MASK & mode) == LOCK_REC_NOT_GAP
        || mode - (LOCK_MODE_MASK & mode) == 0);

  /* We try a simplified and faster subroutine for the most
  common cases */
  switch (lock_rec_lock_fast(impl, mode, block, heap_no, index, thr)) {
  case LOCK_REC_SUCCESS:
    return(DB_SUCCESS);
  case LOCK_REC_SUCCESS_CREATED:
    return(DB_SUCCESS_LOCKED_REC);
  case LOCK_REC_FAIL:
    return(lock_rec_lock_slow(impl, mode, block,
            heap_no, index, thr));
  }

  /* Unreachable: lock_rec_lock_fast() returns only the three
  values handled above. */
  ut_error;
  return(DB_ERROR);
}
02123 
02124 /*********************************************************************/
02127 static
02128 ibool
02129 lock_rec_has_to_wait_in_queue(
02130 /*==========================*/
02131   lock_t* wait_lock)  
02132 {
02133   lock_t* lock;
02134   ulint space;
02135   ulint page_no;
02136   ulint heap_no;
02137 
02138   ut_ad(mutex_own(&kernel_mutex));
02139   ut_ad(lock_get_wait(wait_lock));
02140   ut_ad(lock_get_type_low(wait_lock) == LOCK_REC);
02141 
02142   space = wait_lock->un_member.rec_lock.space;
02143   page_no = wait_lock->un_member.rec_lock.page_no;
02144   heap_no = lock_rec_find_set_bit(wait_lock);
02145 
02146   lock = lock_rec_get_first_on_page_addr(space, page_no);
02147 
02148   while (lock != wait_lock) {
02149 
02150     if (lock_rec_get_nth_bit(lock, heap_no)
02151         && lock_has_to_wait(wait_lock, lock)) {
02152 
02153       return(TRUE);
02154     }
02155 
02156     lock = lock_rec_get_next_on_page(lock);
02157   }
02158 
02159   return(FALSE);
02160 }
02161 
02162 /*************************************************************/
/** Grants a lock to a waiting lock request: resets the wait flag and
the back pointer in the trx, performs AUTO-INC bookkeeping for
LOCK_AUTO_INC locks, and releases the transaction from lock wait if
it is still in the TRX_QUE_LOCK_WAIT state. */
static
void
lock_grant(
/*=======*/
  lock_t* lock) /*!< in/out: waiting lock request */
{
  ut_ad(mutex_own(&kernel_mutex));

  lock_reset_lock_and_trx_wait(lock);

  if (lock_get_mode(lock) == LOCK_AUTO_INC) {
    trx_t*    trx = lock->trx;
    dict_table_t* table = lock->un_member.tab_lock.table;

    /* A trx should never be granted a second AUTO-INC lock
    on the same table while already holding one. */
    if (table->autoinc_trx == trx) {
      fprintf(stderr,
        "InnoDB: Error: trx already had"
        " an AUTO-INC lock!\n");
    } else {
      /* Record the trx as current holder of the table's
      AUTO-INC lock and remember the lock in the trx so
      it can be released later. */
      table->autoinc_trx = trx;

      ib_vector_push(trx->autoinc_locks, lock);
    }
  }

#ifdef UNIV_DEBUG
  if (lock_print_waits) {
    fprintf(stderr, "Lock wait for trx " TRX_ID_FMT " ends\n",
      lock->trx->id);
  }
#endif /* UNIV_DEBUG */

  /* If we are resolving a deadlock by choosing another transaction
  as a victim, then our original transaction may not be in the
  TRX_QUE_LOCK_WAIT state, and there is no need to end the lock wait
  for it */

  if (lock->trx->que_state == TRX_QUE_LOCK_WAIT) {
    trx_end_lock_wait(lock->trx);
  }
}
02206 
02207 /*************************************************************/
/** Cancels a waiting record lock request and releases the waiting
transaction from lock wait. The request's single set bitmap bit is
cleared, so it disappears from the record's queue. */
static
void
lock_rec_cancel(
/*============*/
  lock_t* lock) /*!< in: waiting record lock request */
{
  ut_ad(mutex_own(&kernel_mutex));
  ut_ad(lock_get_type_low(lock) == LOCK_REC);

  /* Reset the bit (there can be only one set bit) in the lock bitmap */
  lock_rec_reset_nth_bit(lock, lock_rec_find_set_bit(lock));

  /* Reset the wait flag and the back pointer to lock in trx */

  lock_reset_lock_and_trx_wait(lock);

  /* The following function releases the trx from lock wait */

  trx_end_lock_wait(lock->trx);
}
02231 
02232 /*************************************************************/
02236 static
02237 void
02238 lock_rec_dequeue_from_page(
02239 /*=======================*/
02240   lock_t* in_lock)
02244 {
02245   ulint space;
02246   ulint page_no;
02247   lock_t* lock;
02248   trx_t*  trx;
02249 
02250   ut_ad(mutex_own(&kernel_mutex));
02251   ut_ad(lock_get_type_low(in_lock) == LOCK_REC);
02252 
02253   trx = in_lock->trx;
02254 
02255   space = in_lock->un_member.rec_lock.space;
02256   page_no = in_lock->un_member.rec_lock.page_no;
02257 
02258   HASH_DELETE(lock_t, hash, lock_sys->rec_hash,
02259         lock_rec_fold(space, page_no), in_lock);
02260 
02261   UT_LIST_REMOVE(trx_locks, trx->trx_locks, in_lock);
02262 
02263   /* Check if waiting locks in the queue can now be granted: grant
02264   locks if there are no conflicting locks ahead. */
02265 
02266   lock = lock_rec_get_first_on_page_addr(space, page_no);
02267 
02268   while (lock != NULL) {
02269     if (lock_get_wait(lock)
02270         && !lock_rec_has_to_wait_in_queue(lock)) {
02271 
02272       /* Grant the lock */
02273       lock_grant(lock);
02274     }
02275 
02276     lock = lock_rec_get_next_on_page(lock);
02277   }
02278 }
02279 
02280 /*************************************************************/
/** Removes a record lock request (waiting or granted) from the queue.
Unlike lock_rec_dequeue_from_page(), this does NOT check whether other
waiting requests on the page can now be granted. */
static
void
lock_rec_discard(
/*=============*/
  lock_t* in_lock)/*!< in: record lock object: all record locks
      contained in this object are removed */
{
  ulint space;
  ulint page_no;
  trx_t*  trx;

  ut_ad(mutex_own(&kernel_mutex));
  ut_ad(lock_get_type_low(in_lock) == LOCK_REC);

  trx = in_lock->trx;

  space = in_lock->un_member.rec_lock.space;
  page_no = in_lock->un_member.rec_lock.page_no;

  /* Unlink from the record-lock hash table and from the list of
  the transaction's locks. */
  HASH_DELETE(lock_t, hash, lock_sys->rec_hash,
        lock_rec_fold(space, page_no), in_lock);

  UT_LIST_REMOVE(trx_locks, trx->trx_locks, in_lock);
}
02306 
02307 /*************************************************************/
02311 static
02312 void
02313 lock_rec_free_all_from_discard_page(
02314 /*================================*/
02315   const buf_block_t*  block)  
02316 {
02317   ulint space;
02318   ulint page_no;
02319   lock_t* lock;
02320   lock_t* next_lock;
02321 
02322   ut_ad(mutex_own(&kernel_mutex));
02323 
02324   space = buf_block_get_space(block);
02325   page_no = buf_block_get_page_no(block);
02326 
02327   lock = lock_rec_get_first_on_page_addr(space, page_no);
02328 
02329   while (lock != NULL) {
02330     ut_ad(lock_rec_find_set_bit(lock) == ULINT_UNDEFINED);
02331     ut_ad(!lock_get_wait(lock));
02332 
02333     next_lock = lock_rec_get_next_on_page(lock);
02334 
02335     lock_rec_discard(lock);
02336 
02337     lock = next_lock;
02338   }
02339 }
02340 
02341 /*============= RECORD LOCK MOVING AND INHERITING ===================*/
02342 
02343 /*************************************************************/
02346 static
02347 void
02348 lock_rec_reset_and_release_wait(
02349 /*============================*/
02350   const buf_block_t*  block,  
02352   ulint     heap_no)
02353 {
02354   lock_t* lock;
02355 
02356   ut_ad(mutex_own(&kernel_mutex));
02357 
02358   lock = lock_rec_get_first(block, heap_no);
02359 
02360   while (lock != NULL) {
02361     if (lock_get_wait(lock)) {
02362       lock_rec_cancel(lock);
02363     } else {
02364       lock_rec_reset_nth_bit(lock, heap_no);
02365     }
02366 
02367     lock = lock_rec_get_next(heap_no, lock);
02368   }
02369 }
02370 
02371 /*************************************************************/
/** Makes a record lock heritable to the gap before the heir record:
for each (non-insert-intention) lock on the donor record, a gap-type
lock of the same mode is created on the heir record. Used when a
record is removed or a page is split/merged so that gap locking is
preserved. */
static
void
lock_rec_inherit_to_gap(
/*====================*/
  const buf_block_t*  heir_block, /*!< in: block containing the
            record which inherits */
  const buf_block_t*  block,    /*!< in: block containing the
            record from which inherited;
            may be the same as heir_block */
  ulint     heir_heap_no, /*!< in: heap_no of the
            inheriting record */
  ulint     heap_no)  /*!< in: heap_no of the
            donating record */
{
  lock_t* lock;

  ut_ad(mutex_own(&kernel_mutex));

  lock = lock_rec_get_first(block, heap_no);

  /* If srv_locks_unsafe_for_binlog is TRUE or session is using
  READ COMMITTED isolation level, we do not want locks set
  by an UPDATE or a DELETE to be inherited as gap type locks. But we
  DO want S-locks set by a consistency constraint to be inherited also
  then. */

  while (lock != NULL) {
    /* Skip insert-intention locks; skip X-locks when the trx
    runs at READ COMMITTED (or binlog-unsafe mode) per the note
    above. */
    if (!lock_rec_get_insert_intention(lock)
        && !((srv_locks_unsafe_for_binlog
        || lock->trx->isolation_level
        <= TRX_ISO_READ_COMMITTED)
       && lock_get_mode(lock) == LOCK_X)) {

      lock_rec_add_to_queue(LOCK_REC | LOCK_GAP
                | lock_get_mode(lock),
                heir_block, heir_heap_no,
                lock->index, lock->trx);
    }

    lock = lock_rec_get_next(heap_no, lock);
  }
}
02419 
02420 /*************************************************************/
/** Makes a record lock heritable to the gap before the heir record,
but only for locks that are gap-type (or any lock on the supremum,
which is always gap-like). Insert-intention locks are never
inherited. Used on insert: the new record inherits gap locks from
its successor. */
static
void
lock_rec_inherit_to_gap_if_gap_lock(
/*================================*/
  const buf_block_t*  block,    /*!< in: buffer block */
  ulint     heir_heap_no, /*!< in: heap_no of the
            inheriting record */
  ulint     heap_no)  /*!< in: heap_no of the
            donating record */
{
  lock_t* lock;

  ut_ad(mutex_own(&kernel_mutex));

  lock = lock_rec_get_first(block, heap_no);

  while (lock != NULL) {
    /* On the supremum every lock is effectively a gap lock;
    otherwise only inherit if the lock is not REC_NOT_GAP. */
    if (!lock_rec_get_insert_intention(lock)
        && (heap_no == PAGE_HEAP_NO_SUPREMUM
      || !lock_rec_get_rec_not_gap(lock))) {

      lock_rec_add_to_queue(LOCK_REC | LOCK_GAP
                | lock_get_mode(lock),
                block, heir_heap_no,
                lock->index, lock->trx);
    }

    lock = lock_rec_get_next(heap_no, lock);
  }
}
02456 
02457 /*************************************************************/
/** Moves all lock requests (including waiting ones) from the donator
record to the receiver record. The receiver must not already have any
locks (asserted). Works also when receiver == donator, because the
bitmap bit is reset BEFORE the lock is re-enqueued. */
static
void
lock_rec_move(
/*==========*/
  const buf_block_t*  receiver, /*!< in: block containing the
            receiving record */
  const buf_block_t*  donator,  /*!< in: block containing the
            donating record */
  ulint     receiver_heap_no,/*!< in: heap_no of the
            receiving record */
  ulint     donator_heap_no)/*!< in: heap_no of the
            donating record */
{
  lock_t* lock;

  ut_ad(mutex_own(&kernel_mutex));

  lock = lock_rec_get_first(donator, donator_heap_no);

  ut_ad(lock_rec_get_first(receiver, receiver_heap_no) == NULL);

  while (lock != NULL) {
    const ulint type_mode = lock->type_mode;

    lock_rec_reset_nth_bit(lock, donator_heap_no);

    /* A moved waiting request is re-enqueued; clear the wait
    state on the old lock object first. */
    if (UNIV_UNLIKELY(type_mode & LOCK_WAIT)) {
      lock_reset_lock_and_trx_wait(lock);
    }

    /* Note that we FIRST reset the bit, and then set the lock:
    the function works also if donator == receiver */

    lock_rec_add_to_queue(type_mode, receiver, receiver_heap_no,
              lock->index, lock->trx);
    lock = lock_rec_get_next(donator_heap_no, lock);
  }

  ut_ad(lock_rec_get_first(donator, donator_heap_no) == NULL);
}
02502 
02503 /*************************************************************/
/** Updates the lock table when a page is reorganized: copies all lock
objects on the page to a temporary heap, clears the originals, then
re-creates each lock bit on the new (reorganized) page by walking the
old-page copy and the new page with two synchronized cursors and
matching records by position. */
UNIV_INTERN
void
lock_move_reorganize_page(
/*======================*/
  const buf_block_t*  block,  /*!< in: old index page, now
            reorganized */
  const buf_block_t*  oblock) /*!< in: copy of the old, not
            reorganized page */
{
  lock_t*   lock;
  UT_LIST_BASE_NODE_T(lock_t) old_locks;
  mem_heap_t* heap    = NULL;
  ulint   comp;

  lock_mutex_enter_kernel();

  lock = lock_rec_get_first_on_page(block);

  if (lock == NULL) {
    lock_mutex_exit_kernel();

    return;
  }

  heap = mem_heap_create(256);

  /* Copy first all the locks on the page to heap and reset the
  bitmaps in the original locks; chain the copies of the locks
  using the trx_locks field in them. */

  UT_LIST_INIT(old_locks);

  do {
    /* Make a copy of the lock */
    lock_t* old_lock = lock_rec_copy(lock, heap);

    UT_LIST_ADD_LAST(trx_locks, old_locks, old_lock);

    /* Reset bitmap of lock */
    lock_rec_bitmap_reset(lock);

    if (lock_get_wait(lock)) {
      lock_reset_lock_and_trx_wait(lock);
    }

    lock = lock_rec_get_next_on_page(lock);
  } while (lock != NULL);

  comp = page_is_comp(block->frame);
  ut_ad(comp == page_is_comp(oblock->frame));

  for (lock = UT_LIST_GET_FIRST(old_locks); lock;
       lock = UT_LIST_GET_NEXT(trx_locks, lock)) {
    /* NOTE: we copy also the locks set on the infimum and
    supremum of the page; the infimum may carry locks if an
    update of a record is occurring on the page, and its locks
    were temporarily stored on the infimum */
    page_cur_t  cur1;
    page_cur_t  cur2;

    /* cur1 walks the reorganized page, cur2 the old copy;
    both start before the first record (at the infimum). */
    page_cur_set_before_first(block, &cur1);
    page_cur_set_before_first(oblock, &cur2);

    /* Set locks according to old locks */
    for (;;) {
      ulint old_heap_no;
      ulint new_heap_no;

      ut_ad(comp || !memcmp(page_cur_get_rec(&cur1),
                page_cur_get_rec(&cur2),
                rec_get_data_size_old(
                  page_cur_get_rec(
                    &cur2))));
      if (UNIV_LIKELY(comp)) {
        old_heap_no = rec_get_heap_no_new(
          page_cur_get_rec(&cur2));
        new_heap_no = rec_get_heap_no_new(
          page_cur_get_rec(&cur1));
      } else {
        old_heap_no = rec_get_heap_no_old(
          page_cur_get_rec(&cur2));
        new_heap_no = rec_get_heap_no_old(
          page_cur_get_rec(&cur1));
      }

      if (lock_rec_get_nth_bit(lock, old_heap_no)) {

        /* Clear the bit in old_lock. */
        ut_d(lock_rec_reset_nth_bit(lock,
                  old_heap_no));

        /* NOTE that the old lock bitmap could be too
        small for the new heap number! */

        lock_rec_add_to_queue(lock->type_mode, block,
                  new_heap_no,
                  lock->index, lock->trx);

        /* if (new_heap_no == PAGE_HEAP_NO_SUPREMUM
        && lock_get_wait(lock)) {
        fprintf(stderr,
        "---\n--\n!!!Lock reorg: supr type %lu\n",
        lock->type_mode);
        } */
      }

      /* Both cursors reach the supremum together; then all
      records on the page have been processed. */
      if (UNIV_UNLIKELY
          (new_heap_no == PAGE_HEAP_NO_SUPREMUM)) {

        ut_ad(old_heap_no == PAGE_HEAP_NO_SUPREMUM);
        break;
      }

      page_cur_move_to_next(&cur1);
      page_cur_move_to_next(&cur2);
    }

#ifdef UNIV_DEBUG
    {
      ulint i = lock_rec_find_set_bit(lock);

      /* Check that all locks were moved. */
      if (UNIV_UNLIKELY(i != ULINT_UNDEFINED)) {
        fprintf(stderr,
          "lock_move_reorganize_page():"
          " %lu not moved in %p\n",
          (ulong) i, (void*) lock);
        ut_error;
      }
    }
#endif /* UNIV_DEBUG */
  }

  lock_mutex_exit_kernel();

  mem_heap_free(heap);

#ifdef UNIV_DEBUG_LOCK_VALIDATE
  ut_ad(lock_rec_validate_page(buf_block_get_space(block),
             buf_block_get_page_no(block)));
#endif
}
02650 
02651 /*************************************************************/
/** Moves the explicit locks on user records from the end of a page
(from rec onward) to another page, when the record list end is moved
there: for each lock bit on an old record the bit is cleared and a
corresponding lock is enqueued for the matching record on the new
page. */
UNIV_INTERN
void
lock_move_rec_list_end(
/*===================*/
  const buf_block_t*  new_block,  /*!< in: index page to
            move to */
  const buf_block_t*  block,    /*!< in: index page */
  const rec_t*    rec)    /*!< in: record on page:
            this record and the ones
            after it are moved */
{
  lock_t*   lock;
  const ulint comp  = page_rec_is_comp(rec);

  lock_mutex_enter_kernel();

  /* Note: when we move locks from record to record, waiting locks
  and possible granted gap type locks behind them are enqueued in
  the original order, because new elements are inserted to a hash
  table to the end of the hash chain, and lock_rec_add_to_queue
  does not reuse locks if there are waiters in the queue. */

  for (lock = lock_rec_get_first_on_page(block); lock;
       lock = lock_rec_get_next_on_page(lock)) {
    page_cur_t  cur1;
    page_cur_t  cur2;
    const ulint type_mode = lock->type_mode;

    /* cur1 starts at rec on the old page (skipping the
    infimum), cur2 at the first user record of the new page. */
    page_cur_position(rec, block, &cur1);

    if (page_cur_is_before_first(&cur1)) {
      page_cur_move_to_next(&cur1);
    }

    page_cur_set_before_first(new_block, &cur2);
    page_cur_move_to_next(&cur2);

    /* Copy lock requests on user records to new page and
    reset the lock bits on the old */

    while (!page_cur_is_after_last(&cur1)) {
      ulint heap_no;

      if (comp) {
        heap_no = rec_get_heap_no_new(
          page_cur_get_rec(&cur1));
      } else {
        heap_no = rec_get_heap_no_old(
          page_cur_get_rec(&cur1));
        ut_ad(!memcmp(page_cur_get_rec(&cur1),
           page_cur_get_rec(&cur2),
           rec_get_data_size_old(
             page_cur_get_rec(&cur2))));
      }

      if (lock_rec_get_nth_bit(lock, heap_no)) {
        lock_rec_reset_nth_bit(lock, heap_no);

        /* A moved waiting request loses its wait
        state on the old lock object. */
        if (UNIV_UNLIKELY(type_mode & LOCK_WAIT)) {
          lock_reset_lock_and_trx_wait(lock);
        }

        if (comp) {
          heap_no = rec_get_heap_no_new(
            page_cur_get_rec(&cur2));
        } else {
          heap_no = rec_get_heap_no_old(
            page_cur_get_rec(&cur2));
        }

        lock_rec_add_to_queue(type_mode,
                  new_block, heap_no,
                  lock->index, lock->trx);
      }

      page_cur_move_to_next(&cur1);
      page_cur_move_to_next(&cur2);
    }
  }

  lock_mutex_exit_kernel();

#ifdef UNIV_DEBUG_LOCK_VALIDATE
  ut_ad(lock_rec_validate_page(buf_block_get_space(block),
             buf_block_get_page_no(block)));
  ut_ad(lock_rec_validate_page(buf_block_get_space(new_block),
             buf_block_get_page_no(new_block)));
#endif
}
02741 
02742 /*************************************************************/
/** Moves the explicit locks on user records from the start of a page
(records before rec, NOT including rec) to another page, when the
record list start is moved there. old_end marks the position on the
new page after which the moved records were placed. */
UNIV_INTERN
void
lock_move_rec_list_start(
/*=====================*/
  const buf_block_t*  new_block,  /*!< in: index page to
            move to */
  const buf_block_t*  block,    /*!< in: index page */
  const rec_t*    rec,    /*!< in: record on page:
            the records BEFORE it
            are moved */
  const rec_t*    old_end)  /*!< in: old successor of
            infimum on the new page;
            moved records are in
            front of this */
{
  lock_t*   lock;
  const ulint comp  = page_rec_is_comp(rec);

  ut_ad(block->frame == page_align(rec));
  ut_ad(new_block->frame == page_align(old_end));

  lock_mutex_enter_kernel();

  for (lock = lock_rec_get_first_on_page(block); lock;
       lock = lock_rec_get_next_on_page(lock)) {
    page_cur_t  cur1;
    page_cur_t  cur2;
    const ulint type_mode = lock->type_mode;

    /* cur1 starts at the first user record of the old page,
    cur2 at the first moved record on the new page. */
    page_cur_set_before_first(block, &cur1);
    page_cur_move_to_next(&cur1);

    page_cur_position(old_end, new_block, &cur2);
    page_cur_move_to_next(&cur2);

    /* Copy lock requests on user records to new page and
    reset the lock bits on the old */

    while (page_cur_get_rec(&cur1) != rec) {
      ulint heap_no;

      if (comp) {
        heap_no = rec_get_heap_no_new(
          page_cur_get_rec(&cur1));
      } else {
        heap_no = rec_get_heap_no_old(
          page_cur_get_rec(&cur1));
        ut_ad(!memcmp(page_cur_get_rec(&cur1),
                page_cur_get_rec(&cur2),
                rec_get_data_size_old(
                  page_cur_get_rec(
                    &cur2))));
      }

      if (lock_rec_get_nth_bit(lock, heap_no)) {
        lock_rec_reset_nth_bit(lock, heap_no);

        /* A moved waiting request loses its wait
        state on the old lock object. */
        if (UNIV_UNLIKELY(type_mode & LOCK_WAIT)) {
          lock_reset_lock_and_trx_wait(lock);
        }

        if (comp) {
          heap_no = rec_get_heap_no_new(
            page_cur_get_rec(&cur2));
        } else {
          heap_no = rec_get_heap_no_old(
            page_cur_get_rec(&cur2));
        }

        lock_rec_add_to_queue(type_mode,
                  new_block, heap_no,
                  lock->index, lock->trx);
      }

      page_cur_move_to_next(&cur1);
      page_cur_move_to_next(&cur2);
    }

#ifdef UNIV_DEBUG
    /* If the whole record list was moved (rec is the supremum),
    verify no user-record bit was left behind in this lock. */
    if (page_rec_is_supremum(rec)) {
      ulint i;

      for (i = PAGE_HEAP_NO_USER_LOW;
           i < lock_rec_get_n_bits(lock); i++) {
        if (UNIV_UNLIKELY
            (lock_rec_get_nth_bit(lock, i))) {

          fprintf(stderr,
            "lock_move_rec_list_start():"
            " %lu not moved in %p\n",
            (ulong) i, (void*) lock);
          ut_error;
        }
      }
    }
#endif /* UNIV_DEBUG */
  }

  lock_mutex_exit_kernel();

#ifdef UNIV_DEBUG_LOCK_VALIDATE
  ut_ad(lock_rec_validate_page(buf_block_get_space(block),
             buf_block_get_page_no(block)));
#endif
}
02850 
02851 /*************************************************************/
02853 UNIV_INTERN
02854 void
02855 lock_update_split_right(
02856 /*====================*/
02857   const buf_block_t*  right_block,  
02858   const buf_block_t*  left_block) 
02859 {
02860   ulint heap_no = lock_get_min_heap_no(right_block);
02861 
02862   lock_mutex_enter_kernel();
02863 
02864   /* Move the locks on the supremum of the left page to the supremum
02865   of the right page */
02866 
02867   lock_rec_move(right_block, left_block,
02868           PAGE_HEAP_NO_SUPREMUM, PAGE_HEAP_NO_SUPREMUM);
02869 
02870   /* Inherit the locks to the supremum of left page from the successor
02871   of the infimum on right page */
02872 
02873   lock_rec_inherit_to_gap(left_block, right_block,
02874         PAGE_HEAP_NO_SUPREMUM, heap_no);
02875 
02876   lock_mutex_exit_kernel();
02877 }
02878 
02879 /*************************************************************/
/** Updates the lock table when a page is merged to the right: the
first record of the right page inherits (as gap locks) the supremum
locks of the discarded left page, whose locks are then released and
freed. */
UNIV_INTERN
void
lock_update_merge_right(
/*====================*/
  const buf_block_t*  right_block,  /*!< in: right page to
            which merged */
  const rec_t*    orig_succ,  /*!< in: original
            successor of infimum
            on the right page
            before merge */
  const buf_block_t*  left_block) /*!< in: merged, discarded
            left page */
{
  lock_mutex_enter_kernel();

  /* Inherit the locks from the supremum of the left page to the
  original successor of infimum on the right page, to which the left
  page was merged */

  lock_rec_inherit_to_gap(right_block, left_block,
        page_rec_get_heap_no(orig_succ),
        PAGE_HEAP_NO_SUPREMUM);

  /* Reset the locks on the supremum of the left page, releasing
  waiting transactions */

  lock_rec_reset_and_release_wait(left_block,
          PAGE_HEAP_NO_SUPREMUM);

  lock_rec_free_all_from_discard_page(left_block);

  lock_mutex_exit_kernel();
}
02915 
02916 /*************************************************************/
/** Updates the lock table when the root page is copied to another
page in the tree (the tree is raised one level): the root's supremum
locks move to the supremum of the new page. */
UNIV_INTERN
void
lock_update_root_raise(
/*===================*/
  const buf_block_t*  block,  /*!< in: index page to which copied */
  const buf_block_t*  root) /*!< in: root page */
{
  lock_mutex_enter_kernel();

  /* Move the locks on the supremum of the root to the supremum
  of block */

  lock_rec_move(block, root,
          PAGE_HEAP_NO_SUPREMUM, PAGE_HEAP_NO_SUPREMUM);
  lock_mutex_exit_kernel();
}
02939 
02940 /*************************************************************/
/** Updates the lock table when a page is copied to another page and
the original is then discarded: supremum locks move to the new page
and all remaining lock objects on the old page are freed. */
UNIV_INTERN
void
lock_update_copy_and_discard(
/*=========================*/
  const buf_block_t*  new_block,  /*!< in: index page to
            which copied */
  const buf_block_t*  block)    /*!< in: index page;
            NOT the root! */
{
  lock_mutex_enter_kernel();

  /* Move the locks on the supremum of the old page to the supremum
  of new_page */

  lock_rec_move(new_block, block,
          PAGE_HEAP_NO_SUPREMUM, PAGE_HEAP_NO_SUPREMUM);
  lock_rec_free_all_from_discard_page(block);

  lock_mutex_exit_kernel();
}
02963 
02964 /*************************************************************/
/** Updates the lock table when a page is split to the left: the
supremum of the left page inherits (as gap locks) the locks of the
first record on the right page. */
UNIV_INTERN
void
lock_update_split_left(
/*===================*/
  const buf_block_t*  right_block,  /*!< in: right page */
  const buf_block_t*  left_block) /*!< in: left page */
{
  ulint heap_no = lock_get_min_heap_no(right_block);

  lock_mutex_enter_kernel();

  /* Inherit the locks to the supremum of the left page from the
  successor of the infimum on the right page */

  lock_rec_inherit_to_gap(left_block, right_block,
        PAGE_HEAP_NO_SUPREMUM, heap_no);

  lock_mutex_exit_kernel();
}
02985 
02986 /*************************************************************/
/** Updates the lock table when a page is merged to the left: the
first record moved from the right page inherits the left page's
supremum locks, the right page's supremum locks move to the left
page's supremum, and all remaining lock objects on the discarded
right page are freed. */
UNIV_INTERN
void
lock_update_merge_left(
/*===================*/
  const buf_block_t*  left_block, /*!< in: left page to
            which merged */
  const rec_t*    orig_pred,  /*!< in: original predecessor
            of supremum on the left
            page before merge */
  const buf_block_t*  right_block)  /*!< in: merged, discarded
            right page */
{
  const rec_t*  left_next_rec;

  ut_ad(left_block->frame == page_align(orig_pred));

  lock_mutex_enter_kernel();

  left_next_rec = page_rec_get_next_const(orig_pred);

  /* If nothing was moved (orig_pred's successor is still the
  supremum), the supremum locks stay where they are. */
  if (!page_rec_is_supremum(left_next_rec)) {

    /* Inherit the locks on the supremum of the left page to the
    first record which was moved from the right page */

    lock_rec_inherit_to_gap(left_block, left_block,
          page_rec_get_heap_no(left_next_rec),
          PAGE_HEAP_NO_SUPREMUM);

    /* Reset the locks on the supremum of the left page,
    releasing waiting transactions */

    lock_rec_reset_and_release_wait(left_block,
            PAGE_HEAP_NO_SUPREMUM);
  }

  /* Move the locks from the supremum of right page to the supremum
  of the left page */

  lock_rec_move(left_block, right_block,
          PAGE_HEAP_NO_SUPREMUM, PAGE_HEAP_NO_SUPREMUM);

  lock_rec_free_all_from_discard_page(right_block);

  lock_mutex_exit_kernel();
}
03034 
03035 /*************************************************************/
/** Resets the lock requests on the heir record and then makes it
inherit (as gap locks) the locks of another record. Used e.g. in an
update where a record is deleted and another inserted. */
UNIV_INTERN
void
lock_rec_reset_and_inherit_gap_locks(
/*=================================*/
  const buf_block_t*  heir_block, /*!< in: block containing the
            record which inherits */
  const buf_block_t*  block,    /*!< in: block containing the
            record from which inherited;
            may be the same as heir_block */
  ulint     heir_heap_no, /*!< in: heap_no of the
            inheriting record */
  ulint     heap_no)  /*!< in: heap_no of the
            donating record */
{
  mutex_enter(&kernel_mutex);

  lock_rec_reset_and_release_wait(heir_block, heir_heap_no);

  lock_rec_inherit_to_gap(heir_block, block, heir_heap_no, heap_no);

  mutex_exit(&kernel_mutex);
}
03061 
03062 /*************************************************************/
/** Updates the lock table when a whole page is discarded: every
record on the page donates its locks (as gap locks) to the heir
record, all locks on the page are reset (releasing waiters), and the
lock objects are freed. */
UNIV_INTERN
void
lock_update_discard(
/*================*/
  const buf_block_t*  heir_block, /*!< in: block containing the
            record which will inherit
            the locks */
  ulint     heir_heap_no, /*!< in: heap_no of the
            inheriting record */
  const buf_block_t*  block)    /*!< in: page to be discarded */
{
  const page_t* page = block->frame;
  const rec_t*  rec;
  ulint   heap_no;

  lock_mutex_enter_kernel();

  if (!lock_rec_get_first_on_page(block)) {
    /* No locks exist on page, nothing to do */

    lock_mutex_exit_kernel();

    return;
  }

  /* Inherit all the locks on the page to the record and reset all
  the locks on the page */

  /* The record list is walked from the infimum through the
  supremum; the walk differs only in the record format (compact
  vs. old-style). */
  if (page_is_comp(page)) {
    rec = page + PAGE_NEW_INFIMUM;

    do {
      heap_no = rec_get_heap_no_new(rec);

      lock_rec_inherit_to_gap(heir_block, block,
            heir_heap_no, heap_no);

      lock_rec_reset_and_release_wait(block, heap_no);

      rec = page + rec_get_next_offs(rec, TRUE);
    } while (heap_no != PAGE_HEAP_NO_SUPREMUM);
  } else {
    rec = page + PAGE_OLD_INFIMUM;

    do {
      heap_no = rec_get_heap_no_old(rec);

      lock_rec_inherit_to_gap(heir_block, block,
            heir_heap_no, heap_no);

      lock_rec_reset_and_release_wait(block, heap_no);

      rec = page + rec_get_next_offs(rec, FALSE);
    } while (heap_no != PAGE_HEAP_NO_SUPREMUM);
  }

  lock_rec_free_all_from_discard_page(block);

  lock_mutex_exit_kernel();
}
03124 
03125 /*************************************************************/
/** Updates the lock table when a new user record is inserted: the
inserted record inherits, in gap mode, the gap-type locks of its
successor (the record whose gap it was inserted into). */
UNIV_INTERN
void
lock_update_insert(
/*===============*/
  const buf_block_t*  block,  /*!< in: buffer block containing rec */
  const rec_t*    rec)  /*!< in: the inserted record */
{
  ulint receiver_heap_no;
  ulint donator_heap_no;

  ut_ad(block->frame == page_align(rec));

  /* Inherit the gap-locking locks for rec, in gap mode, from the next
  record */

  if (page_rec_is_comp(rec)) {
    receiver_heap_no = rec_get_heap_no_new(rec);
    donator_heap_no = rec_get_heap_no_new(
      page_rec_get_next_low(rec, TRUE));
  } else {
    receiver_heap_no = rec_get_heap_no_old(rec);
    donator_heap_no = rec_get_heap_no_old(
      page_rec_get_next_low(rec, FALSE));
  }

  lock_mutex_enter_kernel();
  lock_rec_inherit_to_gap_if_gap_lock(block,
              receiver_heap_no, donator_heap_no);
  lock_mutex_exit_kernel();
}
03157 
03158 /*************************************************************/
/** Updates the lock table when a record is removed: the next record
inherits the deleted record's locks as gap locks, then the locks on
the deleted record are reset, releasing any waiting transactions. */
UNIV_INTERN
void
lock_update_delete(
/*===============*/
  const buf_block_t*  block,  /*!< in: buffer block containing rec */
  const rec_t*    rec)  /*!< in: the record to be removed */
{
  const page_t* page = block->frame;
  ulint   heap_no;
  ulint   next_heap_no;

  ut_ad(page == page_align(rec));

  if (page_is_comp(page)) {
    heap_no = rec_get_heap_no_new(rec);
    next_heap_no = rec_get_heap_no_new(page
               + rec_get_next_offs(rec,
                       TRUE));
  } else {
    heap_no = rec_get_heap_no_old(rec);
    next_heap_no = rec_get_heap_no_old(page
               + rec_get_next_offs(rec,
                       FALSE));
  }

  lock_mutex_enter_kernel();

  /* Let the next record inherit the locks from rec, in gap mode */

  lock_rec_inherit_to_gap(block, block, next_heap_no, heap_no);

  /* Reset the lock bits on rec and release waiting transactions */

  lock_rec_reset_and_release_wait(block, heap_no);

  lock_mutex_exit_kernel();
}
03197 
03198 /*********************************************************************/
/** Stores the explicit locks of a record on the page infimum record
(which must carry no locks of its own), so the record can be modified
or relocated and its locks later restored with
lock_rec_restore_from_page_infimum(). */
UNIV_INTERN
void
lock_rec_store_on_page_infimum(
/*===========================*/
  const buf_block_t*  block,  /*!< in: buffer block containing rec */
  const rec_t*    rec)  /*!< in: record whose lock state
          is stored on the infimum */
{
  ulint heap_no = page_rec_get_heap_no(rec);

  ut_ad(block->frame == page_align(rec));

  lock_mutex_enter_kernel();

  lock_rec_move(block, block, PAGE_HEAP_NO_INFIMUM, heap_no);

  lock_mutex_exit_kernel();
}
03226 
03227 /*********************************************************************/
/** Restores the lock state of a record from the infimum of a page
(possibly a different page than the record's), reversing a previous
lock_rec_store_on_page_infimum(). */
UNIV_INTERN
void
lock_rec_restore_from_page_infimum(
/*===============================*/
  const buf_block_t*  block,  /*!< in: buffer block containing rec */
  const rec_t*    rec,  /*!< in: record whose lock state
          is restored */
  const buf_block_t*  donator)/*!< in: page whose infimum stored
          the lock state; may differ from
          block */
{
  ulint heap_no = page_rec_get_heap_no(rec);

  lock_mutex_enter_kernel();

  lock_rec_move(block, donator, heap_no, PAGE_HEAP_NO_INFIMUM);

  lock_mutex_exit_kernel();
}
03251 
03252 /*=========== DEADLOCK CHECKING ======================================*/
03253 
03254 /********************************************************************/
03259 static
03260 ibool
03261 lock_deadlock_occurs(
03262 /*=================*/
03263   lock_t* lock, 
03264   trx_t*  trx)  
03265 {
03266   trx_t*    mark_trx;
03267   ulint   ret;
03268   ulint   cost  = 0;
03269 
03270   ut_ad(trx);
03271   ut_ad(lock);
03272   ut_ad(mutex_own(&kernel_mutex));
03273 retry:
03274   /* We check that adding this trx to the waits-for graph
03275   does not produce a cycle. First mark all active transactions
03276   with 0: */
03277 
03278   mark_trx = UT_LIST_GET_FIRST(trx_sys->trx_list);
03279 
03280   while (mark_trx) {
03281     mark_trx->deadlock_mark = 0;
03282     mark_trx = UT_LIST_GET_NEXT(trx_list, mark_trx);
03283   }
03284 
03285   ret = lock_deadlock_recursive(trx, trx, lock, &cost, 0);
03286 
03287   switch (ret) {
03288   case LOCK_VICTIM_IS_OTHER:
03289     /* We chose some other trx as a victim: retry if there still
03290     is a deadlock */
03291     goto retry;
03292 
03293   case LOCK_EXCEED_MAX_DEPTH:
03294     /* If the lock search exceeds the max step
03295     or the max depth, the current trx will be
03296     the victim. Print its information. */
03297     rewind(lock_latest_err_file);
03298     ut_print_timestamp(lock_latest_err_file);
03299 
03300     fputs("TOO DEEP OR LONG SEARCH IN THE LOCK TABLE"
03301           " WAITS-FOR GRAPH, WE WILL ROLL BACK"
03302           " FOLLOWING TRANSACTION \n",
03303           lock_latest_err_file);
03304 
03305     fputs("\n*** TRANSACTION:\n", lock_latest_err_file);
03306           trx_print(lock_latest_err_file, trx, 3000);
03307 
03308     fputs("*** WAITING FOR THIS LOCK TO BE GRANTED:\n",
03309           lock_latest_err_file);
03310 
03311     if (lock_get_type(lock) == LOCK_REC) {
03312       lock_rec_print(lock_latest_err_file, lock);
03313     } else {
03314       lock_table_print(lock_latest_err_file, lock);
03315     }
03316     break;
03317 
03318   case LOCK_VICTIM_IS_START:
03319     fputs("*** WE ROLL BACK TRANSACTION (2)\n",
03320           lock_latest_err_file);
03321     break;
03322 
03323   default:
03324     /* No deadlock detected*/
03325     return(FALSE);
03326   }
03327 
03328   lock_deadlock_found = TRUE;
03329 
03330   return(TRUE);
03331 }
03332 
03333 /********************************************************************/
static
ulint
lock_deadlock_recursive(
/*====================*/
  trx_t*  start,    /*!< in: recursion starting point: the trx that
        originally had to wait */
  trx_t*  trx,    /*!< in: a transaction waiting for a lock */
  lock_t* wait_lock,  /*!< in: the lock trx is waiting to be granted */
  ulint*  cost,   /*!< in/out: number of search steps so far; the
        search is abandoned once this exceeds
        LOCK_MAX_N_STEPS_IN_DEADLOCK_CHECK */
  ulint depth)    /*!< in: recursion depth; the search is abandoned
        once this exceeds
        LOCK_MAX_DEPTH_IN_DEADLOCK_CHECK */
{
  ulint ret;
  lock_t* lock;
  trx_t*  lock_trx;
  /* ULINT_UNDEFINED means wait_lock is a table lock; otherwise it
  is the heap number of the record wait_lock is waiting on. */
  ulint heap_no   = ULINT_UNDEFINED;

  ut_a(trx);
  ut_a(start);
  ut_a(wait_lock);
  ut_ad(mutex_own(&kernel_mutex));

  if (trx->deadlock_mark == 1) {
    /* We have already exhaustively searched the subtree starting
    from this trx */

    return(0);
  }

  *cost = *cost + 1;

  if (lock_get_type_low(wait_lock) == LOCK_REC) {
    ulint   space;
    ulint   page_no;

    heap_no = lock_rec_find_set_bit(wait_lock);
    ut_a(heap_no != ULINT_UNDEFINED);

    space = wait_lock->un_member.rec_lock.space;
    page_no = wait_lock->un_member.rec_lock.page_no;

    lock = lock_rec_get_first_on_page_addr(space, page_no);

    /* Position the iterator on the first matching record lock. */
    while (lock != NULL
           && lock != wait_lock
           && !lock_rec_get_nth_bit(lock, heap_no)) {

      lock = lock_rec_get_next_on_page(lock);
    }

    /* Reaching wait_lock means there is no lock ahead of it in
    the queue for this record. */
    if (lock == wait_lock) {
      lock = NULL;
    }

    ut_ad(lock == NULL || lock_rec_get_nth_bit(lock, heap_no));

  } else {
    lock = wait_lock;
  }

  /* Look at the locks ahead of wait_lock in the lock queue */

  for (;;) {
    /* Get previous table lock. */
    if (heap_no == ULINT_UNDEFINED) {

      lock = UT_LIST_GET_PREV(
        un_member.tab_lock.locks, lock);
    }

    if (lock == NULL) {
      /* We can mark this subtree as searched */
      trx->deadlock_mark = 1;

      return(FALSE);
    }

    if (lock_has_to_wait(wait_lock, lock)) {

      ibool too_far
        = depth > LOCK_MAX_DEPTH_IN_DEADLOCK_CHECK
        || *cost > LOCK_MAX_N_STEPS_IN_DEADLOCK_CHECK;

      lock_trx = lock->trx;

      if (lock_trx == start) {

        /* We came back to the recursion starting
        point: a deadlock detected; or we have
        searched the waits-for graph too long */

        FILE* ef = lock_latest_err_file;

        rewind(ef);
        ut_print_timestamp(ef);

        fputs("\n*** (1) TRANSACTION:\n", ef);

        trx_print(ef, wait_lock->trx, 3000);

        fputs("*** (1) WAITING FOR THIS LOCK"
              " TO BE GRANTED:\n", ef);

        if (lock_get_type_low(wait_lock) == LOCK_REC) {
          lock_rec_print(ef, wait_lock);
        } else {
          lock_table_print(ef, wait_lock);
        }

        fputs("*** (2) TRANSACTION:\n", ef);

        trx_print(ef, lock->trx, 3000);

        fputs("*** (2) HOLDS THE LOCK(S):\n", ef);

        if (lock_get_type_low(lock) == LOCK_REC) {
          lock_rec_print(ef, lock);
        } else {
          lock_table_print(ef, lock);
        }

        fputs("*** (2) WAITING FOR THIS LOCK"
              " TO BE GRANTED:\n", ef);

        if (lock_get_type_low(start->wait_lock)
            == LOCK_REC) {
          lock_rec_print(ef, start->wait_lock);
        } else {
          lock_table_print(ef, start->wait_lock);
        }
#ifdef UNIV_DEBUG
        if (lock_print_waits) {
          fputs("Deadlock detected\n",
                stderr);
        }
#endif /* UNIV_DEBUG */

        /* Choose as victim the transaction with the
        smaller weight (fewer locks/undo records). */
        if (trx_weight_ge(wait_lock->trx, start)) {
          /* Our recursion starting point
          transaction is 'smaller', let us
          choose 'start' as the victim and roll
          back it */

          return(LOCK_VICTIM_IS_START);
        }

        lock_deadlock_found = TRUE;

        /* Let us choose the transaction of wait_lock
        as a victim to try to avoid deadlocking our
        recursion starting point transaction */

        fputs("*** WE ROLL BACK TRANSACTION (1)\n",
              ef);

        wait_lock->trx->was_chosen_as_deadlock_victim
          = TRUE;

        lock_cancel_waiting_and_release(wait_lock);

        /* Since trx and wait_lock are no longer
        in the waits-for graph, we can return FALSE;
        note that our selective algorithm can choose
        several transactions as victims, but still
        we may end up rolling back also the recursion
        starting point transaction! */

        return(LOCK_VICTIM_IS_OTHER);
      }

      if (too_far) {

#ifdef UNIV_DEBUG
        if (lock_print_waits) {
          fputs("Deadlock search exceeds"
                " max steps or depth.\n",
                stderr);
        }
#endif /* UNIV_DEBUG */
        /* The information about transaction/lock
        to be rolled back is available in the top
        level. Do not print anything here. */
        return(LOCK_EXCEED_MAX_DEPTH);
      }

      if (lock_trx->que_state == TRX_QUE_LOCK_WAIT) {

        /* Another trx ahead has requested lock in an
        incompatible mode, and is itself waiting for
        a lock */

        ret = lock_deadlock_recursive(
          start, lock_trx,
          lock_trx->wait_lock, cost, depth + 1);

        /* Propagate any victim/abort decision up the
        recursion immediately. */
        if (ret != 0) {

          return(ret);
        }
      }
    }
    /* Get the next record lock to check. */
    if (heap_no != ULINT_UNDEFINED) {

      ut_a(lock != NULL);

      do {
        lock = lock_rec_get_next_on_page(lock);
      } while (lock != NULL
        && lock != wait_lock
        && !lock_rec_get_nth_bit(lock, heap_no));

      if (lock == wait_lock) {
        lock = NULL;
      }
    }
  }/* end of the 'for (;;)'-loop */
}
03562 
03563 /*========================= TABLE LOCKS ==============================*/
03564 
03565 /*********************************************************************/
03569 UNIV_INLINE
03570 lock_t*
03571 lock_table_create(
03572 /*==============*/
03573   dict_table_t* table,  
03574   ulint   type_mode,
03576   trx_t*    trx)  
03577 {
03578   lock_t* lock;
03579 
03580   ut_ad(table && trx);
03581   ut_ad(mutex_own(&kernel_mutex));
03582 
03583   if ((type_mode & LOCK_MODE_MASK) == LOCK_AUTO_INC) {
03584     ++table->n_waiting_or_granted_auto_inc_locks;
03585   }
03586 
03587   /* For AUTOINC locking we reuse the lock instance only if
03588   there is no wait involved else we allocate the waiting lock
03589   from the transaction lock heap. */
03590   if (type_mode == LOCK_AUTO_INC) {
03591 
03592     lock = table->autoinc_lock;
03593 
03594     table->autoinc_trx = trx;
03595 
03596     ib_vector_push(trx->autoinc_locks, lock);
03597   } else {
03598     lock = static_cast<lock_t *>(mem_heap_alloc(trx->lock_heap, sizeof(lock_t)));
03599   }
03600 
03601   UT_LIST_ADD_LAST(trx_locks, trx->trx_locks, lock);
03602 
03603   lock->type_mode = type_mode | LOCK_TABLE;
03604   lock->trx = trx;
03605 
03606   lock->un_member.tab_lock.table = table;
03607 
03608   UT_LIST_ADD_LAST(un_member.tab_lock.locks, table->locks, lock);
03609 
03610   if (UNIV_UNLIKELY(type_mode & LOCK_WAIT)) {
03611 
03612     lock_set_lock_and_trx_wait(lock, trx);
03613   }
03614 
03615   return(lock);
03616 }
03617 
03618 /*************************************************************/
UNIV_INLINE
void
lock_table_remove_low(
/*==================*/
  lock_t* lock) /*!< in/out: table lock to remove from its
      transaction's lock list and its table's queue */
{
  trx_t*    trx;
  dict_table_t* table;

  ut_ad(mutex_own(&kernel_mutex));

  trx = lock->trx;
  table = lock->un_member.tab_lock.table;

  /* Remove the table from the transaction's AUTOINC vector, if
  the lock that is being release is an AUTOINC lock. */
  if (lock_get_mode(lock) == LOCK_AUTO_INC) {

    /* The table's AUTOINC lock can get transferred to
    another transaction before we get here. */
    if (table->autoinc_trx == trx) {
      table->autoinc_trx = NULL;
    }

    /* The locks must be freed in the reverse order from
    the one in which they were acquired. This is to avoid
    traversing the AUTOINC lock vector unnecessarily. 

    We only store locks that were granted in the
    trx->autoinc_locks vector (see lock_table_create()
    and lock_grant()). Therefore it can be empty and we
    need to check for that. */

    if (!lock_get_wait(lock)
        && !ib_vector_is_empty(trx->autoinc_locks)) {
      lock_t* autoinc_lock;

      /* LIFO discipline: the popped entry must be the
      very lock being removed. */
      autoinc_lock = static_cast<lock_t *>(ib_vector_pop(trx->autoinc_locks));
      ut_a(autoinc_lock == lock);
    }

    ut_a(table->n_waiting_or_granted_auto_inc_locks > 0);
    --table->n_waiting_or_granted_auto_inc_locks;
  }

  /* Unlink from both lists; note the lock struct itself is not
  freed here (it lives in trx->lock_heap or is the table's
  preallocated autoinc_lock). */
  UT_LIST_REMOVE(trx_locks, trx->trx_locks, lock);
  UT_LIST_REMOVE(un_member.tab_lock.locks, table->locks, lock);
}
03670 
03671 /*********************************************************************/
03678 static
03679 ulint
03680 lock_table_enqueue_waiting(
03681 /*=======================*/
03682   ulint   mode, 
03684   dict_table_t* table,  
03685   que_thr_t*  thr)  
03686 {
03687   lock_t* lock;
03688   trx_t*  trx;
03689 
03690   ut_ad(mutex_own(&kernel_mutex));
03691 
03692   /* Test if there already is some other reason to suspend thread:
03693   we do not enqueue a lock request if the query thread should be
03694   stopped anyway */
03695 
03696   if (que_thr_stop(thr)) {
03697     ut_error;
03698 
03699     return(DB_QUE_THR_SUSPENDED);
03700   }
03701 
03702   trx = thr_get_trx(thr);
03703 
03704   switch (trx_get_dict_operation(trx)) {
03705   case TRX_DICT_OP_NONE:
03706     break;
03707   case TRX_DICT_OP_TABLE:
03708   case TRX_DICT_OP_INDEX:
03709     ut_print_timestamp(stderr);
03710     fputs("  InnoDB: Error: a table lock wait happens"
03711           " in a dictionary operation!\n"
03712           "InnoDB: Table name ", stderr);
03713     ut_print_name(stderr, trx, TRUE, table->name);
03714     fputs(".\n"
03715           "InnoDB: Submit a detailed bug report"
03716           " to http://bugs.mysql.com\n",
03717           stderr);
03718   }
03719 
03720   /* Enqueue the lock request that will wait to be granted */
03721 
03722   lock = lock_table_create(table, mode | LOCK_WAIT, trx);
03723 
03724   /* Check if a deadlock occurs: if yes, remove the lock request and
03725   return an error code */
03726 
03727   if (lock_deadlock_occurs(lock, trx)) {
03728 
03729     /* The order here is important, we don't want to
03730     lose the state of the lock before calling remove. */
03731     lock_table_remove_low(lock);
03732     lock_reset_lock_and_trx_wait(lock);
03733 
03734     return(DB_DEADLOCK);
03735   }
03736 
03737   if (trx->wait_lock == NULL) {
03738     /* Deadlock resolution chose another transaction as a victim,
03739     and we accidentally got our lock granted! */
03740 
03741     return(DB_SUCCESS_LOCKED_REC);
03742   }
03743 
03744   trx->que_state = TRX_QUE_LOCK_WAIT;
03745   trx->was_chosen_as_deadlock_victim = FALSE;
03746   trx->wait_started = time(NULL);
03747 
03748   ut_a(que_thr_stop(thr));
03749 
03750   return(DB_LOCK_WAIT);
03751 }
03752 
03753 /*********************************************************************/
03757 UNIV_INLINE
03758 lock_t*
03759 lock_table_other_has_incompatible(
03760 /*==============================*/
03761   trx_t*    trx,  
03763   ulint   wait, 
03765   dict_table_t* table,  
03766   enum lock_mode  mode) 
03767 {
03768   lock_t* lock;
03769 
03770   ut_ad(mutex_own(&kernel_mutex));
03771 
03772   lock = UT_LIST_GET_LAST(table->locks);
03773 
03774   while (lock != NULL) {
03775 
03776     if ((lock->trx != trx)
03777         && (!lock_mode_compatible(lock_get_mode(lock), mode))
03778         && (wait || !(lock_get_wait(lock)))) {
03779 
03780       return(lock);
03781     }
03782 
03783     lock = UT_LIST_GET_PREV(un_member.tab_lock.locks, lock);
03784   }
03785 
03786   return(NULL);
03787 }
03788 
03789 /*********************************************************************/
03793 UNIV_INTERN
03794 ulint
03795 lock_table(
03796 /*=======*/
03797   ulint   flags,  
03799   dict_table_t* table,  
03800   enum lock_mode  mode, 
03801   que_thr_t*  thr)  
03802 {
03803   trx_t*  trx;
03804   ulint err;
03805 
03806   ut_ad(table && thr);
03807 
03808   if (flags & BTR_NO_LOCKING_FLAG) {
03809 
03810     return(DB_SUCCESS_LOCKED_REC);
03811   }
03812 
03813   ut_a(flags == 0);
03814 
03815   trx = thr_get_trx(thr);
03816 
03817   lock_mutex_enter_kernel();
03818 
03819   /* Look for stronger locks the same trx already has on the table */
03820 
03821   if (lock_table_has(trx, table, mode)) {
03822 
03823     lock_mutex_exit_kernel();
03824 
03825     return(DB_SUCCESS);
03826   }
03827 
03828   /* We have to check if the new lock is compatible with any locks
03829   other transactions have in the table lock queue. */
03830 
03831   if (lock_table_other_has_incompatible(trx, LOCK_WAIT, table, mode)) {
03832 
03833     /* Another trx has a request on the table in an incompatible
03834     mode: this trx may have to wait */
03835 
03836     err = lock_table_enqueue_waiting(mode | flags, table, thr);
03837 
03838     lock_mutex_exit_kernel();
03839 
03840     return(err);
03841   }
03842 
03843   lock_table_create(table, mode | flags, trx);
03844 
03845   ut_a(!flags || mode == LOCK_S || mode == LOCK_X);
03846 
03847   lock_mutex_exit_kernel();
03848 
03849   return(DB_SUCCESS);
03850 }
03851 
03852 /*********************************************************************/
03855 static
03856 ibool
03857 lock_table_has_to_wait_in_queue(
03858 /*============================*/
03859   lock_t* wait_lock)  
03860 {
03861   dict_table_t* table;
03862   lock_t*   lock;
03863 
03864   ut_ad(mutex_own(&kernel_mutex));
03865   ut_ad(lock_get_wait(wait_lock));
03866 
03867   table = wait_lock->un_member.tab_lock.table;
03868 
03869   lock = UT_LIST_GET_FIRST(table->locks);
03870 
03871   while (lock != wait_lock) {
03872 
03873     if (lock_has_to_wait(wait_lock, lock)) {
03874 
03875       return(TRUE);
03876     }
03877 
03878     lock = UT_LIST_GET_NEXT(un_member.tab_lock.locks, lock);
03879   }
03880 
03881   return(FALSE);
03882 }
03883 
03884 /*************************************************************/
03888 static
03889 void
03890 lock_table_dequeue(
03891 /*===============*/
03892   lock_t* in_lock)
03895 {
03896   lock_t* lock;
03897 
03898   ut_ad(mutex_own(&kernel_mutex));
03899   ut_a(lock_get_type_low(in_lock) == LOCK_TABLE);
03900 
03901   lock = UT_LIST_GET_NEXT(un_member.tab_lock.locks, in_lock);
03902 
03903   lock_table_remove_low(in_lock);
03904 
03905   /* Check if waiting locks in the queue can now be granted: grant
03906   locks if there are no conflicting locks ahead. */
03907 
03908   while (lock != NULL) {
03909 
03910     if (lock_get_wait(lock)
03911         && !lock_table_has_to_wait_in_queue(lock)) {
03912 
03913       /* Grant the lock */
03914       lock_grant(lock);
03915     }
03916 
03917     lock = UT_LIST_GET_NEXT(un_member.tab_lock.locks, lock);
03918   }
03919 }
03920 
03921 /*=========================== LOCK RELEASE ==============================*/
03922 
03923 /*************************************************************/
03927 UNIV_INTERN
03928 void
03929 lock_rec_unlock(
03930 /*============*/
03931   trx_t*      trx,  
03933   const buf_block_t*  block,  
03934   const rec_t*    rec,  
03935   enum lock_mode    lock_mode)
03936 {
03937   lock_t* first_lock;
03938   lock_t* lock;
03939   ulint heap_no;
03940 
03941   ut_ad(trx && rec);
03942   ut_ad(block->frame == page_align(rec));
03943 
03944   heap_no = page_rec_get_heap_no(rec);
03945 
03946   mutex_enter(&kernel_mutex);
03947 
03948   first_lock = lock_rec_get_first(block, heap_no);
03949 
03950   /* Find the last lock with the same lock_mode and transaction
03951   from the record. */
03952 
03953   for (lock = first_lock; lock != NULL;
03954        lock = lock_rec_get_next(heap_no, lock)) {
03955     if (lock->trx == trx && lock_get_mode(lock) == lock_mode) {
03956       ut_a(!lock_get_wait(lock));
03957       lock_rec_reset_nth_bit(lock, heap_no);
03958       goto released;
03959     }
03960   }
03961 
03962   mutex_exit(&kernel_mutex);
03963   ut_print_timestamp(stderr);
03964   fprintf(stderr,
03965     "  InnoDB: Error: unlock row could not"
03966     " find a %lu mode lock on the record\n",
03967     (ulong) lock_mode);
03968 
03969   return;
03970 
03971 released:
03972   /* Check if we can now grant waiting lock requests */
03973 
03974   for (lock = first_lock; lock != NULL;
03975        lock = lock_rec_get_next(heap_no, lock)) {
03976     if (lock_get_wait(lock)
03977         && !lock_rec_has_to_wait_in_queue(lock)) {
03978 
03979       /* Grant the lock */
03980       lock_grant(lock);
03981     }
03982   }
03983 
03984   mutex_exit(&kernel_mutex);
03985 }
03986 
03987 /*********************************************************************/
03990 UNIV_INTERN
03991 void
03992 lock_release_off_kernel(
03993 /*====================*/
03994   trx_t*  trx)  
03995 {
03996   dict_table_t* table;
03997   ulint   count;
03998   lock_t*   lock;
03999 
04000   ut_ad(mutex_own(&kernel_mutex));
04001 
04002   lock = UT_LIST_GET_LAST(trx->trx_locks);
04003 
04004   count = 0;
04005 
04006   while (lock != NULL) {
04007 
04008     count++;
04009 
04010     if (lock_get_type_low(lock) == LOCK_REC) {
04011 
04012       lock_rec_dequeue_from_page(lock);
04013     } else {
04014       ut_ad(lock_get_type_low(lock) & LOCK_TABLE);
04015 
04016       if (lock_get_mode(lock) != LOCK_IS
04017           && trx->undo_no != 0) {
04018 
04019         /* The trx may have modified the table. We
04020         block the use of the MySQL query cache for
04021         all currently active transactions. */
04022 
04023         table = lock->un_member.tab_lock.table;
04024 
04025         table->query_cache_inv_trx_id
04026           = trx_sys->max_trx_id;
04027       }
04028 
04029       lock_table_dequeue(lock);
04030     }
04031 
04032     if (count == LOCK_RELEASE_KERNEL_INTERVAL) {
04033       /* Release the kernel mutex for a while, so that we
04034       do not monopolize it */
04035 
04036       lock_mutex_exit_kernel();
04037 
04038       lock_mutex_enter_kernel();
04039 
04040       count = 0;
04041     }
04042 
04043     lock = UT_LIST_GET_LAST(trx->trx_locks);
04044   }
04045 
04046   ut_a(ib_vector_size(trx->autoinc_locks) == 0);
04047 
04048   mem_heap_empty(trx->lock_heap);
04049 }
04050 
04051 /*********************************************************************/
04054 UNIV_INTERN
04055 void
04056 lock_cancel_waiting_and_release(
04057 /*============================*/
04058   lock_t* lock) 
04059 {
04060   ut_ad(mutex_own(&kernel_mutex));
04061 
04062   if (lock_get_type_low(lock) == LOCK_REC) {
04063 
04064     lock_rec_dequeue_from_page(lock);
04065   } else {
04066     ut_ad(lock_get_type_low(lock) & LOCK_TABLE);
04067 
04068     if (lock->trx->autoinc_locks != NULL) {
04069       /* Release the transaction's AUTOINC locks/ */
04070       lock_release_autoinc_locks(lock->trx);
04071     }
04072 
04073     lock_table_dequeue(lock);
04074   }
04075 
04076   /* Reset the wait flag and the back pointer to lock in trx */
04077 
04078   lock_reset_lock_and_trx_wait(lock);
04079 
04080   /* The following function releases the trx from lock wait */
04081 
04082   trx_end_lock_wait(lock->trx);
04083 }
04084 
04085 /* True if a lock mode is S or X */
04086 #define IS_LOCK_S_OR_X(lock) \
04087   (lock_get_mode(lock) == LOCK_S \
04088    || lock_get_mode(lock) == LOCK_X)
04089 
04090 
04091 /*********************************************************************/
04096 static
04097 void
04098 lock_remove_all_on_table_for_trx(
04099 /*=============================*/
04100   dict_table_t* table,      
04101   trx_t*    trx,      
04102   ibool   remove_also_table_sx_locks)
04104 {
04105   lock_t* lock;
04106   lock_t* prev_lock;
04107 
04108   ut_ad(mutex_own(&kernel_mutex));
04109 
04110   lock = UT_LIST_GET_LAST(trx->trx_locks);
04111 
04112   while (lock != NULL) {
04113     prev_lock = UT_LIST_GET_PREV(trx_locks, lock);
04114 
04115     if (lock_get_type_low(lock) == LOCK_REC
04116         && lock->index->table == table) {
04117       ut_a(!lock_get_wait(lock));
04118 
04119       lock_rec_discard(lock);
04120     } else if (lock_get_type_low(lock) & LOCK_TABLE
04121          && lock->un_member.tab_lock.table == table
04122          && (remove_also_table_sx_locks
04123              || !IS_LOCK_S_OR_X(lock))) {
04124 
04125       ut_a(!lock_get_wait(lock));
04126 
04127       lock_table_remove_low(lock);
04128     }
04129 
04130     lock = prev_lock;
04131   }
04132 }
04133 
04134 /*********************************************************************/
UNIV_INTERN
void
lock_remove_all_on_table(
/*=====================*/
  dict_table_t* table,      /*!< in: table to be dropped
            or truncated */
  ibool   remove_also_table_sx_locks)/*!< in: also removes
            table S and X locks */
{
  lock_t* lock;
  lock_t* prev_lock;

  mutex_enter(&kernel_mutex);

  lock = UT_LIST_GET_FIRST(table->locks);

  while (lock != NULL) {

    /* Remember the predecessor: lock itself (and possibly other
    locks) may be unlinked by the call below, so we need an
    anchor to recover our position in the queue. */
    prev_lock = UT_LIST_GET_PREV(un_member.tab_lock.locks,
               lock);

    /* If we should remove all locks (remove_also_table_sx_locks
    is TRUE), or if the lock is not table-level S or X lock,
    then check we are not going to remove a wait lock. */
    if (remove_also_table_sx_locks
        || !(lock_get_type(lock) == LOCK_TABLE
       && IS_LOCK_S_OR_X(lock))) {

      ut_a(!lock_get_wait(lock));
    }

    /* Drop ALL of this transaction's locks on the table, not
    just the current one. */
    lock_remove_all_on_table_for_trx(table, lock->trx,
             remove_also_table_sx_locks);

    /* Re-establish the iteration position; which locks survived
    depends on remove_also_table_sx_locks. */
    if (prev_lock == NULL) {
      if (lock == UT_LIST_GET_FIRST(table->locks)) {
        /* lock was not removed, pick its successor */
        lock = UT_LIST_GET_NEXT(
          un_member.tab_lock.locks, lock);
      } else {
        /* lock was removed, pick the first one */
        lock = UT_LIST_GET_FIRST(table->locks);
      }
    } else if (UT_LIST_GET_NEXT(un_member.tab_lock.locks,
              prev_lock) != lock) {
      /* If lock was removed by
      lock_remove_all_on_table_for_trx() then pick the
      successor of prev_lock ... */
      lock = UT_LIST_GET_NEXT(
        un_member.tab_lock.locks, prev_lock);
    } else {
      /* ... otherwise pick the successor of lock. */
      lock = UT_LIST_GET_NEXT(
        un_member.tab_lock.locks, lock);
    }
  }

  mutex_exit(&kernel_mutex);
}
04198 
04199 /*===================== VALIDATION AND DEBUGGING  ====================*/
04200 
04201 /*********************************************************************/
04203 UNIV_INTERN
04204 void
04205 lock_table_print(
04206 /*=============*/
04207   FILE*   file, 
04208   const lock_t* lock) 
04209 {
04210   ut_ad(mutex_own(&kernel_mutex));
04211   ut_a(lock_get_type_low(lock) == LOCK_TABLE);
04212 
04213   fputs("TABLE LOCK table ", file);
04214   ut_print_name(file, lock->trx, TRUE,
04215           lock->un_member.tab_lock.table->name);
04216   fprintf(file, " trx id " TRX_ID_FMT, lock->trx->id);
04217 
04218   if (lock_get_mode(lock) == LOCK_S) {
04219     fputs(" lock mode S", file);
04220   } else if (lock_get_mode(lock) == LOCK_X) {
04221     fputs(" lock mode X", file);
04222   } else if (lock_get_mode(lock) == LOCK_IS) {
04223     fputs(" lock mode IS", file);
04224   } else if (lock_get_mode(lock) == LOCK_IX) {
04225     fputs(" lock mode IX", file);
04226   } else if (lock_get_mode(lock) == LOCK_AUTO_INC) {
04227     fputs(" lock mode AUTO-INC", file);
04228   } else {
04229     fprintf(file, " unknown lock mode %lu",
04230       (ulong) lock_get_mode(lock));
04231   }
04232 
04233   if (lock_get_wait(lock)) {
04234     fputs(" waiting", file);
04235   }
04236 
04237   putc('\n', file);
04238 }
04239 
04240 /*********************************************************************/
UNIV_INTERN
void
lock_rec_print(
/*===========*/
  FILE*   file, /*!< in: file where to print */
  const lock_t* lock) /*!< in: record lock to print */
{
  const buf_block_t*  block;
  ulint     space;
  ulint     page_no;
  ulint     i;
  mtr_t     mtr;
  mem_heap_t*   heap    = NULL;
  /* Stack buffer for record offsets; rec_get_offsets() switches
  to heap allocation only if the record is too wide for it. */
  ulint     offsets_[REC_OFFS_NORMAL_SIZE];
  ulint*      offsets   = offsets_;
  rec_offs_init(offsets_);

  ut_ad(mutex_own(&kernel_mutex));
  ut_a(lock_get_type_low(lock) == LOCK_REC);

  space = lock->un_member.rec_lock.space;
  page_no = lock->un_member.rec_lock.page_no;

  fprintf(file, "RECORD LOCKS space id %lu page no %lu n bits %lu ",
    (ulong) space, (ulong) page_no,
    (ulong) lock_rec_get_n_bits(lock));
  dict_index_name_print(file, lock->trx, lock->index);
  fprintf(file, " trx id " TRX_ID_FMT, lock->trx->id);

  /* NOTE(review): the "lock_mode X" (with underscore) spelling is
  historical output; tools may parse it, so it is left as is. */
  if (lock_get_mode(lock) == LOCK_S) {
    fputs(" lock mode S", file);
  } else if (lock_get_mode(lock) == LOCK_X) {
    fputs(" lock_mode X", file);
  } else {
    ut_error;
  }

  if (lock_rec_get_gap(lock)) {
    fputs(" locks gap before rec", file);
  }

  if (lock_rec_get_rec_not_gap(lock)) {
    fputs(" locks rec but not gap", file);
  }

  if (lock_rec_get_insert_intention(lock)) {
    fputs(" insert intention", file);
  }

  if (lock_get_wait(lock)) {
    fputs(" waiting", file);
  }

  mtr_start(&mtr);

  putc('\n', file);

  /* Fetch the page only if it is already in the buffer pool;
  otherwise the record contents are simply not printed. */
  block = buf_page_try_get(space, page_no, &mtr);

  /* Print one line per record the lock bitmap covers. */
  for (i = 0; i < lock_rec_get_n_bits(lock); ++i) {

    if (!lock_rec_get_nth_bit(lock, i)) {
      continue;
    }

    fprintf(file, "Record lock, heap no %lu", (ulong) i);

    if (block) {
      const rec_t*  rec;

      rec = page_find_rec_with_heap_no(
        buf_block_get_frame(block), i);

      offsets = rec_get_offsets(
        rec, lock->index, offsets,
        ULINT_UNDEFINED, &heap);

      putc(' ', file);
      rec_print_new(file, rec, offsets);
    }

    putc('\n', file);
  }

  mtr_commit(&mtr);
  /* Free the offsets heap only if rec_get_offsets() allocated one. */
  if (UNIV_LIKELY_NULL(heap)) {
    mem_heap_free(heap);
  }
}
04331 
04332 #ifdef UNIV_DEBUG
04333 /* Print the number of lock structs from lock_print_info_summary() only
04334 in non-production builds for performance reasons, see
04335 http://bugs.mysql.com/36942 */
04336 #define PRINT_NUM_OF_LOCK_STRUCTS
04337 #endif /* UNIV_DEBUG */
04338 
04339 #ifdef PRINT_NUM_OF_LOCK_STRUCTS
04340 /*********************************************************************/
04343 static
04344 ulint
04345 lock_get_n_rec_locks(void)
04346 /*======================*/
04347 {
04348   lock_t* lock;
04349   ulint n_locks = 0;
04350   ulint i;
04351 
04352   ut_ad(mutex_own(&kernel_mutex));
04353 
04354   for (i = 0; i < hash_get_n_cells(lock_sys->rec_hash); i++) {
04355 
04356     lock = HASH_GET_FIRST(lock_sys->rec_hash, i);
04357 
04358     while (lock) {
04359       n_locks++;
04360 
04361       lock = HASH_GET_NEXT(hash, lock);
04362     }
04363   }
04364 
04365   return(n_locks);
04366 }
04367 #endif /* PRINT_NUM_OF_LOCK_STRUCTS */
04368 
04369 /*********************************************************************/
/** Prints a summary of the lock system state to the given stream.
NOTE: on a TRUE return the kernel mutex has been acquired and is LEFT
LOCKED for the caller; lock_print_info_all_transactions() releases it.
@return TRUE on success; FALSE if nowait was set and the kernel mutex
could not be obtained (nothing is printed in that case) */
UNIV_INTERN
ibool
lock_print_info_summary(
/*====================*/
  FILE* file,   /*!< in: stream where to print */
  ibool   nowait) /*!< in: TRUE: give up instead of waiting
        for the kernel mutex */
{
  /* if nowait is FALSE, wait on the kernel mutex,
  otherwise return immediately if fail to obtain the
  mutex. */
  if (!nowait) {
    lock_mutex_enter_kernel();
  } else if (mutex_enter_nowait(&kernel_mutex)) {
    /* Non-zero return: the mutex was NOT acquired. */
    fputs("FAIL TO OBTAIN KERNEL MUTEX, "
          "SKIP LOCK INFO PRINTING\n", file);
    return(FALSE);
  }

  /* Replay the most recently recorded deadlock report, if any. */
  if (lock_deadlock_found) {
    fputs("------------------------\n"
          "LATEST DETECTED DEADLOCK\n"
          "------------------------\n", file);

    ut_copy_file(file, lock_latest_err_file);
  }

  fputs("------------\n"
        "TRANSACTIONS\n"
        "------------\n", file);

  fprintf(file, "Trx id counter " TRX_ID_FMT "\n",
    trx_sys->max_trx_id);

  fprintf(file,
    "Purge done for trx's n:o < " TRX_ID_FMT
    " undo n:o < " TRX_ID_FMT "\n",
    purge_sys->purge_trx_no,
    purge_sys->purge_undo_no);

  fprintf(file,
    "History list length %lu\n",
    (ulong) trx_sys->rseg_history_len);

#ifdef PRINT_NUM_OF_LOCK_STRUCTS
  /* Counting all lock structs is O(number of locks); only done in
  debug builds, see the PRINT_NUM_OF_LOCK_STRUCTS definition above. */
  fprintf(file,
    "Total number of lock structs in row lock hash table %lu\n",
    (ulong) lock_get_n_rec_locks());
#endif /* PRINT_NUM_OF_LOCK_STRUCTS */
  return(TRUE);
}
04423 
04424 /*********************************************************************/
/** Prints info on each transaction and (when the lock monitor is on)
each of its locks. Expects the kernel mutex to be held on entry (it is
taken by lock_print_info_summary()); releases it before returning.
Because reading a lock's page into the buffer pool requires releasing
the kernel mutex, the function restarts its scan from the list head
after every such release, using the cursors nth_trx/nth_lock to skip
back to (probably) the same position. */
UNIV_INTERN
void
lock_print_info_all_transactions(
/*=============================*/
  FILE* file) /*!< in: stream where to print */
{
  lock_t* lock;
  ibool load_page_first = TRUE; /* first pass for a record lock:
        read its page into the buffer pool */
  ulint nth_trx   = 0;  /* cursor: index of current trx in trx_list */
  ulint nth_lock  = 0;  /* cursor: index of current lock of that trx */
  ulint i;
  mtr_t mtr;
  trx_t*  trx;

  fprintf(file, "LIST OF TRANSACTIONS FOR EACH SESSION:\n");

  /* First print info on non-active transactions */

  trx = UT_LIST_GET_FIRST(trx_sys->mysql_trx_list);

  while (trx) {
    if (trx->conc_state == TRX_NOT_STARTED) {
      fputs("---", file);
      trx_print(file, trx, 600);
    }

    trx = UT_LIST_GET_NEXT(mysql_trx_list, trx);
  }

loop:
  trx = UT_LIST_GET_FIRST(trx_sys->trx_list);

  i = 0;

  /* Since we temporarily release the kernel mutex when
  reading a database page in below, variable trx may be
  obsolete now and we must loop through the trx list to
  get probably the same trx, or some other trx. */

  while (trx && (i < nth_trx)) {
    trx = UT_LIST_GET_NEXT(trx_list, trx);
    i++;
  }

  if (trx == NULL) {
    /* All transactions printed: release the mutex taken by
    lock_print_info_summary() and finish. */
    lock_mutex_exit_kernel();

    ut_ad(lock_validate());

    return;
  }

  /* nth_lock == 0 means we have not yet printed this trx's header. */
  if (nth_lock == 0) {
    fputs("---", file);
    trx_print(file, trx, 600);

    if (trx->read_view) {
      fprintf(file,
        "Trx read view will not see trx with"
        " id >= " TRX_ID_FMT
        ", sees < " TRX_ID_FMT "\n",
        trx->read_view->low_limit_id,
        trx->read_view->up_limit_id);
    }

    if (trx->que_state == TRX_QUE_LOCK_WAIT) {
      fprintf(file,
        "------- TRX HAS BEEN WAITING %lu SEC"
        " FOR THIS LOCK TO BE GRANTED:\n",
        (ulong) difftime(time(NULL),
             trx->wait_started));

      if (lock_get_type_low(trx->wait_lock) == LOCK_REC) {
        lock_rec_print(file, trx->wait_lock);
      } else {
        lock_table_print(file, trx->wait_lock);
      }

      fputs("------------------\n", file);
    }
  }

  /* Individual locks are only printed when the lock monitor is on. */
  if (!srv_print_innodb_lock_monitor) {
    nth_trx++;
    goto loop;
  }

  i = 0;

  /* Look at the note about the trx loop above why we loop here:
  lock may be an obsolete pointer now. */

  lock = UT_LIST_GET_FIRST(trx->trx_locks);

  while (lock && (i < nth_lock)) {
    lock = UT_LIST_GET_NEXT(trx_locks, lock);
    i++;
  }

  if (lock == NULL) {
    /* No more locks for this trx: advance to the next one. */
    nth_trx++;
    nth_lock = 0;

    goto loop;
  }

  if (lock_get_type_low(lock) == LOCK_REC) {
    if (load_page_first) {
      ulint space = lock->un_member.rec_lock.space;
      ulint zip_size= fil_space_get_zip_size(space);
      ulint page_no = lock->un_member.rec_lock.page_no;

      if (UNIV_UNLIKELY(zip_size == ULINT_UNDEFINED)) {

        /* It is a single table tablespace and
        the .ibd file is missing (TRUNCATE
        TABLE probably stole the locks): just
        print the lock without attempting to
        load the page in the buffer pool. */

        fprintf(file, "RECORD LOCKS on"
          " non-existing space %lu\n",
          (ulong) space);
        goto print_rec;
      }

      /* Reading the page may do I/O: we must not hold the
      kernel mutex across it, so release, read, retake and
      restart the scan from the cursors. */
      lock_mutex_exit_kernel();

      mtr_start(&mtr);

      buf_page_get_with_no_latch(space, zip_size,
               page_no, &mtr);

      mtr_commit(&mtr);

      load_page_first = FALSE;

      lock_mutex_enter_kernel();

      goto loop;
    }

print_rec:
    lock_rec_print(file, lock);
  } else {
    ut_ad(lock_get_type_low(lock) & LOCK_TABLE);

    lock_table_print(file, lock);
  }

  load_page_first = TRUE;

  nth_lock++;

  /* Cap the output per transaction to avoid flooding the monitor. */
  if (nth_lock >= 10) {
    fputs("10 LOCKS PRINTED FOR THIS TRX:"
          " SUPPRESSING FURTHER PRINTS\n",
          file);

    nth_trx++;
    nth_lock = 0;

    goto loop;
  }

  goto loop;
}
04593 
04594 #ifdef UNIV_DEBUG
04595 /*********************************************************************/
04598 static
04599 ibool
04600 lock_table_queue_validate(
04601 /*======================*/
04602   dict_table_t* table)  
04603 {
04604   lock_t* lock;
04605 
04606   ut_ad(mutex_own(&kernel_mutex));
04607 
04608   lock = UT_LIST_GET_FIRST(table->locks);
04609 
04610   while (lock) {
04611     ut_a(((lock->trx)->conc_state == TRX_ACTIVE)
04612          || ((lock->trx)->conc_state == TRX_PREPARED)
04613          || ((lock->trx)->conc_state == TRX_COMMITTED_IN_MEMORY));
04614 
04615     if (!lock_get_wait(lock)) {
04616 
04617       ut_a(!lock_table_other_has_incompatible(
04618              lock->trx, 0, table,
04619              lock_get_mode(lock)));
04620     } else {
04621 
04622       ut_a(lock_table_has_to_wait_in_queue(lock));
04623     }
04624 
04625     lock = UT_LIST_GET_NEXT(un_member.tab_lock.locks, lock);
04626   }
04627 
04628   return(TRUE);
04629 }
04630 
04631 /*********************************************************************/
/** Validates the lock queue of a single record.
The kernel mutex must NOT be held on entry; it is taken and released
inside. Always returns TRUE (failures fire ut_a/ut_error asserts). */
static
ibool
lock_rec_queue_validate(
/*====================*/
  const buf_block_t*  block,  /*!< in: buffer block containing rec */
  const rec_t*    rec,  /*!< in: record whose queue to check */
  dict_index_t*   index,  /*!< in: index, or NULL if unknown */
  const ulint*    offsets)/*!< in: rec_get_offsets(rec, index) */
{
  trx_t*  impl_trx;
  lock_t* lock;
  ulint heap_no;

  ut_a(rec);
  ut_a(block->frame == page_align(rec));
  ut_ad(rec_offs_validate(rec, index, offsets));
  ut_ad(!page_rec_is_comp(rec) == !rec_offs_comp(offsets));

  heap_no = page_rec_get_heap_no(rec);

  lock_mutex_enter_kernel();

  /* Infimum/supremum records: only the generic checks apply
  (valid trx state, wait consistency, matching index). */
  if (!page_rec_is_user_rec(rec)) {

    lock = lock_rec_get_first(block, heap_no);

    while (lock) {
      switch(lock->trx->conc_state) {
      case TRX_ACTIVE:
      case TRX_PREPARED:
      case TRX_COMMITTED_IN_MEMORY:
        break;
      default:
        ut_error;
      }

      ut_a(trx_in_trx_list(lock->trx));

      if (lock_get_wait(lock)) {
        ut_a(lock_rec_has_to_wait_in_queue(lock));
      }

      if (index) {
        ut_a(lock->index == index);
      }

      lock = lock_rec_get_next(heap_no, lock);
    }

    lock_mutex_exit_kernel();

    return(TRUE);
  }

  if (!index);  /* index unknown: implicit-lock check impossible */
  else if (dict_index_is_clust(index)) {

    /* If some trx holds an implicit x-lock while another trx has
    an explicit granted S-request, the implicit lock must already
    have been converted to an explicit one. */
    impl_trx = lock_clust_rec_some_has_impl(rec, index, offsets);

    if (impl_trx
        && lock_rec_other_has_expl_req(LOCK_S, 0, LOCK_WAIT,
               block, heap_no, impl_trx)) {

      ut_a(lock_rec_has_expl(LOCK_X | LOCK_REC_NOT_GAP,
                 block, heap_no, impl_trx));
    }
#if 0
  } else {

    /* The kernel mutex may get released temporarily in the
    next function call: we have to release lock table mutex
    to obey the latching order */

    /* If this thread is holding the file space latch
    (fil_space_t::latch), the following check WILL break
    latching order and may cause a deadlock of threads. */

    /* NOTE: This is a bogus check that would fail in the
    following case: Our transaction is updating a
    row. After it has updated the clustered index record,
    it goes to a secondary index record and finds someone
    else holding an explicit S- or X-lock on that
    secondary index record, presumably from a locking
    read. Our transaction cannot update the secondary
    index immediately, but places a waiting X-lock request
    on the secondary index record. There is nothing
    illegal in this. The assertion is simply too strong. */

    /* From the locking point of view, each secondary
    index is a separate table. A lock that is held on
    secondary index rec does not give any rights to modify
    or read the clustered index rec. Therefore, we can
    think of the sec index as a separate 'table' from the
    clust index 'table'. Conversely, a transaction that
    has acquired a lock on and modified a clustered index
    record may need to wait for a lock on the
    corresponding record in a secondary index. */

    impl_trx = lock_sec_rec_some_has_impl_off_kernel(
      rec, index, offsets);

    if (impl_trx
        && lock_rec_other_has_expl_req(LOCK_S, 0, LOCK_WAIT,
               block, heap_no, impl_trx)) {

      ut_a(lock_rec_has_expl(LOCK_X | LOCK_REC_NOT_GAP,
                 block, heap_no, impl_trx));
    }
#endif
  }

  /* Generic per-lock checks for a user record's queue. */
  lock = lock_rec_get_first(block, heap_no);

  while (lock) {
    ut_a(lock->trx->conc_state == TRX_ACTIVE
         || lock->trx->conc_state == TRX_PREPARED
         || lock->trx->conc_state == TRX_COMMITTED_IN_MEMORY);
    ut_a(trx_in_trx_list(lock->trx));

    if (index) {
      ut_a(lock->index == index);
    }

    if (!lock_rec_get_gap(lock) && !lock_get_wait(lock)) {

      enum lock_mode  mode;

      /* A granted non-gap lock must not coexist with a
      conflicting explicit request by another trx. */
      if (lock_get_mode(lock) == LOCK_S) {
        mode = LOCK_X;
      } else {
        mode = LOCK_S;
      }
      ut_a(!lock_rec_other_has_expl_req(
             mode, 0, 0, block, heap_no, lock->trx));

    } else if (lock_get_wait(lock) && !lock_rec_get_gap(lock)) {

      ut_a(lock_rec_has_to_wait_in_queue(lock));
    }

    lock = lock_rec_get_next(heap_no, lock);
  }

  lock_mutex_exit_kernel();

  return(TRUE);
}
04781 
04782 /*********************************************************************/
/** Validates the record lock queues on a page.
The kernel mutex must NOT be held on entry. Because
lock_rec_queue_validate() releases the kernel mutex internally, the
scan restarts from the page's first lock after each record check,
using the cursors nth_lock/nth_bit to skip back to the position.
Always returns TRUE (failures fire asserts). */
static
ibool
lock_rec_validate_page(
/*===================*/
  ulint space,  /*!< in: tablespace id */
  ulint page_no)/*!< in: page number within the space */
{
  dict_index_t* index;
  buf_block_t*  block;
  const page_t* page;
  lock_t*   lock;
  const rec_t*  rec;
  ulint   nth_lock  = 0; /* cursor: lock position on the page */
  ulint   nth_bit   = 0; /* cursor: bit position within the lock */
  ulint   i;
  ulint   zip_size;
  mtr_t   mtr;
  mem_heap_t* heap    = NULL;
  ulint   offsets_[REC_OFFS_NORMAL_SIZE];
  ulint*    offsets   = offsets_;
  rec_offs_init(offsets_);

  ut_ad(!mutex_own(&kernel_mutex));

  mtr_start(&mtr);

  zip_size = fil_space_get_zip_size(space);
  ut_ad(zip_size != ULINT_UNDEFINED);
  block = buf_page_get(space, zip_size, page_no, RW_X_LATCH, &mtr);
  buf_block_dbg_add_level(block, SYNC_NO_ORDER_CHECK);

  page = block->frame;

  lock_mutex_enter_kernel();
loop:
  /* Re-fetch from the head: lock may be obsolete after the kernel
  mutex was released in lock_rec_queue_validate(). */
  lock = lock_rec_get_first_on_page_addr(space, page_no);

  if (!lock) {
    goto function_exit;
  }

  for (i = 0; i < nth_lock; i++) {

    lock = lock_rec_get_next_on_page(lock);

    if (!lock) {
      goto function_exit;
    }
  }

  ut_a(trx_in_trx_list(lock->trx));
  ut_a(lock->trx->conc_state == TRX_ACTIVE
       || lock->trx->conc_state == TRX_PREPARED
       || lock->trx->conc_state == TRX_COMMITTED_IN_MEMORY);

# ifdef UNIV_SYNC_DEBUG
  /* Only validate the record queues when this thread is not
  holding a space->latch.  Deadlocks are possible due to
  latching order violation when UNIV_DEBUG is defined while
  UNIV_SYNC_DEBUG is not. */
  if (!sync_thread_levels_contains(SYNC_FSP))
# endif /* UNIV_SYNC_DEBUG */
  /* Validate the queue of each record this lock covers; heap_no 1
  (presumably the page supremum -- TODO confirm) is checked even if
  its bit is not set in the lock bitmap. */
  for (i = nth_bit; i < lock_rec_get_n_bits(lock); i++) {

    if (i == 1 || lock_rec_get_nth_bit(lock, i)) {

      index = lock->index;
      rec = page_find_rec_with_heap_no(page, i);
      ut_a(rec);
      offsets = rec_get_offsets(rec, index, offsets,
              ULINT_UNDEFINED, &heap);
#if 0
      fprintf(stderr,
        "Validating %lu %lu\n",
        (ulong) space, (ulong) page_no);
#endif
      lock_mutex_exit_kernel();

      /* If this thread is holding the file space
      latch (fil_space_t::latch), the following
      check WILL break the latching order and may
      cause a deadlock of threads. */

      lock_rec_queue_validate(block, rec, index, offsets);

      lock_mutex_enter_kernel();

      nth_bit = i + 1;

      goto loop;
    }
  }

  nth_bit = 0;
  nth_lock++;

  goto loop;

function_exit:
  lock_mutex_exit_kernel();

  mtr_commit(&mtr);

  if (UNIV_LIKELY_NULL(heap)) {
    mem_heap_free(heap);
  }
  return(TRUE);
}
04893 
04894 /*********************************************************************/
/** Validates the whole lock system: every table lock queue of every
transaction, then every record lock page in the hash table.
The kernel mutex must NOT be held on entry. Because
lock_rec_validate_page() requires the kernel mutex to be released,
the per-cell scan uses a monotonically increasing (space, page_no)
limit to resume after each release. Always returns TRUE. */
static
ibool
lock_validate(void)
/*===============*/
{
  lock_t*   lock;
  trx_t*    trx;
  ib_uint64_t limit; /* smallest (space, page) not yet validated
        in the current hash cell */
  ulint   space;
  ulint   page_no;
  ulint   i;

  lock_mutex_enter_kernel();

  /* First pass: validate every table lock queue. */
  trx = UT_LIST_GET_FIRST(trx_sys->trx_list);

  while (trx) {
    lock = UT_LIST_GET_FIRST(trx->trx_locks);

    while (lock) {
      if (lock_get_type_low(lock) & LOCK_TABLE) {

        lock_table_queue_validate(
          lock->un_member.tab_lock.table);
      }

      lock = UT_LIST_GET_NEXT(trx_locks, lock);
    }

    trx = UT_LIST_GET_NEXT(trx_list, trx);
  }

  /* Second pass: validate every page that has record locks. */
  for (i = 0; i < hash_get_n_cells(lock_sys->rec_hash); i++) {

    limit = 0;

    for (;;) {
      /* Re-scan the cell from its head each round: the
      kernel mutex is released below, so earlier pointers
      may be stale. Find the first lock at or above limit. */
      lock = HASH_GET_FIRST(lock_sys->rec_hash, i);

      while (lock) {
        ib_uint64_t space_page;
        ut_a(trx_in_trx_list(lock->trx));

        space = lock->un_member.rec_lock.space;
        page_no = lock->un_member.rec_lock.page_no;

        space_page = ut_ull_create(space, page_no);

        if (space_page >= limit) {
          break;
        }

        lock = HASH_GET_NEXT(hash, lock);
      }

      if (!lock) {

        break;
      }

      lock_mutex_exit_kernel();

      lock_rec_validate_page(space, page_no);

      lock_mutex_enter_kernel();

      limit = ut_ull_create(space, page_no + 1);
    }
  }

  lock_mutex_exit_kernel();

  return(TRUE);
}
04971 #endif /* UNIV_DEBUG */
04972 /*============ RECORD LOCK CHECKS FOR ROW OPERATIONS ====================*/
04973 
04974 /*********************************************************************/
/** Checks whether an insert of rec's successor position would conflict
with other transactions' locks, and enqueues a waiting insert-intention
lock if so.
@return DB_SUCCESS, DB_LOCK_WAIT, or another error code from
lock_rec_enqueue_waiting() (DB_SUCCESS_LOCKED_REC is mapped back to
DB_SUCCESS before returning) */
UNIV_INTERN
ulint
lock_rec_insert_check_and_lock(
/*===========================*/
  ulint   flags,  /*!< in: if BTR_NO_LOCKING_FLAG is set,
        does nothing and returns DB_SUCCESS */
  const rec_t*  rec,  /*!< in: record after which to insert */
  buf_block_t*  block,  /*!< in/out: buffer block containing rec */
  dict_index_t* index,  /*!< in: index of rec */
  que_thr_t*  thr,  /*!< in: query thread */
  mtr_t*    mtr,  /*!< in/out: mini-transaction */
  ibool*    inherit)/*!< out: TRUE if the successor record had
        locks, i.e. the new record presumably should
        inherit gap locks -- verify with caller */
{
  const rec_t*  next_rec;
  trx_t*    trx;
  lock_t*   lock;
  ulint   err;
  ulint   next_rec_heap_no;

  ut_ad(block->frame == page_align(rec));

  if (flags & BTR_NO_LOCKING_FLAG) {

    return(DB_SUCCESS);
  }

  trx = thr_get_trx(thr);
  next_rec = page_rec_get_next_const(rec);
  next_rec_heap_no = page_rec_get_heap_no(next_rec);

  lock_mutex_enter_kernel();

  /* When inserting a record into an index, the table must be at
  least IX-locked or we must be building an index, in which case
  the table must be at least S-locked. */
  ut_ad(lock_table_has(trx, index->table, LOCK_IX)
        || (*index->name == TEMP_INDEX_PREFIX
      && lock_table_has(trx, index->table, LOCK_S)));

  lock = lock_rec_get_first(block, next_rec_heap_no);

  if (UNIV_LIKELY(lock == NULL)) {
    /* We optimize CPU time usage in the simplest case */

    lock_mutex_exit_kernel();

    if (!dict_index_is_clust(index)) {
      /* Update the page max trx id field */
      page_update_max_trx_id(block,
                 buf_block_get_page_zip(block),
                 trx->id, mtr);
    }

    *inherit = FALSE;

    return(DB_SUCCESS);
  }

  *inherit = TRUE;

  /* If another transaction has an explicit lock request which locks
  the gap, waiting or granted, on the successor, the insert has to wait.

  An exception is the case where the lock by the another transaction
  is a gap type lock which it placed to wait for its turn to insert. We
  do not consider that kind of a lock conflicting with our insert. This
  eliminates an unnecessary deadlock which resulted when 2 transactions
  had to wait for their insert. Both had waiting gap type lock requests
  on the successor, which produced an unnecessary deadlock. */

  if (lock_rec_other_has_conflicting(
        static_cast<lock_mode>(LOCK_X | LOCK_GAP | LOCK_INSERT_INTENTION),
        block, next_rec_heap_no, trx)) {

    /* Note that we may get DB_SUCCESS also here! */
    err = lock_rec_enqueue_waiting(LOCK_X | LOCK_GAP
                 | LOCK_INSERT_INTENTION,
                 block, next_rec_heap_no,
                 index, thr);
  } else {
    err = DB_SUCCESS;
  }

  lock_mutex_exit_kernel();

  /* On success, maintain PAGE_MAX_TRX_ID for secondary indexes. */
  switch (err) {
  case DB_SUCCESS_LOCKED_REC:
    err = DB_SUCCESS;
    /* fall through */
  case DB_SUCCESS:
    if (dict_index_is_clust(index)) {
      break;
    }
    /* Update the page max trx id field */
    page_update_max_trx_id(block,
               buf_block_get_page_zip(block),
               trx->id, mtr);
  }

#ifdef UNIV_DEBUG
  {
    mem_heap_t* heap    = NULL;
    ulint   offsets_[REC_OFFS_NORMAL_SIZE];
    const ulint*  offsets;
    rec_offs_init(offsets_);

    offsets = rec_get_offsets(next_rec, index, offsets_,
            ULINT_UNDEFINED, &heap);
    ut_ad(lock_rec_queue_validate(block,
                next_rec, index, offsets));
    if (UNIV_LIKELY_NULL(heap)) {
      mem_heap_free(heap);
    }
  }
#endif /* UNIV_DEBUG */

  return(err);
}
05102 
05103 /*********************************************************************/
05107 static
05108 void
05109 lock_rec_convert_impl_to_expl(
05110 /*==========================*/
05111   const buf_block_t*  block,  
05112   const rec_t*    rec,  
05113   dict_index_t*   index,  
05114   const ulint*    offsets)
05115 {
05116   trx_t*  impl_trx;
05117 
05118   ut_ad(mutex_own(&kernel_mutex));
05119   ut_ad(page_rec_is_user_rec(rec));
05120   ut_ad(rec_offs_validate(rec, index, offsets));
05121   ut_ad(!page_rec_is_comp(rec) == !rec_offs_comp(offsets));
05122 
05123   if (dict_index_is_clust(index)) {
05124     impl_trx = lock_clust_rec_some_has_impl(rec, index, offsets);
05125   } else {
05126     impl_trx = lock_sec_rec_some_has_impl_off_kernel(
05127       rec, index, offsets);
05128   }
05129 
05130   if (impl_trx) {
05131     ulint heap_no = page_rec_get_heap_no(rec);
05132 
05133     /* If the transaction has no explicit x-lock set on the
05134     record, set one for it */
05135 
05136     if (!lock_rec_has_expl(LOCK_X | LOCK_REC_NOT_GAP, block,
05137                heap_no, impl_trx)) {
05138 
05139       lock_rec_add_to_queue(
05140         LOCK_REC | LOCK_X | LOCK_REC_NOT_GAP,
05141         block, heap_no, index, impl_trx);
05142     }
05143   }
05144 }
05145 
05146 /*********************************************************************/
/** Checks and, if needed, sets an explicit X record lock before a
clustered index record is modified.
@return DB_SUCCESS, DB_LOCK_WAIT, or another error code from
lock_rec_lock() (DB_SUCCESS_LOCKED_REC is mapped to DB_SUCCESS) */
UNIV_INTERN
ulint
lock_clust_rec_modify_check_and_lock(
/*=================================*/
  ulint     flags,  /*!< in: if BTR_NO_LOCKING_FLAG is set,
        does nothing and returns DB_SUCCESS */
  const buf_block_t*  block,  /*!< in: buffer block containing rec */
  const rec_t*    rec,  /*!< in: record to be modified */
  dict_index_t*   index,  /*!< in: clustered index of rec */
  const ulint*    offsets,/*!< in: rec_get_offsets(rec, index) */
  que_thr_t*    thr)  /*!< in: query thread */
{
  ulint err;
  ulint heap_no;

  ut_ad(rec_offs_validate(rec, index, offsets));
  ut_ad(dict_index_is_clust(index));
  ut_ad(block->frame == page_align(rec));

  if (flags & BTR_NO_LOCKING_FLAG) {

    return(DB_SUCCESS);
  }

  /* The heap number is read directly from the record header in the
  format matching the page (compact or redundant). */
  heap_no = rec_offs_comp(offsets)
    ? rec_get_heap_no_new(rec)
    : rec_get_heap_no_old(rec);

  lock_mutex_enter_kernel();

  ut_ad(lock_table_has(thr_get_trx(thr), index->table, LOCK_IX));

  /* If a transaction has no explicit x-lock set on the record, set one
  for it */

  lock_rec_convert_impl_to_expl(block, rec, index, offsets);

  err = lock_rec_lock(TRUE, LOCK_X | LOCK_REC_NOT_GAP,
          block, heap_no, index, thr);

  lock_mutex_exit_kernel();

  ut_ad(lock_rec_queue_validate(block, rec, index, offsets));

  if (UNIV_UNLIKELY(err == DB_SUCCESS_LOCKED_REC)) {
    err = DB_SUCCESS;
  }

  return(err);
}
05205 
05206 /*********************************************************************/
/** Checks and, if needed, sets an explicit X record lock before a
secondary index record is modified, and maintains PAGE_MAX_TRX_ID.
@return DB_SUCCESS, DB_LOCK_WAIT, or another error code from
lock_rec_lock() (DB_SUCCESS_LOCKED_REC is mapped to DB_SUCCESS) */
UNIV_INTERN
ulint
lock_sec_rec_modify_check_and_lock(
/*===============================*/
  ulint   flags,  /*!< in: if BTR_NO_LOCKING_FLAG is set,
        does nothing and returns DB_SUCCESS */
  buf_block_t*  block,  /*!< in/out: buffer block containing rec */
  const rec_t*  rec,  /*!< in: record to be modified */
  dict_index_t* index,  /*!< in: secondary index of rec */
  que_thr_t*  thr,  /*!< in: query thread */
  mtr_t*    mtr)  /*!< in/out: mini-transaction */
{
  ulint err;
  ulint heap_no;

  ut_ad(!dict_index_is_clust(index));
  ut_ad(block->frame == page_align(rec));

  if (flags & BTR_NO_LOCKING_FLAG) {

    return(DB_SUCCESS);
  }

  heap_no = page_rec_get_heap_no(rec);

  /* Another transaction cannot have an implicit lock on the record,
  because when we come here, we already have modified the clustered
  index record, and this would not have been possible if another active
  transaction had modified this secondary index record. */

  lock_mutex_enter_kernel();

  ut_ad(lock_table_has(thr_get_trx(thr), index->table, LOCK_IX));

  err = lock_rec_lock(TRUE, LOCK_X | LOCK_REC_NOT_GAP,
          block, heap_no, index, thr);

  lock_mutex_exit_kernel();

#ifdef UNIV_DEBUG
  {
    mem_heap_t* heap    = NULL;
    ulint   offsets_[REC_OFFS_NORMAL_SIZE];
    const ulint*  offsets;
    rec_offs_init(offsets_);

    offsets = rec_get_offsets(rec, index, offsets_,
            ULINT_UNDEFINED, &heap);
    ut_ad(lock_rec_queue_validate(block, rec, index, offsets));
    if (UNIV_LIKELY_NULL(heap)) {
      mem_heap_free(heap);
    }
  }
#endif /* UNIV_DEBUG */

  if (err == DB_SUCCESS || err == DB_SUCCESS_LOCKED_REC) {
    /* Update the page max trx id field */
    /* It might not be necessary to do this if
    err == DB_SUCCESS (no new lock created),
    but it should not cost too much performance. */
    page_update_max_trx_id(block,
               buf_block_get_page_zip(block),
               thr_get_trx(thr)->id, mtr);
    err = DB_SUCCESS;
  }

  return(err);
}
05282 
05283 /*********************************************************************/
/** Sets a lock of the requested mode on a secondary index record that
a read is about to access, converting any implicit lock to an explicit
one first when it may exist.
@return DB_SUCCESS, DB_LOCK_WAIT, or another error code from
lock_rec_lock() (DB_SUCCESS_LOCKED_REC is mapped to DB_SUCCESS) */
UNIV_INTERN
enum db_err
lock_sec_rec_read_check_and_lock(
/*=============================*/
  ulint     flags,  /*!< in: if BTR_NO_LOCKING_FLAG is set,
        does nothing and returns DB_SUCCESS */
  const buf_block_t*  block,  /*!< in: buffer block containing rec */
  const rec_t*    rec,  /*!< in: user record or page supremum */
  dict_index_t*   index,  /*!< in: secondary index of rec */
  const ulint*    offsets,/*!< in: rec_get_offsets(rec, index) */
  enum lock_mode    mode, /*!< in: LOCK_S or LOCK_X */
  ulint     gap_mode,/*!< in: LOCK_ORDINARY, LOCK_GAP, or
        LOCK_REC_NOT_GAP */
  que_thr_t*    thr)  /*!< in: query thread */
{
  enum db_err err;
  ulint   heap_no;

  ut_ad(!dict_index_is_clust(index));
  ut_ad(block->frame == page_align(rec));
  ut_ad(page_rec_is_user_rec(rec) || page_rec_is_supremum(rec));
  ut_ad(rec_offs_validate(rec, index, offsets));
  ut_ad(mode == LOCK_X || mode == LOCK_S);

  if (flags & BTR_NO_LOCKING_FLAG) {

    return(DB_SUCCESS);
  }

  heap_no = page_rec_get_heap_no(rec);

  lock_mutex_enter_kernel();

  ut_ad(mode != LOCK_X
        || lock_table_has(thr_get_trx(thr), index->table, LOCK_IX));
  ut_ad(mode != LOCK_S
        || lock_table_has(thr_get_trx(thr), index->table, LOCK_IS));

  /* Some transaction may have an implicit x-lock on the record only
  if the max trx id for the page >= min trx id for the trx list or a
  database recovery is running. */

  if ((page_get_max_trx_id(block->frame) >= trx_list_get_min_trx_id()
       || recv_recovery_is_on())
      && !page_rec_is_supremum(rec)) {

    lock_rec_convert_impl_to_expl(block, rec, index, offsets);
  }

  err = lock_rec_lock(FALSE, mode | gap_mode,
          block, heap_no, index, thr);

  lock_mutex_exit_kernel();

  ut_ad(lock_rec_queue_validate(block, rec, index, offsets));

  if (UNIV_UNLIKELY(err == DB_SUCCESS_LOCKED_REC)) {
    err = DB_SUCCESS;
  }

  return(err);
}
05357 
05358 /*********************************************************************/
/** Sets a lock of the requested mode on a clustered index record that
a read is about to access, converting any implicit lock to an explicit
one first.
NOTE: unlike the secondary-index variant above, this function does NOT
map DB_SUCCESS_LOCKED_REC to DB_SUCCESS; the _alt wrapper below does.
@return DB_SUCCESS, DB_SUCCESS_LOCKED_REC, DB_LOCK_WAIT, or another
error code from lock_rec_lock() */
UNIV_INTERN
enum db_err
lock_clust_rec_read_check_and_lock(
/*===============================*/
  ulint     flags,  /*!< in: if BTR_NO_LOCKING_FLAG is set,
        does nothing and returns DB_SUCCESS */
  const buf_block_t*  block,  /*!< in: buffer block containing rec */
  const rec_t*    rec,  /*!< in: user record or page supremum */
  dict_index_t*   index,  /*!< in: clustered index of rec */
  const ulint*    offsets,/*!< in: rec_get_offsets(rec, index) */
  enum lock_mode    mode, /*!< in: LOCK_S or LOCK_X */
  ulint     gap_mode,/*!< in: LOCK_ORDINARY, LOCK_GAP, or
        LOCK_REC_NOT_GAP */
  que_thr_t*    thr)  /*!< in: query thread */
{
  enum db_err err;
  ulint   heap_no;

  ut_ad(dict_index_is_clust(index));
  ut_ad(block->frame == page_align(rec));
  ut_ad(page_rec_is_user_rec(rec) || page_rec_is_supremum(rec));
  ut_ad(gap_mode == LOCK_ORDINARY || gap_mode == LOCK_GAP
        || gap_mode == LOCK_REC_NOT_GAP);
  ut_ad(rec_offs_validate(rec, index, offsets));

  if (flags & BTR_NO_LOCKING_FLAG) {

    return(DB_SUCCESS);
  }

  heap_no = page_rec_get_heap_no(rec);

  lock_mutex_enter_kernel();

  ut_ad(mode != LOCK_X
        || lock_table_has(thr_get_trx(thr), index->table, LOCK_IX));
  ut_ad(mode != LOCK_S
        || lock_table_has(thr_get_trx(thr), index->table, LOCK_IS));

  /* The supremum record carries no implicit lock to convert. */
  if (UNIV_LIKELY(heap_no != PAGE_HEAP_NO_SUPREMUM)) {

    lock_rec_convert_impl_to_expl(block, rec, index, offsets);
  }

  err = lock_rec_lock(FALSE, mode | gap_mode,
          block, heap_no, index, thr);

  lock_mutex_exit_kernel();

  ut_ad(lock_rec_queue_validate(block, rec, index, offsets));

  return(err);
}
05427 /*********************************************************************/
/** Convenience wrapper for lock_clust_rec_read_check_and_lock() that
computes the record offsets itself and maps DB_SUCCESS_LOCKED_REC to
DB_SUCCESS.
@return DB_SUCCESS, DB_LOCK_WAIT, or another error code */
UNIV_INTERN
ulint
lock_clust_rec_read_check_and_lock_alt(
/*===================================*/
  ulint     flags,  /*!< in: if BTR_NO_LOCKING_FLAG is set,
        does nothing and returns DB_SUCCESS */
  const buf_block_t*  block,  /*!< in: buffer block containing rec */
  const rec_t*    rec,  /*!< in: user record or page supremum */
  dict_index_t*   index,  /*!< in: clustered index of rec */
  enum lock_mode    mode, /*!< in: LOCK_S or LOCK_X */
  ulint     gap_mode,/*!< in: LOCK_ORDINARY, LOCK_GAP, or
        LOCK_REC_NOT_GAP */
  que_thr_t*    thr)  /*!< in: query thread */
{
  mem_heap_t* tmp_heap  = NULL;
  ulint   offsets_[REC_OFFS_NORMAL_SIZE];
  ulint*    offsets   = offsets_;
  ulint   err;
  rec_offs_init(offsets_);

  offsets = rec_get_offsets(rec, index, offsets,
          ULINT_UNDEFINED, &tmp_heap);
  err = lock_clust_rec_read_check_and_lock(flags, block, rec, index,
             offsets, mode, gap_mode, thr);
  /* rec_get_offsets() allocates a heap only when the stack buffer
  offsets_ is too small. */
  if (tmp_heap) {
    mem_heap_free(tmp_heap);
  }

  if (UNIV_UNLIKELY(err == DB_SUCCESS_LOCKED_REC)) {
    err = DB_SUCCESS;
  }

  return(err);
}
05478 
05479 /*******************************************************************/
05481 UNIV_INLINE
05482 void
05483 lock_release_autoinc_last_lock(
05484 /*===========================*/
05485   ib_vector_t*  autoinc_locks)  
05486 {
05487   ulint   last;
05488   lock_t*   lock;
05489 
05490   ut_ad(mutex_own(&kernel_mutex));
05491   ut_a(!ib_vector_is_empty(autoinc_locks));
05492 
05493   /* The lock to be release must be the last lock acquired. */
05494   last = ib_vector_size(autoinc_locks) - 1;
05495   lock = static_cast<lock_t *>(ib_vector_get(autoinc_locks, last));
05496 
05497   /* Should have only AUTOINC locks in the vector. */
05498   ut_a(lock_get_mode(lock) == LOCK_AUTO_INC);
05499   ut_a(lock_get_type(lock) == LOCK_TABLE);
05500 
05501   ut_a(lock->un_member.tab_lock.table != NULL);
05502 
05503   /* This will remove the lock from the trx autoinc_locks too. */
05504   lock_table_dequeue(lock);
05505 }
05506 
05507 /*******************************************************************/
05510 UNIV_INTERN
05511 ibool
05512 lock_trx_holds_autoinc_locks(
05513 /*=========================*/
05514   const trx_t*  trx)    
05515 {
05516   ut_a(trx->autoinc_locks != NULL);
05517 
05518   return(!ib_vector_is_empty(trx->autoinc_locks));
05519 }
05520 
/*******************************************************************/
/** Releases all AUTO_INC locks held by the transaction.  Caller must
hold the kernel mutex. */
UNIV_INTERN
void
lock_release_autoinc_locks(
/*=======================*/
  trx_t*    trx)    /*!< in/out: transaction whose AUTO_INC locks
                    are to be released */
{
  ut_ad(mutex_own(&kernel_mutex));

  ut_a(trx->autoinc_locks != NULL);

  /* We release the locks in the reverse order. This is to
  avoid searching the vector for the element to delete at
  the lower level. See (lock_table_remove_low()) for details. */
  while (!ib_vector_is_empty(trx->autoinc_locks)) {

    /* lock_release_autoinc_last_lock() dequeues the last lock;
    the lower-level lock_table_remove_low() it reaches also
    removes the lock from the transaction's autoinc_locks
    vector, shrinking it each iteration. */
    lock_release_autoinc_last_lock(trx->autoinc_locks);
  }

  /* Should release all locks. */
  ut_a(ib_vector_is_empty(trx->autoinc_locks));
}
05546 
05547 /*******************************************************************/
05551 UNIV_INTERN
05552 ulint
05553 lock_get_type(
05554 /*==========*/
05555   const lock_t* lock) 
05556 {
05557   return(lock_get_type_low(lock));
05558 }
05559 
05560 /*******************************************************************/
05563 UNIV_INTERN
05564 trx_id_t
05565 lock_get_trx_id(
05566 /*============*/
05567   const lock_t* lock) 
05568 {
05569   return(lock->trx->id);
05570 }
05571 
05572 /*******************************************************************/
05576 UNIV_INTERN
05577 const char*
05578 lock_get_mode_str(
05579 /*==============*/
05580   const lock_t* lock) 
05581 {
05582   ibool is_gap_lock;
05583 
05584   is_gap_lock = lock_get_type_low(lock) == LOCK_REC
05585     && lock_rec_get_gap(lock);
05586 
05587   switch (lock_get_mode(lock)) {
05588   case LOCK_S:
05589     if (is_gap_lock) {
05590       return("S,GAP");
05591     } else {
05592       return("S");
05593     }
05594   case LOCK_X:
05595     if (is_gap_lock) {
05596       return("X,GAP");
05597     } else {
05598       return("X");
05599     }
05600   case LOCK_IS:
05601     if (is_gap_lock) {
05602       return("IS,GAP");
05603     } else {
05604       return("IS");
05605     }
05606   case LOCK_IX:
05607     if (is_gap_lock) {
05608       return("IX,GAP");
05609     } else {
05610       return("IX");
05611     }
05612   case LOCK_AUTO_INC:
05613     return("AUTO_INC");
05614   default:
05615     return("UNKNOWN");
05616   }
05617 }
05618 
05619 /*******************************************************************/
05623 UNIV_INTERN
05624 const char*
05625 lock_get_type_str(
05626 /*==============*/
05627   const lock_t* lock) 
05628 {
05629   switch (lock_get_type_low(lock)) {
05630   case LOCK_REC:
05631     return("RECORD");
05632   case LOCK_TABLE:
05633     return("TABLE");
05634   default:
05635     return("UNKNOWN");
05636   }
05637 }
05638 
05639 /*******************************************************************/
05642 UNIV_INLINE
05643 dict_table_t*
05644 lock_get_table(
05645 /*===========*/
05646   const lock_t* lock) 
05647 {
05648   switch (lock_get_type_low(lock)) {
05649   case LOCK_REC:
05650     return(lock->index->table);
05651   case LOCK_TABLE:
05652     return(lock->un_member.tab_lock.table);
05653   default:
05654     ut_error;
05655     return(NULL);
05656   }
05657 }
05658 
05659 /*******************************************************************/
05662 UNIV_INTERN
05663 table_id_t
05664 lock_get_table_id(
05665 /*==============*/
05666   const lock_t* lock) 
05667 {
05668   dict_table_t* table;
05669 
05670   table = lock_get_table(lock);
05671 
05672   return(table->id);
05673 }
05674 
05675 /*******************************************************************/
05679 UNIV_INTERN
05680 const char*
05681 lock_get_table_name(
05682 /*================*/
05683   const lock_t* lock) 
05684 {
05685   dict_table_t* table;
05686 
05687   table = lock_get_table(lock);
05688 
05689   return(table->name);
05690 }
05691 
05692 /*******************************************************************/
05695 UNIV_INTERN
05696 const dict_index_t*
05697 lock_rec_get_index(
05698 /*===============*/
05699   const lock_t* lock) 
05700 {
05701   ut_a(lock_get_type_low(lock) == LOCK_REC);
05702 
05703   return(lock->index);
05704 }
05705 
05706 /*******************************************************************/
05710 UNIV_INTERN
05711 const char*
05712 lock_rec_get_index_name(
05713 /*====================*/
05714   const lock_t* lock) 
05715 {
05716   ut_a(lock_get_type_low(lock) == LOCK_REC);
05717 
05718   return(lock->index->name);
05719 }
05720 
05721 /*******************************************************************/
05724 UNIV_INTERN
05725 ulint
05726 lock_rec_get_space_id(
05727 /*==================*/
05728   const lock_t* lock) 
05729 {
05730   ut_a(lock_get_type_low(lock) == LOCK_REC);
05731 
05732   return(lock->un_member.rec_lock.space);
05733 }
05734 
05735 /*******************************************************************/
05738 UNIV_INTERN
05739 ulint
05740 lock_rec_get_page_no(
05741 /*=================*/
05742   const lock_t* lock) 
05743 {
05744   ut_a(lock_get_type_low(lock) == LOCK_REC);
05745 
05746   return(lock->un_member.rec_lock.page_no);
05747 }