Drizzled Public API Documentation

btr0sea.cc
00001 /*****************************************************************************
00002 
00003 Copyright (C) 1996, 2009, Innobase Oy. All Rights Reserved.
00004 Copyright (C) 2008, Google Inc.
00005 
00006 Portions of this file contain modifications contributed and copyrighted by
00007 Google, Inc. Those modifications are gratefully acknowledged and are described
00008 briefly in the InnoDB documentation. The contributions by Google are
00009 incorporated with their permission, and subject to the conditions contained in
00010 the file COPYING.Google.
00011 
00012 This program is free software; you can redistribute it and/or modify it under
00013 the terms of the GNU General Public License as published by the Free Software
00014 Foundation; version 2 of the License.
00015 
00016 This program is distributed in the hope that it will be useful, but WITHOUT
00017 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
00018 FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
00019 
00020 You should have received a copy of the GNU General Public License along with
00021 this program; if not, write to the Free Software Foundation, Inc., 51 Franklin
00022 St, Fifth Floor, Boston, MA 02110-1301 USA
00023 
00024 *****************************************************************************/
00025 
00026 /********************************************************************/
00033 #include "btr0sea.h"
00034 #ifdef UNIV_NONINL
00035 #include "btr0sea.ic"
00036 #endif
00037 
00038 #include "buf0buf.h"
00039 #include "page0page.h"
00040 #include "page0cur.h"
00041 #include "btr0cur.h"
00042 #include "btr0pcur.h"
00043 #include "btr0btr.h"
00044 #include "ha0ha.h"
00045 
/** Flag: is the adaptive hash index search system enabled?
Set under btr_search_enabled_mutex and btr_search_latch (see
btr_search_enable()/btr_search_disable()). */
UNIV_INTERN bool    btr_search_enabled = TRUE;
/** Flag: set TRUE only after btr_search_disable() has also emptied the
hash index via buf_pool_drop_hash_index(); while btr_search_enabled is
FALSE but this is still FALSE, existing hash nodes may still be updated
although no new nodes are added. */
UNIV_INTERN ibool   btr_search_fully_disabled = FALSE;

/** Mutex serializing btr_search_enable() and btr_search_disable() */
static mutex_t      btr_search_enabled_mutex;

/** A dummy variable that is always zero; used in
btr_search_info_update_slow() to keep the compiler from deducing the
values of the copied hash-build parameters. */
UNIV_INTERN ulint   btr_search_this_is_zero = 0;

#ifdef UNIV_SEARCH_PERF_STAT
/** Number of successful adaptive hash index searches */
UNIV_INTERN ulint   btr_search_n_succ = 0;
/** Number of failed adaptive hash index searches */
UNIV_INTERN ulint   btr_search_n_hash_fail  = 0;
#endif /* UNIV_SEARCH_PERF_STAT */

/** Padding so that other memory-update hotspots do not share a cache
line with btr_search_latch (see the note below about allocating the
latch from dynamic memory). */
UNIV_INTERN byte    btr_sea_pad1[64];

/** The latch protecting the adaptive search system. */
/* We will allocate the latch from dynamic memory to get it to the
same DRAM page as other hotspot semaphores */
UNIV_INTERN rw_lock_t*    btr_search_latch_temp;

/** Padding to avoid false sharing with the latch pointer above */
UNIV_INTERN byte    btr_sea_pad2[64];

/** The adaptive hash index search system */
UNIV_INTERN btr_search_sys_t* btr_search_sys;

#ifdef UNIV_PFS_RWLOCK
/* Key to register btr_search_sys with performance schema */
UNIV_INTERN mysql_pfs_key_t btr_search_latch_key;
#endif /* UNIV_PFS_RWLOCK */

/** A page is considered for a hash index build when
block->n_hash_helps exceeds (page records) / this limit
(see btr_search_update_block_hash_info()). */
#define BTR_SEARCH_PAGE_BUILD_LIMIT 16

/** info->n_hash_potential must reach this limit before a page hash
index build is recommended (see btr_search_update_block_hash_info()). */
#define BTR_SEARCH_BUILD_LIMIT    100
00099 
/********************************************************************/
/** Builds a hash index on a page with the given parameters. If the
page already has a hash index with different parameters, the old hash
index is presumably removed first (forward declaration; definition is
later in this file). */
static
void
btr_search_build_page_hash_index(
/*=============================*/
  dict_index_t* index,  /*!< in: index for which to build the hash */
  buf_block_t*  block,  /*!< in: index page to hash */
  ulint   n_fields, /*!< in: number of complete fields to fold */
  ulint   n_bytes, /*!< in: number of bytes of the next field to
        fold -- TODO confirm against rec_fold() usage */
  ibool   left_side); /*!< in: TRUE if the leftmost record of equal
        fold values is hashed */
/*****************************************************************/
/** Checks that there is a free buffer frame (heap->free_block)
available for hash table heap use, and allocates one if not.
Must be called WITHOUT btr_search_latch held, since it may x-lock
that latch itself. */
static
void
btr_search_check_free_space_in_heap(void)
/*=====================================*/
{
  hash_table_t* table;
  mem_heap_t* heap;

#ifdef UNIV_SYNC_DEBUG
  /* The caller must not hold btr_search_latch in any mode. */
  ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
  ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

  table = btr_search_sys->hash_index;

  heap = table->heap;

  /* Note that we peek the value of heap->free_block without reserving
  the latch: this is ok, because we will not guarantee that there will
  be enough free space in the hash table. */

  if (heap->free_block == NULL) {
    buf_block_t*  block = buf_block_alloc(NULL, 0);

    rw_lock_x_lock(&btr_search_latch);

    /* Re-check under the latch: another thread may have
    installed a free block while we were allocating. */
    if (heap->free_block == NULL) {
      heap->free_block = block;
    } else {
      /* Lost the race; release our freshly allocated block. */
      buf_block_free(block);
    }

    rw_lock_x_unlock(&btr_search_latch);
  }
}
00162 
/*****************************************************************/
/** Creates and initializes the adaptive search system at database
start: allocates btr_search_latch and the btr_search_sys struct, and
creates the hash index table. */
UNIV_INTERN
void
btr_search_sys_create(
/*==================*/
  ulint hash_size)  /*!< in: hash index hash table size */
{
  /* We allocate the search latch from dynamic memory:
  see above at the global variable definition */

  btr_search_latch_temp = static_cast<rw_lock_t *>(mem_alloc(sizeof(rw_lock_t)));

  rw_lock_create(btr_search_latch_key, &btr_search_latch,
           SYNC_SEARCH_SYS);
  mutex_create(btr_search_enabled_mutex_key,
         &btr_search_enabled_mutex, SYNC_SEARCH_SYS_CONF);

  btr_search_sys = (btr_search_sys_t *)mem_alloc(sizeof(btr_search_sys_t));

  btr_search_sys->hash_index = ha_create(hash_size, 0, 0);
}
00185 
/*****************************************************************/
/** Frees the adaptive search system at database shutdown: releases
the search latch, the hash index heap and table, and the system
struct itself. The counterpart of btr_search_sys_create(). */
UNIV_INTERN
void
btr_search_sys_free(void)
/*=====================*/
{
  /* Destroy the latch before freeing the memory it lives in. */
  rw_lock_free(&btr_search_latch);
  mem_free(btr_search_latch_temp);
  btr_search_latch_temp = NULL;
  /* Free the hash table's heap before the table structure itself. */
  mem_heap_free(btr_search_sys->hash_index->heap);
  hash_table_free(btr_search_sys->hash_index);
  mem_free(btr_search_sys);
  btr_search_sys = NULL;
}
00201 
00202 /********************************************************************/
00204 UNIV_INTERN
00205 void
00206 btr_search_disable(void)
00207 /*====================*/
00208 {
00209   mutex_enter(&btr_search_enabled_mutex);
00210   rw_lock_x_lock(&btr_search_latch);
00211 
00212   /* Disable access to hash index, also tell ha_insert_for_fold()
00213   stop adding new nodes to hash index, but still allow updating
00214   existing nodes */
00215   btr_search_enabled = FALSE;
00216 
00217   /* Clear all block->is_hashed flags and remove all entries
00218   from btr_search_sys->hash_index. */
00219   buf_pool_drop_hash_index();
00220 
00221   /* hash index has been cleaned up, disallow any operation to
00222   the hash index */
00223   btr_search_fully_disabled = TRUE;
00224 
00225   /* btr_search_enabled_mutex should guarantee this. */
00226   ut_ad(!btr_search_enabled);
00227 
00228   rw_lock_x_unlock(&btr_search_latch);
00229   mutex_exit(&btr_search_enabled_mutex);
00230 }
00231 
00232 /********************************************************************/
00234 UNIV_INTERN
00235 void
00236 btr_search_enable(void)
00237 /*====================*/
00238 {
00239   mutex_enter(&btr_search_enabled_mutex);
00240   rw_lock_x_lock(&btr_search_latch);
00241 
00242   btr_search_enabled = TRUE;
00243   btr_search_fully_disabled = FALSE;
00244 
00245   rw_lock_x_unlock(&btr_search_latch);
00246   mutex_exit(&btr_search_enabled_mutex);
00247 }
00248 
00249 /*****************************************************************/
00252 UNIV_INTERN
00253 btr_search_t*
00254 btr_search_info_create(
00255 /*===================*/
00256   mem_heap_t* heap) 
00257 {
00258   btr_search_t* info;
00259 
00260   info = (btr_search_t *)mem_heap_alloc(heap, sizeof(btr_search_t));
00261 
00262 #ifdef UNIV_DEBUG
00263   info->magic_n = BTR_SEARCH_MAGIC_N;
00264 #endif /* UNIV_DEBUG */
00265 
00266   info->ref_count = 0;
00267   info->root_guess = NULL;
00268 
00269   info->hash_analysis = 0;
00270   info->n_hash_potential = 0;
00271 
00272   info->last_hash_succ = FALSE;
00273 
00274 #ifdef UNIV_SEARCH_PERF_STAT
00275   info->n_hash_succ = 0;
00276   info->n_hash_fail = 0;
00277   info->n_patt_succ = 0;
00278   info->n_searches = 0;
00279 #endif /* UNIV_SEARCH_PERF_STAT */
00280 
00281   /* Set some sensible values */
00282   info->n_fields = 1;
00283   info->n_bytes = 0;
00284 
00285   info->left_side = TRUE;
00286 
00287   return(info);
00288 }
00289 
00290 /*****************************************************************/
00294 UNIV_INTERN
00295 ulint
00296 btr_search_info_get_ref_count(
00297 /*==========================*/
00298   btr_search_t*   info) 
00299 {
00300   ulint ret;
00301 
00302   ut_ad(info);
00303 
00304 #ifdef UNIV_SYNC_DEBUG
00305   ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
00306   ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
00307 #endif /* UNIV_SYNC_DEBUG */
00308 
00309   rw_lock_s_lock(&btr_search_latch);
00310   ret = info->ref_count;
00311   rw_lock_s_unlock(&btr_search_latch);
00312 
00313   return(ret);
00314 }
00315 
/*********************************************************************/
/** Updates the search info of an index about a hash success or failure
and, when the current recommendation no longer fits the search, sets a
new recommended hash prefix (n_fields, n_bytes, left_side).
NOTE: called without any latch on info (see the caller,
btr_search_info_update_slow()). */
static
void
btr_search_info_update_hash(
/*========================*/
  btr_search_t* info, /*!< in/out: search info */
  btr_cur_t*  cursor) /*!< in: cursor which was just positioned */
{
  dict_index_t* index;
  ulint   n_unique;
  int   cmp;

#ifdef UNIV_SYNC_DEBUG
  ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
  ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

  index = cursor->index;

  if (dict_index_is_ibuf(index)) {
    /* So many deletes are performed on an insert buffer tree
    that we do not consider a hash index useful on it: */

    return;
  }

  n_unique = dict_index_get_n_unique_in_tree(index);

  if (info->n_hash_potential == 0) {

    goto set_new_recomm;
  }

  /* Test if the search would have succeeded using the recommended
  hash prefix */

  if (info->n_fields >= n_unique && cursor->up_match >= n_unique) {
    /* The whole unique prefix matched: the recommendation
    would have found the record. */
increment_potential:
    info->n_hash_potential++;

    return;
  }

  cmp = ut_pair_cmp(info->n_fields, info->n_bytes,
        cursor->low_match, cursor->low_bytes);

  if (info->left_side ? cmp <= 0 : cmp > 0) {

    goto set_new_recomm;
  }

  cmp = ut_pair_cmp(info->n_fields, info->n_bytes,
        cursor->up_match, cursor->up_bytes);

  if (info->left_side ? cmp <= 0 : cmp > 0) {

    goto increment_potential;
  }

set_new_recomm:
  /* We have to set a new recommendation; skip the hash analysis
  for a while to avoid unnecessary CPU time usage when there is no
  chance for success */

  info->hash_analysis = 0;

  cmp = ut_pair_cmp(cursor->up_match, cursor->up_bytes,
        cursor->low_match, cursor->low_bytes);
  if (cmp == 0) {
    info->n_hash_potential = 0;

    /* For extra safety, we set some sensible values here */

    info->n_fields = 1;
    info->n_bytes = 0;

    info->left_side = TRUE;

  } else if (cmp > 0) {
    /* up_match dominates: recommend a prefix one step longer
    than low_match, hashing from the left side. */
    info->n_hash_potential = 1;

    if (cursor->up_match >= n_unique) {

      info->n_fields = n_unique;
      info->n_bytes = 0;

    } else if (cursor->low_match < cursor->up_match) {

      info->n_fields = cursor->low_match + 1;
      info->n_bytes = 0;
    } else {
      info->n_fields = cursor->low_match;
      info->n_bytes = cursor->low_bytes + 1;
    }

    info->left_side = TRUE;
  } else {
    /* Mirror case of the branch above: low_match dominates,
    hash from the right side. */
    info->n_hash_potential = 1;

    if (cursor->low_match >= n_unique) {

      info->n_fields = n_unique;
      info->n_bytes = 0;

    } else if (cursor->low_match > cursor->up_match) {

      info->n_fields = cursor->up_match + 1;
      info->n_bytes = 0;
    } else {
      info->n_fields = cursor->up_match;
      info->n_bytes = cursor->up_bytes + 1;
    }

    info->left_side = FALSE;
  }
}
00435 
00436 /*********************************************************************/
00441 static
00442 ibool
00443 btr_search_update_block_hash_info(
00444 /*==============================*/
00445   btr_search_t* info, 
00446   buf_block_t*  block,  
00447   btr_cur_t*  /*cursor __attribute__((unused))*/)
00449 {
00450 #ifdef UNIV_SYNC_DEBUG
00451   ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
00452   ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
00453   ut_ad(rw_lock_own(&block->lock, RW_LOCK_SHARED)
00454         || rw_lock_own(&block->lock, RW_LOCK_EX));
00455 #endif /* UNIV_SYNC_DEBUG */
00456   ut_ad(cursor);
00457 
00458   info->last_hash_succ = FALSE;
00459 
00460   ut_a(buf_block_state_valid(block));
00461   ut_ad(info->magic_n == BTR_SEARCH_MAGIC_N);
00462 
00463   if ((block->n_hash_helps > 0)
00464       && (info->n_hash_potential > 0)
00465       && (block->n_fields == info->n_fields)
00466       && (block->n_bytes == info->n_bytes)
00467       && (block->left_side == info->left_side)) {
00468 
00469     if ((block->is_hashed)
00470         && (block->curr_n_fields == info->n_fields)
00471         && (block->curr_n_bytes == info->n_bytes)
00472         && (block->curr_left_side == info->left_side)) {
00473 
00474       /* The search would presumably have succeeded using
00475       the hash index */
00476 
00477       info->last_hash_succ = TRUE;
00478     }
00479 
00480     block->n_hash_helps++;
00481   } else {
00482     block->n_hash_helps = 1;
00483     block->n_fields = info->n_fields;
00484     block->n_bytes = info->n_bytes;
00485     block->left_side = info->left_side;
00486   }
00487 
00488 #ifdef UNIV_DEBUG
00489   if (cursor->index->table->does_not_fit_in_memory) {
00490     block->n_hash_helps = 0;
00491   }
00492 #endif /* UNIV_DEBUG */
00493 
00494   if ((block->n_hash_helps > page_get_n_recs(block->frame)
00495        / BTR_SEARCH_PAGE_BUILD_LIMIT)
00496       && (info->n_hash_potential >= BTR_SEARCH_BUILD_LIMIT)) {
00497 
00498     if ((!block->is_hashed)
00499         || (block->n_hash_helps
00500       > 2 * page_get_n_recs(block->frame))
00501         || (block->n_fields != block->curr_n_fields)
00502         || (block->n_bytes != block->curr_n_bytes)
00503         || (block->left_side != block->curr_left_side)) {
00504 
00505       /* Build a new hash index on the page */
00506 
00507       return(TRUE);
00508     }
00509   }
00510 
00511   return(FALSE);
00512 }
00513 
/*********************************************************************/
/** Updates a hash node reference for the record under the cursor
when a hash search failed but the current recommendation matches the
parameters of the existing page hash index. The caller must hold
btr_search_latch in exclusive mode and an s- or x-latch on the block. */
static
void
btr_search_update_hash_ref(
/*=======================*/
  btr_search_t* info, /*!< in: search info */
  buf_block_t*  block,  /*!< in: buffer block where cursor positioned */
  btr_cur_t*  cursor) /*!< in: cursor */
{
  ulint   fold;
  rec_t*    rec;
  index_id_t  index_id;

  /* Only called on the hash-failure path (see
  btr_search_info_update_slow()). */
  ut_ad(cursor->flag == BTR_CUR_HASH_FAIL);
#ifdef UNIV_SYNC_DEBUG
  ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX));
  ut_ad(rw_lock_own(&(block->lock), RW_LOCK_SHARED)
        || rw_lock_own(&(block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
  ut_ad(page_align(btr_cur_get_rec(cursor))
        == buf_block_get_frame(block));

  if (!block->is_hashed) {
    /* No page hash index exists; nothing to update. */
    return;
  }

  ut_a(block->index == cursor->index);
  ut_a(!dict_index_is_ibuf(cursor->index));

  if ((info->n_hash_potential > 0)
      && (block->curr_n_fields == info->n_fields)
      && (block->curr_n_bytes == info->n_bytes)
      && (block->curr_left_side == info->left_side)) {
    mem_heap_t* heap    = NULL;
    ulint   offsets_[REC_OFFS_NORMAL_SIZE];
    rec_offs_init(offsets_);

    rec = btr_cur_get_rec(cursor);

    if (!page_rec_is_user_rec(rec)) {
      /* Infimum/supremum records are never hashed. */
      return;
    }

    index_id = cursor->index->id;
    /* Fold the record with the page's current hash prefix
    parameters, not the info's (they are equal here anyway). */
    fold = rec_fold(rec,
        rec_get_offsets(rec, cursor->index, offsets_,
            ULINT_UNDEFINED, &heap),
        block->curr_n_fields,
        block->curr_n_bytes, index_id);
    if (UNIV_LIKELY_NULL(heap)) {
      mem_heap_free(heap);
    }
#ifdef UNIV_SYNC_DEBUG
    ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

    ha_insert_for_fold(btr_search_sys->hash_index, fold,
           block, rec);
  }
}
00583 
/*********************************************************************/
/** Updates the search info after a search. Delegates to
btr_search_info_update_hash() and btr_search_update_block_hash_info(),
possibly updates the hash node for the cursor record, and may trigger
building a hash index on the whole page. */
UNIV_INTERN
void
btr_search_info_update_slow(
/*========================*/
  btr_search_t* info, /*!< in/out: search info */
  btr_cur_t*  cursor) /*!< in: cursor which was just positioned */
{
  buf_block_t*  block;
  ibool   build_index;
  ulint*    params;
  ulint*    params2;

#ifdef UNIV_SYNC_DEBUG
  ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
  ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

  block = btr_cur_get_block(cursor);

  /* NOTE that the following two function calls do NOT protect
  info or block->n_fields etc. with any semaphore, to save CPU time!
  We cannot assume the fields are consistent when we return from
  those functions! */

  btr_search_info_update_hash(info, cursor);

  build_index = btr_search_update_block_hash_info(info, block, cursor);

  if (build_index || (cursor->flag == BTR_CUR_HASH_FAIL)) {
    /* Make sure a free frame exists for the hash heap before
    taking btr_search_latch (must be done latch-free). */
    btr_search_check_free_space_in_heap();
  }

  if (cursor->flag == BTR_CUR_HASH_FAIL) {
    /* Update the hash node reference, if appropriate */

#ifdef UNIV_SEARCH_PERF_STAT
    btr_search_n_hash_fail++;
#endif /* UNIV_SEARCH_PERF_STAT */

    rw_lock_x_lock(&btr_search_latch);

    btr_search_update_hash_ref(info, block, cursor);

    rw_lock_x_unlock(&btr_search_latch);
  }

  if (build_index) {
    /* Note that since we did not protect block->n_fields etc.
    with any semaphore, the values can be inconsistent. We have
    to check inside the function call that they make sense. We
    also malloc an array and store the values there to make sure
    the compiler does not let the function call parameters change
    inside the called function. It might be that the compiler
    would optimize the call just to pass pointers to block. */

    params = (ulint *)mem_alloc(3 * sizeof(ulint));
    params[0] = block->n_fields;
    params[1] = block->n_bytes;
    params[2] = block->left_side;

    /* Make sure the compiler cannot deduce the values and do
    optimizations */

    /* btr_search_this_is_zero is always 0, so params2 == params;
    the detour only defeats constant propagation. */
    params2 = params + btr_search_this_is_zero;

    btr_search_build_page_hash_index(cursor->index,
             block,
             params2[0],
             params2[1],
             params2[2]);
    mem_free(params);
  }
}
00660 
/******************************************************************/
/** Checks if a guessed cursor position (obtained from the hash index)
is right, by comparing the search tuple with the record under the
cursor and, when allowed, with the neighboring record.
@return TRUE if the guessed position is certainly correct */
static
ibool
btr_search_check_guess(
/*===================*/
  btr_cur_t*  cursor, /*!< in: guessed cursor position */
  ibool   can_only_compare_to_cursor_rec,
        /*!< in: if TRUE, the caller only holds
        btr_search_latch (not a page latch), so only
        the cursor record itself may be inspected */
  const dtuple_t* tuple,  /*!< in: data tuple searched for */
  ulint   mode, /*!< in: PAGE_CUR_L, ..., or PAGE_CUR_GE */
  mtr_t*    mtr)  /*!< in: mtr */
{
  rec_t*    rec;
  ulint   n_unique;
  ulint   match;
  ulint   bytes;
  int   cmp;
  mem_heap_t* heap    = NULL;
  ulint   offsets_[REC_OFFS_NORMAL_SIZE];
  ulint*    offsets   = offsets_;
  ibool   success   = FALSE;
  rec_offs_init(offsets_);

  n_unique = dict_index_get_n_unique_in_tree(cursor->index);

  rec = btr_cur_get_rec(cursor);

  ut_ad(page_rec_is_user_rec(rec));

  match = 0;
  bytes = 0;

  offsets = rec_get_offsets(rec, cursor->index, offsets,
          n_unique, &heap);
  /* cmp is 1, 0, or -1 as tuple compares greater, equal, or
  smaller than rec -- presumably; confirm against
  page_cmp_dtuple_rec_with_match(). */
  cmp = page_cmp_dtuple_rec_with_match(tuple, rec,
               offsets, &match, &bytes);

  if (mode == PAGE_CUR_GE) {
    if (cmp == 1) {
      /* tuple > rec: guess cannot be the GE position. */
      goto exit_func;
    }

    cursor->up_match = match;

    if (match >= n_unique) {
      /* Full unique prefix matched: position certain. */
      success = TRUE;
      goto exit_func;
    }
  } else if (mode == PAGE_CUR_LE) {
    if (cmp == -1) {
      goto exit_func;
    }

    cursor->low_match = match;

  } else if (mode == PAGE_CUR_G) {
    if (cmp != -1) {
      goto exit_func;
    }
  } else if (mode == PAGE_CUR_L) {
    if (cmp != 1) {
      goto exit_func;
    }
  }

  if (can_only_compare_to_cursor_rec) {
    /* Since we could not determine if our guess is right just by
    looking at the record under the cursor, return FALSE */
    goto exit_func;
  }

  match = 0;
  bytes = 0;

  if ((mode == PAGE_CUR_G) || (mode == PAGE_CUR_GE)) {
    rec_t*  prev_rec;

    ut_ad(!page_rec_is_infimum(rec));

    prev_rec = page_rec_get_prev(rec);

    if (page_rec_is_infimum(prev_rec)) {
      /* The guess is right only if this is the first
      page of the index (no previous page). */
      success = btr_page_get_prev(page_align(prev_rec), mtr)
        == FIL_NULL;

      goto exit_func;
    }

    offsets = rec_get_offsets(prev_rec, cursor->index, offsets,
            n_unique, &heap);
    cmp = page_cmp_dtuple_rec_with_match(tuple, prev_rec,
                 offsets, &match, &bytes);
    if (mode == PAGE_CUR_GE) {
      success = cmp == 1;
    } else {
      success = cmp != -1;
    }

    goto exit_func;
  } else {
    rec_t*  next_rec;

    ut_ad(!page_rec_is_supremum(rec));

    next_rec = page_rec_get_next(rec);

    if (page_rec_is_supremum(next_rec)) {
      /* The guess is right only if this is the last
      page of the index (no next page). */
      if (btr_page_get_next(page_align(next_rec), mtr)
          == FIL_NULL) {

        cursor->up_match = 0;
        success = TRUE;
      }

      goto exit_func;
    }

    offsets = rec_get_offsets(next_rec, cursor->index, offsets,
            n_unique, &heap);
    cmp = page_cmp_dtuple_rec_with_match(tuple, next_rec,
                 offsets, &match, &bytes);
    if (mode == PAGE_CUR_LE) {
      success = cmp == -1;
      cursor->up_match = match;
    } else {
      success = cmp != 1;
    }
  }
exit_func:
  if (UNIV_LIKELY_NULL(heap)) {
    mem_heap_free(heap);
  }
  return(success);
}
00806 
/******************************************************************/
/** Tries to guess the right search position based on the hash search
info of the index. Note that if mode is PAGE_CUR_LE, which is used in
inserts, and the function returns TRUE, then cursor->up_match and
cursor->low_match are not updated in a consistent way -- presumably
callers must not rely on them in that case (TODO confirm).
@return TRUE if succeeded */
UNIV_INTERN
ibool
btr_search_guess_on_hash(
/*=====================*/
  dict_index_t* index,    /*!< in: index */
  btr_search_t* info,   /*!< in: index search info */
  const dtuple_t* tuple,    /*!< in: logical record */
  ulint   mode,   /*!< in: PAGE_CUR_L, ... */
  ulint   latch_mode, /*!< in: BTR_SEARCH_LEAF or
          BTR_MODIFY_LEAF */
  btr_cur_t*  cursor,   /*!< out: tree cursor */
  ulint   has_search_latch,
        /*!< in: nonzero if the caller already holds
        btr_search_latch in s-mode */
  mtr_t*    mtr)    /*!< in: mtr */
{
  buf_pool_t* buf_pool;
  buf_block_t*  block;
  rec_t*    rec;
  ulint   fold;
  index_id_t  index_id;
#ifdef notdefined
  btr_cur_t cursor2;
  btr_pcur_t  pcur;
#endif
  ut_ad(index && info && tuple && cursor && mtr);
  ut_ad((latch_mode == BTR_SEARCH_LEAF)
        || (latch_mode == BTR_MODIFY_LEAF));

  /* Note that, for efficiency, the struct info may not be protected by
  any latch here! */

  if (UNIV_UNLIKELY(info->n_hash_potential == 0)) {
    /* No accumulated evidence that hash searches work. */
    return(FALSE);
  }

  cursor->n_fields = info->n_fields;
  cursor->n_bytes = info->n_bytes;

  if (UNIV_UNLIKELY(dtuple_get_n_fields(tuple)
        < cursor->n_fields + (cursor->n_bytes > 0))) {
    /* The tuple is too short to compute the recommended
    hash prefix. */
    return(FALSE);
  }

  index_id = index->id;

#ifdef UNIV_SEARCH_PERF_STAT
  info->n_hash_succ++;
#endif
  fold = dtuple_fold(tuple, cursor->n_fields, cursor->n_bytes, index_id);

  cursor->fold = fold;
  cursor->flag = BTR_CUR_HASH;

  if (UNIV_LIKELY(!has_search_latch)) {
    rw_lock_s_lock(&btr_search_latch);

    if (UNIV_UNLIKELY(!btr_search_enabled)) {
      goto failure_unlock;
    }
  }

  ut_ad(rw_lock_get_writer(&btr_search_latch) != RW_LOCK_EX);
  ut_ad(rw_lock_get_reader_count(&btr_search_latch) > 0);

  rec = (rec_t *)ha_search_and_get_data(btr_search_sys->hash_index, fold);

  if (UNIV_UNLIKELY(!rec)) {
    goto failure_unlock;
  }

  block = buf_block_align(rec);

  if (UNIV_LIKELY(!has_search_latch)) {
    /* Try to buffer-fix and latch the page without waiting;
    waiting here while holding btr_search_latch could deadlock. */
    if (UNIV_UNLIKELY(
          !buf_page_get_known_nowait(latch_mode, block,
                   BUF_MAKE_YOUNG,
                   __FILE__, __LINE__,
                   mtr))) {
      goto failure_unlock;
    }

    rw_lock_s_unlock(&btr_search_latch);

    buf_block_dbg_add_level(block, SYNC_TREE_NODE_FROM_HASH);
  }

  if (UNIV_UNLIKELY(buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE)) {
    /* The block is being evicted from the buffer pool. */
    ut_ad(buf_block_get_state(block) == BUF_BLOCK_REMOVE_HASH);

    if (UNIV_LIKELY(!has_search_latch)) {

      btr_leaf_page_release(block, latch_mode, mtr);
    }

    goto failure;
  }

  ut_ad(page_rec_is_user_rec(rec));

  btr_cur_position(index, rec, block, cursor);

  /* Check the validity of the guess within the page */

  /* If we only have the latch on btr_search_latch, not on the
  page, it only protects the columns of the record the cursor
  is positioned on. We cannot look at the next of the previous
  record to determine if our guess for the cursor position is
  right. */
  if (UNIV_UNLIKELY(index_id != btr_page_get_index_id(block->frame))
      || !btr_search_check_guess(cursor,
               has_search_latch,
               tuple, mode, mtr)) {
    if (UNIV_LIKELY(!has_search_latch)) {
      btr_leaf_page_release(block, latch_mode, mtr);
    }

    goto failure;
  }

  if (UNIV_LIKELY(info->n_hash_potential < BTR_SEARCH_BUILD_LIMIT + 5)) {
    /* Cap the potential slightly above the build limit. */
    info->n_hash_potential++;
  }

#ifdef notdefined
  /* These lines of code can be used in a debug version to check
  the correctness of the searched cursor position: */

  info->last_hash_succ = FALSE;

  /* Currently, does not work if the following fails: */
  ut_ad(!has_search_latch);

  btr_leaf_page_release(block, latch_mode, mtr);

  btr_cur_search_to_nth_level(index, 0, tuple, mode, latch_mode,
            &cursor2, 0, mtr);
  if (mode == PAGE_CUR_GE
      && page_rec_is_supremum(btr_cur_get_rec(&cursor2))) {

    /* If mode is PAGE_CUR_GE, then the binary search
    in the index tree may actually take us to the supremum
    of the previous page */

    info->last_hash_succ = FALSE;

    btr_pcur_open_on_user_rec(index, tuple, mode, latch_mode,
            &pcur, mtr);
    ut_ad(btr_pcur_get_rec(&pcur) == btr_cur_get_rec(cursor));
  } else {
    ut_ad(btr_cur_get_rec(&cursor2) == btr_cur_get_rec(cursor));
  }

  /* NOTE that it is theoretically possible that the above assertions
  fail if the page of the cursor gets removed from the buffer pool
  meanwhile! Thus it might not be a bug. */
#endif
  info->last_hash_succ = TRUE;

#ifdef UNIV_SEARCH_PERF_STAT
  btr_search_n_succ++;
#endif
  if (UNIV_LIKELY(!has_search_latch)
      && buf_page_peek_if_too_old(&block->page)) {

    buf_page_make_young(&block->page);
  }

  /* Increment the page get statistics though we did not really
  fix the page: for user info only */
  buf_pool = buf_pool_from_bpage(&block->page);
  buf_pool->stat.n_page_gets++;

  return(TRUE);

  /*-------------------------------------------*/
failure_unlock:
  if (UNIV_LIKELY(!has_search_latch)) {
    rw_lock_s_unlock(&btr_search_latch);
  }
failure:
  cursor->flag = BTR_CUR_HASH_FAIL;

#ifdef UNIV_SEARCH_PERF_STAT
  info->n_hash_fail++;

  if (info->n_hash_succ > 0) {
    info->n_hash_succ--;
  }
#endif
  info->last_hash_succ = FALSE;

  return(FALSE);
}
01015 
/********************************************************************/
/** Drops a page hash index: removes every hash node pointing to the
given page from btr_search_sys->hash_index. The fold values are first
collected under an s-latch only, then deleted under the x-latch; if the
page hash parameters changed in between, the whole operation retries. */
UNIV_INTERN
void
btr_search_drop_page_hash_index(
/*============================*/
  buf_block_t*  block)  /*!< in: block containing the index page;
        the caller holds an s- or x-latch on it, or
        the block is not bufferfixed (see the sync
        debug assertion below) */
{
  hash_table_t*   table;
  ulint     n_fields;
  ulint     n_bytes;
  const page_t*   page;
  const rec_t*    rec;
  ulint     fold;
  ulint     prev_fold;
  index_id_t    index_id;
  ulint     n_cached;
  ulint     n_recs;
  ulint*      folds;
  ulint     i;
  mem_heap_t*   heap;
  const dict_index_t* index;
  ulint*      offsets;

#ifdef UNIV_SYNC_DEBUG
  ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
  ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

retry:
  rw_lock_s_lock(&btr_search_latch);
  page = block->frame;

  if (UNIV_LIKELY(!block->is_hashed)) {
    /* Nothing hashed for this page. */
    rw_lock_s_unlock(&btr_search_latch);

    return;
  }

  table = btr_search_sys->hash_index;

#ifdef UNIV_SYNC_DEBUG
  ut_ad(rw_lock_own(&(block->lock), RW_LOCK_SHARED)
        || rw_lock_own(&(block->lock), RW_LOCK_EX)
        || (block->page.buf_fix_count == 0));
#endif /* UNIV_SYNC_DEBUG */

  /* Snapshot the hash parameters under the s-latch. */
  n_fields = block->curr_n_fields;
  n_bytes = block->curr_n_bytes;
  index = block->index;
  ut_a(!dict_index_is_ibuf(index));

  /* NOTE: The fields of block must not be accessed after
  releasing btr_search_latch, as the index page might only
  be s-latched! */

  rw_lock_s_unlock(&btr_search_latch);

  ut_a(n_fields + n_bytes > 0);

  n_recs = page_get_n_recs(page);

  /* Calculate and cache fold values into an array for fast deletion
  from the hash index */

  folds = (ulint *)mem_alloc(n_recs * sizeof(ulint));

  n_cached = 0;

  rec = page_get_infimum_rec(page);
  rec = page_rec_get_next_low(rec, page_is_comp(page));

  index_id = btr_page_get_index_id(page);

  ut_a(index_id == index->id);

  prev_fold = 0;

  heap = NULL;
  offsets = NULL;

  while (!page_rec_is_supremum(rec)) {
    offsets = rec_get_offsets(rec, index, offsets,
            n_fields + (n_bytes > 0), &heap);
    ut_a(rec_offs_n_fields(offsets) == n_fields + (n_bytes > 0));
    fold = rec_fold(rec, offsets, n_fields, n_bytes, index_id);

    if (fold == prev_fold && prev_fold != 0) {
      /* Consecutive equal folds need only one delete pass. */
      goto next_rec;
    }

    /* Remove all hash nodes pointing to this page from the
    hash chain */

    folds[n_cached] = fold;
    n_cached++;
next_rec:
    rec = page_rec_get_next_low(rec, page_rec_is_comp(rec));
    prev_fold = fold;
  }

  if (UNIV_LIKELY_NULL(heap)) {
    mem_heap_free(heap);
  }

  rw_lock_x_lock(&btr_search_latch);

  if (UNIV_UNLIKELY(!block->is_hashed)) {
    /* Someone else has meanwhile dropped the hash index */

    goto cleanup;
  }

  ut_a(block->index == index);

  if (UNIV_UNLIKELY(block->curr_n_fields != n_fields)
      || UNIV_UNLIKELY(block->curr_n_bytes != n_bytes)) {

    /* Someone else has meanwhile built a new hash index on the
    page, with different parameters */

    rw_lock_x_unlock(&btr_search_latch);

    mem_free(folds);
    goto retry;
  }

  for (i = 0; i < n_cached; i++) {

    ha_remove_all_nodes_to_page(table, folds[i], page);
  }

  ut_a(index->search_info->ref_count > 0);
  index->search_info->ref_count--;

  block->is_hashed = FALSE;
  block->index = NULL;
  
cleanup:
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
  if (UNIV_UNLIKELY(block->n_pointers)) {
    /* Corruption */
    ut_print_timestamp(stderr);
    fprintf(stderr,
      "  InnoDB: Corruption of adaptive hash index."
      " After dropping\n"
      "InnoDB: the hash index to a page of %s,"
      " still %lu hash nodes remain.\n",
      index->name, (ulong) block->n_pointers);
    rw_lock_x_unlock(&btr_search_latch);

    btr_search_validate();
  } else {
    rw_lock_x_unlock(&btr_search_latch);
  }
#else /* UNIV_AHI_DEBUG || UNIV_DEBUG */
  rw_lock_x_unlock(&btr_search_latch);
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */

  mem_free(folds);
}
01182 
01183 /********************************************************************/
01186 UNIV_INTERN
01187 void
01188 btr_search_drop_page_hash_when_freed(
01189 /*=================================*/
01190   ulint space,    
01191   ulint zip_size, 
01193   ulint page_no)  
01194 {
01195   buf_block_t*  block;
01196   mtr_t   mtr;
01197 
01198   if (!buf_page_peek_if_search_hashed(space, page_no)) {
01199 
01200     return;
01201   }
01202 
01203   mtr_start(&mtr);
01204 
01205   /* We assume that if the caller has a latch on the page, then the
01206   caller has already dropped the hash index for the page, and we never
01207   get here. Therefore we can acquire the s-latch to the page without
01208   having to fear a deadlock. */
01209 
01210   block = buf_page_get_gen(space, zip_size, page_no, RW_S_LATCH, NULL,
01211         BUF_GET_IF_IN_POOL, __FILE__, __LINE__,
01212         &mtr);
01213   /* Because the buffer pool mutex was released by
01214   buf_page_peek_if_search_hashed(), it is possible that the
01215   block was removed from the buffer pool by another thread
01216   before buf_page_get_gen() got a chance to acquire the buffer
01217   pool mutex again.  Thus, we must check for a NULL return. */
01218 
01219   if (UNIV_LIKELY(block != NULL)) {
01220 
01221     buf_block_dbg_add_level(block, SYNC_TREE_NODE_FROM_HASH);
01222 
01223     btr_search_drop_page_hash_index(block);
01224   }
01225 
01226   mtr_commit(&mtr);
01227 }
01228 
01229 /********************************************************************/
/** Builds a hash index on a page so the adaptive hash index can speed up
searches.  The parameters (n_fields, n_bytes, left_side) describe which
record prefix is folded and on which side of each group of equal-fold
records the hash pointer is placed.
NOTE: the caller must hold an s- or x-latch on block->lock, and must NOT
hold btr_search_latch (it is acquired here in both s- and x-mode).
@param index	index for which to build; must not be an ibuf index
@param block	index page to hash; its frame must be latched
@param n_fields	number of complete fields to fold
@param n_bytes	number of bytes of the next field to fold
@param left_side	TRUE if hash pointers should point to the leftmost
		record of each equal-fold group, FALSE for the rightmost */
static
void
btr_search_build_page_hash_index(
/*=============================*/
  dict_index_t* index,  
  buf_block_t*  block,  
  ulint   n_fields,
  ulint   n_bytes,
  ibool   left_side)
{
  hash_table_t* table;
  page_t*   page;
  rec_t*    rec;
  rec_t*    next_rec;
  ulint   fold;
  ulint   next_fold;
  index_id_t  index_id;
  ulint   n_cached;   /* number of fold/rec pairs collected */
  ulint   n_recs;
  ulint*    folds;
  rec_t**   recs;
  ulint   i;
  mem_heap_t* heap    = NULL;
  ulint   offsets_[REC_OFFS_NORMAL_SIZE];
  ulint*    offsets   = offsets_;
  rec_offs_init(offsets_);

  ut_ad(index);
  ut_a(!dict_index_is_ibuf(index));

  table = btr_search_sys->hash_index;
  page = buf_block_get_frame(block);

#ifdef UNIV_SYNC_DEBUG
  /* We must not already hold btr_search_latch in x-mode, and the
  caller must have latched the page itself. */
  ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
  ut_ad(rw_lock_own(&(block->lock), RW_LOCK_SHARED)
        || rw_lock_own(&(block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

  rw_lock_s_lock(&btr_search_latch);

  /* If the page is already hashed with different parameters, drop the
  old hash index first; it is rebuilt with the new parameters below. */
  if (block->is_hashed && ((block->curr_n_fields != n_fields)
         || (block->curr_n_bytes != n_bytes)
         || (block->curr_left_side != left_side))) {

    rw_lock_s_unlock(&btr_search_latch);

    btr_search_drop_page_hash_index(block);
  } else {
    rw_lock_s_unlock(&btr_search_latch);
  }

  n_recs = page_get_n_recs(page);

  if (n_recs == 0) {

    return;
  }

  /* Check that the values for hash index build are sensible */

  if (n_fields + n_bytes == 0) {

    return;
  }

  /* Refuse to fold a prefix longer than the number of fields that
  determine uniqueness in the tree: such folds would not be
  selective. */
  if (dict_index_get_n_unique_in_tree(index) < n_fields
      || (dict_index_get_n_unique_in_tree(index) == n_fields
    && n_bytes > 0)) {
    return;
  }

  /* Calculate and cache fold values and corresponding records into
  an array for fast insertion to the hash index */

  folds = (ulint *)mem_alloc(n_recs * sizeof(ulint));
  recs = (rec_t **)mem_alloc(n_recs * sizeof(rec_t*));

  n_cached = 0;

  index_id = btr_page_get_index_id(page);

  /* Start from the first user record (the one after the infimum). */
  rec = page_rec_get_next(page_get_infimum_rec(page));

  offsets = rec_get_offsets(rec, index, offsets,
          n_fields + (n_bytes > 0), &heap);

  if (!page_rec_is_supremum(rec)) {
    ut_a(n_fields <= rec_offs_n_fields(offsets));

    if (n_bytes > 0) {
      ut_a(n_fields < rec_offs_n_fields(offsets));
    }
  }

  fold = rec_fold(rec, offsets, n_fields, n_bytes, index_id);

  if (left_side) {
    /* Left-side pointers reference the first record of each
    equal-fold group; the first user record always starts one. */
    folds[n_cached] = fold;
    recs[n_cached] = rec;
    n_cached++;
  }

  for (;;) {
    next_rec = page_rec_get_next(rec);

    if (page_rec_is_supremum(next_rec)) {

      if (!left_side) {
        /* Right-side pointers reference the last record
        of each group; the last user record ends one. */
        folds[n_cached] = fold;
        recs[n_cached] = rec;
        n_cached++;
      }

      break;
    }

    offsets = rec_get_offsets(next_rec, index, offsets,
            n_fields + (n_bytes > 0), &heap);
    next_fold = rec_fold(next_rec, offsets, n_fields,
             n_bytes, index_id);

    if (fold != next_fold) {
      /* Insert an entry into the hash index */

      if (left_side) {

        folds[n_cached] = next_fold;
        recs[n_cached] = next_rec;
        n_cached++;
      } else {
        folds[n_cached] = fold;
        recs[n_cached] = rec;
        n_cached++;
      }
    }

    rec = next_rec;
    fold = next_fold;
  }

  btr_search_check_free_space_in_heap();

  rw_lock_x_lock(&btr_search_latch);

  if (UNIV_UNLIKELY(btr_search_fully_disabled)) {
    goto exit_func;
  }

  /* Someone may have meanwhile built a hash index on the page with
  different parameters; in that case do not overwrite it with the
  folds cached above. */
  if (block->is_hashed && ((block->curr_n_fields != n_fields)
         || (block->curr_n_bytes != n_bytes)
         || (block->curr_left_side != left_side))) {
    goto exit_func;
  }

  /* This counter is decremented every time we drop page
  hash index entries and is incremented here. Since we can
  rebuild hash index for a page that is already hashed, we
  have to take care not to increment the counter in that
  case. */
  if (!block->is_hashed) {
    index->search_info->ref_count++;
  }

  block->is_hashed = TRUE;
  block->n_hash_helps = 0;

  block->curr_n_fields = n_fields;
  block->curr_n_bytes = n_bytes;
  block->curr_left_side = left_side;
  block->index = index;

  for (i = 0; i < n_cached; i++) {

    ha_insert_for_fold(table, folds[i], block, recs[i]);
  }

exit_func:
  rw_lock_x_unlock(&btr_search_latch);

  mem_free(folds);
  mem_free(recs);
  if (UNIV_LIKELY_NULL(heap)) {
    mem_heap_free(heap);
  }
}
01423 
01424 /********************************************************************/
01429 UNIV_INTERN
01430 void
01431 btr_search_move_or_delete_hash_entries(
01432 /*===================================*/
01433   buf_block_t*  new_block,  
01435   buf_block_t*  block,    
01439   dict_index_t* index)    
01440 {
01441   ulint n_fields;
01442   ulint n_bytes;
01443   ibool left_side;
01444 
01445 #ifdef UNIV_SYNC_DEBUG
01446   ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
01447   ut_ad(rw_lock_own(&(new_block->lock), RW_LOCK_EX));
01448 #endif /* UNIV_SYNC_DEBUG */
01449   ut_a(!new_block->is_hashed || new_block->index == index);
01450   ut_a(!block->is_hashed || block->index == index);
01451   ut_a(!(new_block->is_hashed || block->is_hashed)
01452        || !dict_index_is_ibuf(index));
01453 
01454   rw_lock_s_lock(&btr_search_latch);
01455 
01456   if (new_block->is_hashed) {
01457 
01458     rw_lock_s_unlock(&btr_search_latch);
01459 
01460     btr_search_drop_page_hash_index(block);
01461 
01462     return;
01463   }
01464 
01465   if (block->is_hashed) {
01466 
01467     n_fields = block->curr_n_fields;
01468     n_bytes = block->curr_n_bytes;
01469     left_side = block->curr_left_side;
01470 
01471     new_block->n_fields = block->curr_n_fields;
01472     new_block->n_bytes = block->curr_n_bytes;
01473     new_block->left_side = left_side;
01474 
01475     rw_lock_s_unlock(&btr_search_latch);
01476 
01477     ut_a(n_fields + n_bytes > 0);
01478 
01479     btr_search_build_page_hash_index(index, new_block, n_fields,
01480              n_bytes, left_side);
01481     ut_ad(n_fields == block->curr_n_fields);
01482     ut_ad(n_bytes == block->curr_n_bytes);
01483     ut_ad(left_side == block->curr_left_side);
01484     return;
01485   }
01486 
01487   rw_lock_s_unlock(&btr_search_latch);
01488 }
01489 
01490 /********************************************************************/
01492 UNIV_INTERN
01493 void
01494 btr_search_update_hash_on_delete(
01495 /*=============================*/
01496   btr_cur_t*  cursor) 
01499 {
01500   hash_table_t* table;
01501   buf_block_t*  block;
01502   rec_t*    rec;
01503   ulint   fold;
01504   index_id_t  index_id;
01505   ulint   offsets_[REC_OFFS_NORMAL_SIZE];
01506   mem_heap_t* heap    = NULL;
01507   rec_offs_init(offsets_);
01508 
01509   rec = btr_cur_get_rec(cursor);
01510 
01511   block = btr_cur_get_block(cursor);
01512 
01513 #ifdef UNIV_SYNC_DEBUG
01514   ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
01515 #endif /* UNIV_SYNC_DEBUG */
01516 
01517   if (!block->is_hashed) {
01518 
01519     return;
01520   }
01521 
01522   ut_a(block->index == cursor->index);
01523   ut_a(block->curr_n_fields + block->curr_n_bytes > 0);
01524   ut_a(!dict_index_is_ibuf(cursor->index));
01525 
01526   table = btr_search_sys->hash_index;
01527 
01528   index_id = cursor->index->id;
01529   fold = rec_fold(rec, rec_get_offsets(rec, cursor->index, offsets_,
01530                ULINT_UNDEFINED, &heap),
01531       block->curr_n_fields, block->curr_n_bytes, index_id);
01532   if (UNIV_LIKELY_NULL(heap)) {
01533     mem_heap_free(heap);
01534   }
01535   rw_lock_x_lock(&btr_search_latch);
01536 
01537   ha_search_and_delete_if_found(table, fold, rec);
01538 
01539   rw_lock_x_unlock(&btr_search_latch);
01540 }
01541 
01542 /********************************************************************/
01544 UNIV_INTERN
01545 void
01546 btr_search_update_hash_node_on_insert(
01547 /*==================================*/
01548   btr_cur_t*  cursor) 
01552 {
01553   hash_table_t* table;
01554   buf_block_t*  block;
01555   rec_t*    rec;
01556 
01557   rec = btr_cur_get_rec(cursor);
01558 
01559   block = btr_cur_get_block(cursor);
01560 
01561 #ifdef UNIV_SYNC_DEBUG
01562   ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
01563 #endif /* UNIV_SYNC_DEBUG */
01564 
01565   if (!block->is_hashed) {
01566 
01567     return;
01568   }
01569 
01570   ut_a(block->index == cursor->index);
01571   ut_a(!dict_index_is_ibuf(cursor->index));
01572 
01573   rw_lock_x_lock(&btr_search_latch);
01574 
01575   if ((cursor->flag == BTR_CUR_HASH)
01576       && (cursor->n_fields == block->curr_n_fields)
01577       && (cursor->n_bytes == block->curr_n_bytes)
01578       && !block->curr_left_side) {
01579 
01580     table = btr_search_sys->hash_index;
01581 
01582     ha_search_and_update_if_found(table, cursor->fold, rec,
01583                 block, page_rec_get_next(rec));
01584 
01585     rw_lock_x_unlock(&btr_search_latch);
01586   } else {
01587     rw_lock_x_unlock(&btr_search_latch);
01588 
01589     btr_search_update_hash_on_insert(cursor);
01590   }
01591 }
01592 
01593 /********************************************************************/
/** Updates the page hash index when a single record is inserted on a
page.  The cursor must be positioned on the record just before the
inserted one, and the caller must hold an x-latch on the page.  Depending
on how the folds of the neighboring records compare, up to two hash
entries may be added, all under a single x-lock of btr_search_latch.
@param cursor	cursor positioned just before the inserted record */
UNIV_INTERN
void
btr_search_update_hash_on_insert(
/*=============================*/
  btr_cur_t*  cursor) 
{
  hash_table_t* table;
  buf_block_t*  block;
  rec_t*    rec;      /* record just before the inserted one */
  rec_t*    ins_rec;  /* the inserted record */
  rec_t*    next_rec; /* record just after the inserted one */
  index_id_t  index_id;
  ulint   fold;
  ulint   ins_fold;
  ulint   next_fold = 0; /* initialized only to silence a compiler
        warning; assigned below whenever next_rec is not the
        supremum, the only case in which it is read */
  ulint   n_fields;
  ulint   n_bytes;
  ibool   left_side;
  ibool   locked    = FALSE; /* TRUE once btr_search_latch
        has been x-locked */
  mem_heap_t* heap    = NULL;
  ulint   offsets_[REC_OFFS_NORMAL_SIZE];
  ulint*    offsets   = offsets_;
  rec_offs_init(offsets_);

  table = btr_search_sys->hash_index;

  /* Reserve hash heap space before taking any latch. */
  btr_search_check_free_space_in_heap();

  rec = btr_cur_get_rec(cursor);

  block = btr_cur_get_block(cursor);

#ifdef UNIV_SYNC_DEBUG
  ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

  if (!block->is_hashed) {

    return;
  }

  ut_a(block->index == cursor->index);
  ut_a(!dict_index_is_ibuf(cursor->index));

  index_id = cursor->index->id;

  /* Snapshot the page's current hash build parameters. */
  n_fields = block->curr_n_fields;
  n_bytes = block->curr_n_bytes;
  left_side = block->curr_left_side;

  ins_rec = page_rec_get_next(rec);
  next_rec = page_rec_get_next(ins_rec);

  offsets = rec_get_offsets(ins_rec, cursor->index, offsets,
          ULINT_UNDEFINED, &heap);
  ins_fold = rec_fold(ins_rec, offsets, n_fields, n_bytes, index_id);

  if (!page_rec_is_supremum(next_rec)) {
    offsets = rec_get_offsets(next_rec, cursor->index, offsets,
            n_fields + (n_bytes > 0), &heap);
    next_fold = rec_fold(next_rec, offsets, n_fields,
             n_bytes, index_id);
  }

  if (!page_rec_is_infimum(rec)) {
    offsets = rec_get_offsets(rec, cursor->index, offsets,
            n_fields + (n_bytes > 0), &heap);
    fold = rec_fold(rec, offsets, n_fields, n_bytes, index_id);
  } else {
    /* The inserted record is the first user record on the page:
    with left-side pointers it starts a new equal-fold group and
    must get a hash entry. */
    if (left_side) {

      rw_lock_x_lock(&btr_search_latch);

      locked = TRUE;

      ha_insert_for_fold(table, ins_fold, block, ins_rec);
    }

    goto check_next_rec;
  }

  /* The insert changed the group boundary between rec and ins_rec. */
  if (fold != ins_fold) {

    if (!locked) {

      rw_lock_x_lock(&btr_search_latch);

      locked = TRUE;
    }

    if (!left_side) {
      ha_insert_for_fold(table, fold, block, rec);
    } else {
      ha_insert_for_fold(table, ins_fold, block, ins_rec);
    }
  }

check_next_rec:
  if (page_rec_is_supremum(next_rec)) {
    /* The inserted record is the last user record on the page:
    with right-side pointers it ends a group and must get a hash
    entry. */
    if (!left_side) {

      if (!locked) {
        rw_lock_x_lock(&btr_search_latch);

        locked = TRUE;
      }

      ha_insert_for_fold(table, ins_fold, block, ins_rec);
    }

    goto function_exit;
  }

  /* The insert changed the group boundary between ins_rec and
  next_rec. */
  if (ins_fold != next_fold) {

    if (!locked) {

      rw_lock_x_lock(&btr_search_latch);

      locked = TRUE;
    }

    if (!left_side) {

      ha_insert_for_fold(table, ins_fold, block, ins_rec);
      /*
      fputs("Hash insert for ", stderr);
      dict_index_name_print(stderr, cursor->index);
      fprintf(stderr, " fold %lu\n", ins_fold);
      */
    } else {
      ha_insert_for_fold(table, next_fold, block, next_rec);
    }
  }

function_exit:
  if (UNIV_LIKELY_NULL(heap)) {
    mem_heap_free(heap);
  }
  if (locked) {
    rw_lock_x_unlock(&btr_search_latch);
  }
}
01742 
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
/********************************************************************/
/** Validates the adaptive hash index (debug builds only).  Checks that
every hash node points to a block consistent with the buffer pool page
hash, and that each node's stored fold agrees with a recomputed fold of
the record it points to; then validates the hash table structure itself.
Inconsistencies are reported to stderr (with at most 20 page dumps).
The scan periodically releases btr_search_latch and the buffer pool
mutexes so other threads can make progress.
@return TRUE if no inconsistencies were found */
UNIV_INTERN
ibool
btr_search_validate(void)
/*=====================*/
{
  ha_node_t*  node;
  ulint   n_page_dumps  = 0; /* limits buf_page_print() calls to 20 */
  ibool   ok    = TRUE;
  ulint   i;
  ulint   cell_count;
  mem_heap_t* heap    = NULL;
  ulint   offsets_[REC_OFFS_NORMAL_SIZE];
  ulint*    offsets   = offsets_;

  /* How many cells to check before temporarily releasing
  btr_search_latch. */
  ulint   chunk_size = 10000;

  rec_offs_init(offsets_);

  rw_lock_x_lock(&btr_search_latch);
  buf_pool_mutex_enter_all();

  cell_count = hash_get_n_cells(btr_search_sys->hash_index);

  for (i = 0; i < cell_count; i++) {
    /* We release btr_search_latch every once in a while to
    give other queries a chance to run. */
    if ((i != 0) && ((i % chunk_size) == 0)) {
      buf_pool_mutex_exit_all();
      rw_lock_x_unlock(&btr_search_latch);
      os_thread_yield();
      rw_lock_x_lock(&btr_search_latch);
      buf_pool_mutex_enter_all();
    }

    node = hash_get_nth_cell(btr_search_sys->hash_index, i)->node;

    /* Walk the hash chain of this cell. */
    for (; node != NULL; node = node->next) {
      const buf_block_t*  block
        = buf_block_align(node->data);
      const buf_block_t*  hash_block;
      buf_pool_t*   buf_pool;
      index_id_t    page_index_id;

      buf_pool = buf_pool_from_bpage((buf_page_t*) block);

      if (UNIV_LIKELY(buf_block_get_state(block)
          == BUF_BLOCK_FILE_PAGE)) {

        /* The space and offset are only valid
        for file blocks.  It is possible that
        the block is being freed
        (BUF_BLOCK_REMOVE_HASH, see the
        assertion and the comment below) */
        hash_block = buf_block_hash_get(
          buf_pool,
          buf_block_get_space(block),
          buf_block_get_page_no(block));
      } else {
        hash_block = NULL;
      }

      if (hash_block) {
        ut_a(hash_block == block);
      } else {
        /* When a block is being freed,
        buf_LRU_search_and_free_block() first
        removes the block from
        buf_pool->page_hash by calling
        buf_LRU_block_remove_hashed_page().
        After that, it invokes
        btr_search_drop_page_hash_index() to
        remove the block from
        btr_search_sys->hash_index. */

        ut_a(buf_block_get_state(block)
             == BUF_BLOCK_REMOVE_HASH);
      }

      ut_a(!dict_index_is_ibuf(block->index));

      offsets = rec_get_offsets((const rec_t*) node->data,
              block->index, offsets,
              block->curr_n_fields
              + (block->curr_n_bytes > 0),
              &heap);

      page_index_id = btr_page_get_index_id(block->frame);

      /* Recompute the fold of the pointed-to record under the
      page's current hash parameters and compare it with the
      fold stored in the node. */
      if (UNIV_UNLIKELY
          (!block->is_hashed || node->fold
           != rec_fold((rec_t*)(node->data),
           offsets,
           block->curr_n_fields,
           block->curr_n_bytes,
           page_index_id))) {
        const page_t* page = block->frame;

        ok = FALSE;
        ut_print_timestamp(stderr);

        fprintf(stderr,
          "  InnoDB: Error in an adaptive hash"
          " index pointer to page %lu\n"
          "InnoDB: ptr mem address %p"
          " index id %llu,"
          " node fold %lu, rec fold %lu\n",
          (ulong) page_get_page_no(page),
          node->data,
          (ullint) page_index_id,
          (ulong) node->fold,
          (ulong) rec_fold((rec_t*)(node->data),
               offsets,
               block->curr_n_fields,
               block->curr_n_bytes,
               page_index_id));

        fputs("InnoDB: Record ", stderr);
        rec_print_new(stderr, (rec_t*)node->data,
                offsets);
        fprintf(stderr, "\nInnoDB: on that page."
          " Page mem address %p, is hashed %lu,"
          " n fields %lu, n bytes %lu\n"
          "InnoDB: side %lu\n",
          (void*) page, (ulong) block->is_hashed,
          (ulong) block->curr_n_fields,
          (ulong) block->curr_n_bytes,
          (ulong) block->curr_left_side);

        if (n_page_dumps < 20) {
          buf_page_print(page, 0);
          n_page_dumps++;
        }
      }
    }
  }

  /* Second pass: validate the hash table structure chunk by chunk,
  again yielding the latches between chunks. */
  for (i = 0; i < cell_count; i += chunk_size) {
    ulint end_index = ut_min(i + chunk_size - 1, cell_count - 1);

    /* We release btr_search_latch every once in a while to
    give other queries a chance to run. */
    if (i != 0) {
      buf_pool_mutex_exit_all();
      rw_lock_x_unlock(&btr_search_latch);
      os_thread_yield();
      rw_lock_x_lock(&btr_search_latch);
      buf_pool_mutex_enter_all();
    }

    if (!ha_validate(btr_search_sys->hash_index, i, end_index)) {
      ok = FALSE;
    }
  }

  buf_pool_mutex_exit_all();
  rw_lock_x_unlock(&btr_search_latch);
  if (UNIV_LIKELY_NULL(heap)) {
    mem_heap_free(heap);
  }

  return(ok);
}
#endif /* defined UNIV_AHI_DEBUG || defined UNIV_DEBUG */