Drizzled Public API Documentation

row0ins.cc
1 /*****************************************************************************
2 
3 Copyright (C) 1996, 2010, Innobase Oy. All Rights Reserved.
4 
5 This program is free software; you can redistribute it and/or modify it under
6 the terms of the GNU General Public License as published by the Free Software
7 Foundation; version 2 of the License.
8 
9 This program is distributed in the hope that it will be useful, but WITHOUT
10 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
11 FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
12 
13 You should have received a copy of the GNU General Public License along with
14 this program; if not, write to the Free Software Foundation, Inc., 51 Franklin
15 St, Fifth Floor, Boston, MA 02110-1301 USA
16 
17 *****************************************************************************/
18 
19 /**************************************************//**
20 @file row/row0ins.c
21 Insert into a table
22 
23 Created 4/20/1996 Heikki Tuuri
24 *******************************************************/
25 
26 #include "row0ins.h"
27 
28 #ifdef UNIV_NONINL
29 #include "row0ins.ic"
30 #endif
31 
32 #include "ha_prototypes.h"
33 #include "dict0dict.h"
34 #include "dict0boot.h"
35 #include "trx0undo.h"
36 #include "btr0btr.h"
37 #include "btr0cur.h"
38 #include "mach0data.h"
39 #include "que0que.h"
40 #include "row0upd.h"
41 #include "row0sel.h"
42 #include "row0row.h"
43 #include "rem0cmp.h"
44 #include "lock0lock.h"
45 #include "log0log.h"
46 #include "eval0eval.h"
47 #include "data0data.h"
48 #include "usr0sess.h"
49 #include "buf0lru.h"
50 
51 #define ROW_INS_PREV 1
52 #define ROW_INS_NEXT 2
53 
54 /*************************************************************************
55 IMPORTANT NOTE: Any operation that generates redo MUST check that there
56 is enough space in the redo log for that operation. This is
57 done by calling log_free_check(). The reason for checking the
58 availability of the redo log space before the start of the operation is
59 that we MUST not hold any synchronization objects when performing the
60 check.
61 If you make a change in this module make sure that no codepath is
62 introduced where a call to log_free_check() is bypassed. */
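
The rule above can be restated as a small ordering sketch; the stub functions below are placeholders standing in for the real log_free_check()/mtr calls, not the InnoDB API itself.

    /* Placeholder stubs; only the call ordering mirrors the note above. */
    void log_free_check_stub() { /* block until the redo log has room */ }
    void mtr_start_stub()      { /* begin mini-transaction, may acquire latches */ }
    void mtr_commit_stub()     { /* emit redo records, release latches */ }

    void redo_generating_operation()
    {
            /* 1. Check redo-log headroom while holding NO synchronization objects. */
            log_free_check_stub();

            /* 2. Only then acquire latches and generate redo. */
            mtr_start_stub();
            /* ... page modifications ... */
            mtr_commit_stub();
    }
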
63 
64 /*********************************************************************/
67 UNIV_INTERN
68 ins_node_t*
69 ins_node_create(
70 /*============*/
71  ulint ins_type,
72  dict_table_t* table,
73  mem_heap_t* heap)
74 {
75  ins_node_t* node;
76 
77  node = static_cast<ins_node_t *>(mem_heap_alloc(heap, sizeof(ins_node_t)));
78 
79  node->common.type = QUE_NODE_INSERT;
80 
81  node->ins_type = ins_type;
82 
83  node->state = INS_NODE_SET_IX_LOCK;
84  node->table = table;
85  node->index = NULL;
86  node->entry = NULL;
87 
88  node->select = NULL;
89 
90  node->trx_id = 0;
91 
92  node->entry_sys_heap = mem_heap_create(128);
93 
94  node->magic_n = INS_NODE_MAGIC_N;
95 
96  return(node);
97 }
98 
99 /***********************************************************/
101 UNIV_INTERN
102 void
103 ins_node_create_entry_list(
104 /*=======================*/
105  ins_node_t* node)
106 {
107  dict_index_t* index;
108  dtuple_t* entry;
109 
110  ut_ad(node->entry_sys_heap);
111 
112  UT_LIST_INIT(node->entry_list);
113 
114  index = dict_table_get_first_index(node->table);
115 
116  while (index != NULL) {
117  entry = row_build_index_entry(node->row, NULL, index,
118  node->entry_sys_heap);
119  UT_LIST_ADD_LAST(tuple_list, node->entry_list, entry);
120 
121  index = dict_table_get_next_index(index);
122  }
123 }
124 
125 /*****************************************************************/
127 static
128 void
129 row_ins_alloc_sys_fields(
130 /*=====================*/
131  ins_node_t* node)
132 {
133  dtuple_t* row;
134  dict_table_t* table;
135  mem_heap_t* heap;
136  const dict_col_t* col;
137  dfield_t* dfield;
138  byte* ptr;
139 
140  row = node->row;
141  table = node->table;
142  heap = node->entry_sys_heap;
143 
144  ut_ad(row && table && heap);
145  ut_ad(dtuple_get_n_fields(row) == dict_table_get_n_cols(table));
146 
147  /* 1. Allocate buffer for row id */
148 
149  col = dict_table_get_sys_col(table, DATA_ROW_ID);
150 
151  dfield = dtuple_get_nth_field(row, dict_col_get_no(col));
152 
153  ptr = static_cast<byte *>(mem_heap_zalloc(heap, DATA_ROW_ID_LEN));
154 
155  dfield_set_data(dfield, ptr, DATA_ROW_ID_LEN);
156 
157  node->row_id_buf = ptr;
158 
159  /* 3. Allocate buffer for trx id */
160 
161  col = dict_table_get_sys_col(table, DATA_TRX_ID);
162 
163  dfield = dtuple_get_nth_field(row, dict_col_get_no(col));
164  ptr = static_cast<byte *>(mem_heap_zalloc(heap, DATA_TRX_ID_LEN));
165 
166  dfield_set_data(dfield, ptr, DATA_TRX_ID_LEN);
167 
168  node->trx_id_buf = ptr;
169 
170  /* 4. Allocate buffer for roll ptr */
171 
172  col = dict_table_get_sys_col(table, DATA_ROLL_PTR);
173 
174  dfield = dtuple_get_nth_field(row, dict_col_get_no(col));
175  ptr = static_cast<byte *>(mem_heap_zalloc(heap, DATA_ROLL_PTR_LEN));
176 
177  dfield_set_data(dfield, ptr, DATA_ROLL_PTR_LEN);
178 }
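
For reference, the three system columns reserved above have fixed widths: DATA_ROW_ID_LEN and DATA_TRX_ID_LEN are 6 bytes, DATA_ROLL_PTR_LEN is 7. A minimal sketch of filling such a big-endian id buffer, analogous to what dict_sys_write_row_id() and trx_write_trx_id() later do (illustrative helper, not the real mach_write_to_6):

    #include <cstdint>

    /* Write the low 48 bits of an id into a 6-byte big-endian buffer,
       the layout used for DB_ROW_ID and DB_TRX_ID. Illustrative only. */
    void write_id_6(unsigned char* buf, std::uint64_t id)
    {
            for (int i = 5; i >= 0; i--) {
                    buf[i] = static_cast<unsigned char>(id & 0xFF);
                    id >>= 8;
            }
    }
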
179 
180 /*********************************************************************/
184 UNIV_INTERN
185 void
186 ins_node_set_new_row(
187 /*=================*/
188  ins_node_t* node,
189  dtuple_t* row)
190 {
191  node->state = INS_NODE_SET_IX_LOCK;
192  node->index = NULL;
193  node->entry = NULL;
194 
195  node->row = row;
196 
197  mem_heap_empty(node->entry_sys_heap);
198 
199  /* Create templates for index entries */
200 
201  ins_node_create_entry_list(node);
202 
203  /* Allocate from entry_sys_heap buffers for sys fields */
204 
205  row_ins_alloc_sys_fields(node);
206 
207  /* As we allocated a new trx id buf, the trx id should be written
208  there again: */
209 
210  node->trx_id = 0;
211 }
212 
213 /*******************************************************************/
218 static
219 ulint
220 row_ins_sec_index_entry_by_modify(
221 /*==============================*/
222  ulint mode,
225  btr_cur_t* cursor,
226  const dtuple_t* entry,
227  que_thr_t* thr,
228  mtr_t* mtr)
230 {
231  big_rec_t* dummy_big_rec;
232  mem_heap_t* heap;
233  upd_t* update;
234  rec_t* rec;
235  ulint err;
236 
237  rec = btr_cur_get_rec(cursor);
238 
239  ut_ad(!dict_index_is_clust(cursor->index));
240  ut_ad(rec_get_deleted_flag(rec,
241  dict_table_is_comp(cursor->index->table)));
242 
243  /* We know that in the alphabetical ordering, entry and rec are
244  identical. But in their binary form there may be differences if
245  there are char fields in them. Therefore we have to calculate the
246  difference. */
247 
248  heap = mem_heap_create(1024);
249 
250  update = row_upd_build_sec_rec_difference_binary(
251  cursor->index, entry, rec, thr_get_trx(thr), heap);
252  if (mode == BTR_MODIFY_LEAF) {
253  /* Try an optimistic updating of the record, keeping changes
254  within the page */
255 
256  err = btr_cur_optimistic_update(BTR_KEEP_SYS_FLAG, cursor,
257  update, 0, thr, mtr);
258  switch (err) {
259  case DB_OVERFLOW:
260  case DB_UNDERFLOW:
261  case DB_ZIP_OVERFLOW:
262  err = DB_FAIL;
263  }
264  } else {
265  ut_a(mode == BTR_MODIFY_TREE);
266  if (buf_LRU_buf_pool_running_out()) {
267 
268  err = DB_LOCK_TABLE_FULL;
269 
270  goto func_exit;
271  }
272 
273  err = btr_cur_pessimistic_update(BTR_KEEP_SYS_FLAG, cursor,
274  &heap, &dummy_big_rec, update,
275  0, thr, mtr);
276  ut_ad(!dummy_big_rec);
277  }
278 func_exit:
279  mem_heap_free(heap);
280 
281  return(err);
282 }
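
The switch above collapses every "does not fit in the page" outcome into DB_FAIL so that the caller can retry the whole operation with BTR_MODIFY_TREE. A condensed model of that mapping and retry, using stub error codes and functions rather than the real ones:

    enum err_sketch { OK_S, FAIL_S, OVERFLOW_S, UNDERFLOW_S, ZIP_OVERFLOW_S };

    err_sketch optimistic_update_stub()  { return OVERFLOW_S; } /* pretend the page is full */
    err_sketch pessimistic_update_stub() { return OK_S; }       /* tree path shown succeeding */

    err_sketch update_leaf_then_tree()
    {
            err_sketch err = optimistic_update_stub();
            switch (err) {
            case OVERFLOW_S:
            case UNDERFLOW_S:
            case ZIP_OVERFLOW_S:
                    err = FAIL_S;                    /* page-local change impossible */
                    break;
            default:
                    break;
            }
            if (err == FAIL_S) {
                    err = pessimistic_update_stub(); /* retry, allowing page splits/merges */
            }
            return err;
    }
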
283 
284 /*******************************************************************/
289 static
290 ulint
291 row_ins_clust_index_entry_by_modify(
292 /*================================*/
293  ulint mode,
296  btr_cur_t* cursor,
297  mem_heap_t** heap,
298  big_rec_t** big_rec,
301  const dtuple_t* entry,
302  que_thr_t* thr,
303  mtr_t* mtr)
305 {
306  rec_t* rec;
307  upd_t* update;
308  ulint err;
309 
310  ut_ad(dict_index_is_clust(cursor->index));
311 
312  *big_rec = NULL;
313 
314  rec = btr_cur_get_rec(cursor);
315 
316  ut_ad(rec_get_deleted_flag(rec,
317  dict_table_is_comp(cursor->index->table)));
318 
319  if (!*heap) {
320  *heap = mem_heap_create(1024);
321  }
322 
323  /* Build an update vector containing all the fields to be modified;
324  NOTE that this vector may NOT contain system columns trx_id or
325  roll_ptr */
326 
327  update = row_upd_build_difference_binary(cursor->index, entry, rec,
328  thr_get_trx(thr), *heap);
329  if (mode == BTR_MODIFY_LEAF) {
330  /* Try optimistic updating of the record, keeping changes
331  within the page */
332 
333  err = btr_cur_optimistic_update(0, cursor, update, 0, thr,
334  mtr);
335  switch (err) {
336  case DB_OVERFLOW:
337  case DB_UNDERFLOW:
338  case DB_ZIP_OVERFLOW:
339  err = DB_FAIL;
340  }
341  } else {
342  ut_a(mode == BTR_MODIFY_TREE);
343  if (buf_LRU_buf_pool_running_out()) {
344 
345  return(DB_LOCK_TABLE_FULL);
346 
347  }
348  err = btr_cur_pessimistic_update(0, cursor,
349  heap, big_rec, update,
350  0, thr, mtr);
351  }
352 
353  return(err);
354 }
355 
356 /*********************************************************************/
360 static
361 ibool
362 row_ins_cascade_ancestor_updates_table(
363 /*===================================*/
364  que_node_t* node,
365  dict_table_t* table)
366 {
367  que_node_t* parent;
368  upd_node_t* upd_node;
369 
370  parent = que_node_get_parent(node);
371 
372  while (que_node_get_type(parent) == QUE_NODE_UPDATE) {
373 
374  upd_node = static_cast<upd_node_t *>(parent);
375 
376  if (upd_node->table == table && upd_node->is_delete == FALSE) {
377 
378  return(TRUE);
379  }
380 
381  parent = que_node_get_parent(parent);
382 
383  ut_a(parent);
384  }
385 
386  return(FALSE);
387 }
388 
389 /*********************************************************************/
393 static
394 ulint
395 row_ins_cascade_n_ancestors(
396 /*========================*/
397  que_node_t* node)
398 {
399  que_node_t* parent;
400  ulint n_ancestors = 0;
401 
402  parent = que_node_get_parent(node);
403 
404  while (que_node_get_type(parent) == QUE_NODE_UPDATE) {
405  n_ancestors++;
406 
407  parent = que_node_get_parent(parent);
408 
409  ut_a(parent);
410  }
411 
412  return(n_ancestors);
413 }
414 
415 /******************************************************************/
422 static
423 ulint
424 row_ins_cascade_calc_update_vec(
425 /*============================*/
426  upd_node_t* node,
428  dict_foreign_t* foreign,
430  mem_heap_t* heap)
432 {
433  upd_node_t* cascade = node->cascade_node;
434  dict_table_t* table = foreign->foreign_table;
435  dict_index_t* index = foreign->foreign_index;
436  upd_t* update;
437  upd_field_t* ufield;
438  dict_table_t* parent_table;
439  dict_index_t* parent_index;
440  upd_t* parent_update;
441  upd_field_t* parent_ufield;
442  ulint n_fields_updated;
443  ulint parent_field_no;
444  ulint i;
445  ulint j;
446 
447  ut_a(node);
448  ut_a(foreign);
449  ut_a(cascade);
450  ut_a(table);
451  ut_a(index);
452 
453  /* Calculate the appropriate update vector which will set the fields
454  in the child index record to the same value (possibly padded with
455  spaces if the column is a fixed length CHAR or FIXBINARY column) as
456  the referenced index record will get in the update. */
457 
458  parent_table = node->table;
459  ut_a(parent_table == foreign->referenced_table);
460  parent_index = foreign->referenced_index;
461  parent_update = node->update;
462 
463  update = cascade->update;
464 
465  update->info_bits = 0;
466  update->n_fields = foreign->n_fields;
467 
468  n_fields_updated = 0;
469 
470  for (i = 0; i < foreign->n_fields; i++) {
471 
472  parent_field_no = dict_table_get_nth_col_pos(
473  parent_table,
474  dict_index_get_nth_col_no(parent_index, i));
475 
476  for (j = 0; j < parent_update->n_fields; j++) {
477  parent_ufield = parent_update->fields + j;
478 
479  if (parent_ufield->field_no == parent_field_no) {
480 
481  ulint min_size;
482  const dict_col_t* col;
483  ulint ufield_len;
484 
485  col = dict_index_get_nth_col(index, i);
486 
487  /* A field in the parent index record is
488  updated. Let us make the update vector
489  field for the child table. */
490 
491  ufield = update->fields + n_fields_updated;
492 
493  ufield->field_no
494  = dict_table_get_nth_col_pos(
495  table, dict_col_get_no(col));
496  ufield->exp = NULL;
497 
498  ufield->new_val = parent_ufield->new_val;
499  ufield_len = dfield_get_len(&ufield->new_val);
500 
501  /* Clear the "external storage" flag */
502  dfield_set_len(&ufield->new_val, ufield_len);
503 
504  /* Do not allow a NOT NULL column to be
505  updated as NULL */
506 
507  if (dfield_is_null(&ufield->new_val)
508  && (col->prtype & DATA_NOT_NULL)) {
509 
510  return(ULINT_UNDEFINED);
511  }
512 
513  /* If the new value would not fit in the
514  column, do not allow the update */
515 
516  if (!dfield_is_null(&ufield->new_val)
517  && dtype_get_at_most_n_mbchars(
518  col->prtype, col->mbminmaxlen,
519  col->len,
520  ufield_len,
521  static_cast<const char *>(dfield_get_data(&ufield->new_val)))
522  < ufield_len) {
523 
524  return(ULINT_UNDEFINED);
525  }
526 
527  /* If the parent column type has a different
528  length than the child column type, we may
529  need to pad with spaces the new value of the
530  child column */
531 
532  min_size = dict_col_get_min_size(col);
533 
534  /* Because UNIV_SQL_NULL (the marker
535  of SQL NULL values) exceeds all possible
536  values of min_size, the test below will
537  not hold for SQL NULL columns. */
538 
539  if (min_size > ufield_len) {
540 
541  byte* pad;
542  ulint pad_len;
543  byte* padded_data;
544  ulint mbminlen;
545 
546  padded_data = static_cast<unsigned char *>(mem_heap_alloc(
547  heap, min_size));
548 
549  pad = padded_data + ufield_len;
550  pad_len = min_size - ufield_len;
551 
552  memcpy(padded_data,
553  dfield_get_data(&ufield
554  ->new_val),
555  ufield_len);
556 
557  mbminlen = dict_col_get_mbminlen(col);
558 
559  ut_ad(!(ufield_len % mbminlen));
560  ut_ad(!(min_size % mbminlen));
561 
562  if (mbminlen == 1
563  && dtype_get_charset_coll(
564  col->prtype)
565  == DATA_MYSQL_BINARY_CHARSET_COLL) {
566  /* Do not pad BINARY columns */
567  return(ULINT_UNDEFINED);
568  }
569 
570  row_mysql_pad_col(mbminlen,
571  pad, pad_len);
572  dfield_set_data(&ufield->new_val,
573  padded_data, min_size);
574  }
575 
576  n_fields_updated++;
577  }
578  }
579  }
580 
581  update->n_fields = n_fields_updated;
582 
583  return(n_fields_updated);
584 }
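
The padding branch above can be restated on its own: when the child column's minimum (fixed) size exceeds the incoming value, the value is padded up to that size, except for BINARY columns, which are rejected. A simplified model assuming a single-byte character set (mbminlen == 1):

    #include <string>

    std::string pad_cascaded_char_value(const std::string& new_val, std::size_t min_size)
    {
            std::string padded = new_val;
            if (min_size > padded.size()) {
                    padded.append(min_size - padded.size(), ' ');  /* space padding */
            }
            return padded;  /* the real code pads per mbminlen and refuses BINARY columns */
    }
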
585 
586 /*********************************************************************/
589 static
590 void
591 row_ins_set_detailed(
592 /*=================*/
593  trx_t* trx,
594  dict_foreign_t* foreign)
595 {
596  mutex_enter(&srv_misc_tmpfile_mutex);
597  rewind(srv_misc_tmpfile);
598 
599  if (os_file_set_eof(srv_misc_tmpfile)) {
600  ut_print_name(srv_misc_tmpfile, trx, TRUE,
601  foreign->foreign_table_name);
602  dict_print_info_on_foreign_key_in_create_format(
603  srv_misc_tmpfile, trx, foreign, FALSE);
604  trx_set_detailed_error_from_file(trx, srv_misc_tmpfile);
605  } else {
606  trx_set_detailed_error(trx, "temp file operation failed");
607  }
608 
609  mutex_exit(&srv_misc_tmpfile_mutex);
610 }
611 
612 /*********************************************************************/
615 static
616 void
617 row_ins_foreign_report_err(
618 /*=======================*/
619  const char* errstr,
621  que_thr_t* thr,
623  dict_foreign_t* foreign,
624  const rec_t* rec,
626  const dtuple_t* entry)
628 {
629  FILE* ef = dict_foreign_err_file;
630  trx_t* trx = thr_get_trx(thr);
631 
632  row_ins_set_detailed(trx, foreign);
633 
634  mutex_enter(&dict_foreign_err_mutex);
635  rewind(ef);
636  ut_print_timestamp(ef);
637  fputs(" Transaction:\n", ef);
638  trx_print(ef, trx, 600);
639 
640  fputs("Foreign key constraint fails for table ", ef);
641  ut_print_name(ef, trx, TRUE, foreign->foreign_table_name);
642  fputs(":\n", ef);
643  dict_print_info_on_foreign_key_in_create_format(ef, trx, foreign,
644  TRUE);
645  putc('\n', ef);
646  fputs(errstr, ef);
647  fputs(" in parent table, in index ", ef);
648  ut_print_name(ef, trx, FALSE, foreign->referenced_index->name);
649  if (entry) {
650  fputs(" tuple:\n", ef);
651  dtuple_print(ef, entry);
652  }
653  fputs("\nBut in child table ", ef);
654  ut_print_name(ef, trx, TRUE, foreign->foreign_table_name);
655  fputs(", in index ", ef);
656  ut_print_name(ef, trx, FALSE, foreign->foreign_index->name);
657  if (rec) {
658  fputs(", there is a record:\n", ef);
659  rec_print(ef, rec, foreign->foreign_index);
660  } else {
661  fputs(", the record is not available\n", ef);
662  }
663  putc('\n', ef);
664 
665  mutex_exit(&dict_foreign_err_mutex);
666 }
667 
668 /*********************************************************************/
672 static
673 void
674 row_ins_foreign_report_add_err(
675 /*===========================*/
676  trx_t* trx,
677  dict_foreign_t* foreign,
678  const rec_t* rec,
681  const dtuple_t* entry)
683 {
684  FILE* ef = dict_foreign_err_file;
685 
686  row_ins_set_detailed(trx, foreign);
687 
688  mutex_enter(&dict_foreign_err_mutex);
689  rewind(ef);
690  ut_print_timestamp(ef);
691  fputs(" Transaction:\n", ef);
692  trx_print(ef, trx, 600);
693  fputs("Foreign key constraint fails for table ", ef);
694  ut_print_name(ef, trx, TRUE, foreign->foreign_table_name);
695  fputs(":\n", ef);
696  dict_print_info_on_foreign_key_in_create_format(ef, trx, foreign,
697  TRUE);
698  fputs("\nTrying to add in child table, in index ", ef);
699  ut_print_name(ef, trx, FALSE, foreign->foreign_index->name);
700  if (entry) {
701  fputs(" tuple:\n", ef);
702  /* TODO: DB_TRX_ID and DB_ROLL_PTR may be uninitialized.
703  It would be better to only display the user columns. */
704  dtuple_print(ef, entry);
705  }
706  fputs("\nBut in parent table ", ef);
707  ut_print_name(ef, trx, TRUE, foreign->referenced_table_name);
708  fputs(", in index ", ef);
709  ut_print_name(ef, trx, FALSE, foreign->referenced_index->name);
710  fputs(",\nthe closest match we can find is record:\n", ef);
711  if (rec && page_rec_is_supremum(rec)) {
712  /* If the cursor ended on a supremum record, it is better
713  to report the previous record in the error message, so that
714  the user gets a more descriptive error message. */
715  rec = page_rec_get_prev_const(rec);
716  }
717 
718  if (rec) {
719  rec_print(ef, rec, foreign->referenced_index);
720  }
721  putc('\n', ef);
722 
723  mutex_exit(&dict_foreign_err_mutex);
724 }
725 
726 /*********************************************************************/
728 static
729 void
730 row_ins_invalidate_query_cache(
731 /*===========================*/
732  que_thr_t* unused,
734  const char* name)
736 {
737  char* buf;
738  char* ptr;
739  ulint len = strlen(name) + 1;
740 
741  (void)unused;
742 
743  buf = mem_strdupl(name, len);
744 
745  ptr = strchr(buf, '/');
746  ut_a(ptr);
747  *ptr = '\0';
748 
749  mem_free(buf);
750 }
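
The routine above receives InnoDB's internal "database/table" name and isolates the database part; the actual query-cache invalidation call was removed in Drizzle, so only the string handling remains. A standalone equivalent of that split:

    #include <cstring>
    #include <string>

    std::string database_of(const char* internal_name)  /* e.g. "test/child" -> "test" */
    {
            const char* slash = std::strchr(internal_name, '/');
            return slash ? std::string(internal_name, slash - internal_name)
                         : std::string(internal_name);
    }
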
751 
752 /*********************************************************************/
757 static
758 ulint
759 row_ins_foreign_check_on_constraint(
760 /*================================*/
761  que_thr_t* thr,
763  dict_foreign_t* foreign,
765  btr_pcur_t* pcur,
767  dtuple_t* entry,
769  mtr_t* mtr)
771 {
772  upd_node_t* node;
773  upd_node_t* cascade;
774  dict_table_t* table = foreign->foreign_table;
775  dict_index_t* index;
776  dict_index_t* clust_index;
777  dtuple_t* ref;
778  mem_heap_t* upd_vec_heap = NULL;
779  const rec_t* rec;
780  const rec_t* clust_rec;
781  const buf_block_t* clust_block;
782  upd_t* update;
783  ulint n_to_update;
784  ulint err;
785  ulint i;
786  trx_t* trx;
787  mem_heap_t* tmp_heap = NULL;
788 
789  ut_a(thr);
790  ut_a(foreign);
791  ut_a(pcur);
792  ut_a(mtr);
793 
794  trx = thr_get_trx(thr);
795 
796  /* Since we are going to delete or update a row, we have to invalidate
797  the MySQL query cache for table. A deadlock of threads is not possible
798  here because the caller of this function does not hold any latches with
799  the sync0sync.h rank above the kernel mutex. The query cache mutex has
800  a rank just above the kernel mutex. */
801 
802  row_ins_invalidate_query_cache(thr, table->name);
803 
804  node = static_cast<upd_node_t *>(thr->run_node);
805 
806  if (node->is_delete && 0 == (foreign->type
807  & (DICT_FOREIGN_ON_DELETE_CASCADE
808  | DICT_FOREIGN_ON_DELETE_SET_NULL))) {
809 
810  row_ins_foreign_report_err("Trying to delete",
811  thr, foreign,
812  btr_pcur_get_rec(pcur), entry);
813 
814  return(DB_ROW_IS_REFERENCED);
815  }
816 
817  if (!node->is_delete && 0 == (foreign->type
818  & (DICT_FOREIGN_ON_UPDATE_CASCADE
819  | DICT_FOREIGN_ON_UPDATE_SET_NULL))) {
820 
821  /* This is an UPDATE */
822 
823  row_ins_foreign_report_err("Trying to update",
824  thr, foreign,
825  btr_pcur_get_rec(pcur), entry);
826 
827  return(DB_ROW_IS_REFERENCED);
828  }
829 
830  if (node->cascade_node == NULL) {
831  /* Extend our query graph by creating a child to current
832  update node. The child is used in the cascade or set null
833  operation. */
834 
835  node->cascade_heap = mem_heap_create(128);
836  node->cascade_node = row_create_update_node_for_mysql(
837  table, node->cascade_heap);
838  que_node_set_parent(node->cascade_node, node);
839  }
840 
841  /* Initialize cascade_node to do the operation we want. Note that we
842  use the SAME cascade node to do all foreign key operations of the
843  SQL DELETE: the table of the cascade node may change if there are
844  several child tables to the table where the delete is done! */
845 
846  cascade = node->cascade_node;
847 
848  cascade->table = table;
849 
850  cascade->foreign = foreign;
851 
852  if (node->is_delete
853  && (foreign->type & DICT_FOREIGN_ON_DELETE_CASCADE)) {
854  cascade->is_delete = TRUE;
855  } else {
856  cascade->is_delete = FALSE;
857 
858  if (foreign->n_fields > cascade->update_n_fields) {
859  /* We have to make the update vector longer */
860 
861  cascade->update = upd_create(foreign->n_fields,
862  node->cascade_heap);
863  cascade->update_n_fields = foreign->n_fields;
864  }
865  }
866 
867  /* We do not allow cyclic cascaded updating (DELETE is allowed,
868  but not UPDATE) of the same table, as this can lead to an infinite
869  cycle. Check that we are not updating the same table which is
870  already being modified in this cascade chain. We have to check
871  this also because the modification of the indexes of a 'parent'
872  table may still be incomplete, and we must avoid seeing the indexes
873  of the parent table in an inconsistent state! */
874 
875  if (!cascade->is_delete
876  && row_ins_cascade_ancestor_updates_table(cascade, table)) {
877 
878  /* We do not know if this would break foreign key
879  constraints, but play safe and return an error */
880 
881  err = DB_ROW_IS_REFERENCED;
882 
883  row_ins_foreign_report_err(
884  "Trying an update, possibly causing a cyclic"
885  " cascaded update\n"
886  "in the child table,", thr, foreign,
887  btr_pcur_get_rec(pcur), entry);
888 
889  goto nonstandard_exit_func;
890  }
891 
892  if (row_ins_cascade_n_ancestors(cascade) >= 15) {
893  err = DB_ROW_IS_REFERENCED;
894 
895  row_ins_foreign_report_err(
896  "Trying a too deep cascaded delete or update\n",
897  thr, foreign, btr_pcur_get_rec(pcur), entry);
898 
899  goto nonstandard_exit_func;
900  }
901 
902  index = btr_pcur_get_btr_cur(pcur)->index;
903 
904  ut_a(index == foreign->foreign_index);
905 
906  rec = btr_pcur_get_rec(pcur);
907 
908  if (dict_index_is_clust(index)) {
909  /* pcur is already positioned in the clustered index of
910  the child table */
911 
912  clust_index = index;
913  clust_rec = rec;
914  clust_block = btr_pcur_get_block(pcur);
915  } else {
916  /* We have to look for the record in the clustered index
917  in the child table */
918 
919  clust_index = dict_table_get_first_index(table);
920 
921  tmp_heap = mem_heap_create(256);
922 
923  ref = row_build_row_ref(ROW_COPY_POINTERS, index, rec,
924  tmp_heap);
925  btr_pcur_open_with_no_init(clust_index, ref,
926  PAGE_CUR_LE, BTR_SEARCH_LEAF,
927  cascade->pcur, 0, mtr);
928 
929  clust_rec = btr_pcur_get_rec(cascade->pcur);
930  clust_block = btr_pcur_get_block(cascade->pcur);
931 
932  if (!page_rec_is_user_rec(clust_rec)
933  || btr_pcur_get_low_match(cascade->pcur)
934  < dict_index_get_n_unique(clust_index)) {
935 
936  fputs("InnoDB: error in cascade of a foreign key op\n"
937  "InnoDB: ", stderr);
938  dict_index_name_print(stderr, trx, index);
939 
940  fputs("\n"
941  "InnoDB: record ", stderr);
942  rec_print(stderr, rec, index);
943  fputs("\n"
944  "InnoDB: clustered record ", stderr);
945  rec_print(stderr, clust_rec, clust_index);
946  fputs("\n"
947  "InnoDB: Submit a detailed bug report to"
948  " http://bugs.mysql.com\n", stderr);
949 
950  err = DB_SUCCESS;
951 
952  goto nonstandard_exit_func;
953  }
954  }
955 
956  /* Set an X-lock on the row to delete or update in the child table */
957 
958  err = lock_table(0, table, LOCK_IX, thr);
959 
960  if (err == DB_SUCCESS) {
961  /* Here it suffices to use a LOCK_REC_NOT_GAP type lock;
962  we already have a normal shared lock on the appropriate
963  gap if the search criterion was not unique */
964 
965  err = lock_clust_rec_read_check_and_lock_alt(
966  0, clust_block, clust_rec, clust_index,
967  LOCK_X, LOCK_REC_NOT_GAP, thr);
968  }
969 
970  if (err != DB_SUCCESS) {
971 
972  goto nonstandard_exit_func;
973  }
974 
975  if (rec_get_deleted_flag(clust_rec, dict_table_is_comp(table))) {
976  /* This can happen if there is a circular reference of
977  rows such that cascading delete comes to delete a row
978  already in the process of being delete marked */
979  err = DB_SUCCESS;
980 
981  goto nonstandard_exit_func;
982  }
983 
984  if ((node->is_delete
985  && (foreign->type & DICT_FOREIGN_ON_DELETE_SET_NULL))
986  || (!node->is_delete
987  && (foreign->type & DICT_FOREIGN_ON_UPDATE_SET_NULL))) {
988 
989  /* Build the appropriate update vector which sets
990  foreign->n_fields first fields in rec to SQL NULL */
991 
992  update = cascade->update;
993 
994  update->info_bits = 0;
995  update->n_fields = foreign->n_fields;
996 
997  for (i = 0; i < foreign->n_fields; i++) {
998  upd_field_t* ufield = &update->fields[i];
999 
1000  ufield->field_no = dict_table_get_nth_col_pos(
1001  table,
1002  dict_index_get_nth_col_no(index, i));
1003  ufield->orig_len = 0;
1004  ufield->exp = NULL;
1005  dfield_set_null(&ufield->new_val);
1006  }
1007  }
1008 
1009  if (!node->is_delete
1010  && (foreign->type & DICT_FOREIGN_ON_UPDATE_CASCADE)) {
1011 
1012  /* Build the appropriate update vector which sets changing
1013  foreign->n_fields first fields in rec to new values */
1014 
1015  upd_vec_heap = mem_heap_create(256);
1016 
1017  n_to_update = row_ins_cascade_calc_update_vec(node, foreign,
1018  upd_vec_heap);
1019  if (n_to_update == ULINT_UNDEFINED) {
1020  err = DB_ROW_IS_REFERENCED;
1021 
1022  row_ins_foreign_report_err(
1023  "Trying a cascaded update where the"
1024  " updated value in the child\n"
1025  "table would not fit in the length"
1026  " of the column, or the value would\n"
1027  "be NULL and the column is"
1028  " declared as not NULL in the child table,",
1029  thr, foreign, btr_pcur_get_rec(pcur), entry);
1030 
1031  goto nonstandard_exit_func;
1032  }
1033 
1034  if (cascade->update->n_fields == 0) {
1035 
1036  /* The update does not change any columns referred
1037  to in this foreign key constraint: no need to do
1038  anything */
1039 
1040  err = DB_SUCCESS;
1041 
1042  goto nonstandard_exit_func;
1043  }
1044  }
1045 
1046  /* Store pcur position and initialize or store the cascade node
1047  pcur stored position */
1048 
1049  btr_pcur_store_position(pcur, mtr);
1050 
1051  if (index == clust_index) {
1052  btr_pcur_copy_stored_position(cascade->pcur, pcur);
1053  } else {
1054  btr_pcur_store_position(cascade->pcur, mtr);
1055  }
1056 
1057  mtr_commit(mtr);
1058 
1059  ut_a(cascade->pcur->rel_pos == BTR_PCUR_ON);
1060 
1061  cascade->state = UPD_NODE_UPDATE_CLUSTERED;
1062 
1063  err = row_update_cascade_for_mysql(thr, cascade,
1064  foreign->foreign_table);
1065 
1066  if (foreign->foreign_table->n_foreign_key_checks_running == 0) {
1067  fprintf(stderr,
1068  "InnoDB: error: table %s has the counter 0"
1069  " though there is\n"
1070  "InnoDB: a FOREIGN KEY check running on it.\n",
1071  foreign->foreign_table->name);
1072  }
1073 
1074  /* Release the data dictionary latch for a while, so that we do not
1075  starve other threads from doing CREATE TABLE etc. if we have a huge
1076  cascaded operation running. The counter n_foreign_key_checks_running
1077  will prevent other users from dropping or ALTERing the table when we
1078  release the latch. */
1079 
1080  row_mysql_unfreeze_data_dictionary(thr_get_trx(thr));
1081  row_mysql_freeze_data_dictionary(thr_get_trx(thr));
1082 
1083  mtr_start(mtr);
1084 
1085  /* Restore pcur position */
1086 
1087  btr_pcur_restore_position(BTR_SEARCH_LEAF, pcur, mtr);
1088 
1089  if (tmp_heap) {
1090  mem_heap_free(tmp_heap);
1091  }
1092 
1093  if (upd_vec_heap) {
1094  mem_heap_free(upd_vec_heap);
1095  }
1096 
1097  return(err);
1098 
1099 nonstandard_exit_func:
1100  if (tmp_heap) {
1101  mem_heap_free(tmp_heap);
1102  }
1103 
1104  if (upd_vec_heap) {
1105  mem_heap_free(upd_vec_heap);
1106  }
1107 
1108  btr_pcur_store_position(pcur, mtr);
1109 
1110  mtr_commit(mtr);
1111  mtr_start(mtr);
1112 
1113  btr_pcur_restore_position(BTR_SEARCH_LEAF, pcur, mtr);
1114 
1115  return(err);
1116 }
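
Compressed into a decision table, the branches above choose one of four outcomes for the child row; the enum and flags below are illustrative stand-ins for foreign->type and the DICT_FOREIGN_ON_* bits:

    enum fk_action { RESTRICT_ROW, DELETE_CHILD, SET_CHILD_NULL, CASCADE_UPDATE };

    fk_action resolve_fk_action(bool parent_is_delete,
                                bool on_delete_cascade, bool on_delete_set_null,
                                bool on_update_cascade, bool on_update_set_null)
    {
            if (parent_is_delete) {
                    if (on_delete_cascade)  return DELETE_CHILD;
                    if (on_delete_set_null) return SET_CHILD_NULL;
            } else {
                    if (on_update_cascade)  return CASCADE_UPDATE;
                    if (on_update_set_null) return SET_CHILD_NULL;
            }
            return RESTRICT_ROW;  /* no action defined: DB_ROW_IS_REFERENCED */
    }
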
1117 
1118 /*********************************************************************/
1122 static
1123 enum db_err
1124 row_ins_set_shared_rec_lock(
1125 /*========================*/
1126  ulint type,
1128  const buf_block_t* block,
1129  const rec_t* rec,
1130  dict_index_t* index,
1131  const ulint* offsets,
1132  que_thr_t* thr)
1133 {
1134  enum db_err err;
1135 
1136  ut_ad(rec_offs_validate(rec, index, offsets));
1137 
1138  if (dict_index_is_clust(index)) {
1139  err = lock_clust_rec_read_check_and_lock(
1140  0, block, rec, index, offsets, LOCK_S, type, thr);
1141  } else {
1142  err = lock_sec_rec_read_check_and_lock(
1143  0, block, rec, index, offsets, LOCK_S, type, thr);
1144  }
1145 
1146  return(err);
1147 }
1148 
1149 /*********************************************************************/
1153 static
1154 enum db_err
1155 row_ins_set_exclusive_rec_lock(
1156 /*===========================*/
1157  ulint type,
1159  const buf_block_t* block,
1160  const rec_t* rec,
1161  dict_index_t* index,
1162  const ulint* offsets,
1163  que_thr_t* thr)
1164 {
1165  enum db_err err;
1166 
1167  ut_ad(rec_offs_validate(rec, index, offsets));
1168 
1169  if (dict_index_is_clust(index)) {
1170  err = lock_clust_rec_read_check_and_lock(
1171  0, block, rec, index, offsets, LOCK_X, type, thr);
1172  } else {
1173  err = lock_sec_rec_read_check_and_lock(
1174  0, block, rec, index, offsets, LOCK_X, type, thr);
1175  }
1176 
1177  return(err);
1178 }
1179 
1180 /***************************************************************/
1185 UNIV_INTERN
1186 ulint
1187 row_ins_check_foreign_constraint(
1188 /*=============================*/
1189  ibool check_ref,
1192  dict_foreign_t* foreign,
1195  dict_table_t* table,
1197  dtuple_t* entry,
1198  que_thr_t* thr)
1199 {
1200  upd_node_t* upd_node;
1201  dict_table_t* check_table;
1202  dict_index_t* check_index;
1203  ulint n_fields_cmp;
1204  btr_pcur_t pcur;
1205  int cmp;
1206  ulint err;
1207  ulint i;
1208  mtr_t mtr;
1209  trx_t* trx = thr_get_trx(thr);
1210  mem_heap_t* heap = NULL;
1211  ulint offsets_[REC_OFFS_NORMAL_SIZE];
1212  ulint* offsets = offsets_;
1213  rec_offs_init(offsets_);
1214 
1215 run_again:
1216 #ifdef UNIV_SYNC_DEBUG
1217  ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_SHARED));
1218 #endif /* UNIV_SYNC_DEBUG */
1219 
1220  err = DB_SUCCESS;
1221 
1222  if (trx->check_foreigns == FALSE) {
1223  /* The user has suppressed foreign key checks currently for
1224  this session */
1225  goto exit_func;
1226  }
1227 
1228  /* If any of the foreign key fields in entry is SQL NULL, we
1229  suppress the foreign key check: this is compatible with Oracle,
1230  for example */
1231 
1232  for (i = 0; i < foreign->n_fields; i++) {
1233  if (UNIV_SQL_NULL == dfield_get_len(
1234  dtuple_get_nth_field(entry, i))) {
1235 
1236  goto exit_func;
1237  }
1238  }
1239 
1240  if (que_node_get_type(thr->run_node) == QUE_NODE_UPDATE) {
1241  upd_node = static_cast<upd_node_t *>(thr->run_node);
1242 
1243  if (!(upd_node->is_delete) && upd_node->foreign == foreign) {
1244  /* If a cascaded update is done as defined by a
1245  foreign key constraint, do not check that
1246  constraint for the child row. In ON UPDATE CASCADE
1247  the update of the parent row is only half done when
1248  we come here: if we would check the constraint here
1249  for the child row it would fail.
1250 
1251  A QUESTION remains: if in the child table there are
1252  several constraints which refer to the same parent
1253  table, we should merge all updates to the child as
1254  one update? And the updates can be contradictory!
1255  Currently we just perform the update associated
1256  with each foreign key constraint, one after
1257  another, and the user has problems predicting in
1258  which order they are performed. */
1259 
1260  goto exit_func;
1261  }
1262  }
1263 
1264  if (check_ref) {
1265  check_table = foreign->referenced_table;
1266  check_index = foreign->referenced_index;
1267  } else {
1268  check_table = foreign->foreign_table;
1269  check_index = foreign->foreign_index;
1270  }
1271 
1272  if (check_table == NULL || check_table->ibd_file_missing) {
1273  if (check_ref) {
1274  FILE* ef = dict_foreign_err_file;
1275 
1276  row_ins_set_detailed(trx, foreign);
1277 
1278  mutex_enter(&dict_foreign_err_mutex);
1279  rewind(ef);
1280  ut_print_timestamp(ef);
1281  fputs(" Transaction:\n", ef);
1282  trx_print(ef, trx, 600);
1283  fputs("Foreign key constraint fails for table ", ef);
1284  ut_print_name(ef, trx, TRUE,
1285  foreign->foreign_table_name);
1286  fputs(":\n", ef);
1287  dict_print_info_on_foreign_key_in_create_format(
1288  ef, trx, foreign, TRUE);
1289  fputs("\nTrying to add to index ", ef);
1290  ut_print_name(ef, trx, FALSE,
1291  foreign->foreign_index->name);
1292  fputs(" tuple:\n", ef);
1293  dtuple_print(ef, entry);
1294  fputs("\nBut the parent table ", ef);
1295  ut_print_name(ef, trx, TRUE,
1296  foreign->referenced_table_name);
1297  fputs("\nor its .ibd file does"
1298  " not currently exist!\n", ef);
1299  mutex_exit(&dict_foreign_err_mutex);
1300 
1301  err = DB_NO_REFERENCED_ROW;
1302  }
1303 
1304  goto exit_func;
1305  }
1306 
1307  ut_a(check_table);
1308  ut_a(check_index);
1309 
1310  if (check_table != table) {
1311  /* We already have a LOCK_IX on table, but not necessarily
1312  on check_table */
1313 
1314  err = lock_table(0, check_table, LOCK_IS, thr);
1315 
1316  if (err != DB_SUCCESS) {
1317 
1318  goto do_possible_lock_wait;
1319  }
1320  }
1321 
1322  mtr_start(&mtr);
1323 
1324  /* Store old value on n_fields_cmp */
1325 
1326  n_fields_cmp = dtuple_get_n_fields_cmp(entry);
1327 
1328  dtuple_set_n_fields_cmp(entry, foreign->n_fields);
1329 
1330  btr_pcur_open(check_index, entry, PAGE_CUR_GE,
1331  BTR_SEARCH_LEAF, &pcur, &mtr);
1332 
1333  /* Scan index records and check if there is a matching record */
1334 
1335  do {
1336  const rec_t* rec = btr_pcur_get_rec(&pcur);
1337  const buf_block_t* block = btr_pcur_get_block(&pcur);
1338 
1339  if (page_rec_is_infimum(rec)) {
1340 
1341  continue;
1342  }
1343 
1344  offsets = rec_get_offsets(rec, check_index,
1345  offsets, ULINT_UNDEFINED, &heap);
1346 
1347  if (page_rec_is_supremum(rec)) {
1348 
1349  err = row_ins_set_shared_rec_lock(LOCK_ORDINARY, block,
1350  rec, check_index,
1351  offsets, thr);
1352  switch (err) {
1353  case DB_SUCCESS_LOCKED_REC:
1354  case DB_SUCCESS:
1355  continue;
1356  default:
1357  goto end_scan;
1358  }
1359  }
1360 
1361  cmp = cmp_dtuple_rec(entry, rec, offsets);
1362 
1363  if (cmp == 0) {
1364  if (rec_get_deleted_flag(rec,
1365  rec_offs_comp(offsets))) {
1366  err = row_ins_set_shared_rec_lock(
1367  LOCK_ORDINARY, block,
1368  rec, check_index, offsets, thr);
1369  switch (err) {
1370  case DB_SUCCESS_LOCKED_REC:
1371  case DB_SUCCESS:
1372  break;
1373  default:
1374  goto end_scan;
1375  }
1376  } else {
1377  /* Found a matching record. Lock only
1378  a record because we can allow inserts
1379  into gaps */
1380 
1381  err = row_ins_set_shared_rec_lock(
1382  LOCK_REC_NOT_GAP, block,
1383  rec, check_index, offsets, thr);
1384 
1385  switch (err) {
1386  case DB_SUCCESS_LOCKED_REC:
1387  case DB_SUCCESS:
1388  break;
1389  default:
1390  goto end_scan;
1391  }
1392 
1393  if (check_ref) {
1394  err = DB_SUCCESS;
1395 
1396  goto end_scan;
1397  } else if (foreign->type != 0) {
1398  /* There is an ON UPDATE or ON DELETE
1399  condition: check them in a separate
1400  function */
1401 
1402  err = row_ins_foreign_check_on_constraint(
1403  thr, foreign, &pcur, entry,
1404  &mtr);
1405  if (err != DB_SUCCESS) {
1406  /* Since reporting a plain
1407  "duplicate key" error
1408  message to the user in
1409  cases where a long CASCADE
1410  operation would lead to a
1411  duplicate key in some
1412  other table is very
1413  confusing, map duplicate
1414  key errors resulting from
1415  FK constraints to a
1416  separate error code. */
1417 
1418  if (err == DB_DUPLICATE_KEY) {
1419  err = DB_FOREIGN_DUPLICATE_KEY;
1420  }
1421 
1422  goto end_scan;
1423  }
1424 
1425  /* row_ins_foreign_check_on_constraint
1426  may have repositioned pcur on a
1427  different block */
1428  block = btr_pcur_get_block(&pcur);
1429  } else {
1430  row_ins_foreign_report_err(
1431  "Trying to delete or update",
1432  thr, foreign, rec, entry);
1433 
1434  err = DB_ROW_IS_REFERENCED;
1435  goto end_scan;
1436  }
1437  }
1438  } else {
1439  ut_a(cmp < 0);
1440 
1441  err = row_ins_set_shared_rec_lock(
1442  LOCK_GAP, block,
1443  rec, check_index, offsets, thr);
1444 
1445  switch (err) {
1446  case DB_SUCCESS_LOCKED_REC:
1447  case DB_SUCCESS:
1448  if (check_ref) {
1449  err = DB_NO_REFERENCED_ROW;
1450  row_ins_foreign_report_add_err(
1451  trx, foreign, rec, entry);
1452  } else {
1453  err = DB_SUCCESS;
1454  }
1455  }
1456 
1457  goto end_scan;
1458  }
1459  } while (btr_pcur_move_to_next(&pcur, &mtr));
1460 
1461  if (check_ref) {
1462  row_ins_foreign_report_add_err(
1463  trx, foreign, btr_pcur_get_rec(&pcur), entry);
1464  err = DB_NO_REFERENCED_ROW;
1465  } else {
1466  err = DB_SUCCESS;
1467  }
1468 
1469 end_scan:
1470  btr_pcur_close(&pcur);
1471 
1472  mtr_commit(&mtr);
1473 
1474  /* Restore old value */
1475  dtuple_set_n_fields_cmp(entry, n_fields_cmp);
1476 
1477 do_possible_lock_wait:
1478  if (err == DB_LOCK_WAIT) {
1479  trx->error_state = err;
1480 
1481  que_thr_stop_for_mysql(thr);
1482 
1483  srv_suspend_mysql_thread(thr);
1484 
1485  if (trx->error_state == DB_SUCCESS) {
1486 
1487  goto run_again;
1488  }
1489 
1490  err = trx->error_state;
1491  }
1492 
1493 exit_func:
1494  if (UNIV_LIKELY_NULL(heap)) {
1495  mem_heap_free(heap);
1496  }
1497  return(err);
1498 }
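
The early exit near the top of the function is worth calling out: a foreign key value with any SQL NULL component is never checked against the parent table. A self-contained restatement with illustrative types:

    #include <optional>
    #include <string>
    #include <vector>

    bool foreign_key_needs_check(const std::vector<std::optional<std::string>>& fk_value)
    {
            for (const auto& col : fk_value) {
                    if (!col.has_value()) {
                            return false;  /* any NULL component: constraint treated as satisfied */
                    }
            }
            return true;                   /* all components non-NULL: parent row must exist */
    }
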
1499 
1500 /***************************************************************/
1507 static
1508 ulint
1509 row_ins_check_foreign_constraints(
1510 /*==============================*/
1511  dict_table_t* table,
1512  dict_index_t* index,
1513  dtuple_t* entry,
1514  que_thr_t* thr)
1515 {
1516  dict_foreign_t* foreign;
1517  ulint err;
1518  trx_t* trx;
1519  ibool got_s_lock = FALSE;
1520 
1521  trx = thr_get_trx(thr);
1522 
1523  foreign = UT_LIST_GET_FIRST(table->foreign_list);
1524 
1525  while (foreign) {
1526  if (foreign->foreign_index == index) {
1527 
1528  if (foreign->referenced_table == NULL) {
1529  dict_table_get(foreign->referenced_table_name_lookup,
1530  FALSE);
1531  }
1532 
1533  if (0 == trx->dict_operation_lock_mode) {
1534  got_s_lock = TRUE;
1535 
1536  row_mysql_freeze_data_dictionary(trx);
1537  }
1538 
1539  if (foreign->referenced_table) {
1540  mutex_enter(&(dict_sys->mutex));
1541 
1542  (foreign->referenced_table
1543  ->n_foreign_key_checks_running)++;
1544 
1545  mutex_exit(&(dict_sys->mutex));
1546  }
1547 
1548  /* NOTE that if the thread ends up waiting for a lock
1549  we will release dict_operation_lock temporarily!
1550  But the counter on the table protects the referenced
1551  table from being dropped while the check is running. */
1552 
1553  err = row_ins_check_foreign_constraint(
1554  TRUE, foreign, table, entry, thr);
1555 
1556  if (foreign->referenced_table) {
1557  mutex_enter(&(dict_sys->mutex));
1558 
1559  ut_a(foreign->referenced_table
1560  ->n_foreign_key_checks_running > 0);
1561  (foreign->referenced_table
1562  ->n_foreign_key_checks_running)--;
1563 
1564  mutex_exit(&(dict_sys->mutex));
1565  }
1566 
1567  if (got_s_lock) {
1568  row_mysql_unfreeze_data_dictionary(trx);
1569  }
1570 
1571  if (err != DB_SUCCESS) {
1572  return(err);
1573  }
1574  }
1575 
1576  foreign = UT_LIST_GET_NEXT(foreign_list, foreign);
1577  }
1578 
1579  return(DB_SUCCESS);
1580 }
1581 
1582 /***************************************************************/
1586 static
1587 ibool
1588 row_ins_dupl_error_with_rec(
1589 /*========================*/
1590  const rec_t* rec,
1593  const dtuple_t* entry,
1594  dict_index_t* index,
1595  const ulint* offsets)
1596 {
1597  ulint matched_fields;
1598  ulint matched_bytes;
1599  ulint n_unique;
1600  ulint i;
1601 
1602  ut_ad(rec_offs_validate(rec, index, offsets));
1603 
1604  n_unique = dict_index_get_n_unique(index);
1605 
1606  matched_fields = 0;
1607  matched_bytes = 0;
1608 
1609  cmp_dtuple_rec_with_match(entry, rec, offsets,
1610  &matched_fields, &matched_bytes);
1611 
1612  if (matched_fields < n_unique) {
1613 
1614  return(FALSE);
1615  }
1616 
1617  /* In a unique secondary index we allow equal key values if they
1618  contain SQL NULLs */
1619 
1620  if (!dict_index_is_clust(index)) {
1621 
1622  for (i = 0; i < n_unique; i++) {
1623  if (UNIV_SQL_NULL == dfield_get_len(
1624  dtuple_get_nth_field(entry, i))) {
1625 
1626  return(FALSE);
1627  }
1628  }
1629  }
1630 
1631  return(!rec_get_deleted_flag(rec, rec_offs_comp(offsets)));
1632 }
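
The predicate above reduces to three conditions: every unique key field matches, no matched field is SQL NULL when the index is a unique secondary index (NULL != NULL there), and the record is not delete-marked. A condensed restatement with illustrative types, not the real rec_t/dtuple_t:

    #include <optional>
    #include <string>
    #include <vector>

    struct candidate_rec {
            std::vector<std::optional<std::string>> key;  /* first n_unique fields */
            bool delete_marked;
    };

    bool is_duplicate(const std::vector<std::optional<std::string>>& entry,
                      const candidate_rec& rec, bool clustered)
    {
            for (std::size_t i = 0; i < entry.size(); i++) {
                    if (entry[i] != rec.key[i]) {
                            return false;        /* some unique field differs */
                    }
                    if (!clustered && !entry[i].has_value()) {
                            return false;        /* NULL != NULL in unique secondary indexes */
                    }
            }
            return !rec.delete_marked;           /* delete-marked records do not count */
    }
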
1633 
1634 /***************************************************************/
1639 static
1640 ulint
1641 row_ins_scan_sec_index_for_duplicate(
1642 /*=================================*/
1643  dict_index_t* index,
1644  dtuple_t* entry,
1645  que_thr_t* thr)
1646 {
1647  ulint n_unique;
1648  ulint i;
1649  int cmp;
1650  ulint n_fields_cmp;
1651  btr_pcur_t pcur;
1652  ulint err = DB_SUCCESS;
1653  unsigned allow_duplicates;
1654  mtr_t mtr;
1655  mem_heap_t* heap = NULL;
1656  ulint offsets_[REC_OFFS_NORMAL_SIZE];
1657  ulint* offsets = offsets_;
1658  rec_offs_init(offsets_);
1659 
1660  n_unique = dict_index_get_n_unique(index);
1661 
1662  /* If the secondary index is unique, but one of the fields in the
1663  n_unique first fields is NULL, a unique key violation cannot occur,
1664  since we define NULL != NULL in this case */
1665 
1666  for (i = 0; i < n_unique; i++) {
1667  if (UNIV_SQL_NULL == dfield_get_len(
1668  dtuple_get_nth_field(entry, i))) {
1669 
1670  return(DB_SUCCESS);
1671  }
1672  }
1673 
1674  mtr_start(&mtr);
1675 
1676  /* Store old value on n_fields_cmp */
1677 
1678  n_fields_cmp = dtuple_get_n_fields_cmp(entry);
1679 
1680  dtuple_set_n_fields_cmp(entry, dict_index_get_n_unique(index));
1681 
1682  btr_pcur_open(index, entry, PAGE_CUR_GE, BTR_SEARCH_LEAF, &pcur, &mtr);
1683 
1684  allow_duplicates = thr_get_trx(thr)->duplicates & TRX_DUP_IGNORE;
1685 
1686  /* Scan index records and check if there is a duplicate */
1687 
1688  do {
1689  const rec_t* rec = btr_pcur_get_rec(&pcur);
1690  const buf_block_t* block = btr_pcur_get_block(&pcur);
1691 
1692  if (page_rec_is_infimum(rec)) {
1693 
1694  continue;
1695  }
1696 
1697  offsets = rec_get_offsets(rec, index, offsets,
1698  ULINT_UNDEFINED, &heap);
1699 
1700  if (allow_duplicates) {
1701 
1702  /* If the SQL-query will update or replace
1703  duplicate key we will take X-lock for
1704  duplicates ( REPLACE, LOAD DATAFILE REPLACE,
1705  INSERT ON DUPLICATE KEY UPDATE). */
1706 
1707  err = row_ins_set_exclusive_rec_lock(
1708  LOCK_ORDINARY, block,
1709  rec, index, offsets, thr);
1710  } else {
1711 
1712  err = row_ins_set_shared_rec_lock(
1713  LOCK_ORDINARY, block,
1714  rec, index, offsets, thr);
1715  }
1716 
1717  switch (err) {
1718  case DB_SUCCESS_LOCKED_REC:
1719  err = DB_SUCCESS;
1720  case DB_SUCCESS:
1721  break;
1722  default:
1723  goto end_scan;
1724  }
1725 
1726  if (page_rec_is_supremum(rec)) {
1727 
1728  continue;
1729  }
1730 
1731  cmp = cmp_dtuple_rec(entry, rec, offsets);
1732 
1733  if (cmp == 0) {
1734  if (row_ins_dupl_error_with_rec(rec, entry,
1735  index, offsets)) {
1736  err = DB_DUPLICATE_KEY;
1737 
1738  thr_get_trx(thr)->error_info = index;
1739 
1740  goto end_scan;
1741  }
1742  } else {
1743  ut_a(cmp < 0);
1744  goto end_scan;
1745  }
1746  } while (btr_pcur_move_to_next(&pcur, &mtr));
1747 
1748 end_scan:
1749  if (UNIV_LIKELY_NULL(heap)) {
1750  mem_heap_free(heap);
1751  }
1752  mtr_commit(&mtr);
1753 
1754  /* Restore old value */
1755  dtuple_set_n_fields_cmp(entry, n_fields_cmp);
1756 
1757  return(err);
1758 }
1759 
1760 /***************************************************************/
1767 static
1768 ulint
1769 row_ins_duplicate_error_in_clust(
1770 /*=============================*/
1771  btr_cur_t* cursor,
1772  const dtuple_t* entry,
1773  que_thr_t* thr,
1774  mtr_t* mtr)
1775 {
1776  ulint err;
1777  rec_t* rec;
1778  ulint n_unique;
1779  trx_t* trx = thr_get_trx(thr);
1780  mem_heap_t* heap = NULL;
1781  ulint offsets_[REC_OFFS_NORMAL_SIZE];
1782  ulint* offsets = offsets_;
1783  rec_offs_init(offsets_);
1784 
1785  UT_NOT_USED(mtr);
1786 
1787  ut_a(dict_index_is_clust(cursor->index));
1788  ut_ad(dict_index_is_unique(cursor->index));
1789 
1790  /* NOTE: For unique non-clustered indexes there may be any number
1791  of delete marked records with the same value for the non-clustered
1792  index key (remember multiversioning), and which differ only in
1793  the row reference part of the index record, containing the
1794  clustered index key fields. For such a secondary index record,
1795  to avoid race condition, we must FIRST do the insertion and after
1796  that check that the uniqueness condition is not breached! */
1797 
1798  /* NOTE: A problem is that in the B-tree node pointers on an
1799  upper level may match more to the entry than the actual existing
1800  user records on the leaf level. So, even if low_match would suggest
1801  that a duplicate key violation may occur, this may not be the case. */
1802 
1803  n_unique = dict_index_get_n_unique(cursor->index);
1804 
1805  if (cursor->low_match >= n_unique) {
1806 
1807  rec = btr_cur_get_rec(cursor);
1808 
1809  if (!page_rec_is_infimum(rec)) {
1810  offsets = rec_get_offsets(rec, cursor->index, offsets,
1811  ULINT_UNDEFINED, &heap);
1812 
1813  /* We set a lock on the possible duplicate: this
1814  is needed in logical logging of MySQL to make
1815  sure that in roll-forward we get the same duplicate
1816  errors as in original execution */
1817 
1818  if (trx->duplicates & TRX_DUP_IGNORE) {
1819 
1820  /* If the SQL-query will update or replace
1821  duplicate key we will take X-lock for
1822  duplicates ( REPLACE, LOAD DATAFILE REPLACE,
1823  INSERT ON DUPLICATE KEY UPDATE). */
1824 
1825  err = row_ins_set_exclusive_rec_lock(
1826  LOCK_REC_NOT_GAP,
1827  btr_cur_get_block(cursor),
1828  rec, cursor->index, offsets, thr);
1829  } else {
1830 
1831  err = row_ins_set_shared_rec_lock(
1832  LOCK_REC_NOT_GAP,
1833  btr_cur_get_block(cursor), rec,
1834  cursor->index, offsets, thr);
1835  }
1836 
1837  switch (err) {
1838  case DB_SUCCESS_LOCKED_REC:
1839  case DB_SUCCESS:
1840  break;
1841  default:
1842  goto func_exit;
1843  }
1844 
1845  if (row_ins_dupl_error_with_rec(
1846  rec, entry, cursor->index, offsets)) {
1847  trx->error_info = cursor->index;
1848  err = DB_DUPLICATE_KEY;
1849  goto func_exit;
1850  }
1851  }
1852  }
1853 
1854  if (cursor->up_match >= n_unique) {
1855 
1856  rec = page_rec_get_next(btr_cur_get_rec(cursor));
1857 
1858  if (!page_rec_is_supremum(rec)) {
1859  offsets = rec_get_offsets(rec, cursor->index, offsets,
1860  ULINT_UNDEFINED, &heap);
1861 
1862  if (trx->duplicates & TRX_DUP_IGNORE) {
1863 
1864  /* If the SQL-query will update or replace
1865  duplicate key we will take X-lock for
1866  duplicates ( REPLACE, LOAD DATAFILE REPLACE,
1867  INSERT ON DUPLICATE KEY UPDATE). */
1868 
1869  err = row_ins_set_exclusive_rec_lock(
1870  LOCK_REC_NOT_GAP,
1871  btr_cur_get_block(cursor),
1872  rec, cursor->index, offsets, thr);
1873  } else {
1874 
1875  err = row_ins_set_shared_rec_lock(
1876  LOCK_REC_NOT_GAP,
1877  btr_cur_get_block(cursor),
1878  rec, cursor->index, offsets, thr);
1879  }
1880 
1881  switch (err) {
1882  case DB_SUCCESS_LOCKED_REC:
1883  case DB_SUCCESS:
1884  break;
1885  default:
1886  goto func_exit;
1887  }
1888 
1889  if (row_ins_dupl_error_with_rec(
1890  rec, entry, cursor->index, offsets)) {
1891  trx->error_info = cursor->index;
1892  err = DB_DUPLICATE_KEY;
1893  goto func_exit;
1894  }
1895  }
1896 
1897  ut_a(!dict_index_is_clust(cursor->index));
1898  /* This should never happen */
1899  }
1900 
1901  err = DB_SUCCESS;
1902 func_exit:
1903  if (UNIV_LIKELY_NULL(heap)) {
1904  mem_heap_free(heap);
1905  }
1906  return(err);
1907 }
1908 
1909 /***************************************************************/
1918 UNIV_INLINE
1919 ulint
1920 row_ins_must_modify(
1921 /*================*/
1922  btr_cur_t* cursor)
1923 {
1924  ulint enough_match;
1925  rec_t* rec;
1926 
1927  /* NOTE: (compare to the note in row_ins_duplicate_error) Because node
1928  pointers on upper levels of the B-tree may match more to entry than
1929  to actual user records on the leaf level, we have to check if the
1930  candidate record is actually a user record. In a clustered index
1931  node pointers contain index->n_unique first fields, and in the case
1932  of a secondary index, all fields of the index. */
1933 
1934  enough_match = dict_index_get_n_unique_in_tree(cursor->index);
1935 
1936  if (cursor->low_match >= enough_match) {
1937 
1938  rec = btr_cur_get_rec(cursor);
1939 
1940  if (!page_rec_is_infimum(rec)) {
1941 
1942  return(ROW_INS_PREV);
1943  }
1944  }
1945 
1946  return(0);
1947 }
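
A one-line restatement of the decision above, with illustrative cursor fields: when low_match already covers every field that is unique within the tree and the cursor sits on a user record, the insert has to be turned into a modify of that record.

    struct cursor_sketch { unsigned low_match; bool on_infimum; };

    bool must_convert_to_modify(const cursor_sketch& cur, unsigned enough_match)
    {
            return cur.low_match >= enough_match && !cur.on_infimum;
    }
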
1948 
1949 /***************************************************************/
1960 static
1961 ulint
1962 row_ins_index_entry_low(
1963 /*====================*/
1964  ulint mode,
1967  dict_index_t* index,
1968  dtuple_t* entry,
1969  ulint n_ext,
1970  que_thr_t* thr)
1971 {
1972  btr_cur_t cursor;
1973  ulint search_mode;
1974  ulint modify = 0; /* remove warning */
1975  rec_t* insert_rec;
1976  rec_t* rec;
1977  ulint err;
1978  ulint n_unique;
1979  big_rec_t* big_rec = NULL;
1980  mtr_t mtr;
1981  mem_heap_t* heap = NULL;
1982 
1983  log_free_check();
1984 
1985  mtr_start(&mtr);
1986 
1987  cursor.thr = thr;
1988 
1989  /* Note that we use PAGE_CUR_LE as the search mode, because then
1990  the function will return in both low_match and up_match of the
1991  cursor sensible values */
1992 
1993  if (dict_index_is_clust(index)) {
1994  search_mode = mode;
1995  } else if (!(thr_get_trx(thr)->check_unique_secondary)) {
1996  search_mode = mode | BTR_INSERT | BTR_IGNORE_SEC_UNIQUE;
1997  } else {
1998  search_mode = mode | BTR_INSERT;
1999  }
2000 
2001  btr_cur_search_to_nth_level(index, 0, entry, PAGE_CUR_LE,
2002  search_mode,
2003  &cursor, 0, __FILE__, __LINE__, &mtr);
2004 
2005  if (cursor.flag == BTR_CUR_INSERT_TO_IBUF) {
2006  /* The insertion was made to the insert buffer already during
2007  the search: we are done */
2008 
2009  ut_ad(search_mode & BTR_INSERT);
2010  err = DB_SUCCESS;
2011 
2012  goto function_exit;
2013  }
2014 
2015 #ifdef UNIV_DEBUG
2016  {
2017  page_t* page = btr_cur_get_page(&cursor);
2018  rec_t* first_rec = page_rec_get_next(
2019  page_get_infimum_rec(page));
2020 
2021  ut_ad(page_rec_is_supremum(first_rec)
2022  || rec_get_n_fields(first_rec, index)
2023  == dtuple_get_n_fields(entry));
2024  }
2025 #endif
2026 
2027  n_unique = dict_index_get_n_unique(index);
2028 
2029  if (dict_index_is_unique(index) && (cursor.up_match >= n_unique
2030  || cursor.low_match >= n_unique)) {
2031 
2032  if (dict_index_is_clust(index)) {
2033  /* Note that the following may return also
2034  DB_LOCK_WAIT */
2035 
2036  err = row_ins_duplicate_error_in_clust(
2037  &cursor, entry, thr, &mtr);
2038  if (err != DB_SUCCESS) {
2039 
2040  goto function_exit;
2041  }
2042  } else {
2043  mtr_commit(&mtr);
2044  err = row_ins_scan_sec_index_for_duplicate(
2045  index, entry, thr);
2046  mtr_start(&mtr);
2047 
2048  if (err != DB_SUCCESS) {
2049 
2050  goto function_exit;
2051  }
2052 
2053  /* We did not find a duplicate and we have now
2054  locked with s-locks the necessary records to
2055  prevent any insertion of a duplicate by another
2056  transaction. Let us now reposition the cursor and
2057  continue the insertion. */
2058 
2059  btr_cur_search_to_nth_level(index, 0, entry,
2060  PAGE_CUR_LE,
2061  mode | BTR_INSERT,
2062  &cursor, 0,
2063  __FILE__, __LINE__, &mtr);
2064  }
2065  }
2066 
2067  modify = row_ins_must_modify(&cursor);
2068 
2069  if (modify != 0) {
2070  /* There is already an index entry with a long enough common
2071  prefix, we must convert the insert into a modify of an
2072  existing record */
2073 
2074  if (modify == ROW_INS_NEXT) {
2075  rec = page_rec_get_next(btr_cur_get_rec(&cursor));
2076 
2077  btr_cur_position(index, rec,
2078  btr_cur_get_block(&cursor),&cursor);
2079  }
2080 
2081  if (dict_index_is_clust(index)) {
2082  err = row_ins_clust_index_entry_by_modify(
2083  mode, &cursor, &heap, &big_rec, entry,
2084  thr, &mtr);
2085  } else {
2086  ut_ad(!n_ext);
2087  err = row_ins_sec_index_entry_by_modify(
2088  mode, &cursor, entry, thr, &mtr);
2089  }
2090  } else {
2091  if (mode == BTR_MODIFY_LEAF) {
2092  err = btr_cur_optimistic_insert(
2093  0, &cursor, entry, &insert_rec, &big_rec,
2094  n_ext, thr, &mtr);
2095  } else {
2096  ut_a(mode == BTR_MODIFY_TREE);
2097  if (buf_LRU_buf_pool_running_out()) {
2098 
2099  err = DB_LOCK_TABLE_FULL;
2100 
2101  goto function_exit;
2102  }
2103  err = btr_cur_pessimistic_insert(
2104  0, &cursor, entry, &insert_rec, &big_rec,
2105  n_ext, thr, &mtr);
2106  }
2107  }
2108 
2109 function_exit:
2110  mtr_commit(&mtr);
2111 
2112  if (UNIV_LIKELY_NULL(big_rec)) {
2113  rec_t* exit_rec;
2114  ulint* offsets;
2115  mtr_start(&mtr);
2116 
2117  btr_cur_search_to_nth_level(index, 0, entry, PAGE_CUR_LE,
2118  BTR_MODIFY_TREE, &cursor, 0,
2119  __FILE__, __LINE__, &mtr);
2120  exit_rec = btr_cur_get_rec(&cursor);
2121  offsets = rec_get_offsets(exit_rec, index, NULL,
2122  ULINT_UNDEFINED, &heap);
2123 
2124  err = btr_store_big_rec_extern_fields(
2125  index, btr_cur_get_block(&cursor),
2126  exit_rec, offsets, &mtr, FALSE, big_rec);
2127 
2128  if (modify) {
2129  dtuple_big_rec_free(big_rec);
2130  } else {
2131  dtuple_convert_back_big_rec(index, entry, big_rec);
2132  }
2133 
2134  mtr_commit(&mtr);
2135  }
2136 
2137  if (UNIV_LIKELY_NULL(heap)) {
2138  mem_heap_free(heap);
2139  }
2140  return(err);
2141 }
2142 
2143 /***************************************************************/
2149 UNIV_INTERN
2150 ulint
2151 row_ins_index_entry(
2152 /*================*/
2153  dict_index_t* index,
2154  dtuple_t* entry,
2155  ulint n_ext,
2156  ibool foreign,
2158  que_thr_t* thr)
2159 {
2160  enum db_err err;
2161 
2162  if (foreign && UT_LIST_GET_FIRST(index->table->foreign_list)) {
2163  err = static_cast<db_err>(row_ins_check_foreign_constraints(index->table, index,
2164  entry, thr));
2165  if (err != DB_SUCCESS) {
2166 
2167  return(err);
2168  }
2169  }
2170 
2171  /* Try first optimistic descent to the B-tree */
2172 
2173  err = static_cast<db_err>(row_ins_index_entry_low(BTR_MODIFY_LEAF, index, entry,
2174  n_ext, thr));
2175  if (err != DB_FAIL) {
2176 
2177  return(err);
2178  }
2179 
2180  /* Try then pessimistic descent to the B-tree */
2181 
2182  err = static_cast<db_err>(row_ins_index_entry_low(BTR_MODIFY_TREE, index, entry,
2183  n_ext, thr));
2184  return(err);
2185 }
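
The calling convention above is the standard two-phase descent: try BTR_MODIFY_LEAF first and only fall back to BTR_MODIFY_TREE when the optimistic attempt reports DB_FAIL. A generic skeleton of that retry, with hypothetical callables:

    #include <functional>

    enum insert_outcome { INSERT_OK, INSERT_RETRY, INSERT_ERROR };

    insert_outcome insert_with_fallback(const std::function<insert_outcome()>& modify_leaf,
                                        const std::function<insert_outcome()>& modify_tree)
    {
            insert_outcome err = modify_leaf();  /* optimistic: no page splits */
            if (err != INSERT_RETRY) {
                    return err;                  /* success or a hard error such as duplicate key */
            }
            return modify_tree();                /* pessimistic: may split or merge pages */
    }
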
2186 
2187 /***********************************************************/
2190 static
2191 void
2192 row_ins_index_entry_set_vals(
2193 /*=========================*/
2194  dict_index_t* index,
2195  dtuple_t* entry,
2196  const dtuple_t* row)
2197 {
2198  ulint n_fields;
2199  ulint i;
2200 
2201  ut_ad(entry && row);
2202 
2203  n_fields = dtuple_get_n_fields(entry);
2204 
2205  for (i = 0; i < n_fields; i++) {
2206  dict_field_t* ind_field;
2207  dfield_t* field;
2208  const dfield_t* row_field;
2209  ulint len;
2210 
2211  field = dtuple_get_nth_field(entry, i);
2212  ind_field = dict_index_get_nth_field(index, i);
2213  row_field = dtuple_get_nth_field(row, ind_field->col->ind);
2214  len = dfield_get_len(row_field);
2215 
2216  /* Check column prefix indexes */
2217  if (ind_field->prefix_len > 0
2218  && dfield_get_len(row_field) != UNIV_SQL_NULL) {
2219 
2220  const dict_col_t* col
2221  = dict_field_get_col(ind_field);
2222 
2223  len = dtype_get_at_most_n_mbchars(
2224  col->prtype, col->mbminmaxlen,
2225  ind_field->prefix_len,
2226  len, static_cast<const char *>(dfield_get_data(row_field)));
2227 
2228  ut_ad(!dfield_is_ext(row_field));
2229  }
2230 
2231  dfield_set_data(field, dfield_get_data(row_field), len);
2232  if (dfield_is_ext(row_field)) {
2233  ut_ad(dict_index_is_clust(index));
2234  dfield_set_ext(field);
2235  }
2236  }
2237 }
2238 
2239 /***********************************************************/
2243 static
2244 ulint
2245 row_ins_index_entry_step(
2246 /*=====================*/
2247  ins_node_t* node,
2248  que_thr_t* thr)
2249 {
2250  enum db_err err;
2251 
2252  ut_ad(dtuple_check_typed(node->row));
2253 
2254  row_ins_index_entry_set_vals(node->index, node->entry, node->row);
2255 
2256  ut_ad(dtuple_check_typed(node->entry));
2257 
2258  err = static_cast<db_err>(row_ins_index_entry(node->index, node->entry, 0, TRUE, thr));
2259 
2260  return(err);
2261 }
2262 
2263 /***********************************************************/
2265 UNIV_INLINE
2266 void
2267 row_ins_alloc_row_id_step(
2268 /*======================*/
2269  ins_node_t* node)
2270 {
2271  row_id_t row_id;
2272 
2273  ut_ad(node->state == INS_NODE_ALLOC_ROW_ID);
2274 
2275  if (dict_index_is_unique(dict_table_get_first_index(node->table))) {
2276 
2277  /* No row id is stored if the clustered index is unique */
2278 
2279  return;
2280  }
2281 
2282  /* Fill in row id value to row */
2283 
2284  row_id = dict_sys_get_new_row_id();
2285 
2286  dict_sys_write_row_id(node->row_id_buf, row_id);
2287 }
2288 
2289 /***********************************************************/
2291 UNIV_INLINE
2292 void
2293 row_ins_get_row_from_values(
2294 /*========================*/
2295  ins_node_t* node)
2296 {
2297  que_node_t* list_node;
2298  dfield_t* dfield;
2299  dtuple_t* row;
2300  ulint i;
2301 
2302  /* The field values are copied in the buffers of the select node and
2303  it is safe to use them until we fetch from select again: therefore
2304  we can just copy the pointers */
2305 
2306  row = node->row;
2307 
2308  i = 0;
2309  list_node = node->values_list;
2310 
2311  while (list_node) {
2312  eval_exp(list_node);
2313 
2314  dfield = dtuple_get_nth_field(row, i);
2315  dfield_copy_data(dfield, que_node_get_val(list_node));
2316 
2317  i++;
2318  list_node = que_node_get_next(list_node);
2319  }
2320 }
2321 
2322 /***********************************************************/
2324 UNIV_INLINE
2325 void
2326 row_ins_get_row_from_select(
2327 /*========================*/
2328  ins_node_t* node)
2329 {
2330  que_node_t* list_node;
2331  dfield_t* dfield;
2332  dtuple_t* row;
2333  ulint i;
2334 
2335  /* The field values are copied in the buffers of the select node and
2336  it is safe to use them until we fetch from select again: therefore
2337  we can just copy the pointers */
2338 
2339  row = node->row;
2340 
2341  i = 0;
2342  list_node = node->select->select_list;
2343 
2344  while (list_node) {
2345  dfield = dtuple_get_nth_field(row, i);
2346  dfield_copy_data(dfield, que_node_get_val(list_node));
2347 
2348  i++;
2349  list_node = que_node_get_next(list_node);
2350  }
2351 }
2352 
2353 /***********************************************************/
2357 static
2358 ulint
2359 row_ins(
2360 /*====*/
2361  ins_node_t* node,
2362  que_thr_t* thr)
2363 {
2364  ulint err;
2365 
2366  ut_ad(node && thr);
2367 
2368  if (node->state == INS_NODE_ALLOC_ROW_ID) {
2369 
2370  row_ins_alloc_row_id_step(node);
2371 
2372  node->index = dict_table_get_first_index(node->table);
2373  node->entry = UT_LIST_GET_FIRST(node->entry_list);
2374 
2375  if (node->ins_type == INS_SEARCHED) {
2376 
2377  row_ins_get_row_from_select(node);
2378 
2379  } else if (node->ins_type == INS_VALUES) {
2380 
2381  row_ins_get_row_from_values(node);
2382  }
2383 
2384  node->state = INS_NODE_INSERT_ENTRIES;
2385  }
2386 
2387  ut_ad(node->state == INS_NODE_INSERT_ENTRIES);
2388 
2389  while (node->index != NULL) {
2390  err = row_ins_index_entry_step(node, thr);
2391 
2392  if (err != DB_SUCCESS) {
2393 
2394  return(err);
2395  }
2396 
2397  node->index = dict_table_get_next_index(node->index);
2398  node->entry = UT_LIST_GET_NEXT(tuple_list, node->entry);
2399  }
2400 
2401  ut_ad(node->entry == NULL);
2402 
2403  node->state = INS_NODE_ALLOC_ROW_ID;
2404 
2405  return(DB_SUCCESS);
2406 }
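
The loop above walks the table's indexes and the pre-built entry list in lock step, clustered index first, stopping at the first error. Its shape, with illustrative containers:

    #include <vector>

    template <class Index, class Entry, class InsertFn>
    bool insert_all_entries(const std::vector<Index>& indexes,
                            const std::vector<Entry>& entries, InsertFn insert_one)
    {
            for (std::size_t i = 0; i < indexes.size() && i < entries.size(); i++) {
                    if (!insert_one(indexes[i], entries[i])) {
                            return false;  /* e.g. lock wait or duplicate key: caller handles it */
                    }
            }
            return true;
    }
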
2407 
2408 /***********************************************************/
2412 UNIV_INTERN
2413 que_thr_t*
2414 row_ins_step(
2415 /*=========*/
2416  que_thr_t* thr)
2417 {
2418  ins_node_t* node;
2419  que_node_t* parent;
2420  sel_node_t* sel_node;
2421  trx_t* trx;
2422  ulint err;
2423 
2424  ut_ad(thr);
2425 
2426  trx = thr_get_trx(thr);
2427 
2428  trx_start_if_not_started(trx);
2429 
2430  node = static_cast<ins_node_t *>(thr->run_node);
2431 
2432  ut_ad(que_node_get_type(node) == QUE_NODE_INSERT);
2433 
2434  parent = que_node_get_parent(node);
2435  sel_node = node->select;
2436 
2437  if (thr->prev_node == parent) {
2438  node->state = INS_NODE_SET_IX_LOCK;
2439  }
2440 
2441  /* If this is the first time this node is executed (or when
2442  execution resumes after wait for the table IX lock), set an
2443  IX lock on the table and reset the possible select node. MySQL's
2444  partitioned table code may also call an insert within the same
2445  SQL statement AFTER it has used this table handle to do a search.
2446  This happens, for example, when a row update moves it to another
2447  partition. In that case, we have already set the IX lock on the
2448  table during the search operation, and there is no need to set
2449  it again here. But we must write trx->id to node->trx_id_buf. */
2450 
2451  trx_write_trx_id(node->trx_id_buf, trx->id);
2452 
2453  if (node->state == INS_NODE_SET_IX_LOCK) {
2454 
2455  /* It may be that the current session has not yet started
2456  its transaction, or it has been committed: */
2457 
2458  if (trx->id == node->trx_id) {
2459  /* No need to do IX-locking */
2460 
2461  goto same_trx;
2462  }
2463 
2464  err = lock_table(0, node->table, LOCK_IX, thr);
2465 
2466  if (err != DB_SUCCESS) {
2467 
2468  goto error_handling;
2469  }
2470 
2471  node->trx_id = trx->id;
2472 same_trx:
2473  node->state = INS_NODE_ALLOC_ROW_ID;
2474 
2475  if (node->ins_type == INS_SEARCHED) {
2476  /* Reset the cursor */
2477  sel_node->state = SEL_NODE_OPEN;
2478 
2479  /* Fetch a row to insert */
2480 
2481  thr->run_node = sel_node;
2482 
2483  return(thr);
2484  }
2485  }
2486 
2487  if ((node->ins_type == INS_SEARCHED)
2488  && (sel_node->state != SEL_NODE_FETCH)) {
2489 
2490  ut_ad(sel_node->state == SEL_NODE_NO_MORE_ROWS);
2491 
2492  /* No more rows to insert */
2493  thr->run_node = parent;
2494 
2495  return(thr);
2496  }
2497 
2498  /* DO THE CHECKS OF THE CONSISTENCY CONSTRAINTS HERE */
2499 
2500  err = row_ins(node, thr);
2501 
2502 error_handling:
2503  trx->error_state = err;
2504 
2505  if (err != DB_SUCCESS) {
2506  /* err == DB_LOCK_WAIT or SQL error detected */
2507  return(NULL);
2508  }
2509 
2510  /* DO THE TRIGGER ACTIONS HERE */
2511 
2512  if (node->ins_type == INS_SEARCHED) {
2513  /* Fetch a row to insert */
2514 
2515  thr->run_node = sel_node;
2516  } else {
2517  thr->run_node = que_node_get_parent(node);
2518  }
2519 
2520  return(thr);
2521 }
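
Finally, the node states driven by row_ins_step() form a small cycle: the IX table lock is taken once per transaction, after which each row alternates between allocating a row id and inserting one entry per index. A plain-enum sketch of that progression:

    enum ins_state_sketch { SET_IX_LOCK, ALLOC_ROW_ID, INSERT_ENTRIES };

    ins_state_sketch next_ins_state(ins_state_sketch s)
    {
            switch (s) {
            case SET_IX_LOCK:    return ALLOC_ROW_ID;    /* after lock_table(..., LOCK_IX, ...) */
            case ALLOC_ROW_ID:   return INSERT_ENTRIES;  /* after filling the row id buffer */
            case INSERT_ENTRIES: return ALLOC_ROW_ID;    /* row done: ready for the next row */
            }
            return SET_IX_LOCK;                          /* unreachable; keeps the compiler happy */
    }
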