Drizzled Public API Documentation

row0umod.cc
1 /*****************************************************************************
2 
3 Copyright (C) 1997, 2010, Innobase Oy. All Rights Reserved.
4 
5 This program is free software; you can redistribute it and/or modify it under
6 the terms of the GNU General Public License as published by the Free Software
7 Foundation; version 2 of the License.
8 
9 This program is distributed in the hope that it will be useful, but WITHOUT
10 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
11 FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
12 
13 You should have received a copy of the GNU General Public License along with
14 this program; if not, write to the Free Software Foundation, Inc., 51 Franklin
15 St, Fifth Floor, Boston, MA 02110-1301 USA
16 
17 *****************************************************************************/
18 
19 /**************************************************/
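/* Undo of a modify operation on a row: this module rolls back updates,
delete markings and delete unmarkings of clustered and secondary index
records. */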
26 #include "row0umod.h"
27 
28 #ifdef UNIV_NONINL
29 #include "row0umod.ic"
30 #endif
31 
32 #include "dict0dict.h"
33 #include "dict0boot.h"
34 #include "trx0undo.h"
35 #include "trx0roll.h"
36 #include "btr0btr.h"
37 #include "mach0data.h"
38 #include "row0undo.h"
39 #include "row0vers.h"
40 #include "trx0trx.h"
41 #include "trx0rec.h"
42 #include "row0row.h"
43 #include "row0upd.h"
44 #include "que0que.h"
45 #include "log0log.h"
46 
47 /* Considerations on undoing a modify operation.
48 (1) Undoing a delete marking: all index records should be found. Some of
49 them may already have their delete mark FALSE, if the delete mark operation
50 was stopped partway, or if the undo operation ended prematurely because of a
51 system crash.
52 (2) Undoing an update of a delete unmarked record: the newer version of
53 an updated secondary index entry should be removed if no prior version
54 of the clustered index record requires its existence. Otherwise, it should
55 be delete marked.
56 (3) Undoing an update of a delete marked record. In this kind of update a
57 delete marked clustered index record was delete unmarked and possibly also
58 some of its fields were changed. Now, it is possible that the delete marked
59 version has become obsolete at the time the undo is started. */
60 
61 /*************************************************************************
62 IMPORTANT NOTE: Any operation that generates redo MUST check that there
63 is enough space in the redo log for that operation. This is
64 done by calling log_free_check(). The reason for checking the
65 availability of the redo log space before the start of the operation is
66 that we MUST not hold any synchronization objects when performing the
67 check.
68 If you make a change in this module make sure that no codepath is
69 introduced where a call to log_free_check() is bypassed. */
70 
71 /***********************************************************/
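/* Checks whether the previous version of the clustered index record was
modified or inserted by the same transaction, and whether its undo number
lies within the rollback limit, so that it should be undone in this same
rollback; if so, returns TRUE and passes its undo number back in *undo_no. */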
76 static
77 ibool
78 row_undo_mod_undo_also_prev_vers(
79 /*=============================*/
80  undo_node_t* node,
81  undo_no_t* undo_no)
82 {
83  trx_undo_rec_t* undo_rec;
84  trx_t* trx;
85 
86  trx = node->trx;
87 
88  if (node->new_trx_id != trx->id) {
89 
90  *undo_no = 0;
91  return(FALSE);
92  }
93 
94  undo_rec = trx_undo_get_undo_rec_low(node->new_roll_ptr, node->heap);
95 
96  *undo_no = trx_undo_rec_get_undo_no(undo_rec);
97 
98  return(trx->roll_limit <= *undo_no);
99 }
100 
101 /***********************************************************/
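/* Undoes a modify in a clustered index record by restoring the persistent
cursor position and applying node->update, either optimistically within the
leaf page (BTR_MODIFY_LEAF) or pessimistically (BTR_MODIFY_TREE). */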
104 static
105 ulint
106 row_undo_mod_clust_low(
107 /*===================*/
108  undo_node_t* node,
109  que_thr_t* thr,
110  mtr_t* mtr,
112  ulint mode)
113 {
114  btr_pcur_t* pcur;
115  btr_cur_t* btr_cur;
116  ulint err;
117 #ifdef UNIV_DEBUG
118  ibool success;
119 #endif /* UNIV_DEBUG */
120 
121  pcur = &(node->pcur);
122  btr_cur = btr_pcur_get_btr_cur(pcur);
123 
124 #ifdef UNIV_DEBUG
125  success =
126 #endif /* UNIV_DEBUG */
127  btr_pcur_restore_position(mode, pcur, mtr);
128 
129  ut_ad(success);
130 
131  if (mode == BTR_MODIFY_LEAF) {
132 
133  err = btr_cur_optimistic_update(BTR_NO_LOCKING_FLAG
134  | BTR_NO_UNDO_LOG_FLAG
135  | BTR_KEEP_SYS_FLAG,
136  btr_cur, node->update,
137  node->cmpl_info, thr, mtr);
138  } else {
139  mem_heap_t* heap = NULL;
140  big_rec_t* dummy_big_rec;
141 
142  ut_ad(mode == BTR_MODIFY_TREE);
143 
144  err = btr_cur_pessimistic_update(
145  BTR_NO_LOCKING_FLAG
146  | BTR_NO_UNDO_LOG_FLAG
147  | BTR_KEEP_SYS_FLAG,
148  btr_cur, &heap, &dummy_big_rec, node->update,
149  node->cmpl_info, thr, mtr);
150 
151  ut_a(!dummy_big_rec);
152  if (UNIV_LIKELY_NULL(heap)) {
153  mem_heap_free(heap);
154  }
155  }
156 
157  return(err);
158 }
159 
160 /***********************************************************/
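/* Removes the clustered index record after the undo of an update of a
delete-marked record (TRX_UNDO_UPD_DEL_REC), unless the record has already
been purged or a delete-marked version must still be preserved. */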
167 static
168 ulint
169 row_undo_mod_remove_clust_low(
170 /*==========================*/
171  undo_node_t* node,
172  que_thr_t* thr,
173  mtr_t* mtr,
174  ulint mode)
175 {
176  btr_cur_t* btr_cur;
177  ulint err;
178 
179  ut_ad(node->rec_type == TRX_UNDO_UPD_DEL_REC);
180 
181  /* Find out if the record has been purged already
182  or if we can remove it. */
183 
184  if (!btr_pcur_restore_position(mode, &node->pcur, mtr)
185  || row_vers_must_preserve_del_marked(node->new_trx_id, mtr)) {
186 
187  return(DB_SUCCESS);
188  }
189 
190  btr_cur = btr_pcur_get_btr_cur(&node->pcur);
191 
192  if (mode == BTR_MODIFY_LEAF) {
193  err = btr_cur_optimistic_delete(btr_cur, mtr)
194  ? DB_SUCCESS
195  : DB_FAIL;
196  } else {
197  ut_ad(mode == BTR_MODIFY_TREE);
198 
199  /* This operation is analogous to purge, we can free also
200  inherited externally stored fields */
201 
202  btr_cur_pessimistic_delete(&err, FALSE, btr_cur,
203  thr_is_recv(thr)
204  ? RB_RECOVERY_PURGE_REC
205  : RB_NONE, mtr);
206 
207  /* The delete operation may fail if we have little
208  file space left: TODO: easiest to crash the database
209  and restart with more file space */
210  }
211 
212  return(err);
213 }
214 
215 /***********************************************************/
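/* Undoes a modify in the clustered index record: tries the optimistic
path first and falls back to a pessimistic descent, removes the record for
TRX_UNDO_UPD_DEL_REC undo records, and sets the node state for the next
undo round. */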
219 static
220 ulint
221 row_undo_mod_clust(
222 /*===============*/
223  undo_node_t* node,
224  que_thr_t* thr)
225 {
226  btr_pcur_t* pcur;
227  mtr_t mtr;
228  ulint err;
229  ibool success;
230  ibool more_vers;
231  undo_no_t new_undo_no;
232 
233  ut_ad(node && thr);
234 
235  log_free_check();
236 
237  /* Check if also the previous version of the clustered index record
238  should be undone in this same rollback operation */
239 
240  more_vers = row_undo_mod_undo_also_prev_vers(node, &new_undo_no);
241 
242  pcur = &(node->pcur);
243 
244  mtr_start(&mtr);
245 
246  /* Try optimistic processing of the record, keeping changes within
247  the index page */
248 
249  err = row_undo_mod_clust_low(node, thr, &mtr, BTR_MODIFY_LEAF);
250 
251  if (err != DB_SUCCESS) {
252  btr_pcur_commit_specify_mtr(pcur, &mtr);
253 
254  /* We may have to modify tree structure: do a pessimistic
255  descent down the index tree */
256 
257  mtr_start(&mtr);
258 
259  err = row_undo_mod_clust_low(node, thr, &mtr, BTR_MODIFY_TREE);
260  }
261 
262  btr_pcur_commit_specify_mtr(pcur, &mtr);
263 
264  if (err == DB_SUCCESS && node->rec_type == TRX_UNDO_UPD_DEL_REC) {
265 
266  mtr_start(&mtr);
267 
268  err = row_undo_mod_remove_clust_low(node, thr, &mtr,
269  BTR_MODIFY_LEAF);
270  if (err != DB_SUCCESS) {
271  btr_pcur_commit_specify_mtr(pcur, &mtr);
272 
273  /* We may have to modify tree structure: do a
274  pessimistic descent down the index tree */
275 
276  mtr_start(&mtr);
277 
278  err = row_undo_mod_remove_clust_low(node, thr, &mtr,
279  BTR_MODIFY_TREE);
280  }
281 
282  btr_pcur_commit_specify_mtr(pcur, &mtr);
283  }
284 
285  node->state = UNDO_NODE_FETCH_NEXT;
286 
287  trx_undo_rec_release(node->trx, node->undo_no);
288 
289  if (more_vers && err == DB_SUCCESS) {
290 
291  /* Reserve the undo log record to the prior version after
292  committing &mtr: this is necessary to comply with the latching
293  order, as &mtr may contain the fsp latch which is lower in
294  the latch hierarchy than trx->undo_mutex. */
295 
296  success = trx_undo_rec_reserve(node->trx, new_undo_no);
297 
298  if (success) {
299  node->state = UNDO_NODE_PREV_VERS;
300  }
301  }
302 
303  return(err);
304 }
305 
306 /***********************************************************/
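/* Delete marks or removes a secondary index entry, if found: the entry is
removed when no prior, not yet purgeable version of the row needs it,
otherwise it is delete marked. */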
309 static
310 ulint
311 row_undo_mod_del_mark_or_remove_sec_low(
312 /*====================================*/
313  undo_node_t* node,
314  que_thr_t* thr,
315  dict_index_t* index,
316  dtuple_t* entry,
317  ulint mode)
319 {
320  btr_pcur_t pcur;
321  btr_cur_t* btr_cur;
322  ibool success;
323  ibool old_has;
324  ulint err;
325  mtr_t mtr;
326  mtr_t mtr_vers;
327  enum row_search_result search_result;
328 
329  log_free_check();
330  mtr_start(&mtr);
331 
332  btr_cur = btr_pcur_get_btr_cur(&pcur);
333 
334  ut_ad(mode == BTR_MODIFY_TREE || mode == BTR_MODIFY_LEAF);
335 
336  search_result = row_search_index_entry(index, entry, mode,
337  &pcur, &mtr);
338 
339  switch (UNIV_EXPECT(search_result, ROW_FOUND)) {
340  case ROW_NOT_FOUND:
341  /* In crash recovery, the secondary index record may
342  be missing if the UPDATE did not have time to insert
343  the secondary index records before the crash. When we
344  are undoing that UPDATE in crash recovery, the record
345  may be missing.
346 
347  In normal processing, if an update ends in a deadlock
348  before it has inserted all updated secondary index
349  records, then the undo will not find those records. */
350 
351  err = DB_SUCCESS;
352  goto func_exit;
353  case ROW_FOUND:
354  break;
355  case ROW_BUFFERED:
356  case ROW_NOT_DELETED_REF:
357  /* These are invalid outcomes, because the mode passed
358  to row_search_index_entry() did not include any of the
359  flags BTR_INSERT, BTR_DELETE, or BTR_DELETE_MARK. */
360  ut_error;
361  }
362 
363  /* We should remove the index record if no prior version of the row,
364  which cannot be purged yet, requires its existence. If some such version does,
365  we should delete mark the record. */
366 
367  mtr_start(&mtr_vers);
368 
369  success = btr_pcur_restore_position(BTR_SEARCH_LEAF, &(node->pcur),
370  &mtr_vers);
371  ut_a(success);
372 
373  old_has = row_vers_old_has_index_entry(FALSE,
374  btr_pcur_get_rec(&(node->pcur)),
375  &mtr_vers, index, entry);
376  if (old_has) {
377  err = btr_cur_del_mark_set_sec_rec(BTR_NO_LOCKING_FLAG,
378  btr_cur, TRUE, thr, &mtr);
379  ut_ad(err == DB_SUCCESS);
380  } else {
381  /* Remove the index record */
382 
383  if (mode == BTR_MODIFY_LEAF) {
384  success = btr_cur_optimistic_delete(btr_cur, &mtr);
385  if (success) {
386  err = DB_SUCCESS;
387  } else {
388  err = DB_FAIL;
389  }
390  } else {
391  ut_ad(mode == BTR_MODIFY_TREE);
392 
393  /* No need to distinguish RB_RECOVERY_PURGE here,
394  because we are deleting a secondary index record:
395  the distinction between RB_NORMAL and
396  RB_RECOVERY_PURGE only matters when deleting a
397  record that contains externally stored
398  columns. */
399  ut_ad(!dict_index_is_clust(index));
400  btr_cur_pessimistic_delete(&err, FALSE, btr_cur,
401  RB_NORMAL, &mtr);
402 
403  /* The delete operation may fail if we have little
404  file space left: TODO: easiest to crash the database
405  and restart with more file space */
406  }
407  }
408 
409  btr_pcur_commit_specify_mtr(&(node->pcur), &mtr_vers);
410 
411 func_exit:
412  btr_pcur_close(&pcur);
413  mtr_commit(&mtr);
414 
415  return(err);
416 }
417 
418 /***********************************************************/
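/* Delete marks or removes a secondary index entry, if found: first tries
the operation with BTR_MODIFY_LEAF and retries with BTR_MODIFY_TREE if
that fails. */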
427 static
428 ulint
429 row_undo_mod_del_mark_or_remove_sec(
430 /*================================*/
431  undo_node_t* node,
432  que_thr_t* thr,
433  dict_index_t* index,
434  dtuple_t* entry)
435 {
436  ulint err;
437 
438  err = row_undo_mod_del_mark_or_remove_sec_low(node, thr, index,
439  entry, BTR_MODIFY_LEAF);
440  if (err == DB_SUCCESS) {
441 
442  return(err);
443  }
444 
445  err = row_undo_mod_del_mark_or_remove_sec_low(node, thr, index,
446  entry, BTR_MODIFY_TREE);
447  return(err);
448 }
449 
450 /***********************************************************/
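/* Delete unmarks a secondary index entry which must be found, and undoes
any update of its fields by applying the difference between the entry and
the stored record. */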
456 static
457 ulint
458 row_undo_mod_del_unmark_sec_and_undo_update(
459 /*========================================*/
460  ulint mode,
462  que_thr_t* thr,
463  dict_index_t* index,
464  const dtuple_t* entry)
465 {
466  mem_heap_t* heap;
467  btr_pcur_t pcur;
468  btr_cur_t* btr_cur;
469  upd_t* update;
470  ulint err = DB_SUCCESS;
471  big_rec_t* dummy_big_rec;
472  mtr_t mtr;
473  trx_t* trx = thr_get_trx(thr);
474  enum row_search_result search_result;
475 
476  /* Ignore indexes that are being created. */
477  if (UNIV_UNLIKELY(*index->name == TEMP_INDEX_PREFIX)) {
478 
479  return(DB_SUCCESS);
480  }
481 
482  log_free_check();
483  mtr_start(&mtr);
484 
485  ut_ad(mode == BTR_MODIFY_TREE || mode == BTR_MODIFY_LEAF);
486 
487  search_result = row_search_index_entry(index, entry, mode,
488  &pcur, &mtr);
489 
490  switch (search_result) {
491  case ROW_BUFFERED:
492  case ROW_NOT_DELETED_REF:
493  /* These are invalid outcomes, because the mode passed
494  to row_search_index_entry() did not include any of the
495  flags BTR_INSERT, BTR_DELETE, or BTR_DELETE_MARK. */
496  ut_error;
497  case ROW_NOT_FOUND:
498  fputs("InnoDB: error in sec index entry del undo in\n"
499  "InnoDB: ", stderr);
500  dict_index_name_print(stderr, trx, index);
501  fputs("\n"
502  "InnoDB: tuple ", stderr);
503  dtuple_print(stderr, entry);
504  fputs("\n"
505  "InnoDB: record ", stderr);
506  rec_print(stderr, btr_pcur_get_rec(&pcur), index);
507  putc('\n', stderr);
508  trx_print(stderr, trx, 0);
509  fputs("\n"
510  "InnoDB: Submit a detailed bug report"
511  " to http://bugs.mysql.com\n", stderr);
512  break;
513  case ROW_FOUND:
514  btr_cur = btr_pcur_get_btr_cur(&pcur);
515  err = btr_cur_del_mark_set_sec_rec(BTR_NO_LOCKING_FLAG,
516  btr_cur, FALSE, thr, &mtr);
517  ut_a(err == DB_SUCCESS);
518  heap = mem_heap_create(100);
519 
520  update = row_upd_build_sec_rec_difference_binary(
521  index, entry, btr_cur_get_rec(btr_cur), trx, heap);
522  if (upd_get_n_fields(update) == 0) {
523 
524  /* Do nothing */
525 
526  } else if (mode == BTR_MODIFY_LEAF) {
527  /* Try an optimistic updating of the record, keeping
528  changes within the page */
529 
530  err = btr_cur_optimistic_update(
531  BTR_KEEP_SYS_FLAG | BTR_NO_LOCKING_FLAG,
532  btr_cur, update, 0, thr, &mtr);
533  switch (err) {
534  case DB_OVERFLOW:
535  case DB_UNDERFLOW:
536  case DB_ZIP_OVERFLOW:
537  err = DB_FAIL;
538  }
539  } else {
540  ut_a(mode == BTR_MODIFY_TREE);
541  err = btr_cur_pessimistic_update(
542  BTR_KEEP_SYS_FLAG | BTR_NO_LOCKING_FLAG,
543  btr_cur, &heap, &dummy_big_rec,
544  update, 0, thr, &mtr);
545  ut_a(!dummy_big_rec);
546  }
547 
548  mem_heap_free(heap);
549  }
550 
551  btr_pcur_close(&pcur);
552  mtr_commit(&mtr);
553 
554  return(err);
555 }
556 
557 /***********************************************************/
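/* Undoes the secondary index entries for a TRX_UNDO_UPD_DEL_REC undo
record: each entry built from node->row is delete marked or removed. */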
560 static
561 ulint
562 row_undo_mod_upd_del_sec(
563 /*=====================*/
564  undo_node_t* node,
565  que_thr_t* thr)
566 {
567  mem_heap_t* heap;
568  dtuple_t* entry;
569  dict_index_t* index;
570  ulint err = DB_SUCCESS;
571 
572  ut_ad(node->rec_type == TRX_UNDO_UPD_DEL_REC);
573  heap = mem_heap_create(1024);
574 
575  while (node->index != NULL) {
576  index = node->index;
577 
578  entry = row_build_index_entry(node->row, node->ext,
579  index, heap);
580  if (UNIV_UNLIKELY(!entry)) {
581  /* The database must have crashed after
582  inserting a clustered index record but before
583  writing all the externally stored columns of
584  that record. Because secondary index entries
585  are inserted after the clustered index record,
586  we may assume that the secondary index record
587  does not exist. However, this situation may
588  only occur during the rollback of incomplete
589  transactions. */
590  ut_a(thr_is_recv(thr));
591  } else {
592  err = row_undo_mod_del_mark_or_remove_sec(
593  node, thr, index, entry);
594 
595  if (err != DB_SUCCESS) {
596 
597  break;
598  }
599  }
600 
601  mem_heap_empty(heap);
602 
603  node->index = dict_table_get_next_index(node->index);
604  }
605 
606  mem_heap_free(heap);
607 
608  return(err);
609 }
610 
611 /***********************************************************/
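/* Undoes the secondary index entries for a TRX_UNDO_DEL_MARK_REC undo
record: each entry is delete unmarked again. */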
614 static
615 ulint
616 row_undo_mod_del_mark_sec(
617 /*======================*/
618  undo_node_t* node,
619  que_thr_t* thr)
620 {
621  mem_heap_t* heap;
622  dtuple_t* entry;
623  dict_index_t* index;
624  ulint err;
625 
626  heap = mem_heap_create(1024);
627 
628  while (node->index != NULL) {
629  index = node->index;
630 
631  entry = row_build_index_entry(node->row, node->ext,
632  index, heap);
633  ut_a(entry);
634  err = row_undo_mod_del_unmark_sec_and_undo_update(
635  BTR_MODIFY_LEAF, thr, index, entry);
636  if (err == DB_FAIL) {
637  err = row_undo_mod_del_unmark_sec_and_undo_update(
638  BTR_MODIFY_TREE, thr, index, entry);
639  }
640 
641  if (err != DB_SUCCESS) {
642 
643  mem_heap_free(heap);
644 
645  return(err);
646  }
647 
648  node->index = dict_table_get_next_index(node->index);
649  }
650 
651  mem_heap_free(heap);
652 
653  return(DB_SUCCESS);
654 }
655 
656 /***********************************************************/
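/* Undoes the secondary index entries for a TRX_UNDO_UPD_EXIST_REC undo
record: where an ordering field changed, the newer entry is delete marked
or removed and the entry of the previous row version is restored. */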
659 static
660 ulint
661 row_undo_mod_upd_exist_sec(
662 /*=======================*/
663  undo_node_t* node,
664  que_thr_t* thr)
665 {
666  mem_heap_t* heap;
667  dtuple_t* entry;
668  dict_index_t* index;
669  ulint err;
670 
671  if (node->cmpl_info & UPD_NODE_NO_ORD_CHANGE) {
672  /* No change in secondary indexes */
673 
674  return(DB_SUCCESS);
675  }
676 
677  heap = mem_heap_create(1024);
678 
679  while (node->index != NULL) {
680  index = node->index;
681 
682  if (row_upd_changes_ord_field_binary(node->index, node->update,
683  thr,
684  node->row, node->ext)) {
685 
686  /* Build the newest version of the index entry */
687  entry = row_build_index_entry(node->row, node->ext,
688  index, heap);
689  if (UNIV_UNLIKELY(!entry)) {
690  /* The server must have crashed in
691  row_upd_clust_rec_by_insert() before
692  the updated externally stored columns (BLOBs)
693  of the new clustered index entry were
694  written. */
695 
696  /* The table must be in DYNAMIC or COMPRESSED
697  format. REDUNDANT and COMPACT formats
698  store a local 768-byte prefix of each
699  externally stored column. */
700  ut_a(dict_table_get_format(index->table)
701  >= DICT_TF_FORMAT_ZIP);
702 
703  /* This is only legitimate when
704  rolling back an incomplete transaction
705  after crash recovery. */
706  ut_a(thr_get_trx(thr)->is_recovered);
707 
708  /* The server must have crashed before
709  completing the insert of the new
710  clustered index entry and before
711  inserting to the secondary indexes.
712  Because node->row was not yet written
713  to this index, we can ignore it. But
714  we must restore node->undo_row. */
715  } else {
716  /* NOTE that if we updated the fields of a
717  delete-marked secondary index record so that
718  alphabetically they stayed the same, e.g.,
719  'abc' -> 'aBc', we cannot return to the
720  original values because we do not know them.
721  But this should not cause problems because
722  in row0sel.c, in queries we always retrieve
723  the clustered index record or an earlier
724  version of it, if the secondary index record
725  through which we do the search is
726  delete-marked. */
727 
728  err = row_undo_mod_del_mark_or_remove_sec(
729  node, thr, index, entry);
730  if (err != DB_SUCCESS) {
731  mem_heap_free(heap);
732 
733  return(err);
734  }
735 
736  mem_heap_empty(heap);
737  }
738 
739  /* We may have to update the delete mark in the
740  secondary index record of the previous version of
741  the row. We also need to update the fields of
742  the secondary index record if we updated its fields
743  but alphabetically they stayed the same, e.g.,
744  'abc' -> 'aBc'. */
745  entry = row_build_index_entry(node->undo_row,
746  node->undo_ext,
747  index, heap);
748  ut_a(entry);
749 
750  err = row_undo_mod_del_unmark_sec_and_undo_update(
751  BTR_MODIFY_LEAF, thr, index, entry);
752  if (err == DB_FAIL) {
753  err = row_undo_mod_del_unmark_sec_and_undo_update(
754  BTR_MODIFY_TREE, thr, index, entry);
755  }
756 
757  if (err != DB_SUCCESS) {
758  mem_heap_free(heap);
759 
760  return(err);
761  }
762  }
763 
764  node->index = dict_table_get_next_index(node->index);
765  }
766 
767  mem_heap_free(heap);
768 
769  return(DB_SUCCESS);
770 }
771 
772 /***********************************************************/
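/* Parses the undo log record in node->undo_rec: looks up the table, reads
the system columns and the row reference, and builds the update vector
into the undo node. */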
774 static
775 void
776 row_undo_mod_parse_undo_rec(
777 /*========================*/
778  undo_node_t* node,
779  que_thr_t* thr)
780 {
781  dict_index_t* clust_index;
782  byte* ptr;
783  undo_no_t undo_no;
784  table_id_t table_id;
785  trx_id_t trx_id;
786  roll_ptr_t roll_ptr;
787  ulint info_bits;
788  ulint type;
789  ulint cmpl_info;
790  ibool dummy_extern;
791  trx_t* trx;
792 
793  ut_ad(node && thr);
794  trx = thr_get_trx(thr);
795  ptr = trx_undo_rec_get_pars(node->undo_rec, &type, &cmpl_info,
796  &dummy_extern, &undo_no, &table_id);
797  node->rec_type = type;
798 
799  node->table = dict_table_get_on_id(table_id, trx);
800 
801  /* TODO: other fixes associated with DROP TABLE + rollback in the
802  same table by another user */
803 
804  if (node->table == NULL) {
805  /* Table was dropped */
806  return;
807  }
808 
809  if (node->table->ibd_file_missing) {
810  /* We skip undo operations to missing .ibd files */
811  node->table = NULL;
812 
813  return;
814  }
815 
816  clust_index = dict_table_get_first_index(node->table);
817 
818  ptr = trx_undo_update_rec_get_sys_cols(ptr, &trx_id, &roll_ptr,
819  &info_bits);
820 
821  ptr = trx_undo_rec_get_row_ref(ptr, clust_index, &(node->ref),
822  node->heap);
823 
824  trx_undo_update_rec_get_update(ptr, clust_index, type, trx_id,
825  roll_ptr, info_bits, trx,
826  node->heap, &(node->update));
827  node->new_roll_ptr = roll_ptr;
828  node->new_trx_id = trx_id;
829  node->cmpl_info = cmpl_info;
830 }
831 
832 /***********************************************************/
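/* Undoes a modify operation on a row of a table: first in the secondary
indexes according to the undo record type, then in the clustered index
record. */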
835 UNIV_INTERN
836 ulint
837 row_undo_mod(
838 /*=========*/
839  undo_node_t* node,
840  que_thr_t* thr)
841 {
842  ulint err;
843 
844  ut_ad(node && thr);
845  ut_ad(node->state == UNDO_NODE_MODIFY);
846 
847  row_undo_mod_parse_undo_rec(node, thr);
848 
849  if (!node->table || !row_undo_search_clust_to_pcur(node)) {
850  /* It is already undone, or will be undone by another query
851  thread, or table was dropped */
852 
853  trx_undo_rec_release(node->trx, node->undo_no);
854  node->state = UNDO_NODE_FETCH_NEXT;
855 
856  return(DB_SUCCESS);
857  }
858 
859  node->index = dict_table_get_next_index(
860  dict_table_get_first_index(node->table));
861 
862  if (node->rec_type == TRX_UNDO_UPD_EXIST_REC) {
863 
864  err = row_undo_mod_upd_exist_sec(node, thr);
865 
866  } else if (node->rec_type == TRX_UNDO_DEL_MARK_REC) {
867 
868  err = row_undo_mod_del_mark_sec(node, thr);
869  } else {
870  ut_ad(node->rec_type == TRX_UNDO_UPD_DEL_REC);
871  err = row_undo_mod_upd_del_sec(node, thr);
872  }
873 
874  if (err != DB_SUCCESS) {
875 
876  return(err);
877  }
878 
879  err = row_undo_mod_clust(node, thr);
880 
881  return(err);
882 }
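
The control flow above is easier to see in outline: row_undo_mod() first undoes the secondary index entries according to the undo record type (TRX_UNDO_UPD_EXIST_REC, TRX_UNDO_DEL_MARK_REC or TRX_UNDO_UPD_DEL_REC) and then the clustered index record, and every B-tree change is attempted optimistically within a leaf page (BTR_MODIFY_LEAF) before being retried with a pessimistic, tree-reorganizing pass (BTR_MODIFY_TREE). The self-contained sketch below models only that control flow; the names (undo_leaf, undo_tree, undo_mod and the EX_* constants) are illustrative stand-ins, not InnoDB or Drizzle APIs.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins; the names and values are NOT the real InnoDB
error codes or undo record types. */
enum ex_db_err { EX_DB_SUCCESS, EX_DB_FAIL };
enum ex_undo_rec_type { EX_UPD_EXIST_REC, EX_DEL_MARK_REC, EX_UPD_DEL_REC };

/* Model of an optimistic operation that must stay within one index page.
It fails on the first call to force the pessimistic retry path. */
static enum ex_db_err
undo_leaf(const char* what)
{
	static bool first_call = true;

	printf("optimistic (leaf page) undo of %s\n", what);

	if (first_call) {
		first_call = false;
		return(EX_DB_FAIL);	/* e.g. the update does not fit */
	}

	return(EX_DB_SUCCESS);
}

/* Model of the pessimistic fallback that may reorganize the tree. */
static enum ex_db_err
undo_tree(const char* what)
{
	printf("pessimistic (tree) undo of %s\n", what);

	return(EX_DB_SUCCESS);
}

/* The retry pattern used by row_undo_mod_clust(),
row_undo_mod_del_mark_sec() and the other helpers in this file. */
static enum ex_db_err
undo_with_retry(const char* what)
{
	enum ex_db_err	err = undo_leaf(what);

	if (err != EX_DB_SUCCESS) {
		err = undo_tree(what);
	}

	return(err);
}

/* The dispatch performed by row_undo_mod(): secondary index entries first,
according to the undo record type, then the clustered index record. */
static enum ex_db_err
undo_mod(enum ex_undo_rec_type type)
{
	enum ex_db_err	err = EX_DB_SUCCESS;

	switch (type) {
	case EX_UPD_EXIST_REC:
		err = undo_with_retry("the changed secondary index entries");
		break;
	case EX_DEL_MARK_REC:
		err = undo_with_retry("the delete-marked secondary entries");
		break;
	case EX_UPD_DEL_REC:
		err = undo_with_retry("the secondary entries of the"
				      " delete-unmarked row");
		break;
	}

	if (err != EX_DB_SUCCESS) {
		return(err);
	}

	return(undo_with_retry("the clustered index record"));
}

int
main(void)
{
	return(undo_mod(EX_UPD_EXIST_REC) == EX_DB_SUCCESS ? 0 : 1);
}

The "leaf first, tree only on failure" order matters in the real code because the optimistic path stays within a single leaf page, while the pessimistic path may modify the index tree structure, as the comments in row_undo_mod_clust() above note.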