Drizzled Public API Documentation

trx0i_s.cc
1 /*****************************************************************************
2 
3 Copyright (C) 2007, 2010, Innobase Oy. All Rights Reserved.
4 
5 This program is free software; you can redistribute it and/or modify it under
6 the terms of the GNU General Public License as published by the Free Software
7 Foundation; version 2 of the License.
8 
9 This program is distributed in the hope that it will be useful, but WITHOUT
10 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
11 FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
12 
13 You should have received a copy of the GNU General Public License along with
14 this program; if not, write to the Free Software Foundation, Inc., 51 Franklin
15 St, Fifth Floor, Boston, MA 02110-1301 USA
16 
17 *****************************************************************************/
18 
19 /**************************************************/
31 #include <config.h>
32 /* Found during the build of 5.5.3 on Linux 2.4 and early 2.6 kernels:
33  The includes "univ.i" -> "my_global.h" cause a different path
34  to be taken further down with pthread functions and types,
35  so they must come first.
36  From the symptoms, this is related to bug#46587 in the MySQL bug DB.
37 */
38 #include "univ.i"
39 
40 #if !defined(BUILD_DRIZZLE)
41 # include <mysql/plugin.h>
42 #endif
43 
44 #include "buf0buf.h"
45 #include "dict0dict.h"
46 #include "ha0storage.h"
47 #include "ha_prototypes.h"
48 #include "hash0hash.h"
49 #include "lock0iter.h"
50 #include "lock0lock.h"
51 #include "mem0mem.h"
52 #include "page0page.h"
53 #include "rem0rec.h"
54 #include "row0row.h"
55 #include "srv0srv.h"
56 #include "sync0rw.h"
57 #include "sync0sync.h"
58 #include "sync0types.h"
59 #include "trx0i_s.h"
60 #include "trx0sys.h"
61 #include "trx0trx.h"
62 #include "ut0mem.h"
63 #include "ut0ut.h"
64 
65 #include <drizzled/session.h>
66 
68 #define TABLE_CACHE_INITIAL_ROWSNUM 1024
69 
78 #define MEM_CHUNKS_IN_TABLE_CACHE 39
79 
82 /* @{ */
83 
84 #if 0
85 
89 #define TEST_LOCK_FOLD_ALWAYS_DIFFERENT
90 #endif
91 
92 #if 0
93 
97 #define TEST_NO_LOCKS_ROW_IS_EVER_EQUAL_TO_LOCK_T
98 #endif
99 
100 #if 0
101 
104 #define TEST_ADD_EACH_LOCKS_ROW_MANY_TIMES
105 #endif
106 
107 #if 0
108 
110 #define TEST_DO_NOT_CHECK_FOR_DUPLICATE_ROWS
111 #endif
112 
113 #if 0
114 
117 #define TEST_DO_NOT_INSERT_INTO_THE_HASH_TABLE
118 #endif
119 /* @} */
120 
124 #define MAX_ALLOWED_FOR_STORAGE(cache) \
125  (TRX_I_S_MEM_LIMIT \
126  - (cache)->mem_allocd)
127 
131 #define MAX_ALLOWED_FOR_ALLOC(cache) \
132  (TRX_I_S_MEM_LIMIT \
133  - (cache)->mem_allocd \
134  - ha_storage_get_size((cache)->storage))
135 
139 typedef struct i_s_mem_chunk_struct {
140  ulint offset;
141  ulint rows_allocd;
143  void* base;
145 
147 typedef struct i_s_table_cache_struct {
148  ulint rows_used;
149  ulint rows_allocd;
150  ulint row_size;
151  i_s_mem_chunk_t chunks[MEM_CHUNKS_IN_TABLE_CACHE];
155 
160  ullint last_read;
171 #define LOCKS_HASH_CELLS_NUM 10000
176 #define CACHE_STORAGE_INITIAL_SIZE 1024
177 
178 #define CACHE_STORAGE_HASH_CELLS 2048
183  ulint mem_allocd;
185  ibool is_truncated;
188 };
189 
193 static trx_i_s_cache_t trx_i_s_cache_static;
197 UNIV_INTERN trx_i_s_cache_t* trx_i_s_cache = &trx_i_s_cache_static;
198 
199 /* Key to register the lock/mutex with performance schema */
200 #ifdef UNIV_PFS_RWLOCK
201 UNIV_INTERN mysql_pfs_key_t trx_i_s_cache_lock_key;
202 #endif /* UNIV_PFS_RWLOCK */
203 
204 #ifdef UNIV_PFS_MUTEX
205 UNIV_INTERN mysql_pfs_key_t cache_last_read_mutex_key;
206 #endif /* UNIV_PFS_MUTEX */
207 
208 /*******************************************************************/
212 static
213 ulint
214 wait_lock_get_heap_no(
215 /*==================*/
216  const lock_t* lock)
217 {
218  ulint ret;
219 
220  switch (lock_get_type(lock)) {
221  case LOCK_REC:
222  ret = lock_rec_find_set_bit(lock);
223  ut_a(ret != ULINT_UNDEFINED);
224  break;
225  case LOCK_TABLE:
226  ret = ULINT_UNDEFINED;
227  break;
228  default:
229  ut_error;
230  }
231 
232  return(ret);
233 }
234 
235 /*******************************************************************/
237 static
238 void
239 table_cache_init(
240 /*=============*/
241  i_s_table_cache_t* table_cache,
242  size_t row_size)
244 {
245  ulint i;
246 
247  table_cache->rows_used = 0;
248  table_cache->rows_allocd = 0;
249  table_cache->row_size = row_size;
250 
251  for (i = 0; i < MEM_CHUNKS_IN_TABLE_CACHE; i++) {
252 
253  /* the memory is actually allocated in
254  table_cache_create_empty_row() */
255  table_cache->chunks[i].base = NULL;
256  }
257 }
258 
259 /*******************************************************************/
261 static
262 void
263 table_cache_free(
264 /*=============*/
265  i_s_table_cache_t* table_cache)
266 {
267  ulint i;
268 
269  for (i = 0; i < MEM_CHUNKS_IN_TABLE_CACHE; i++) {
270 
271  /* the memory is actually allocated in
272  table_cache_create_empty_row() */
273  if (table_cache->chunks[i].base) {
274  mem_free(table_cache->chunks[i].base);
275  table_cache->chunks[i].base = NULL;
276  }
277  }
278 }
279 
280 /*******************************************************************/
286 static
287 void*
288 table_cache_create_empty_row(
289 /*=========================*/
290  i_s_table_cache_t* table_cache,
291  trx_i_s_cache_t* cache)
294 {
295  ulint i;
296  void* row;
297 
298  ut_a(table_cache->rows_used <= table_cache->rows_allocd);
299 
300  if (table_cache->rows_used == table_cache->rows_allocd) {
301 
302  /* rows_used == rows_allocd means that new chunk needs
303  to be allocated: either no more empty rows in the
304  last allocated chunk or nothing has been allocated yet
305  (rows_num == rows_allocd == 0); */
306 
307  i_s_mem_chunk_t* chunk;
308  ulint req_bytes;
309  ulint got_bytes;
310  ulint req_rows;
311  ulint got_rows;
312 
313  /* find the first not allocated chunk */
314  for (i = 0; i < MEM_CHUNKS_IN_TABLE_CACHE; i++) {
315 
316  if (table_cache->chunks[i].base == NULL) {
317 
318  break;
319  }
320  }
321 
322  /* i == MEM_CHUNKS_IN_TABLE_CACHE means that all chunks
323  have been allocated :-X */
324  ut_a(i < MEM_CHUNKS_IN_TABLE_CACHE);
325 
326  /* allocate the chunk we just found */
327 
328  if (i == 0) {
329 
330  /* first chunk, nothing is allocated yet */
331  req_rows = TABLE_CACHE_INITIAL_ROWSNUM;
332  } else {
333 
334  /* Memory is increased by the formula
335  new = old + old / 2; We are trying not to be
336  aggressive here (= using the common new = old * 2)
337  because the allocated memory will not be freed
338  until InnoDB exit (it is reused). So it is better
339  to once allocate the memory in more steps, but
340  have less unused/wasted memory than to use less
341  steps in allocation (which is done once in a
342  lifetime) but end up with lots of unused/wasted
343  memory. */
344  req_rows = table_cache->rows_allocd / 2;
345  }
346  req_bytes = req_rows * table_cache->row_size;
347 
348  if (req_bytes > MAX_ALLOWED_FOR_ALLOC(cache)) {
349 
350  return(NULL);
351  }
352 
353  chunk = &table_cache->chunks[i];
354 
355  chunk->base = mem_alloc2(req_bytes, &got_bytes);
356 
357  got_rows = got_bytes / table_cache->row_size;
358 
359  cache->mem_allocd += got_bytes;
360 
361 #if 0
362  printf("allocating chunk %d req bytes=%lu, got bytes=%lu, "
363  "row size=%lu, "
364  "req rows=%lu, got rows=%lu\n",
365  i, req_bytes, got_bytes,
366  table_cache->row_size,
367  req_rows, got_rows);
368 #endif
369 
370  chunk->rows_allocd = got_rows;
371 
372  table_cache->rows_allocd += got_rows;
373 
374  /* adjust the offset of the next chunk */
375  if (i < MEM_CHUNKS_IN_TABLE_CACHE - 1) {
376 
377  table_cache->chunks[i + 1].offset
378  = chunk->offset + chunk->rows_allocd;
379  }
380 
381  /* return the first empty row in the newly allocated
382  chunk */
383  row = chunk->base;
384  } else {
385 
386  char* chunk_start;
387  ulint offset;
388 
389  /* there is an empty row, no need to allocate new
390  chunks */
391 
392  /* find the first chunk that contains allocated but
393  empty/unused rows */
394  for (i = 0; i < MEM_CHUNKS_IN_TABLE_CACHE; i++) {
395 
396  if (table_cache->chunks[i].offset
397  + table_cache->chunks[i].rows_allocd
398  > table_cache->rows_used) {
399 
400  break;
401  }
402  }
403 
404  /* i == MEM_CHUNKS_IN_TABLE_CACHE means that all chunks
405  are full, but
406  table_cache->rows_used != table_cache->rows_allocd means
407  exactly the opposite - there are allocated but
408  empty/unused rows :-X */
409  ut_a(i < MEM_CHUNKS_IN_TABLE_CACHE);
410 
411  chunk_start = (char*) table_cache->chunks[i].base;
412  offset = table_cache->rows_used
413  - table_cache->chunks[i].offset;
414 
415  row = chunk_start + offset * table_cache->row_size;
416  }
417 
418  table_cache->rows_used++;
419 
420  return(row);
421 }
422 
#ifdef UNIV_DEBUG
/*******************************************************************//**
Validates an i_s_locks_row_t object (debug builds only).
@return	TRUE if the row passed the checks */
static
ibool
i_s_locks_row_validate(
/*===================*/
	const i_s_locks_row_t*	row)	/*!< in: row to validate */
{
	ut_ad(row->lock_trx_id != 0);
	ut_ad(row->lock_mode != NULL);
	ut_ad(row->lock_type != NULL);
	ut_ad(row->lock_table != NULL);
	ut_ad(row->lock_table_id != 0);

	if (row->lock_space != ULINT_UNDEFINED) {
		/* record lock: page coordinates and index name must
		be filled in */
		ut_ad(!strcmp("RECORD", row->lock_type));
		ut_ad(row->lock_index != NULL);
		/* row->lock_data == NULL if buf_page_try_get() == NULL */
		ut_ad(row->lock_page != ULINT_UNDEFINED);
		ut_ad(row->lock_rec != ULINT_UNDEFINED);
	} else {
		/* table lock: no page coordinates, no index, no data */
		ut_ad(!strcmp("TABLE", row->lock_type));
		ut_ad(row->lock_index == NULL);
		ut_ad(row->lock_data == NULL);
		ut_ad(row->lock_page == ULINT_UNDEFINED);
		ut_ad(row->lock_rec == ULINT_UNDEFINED);
	}

	return(TRUE);
}
#endif /* UNIV_DEBUG */
458 
459 /*******************************************************************/
463 static
464 ibool
465 fill_trx_row(
466 /*=========*/
467  i_s_trx_row_t* row,
469  const trx_t* trx,
471  const i_s_locks_row_t* requested_lock_row,
476  trx_i_s_cache_t* cache)
479 {
480  const char* stmt;
481  size_t stmt_len;
482  const char* s;
483 
484  ut_ad(mutex_own(&kernel_mutex));
485 
486  row->trx_id = trx->id;
487  row->trx_started = (ib_time_t) trx->start_time;
488  row->trx_state = trx_get_que_state_str(trx);
489  row->requested_lock_row = requested_lock_row;
490  ut_ad(requested_lock_row == NULL
491  || i_s_locks_row_validate(requested_lock_row));
492 
493  if (trx->wait_lock != NULL) {
494  ut_a(requested_lock_row != NULL);
496  } else {
497  ut_a(requested_lock_row == NULL);
498  row->trx_wait_started = 0;
499  }
500 
501  row->trx_weight = (ullint) TRX_WEIGHT(trx);
502 
503  if (trx->mysql_thd == NULL) {
504  /* For internal transactions e.g., purge and transactions
505  being recovered at startup there is no associated MySQL
506  thread data structure. */
507  row->trx_mysql_thread_id = 0;
508  row->trx_query = NULL;
509  goto thd_done;
510  }
511 
512  row->trx_mysql_thread_id = trx->session()->getSessionId();
513  stmt= trx->mysql_thd->getQueryStringCopy(stmt_len);
514 
515  if (stmt != NULL) {
516  char query[TRX_I_S_TRX_QUERY_MAX_LEN + 1];
517 
518  if (stmt_len > TRX_I_S_TRX_QUERY_MAX_LEN) {
519  stmt_len = TRX_I_S_TRX_QUERY_MAX_LEN;
520  }
521 
522  memcpy(query, stmt, stmt_len);
523  query[stmt_len] = '\0';
524 
525  row->trx_query = static_cast<const char *>(ha_storage_put_memlim(
526  cache->storage, query, stmt_len + 1,
527  MAX_ALLOWED_FOR_STORAGE(cache)));
528 
529  if (row->trx_query == NULL) {
530 
531  return(FALSE);
532  }
533  } else {
534 
535  row->trx_query = NULL;
536  }
537 
538 thd_done:
539  s = trx->op_info;
540 
541  if (s != NULL && s[0] != '\0') {
542 
545 
546  if (row->trx_operation_state == NULL) {
547 
548  return(FALSE);
549  }
550  } else {
551 
552  row->trx_operation_state = NULL;
553  }
554 
555 // row->trx_tables_in_use = trx->n_mysql_tables_in_use;
556 
557  row->trx_tables_locked = trx->mysql_n_tables_locked;
558 
560 
562 
564 
565  row->trx_rows_modified = trx->undo_no;
566 
567  row->trx_concurrency_tickets = trx->n_tickets_to_enter_innodb;
568 
569  switch (trx->isolation_level) {
570  case TRX_ISO_READ_UNCOMMITTED:
571  row->trx_isolation_level = "READ UNCOMMITTED";
572  break;
573  case TRX_ISO_READ_COMMITTED:
574  row->trx_isolation_level = "READ COMMITTED";
575  break;
576  case TRX_ISO_REPEATABLE_READ:
577  row->trx_isolation_level = "REPEATABLE READ";
578  break;
579  case TRX_ISO_SERIALIZABLE:
580  row->trx_isolation_level = "SERIALIZABLE";
581  break;
582  /* Should not happen as TRX_ISO_READ_COMMITTED is default */
583  default:
584  row->trx_isolation_level = "UNKNOWN";
585  }
586 
587  row->trx_unique_checks = (ibool) trx->check_unique_secondary;
588 
589  row->trx_foreign_key_checks = (ibool) trx->check_foreigns;
590 
591  s = trx->detailed_error;
592 
593  if (s != NULL && s[0] != '\0') {
594 
598 
599  if (row->trx_foreign_key_error == NULL) {
600 
601  return(FALSE);
602  }
603  } else {
604  row->trx_foreign_key_error = NULL;
605  }
606 
607  row->trx_has_search_latch = (ibool) trx->has_search_latch;
608 
609  row->trx_search_latch_timeout = trx->search_latch_timeout;
610 
611  return(TRUE);
612 }
613 
614 /*******************************************************************/
619 static
620 ulint
621 put_nth_field(
622 /*==========*/
623  char* buf,
624  ulint buf_size,
625  ulint n,
626  const dict_index_t* index,
627  const rec_t* rec,
628  const ulint* offsets)
630 {
631  const byte* data;
632  ulint data_len;
633  dict_field_t* dict_field;
634  ulint ret;
635 
636  ut_ad(rec_offs_validate(rec, NULL, offsets));
637 
638  if (buf_size == 0) {
639 
640  return(0);
641  }
642 
643  ret = 0;
644 
645  if (n > 0) {
646  /* we must append ", " before the actual data */
647 
648  if (buf_size < 3) {
649 
650  buf[0] = '\0';
651  return(1);
652  }
653 
654  memcpy(buf, ", ", 3);
655 
656  buf += 2;
657  buf_size -= 2;
658  ret += 2;
659  }
660 
661  /* now buf_size >= 1 */
662 
663  data = rec_get_nth_field(rec, offsets, n, &data_len);
664 
665  dict_field = dict_index_get_nth_field(index, n);
666 
667  ret += row_raw_format((const char*) data, data_len,
668  dict_field, buf, buf_size);
669 
670  return(ret);
671 }
672 
673 /*******************************************************************/
677 static
678 ibool
679 fill_lock_data(
680 /*===========*/
681  const char** lock_data,
682  const lock_t* lock,
683  ulint heap_no,
684  trx_i_s_cache_t* cache)
686 {
687  mtr_t mtr;
688 
689  const buf_block_t* block;
690  const page_t* page;
691  const rec_t* rec;
692 
693  ut_a(lock_get_type(lock) == LOCK_REC);
694 
695  mtr_start(&mtr);
696 
698  lock_rec_get_page_no(lock),
699  &mtr);
700 
701  if (block == NULL) {
702 
703  *lock_data = NULL;
704 
705  mtr_commit(&mtr);
706 
707  return(TRUE);
708  }
709 
710  page = (const page_t*) buf_block_get_frame(block);
711 
712  rec = page_find_rec_with_heap_no(page, heap_no);
713 
714  if (page_rec_is_infimum(rec)) {
715 
716  *lock_data = ha_storage_put_str_memlim(
717  cache->storage, "infimum pseudo-record",
718  MAX_ALLOWED_FOR_STORAGE(cache));
719  } else if (page_rec_is_supremum(rec)) {
720 
721  *lock_data = ha_storage_put_str_memlim(
722  cache->storage, "supremum pseudo-record",
723  MAX_ALLOWED_FOR_STORAGE(cache));
724  } else {
725 
726  const dict_index_t* index;
727  ulint n_fields;
728  mem_heap_t* heap;
729  ulint offsets_onstack[REC_OFFS_NORMAL_SIZE];
730  ulint* offsets;
731  char buf[TRX_I_S_LOCK_DATA_MAX_LEN];
732  ulint buf_used;
733  ulint i;
734 
735  rec_offs_init(offsets_onstack);
736  offsets = offsets_onstack;
737 
738  index = lock_rec_get_index(lock);
739 
740  n_fields = dict_index_get_n_unique(index);
741 
742  ut_a(n_fields > 0);
743 
744  heap = NULL;
745  offsets = rec_get_offsets(rec, index, offsets, n_fields,
746  &heap);
747 
748  /* format and store the data */
749 
750  buf_used = 0;
751  for (i = 0; i < n_fields; i++) {
752 
753  buf_used += put_nth_field(
754  buf + buf_used, sizeof(buf) - buf_used,
755  i, index, rec, offsets) - 1;
756  }
757 
758  *lock_data = (const char*) ha_storage_put_memlim(
759  cache->storage, buf, buf_used + 1,
760  MAX_ALLOWED_FOR_STORAGE(cache));
761 
762  if (UNIV_UNLIKELY(heap != NULL)) {
763 
764  /* this means that rec_get_offsets() has created a new
765  heap and has stored offsets in it; check that this is
766  really the case and free the heap */
767  ut_a(offsets != offsets_onstack);
768  mem_heap_free(heap);
769  }
770  }
771 
772  mtr_commit(&mtr);
773 
774  if (*lock_data == NULL) {
775 
776  return(FALSE);
777  }
778 
779  return(TRUE);
780 }
781 
782 /*******************************************************************/
786 static
787 ibool
788 fill_locks_row(
789 /*===========*/
790  i_s_locks_row_t* row,
791  const lock_t* lock,
792  ulint heap_no,
795  trx_i_s_cache_t* cache)
797 {
798  row->lock_trx_id = lock_get_trx_id(lock);
799  row->lock_mode = lock_get_mode_str(lock);
800  row->lock_type = lock_get_type_str(lock);
801 
803  cache->storage, lock_get_table_name(lock),
804  MAX_ALLOWED_FOR_STORAGE(cache));
805 
806  /* memory could not be allocated */
807  if (row->lock_table == NULL) {
808 
809  return(FALSE);
810  }
811 
812  switch (lock_get_type(lock)) {
813  case LOCK_REC:
815  cache->storage, lock_rec_get_index_name(lock),
816  MAX_ALLOWED_FOR_STORAGE(cache));
817 
818  /* memory could not be allocated */
819  if (row->lock_index == NULL) {
820 
821  return(FALSE);
822  }
823 
824  row->lock_space = lock_rec_get_space_id(lock);
825  row->lock_page = lock_rec_get_page_no(lock);
826  row->lock_rec = heap_no;
827 
828  if (!fill_lock_data(&row->lock_data, lock, heap_no, cache)) {
829 
830  /* memory could not be allocated */
831  return(FALSE);
832  }
833 
834  break;
835  case LOCK_TABLE:
836  row->lock_index = NULL;
837 
838  row->lock_space = ULINT_UNDEFINED;
839  row->lock_page = ULINT_UNDEFINED;
840  row->lock_rec = ULINT_UNDEFINED;
841 
842  row->lock_data = NULL;
843 
844  break;
845  default:
846  ut_error;
847  }
848 
849  row->lock_table_id = lock_get_table_id(lock);
850 
851  row->hash_chain.value = row;
852  ut_ad(i_s_locks_row_validate(row));
853 
854  return(TRUE);
855 }
856 
857 /*******************************************************************/
860 static
862 fill_lock_waits_row(
863 /*================*/
864  i_s_lock_waits_row_t* row,
866  const i_s_locks_row_t* requested_lock_row,
869  const i_s_locks_row_t* blocking_lock_row)
872 {
873  ut_ad(i_s_locks_row_validate(requested_lock_row));
874  ut_ad(i_s_locks_row_validate(blocking_lock_row));
875 
876  row->requested_lock_row = requested_lock_row;
877  row->blocking_lock_row = blocking_lock_row;
878 
879  return(row);
880 }
881 
882 /*******************************************************************/
888 static
889 ulint
890 fold_lock(
891 /*======*/
892  const lock_t* lock,
893  ulint heap_no)
896 {
897 #ifdef TEST_LOCK_FOLD_ALWAYS_DIFFERENT
898  static ulint fold = 0;
899 
900  return(fold++);
901 #else
902  ulint ret;
903 
904  switch (lock_get_type(lock)) {
905  case LOCK_REC:
906  ut_a(heap_no != ULINT_UNDEFINED);
907 
908  ret = ut_fold_ulint_pair((ulint) lock_get_trx_id(lock),
909  lock_rec_get_space_id(lock));
910 
911  ret = ut_fold_ulint_pair(ret,
912  lock_rec_get_page_no(lock));
913 
914  ret = ut_fold_ulint_pair(ret, heap_no);
915 
916  break;
917  case LOCK_TABLE:
918  /* this check is actually not necessary for continuing
919  correct operation, but something must have gone wrong if
920  it fails. */
921  ut_a(heap_no == ULINT_UNDEFINED);
922 
923  ret = (ulint) lock_get_table_id(lock);
924 
925  break;
926  default:
927  ut_error;
928  }
929 
930  return(ret);
931 #endif
932 }
933 
934 /*******************************************************************/
937 static
938 ibool
939 locks_row_eq_lock(
940 /*==============*/
941  const i_s_locks_row_t* row,
942  const lock_t* lock,
943  ulint heap_no)
946 {
947  ut_ad(i_s_locks_row_validate(row));
948 #ifdef TEST_NO_LOCKS_ROW_IS_EVER_EQUAL_TO_LOCK_T
949  return(0);
950 #else
951  switch (lock_get_type(lock)) {
952  case LOCK_REC:
953  ut_a(heap_no != ULINT_UNDEFINED);
954 
955  return(row->lock_trx_id == lock_get_trx_id(lock)
956  && row->lock_space == lock_rec_get_space_id(lock)
957  && row->lock_page == lock_rec_get_page_no(lock)
958  && row->lock_rec == heap_no);
959 
960  case LOCK_TABLE:
961  /* this check is actually not necessary for continuing
962  correct operation, but something must have gone wrong if
963  it fails. */
964  ut_a(heap_no == ULINT_UNDEFINED);
965 
966  return(row->lock_trx_id == lock_get_trx_id(lock)
967  && row->lock_table_id == lock_get_table_id(lock));
968 
969  default:
970  ut_error;
971  return(FALSE);
972  }
973 #endif
974 }
975 
976 /*******************************************************************/
981 static
983 search_innodb_locks(
984 /*================*/
985  trx_i_s_cache_t* cache,
986  const lock_t* lock,
987  ulint heap_no)
990 {
991  i_s_hash_chain_t* hash_chain;
992 
993  HASH_SEARCH(
994  /* hash_chain->"next" */
995  next,
996  /* the hash table */
997  cache->locks_hash,
998  /* fold */
999  fold_lock(lock, heap_no),
1000  /* the type of the next variable */
1002  /* auxiliary variable */
1003  hash_chain,
1004  /* assertion on every traversed item */
1005  ut_ad(i_s_locks_row_validate(hash_chain->value)),
1006  /* this determines if we have found the lock */
1007  locks_row_eq_lock(hash_chain->value, lock, heap_no));
1008 
1009  if (hash_chain == NULL) {
1010 
1011  return(NULL);
1012  }
1013  /* else */
1014 
1015  return(hash_chain->value);
1016 }
1017 
1018 /*******************************************************************/
1024 static
1026 add_lock_to_cache(
1027 /*==============*/
1028  trx_i_s_cache_t* cache,
1029  const lock_t* lock,
1030  ulint heap_no)
1033 {
1034  i_s_locks_row_t* dst_row;
1035 
1036 #ifdef TEST_ADD_EACH_LOCKS_ROW_MANY_TIMES
1037  ulint i;
1038  for (i = 0; i < 10000; i++) {
1039 #endif
1040 #ifndef TEST_DO_NOT_CHECK_FOR_DUPLICATE_ROWS
1041  /* quit if this lock is already present */
1042  dst_row = search_innodb_locks(cache, lock, heap_no);
1043  if (dst_row != NULL) {
1044 
1045  ut_ad(i_s_locks_row_validate(dst_row));
1046  return(dst_row);
1047  }
1048 #endif
1049 
1050  dst_row = (i_s_locks_row_t*)
1051  table_cache_create_empty_row(&cache->innodb_locks, cache);
1052 
1053  /* memory could not be allocated */
1054  if (dst_row == NULL) {
1055 
1056  return(NULL);
1057  }
1058 
1059  if (!fill_locks_row(dst_row, lock, heap_no, cache)) {
1060 
1061  /* memory could not be allocated */
1062  cache->innodb_locks.rows_used--;
1063  return(NULL);
1064  }
1065 
1066 #ifndef TEST_DO_NOT_INSERT_INTO_THE_HASH_TABLE
1067  HASH_INSERT(
1068  /* the type used in the hash chain */
1070  /* hash_chain->"next" */
1071  next,
1072  /* the hash table */
1073  cache->locks_hash,
1074  /* fold */
1075  fold_lock(lock, heap_no),
1076  /* add this data to the hash */
1077  &dst_row->hash_chain);
1078 #endif
1079 #ifdef TEST_ADD_EACH_LOCKS_ROW_MANY_TIMES
1080  } /* for()-loop */
1081 #endif
1082 
1083  ut_ad(i_s_locks_row_validate(dst_row));
1084  return(dst_row);
1085 }
1086 
1087 /*******************************************************************/
1091 static
1092 ibool
1093 add_lock_wait_to_cache(
1094 /*===================*/
1095  trx_i_s_cache_t* cache,
1096  const i_s_locks_row_t* requested_lock_row,
1099  const i_s_locks_row_t* blocking_lock_row)
1102 {
1103  i_s_lock_waits_row_t* dst_row;
1104 
1105  dst_row = (i_s_lock_waits_row_t*)
1106  table_cache_create_empty_row(&cache->innodb_lock_waits,
1107  cache);
1108 
1109  /* memory could not be allocated */
1110  if (dst_row == NULL) {
1111 
1112  return(FALSE);
1113  }
1114 
1115  fill_lock_waits_row(dst_row, requested_lock_row, blocking_lock_row);
1116 
1117  return(TRUE);
1118 }
1119 
1120 /*******************************************************************/
1128 static
1129 ibool
1130 add_trx_relevant_locks_to_cache(
1131 /*============================*/
1132  trx_i_s_cache_t* cache,
1133  const trx_t* trx,
1134  i_s_locks_row_t** requested_lock_row)
1137 {
1138  ut_ad(mutex_own(&kernel_mutex));
1139 
1140  /* If transaction is waiting we add the wait lock and all locks
1141  from another transactions that are blocking the wait lock. */
1142  if (trx->que_state == TRX_QUE_LOCK_WAIT) {
1143 
1144  const lock_t* curr_lock;
1145  ulint wait_lock_heap_no;
1146  i_s_locks_row_t* blocking_lock_row;
1147  lock_queue_iterator_t iter;
1148 
1149  ut_a(trx->wait_lock != NULL);
1150 
1151  wait_lock_heap_no
1152  = wait_lock_get_heap_no(trx->wait_lock);
1153 
1154  /* add the requested lock */
1155  *requested_lock_row
1156  = add_lock_to_cache(cache, trx->wait_lock,
1157  wait_lock_heap_no);
1158 
1159  /* memory could not be allocated */
1160  if (*requested_lock_row == NULL) {
1161 
1162  return(FALSE);
1163  }
1164 
1165  /* then iterate over the locks before the wait lock and
1166  add the ones that are blocking it */
1167 
1169  ULINT_UNDEFINED);
1170 
1171  curr_lock = lock_queue_iterator_get_prev(&iter);
1172  while (curr_lock != NULL) {
1173 
1174  if (lock_has_to_wait(trx->wait_lock,
1175  curr_lock)) {
1176 
1177  /* add the lock that is
1178  blocking trx->wait_lock */
1179  blocking_lock_row
1180  = add_lock_to_cache(
1181  cache, curr_lock,
1182  /* heap_no is the same
1183  for the wait and waited
1184  locks */
1185  wait_lock_heap_no);
1186 
1187  /* memory could not be allocated */
1188  if (blocking_lock_row == NULL) {
1189 
1190  return(FALSE);
1191  }
1192 
1193  /* add the relation between both locks
1194  to innodb_lock_waits */
1195  if (!add_lock_wait_to_cache(
1196  cache, *requested_lock_row,
1197  blocking_lock_row)) {
1198 
1199  /* memory could not be allocated */
1200  return(FALSE);
1201  }
1202  }
1203 
1204  curr_lock = lock_queue_iterator_get_prev(&iter);
1205  }
1206  } else {
1207 
1208  *requested_lock_row = NULL;
1209  }
1210 
1211  return(TRUE);
1212 }
1213 
1218 #define CACHE_MIN_IDLE_TIME_US 100000 /* 0.1 sec */
1219 
1220 /*******************************************************************/
1223 static
1224 ibool
1225 can_cache_be_updated(
1226 /*=================*/
1227  trx_i_s_cache_t* cache)
1228 {
1229  ullint now;
1230 
1231  /* Here we read cache->last_read without acquiring its mutex
1232  because last_read is only updated when a shared rw lock on the
1233  whole cache is being held (see trx_i_s_cache_end_read()) and
1234  we are currently holding an exclusive rw lock on the cache.
1235  So it is not possible for last_read to be updated while we are
1236  reading it. */
1237 
1238 #ifdef UNIV_SYNC_DEBUG
1239  ut_a(rw_lock_own(&cache->rw_lock, RW_LOCK_EX));
1240 #endif
1241 
1242  now = ut_time_us(NULL);
1243  if (now - cache->last_read > CACHE_MIN_IDLE_TIME_US) {
1244 
1245  return(TRUE);
1246  }
1247 
1248  return(FALSE);
1249 }
1250 
1251 /*******************************************************************/
1254 static
1255 void
1256 trx_i_s_cache_clear(
1257 /*================*/
1258  trx_i_s_cache_t* cache)
1259 {
1260  cache->innodb_trx.rows_used = 0;
1261  cache->innodb_locks.rows_used = 0;
1262  cache->innodb_lock_waits.rows_used = 0;
1263 
1264  hash_table_clear(cache->locks_hash);
1265 
1266  ha_storage_empty(&cache->storage);
1267 }
1268 
1269 /*******************************************************************/
1272 static
1273 void
1274 fetch_data_into_cache(
1275 /*==================*/
1276  trx_i_s_cache_t* cache)
1277 {
1278  trx_t* trx;
1279  i_s_trx_row_t* trx_row;
1280  i_s_locks_row_t* requested_lock_row;
1281 
1282  ut_ad(mutex_own(&kernel_mutex));
1283 
1284  trx_i_s_cache_clear(cache);
1285 
1286  /* We iterate over the list of all transactions and add each one
1287  to innodb_trx's cache. We also add all locks that are relevant
1288  to each transaction into innodb_locks' and innodb_lock_waits'
1289  caches. */
1290 
1291  for (trx = UT_LIST_GET_FIRST(trx_sys->trx_list);
1292  trx != NULL;
1293  trx = UT_LIST_GET_NEXT(trx_list, trx)) {
1294 
1295  if (!add_trx_relevant_locks_to_cache(cache, trx,
1296  &requested_lock_row)) {
1297 
1298  cache->is_truncated = TRUE;
1299  return;
1300  }
1301 
1302  trx_row = (i_s_trx_row_t*)
1303  table_cache_create_empty_row(&cache->innodb_trx,
1304  cache);
1305 
1306  /* memory could not be allocated */
1307  if (trx_row == NULL) {
1308 
1309  cache->is_truncated = TRUE;
1310  return;
1311  }
1312 
1313  if (!fill_trx_row(trx_row, trx, requested_lock_row, cache)) {
1314 
1315  /* memory could not be allocated */
1316  cache->innodb_trx.rows_used--;
1317  cache->is_truncated = TRUE;
1318  return;
1319  }
1320  }
1321 
1322  cache->is_truncated = FALSE;
1323 }
1324 
1325 /*******************************************************************/
1329 UNIV_INTERN
1330 int
1332 /*===================================*/
1333  trx_i_s_cache_t* cache)
1334 {
1335  if (!can_cache_be_updated(cache)) {
1336 
1337  return(1);
1338  }
1339 
1340  /* We need to read trx_sys and record/table lock queues */
1341  mutex_enter(&kernel_mutex);
1342 
1343  fetch_data_into_cache(cache);
1344 
1345  mutex_exit(&kernel_mutex);
1346 
1347  return(0);
1348 }
1349 
1350 /*******************************************************************/
1354 UNIV_INTERN
1355 ibool
1357 /*=======================*/
1358  trx_i_s_cache_t* cache)
1359 {
1360  return(cache->is_truncated);
1361 }
1362 
1363 /*******************************************************************/
1365 UNIV_INTERN
1366 void
1368 /*===============*/
1369  trx_i_s_cache_t* cache)
1370 {
1371  /* The latching is done in the following order:
1372  acquire trx_i_s_cache_t::rw_lock, X
1373  acquire kernel_mutex
1374  release kernel_mutex
1375  release trx_i_s_cache_t::rw_lock
1376  acquire trx_i_s_cache_t::rw_lock, S
1377  acquire trx_i_s_cache_t::last_read_mutex
1378  release trx_i_s_cache_t::last_read_mutex
1379  release trx_i_s_cache_t::rw_lock */
1380 
1381  rw_lock_create(trx_i_s_cache_lock_key, &cache->rw_lock,
1382  SYNC_TRX_I_S_RWLOCK);
1383 
1384  cache->last_read = 0;
1385 
1386  mutex_create(cache_last_read_mutex_key,
1387  &cache->last_read_mutex, SYNC_TRX_I_S_LAST_READ);
1388 
1389  table_cache_init(&cache->innodb_trx, sizeof(i_s_trx_row_t));
1390  table_cache_init(&cache->innodb_locks, sizeof(i_s_locks_row_t));
1391  table_cache_init(&cache->innodb_lock_waits,
1392  sizeof(i_s_lock_waits_row_t));
1393 
1394  cache->locks_hash = hash_create(LOCKS_HASH_CELLS_NUM);
1395 
1396  cache->storage = ha_storage_create(CACHE_STORAGE_INITIAL_SIZE,
1397  CACHE_STORAGE_HASH_CELLS);
1398 
1399  cache->mem_allocd = 0;
1400 
1401  cache->is_truncated = FALSE;
1402 }
1403 
1404 /*******************************************************************/
1406 UNIV_INTERN
1407 void
1409 /*===============*/
1410  trx_i_s_cache_t* cache)
1411 {
1412  hash_table_free(cache->locks_hash);
1413  ha_storage_free(cache->storage);
1414  table_cache_free(&cache->innodb_trx);
1415  table_cache_free(&cache->innodb_locks);
1416  table_cache_free(&cache->innodb_lock_waits);
1417  memset(cache, 0, sizeof *cache);
1418 }
1419 
1420 /*******************************************************************/
1422 UNIV_INTERN
1423 void
1425 /*=====================*/
1426  trx_i_s_cache_t* cache)
1427 {
1428  rw_lock_s_lock(&cache->rw_lock);
1429 }
1430 
1431 /*******************************************************************/
1433 UNIV_INTERN
1434 void
1436 /*===================*/
1437  trx_i_s_cache_t* cache)
1438 {
1439  ullint now;
1440 
1441 #ifdef UNIV_SYNC_DEBUG
1442  ut_a(rw_lock_own(&cache->rw_lock, RW_LOCK_SHARED));
1443 #endif
1444 
1445  /* update cache last read time */
1446  now = ut_time_us(NULL);
1447  mutex_enter(&cache->last_read_mutex);
1448  cache->last_read = now;
1449  mutex_exit(&cache->last_read_mutex);
1450 
1451  rw_lock_s_unlock(&cache->rw_lock);
1452 }
1453 
1454 /*******************************************************************/
1456 UNIV_INTERN
1457 void
1459 /*======================*/
1460  trx_i_s_cache_t* cache)
1461 {
1462  rw_lock_x_lock(&cache->rw_lock);
1463 }
1464 
1465 /*******************************************************************/
1467 UNIV_INTERN
1468 void
1470 /*====================*/
1471  trx_i_s_cache_t* cache)
1472 {
1473 #ifdef UNIV_SYNC_DEBUG
1474  ut_a(rw_lock_own(&cache->rw_lock, RW_LOCK_EX));
1475 #endif
1476 
1477  rw_lock_x_unlock(&cache->rw_lock);
1478 }
1479 
1480 /*******************************************************************/
1483 static
1485 cache_select_table(
1486 /*===============*/
1487  trx_i_s_cache_t* cache,
1488  enum i_s_table table)
1489 {
1490  i_s_table_cache_t* table_cache;
1491 
1492 #ifdef UNIV_SYNC_DEBUG
1493  ut_a(rw_lock_own(&cache->rw_lock, RW_LOCK_SHARED)
1494  || rw_lock_own(&cache->rw_lock, RW_LOCK_EX));
1495 #endif
1496 
1497  switch (table) {
1498  case I_S_INNODB_TRX:
1499  table_cache = &cache->innodb_trx;
1500  break;
1501  case I_S_INNODB_LOCKS:
1502  table_cache = &cache->innodb_locks;
1503  break;
1504  case I_S_INNODB_LOCK_WAITS:
1505  table_cache = &cache->innodb_lock_waits;
1506  break;
1507  default:
1508  ut_error;
1509  }
1510 
1511  return(table_cache);
1512 }
1513 
1514 /*******************************************************************/
1518 UNIV_INTERN
1519 ulint
1521 /*========================*/
1522  trx_i_s_cache_t* cache,
1523  enum i_s_table table)
1524 {
1525  i_s_table_cache_t* table_cache;
1526 
1527  table_cache = cache_select_table(cache, table);
1528 
1529  return(table_cache->rows_used);
1530 }
1531 
1532 /*******************************************************************/
1536 UNIV_INTERN
1537 void*
1539 /*======================*/
1540  trx_i_s_cache_t* cache,
1541  enum i_s_table table,
1542  ulint n)
1543 {
1544  i_s_table_cache_t* table_cache;
1545  ulint i;
1546  void* row;
1547 
1548  table_cache = cache_select_table(cache, table);
1549 
1550  ut_a(n < table_cache->rows_used);
1551 
1552  row = NULL;
1553 
1554  for (i = 0; i < MEM_CHUNKS_IN_TABLE_CACHE; i++) {
1555 
1556  if (table_cache->chunks[i].offset
1557  + table_cache->chunks[i].rows_allocd > n) {
1558 
1559  row = (char*) table_cache->chunks[i].base
1560  + (n - table_cache->chunks[i].offset)
1561  * table_cache->row_size;
1562  break;
1563  }
1564  }
1565 
1566  ut_a(row != NULL);
1567 
1568  return(row);
1569 }
1570 
1571 /*******************************************************************/
1577 UNIV_INTERN
1578 char*
1580 /*===================*/
1581  const i_s_locks_row_t* row,
1582  char* lock_id,
1583  ulint lock_id_size)
1585 {
1586  int res_len;
1587 
1588  /* please adjust TRX_I_S_LOCK_ID_MAX_LEN if you change this */
1589 
1590  if (row->lock_space != ULINT_UNDEFINED) {
1591  /* record lock */
1592  res_len = ut_snprintf(lock_id, lock_id_size,
1593  TRX_ID_FMT ":%lu:%lu:%lu",
1594  row->lock_trx_id, row->lock_space,
1595  row->lock_page, row->lock_rec);
1596  } else {
1597  /* table lock */
1598  res_len = ut_snprintf(lock_id, lock_id_size,
1599  TRX_ID_FMT ":" TRX_ID_FMT,
1600  row->lock_trx_id,
1601  row->lock_table_id);
1602  }
1603 
1604  /* the typecast is safe because snprintf(3) never returns
1605  negative result */
1606  ut_a(res_len >= 0);
1607  ut_a((ulint) res_len < lock_id_size);
1608 
1609  return(lock_id);
1610 }
#define UT_LIST_GET_LEN(BASE)
Definition: ut0lst.h:217
trx_sys_t * trx_sys
Definition: trx0sys.cc:61
#define TRX_I_S_STRING_COPY(data, field, constraint, tcache)
Definition: trx0i_s.h:63
UNIV_INTERN void trx_i_s_cache_start_write(trx_i_s_cache_t *cache)
Definition: trx0i_s.cc:1458
UNIV_INTERN void trx_i_s_cache_free(trx_i_s_cache_t *cache)
Definition: trx0i_s.cc:1408
const char * lock_table
Definition: trx0i_s.h:105
#define rw_lock_create(K, L, level)
Definition: sync0rw.h:147
#define UT_LIST_GET_NEXT(NAME, N)
Definition: ut0lst.h:201
UNIV_INTERN ulint trx_i_s_cache_get_rows_used(trx_i_s_cache_t *cache, enum i_s_table table)
Definition: trx0i_s.cc:1520
i_s_table
Definition: trx0i_s.h:187
trx_id_t id
Definition: trx0trx.h:548
i_s_mem_chunk_t chunks[MEM_CHUNKS_IN_TABLE_CACHE]
Definition: trx0i_s.cc:151
ha_storage_t * storage
Definition: trx0i_s.cc:179
const char * lock_type
Definition: trx0i_s.h:103
const char * trx_state
Definition: trx0i_s.h:132
i_s_locks_row_t * value
Definition: trx0i_s.h:93
#define TRX_I_S_TRX_QUERY_MAX_LEN
Definition: trx0i_s.h:47
char detailed_error[256]
Definition: trx0trx.h:717
hash_table_t * locks_hash
Definition: trx0i_s.cc:172
table_id_t lock_table_id
Definition: trx0i_s.h:121
UNIV_INTERN void * trx_i_s_cache_get_nth_row(trx_i_s_cache_t *cache, enum i_s_table table, ulint n)
Definition: trx0i_s.cc:1538
UNIV_INTERN void trx_i_s_cache_end_write(trx_i_s_cache_t *cache)
Definition: trx0i_s.cc:1469
const i_s_locks_row_t * blocking_lock_row
Definition: trx0i_s.h:179
time_t ib_time_t
Definition: ut0ut.h:56
UNIV_INTERN ibool lock_has_to_wait(const lock_t *lock1, const lock_t *lock2)
Definition: lock0lock.cc:983
const lock_t * lock_queue_iterator_get_prev(lock_queue_iterator_t *iter)
Definition: lock0iter.cc:87
session_id_t getSessionId() const
Definition: session.h:644
UNIV_INTERN int trx_i_s_possibly_fetch_data_into_cache(trx_i_s_cache_t *cache)
Definition: trx0i_s.cc:1331
UNIV_INLINE ulint mem_heap_get_size(mem_heap_t *heap)
const char * trx_isolation_level
Definition: trx0i_s.h:160
#define mem_free(PTR)
Definition: mem0mem.h:249
UNIV_INTERN char * trx_i_s_create_lock_id(const i_s_locks_row_t *row, char *lock_id, ulint lock_id_size)
Definition: trx0i_s.cc:1579
#define ut_snprintf
Definition: ut0ut.h:398
#define TRX_ID_FMT
Definition: trx0types.h:33
UNIV_INTERN table_id_t lock_get_table_id(const lock_t *lock)
Definition: lock0lock.cc:5735
ibool trx_has_search_latch
Definition: trx0i_s.h:169
const char * trx_operation_state
Definition: trx0i_s.h:144
#define TRX_WEIGHT(t)
Definition: trx0trx.h:422
UNIV_INLINE void ha_storage_empty(ha_storage_t **storage)
undo_no_t undo_no
Definition: trx0trx.h:681
UNIV_INTERN const void * ha_storage_put_memlim(ha_storage_t *storage, const void *data, ulint data_len, ulint memlim)
Definition: ha0storage.cc:90
struct ha_storage_struct ha_storage_t
Definition: ha0storage.h:43
trx_id_t trx_id
Definition: trx0i_s.h:131
UNIV_INTERN const char * lock_get_type_str(const lock_t *lock)
Definition: lock0lock.cc:5696
UNIV_INTERN hash_table_t * hash_create(ulint n)
Definition: hash0hash.cc:102
trx_i_s_cache_t * trx_i_s_cache
Definition: trx0i_s.cc:197
mem_heap_t * lock_heap
Definition: trx0trx.h:653
ullint trx_weight
Definition: trx0i_s.h:140
UNIV_INTERN ullint ut_time_us(ullint *tloc)
Definition: ut0ut.cc:194
#define mem_heap_free(heap)
Definition: mem0mem.h:117
const char * trx_query
Definition: trx0i_s.h:142
ulint que_state
Definition: trx0trx.h:540
ulint trx_search_latch_timeout
Definition: trx0i_s.h:171
#define HASH_INSERT(TYPE, NAME, TABLE, FOLD, DATA)
Definition: hash0hash.h:101
UNIV_INLINE ha_storage_t * ha_storage_create(ulint initial_heap_bytes, ulint initial_hash_cells)
UNIV_INTERN void hash_table_free(hash_table_t *table)
Definition: hash0hash.cc:139
UNIV_INTERN void trx_i_s_cache_start_read(trx_i_s_cache_t *cache)
Definition: trx0i_s.cc:1424
UNIV_INTERN void mtr_commit(mtr_t *mtr) __attribute__((nonnull))
Definition: mtr0mtr.cc:247
UNIV_INTERN const char * lock_get_table_name(const lock_t *lock)
Definition: lock0lock.cc:5752
UNIV_INLINE ulint ut_fold_ulint_pair(ulint n1, ulint n2) __attribute__((const ))
const char * op_info
Definition: trx0trx.h:477
mutex_t last_read_mutex
Definition: trx0i_s.cc:163
UNIV_INTERN ulint row_raw_format(const char *data, ulint data_len, const dict_field_t *dict_field, char *buf, ulint buf_size)
Definition: row0row.cc:913
ibool trx_unique_checks
Definition: trx0i_s.h:162
UNIV_INLINE void hash_table_clear(hash_table_t *table)
ullint trx_rows_modified
Definition: trx0i_s.h:156
#define ut_a(EXPR)
Definition: ut0dbg.h:105
lock_t * wait_lock
Definition: trx0trx.h:637
UNIV_INLINE ibool page_rec_is_supremum(const rec_t *rec) __attribute__((const ))
UNIV_INTERN trx_id_t lock_get_trx_id(const lock_t *lock)
Definition: lock0lock.cc:5636
UNIV_INTERN const dict_index_t * lock_rec_get_index(const lock_t *lock)
Definition: lock0lock.cc:5768
#define UT_LIST_GET_FIRST(BASE)
Definition: ut0lst.h:224
UNIV_INTERN void trx_i_s_cache_end_read(trx_i_s_cache_t *cache)
Definition: trx0i_s.cc:1435
UNIV_INTERN ulint lock_rec_get_page_no(const lock_t *lock)
Definition: lock0lock.cc:5811
#define rw_lock_s_lock(M)
Definition: sync0rw.h:155
i_s_table_cache_t innodb_trx
Definition: trx0i_s.cc:167
#define TRX_I_S_TRX_OP_STATE_MAX_LEN
Definition: trx0i_s.h:51
#define ut_ad(EXPR)
Definition: ut0dbg.h:127
ulint trx_concurrency_tickets
Definition: trx0i_s.h:157
#define HASH_SEARCH(NAME, TABLE, FOLD, TYPE, DATA, ASSERTION, TEST)
Definition: hash0hash.h:176
UNIV_INLINE void ha_storage_free(ha_storage_t *storage)
UNIV_INTERN void trx_i_s_cache_init(trx_i_s_cache_t *cache)
Definition: trx0i_s.cc:1367
trx_id_t lock_trx_id
Definition: trx0i_s.h:100
#define ut_error
Definition: ut0dbg.h:115
UNIV_INTERN ibool trx_i_s_cache_is_truncated(trx_i_s_cache_t *cache)
Definition: trx0i_s.cc:1356
UNIV_INLINE ulint dict_index_get_n_unique(const dict_index_t *index)
#define TRX_I_S_LOCK_DATA_MAX_LEN
Definition: trx0i_s.h:43
ulint trx_rows_locked
Definition: trx0i_s.h:155
rw_lock_t rw_lock
Definition: trx0i_s.cc:158
UNIV_INTERN const char * lock_get_mode_str(const lock_t *lock)
Definition: lock0lock.cc:5649
const char * lock_mode
Definition: trx0i_s.h:101
#define LOCK_TABLE
Definition: lock0lock.h:762
time_t wait_started
Definition: trx0trx.h:647
const i_s_locks_row_t * requested_lock_row
Definition: trx0i_s.h:135
UNIV_INTERN ulint lock_rec_find_set_bit(const lock_t *lock)
Definition: lock0lock.cc:1057
const char * trx_foreign_key_error
Definition: trx0i_s.h:167
byte page_t
Definition: page0types.h:37
const rec_t * page_find_rec_with_heap_no(const page_t *page, ulint heap_no)
Definition: page0page.cc:2594
drizzled::Session * mysql_thd
Definition: trx0trx.h:559
i_s_table_cache_t innodb_lock_waits
Definition: trx0i_s.cc:169
UNIV_INTERN void lock_queue_iterator_reset(lock_queue_iterator_t *iter, const lock_t *lock, ulint bit_no)
Definition: lock0iter.cc:50
ulint trx_lock_memory_bytes
Definition: trx0i_s.h:152
i_s_hash_chain_t hash_chain
Definition: trx0i_s.h:124
ulint trx_lock_structs
Definition: trx0i_s.h:150
ulint trx_mysql_thread_id
Definition: trx0i_s.h:141
UNIV_INLINE void mtr_start(mtr_t *mtr) __attribute__((nonnull))
UNIV_INTERN ulint lock_get_type(const lock_t *lock)
Definition: lock0lock.cc:5624
const char * lock_data
Definition: trx0i_s.h:116
i_s_table_cache_t innodb_locks
Definition: trx0i_s.cc:168
UNIV_INLINE ibool page_rec_is_infimum(const rec_t *rec) __attribute__((const ))
time_t start_time
Definition: trx0trx.h:545
const i_s_locks_row_t * requested_lock_row
Definition: trx0i_s.h:178
UNIV_INTERN ulint lock_rec_get_space_id(const lock_t *lock)
Definition: lock0lock.cc:5797
ib_time_t trx_started
Definition: trx0i_s.h:134
#define buf_page_try_get(space_id, page_no, mtr)
Definition: buf0buf.h:378
UNIV_INTERN const char * lock_rec_get_index_name(const lock_t *lock)
Definition: lock0lock.cc:5783
UNIV_INTERN ulint lock_number_of_rows_locked(const trx_t *trx)
Definition: lock0lock.cc:1621
ibool trx_foreign_key_checks
Definition: trx0i_s.h:165
ib_time_t trx_wait_started
Definition: trx0i_s.h:139
const char * lock_index
Definition: trx0i_s.h:107
ulint trx_tables_locked
Definition: trx0i_s.h:147
#define ha_storage_put_str_memlim(storage, str, memlim)
Definition: ha0storage.h:104
#define TRX_I_S_TRX_FK_ERROR_MAX_LEN
Definition: trx0i_s.h:55
UNIV_INLINE ibool rec_offs_validate(const rec_t *rec, const dict_index_t *index, const ulint *offsets)
#define LOCK_REC
Definition: lock0lock.h:763
UNIV_INLINE const char * trx_get_que_state_str(const trx_t *trx)