Details
-
Bug
-
Status: Closed (View Workflow)
-
Critical
-
Resolution: Cannot Reproduce
-
10.6.1
Description
# 2021-05-21T15:57:22 [1421973] | [rr 1424988 88422]2021-05-21 15:54:58 43 [ERROR] mysqld: Lock wait timeout exceeded; try restarting transaction
|
# Note: the "Lock wait timeout exceeded" event above may be related to this problem.
|
# 2021-05-21T15:57:22 [1421973] | [rr 1424988 90006]mysqld: /data/Server/bb-10.6-marko_A/storage/innobase/lock/lock0lock.cc:2043: void lock_discard_for_index(const dict_index_t&): Assertion `!lock->is_waiting()' failed.
|
# 2021-05-21T15:57:22 [1421973] | [rr 1424988 105446]Query (0x0): [rr 1424988 105448](null)[rr 1424988 105450]
|
# 2021-05-21T15:57:22 [1421973] | Connection ID (thread ID): 0
|
# 2021-05-21T15:57:22 [1421973] | [rr 1424988 105452]Status: NOT_KILLED
|
|
(rr) bt
|
#0 __GI_raise (sig=sig@entry=6) at ../sysdeps/unix/sysv/linux/raise.c:50
|
#1 0x00007ffcd2281859 in __GI_abort () at abort.c:79
|
#2 0x00007ffcd2281729 in __assert_fail_base (fmt=0x7ffcd2417588 "%s%s%s:%u: %s%sAssertion `%s' failed.\n%n", assertion=0x55c2e92a9fe0 "!lock->is_waiting()", file=0x55c2e92aa160 "/data/Server/bb-10.6-marko_A/storage/innobase/lock/lock0lock.cc", line=2043,
|
function=<optimized out>) at assert.c:92
|
#3 0x00007ffcd2292f36 in __GI___assert_fail (assertion=0x55c2e92a9fe0 "!lock->is_waiting()", file=0x55c2e92aa160 "/data/Server/bb-10.6-marko_A/storage/innobase/lock/lock0lock.cc", line=2043, function=0x55c2e92ad7a0 "void lock_discard_for_index(const dict_index_t&)")
|
at assert.c:101
|
#4 0x000055c2e7d9a500 in lock_discard_for_index (index=...) at /data/Server/bb-10.6-marko_A/storage/innobase/lock/lock0lock.cc:2043
|
#5 0x000055c2e7fe5bbd in row_purge_remove_clust_if_poss_low (node=0x61a00000a908, mode=2) at /data/Server/bb-10.6-marko_A/storage/innobase/row/row0purge.cc:127
|
#6 0x000055c2e7fe694e in row_purge_remove_clust_if_poss (node=0x61a00000a908) at /data/Server/bb-10.6-marko_A/storage/innobase/row/row0purge.cc:274
|
#7 0x000055c2e7fe8b19 in row_purge_del_mark (node=0x61a00000a908) at /data/Server/bb-10.6-marko_A/storage/innobase/row/row0purge.cc:733
|
#8 0x000055c2e7fec757 in row_purge_record_func (node=0x61a00000a908, undo_rec=0x62100004c890 "", thr=0x6160000586c0, updated_extern=false) at /data/Server/bb-10.6-marko_A/storage/innobase/row/row0purge.cc:1157
|
#9 0x000055c2e7fecdaa in row_purge (node=0x61a00000a908, undo_rec=0x62100004c890 "", thr=0x6160000586c0) at /data/Server/bb-10.6-marko_A/storage/innobase/row/row0purge.cc:1218
|
#10 0x000055c2e7fed19d in row_purge_step (thr=0x6160000586c0) at /data/Server/bb-10.6-marko_A/storage/innobase/row/row0purge.cc:1267
|
#11 0x000055c2e7ed792c in que_thr_step (thr=0x6160000586c0) at /data/Server/bb-10.6-marko_A/storage/innobase/que/que0que.cc:653
|
#12 0x000055c2e7ed7cd4 in que_run_threads_low (thr=0x6160000586c0) at /data/Server/bb-10.6-marko_A/storage/innobase/que/que0que.cc:709
|
#13 0x000055c2e7ed7e76 in que_run_threads (thr=0x6160000586c0) at /data/Server/bb-10.6-marko_A/storage/innobase/que/que0que.cc:729
|
#14 0x000055c2e806df31 in srv_task_execute () at /data/Server/bb-10.6-marko_A/storage/innobase/srv/srv0srv.cc:1790
|
#15 0x000055c2e806ed8b in purge_worker_callback () at /data/Server/bb-10.6-marko_A/storage/innobase/srv/srv0srv.cc:1953
|
#16 0x000055c2e8421b48 in tpool::task_group::execute (this=0x55c2eb0374c0 <purge_task_group>, t=0x55c2eb037580 <purge_worker_task>) at /data/Server/bb-10.6-marko_A/tpool/task_group.cc:55
|
#17 0x000055c2e842244f in tpool::task::execute (this=0x55c2eb037580 <purge_worker_task>) at /data/Server/bb-10.6-marko_A/tpool/task.cc:47
|
#18 0x000055c2e840ebbb in tpool::thread_pool_generic::worker_main (this=0x618000000480, thread_var=0x63000001fd80) at /data/Server/bb-10.6-marko_A/tpool/tpool_generic.cc:550
|
#19 0x000055c2e84215a9 in std::__invoke_impl<void, void (tpool::thread_pool_generic::*)(tpool::worker_data*), tpool::thread_pool_generic*, tpool::worker_data*> (__f=
|
@0x6040000008e8: (void (tpool::thread_pool_generic::*)(class tpool::thread_pool_generic * const, struct tpool::worker_data *)) 0x55c2e840e9d8 <tpool::thread_pool_generic::worker_main(tpool::worker_data*)>, __t=@0x6040000008e0: 0x618000000480)
|
at /usr/include/c++/9/bits/invoke.h:73
|
#20 0x000055c2e8421354 in std::__invoke<void (tpool::thread_pool_generic::*)(tpool::worker_data*), tpool::thread_pool_generic*, tpool::worker_data*> (__fn=
|
@0x6040000008e8: (void (tpool::thread_pool_generic::*)(class tpool::thread_pool_generic * const, struct tpool::worker_data *)) 0x55c2e840e9d8 <tpool::thread_pool_generic::worker_main(tpool::worker_data*)>) at /usr/include/c++/9/bits/invoke.h:95
|
#21 0x000055c2e84211e5 in std::thread::_Invoker<std::tuple<void (tpool::thread_pool_generic::*)(tpool::worker_data*), tpool::thread_pool_generic*, tpool::worker_data*> >::_M_invoke<0ul, 1ul, 2ul> (this=0x6040000008d8) at /usr/include/c++/9/thread:244
|
#22 0x000055c2e842112f in std::thread::_Invoker<std::tuple<void (tpool::thread_pool_generic::*)(tpool::worker_data*), tpool::thread_pool_generic*, tpool::worker_data*> >::operator() (this=0x6040000008d8) at /usr/include/c++/9/thread:251
|
#23 0x000055c2e8421094 in std::thread::_State_impl<std::thread::_Invoker<std::tuple<void (tpool::thread_pool_generic::*)(tpool::worker_data*), tpool::thread_pool_generic*, tpool::worker_data*> > >::_M_run (this=0x6040000008d0) at /usr/include/c++/9/thread:195
|
#24 0x000065b758c98d84 in ?? () from /lib/x86_64-linux-gnu/libstdc++.so.6
|
#25 0x00006d7856f21609 in start_thread (arg=<optimized out>) at pthread_create.c:477
|
#26 0x00007ffcd237e293 in clone () at ../sysdeps/unix/sysv/linux/x86_64/clone.S:95
|
(rr)
|
pluto:/data/Results/1621607839/TBR-1068/dev/shm/vardir/1621607839/80/1/rr
|
_RR_TRACE_DIR="." rr replay --mark-stdio
|
|
GIT_SHOW: HEAD, origin/bb-10.6-marko c857c14c40df26eb588964dacd45da47b0ac50f9 2021-05-21T09:48:30+03:00
|
The build also includes a patch for MDEV-25750.
|
|
RQG
|
====
|
git clone https://github.com/mleich1/rqg --branch experimental RQG
|
|
perl rqg.pl \
|
--grammar=conf/mariadb/table_stress_innodb_nocopy1.yy \
|
--gendata=conf/mariadb/table_stress.zz \
|
--gendata_sql=conf/mariadb/table_stress.sql \
|
--redefine=conf/mariadb/xa.yy \
|
--reporters=CrashRecovery1 \
|
--mysqld=--loose-innodb_lock_schedule_algorithm=fcfs \
|
--mysqld=--loose-idle_write_transaction_timeout=0 \
|
--mysqld=--loose-idle_transaction_timeout=0 \
|
--mysqld=--loose-idle_readonly_transaction_timeout=0 \
|
--mysqld=--connect_timeout=60 \
|
--mysqld=--interactive_timeout=28800 \
|
--mysqld=--slave_net_timeout=60 \
|
--mysqld=--net_read_timeout=30 \
|
--mysqld=--net_write_timeout=60 \
|
--mysqld=--loose-table_lock_wait_timeout=50 \
|
--mysqld=--wait_timeout=28800 \
|
--mysqld=--lock-wait-timeout=86400 \
|
--mysqld=--innodb-lock-wait-timeout=50 \
|
--no-mask \
|
--queries=10000000 \
|
--seed=random \
|
--reporters=Backtrace \
|
--reporters=ErrorLog \
|
--reporters=Deadlock1 \
|
--validators=None \
|
--mysqld=--log_output=none \
|
--mysqld=--log-bin \
|
--mysqld=--log_bin_trust_function_creators=1 \
|
--mysqld=--loose-debug_assert_on_not_freed_memory=0 \
|
--engine=InnoDB \
|
--restart_timeout=240 \
|
--mysqld=--plugin-load-add=file_key_management.so \
|
--mysqld=--loose-file-key-management-filename=$RQG_HOME/conf/mariadb/encryption_keys.txt \
|
--duration=100 \
|
--mysqld=--loose-innodb_fatal_semaphore_wait_threshold=600 \
|
--mysqld=--innodb_stats_persistent=off \
|
--mysqld=--innodb_adaptive_hash_index=off \
|
--mysqld=--loose-innodb_evict_tables_on_commit_debug=off \
|
--mysqld=--loose-max-statement-time=30 \
|
--threads=33 \
|
--mysqld=--innodb-use-native-aio=0 \
|
--rr=Extended \
|
--rr_options=--chaos \
|
--mysqld=--innodb_page_size=4K \
|
--mysqld=--innodb-buffer-pool-size=256M \
|
--no_mask \
|
--workdir=<local settings> \
|
--vardir=<local settings> \
|
--mtr-build-thread=<local settings> \
|
--basedir1=<local settings> \
|
--script_debug=_nix_
|
|
Error pattern for RQG
|
[ 'TBR-1068-MDEV-25757', 'mysqld: .{1,200}lock0lock.cc:.{1,20}: void lock_discard_for_index.{1,500}: Assertion \`\!lock->is_waiting\(\). failed' ],
|
Attachments
Issue Links
- relates to
-
MDEV-20605 Awaken transaction can miss inserted by other transaction records due to wrong persistent cursor restoration
- Closed