file=0x55dbee0ba600 "/data/Server/bb-10.6-MDEV-27700-bis/storage/innobase/buf/buf0flu.cc", line=175, function=0x55dbee0ba740 "void buf_pool_t::insert_into_flush_list(buf_block_t*, lsn_t)") at assert.c:101
#4 0x000055dbecf8b273 in buf_pool_t::insert_into_flush_list (this=0x55dbeefb3d40 <buf_pool>, block=0x7f5902009b00, lsn=60915068) at /data/Server/bb-10.6-MDEV-27700-bis/storage/innobase/buf/buf0flu.cc:175
#5 0x000055dbecb97e72 in buf_flush_note_modification (b=0x7f5902009b00, start=60915068, end=60915132) at /data/Server/bb-10.6-MDEV-27700-bis/storage/innobase/include/buf0flu.h:129
#6 0x000055dbecbdcdb9 in ReleaseBlocks::operator() (this=0x3ea561b4ed90, slot=0x3ea561b4ef80) at /data/Server/bb-10.6-MDEV-27700-bis/storage/innobase/mtr/mtr0mtr.cc:379
#7 0x000055dbecbe7067 in CIterate<ReleaseBlocks const>::operator() (this=0x3ea561b4ed90, block=0x3ea561b4ef20) at /data/Server/bb-10.6-MDEV-27700-bis/storage/innobase/mtr/mtr0mtr.cc:62
#8 0x000055dbecbe127b in mtr_buf_t::for_each_block_in_reverse<CIterate<ReleaseBlocks const> > (this=0x3ea561b4eef8, functor=...) at /data/Server/bb-10.6-MDEV-27700-bis/storage/innobase/include/dyn0buf.h:379
#9 0x000055dbecbd126e in mtr_t::commit (this=0x3ea561b4eee0) at /data/Server/bb-10.6-MDEV-27700-bis/storage/innobase/mtr/mtr0mtr.cc:484
#10 0x000055dbed16663b in btr_pcur_commit_specify_mtr (pcur=0x61a000091f78, mtr=0x3ea561b4eee0) at /data/Server/bb-10.6-MDEV-27700-bis/storage/innobase/include/btr0pcur.inl:258
#11 0x000055dbed168777 in row_undo_ins_remove_clust_rec (node=0x61a000091f08) at /data/Server/bb-10.6-MDEV-27700-bis/storage/innobase/row/row0uins.cc:240
#12 0x000055dbed16af8d in row_undo_ins (node=0x61a000091f08, thr=0x6160047290c0) at /data/Server/bb-10.6-MDEV-27700-bis/storage/innobase/row/row0uins.cc:608
The cause of this appears to be that earlier in the mini-transaction, we failed to set the mtr_t::m_made_dirty flag when flagging a freed page as modified. The following change ought to fix that:
If I understood it correctly, this scenario can only be triggered if the page had been written out to the data file before this mini-transaction modified something in the page (most likely, deleted the only record that had been inserted into the page).
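A toy sketch of that idea (hypothetical simplified types, not the actual MariaDB patch): when mtr_t::free() flags a previously clean page, i.e. one that had already been written out, as modified, it would also have to set mtr_t::m_made_dirty so that the commit inserts the block into buf_pool.flush_list:

/* Toy model of the fix idea; toy_page and toy_mtr are stand-ins for
   buf_page_t and mtr_t, not real MariaDB types. */
#include <cstdint>

struct toy_page { uint64_t oldest_modification = 0; };  // 0 = clean (already written out)

struct toy_mtr {
  bool m_made_dirty = false;   // must be set if a previously clean page gets modified
  void free(toy_page &page) {
    // flagging a freed page as modified: if the page is clean right now,
    // this mini-transaction will have to add it to buf_pool.flush_list on commit
    if (page.oldest_modification == 0)
      m_made_dirty = true;
  }
};

int main() {
  toy_page page;        // clean, because it had been written to the data file earlier
  toy_mtr mtr;
  mtr.free(page);       // the freed page is flagged as modified
  return mtr.m_made_dirty ? 0 : 1;  // 0 = the flag was set as required
}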
Marko Mäkelä
added a comment - For the record, the tree where the error was encountered was something that implemented a fix for MDEV-29384.
If my understanding of the scenario is correct, then this error should occur more frequently when using the following setting:
SET GLOBAL innodb_max_dirty_pages_pct=0;
Marko Mäkelä
added a comment - mleich, were you able to reproduce this bug more often than just the single occurrence?
I got an idea for reproducing this, but unfortunately it failed to crash the server:
diff --git a/mysql-test/suite/innodb/t/page_cleaner.test b/mysql-test/suite/innodb/t/page_cleaner.test
index f597619aa08..79a973df25a 100644
--- a/mysql-test/suite/innodb/t/page_cleaner.test
+++ b/mysql-test/suite/innodb/t/page_cleaner.test
@@ -15,7 +15,8 @@ WHERE variable_name = 'INNODB_BUFFER_POOL_PAGES_DIRTY';
SET GLOBAL innodb_max_dirty_pages_pct=90.0;
-CREATE TABLE t ENGINE=InnoDB SELECT * FROM seq_1_to_10000;
+CREATE TABLE t (a INT PRIMARY KEY) ENGINE=InnoDB;
+BEGIN; INSERT INTO t SELECT * FROM seq_1_to_10000;
SELECT variable_value>0 FROM information_schema.global_status
WHERE variable_name = 'INNODB_BUFFER_POOL_PAGES_DIRTY';
@@ -24,6 +25,8 @@ SET GLOBAL innodb_max_dirty_pages_pct=0.0;
# Without the MDEV-24917 fix, we would time out here.
--source include/wait_condition.inc
+
+ROLLBACK;
DROP TABLE t;
SET GLOBAL innodb_max_dirty_pages_pct = @save_pct;
A more elaborate attempt failed to reproduce it as well:
diff --git a/mysql-test/suite/innodb/t/page_cleaner.test b/mysql-test/suite/innodb/t/page_cleaner.test
index f597619aa08..508518edf2d 100644
--- a/mysql-test/suite/innodb/t/page_cleaner.test
+++ b/mysql-test/suite/innodb/t/page_cleaner.test
@@ -15,8 +15,11 @@ WHERE variable_name = 'INNODB_BUFFER_POOL_PAGES_DIRTY';
SET GLOBAL innodb_max_dirty_pages_pct=90.0;
-CREATE TABLE t ENGINE=InnoDB SELECT * FROM seq_1_to_10000;
+CREATE TABLE t (a INT PRIMARY KEY) ENGINE=InnoDB;
+connect (con1,localhost,root,,);
+BEGIN; INSERT INTO t SELECT * FROM seq_1_to_10000;
+connection default;
SELECT variable_value>0 FROM information_schema.global_status
WHERE variable_name = 'INNODB_BUFFER_POOL_PAGES_DIRTY';
@@ -24,6 +27,11 @@ SET GLOBAL innodb_max_dirty_pages_pct=0.0;
# Without the MDEV-24917 fix, we would time out here.
--source include/wait_condition.inc
+SET GLOBAL innodb_max_dirty_pages_pct=90.0;
+
+disconnect con1;
+SET GLOBAL innodb_max_dirty_pages_pct=0.0;
+--source include/wait_condition.inc
DROP TABLE t;
SET GLOBAL innodb_max_dirty_pages_pct = @save_pct;
Outside debug builds, I think that the impact of this bug could be broken crash recovery or backups. Because I am unable to reproduce this, I can’t assess the real impact either.
Marko Mäkelä
added a comment - With my test case modification and ./mtr --rr innodb.page_cleaner, I found out that normally, mtr_t::m_made_dirty (indicating that the mini-transaction will have to add a previously clean page to buf_pool.flush_list) would be set earlier during the rollback:
10.6 76bb671e422d958f7252f428b39e109369e2679d
#0 0x00005558c5773bac in mtr_t::x_latch_at_savepoint (this=0x7fce3e3c8d60,
savepoint=16, block=0x7fce5c0ab510)
at /mariadb/10.5/storage/innobase/include/mtr0mtr.inl:152
#1 0x00005558c575c04b in btr_cur_search_to_nth_level_func (
index=0x7fce3019f3e0, level=0, tuple=0x7fce380260d0, mode=PAGE_CUR_LE,
latch_mode=10, cursor=0x7fce3805df50, ahi_latch=0x0, mtr=0x7fce3e3c8d60,
autoinc=0) at /mariadb/10.5/storage/innobase/btr/btr0cur.cc:2202
#2 0x00005558c5777e5e in btr_pcur_open_with_no_init_func (
index=0x7fce3019f3e0, tuple=0x7fce380260d0, mode=PAGE_CUR_LE,
latch_mode=522, cursor=0x7fce3805df50, ahi_latch=0x0, mtr=0x7fce3e3c8d60)
at /mariadb/10.5/storage/innobase/include/btr0pcur.inl:385
#3 0x00005558c57798e7 in btr_pcur_t::restore_position (this=0x7fce3805df50,
restore_latch_mode=522, mtr=0x7fce3e3c8d60)
at /mariadb/10.5/storage/innobase/btr/btr0pcur.cc:423
#4 0x00005558c589b49c in row_undo_ins_remove_clust_rec (node=0x7fce3805dee0)
at /mariadb/10.5/storage/innobase/row/row0uins.cc:210
#5 0x00005558c589ca72 in row_undo_ins (node=0x7fce3805dee0,
thr=0x7fce3805dd00) at /mariadb/10.5/storage/innobase/row/row0uins.cc:608
That is, most of the time, it would not matter that mtr_t::free() is not setting the flag, because it would already have been set.
Marko Mäkelä
added a comment - I analyzed the trace once more to find out why exactly mtr_t::m_made_dirty had not been set in the first place. It turns out that the page write was completed and buf_page_t::oldest_modification_ reset at the time the ROLLBACK was waiting here:
#14 0x000055dbecdb6637 in sux_lock<ssux_lock_impl<true> >::x_lock (this=0x7f5902009b18, for_io=false) at /data/Server/bb-10.6-MDEV-27700-bis/storage/innobase/include/sux_lock.h:395
#15 0x000055dbecef3229 in btr_cur_latch_leaves (block=0x7f5902009b00, latch_mode=10, cursor=0x61a000091f78, mtr=0x3ea561b4eee0, latch_leaves=0x3ea561b4d5d0) at /data/Server/bb-10.6-MDEV-27700-bis/storage/innobase/btr/btr0cur.cc:284
There is a race condition in btr_cur_latch_leaves():
mtr->memo_push(block, MTR_MEMO_PAGE_X_FIX);
block->page.fix();
block->page.lock.x_lock();
The mtr_t::memo_push() did invoke mtr_t::is_block_dirtied(), but at that point in time the write of the dirty block had not yet been completed. The page write would be protected by a U-latch (previously known as SX-latch), which would conflict with the exclusive latch that we would be waiting for. Only after that exclusive latch has been acquired would it be safe to execute mtr_t::memo_push(), so that mtr_t::m_made_dirty would be set correctly.
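A self-contained toy model of that ordering problem (assumed simplified semantics, not MariaDB code): if the dirtiness check runs before the exclusive latch is acquired, a page write that completes while we wait leaves m_made_dirty unset even though the page is clean by the time it is modified:

#include <cassert>
#include <cstdint>

struct toy_page { uint64_t oldest_modification = 60915068; };  // nonzero = still dirty

struct toy_mtr {
  bool m_made_dirty = false;
  // stand-in for mtr_t::is_block_dirtied(): true if this mtr would newly dirty a clean page
  static bool is_block_dirtied(const toy_page &p) { return p.oldest_modification == 0; }
  void memo_push(const toy_page &p) { m_made_dirty |= is_block_dirtied(p); }
};

int main() {
  // Buggy order (memo_push before the X latch, as in btr_cur_latch_leaves()):
  toy_page page;
  toy_mtr mtr;
  mtr.memo_push(page);            // page still looks dirty here, so the flag stays false
  page.oldest_modification = 0;   // the page write completes while we wait for the X latch
  // ... the mini-transaction now modifies the page; on commit it must insert the
  // previously clean page into buf_pool.flush_list, but m_made_dirty is false:
  assert(!mtr.m_made_dirty);

  // Corrected order: fix and latch the block first, then register it,
  // so that the dirtiness check sees the final state of the page.
  toy_page page2;
  toy_mtr mtr2;
  page2.oldest_modification = 0;  // the write completes before the latch is granted
  mtr2.memo_push(page2);          // now the check runs after latching and sets the flag
  assert(mtr2.m_made_dirty);
  return 0;
}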
This code was refactored by me in MDEV-13542. The following functions are affected by the same anti-pattern:
btr_cur_latch_leaves()
btr_store_big_rec_extern_fields()
btr_free_externally_stored_field()
trx_purge_free_segment()
I checked all calls to mtr_t::memo_push() with MTR_MEMO_PAGE_X_FIX or MTR_MEMO_PAGE_SX_FIX, and it turns out that nothing was broken outside MDEV-13542.
I no longer think that MDEV-18976 caused this regression. The additional fix to mtr_t::free() is a good idea in any case.
Side note: Performance could be improved if we did not set mtr_t::m_made_dirty already when registering MTR_MEMO_PAGE_X_FIX or MTR_MEMO_PAGE_SX_FIX, but deferred it until the moment we set the MTR_MEMO_MODIFY flag on a block. In that way, even if a mini-transaction acquired a U or X latch on a page but never modified that page, mtr_t::commit() could avoid acquiring log_sys.flush_order_mutex. We only need that mutex when the mini-transaction actually needs to add a previously clean block to buf_pool.flush_list.
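A toy sketch of that deferral (hypothetical, not actual MariaDB code): the dirtiness check would run only when a block is flagged as modified, so a latch-only mini-transaction would not need log_sys.flush_order_mutex at commit:

#include <cstdint>

struct toy_page { uint64_t oldest_modification = 0; };  // 0 = clean

struct toy_mtr {
  bool m_made_dirty = false;
  void memo_push(const toy_page &) {}        // latch registration alone: no dirtiness check
  void set_modified(const toy_page &page) {  // deferred check at MTR_MEMO_MODIFY time
    m_made_dirty |= page.oldest_modification == 0;
  }
  bool commit_needs_flush_order_mutex() const { return m_made_dirty; }
};

int main() {
  toy_page page;
  toy_mtr latch_only;
  latch_only.memo_push(page);                // latched, but never modified
  return latch_only.commit_needs_flush_order_mutex() ? 1 : 0;  // 0: mutex can be skipped
}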
I am afraid that it is practically impossible to write a reproducible test case for this. The scheduling of page writes is largely invisible to higher-level code.
As far as I can tell, this bug can break crash recovery and backups (explaining MDEV-29364 and MDEV-29374).
Marko Mäkelä
added a comment - I believe that in release builds, this failure could lead to a corrupted database after a seemingly successful backup or crash recovery, like in this failure:
10.10 c0a6ce61d88104db14f7107cb16edf475b46bffd
mariabackup.huge_lsn 'strict_full_crc32' w3 [ fail ]
Test ended at 2022-09-06 10:55:11
…
[01] 2022-09-06 10:55:09 Copying ./mysql/transaction_registry.ibd to /dev/shm/var/3/mysqld.1/data/mysql/transaction_registry.ibd
[01] 2022-09-06 10:55:09 ...done
[00] 2022-09-06 10:55:09 completed OK!
mysqltest: At line 92: query 'SELECT * FROM t' failed: ER_TABLE_CORRUPT (1877): Table test/t is corrupted. Please drop the table and recreate.
This was with a code revision that did not include the fixes of MDEV-29383 or MDEV-29374.