MariaDB Server / MDEV-25527

ERROR: AddressSanitizer: attempting double-free in __interceptor_free nearby dict_mem_table_free

Details

    Description

      ASAN failure hit during RQG testing
       
      # 2021-04-26T08:58:49 [1871827] | ==1875000==ERROR: AddressSanitizer: attempting double-free on 0x6170001d6200 in thread T27:
      # 2021-04-26T08:58:49 [1871827] |     #0 0x7f0534cc17cf in __interceptor_free (/lib/x86_64-linux-gnu/libasan.so.5+0x10d7cf)
      # 2021-04-26T08:58:49 [1871827] |     #1 0x55febde00ee5 in dict_mem_table_free(dict_table_t*) /data/Server/bb-10.6-MDEV-25491/storage/innobase/dict/dict0mem.cc:235
      # 2021-04-26T08:58:49 [1871827] |     #2 0x55febdcaa1a7 in btr_search_lazy_free /data/Server/bb-10.6-MDEV-25491/storage/innobase/btr/btr0sea.cc:217
      # 2021-04-26T08:58:49 [1871827] |     #3 0x55febdcad9cb in btr_search_drop_page_hash_index(buf_block_t*) /data/Server/bb-10.6-MDEV-25491/storage/innobase/btr/btr0sea.cc:1395
      # 2021-04-26T08:58:49 [1871827] |     #4 0x55febdd4566e in buf_LRU_free_page(buf_page_t*, bool) /data/Server/bb-10.6-MDEV-25491/storage/innobase/buf/buf0lru.cc:946
      # 2021-04-26T08:58:49 [1871827] |     #5 0x55febdd48303 in buf_LRU_free_from_common_LRU_list /data/Server/bb-10.6-MDEV-25491/storage/innobase/buf/buf0lru.cc:246
      # 2021-04-26T08:58:49 [1871827] |     #6 0x55febdd48928 in buf_LRU_scan_and_free_block(unsigned long) /data/Server/bb-10.6-MDEV-25491/storage/innobase/buf/buf0lru.cc:278
      # 2021-04-26T08:58:49 [1871827] |     #7 0x55febdd48df9 in buf_LRU_get_free_block(bool) /data/Server/bb-10.6-MDEV-25491/storage/innobase/buf/buf0lru.cc:439
      # 2021-04-26T08:58:49 [1871827] |     #8 0x55febdca113b in buf_block_alloc() /data/Server/bb-10.6-MDEV-25491/storage/innobase/include/buf0buf.ic:190
      # 2021-04-26T08:58:49 [1871827] |     #9 0x55febdca113b in btr_search_check_free_space_in_heap /data/Server/bb-10.6-MDEV-25491/storage/innobase/btr/btr0sea.cc:181
      # 2021-04-26T08:58:49 [1871827] |     #10 0x55febdcb0098 in btr_search_update_hash_on_insert(btr_cur_t*, ssux_lock_low*) /data/Server/bb-10.6-MDEV-25491/storage/innobase/btr/btr0sea.cc:1972
      # 2021-04-26T08:58:49 [1871827] |     #11 0x55febdc4d579 in btr_cur_optimistic_insert(unsigned long, btr_cur_t*, unsigned short**, mem_block_info_t**, dtuple_t*, unsigned char**, big_rec_t**, unsigned long, que_thr_t*, mtr_t*) /data/Server/bb-10.6-MDEV-25491/storage/innobase/btr/btr0cur.cc:3618
      # 2021-04-26T08:58:49 [1871827] |     #12 0x55febd97783d in row_ins_sec_index_entry_low(unsigned long, unsigned long, dict_index_t*, mem_block_info_t*, mem_block_info_t*, dtuple_t*, unsigned long, que_thr_t*) /data/Server/bb-10.6-MDEV-25491/storage/innobase/row/row0ins.cc:3145
      # 2021-04-26T08:58:49 [1871827] |     #13 0x55febd97859e in row_ins_sec_index_entry(dict_index_t*, dtuple_t*, que_thr_t*, bool) /data/Server/bb-10.6-MDEV-25491/storage/innobase/row/row0ins.cc:3346
      # 2021-04-26T08:58:49 [1871827] |     #14 0x55febd978fe5 in row_ins_index_entry /data/Server/bb-10.6-MDEV-25491/storage/innobase/row/row0ins.cc:3394
      # 2021-04-26T08:58:49 [1871827] |     #15 0x55febd9792e2 in row_ins_index_entry_step /data/Server/bb-10.6-MDEV-25491/storage/innobase/row/row0ins.cc:3561
      # 2021-04-26T08:58:49 [1871827] |     #16 0x55febd97aa3c in row_ins /data/Server/bb-10.6-MDEV-25491/storage/innobase/row/row0ins.cc:3707
      # 2021-04-26T08:58:49 [1871827] |     #17 0x55febd97b930 in row_ins_step(que_thr_t*) /data/Server/bb-10.6-MDEV-25491/storage/innobase/row/row0ins.cc:3853
      # 2021-04-26T08:58:49 [1871827] |     #18 0x55febd9d0ed3 in row_insert_for_mysql(unsigned char const*, row_prebuilt_t*, ins_mode_t) /data/Server/bb-10.6-MDEV-25491/storage/innobase/row/row0mysql.cc:1395
      # 2021-04-26T08:58:49 [1871827] |     #19 0x55febd60d11d in ha_innobase::write_row(unsigned char const*) /data/Server/bb-10.6-MDEV-25491/storage/innobase/handler/ha_innodb.cc:7420
      # 2021-04-26T08:58:49 [1871827] |     #20 0x55febcbcb466 in handler::ha_write_row(unsigned char const*) /data/Server/bb-10.6-MDEV-25491/sql/handler.cc:7162
      # 2021-04-26T08:58:49 [1871827] |     #21 0x55febc28927e in write_record(THD*, TABLE*, st_copy_info*, select_result*) /data/Server/bb-10.6-MDEV-25491/sql/sql_insert.cc:2106
      # 2021-04-26T08:58:49 [1871827] |     #22 0x55febc2a9694 in mysql_insert(THD*, TABLE_LIST*, List<Item>&, List<List<Item> >&, List<Item>&, List<Item>&, enum_duplicates, bool, select_result*) /data/Server/bb-10.6-MDEV-25491/sql/sql_insert.cc:1099
      # 2021-04-26T08:58:49 [1871827] |     #23 0x55febc3885b0 in mysql_execute_command(THD*) /data/Server/bb-10.6-MDEV-25491/sql/sql_parse.cc:4559
      # 2021-04-26T08:58:49 [1871827] |     #24 0x55febc347306 in mysql_parse(THD*, char*, unsigned int, Parser_state*) /data/Server/bb-10.6-MDEV-25491/sql/sql_parse.cc:8018
      # 2021-04-26T08:58:49 [1871827] |     #25 0x55febc375de7 in dispatch_command(enum_server_command, THD*, char*, unsigned int, bool) /data/Server/bb-10.6-MDEV-25491/sql/sql_parse.cc:1897
      # 2021-04-26T08:58:49 [1871827] |     #26 0x55febc37e297 in do_command(THD*, bool) /data/Server/bb-10.6-MDEV-25491/sql/sql_parse.cc:1406
      # 2021-04-26T08:58:49 [1871827] |     #27 0x55febc7a6fea in do_handle_one_connection(CONNECT*, bool) /data/Server/bb-10.6-MDEV-25491/sql/sql_connect.cc:1410
      # 2021-04-26T08:58:49 [1871827] |     #28 0x55febc7a7b98 in handle_one_connection /data/Server/bb-10.6-MDEV-25491/sql/sql_connect.cc:1312
      # 2021-04-26T08:58:49 [1871827] |     #29 0x7f0534aa2608 in start_thread /build/glibc-eX1tMB/glibc-2.31/nptl/pthread_create.c:477
      # 2021-04-26T08:58:49 [1871827] |     #30 0x7f0534676292 in __clone (/lib/x86_64-linux-gnu/libc.so.6+0x122292)
       
      # 2021-04-26T08:58:49 [1871827] | Query (0x62b000188238): INSERT INTO b ( `col_int_nokey`, `col_int_key` ) VALUES ( 5, 5 ), ( 1, 0 ) /* E_R Thread10 QNO 553 CON_ID 24 */
      ...
      # 2021-04-26T08:58:49 [1871827] | Status: NOT_KILLED
       
      HEAD, origin/bb-10.6-MDEV-25491 bb167613126a97652654d7ff7341fdbe4d2320cc 2021-04-26T13:58:26+03:00
       
      sdp:/data/Results/1619451439/TBR-1037
      gdb -c ./dev/shm/vardir/1619451439/174/1/data/core /Server_bin/bb-10.6-MDEV-25491_asan_Og/bin/mysqld
       
      RQG
      -------
      git clone https://github.com/mleich1/rqg --branch experimental RQG
       
      perl rqg.pl \
      --grammar=conf/mariadb/partitions_innodb.yy \
      --mysqld=--loose-innodb_lock_schedule_algorithm=fcfs \
      --mysqld=--loose-idle_write_transaction_timeout=0 \
      --mysqld=--loose-idle_transaction_timeout=0 \
      --mysqld=--loose-idle_readonly_transaction_timeout=0 \
      --mysqld=--connect_timeout=60 \
      --mysqld=--interactive_timeout=28800 \
      --mysqld=--slave_net_timeout=60 \
      --mysqld=--net_read_timeout=30 \
      --mysqld=--net_write_timeout=60 \
      --mysqld=--loose-table_lock_wait_timeout=50 \
      --mysqld=--wait_timeout=28800 \
      --mysqld=--lock-wait-timeout=86400 \
      --mysqld=--innodb-lock-wait-timeout=50 \
      --no-mask \
      --queries=10000000 \
      --seed=random \
      --reporters=Backtrace \
      --reporters=ErrorLog \
      --reporters=Deadlock1 \
      --validators=None \
      --mysqld=--log_output=none \
      --mysqld=--log-bin \
      --mysqld=--log_bin_trust_function_creators=1 \
      --mysqld=--loose-debug_assert_on_not_freed_memory=0 \
      --engine=InnoDB \
      --restart_timeout=240 \
      --mysqld=--plugin-load-add=file_key_management.so \
      --mysqld=--loose-file-key-management-filename=$RQG_HOME/conf/mariadb/encryption_keys.txt \
      --duration=300 \
      --mysqld=--loose-innodb_fatal_semaphore_wait_threshold=300 \
      --mysqld=--loose-innodb-sync-debug \
      --mysqld=--innodb_stats_persistent=off \
      --mysqld=--innodb_adaptive_hash_index=on \
      --mysqld=--loose-innodb_evict_tables_on_commit_debug=off \
      --mysqld=--loose-max-statement-time=30 \
      --threads=33 \
      --mysqld=--innodb_use_native_aio=1 \
      --mysqld=--innodb_page_size=32K \
      --mysqld=--innodb-buffer-pool-size=256M \
      --duration=300 \
      --no_mask \
      --workdir=<local settings> \
      --vardir=<local settings> \
      --mtr-build-thread=<local settings> \
      --basedir1=<local settings> \
      --script_debug=_nix_
      
      

          Activity

            mleich Matthias Leich added a comment:

            Error pattern for RQG
            [ 'TBR-1037-MDEV-25527' , 'ERROR: AddressSanitizer: attempting double-free.+#0 .{1,30}in __interceptor_free.+#1 .{1,30}in dict_mem_table_free.+#2 .{1,30}in btr_search_lazy_free.+#3 .{1,30}in btr_search_drop_page_hash_index.+#4 .{1,30}in buf_LRU_free_page' ],
            


            marko Marko Mäkelä added a comment:

            I analyzed the core dump a little. Not all variables are available, because it is an optimized build. The interesting part is the latch protection:

            (gdb) p/x btr_search_sys.parts[0].latch
            $17 = {writer = {lock = {<std::__atomic_base<unsigned int>> = {_M_i = 0x80000001}, <No data fields>}}, readers = {<std::__atomic_base<unsigned int>> = {_M_i = 0x80000000}, <No data fields>}}
            (gdb) p/x btr_search_sys.parts[1].latch
            $18 = {writer = {lock = {<std::__atomic_base<unsigned int>> = {_M_i = 0x80000000}, <No data fields>}}, readers = {<std::__atomic_base<unsigned int>> = {_M_i = 0x80000000}, <No data fields>}}
            (gdb) p/x btr_search_sys.parts[2].latch
            $19 = {writer = {lock = {<std::__atomic_base<unsigned int>> = {_M_i = 0x80000002}, <No data fields>}}, readers = {<std::__atomic_base<unsigned int>> = {_M_i = 0x80000000}, <No data fields>}}
            (gdb) p/x btr_search_sys.parts[3].latch
            $20 = {writer = {lock = {<std::__atomic_base<unsigned int>> = {_M_i = 0x0}, <No data fields>}}, readers = {<std::__atomic_base<unsigned int>> = {_M_i = 0x0}, <No data fields>}}
            (gdb) p/x btr_search_sys.parts[4].latch
            $21 = {writer = {lock = {<std::__atomic_base<unsigned int>> = {_M_i = 0x0}, <No data fields>}}, readers = {<std::__atomic_base<unsigned int>> = {_M_i = 0x0}, <No data fields>}}
            (gdb) p/x btr_search_sys.parts[5].latch
            $22 = {writer = {lock = {<std::__atomic_base<unsigned int>> = {_M_i = 0x0}, <No data fields>}}, readers = {<std::__atomic_base<unsigned int>> = {_M_i = 0x0}, <No data fields>}}
            (gdb) p/x btr_search_sys.parts[6].latch
            $23 = {writer = {lock = {<std::__atomic_base<unsigned int>> = {_M_i = 0x0}, <No data fields>}}, readers = {<std::__atomic_base<unsigned int>> = {_M_i = 0x0}, <No data fields>}}
            (gdb) p/x btr_search_sys.parts[7].latch
            $24 = {writer = {lock = {<std::__atomic_base<unsigned int>> = {_M_i = 0x0}, <No data fields>}}, readers = {<std::__atomic_base<unsigned int>> = {_M_i = 0x0}, <No data fields>}}
            

            We have btr_ahi_parts=8 in the core dump. The tablespace identifier appears to be 0x2d4 and the index identifier 0x5d9. The call btr_search_sys.get_part(0x5d9,0x2d4) should return btr_search_sys.parts+2.
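
            For orientation, a minimal sketch of the structure this refers to, with a placeholder fold function (the real formula is in btr_search_sys_t::get_part() in the source; the placeholder below is an assumption and will not reproduce the parts+2 result above):

            #include <cstddef>
            #include <cstdint>

            // Hypothetical sketch only: the adaptive hash index is split into
            // btr_ahi_parts partitions, each with its own latch, and a given
            // (index id, space id) pair always maps to the same partition.
            static constexpr std::size_t btr_ahi_parts = 8;

            struct ahi_partition { /* latch, hash table, heap, ... */ };

            static ahi_partition parts[btr_ahi_parts];

            static ahi_partition *get_part(std::uint64_t index_id, std::uint32_t space_id)
            {
              // Placeholder fold, not the server's actual formula.
              const std::size_t fold = static_cast<std::size_t>(index_id) ^ space_id;
              return &parts[fold % btr_ahi_parts];
            }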

            Above we can see that only the first three slots are held in exclusive mode by some threads. Slots 0 and 2 have threads waiting for the exclusive lock to be released. Two other threads are waiting to lock all adaptive hash index partitions; maybe both of them are waiting for our latch.
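
            A rough decoding of the writer words above, assuming the most significant bit marks an exclusive holder and the remaining bits count pending lock requests (an assumption about the srw/ssux lock word layout, not taken from the source):

            #include <cstdint>
            #include <cstdio>

            // Hypothetical decoder for the latch words printed above.
            // Verify the bit layout against the actual ssux_lock/srw_mutex
            // definitions before trusting the output.
            static constexpr uint32_t HOLDER_BIT = 0x80000000;

            static void decode_writer_word(int part, uint32_t w)
            {
              const bool held = (w & HOLDER_BIT) != 0;
              const uint32_t waiters = w & ~HOLDER_BIT;
              std::printf("parts[%d]: %s, %u pending request(s)\n",
                          part, held ? "held exclusively" : "free", waiters);
            }

            int main()
            {
              const uint32_t words[] = {0x80000001, 0x80000000, 0x80000002, 0, 0, 0, 0, 0};
              for (int i = 0; i < 8; i++)
                decode_writer_word(i, words[i]);
              return 0;
            }

            Under that assumption the output matches the reading above: parts[0] and parts[2] are held with one and two pending requests respectively, parts[1] is held with none, and the rest are free.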

            During the call, we are supposed to hold an exclusive latch on the correct adaptive hash index partition:

            	switch (index->search_info->ref_count--) {
            	case 0:
            		ut_error;
            	case 1:
            		if (index->freed()) {
            			btr_search_lazy_free(index);
            		}
            	}
             
            	block->index = NULL;
             
            	MONITOR_INC(MONITOR_ADAPTIVE_HASH_PAGE_REMOVED);
            	MONITOR_INC_VALUE(MONITOR_ADAPTIVE_HASH_ROW_REMOVED, n_cached);
             
            cleanup:
            	assert_block_ahi_valid(block);
            	part->latch.wr_unlock();
            

            There are only two calls to btr_search_lazy_free(), and nothing else should be freeing such detached indexes. Both calls are supposedly protected by an exclusive part->latch, but here that does not appear to have been the case.
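
            To see why the exclusive part->latch matters at this point: the decrement of ref_count and the conditional dict_mem_table_free() must form one atomic step, otherwise two threads dropping the last references concurrently could both free the same table. A minimal, self-contained sketch of the intended pattern, with invented names and std::mutex standing in for the partition latch (illustrative only, not the server code):

            #include <mutex>

            struct table_t { int dummy; };
            struct index_t { table_t *table; unsigned ref_count; bool freed; };

            static std::mutex part_latch;   // stands in for part->latch, exclusive mode

            void drop_page_hash_ref(index_t *index)
            {
              // The latch must cover both the decrement and the free.
              std::lock_guard<std::mutex> guard(part_latch);
              if (index->ref_count-- == 1 && index->freed)
              {
                // Corresponds to btr_search_lazy_free(): no references remain,
                // so the detached table's memory can be released.
                delete index->table;        // a double-free becomes possible if two
                index->table = nullptr;     // threads reach this without the latch
              }
            }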

            In the core dump, the space_id is not available in block->frame, but it is in block->page.id_ and index->table->space_id. The index_id is available both in index->id and mach_read_from_8(block->frame+38+28).

            The block->index is still the detached index that we are about to free.

            The real mystery is: When was that memory freed for the first time?
            Would cmake -DWITH_ASAN_SCOPE=ON provide a stack trace of the previous freeing?

            mleich Matthias Leich added a comment (edited):

            I made many attempts to generate an rr trace, but that did not work.
            sdp:/data/Results/1619782219/MDEV-25527
            gdb -c dev/shm/vardir/1619782219/2/1/data/core /data/Server_bin/bb-10.6-MDEV-25491_asan_OgX/bin/mysqld
            /data/Results/1619782219/MDEV-25527/dev/shm/vardir/1619782219/2/1/mysql.err  # ASAN output
             
            Only two sessions are involved.
            create_10:
               CREATE TABLE IF NOT EXISTS v ( `col_int_nokey` INTEGER, `col_int_key` INTEGER NOT NULL, KEY (`col_int_key`)) ENGINE = INNODB ;
               CREATE TABLE IF NOT EXISTS w ( `col_int_nokey` INTEGER, `col_int_key` INTEGER NOT NULL, KEY (`col_int_key`)) ENGINE = INNODB ;
             
            thread1:
                ALTER TABLE w ORDER BY `col_int_nokey` ;
             
            thread1_connect:
                create_10 ; insert_tables ;
             
            thread1_init:
                ;
             
            thread2:
                ALTER TABLE v ORDER BY `col_int_nokey` ;
             
            thread2_connect:
                ;
             
            thread2_init:
                ;
             
            insert_tables:
                INSERT INTO w ( `col_int_nokey`, `col_int_key` ) VALUES ( 6 , 6 );
                INSERT INTO w ( `col_int_nokey`, `col_int_key` ) SELECT `col_int_nokey`, `col_int_key` FROM w;
                INSERT INTO w ( `col_int_nokey`, `col_int_key` ) SELECT `col_int_nokey`, `col_int_key` FROM w;
                INSERT INTO w ( `col_int_nokey`, `col_int_key` ) SELECT `col_int_nokey`, `col_int_key` FROM w;
                INSERT INTO w ( `col_int_nokey`, `col_int_key` ) SELECT `col_int_nokey`, `col_int_key` FROM w;
                INSERT INTO w ( `col_int_nokey`, `col_int_key` ) SELECT `col_int_nokey`, `col_int_key` FROM w;
                INSERT INTO w ( `col_int_nokey`, `col_int_key` ) SELECT `col_int_nokey`, `col_int_key` FROM w;
                INSERT INTO w ( `col_int_nokey`, `col_int_key` ) SELECT `col_int_nokey`, `col_int_key` FROM w;
                INSERT INTO w ( `col_int_nokey`, `col_int_key` ) SELECT `col_int_nokey`, `col_int_key` FROM w;
                INSERT INTO w ( `col_int_nokey`, `col_int_key` ) SELECT `col_int_nokey`, `col_int_key` FROM w;
            The workflow is: one session creates the two tables, and one of the tables gets filled with a large number of rows.
             
            Between the middle and the end of test simplification, the following error patterns showed up:
            [ 'TBR-1037-MDEV-25527' , 'ERROR: AddressSanitizer: attempting double-free.+#0 .{1,30}in __interceptor_free.+#1 .{1,30}in dict_mem_table_free.+#2 .{1,30}in btr_search_lazy_free.+#3 .{1,30}in btr_search_drop_page_hash_index.+#4 .{1,30}in buf_LRU_free_page' ],
            [ 'TBR-1037A' , 'ERROR: AddressSanitizer: attempting double-free.+#0 .{1,30}in __interceptor_free.+#1 .{1,30}in mem_heap_block_free.+#2 .{1,30}in mem_heap_free.+#3 .{1,30}in dict_mem_table_free.+#4 .{1,30}in btr_search_lazy_free' ],
            [ 'TBR-1037B' , 'ERROR: AddressSanitizer: heap-use-after-free.+#0 .{1,30}in mem_heap_block_free.+#1 .{1,30}in mem_heap_free.+#2 .{1,30}in dict_mem_table_free.+#3 .{1,30}in btr_search_lazy_free' ],
            [ 'TBR-1037C' , 'ERROR: AddressSanitizer: heap-use-after-free.+#0 .{1,30}in void ut_list_remove.+#1 .{1,30}in void ut_list_remove.+#2 .{1,30}in mem_heap_block_free.+#3 .{1,30}in mem_heap_free.+#4 .{1,30}in dict_mem_table_free.+#5 .{1,30}in btr_search_lazy_free' ],
            [ 'TBR-1037D' , 'ERROR: AddressSanitizer: heap-use-after-free.+#0 .{1,30}in std::_Rb_tree.+#1 .{1,30}std::_Rb_tree.+#2 .{1,30}in std::set.+#3 .{1,30}in dict_mem_table_free.+#4 .{1,30}in btr_search_lazy_free.+#5 .{1,30}in btr_search_drop_page_hash_index' ],
            [ 'TBR-863-ASAN-btr_search_lazy_free', 'SUMMARY: AddressSanitizer: heap-use-after-free .{1,200}btr0sea.cc:.{1,30} in btr_search_lazy_free' ],
            [ 'TBR-1014' , 'ERROR: AddressSanitizer: attempting double-free on .+#0 .{1,30}in __interceptor_free.+#1 .{1,30}in dict_mem_table_free.+#2 .{1,30}in btr_search_lazy_free.+#3 .{1,30}in btr_search_drop_page_hash_index.+#4 .{1,30}in buf_LRU_free_page.+#5 .{1,30}in buf_LRU_free_from_common_LRU_list.+#6 .{1,30}in buf_LRU_scan_and_free_block.+#7 .{1,30}in buf_LRU_get_free_block.+#8 .{1,30}in buf_page_init_for_read.+#9 .{1,30}in buf_read_page_low.+#10 .{1,30}in buf_read_page_background.+#11 .{1,30}in btr_cur_prefetch_siblings.+#12 .{1,30}in btr_cur_optimistic_delete_func.+#13 .{1,30}in row_purge_remove_clust_if_poss_low.+#14 .{1,30}in row_purge_remove_clust_if_poss.+#15 .{1,30}in row_purge_del_mark' ],
            [ 'TBR-1041', 'InnoDB: Assertion failure in file /data/Server/bb-10.6-MDEV-25491/storage/innobase/include/ut0lst.h line .+InnoDB: Failing assertion: list.count > 0' ],
            


            marko Marko Mäkelä added a comment:

            thiru, can you please check whether this was already fixed? I think that earlier branches may also be affected.


            People

              Assignee: thiru Thirunarayanan Balathandayuthapani
              Reporter: mleich Matthias Leich