Details
- Technical task
- Status: Closed
- Major
- Resolution: Done
- N/A
- None
- 10.1.29
Description
marko wrote:
DML before export, also DML before DISCARD TABLESPACE
at least some 100 megabytes of data in the table, with a big buffer pool, so that stale pages will stay around
maybe just import an empty table (that should be the worst exercise)
and in fact, you don’t necessarily have to copy large .ibd files during the test; we only need one .ibd file for the empty table to be imported (in a test loop). So, basically:
- create table;
- loop:
- insert/replace a lot;
- alter table...discard tablespace;
- copy empty file
- alter table...import tablespace;
- goto loop
and of course, multiple threads in parallel, each using their own table
and lots of secondary indexes
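In other words, one iteration of the loop for a single thread/table looks roughly like the sketch below (t1 is used as an example; the file copy is performed outside the server):
-- DML before DISCARD: insert/replace a lot (e.g. one of the INSERT ... SELECT statements from "Initial table contents" below)
ALTER TABLE t1 DISCARD TABLESPACE;
-- outside the server: copy a stored t1.ibd (an empty one or one with data) into the datadir in place of the discarded file
ALTER TABLE t1 IMPORT TABLESPACE;
-- repeat: more DML on the freshly imported table, then DISCARD again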
Test description
The implemented test is limited to what's prescribed in the request above, with only one addition: at the "copy empty file" step, the test alternates between using empty tablespaces and tablespaces with data.
Since the test is expected to use rather big tables, which take a lot of time to create, it is performed in a half-automated, half-manual mode to speed up the process.
One-time activity (manual; see the sketch after this list):
- server is started with default parameters;
- tables are pre-created;
- tables are flushed for export;
- tablespaces are exported and stored manually;
- tables are unlocked;
- tables are populated;
- tables are flushed for export;
- tablespaces are exported and stored manually;
- tables are unlocked;
- server is shut down;
- the whole data directory is stored along with the exported/stored tablespaces.
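For a single table, the one-time preparation boils down to roughly the following sketch (shell-level steps are shown as comments; presumably the first export produces the *.ibd.empty copies and the second one the *.ibd.backup copies listed under "Stored tablespaces" below):
-- t1 is created as shown in "Data structures" below (still empty at this point)
FLUSH TABLES t1 FOR EXPORT;
-- outside the server: cp data/test/t1.ibd data/test/t1.ibd.empty
UNLOCK TABLES;
-- t1 is populated as shown in "Initial table contents" below
FLUSH TABLES t1 FOR EXPORT;
-- outside the server: cp data/test/t1.ibd data/test/t1.ibd.backup
UNLOCK TABLES;
-- the server is then shut down and the whole datadir is stored along with the *.ibd.empty / *.ibd.backup files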
Repeated activity:
- the stored data directory is copied back;
- server is started manually with the parameters needed for the current test;
- an automated test is performed on the running server;
- server is shut down manually.
Data structures
CREATE TABLE t1 ( pk INT AUTO_INCREMENT PRIMARY KEY, f1 INT, f2 INT NOT NULL, f3 BIGINT, f4 BIGINT NOT NULL, f5 VARCHAR(1024), f6 VARCHAR(2048) NOT NULL, f7 CHAR(255), f8 CHAR(128) NOT NULL, UNIQUE(f1), UNIQUE(f7), KEY(f2,f5(64)), KEY(f3,f6(128)), KEY(f4,f7), KEY(f1,f8) ) ENGINE=INNODB ROW_FORMAT = COMPACT ;
CREATE TABLE t2 ( pk INT AUTO_INCREMENT PRIMARY KEY, f1 INT, f2 INT NOT NULL, f3 BIGINT, f4 BIGINT NOT NULL, f5 VARCHAR(1024), f6 VARCHAR(2048) NOT NULL, f7 CHAR(255), f8 CHAR(128) NOT NULL, UNIQUE(f1), UNIQUE(f7), KEY(f2,f5(64)), KEY(f3,f6(128)), KEY(f4,f7), KEY(f1,f8) ) ENGINE=INNODB ROW_FORMAT = REDUNDANT ;
set global innodb_file_format=Barracuda;
CREATE TABLE t3 ( pk INT AUTO_INCREMENT PRIMARY KEY, f1 INT, f2 INT NOT NULL, f3 BIGINT, f4 BIGINT NOT NULL, f5 VARCHAR(1024), f6 VARCHAR(2048) NOT NULL, f7 CHAR(255), f8 CHAR(128) NOT NULL, UNIQUE(f1), UNIQUE(f7), KEY(f2,f5(64)), KEY(f3,f6(128)), KEY(f4,f7), KEY(f1,f8) ) ENGINE=INNODB ROW_FORMAT = COMPRESSED ;
CREATE TABLE t4 ( pk INT AUTO_INCREMENT PRIMARY KEY, f1 INT, f2 INT NOT NULL, f3 BIGINT, f4 BIGINT NOT NULL, f5 VARCHAR(1024), f6 VARCHAR(2048) NOT NULL, f7 CHAR(255), f8 CHAR(128) NOT NULL, UNIQUE(f1), UNIQUE(f7), KEY(f2,f5(64)), KEY(f3,f6(128)), KEY(f4,f7), KEY(f1,f8) ) ENGINE=INNODB ROW_FORMAT = DYNAMIC ;
CREATE TABLE t5 ( pk INT AUTO_INCREMENT PRIMARY KEY, f1 INT, f2 INT NOT NULL, f3 BIGINT, f4 BIGINT NOT NULL, f5 VARCHAR(1024), f6 VARCHAR(2048) NOT NULL, f7 CHAR(255), f8 CHAR(128) NOT NULL, UNIQUE(f1), UNIQUE(f7), KEY(f2,f5(64)), KEY(f3,f6(128)), KEY(f4,f7), KEY(f1,f8) ) ENGINE=INNODB ROW_FORMAT = DYNAMIC PAGE_COMPRESSED=1 PAGE_COMPRESSION_LEVEL=3;
CREATE TABLE t6 ( pk INT AUTO_INCREMENT PRIMARY KEY, f1 INT, f2 INT NOT NULL, f3 BIGINT, f4 BIGINT NOT NULL, f5 VARCHAR(1024), f6 VARCHAR(2048) NOT NULL, f7 CHAR(255), f8 CHAR(128) NOT NULL, UNIQUE(f1), UNIQUE(f7), KEY(f2,f5(64)), KEY(f3,f6(128)), KEY(f4,f7), KEY(f1,f8) ) ENGINE=INNODB ROW_FORMAT = DYNAMIC PAGE_COMPRESSED=1;
CREATE TABLE t7 ( pk INT AUTO_INCREMENT PRIMARY KEY, f1 INT, f2 INT NOT NULL, f3 BIGINT, f4 BIGINT NOT NULL, f5 VARCHAR(1024), f6 VARCHAR(2048) NOT NULL, f7 CHAR(255), f8 CHAR(128) NOT NULL, UNIQUE(f1), UNIQUE(f7), KEY(f2,f5(64)), KEY(f3,f6(128)), KEY(f4,f7), KEY(f1,f8) ) ENGINE=INNODB ROW_FORMAT = COMPACT PAGE_COMPRESSED=1;
CREATE TABLE t8 ( pk INT AUTO_INCREMENT PRIMARY KEY, f1 INT, f2 INT NOT NULL, f3 BIGINT, f4 BIGINT NOT NULL, f5 VARCHAR(1024), f6 VARCHAR(2048) NOT NULL, f7 CHAR(255), f8 CHAR(128) NOT NULL, UNIQUE(f1), UNIQUE(f7), KEY(f2,f5(64)), KEY(f3,f6(128)), KEY(f4,f7), KEY(f1,f8) ) ENGINE=INNODB ROW_FORMAT = COMPACT PAGE_COMPRESSED=1 PAGE_COMPRESSION_LEVEL=9;
Initial table contents
INSERT INTO t1 SELECT NULL, seq, seq%1000, seq%10, seq%10000, CONCAT('f5_',(seq%1000)), CONCAT('f6_',(seq%100)), CONCAT('f7_',seq), CONCAT('f8_',(seq%10)) FROM seq_1_to_10000;
INSERT INTO t2 SELECT NULL, seq, seq%1000, seq%10, seq%10000, CONCAT('f5_',(seq%1000)), CONCAT('f6_',(seq%100)), CONCAT('f7_',seq), CONCAT('f8_',(seq%10)) FROM seq_1_to_50000;
INSERT INTO t3 SELECT NULL, seq, seq%1000, seq%10, seq%10000, CONCAT('f5_',(seq%1000)), CONCAT('f6_',(seq%100)), CONCAT('f7_',seq), CONCAT('f8_',(seq%10)) FROM seq_1_to_100000;
INSERT INTO t4 SELECT NULL, seq, seq%1000, seq%10, seq%10000, CONCAT('f5_',(seq%1000)), CONCAT('f6_',(seq%100)), CONCAT('f7_',seq), CONCAT('f8_',(seq%10)) FROM seq_1_to_200000;
INSERT INTO t5 SELECT NULL, seq, seq%1000, seq%10, seq%10000, CONCAT('f5_',(seq%1000)), CONCAT('f6_',(seq%100)), CONCAT('f7_',seq), CONCAT('f8_',(seq%10)) FROM seq_1_to_300000;
INSERT INTO t6 SELECT NULL, seq, seq%1000, seq%10, seq%10000, CONCAT('f5_',(seq%1000)), CONCAT('f6_',(seq%100)), CONCAT('f7_',seq), CONCAT('f8_',(seq%10)) FROM seq_1_to_30000;
INSERT INTO t7 SELECT NULL, seq, seq%1000, seq%10, seq%10000, CONCAT('f5_',(seq%1000)), CONCAT('f6_',(seq%100)), CONCAT('f7_',seq), CONCAT('f8_',(seq%10)) FROM seq_1_to_80000;
INSERT INTO t8 SELECT NULL, seq, seq%1000, seq%10, seq%10000, CONCAT('f5_',(seq%1000)), CONCAT('f6_',(seq%100)), CONCAT('f7_',seq), CONCAT('f8_',(seq%10)) FROM seq_1_to_800000;
Stored tablespaces
-rw-r----- 1 elenst elenst 26214400 Nov 6 13:28 data/test/t1.ibd.backup
-rw-r----- 1 elenst elenst 104857600 Nov 6 13:28 data/test/t2.ibd.backup
-rw-r----- 1 elenst elenst 96468992 Nov 6 13:28 data/test/t3.ibd.backup
-rw-r----- 1 elenst elenst 364904448 Nov 6 13:28 data/test/t4.ibd.backup
-rw-r----- 1 elenst elenst 515899392 Nov 6 13:29 data/test/t5.ibd.backup
-rw-r----- 1 elenst elenst 62914560 Nov 6 13:29 data/test/t6.ibd.backup
-rw-r----- 1 elenst elenst 146800640 Nov 6 13:29 data/test/t7.ibd.backup
-rw-r----- 1 elenst elenst 1430257664 Nov 6 13:29 data/test/t8.ibd.backup

-rw-r----- 1 elenst elenst 196608 Nov 6 13:28 data/test/t1.ibd.empty
-rw-r----- 1 elenst elenst 196608 Nov 6 13:28 data/test/t2.ibd.empty
-rw-r----- 1 elenst elenst 98304 Nov 6 13:28 data/test/t3.ibd.empty
-rw-r----- 1 elenst elenst 196608 Nov 6 13:28 data/test/t4.ibd.empty
-rw-r----- 1 elenst elenst 196608 Nov 6 13:28 data/test/t5.ibd.empty
-rw-r----- 1 elenst elenst 196608 Nov 6 13:28 data/test/t6.ibd.empty
-rw-r----- 1 elenst elenst 196608 Nov 6 13:28 data/test/t7.ibd.empty
-rw-r----- 1 elenst elenst 196608 Nov 6 13:28 data/test/t8.ibd.empty
Test results
Note: Any noticeable deviations and inaccuracies in executing the scenario caused various problems. These problems are considered outside the scope of this task and are attributed to the general instability of the export and DISCARD/IMPORT TABLESPACE functionality. Analysis of such failures was not performed.
Smoke tests for stability
Several runs of the described test were performed, all with the same data:
- 1-thread test on a server with innodb_buffer_pool_size=128M general_log=on, otherwise defaults, for table t1;
- 8-thread test on a server with innodb_buffer_pool_size=128M general_log=on, otherwise defaults;
- 8-thread test on a server with innodb_buffer_pool_size=2G general_log=on, otherwise defaults;
- 8-thread test on a server with innodb_buffer_pool_size=4G general_log=on, otherwise defaults;
- 8-thread test on a server with innodb_buffer_pool_size=8G general_log=on, otherwise defaults;
- 8-thread test on a server with innodb_buffer_pool_size=2G general_log=on ignore-builtin-innodb plugin-load-add=ha_innodb, otherwise defaults;
The duration of the test runs varied from 10 to 30 min. All test runs were performed on a debug build.
In all variations above, the test was considered to pass if the server did not crash for the duration of the run.
On the initial implementation, bb-10.1-marko 1fb7ac18c417c8fbd95, the test caused the failure described in the comment below. The failure was reproducible both via the stress test and via MTR.
The failure stopped happening with a temporary patch in place and did not re-appear after the real fix. No other crashes were observed.
Sanity check for performance regressions
The same test flow was converted into a performance comparison test. The tests were performed in an environment not tuned for benchmarking; the results are not considered accurate in any way, and the absolute values are irrelevant. The purpose of the test was to check for critical performance regressions. All tests were run on non-debug builds.
Several test runs were performed:
- 1-thread latency comparison* test on servers with innodb_buffer_pool_size=4G general_log=on, otherwise defaults, table t3;
- 1-thread latency comparison* test on servers with innodb_buffer_pool_size=4G general_log=on, otherwise defaults, table t8;
- 8-thread throughput comparison** tests on a server with innodb_buffer_pool_size=8G general_log=on, otherwise defaults;
- 1-thread throughput comparison** tests on a server with innodb_buffer_pool_size=8G general_log=on, otherwise defaults, table t5;
- 8-thread throughput comparison** tests on a server with innodb_buffer_pool_size=128M general_log=on, otherwise defaults;
* For the latency comparison test, two servers run side by side. The test executes the same query on both servers, one after another, compares the execution times, and reports differences which exceed a given threshold.
** For the throughput comparison test, only one server is started at a time. The whole test flow is run on the server for a given time interval, after which the overall number of queries (including DML) executed per thread is calculated, and the resulting numbers are compared.
Examples of throughput test results:
8-thread 10-min test, buffer pool 8G
thread/table | 10.1 | bb-10.1-marko |
---|---|---|
1 | 451 | 478 |
2 | 283 | 286 |
3 | 345 | 425 |
4 | 271 | 320 |
5 | 233 | 287 |
6 | 418 | 418 |
7 | 322 | 314 |
8 | 381 | 348 |
8-thread 10-min test, buffer pool 128M
thread/table | 10.1 | bb-10.1-marko |
---|---|---|
1 | 392 | 452 |
2 | 286 | 378 |
3 | 349 | 349 |
4 | 325 | 340 |
5 | 302 | 244 |
6 | 391 | 468 |
7 | 322 | 402 |
8 | 381 | 348 |
1-thread 10-min test, buffer pool 8G, table t5
10.1 | bb-10.1-marko |
---|---|
1043 | 1043 |
yes, exactly the same, not a typo, weird coincidence
No differences were observed during the tests that could not be explained by the unreliability of the test environment.
Failure on bb-10.1-marko 1fb7ac18c417c8fbd95:
2017-11-05 00:02:20 7fd51f616b00 InnoDB: Assertion failure in thread 140553331239680 in file buf0buf.cc line 3555
InnoDB: Failing assertion: mode == BUF_GET_POSSIBLY_FREED || !fix_block->page.file_page_was_freed
InnoDB: We intentionally generate a memory trap.
#5 0x00007fd51d5aa3fa in abort () from /lib/x86_64-linux-gnu/libc.so.6
#6 0x00007fd516a20cf9 in buf_page_get_gen (space=4, zip_size=0, offset=138, rw_latch=3, guess=0x0, mode=12, file=0x7fd516d01d38 "/data/src/bb-10.1-marko/storage/innobase/row/row0import.cc", line=2158, mtr=0x7fd51f614420, err=0x0) at /data/src/bb-10.1-marko/storage/innobase/buf/buf0buf.cc:3554
#7 0x00007fd516bd1876 in PageConverter::operator() (this=0x7fd51f614050, offset=2260992, block=0x7fd51f613370) at /data/src/bb-10.1-marko/storage/innobase/row/row0import.cc:2158
#8 0x00007fd516a98f3d in fil_iterate (iter=..., block=0x7fd51f613370, callback=...) at /data/src/bb-10.1-marko/storage/innobase/fil/fil0fil.cc:6512
#9 0x00007fd516a999ee in fil_tablespace_iterate (table=0x7fd515ebb8f8, n_io_buffers=64, callback=...) at /data/src/bb-10.1-marko/storage/innobase/fil/fil0fil.cc:6776
#10 0x00007fd516bd4ca9 in row_import_for_mysql (table=0x7fd515ebb8f8, prebuilt=0x7fd507583078) at /data/src/bb-10.1-marko/storage/innobase/row/row0import.cc:3604
#11 0x00007fd516af90a1 in ha_innodb::discard_or_import_tablespace (this=0x7fd5074b0088, discard=0 '\000') at /data/src/bb-10.1-marko/storage/innobase/handler/ha_innodb.cc:12412
#12 0x000055eedc944ec2 in handler::ha_discard_or_import_tablespace (this=0x7fd5074b0088, discard=0 '\000') at /data/src/bb-10.1-marko/sql/handler.cc:4189
#13 0x000055eedc7bf401 in mysql_discard_or_import_tablespace (thd=0x7fd513fc7070, table_list=0x7fd5074fa178, discard=false) at /data/src/bb-10.1-marko/sql/sql_table.cc:5666
#14 0x000055eedc83864d in Sql_cmd_discard_import_tablespace::execute (this=0x7fd5074fa770, thd=0x7fd513fc7070) at /data/src/bb-10.1-marko/sql/sql_alter.cc:365
#15 0x000055eedc70917c in mysql_execute_command (thd=0x7fd513fc7070) at /data/src/bb-10.1-marko/sql/sql_parse.cc:5680
#16 0x000055eedc70d42d in mysql_parse (thd=0x7fd513fc7070, rawbuf=0x7fd5074fa088 "ALTER TABLE t1 IMPORT TABLESPACE", length=32, parser_state=0x7fd51f6155e0) at /data/src/bb-10.1-marko/sql/sql_parse.cc:7326
#17 0x000055eedc6fc042 in dispatch_command (command=COM_QUERY, thd=0x7fd513fc7070, packet=0x7fd515ec9071 "ALTER TABLE t1 IMPORT TABLESPACE", packet_length=32) at /data/src/bb-10.1-marko/sql/sql_parse.cc:1477
#18 0x000055eedc6fadc7 in do_command (thd=0x7fd513fc7070) at /data/src/bb-10.1-marko/sql/sql_parse.cc:1106
#19 0x000055eedc833a63 in do_handle_one_connection (thd_arg=0x7fd513fc7070) at /data/src/bb-10.1-marko/sql/sql_connect.cc:1349
#20 0x000055eedc8337c7 in handle_one_connection (arg=0x7fd513fc7070) at /data/src/bb-10.1-marko/sql/sql_connect.cc:1261
#21 0x000055eedcbecb98 in pfs_spawn_thread (arg=0x7fd515ebb8f0) at /data/src/bb-10.1-marko/storage/perfschema/pfs.cc:1861
#22 0x00007fd51f2a5494 in start_thread (arg=0x7fd51f616b00) at pthread_create.c:333
#23 0x00007fd51d65e93f in clone () from /lib/x86_64-linux-gnu/libc.so.6
Run with --mysqld=--sequence
--source include/have_innodb.inc
--let $dir= `SELECT CONCAT(@@datadir,'/',DATABASE())`
# One populate / export / discard / import cycle on t1, following the scenario above
CREATE TABLE t1 ( pk INT AUTO_INCREMENT PRIMARY KEY, f1 INT, f2 INT NOT NULL, f3 BIGINT, f4 BIGINT NOT NULL, f5 VARCHAR(1024), f6 VARCHAR(2048) NOT NULL, f7 CHAR(255), f8 CHAR(128) NOT NULL, UNIQUE(f1), UNIQUE(f7), KEY(f2,f5(64)), KEY(f3,f6(128)), KEY(f4,f7), KEY(f1,f8) ) ENGINE=INNODB ROW_FORMAT = COMPACT;
INSERT INTO t1 SELECT NULL, seq, seq%1000, seq%10, seq%10000, CONCAT('f5_',(seq%1000)), CONCAT('f6_',(seq%100)), CONCAT('f7_',seq), CONCAT('f8_',(seq%10)) FROM seq_1_to_10000;
FLUSH TABLES t1 FOR EXPORT;
--copy_file $dir/t1.ibd $dir/t1.ibd.backup
UNLOCK TABLES;
ALTER TABLE t1 DISCARD TABLESPACE;
--copy_file $dir/t1.ibd.backup $dir/t1.ibd
ALTER TABLE t1 IMPORT TABLESPACE;
# Cleanup
DROP TABLE t1;