Details
Type: Bug
Status: Confirmed
Priority: Major
Resolution: Unresolved
Affects Version/s: 11.7.2, 10.5, 10.6, 10.11, 11.4, 11.8
Fix Version/s: None
Environment: openSUSE Tumbleweed
Description
With these tables:
CREATE TABLE `run` (
  `id` int(10) unsigned NOT NULL AUTO_INCREMENT,
  `version` text DEFAULT NULL,
  `sha` varchar(128) DEFAULT NULL,
  `filter` text DEFAULT NULL,
  `skip` tinyint(1) NOT NULL,
  `timestamp` timestamp NOT NULL DEFAULT current_timestamp(),
  PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci;

CREATE TABLE `source` (
  `id` int(10) unsigned NOT NULL AUTO_INCREMENT,
  `run` int(10) unsigned DEFAULT NULL,
  `src` text NOT NULL,
  PRIMARY KEY (`id`),
  UNIQUE KEY `src` (`src`) USING HASH,
  KEY `run` (`run`),
  KEY `idx_source_src` (`src`(768)),
  CONSTRAINT `source_ibfk_1` FOREIGN KEY (`run`) REFERENCES `run` (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=18561 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci;

CREATE TABLE `struct` (
  `id` int(10) unsigned NOT NULL AUTO_INCREMENT,
  `run` int(10) unsigned DEFAULT NULL,
  `parent` int(10) unsigned DEFAULT NULL,
  `type` char(1) NOT NULL CHECK (`type` in ('s','u')),
  `name` text NOT NULL,
  `attrs` text DEFAULT NULL,
  `packed` tinyint(1) NOT NULL,
  `inMacro` tinyint(1) NOT NULL,
  `src` int(10) unsigned NOT NULL,
  `begLine` int(10) unsigned NOT NULL,
  `begCol` int(10) unsigned NOT NULL,
  `endLine` int(10) unsigned DEFAULT NULL,
  `endCol` int(10) unsigned DEFAULT NULL,
  PRIMARY KEY (`id`),
  UNIQUE KEY `name` (`name`,`src`,`begLine`,`begCol`) USING HASH,
  KEY `run` (`run`),
  KEY `parent` (`parent`),
  KEY `src` (`src`),
  CONSTRAINT `struct_ibfk_1` FOREIGN KEY (`run`) REFERENCES `run` (`id`),
  CONSTRAINT `struct_ibfk_2` FOREIGN KEY (`parent`) REFERENCES `struct` (`id`) ON DELETE CASCADE,
  CONSTRAINT `struct_ibfk_3` FOREIGN KEY (`src`) REFERENCES `source` (`id`) ON DELETE CASCADE
) ENGINE=InnoDB AUTO_INCREMENT=128962 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci;
and this Python script:
from contextlib import closing
import MySQLdb
import MySQLdb.constants.ER as ER
import multiprocessing as mp
import os
import time
import sys

SOURCES = 10
STRUCTS = 500


def deadlock(cur, q, params):
    for x in range(3):
        try:
            cur.execute(q, params)
        except MySQLdb.Error as e:
            if e.args[0] not in (ER.LOCK_DEADLOCK, ):
                print(f"Error: {q}:\n\t{params}\n\t{e}")
                break
            time.sleep(.1)
        else:
            if x > 1:
                print(x)
            return


def do_work(cur):
    pid = os.getpid()

    for x in range(SOURCES):
        deadlock(cur, 'INSERT INTO source(src) VALUES (%s)', ('tty_xyz%d_%d.c' % (pid, x),))

    for x in range(STRUCTS):
        deadlock(cur, '''INSERT INTO
                 struct(type, name, attrs, packed, inMacro, src, begLine, begCol, endLine, endCol)
                 SELECT %s, %s, %s, %s, %s, id, %s, %s, %s, %s FROM source WHERE src=%s''',
                 ('s', 'str%d_%d' % (pid, x), '', 0, 0, 10 + x, 1, 11 + x, 1,
                  'tty_xyz%d_%d.c' % (pid, x % SOURCES)))


def db_work():
    try:
        with closing(MySQLdb.connect(
                host="localhost",
                port=3306,
                database="clang_struct",
                user="clang_struct",
                password="pass")) as db:
            with closing(db.cursor()) as cur:
                do_work(cur)
            db.commit()
    except MySQLdb.Error as e:
        print(f"Error0: {e}")
        sys.exit(1)


with mp.Pool() as pool:
    count = os.cpu_count()
    print(f"Running {count} processes")
    for res in [pool.apply_async(db_work) for i in range(count)]:
        res.get()
I occasionally see an exception:
1467, 'Failed to read auto-increment value from storage engine'
But:
show table status where name='struct';
+--------+--------+---------+------------+------+----------------+-------------+-----------------+--------------+-----------+----------------+---------------------+---------------------+------------+-----------------------+----------+----------------+---------+------------------+-----------+
| Name | Engine | Version | Row_format | Rows | Avg_row_length | Data_length | Max_data_length | Index_length | Data_free | Auto_increment | Create_time | Update_time | Check_time | Collation | Checksum | Create_options | Comment | Max_index_length | Temporary |
+--------+--------+---------+------------+------+----------------+-------------+-----------------+--------------+-----------+----------------+---------------------+---------------------+------------+-----------------------+----------+----------------+---------+------------------+-----------+
| struct | InnoDB | 11 | Dynamic | 0 | 0 | 16384 | 0 | 65536 | 14680064 | 128962 | 2025-02-21 09:55:17 | 2025-02-25 08:59:09 | NULL | utf8mb4_uca1400_ai_ci | NULL | | | 0 | N |
+--------+--------+---------+------------+------+----------------+-------------+-----------------+--------------+-----------+----------------+---------------------+---------------------+------------+-----------------------+----------+----------------+---------+------------------+-----------+
So the auto-increment value is definitely far from exhausted: `id` is int(10) unsigned, whose maximum is 4294967295, while Auto_increment stands at only 128962.
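As a cross-check, the same counter can be read from information_schema; a minimal sketch, assuming the same clang_struct database and credentials as in the repro script:

import MySQLdb

# Read the current AUTO_INCREMENT of `struct` from information_schema.
db = MySQLdb.connect(host="localhost", port=3306, database="clang_struct",
                     user="clang_struct", password="pass")
cur = db.cursor()
cur.execute("""SELECT AUTO_INCREMENT FROM information_schema.TABLES
               WHERE TABLE_SCHEMA=%s AND TABLE_NAME=%s""",
            ("clang_struct", "struct"))
print(cur.fetchone()[0])  # 128962 here, nowhere near 4294967295
db.close()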
The Python script inserts into `source` (10 rows) and `struct` (500 rows) from parallel processes (one per CPU). The insertion into `struct` sometimes fails with the above error. This is a reduced test case; the same happens with the C binding.
Also, if I retry, i.e. treat ER.AUTOINC_READ_FAILED as retryable along with ER.LOCK_DEADLOCK (as sketched below), the second attempt usually succeeds. But I assume AUTOINC_READ_FAILED should not be raised in the first place.
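For reference, a minimal sketch of that retry variant; compared to the script above, only the tuple of error codes that deadlock() treats as retryable changes:

# Retry on AUTOINC_READ_FAILED as well, not only on deadlocks.
RETRYABLE = (ER.LOCK_DEADLOCK, ER.AUTOINC_READ_FAILED)

def deadlock(cur, q, params):
    for x in range(3):
        try:
            cur.execute(q, params)
        except MySQLdb.Error as e:
            if e.args[0] not in RETRYABLE:
                print(f"Error: {q}:\n\t{params}\n\t{e}")
                break
            time.sleep(.1)
        else:
            if x > 1:
                print(x)
            return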