Details
-
Task
-
Status: Closed (View Workflow)
-
Major
-
Resolution: Fixed
Description
This task is about improving memory utilization and performance for
Information schema
Some work has recently been done in bb-10.2-ext to free memory early for
tables and views used by performance schema. The next step is to create
more efficient temporary tables that don't store information that we don't
need.
MariaDB [test]> select MEMORY_USED,MAX_MEMORY_USED from information_schema.processlist where db="test";
|
+-------------+-----------------+
|
| MEMORY_USED | MAX_MEMORY_USED |
|
+-------------+-----------------+
|
| 86120 | 245768 |
|
+-------------+-----------------+
|
1 row in set (0.00 sec)
|
|
MariaDB [test]> select table_name from information_schema.tables where table_schema="mysql";
|
....
|
MariaDB [test]> select MEMORY_USED,MAX_MEMORY_USED from information_schema.processlist where db="test";
|
+-------------+-----------------+
|
| MEMORY_USED | MAX_MEMORY_USED |
|
+-------------+-----------------+
|
| 86120 | 696880 |
|
+-------------+-----------------+
|
Here we used 600K memory for a simple query
MariaDB [test]> select count(*) from information_schema.tables where table_schema="mysql";
|
MariaDB [test]> select table_name from information_schema.tables;
|
...
|
MariaDB [test]> select MEMORY_USED,MAX_MEMORY_USED from information_schema.processlist where db="test";
|
+-------------+-----------------+
|
| MEMORY_USED | MAX_MEMORY_USED |
|
+-------------+-----------------+
|
| 86120 | 5293216 |
|
+-------------+-----------------+
|
Here we used 5M memory for a simple query over 341 tables.
The reason for the excessive memory use comes from the fact that the
temporary table created has a very wide record:
While running:
select table_name from information_schema.tables; |
in gdb:
(gdb) break handler::ha_write_tmp_row
|
(gdb) p table->s->reclength
|
$2 = 14829
|
Two possible ways to fix this:
1) Extend heap tables to store VARCHAR and BLOB efficiently
2) In sql_show, change all fields that are not used to be CHAR(1)
1) is a major task and we can't get that done in time for 10.3
2) will help even if we do 1) as we have less to store.
This task is to do 2)
This should not be that hard as information_schema already knows which
fields are accessed in the query. This is already used to decide if we
can solve the information_schema access without opening the table.
This should be done against the bb-10.2-ext tree, which has the new
MAX_MEMORY_USED column in information_schema.processlist.
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index ae18e1cac04..99cf4b84ad6 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -7759,6 +7759,88 @@ ST_SCHEMA_TABLE *get_schema_table(enum enum_schema_tables schema_table_idx)
return &schema_tables[schema_table_idx];
}
+bool evaluate_schema_field_recursive(Item* item, const char* field_name)
+{
+  switch(item->type())
+  {
+  case Item::FIELD_ITEM:
+  {
+    Item_field* field= (Item_field *)item;
+    if (!strcasecmp(field->field_name.str, field_name))
+      return true;
+    else if (!strcasecmp(field->field_name.str, "*"))
+      return true;
+    else
+      return false;
+  }
+
+  case Item::FUNC_ITEM:
+  {
+    bool show_field= false;
+    Item_func* func= (Item_func *)item;
+    for (uint i= 0; i < func->argument_count(); i++)
+    {
+      show_field= show_field ||
+                  evaluate_schema_field_recursive(func->arguments()[i],
+                                                  field_name);
+      if (show_field)
+        return true;
+    }
+    return false;
+  }
+
+  case Item::COND_ITEM:
+  {
+    Item *tmp;
+    bool show_field= false;
+    Item_cond* cond= (Item_cond *)item;
+    List_iterator<Item> it(*(cond->argument_list()));
+    while ((tmp= it++))
+    {
+      show_field= show_field ||
+                  evaluate_schema_field_recursive(tmp, field_name);
+      if (show_field)
+        return true;
+    }
+    return false;
+  }
+
+  default:
+    return false;
+  }
+}
The above function doesn't check all possible item types; for example,
SUM_FUNC_ITEM also needs to be handled.
+
+bool field_can_be_used_in_query(THD* thd, ST_FIELD_INFO *field_info)
+{
+  if (thd->lex->select_lex.sj_nests.elements > 0 ||
+      thd->lex->select_lex.sj_subselects.elements > 0 ||
+      thd->lex->select_lex.nest_level > 0)
+    return true;
+
+  reg2 Item *item;
+  List_iterator<Item> it(thd->lex->select_lex.item_list);
+
+  /* select fields list check */
+  while ((item= it++))
+  {
+    if (evaluate_schema_field_recursive(item, field_info->field_name))
+      return true;
+  }
+
+  /* select fields where cond check */
+  if (thd->lex->select_lex.where &&
+      evaluate_schema_field_recursive(thd->lex->select_lex.where,
+                                      field_info->field_name))
+    return true;
+
+  /* select fields having cond check */
+  if (thd->lex->select_lex.having &&
+      evaluate_schema_field_recursive(thd->lex->select_lex.having,
+                                      field_info->field_name))
+    return true;
+
+  return false;
+}
The above was quite ok, but it missed a couple of things:
which will take some resources if there are many columns used.
/**
Create information_schema table using schema_table data.
@@ -7783,6 +7865,7 @@ ST_SCHEMA_TABLE *get_schema_table(enum enum_schema_tables schema_table_idx)
TABLE *create_schema_table(THD *thd, TABLE_LIST *table_list)
{
+ bool show_field= false;
int field_count= 0;
Item *item;
TABLE *table;
@@ -7869,19 +7952,35 @@ TABLE *create_schema_table(THD *thd, TABLE_LIST *table_list)
case MYSQL_TYPE_MEDIUM_BLOB:
case MYSQL_TYPE_LONG_BLOB:
case MYSQL_TYPE_BLOB:
+
+        show_field= field_can_be_used_in_query(thd, fields_info);
+        if (show_field)
         {
+          if (!(item= new (mem_root)
+                      Item_blob(thd, fields_info->field_name,
+                                fields_info->field_length)))
+          {
+            DBUG_RETURN(0);
+          }
+        }
+        else
+        {
+          if (!(item= new (mem_root)
+                      Item_empty_string(thd, "", 1, cs)))
+          {
+            DBUG_RETURN(0);
+          }
+          item->set_name(thd, fields_info->field_name,
+                         field_name_length, cs);
         }
         break;
       default:
         /* Don't let unimplemented types pass through. Could be a grave error. */
         DBUG_ASSERT(fields_info->field_type == MYSQL_TYPE_STRING);
+        show_field= field_can_be_used_in_query(thd, fields_info);
         if (!(item= new (mem_root)
+                    Item_empty_string(thd, "",
+                                      show_field ? fields_info->field_length : 1,
+                                      cs)))
+        {
+          DBUG_RETURN(0);
+        }
The above code was ok.
What was missing in the code:
(because of generated warnings when the old code tried to write too-long
strings into the shortened fields)
To solve the issue with the queries that were not handled, I decided to use
a slightly different approach:
string column or not.
columns.
The final patch is attached to this issue.
Note that even if I decided to use a different approach, having your code
as a base made my work much faster, so thanks a lot for doing this!