Details
Type: Bug
Status: Closed
Priority: Minor
Resolution: Cannot Reproduce
Affects Version: 2.3.4
Fix Version: None
Environment: Ubuntu 16.04
Description
First of all, I'm not 100% sure whether this is PEBKAC or an actual issue. I was trying to follow the documentation and ended up with some problems. What I did is:
1. Set up MariaDB 10.3.12 with the following configuration file:
[MYSQLD]
user=mysql
basedir=/usr/
datadir=/var/lib/mysql
socket=/var/lib/mysql/mysql.sock
pid_file=/var/lib/mysql/mysql.pid
port=3306
log_error=/var/log/mysql/mysqld.log
log_warnings=2
# log_output = FILE

#Slow logging
slow_query_log_file=/var/log/mysql/mysql-slow.log
long_query_time=2
slow_query_log=OFF
log_queries_not_using_indexes=OFF

### INNODB OPTIONS
innodb_buffer_pool_size=248M
innodb_flush_log_at_trx_commit=2
innodb_file_per_table=1
innodb_data_file_path = ibdata1:100M:autoextend
## You may want to tune the below depending on number of cores and disk sub
innodb_read_io_threads=4
innodb_write_io_threads=4
innodb_doublewrite=1
innodb_log_file_size=64M
innodb_log_buffer_size=16M
innodb_buffer_pool_instances=1
innodb_log_files_in_group=2
innodb_thread_concurrency=64
# innodb_file_format = barracuda
innodb_flush_method = O_DIRECT
innodb_rollback_on_timeout=ON
# innodb_locks_unsafe_for_binlog = 1
innodb_autoinc_lock_mode=2
## avoid statistics update when doing e.g show tables
innodb_stats_on_metadata=0
default_storage_engine=innodb

# CHARACTER SET
# collation_server = utf8_unicode_ci
# init_connect = 'SET NAMES utf8'
# character_set_server = utf8

# REPLICATION SPECIFIC
server_id=1
binlog_format=ROW
log_bin=binlog
log_slave_updates=1
relay_log=relay-bin
expire_logs_days=7
read_only=ON
report_host=10.0.0.152

# OTHER THINGS, BUFFERS ETC
key_buffer_size = 24M
tmp_table_size = 64M
max_heap_table_size = 64M
max_allowed_packet = 512M
# sort_buffer_size = 256K
# read_buffer_size = 256K
# read_rnd_buffer_size = 512K
# myisam_sort_buffer_size = 8M
skip_name_resolve
memlock=0
sysdate_is_now=1
max_connections=500
thread_cache_size=512
query_cache_type = 0
query_cache_size = 0
table_open_cache=1024
lower_case_table_names=0
# 5.6 backwards compatibility (FIXME)
# explicit_defaults_for_timestamp = 1

performance_schema = OFF
performance-schema-max-mutex-classes = 0
performance-schema-max-mutex-instances = 0

ignore-db-dir=lost+found
ignore-db-dir=.s9s_do_not_delete

[MYSQL]
socket=/var/lib/mysql/mysql.sock
# default_character_set = utf8
[client]
socket=/var/lib/mysql/mysql.sock
# default_character_set = utf8
[mysqldump]
socket=/var/lib/mysql/mysql.sock
max_allowed_packet = 512M
# default_character_set = utf8

[xtrabackup]

[MYSQLD_SAFE]
# log_error = /var/log/mysqld.log
basedir=/usr/
# datadir = /var/lib/mysql
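A quick way to sanity-check that binary logging is active with this configuration (using the socket path from the file above) would be something like:
mysql -S /var/lib/mysql/mysql.sock -e "SHOW VARIABLES LIKE 'log_bin'; SHOW MASTER STATUS;"
# expected: log_bin = ON plus the current binlog file and position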
2. I created data using sysbench: one table with three rows in it.
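The sysbench invocation was roughly along these lines (sysbench 1.0 syntax; exact flags not recorded, user and password are placeholders; the point is a single small sbtest.sbtest1 table):
sysbench oltp_read_write --mysql-socket=/var/lib/mysql/mysql.sock \
    --mysql-user=sbtest --mysql-password=sbtest --mysql-db=sbtest \
    --tables=1 --table-size=3 prepare
# prepare creates sbtest.sbtest1 (columns id, k, c, pad) and inserts three rows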
3. On a separate host, I set up Columnstore 1.2.2 and MaxScale 2.3.4 following the documentation. MaxScale's configuration is the default plus:
# The Replication Proxy service
[replication-service]
type=service
router=binlogrouter
server_id=4000
master_id=3000
filestem=binlog
user=maxuser
passwd=maxpwd

# The Avro conversion service
[avro-service]
type=service
router=avrorouter
source=replication-service
filestem=binlog
avrodir=/var/lib/maxscale/avro/
start_index=1

# The listener for the replication-service
[replication-listener]
type=listener
service=replication-service
protocol=MariaDBClient
port=3307

# The client listener for the avro-service
[avro-listener]
type=listener
service=avro-service
protocol=CDC
port=4001
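After restarting MaxScale, the services and listeners can be verified with maxadmin (the same tool used for the CDC user further down), for example:
maxadmin list services
maxadmin list listeners
# replication-service and avro-service should both be started, with listeners on ports 3307 and 4001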
4. I set up binlog replication from MariaDB to MaxScale and got the binlog just fine.
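For completeness, per the binlogrouter documentation this is done by connecting to the replication-listener on port 3307 and issuing CHANGE MASTER there; roughly (client and replication credentials plus the starting position are placeholders):
mysql -h 10.0.0.151 -P 3307 -u maxuser -pmaxpwd -e "
CHANGE MASTER TO MASTER_HOST='10.0.0.152', MASTER_PORT=3306,
  MASTER_USER='repl_user', MASTER_PASSWORD='repl_pass',
  MASTER_LOG_FILE='binlog.000001', MASTER_LOG_POS=4;
START SLAVE;"
# 10.0.0.151 = MaxScale host, 10.0.0.152 = the MariaDB master from step 1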
5. I created the .avsc file using the Python utility:
{"type": "record", "namespace": "MaxScaleChangeDataSchema.avro", "fields": [{"type": ["null", "int"], "name": "id", "real_type": "int", "length": 11}, {"type": ["null", "int"], "name": "k", "real_type": "int", "length": 11}, {"type": ["null", "string"], "name": "c", "real_type": "char", "length": 120}, {"type": ["null", "string"], "name": "pad", "real_type": "char", "length": 60}], "name": "ChangeRecord"}
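If I read the avrorouter documentation correctly, the schema file has to be placed in the avrodir from the MaxScale config above, named <database>.<table>.<version>.avsc, and it must be valid JSON; roughly:
cp sbtest.sbtest1.000001.avsc /var/lib/maxscale/avro/
python -m json.tool /var/lib/maxscale/avro/sbtest.sbtest1.000001.avsc   # quick JSON validity check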
6. Created the CDC user as per the documentation:
maxadmin call command cdc add_user avro-service coluser colpass
7. Copied Columnstore.xml to the MaxScale directory.
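Something along these lines, assuming the default Columnstore 1.2 install location on the Columnstore node (the hostname is a placeholder); the destination matches the -c path passed to mxs_adapter below:
scp columnstore-host:/usr/local/mariadb/columnstore/etc/Columnstore.xml /var/lib/maxscale/Columnstore.xml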
8. When I attempted to start mxs_adapter, I got an error:
root@vagrant:~# mxs_adapter -u coluser -p colpass -d -a -h 10.0.0.151 -P 4001 -c /var/lib/maxscale/Columnstore.xml sbtest sbtest1
2019-02-20 12:28:54 [main] Started thread 0x2256dc0
2019-02-20 12:28:54 [main] Started 1 threads
2019-02-20 12:28:54 [sbtest.sbtest1] Requesting data for table: sbtest.sbtest1
2019-02-20 12:28:54 [sbtest.sbtest1] MaxScale connection could not be created: Failed to parse JSON: end of file expected near '{'
I'm not sure what's wrong. Looking into the avro file, it seems quite messy, although I'm not 100% sure what I should expect. The avro file is attached. I'd love to get some feedback, especially if I'm doing something wrong.
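In case it helps, this is a minimal sketch of how the attached file could be decoded into readable records (assuming the third-party fastavro Python package and that the avrorouter wrote a standard Avro container file under the avrodir; the exact file name is a guess based on the naming scheme above):
pip install fastavro
python3 -c "
from fastavro import reader
# print every converted change record as a Python dict
with open('/var/lib/maxscale/avro/sbtest.sbtest1.000001.avro', 'rb') as f:
    for record in reader(f):
        print(record)
"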