def run_pt_table_checksum(basedir, conn_options=None):
    """
    Run pt-table-checksum. Should be run on the master server.
    :param basedir: PS basedir path
    :param conn_options: pass explicit connection options only for a slave
    :return: True if the command succeeds; raises RuntimeError otherwise
    """
    rb_obj = RunBenchmark()
    sock_file = rb_obj.get_sock(basedir=basedir)
    if conn_options is None:
        # TODO: Temporarily disable check due to https://jira.percona.com/browse/PT-225
        # --no-check-slave-tables
        command = "pt-table-checksum --user={} --socket={} " \
                  "--recursion-method dsn=h=localhost,D=test,t=dsns " \
                  "--no-check-binlog-format --no-check-slave-tables".format("root", sock_file)
    else:
        command = "pt-table-checksum {} " \
                  "--recursion-method dsn=h=localhost,D=test,t=dsns " \
                  "--no-check-binlog-format --no-check-slave-tables".format(conn_options)
    status, output = subprocess.getstatusoutput(command)
    if status == 0:
        logger.debug("pt-table-checksum succeeded on master")
        return True
    else:
        logger.error("pt-table-checksum command failed")
        logger.error(output)
        raise RuntimeError("pt-table-checksum command failed")
Example #2
def test_populate_dsns_table(self, return_runner_test_mode_obj_5_6_xb_2_3):
    for basedir in return_runner_test_mode_obj_5_6_xb_2_3.basedirs:
        if '5.6' in basedir:
            mysql_master_client_cmd = RunBenchmark(
                config=return_runner_test_mode_obj_5_6_xb_2_3.conf
            ).get_mysql_conn(basedir=basedir)
            file_name = "cl_node0"
            mysql_slave_client_cmd = RunBenchmark(
                config=return_runner_test_mode_obj_5_6_xb_2_3.conf
            ).get_mysql_conn(basedir=basedir, file_name=file_name)
            # Get slave port here
            sql = "{} -e 'select @@port'".format(mysql_slave_client_cmd)
            port = return_runner_test_mode_obj_5_6_xb_2_3.run_sql_command(
                sql_command=sql)
            #slave_sock = "{}/sock0.sock".format(basedir)
            # port[7:] strips the leading "@@port\n" column header from the client output
            assert return_runner_test_mode_obj_5_6_xb_2_3.populate_dsns_table(
                sql_conn=mysql_master_client_cmd, slave_port=port[7:])
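For context, the dsns table populated above is what "--recursion-method dsn=h=localhost,D=test,t=dsns" in run_pt_table_checksum (shown earlier) reads to discover replicas. The sketch below follows the schema documented for pt-table-checksum; it is an assumption about what create_dsns_table()/populate_dsns_table() issue, not their actual implementation, and slave_port is a placeholder:

# Hypothetical SQL behind the dsns helpers (per the pt-table-checksum docs).
sql_create_dsns = ("CREATE TABLE IF NOT EXISTS test.dsns ("
                   " id int NOT NULL AUTO_INCREMENT,"
                   " parent_id int DEFAULT NULL,"
                   " dsn varchar(255) NOT NULL,"
                   " PRIMARY KEY (id))")
# One row per replica; the replica is identified here by host + port.
sql_populate_dsns = "INSERT INTO test.dsns (dsn) VALUES ('h=127.0.0.1,P={}')".format(slave_port)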
Example #3
def test_create_dsns_table(self, return_runner_test_mode_obj_5_6_xb_2_3):
    for basedir in return_runner_test_mode_obj_5_6_xb_2_3.basedirs:
        if '5.6' in basedir:
            mysql_master_client_cmd = RunBenchmark(
                config=return_runner_test_mode_obj_5_6_xb_2_3.conf
            ).get_mysql_conn(basedir=basedir)
            assert return_runner_test_mode_obj_5_6_xb_2_3.create_dsns_table(
                mysql_master_client_cmd)
Example #4
def test_check_slave_status(self, return_runner_test_mode_obj_5_6_xb_2_3):
    for basedir in return_runner_test_mode_obj_5_6_xb_2_3.basedirs:
        if '5.6' in basedir:
            mysql_slave_client_cmd = RunBenchmark(
                config=return_runner_test_mode_obj_5_6_xb_2_3.conf
            ).get_mysql_conn(basedir=basedir, file_name="cl_node0")
            show_slave_status = "{} -e 'show slave status\\G'"
            assert return_runner_test_mode_obj_5_6_xb_2_3.check_slave_status(
                show_slave_status.format(mysql_slave_client_cmd)) is None
Example #5
def test_drop_blank_mysql_users(self,
                                return_runner_test_mode_obj_5_6_xb_2_3):
    for basedir in return_runner_test_mode_obj_5_6_xb_2_3.basedirs:
        if '5.6' in basedir:
            mysql_master_client_cmd = RunBenchmark(
                config=return_runner_test_mode_obj_5_6_xb_2_3.conf
            ).get_mysql_conn(basedir=basedir)
            assert return_runner_test_mode_obj_5_6_xb_2_3.drop_blank_mysql_users(
                mysql_master_client_cmd)
Example #6
    def run_all_backup(self):
        # Method for taking backups using master_backup_script.backuper.py::all_backup()
        RunBenchmark().run_sysbench_prepare(basedir=self.basedir)
        if '5.7' in self.basedir:
            for i in range(1, 10):
                sql_encrypt = "alter table sysbench_test_db.sbtest{} encryption='Y'".format(
                    i)
                RunBenchmark.run_sql_statement(basedir=self.basedir,
                                               sql_statement=sql_encrypt)
                # Compression related issue -> https://bugs.launchpad.net/percona-xtrabackup/+bug/1641745
                # Disabling for now
                # TODO: Enable this after #1641745 is fixed.
                # sql_compress = "alter table sysbench_test_db.sbtest{} compression='lz4'".format(i)
                # RunBenchmark.run_sql_statement(basedir=self.basedir, sql_statement=sql_compress)
                # sql_optimize = "optimize table sysbench_test_db.sbtest{}".format(i)
                # RunBenchmark.run_sql_statement(basedir=self.basedir, sql_statement=sql_optimize)
            # NOTE: PXB will ignore rocksdb tables, which is going to break pt-table-checksum
            # for i in range(10, 15):
            #     sql_alter = "alter table sysbench_test_db.sbtest{} engine=rocksdb".format(i)
            #     RunBenchmark.run_sql_statement(basedir=self.basedir, sql_statement=sql_alter)
        # NOTE: PXB will ignore tokudb tables, which is going to break pt-table-checksum
        # for i in range(15, 20):
        #     sql_alter = "alter table sysbench_test_db.sbtest{} engine=tokudb".format(i)
        #     RunBenchmark.run_sql_statement(basedir=self.basedir, sql_statement=sql_alter)

        flush_tables = "flush tables"
        RunBenchmark.run_sql_statement(basedir=self.basedir,
                                       sql_statement=flush_tables)
        sleep(20)

        for _ in range(int(self.incremental_count) + 1):
            RunBenchmark().run_sysbench_run(basedir=self.basedir)
            self.all_backup()

        return True
Example #7
from master_backup_script.backuper import Backup

clb_obj = CloneBuildStartServer()


@pytest.fixture()
def return_clone_obj():
    return clb_obj


@pytest.fixture()
def return_basedir():
    basedir = clb_obj.get_basedir()
    return basedir

rb_obj = RunBenchmark()


@pytest.fixture()
def return_run_benchmark_obj():
    return rb_obj

cg_obj = ConfigGenerator()


@pytest.fixture()
def return_config_generator_obj():
    return cg_obj

@pytest.fixture()
def return_runner_test_mode_obj_5_6_xb_2_3():
    def wipe_backup_prepare_copyback(self, basedir, keyring_vault=0):
        """
        Method Backup + Prepare and Copy-back actions.
        It is also going to create slave server from backup of master and start.
        :param basedir: The basedir path of MySQL
        :return: Success if no exception raised from methods
        """
        c_count = 0
        for options in ConfigGenerator(
                config=self.conf).options_combination_generator(
                    self.mysql_options):
            c_count = c_count + 1
            options = " ".join(options)
            if '5.7' in basedir:
                if keyring_vault == 0:
                    options = options + " " + self.df_mysql_options.format(
                        basedir, c_count)
                elif keyring_vault == 1:
                    # The keyring_vault options must be provided manually in the config, with full paths.
                    # For example: --early-plugin-load=keyring_vault=keyring_vault.so,--loose-keyring_vault_config=/sda/vault_server/keyring_vault.cnf
                    # In that case there is no need to pass basedir, so only the server id (c_count) is passed.
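                    # For reference, keyring_vault.cnf typically contains values like the
                    # following (illustrative only, per the Percona Server keyring_vault docs):
                    #   vault_url = https://127.0.0.1:8200
                    #   secret_mount_point = secret
                    #   token = <vault token>
                    #   vault_ca = /path/to/vault_ca.crt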
                    options = options + " " + self.df_mysql_options.format(
                        c_count)
            else:
                options = options + " " + self.df_mysql_options.format(c_count)
            logger.debug("*********************************")
            logger.debug("Starting cycle{}".format(c_count))
            logger.debug("Will start MySQL with {}".format(options))
            # Passing options to start MySQL
            if self.clone_obj.wipe_server_all(basedir_path=basedir,
                                              options=options):
                # Specifying directories and passing to WrapperForBackupTest class
                full_dir = self.backupdir + "/cycle{}".format(
                    c_count) + "/full"
                inc_dir = self.backupdir + "/cycle{}".format(c_count) + "/inc"
                backup_obj = WrapperForBackupTest(config=self.conf,
                                                  full_dir=full_dir,
                                                  inc_dir=inc_dir,
                                                  basedir=basedir)
                # Take backups
                logger.debug("Started to run run_all_backup()")
                if backup_obj.run_all_backup():
                    prepare_obj = WrapperForPrepareTest(config=self.conf,
                                                        full_dir=full_dir,
                                                        inc_dir=inc_dir)
                    # Prepare backups
                    logger.debug("Started to run run_prepare_backup()")
                    if prepare_obj.run_prepare_backup():
                        if hasattr(self, 'make_slaves'):
                            logger.debug(
                                "make_slaves is defined, so slaves will be created!"
                            )
                            # Creating slave datadir
                            slave_datadir = self.create_slave_datadir(
                                basedir=basedir, num=1)
                            # Copy back the prepared backup and fix ownership to build the slave server environment
                            prepare_obj.run_xtra_copyback(
                                datadir=slave_datadir)
                            prepare_obj.giving_chown(datadir=slave_datadir)
                            slave_full_options = self.prepare_start_slave_options(
                                basedir=basedir,
                                slave_number=1,
                                options=options)

                            prepare_obj.start_mysql_func(
                                start_tool="{}/start_dynamic".format(basedir),
                                options=slave_full_options)
                            # Creating connection file for new node
                            self.create_slave_connection_file(basedir=basedir,
                                                              num=1)
                            # Creating shutdown file for new node
                            self.create_slave_shutdown_file(basedir=basedir,
                                                            num=1)

                            # Checking if node is up
                            logger.debug("Pausing a bit here...")
                            sleep(10)
                            chk_obj = CheckEnv(config=self.conf)
                            check_options = "--user={} --socket={}/sock{}.sock".format(
                                'root', basedir, 1)
                            chk_obj.check_mysql_uptime(options=check_options)
                            # Make this node to be slave
                            mysql_master_client_cmd = RunBenchmark(
                                config=self.conf).get_mysql_conn(
                                    basedir=basedir)
                            # Create replication user on master server
                            self.run_sql_create_user(mysql_master_client_cmd)
                            # Drop blank users from the master server if the PS version is 5.6 or 5.5
                            if '5.6' in basedir or '5.5' in basedir:
                                self.drop_blank_mysql_users(
                                    mysql_master_client_cmd)
                            full_backup_dir = prepare_obj.recent_full_backup_file(
                            )
                            mysql_slave_client_cmd = RunBenchmark(
                                config=self.conf).get_mysql_conn(
                                    basedir=basedir,
                                    file_name="cl_node{}".format(1))
                            # Creating dsns table
                            self.create_dsns_table(mysql_master_client_cmd)

                            # Running change master and some other commands here
                            if self.run_change_master(
                                    basedir=basedir,
                                    full_backup_dir="{}/{}".format(
                                        full_dir, full_backup_dir),
                                    mysql_master_client_cmd=
                                    mysql_master_client_cmd,
                                    mysql_slave_client_cmd=
                                    mysql_slave_client_cmd):
                                sleep(10)

                            logger.debug(
                                "Starting actions for second slave here...")
                            # Actions for the second slave, which will be started from the first slave's backup
                            full_dir_2 = self.backupdir + "/cycle{}".format(
                                c_count) + "/slave_backup" + "/full"
                            inc_dir_2 = self.backupdir + "/cycle{}".format(
                                c_count) + "/slave_backup" + "/inc"
                            # Create config for this slave node1 here
                            logger.debug(
                                "Generating special config file for second slave"
                            )
                            cnf_obj = ConfigGenerator(config=self.conf)
                            slave_conf_path = self.backupdir + "/cycle{}".format(
                                c_count)
                            if ('5.7' in basedir) and ('2_4_ps_5_7'
                                                       in self.conf):
                                slave_conf_file = 'xb_2_4_ps_5_7_slave.conf'
                            elif ('5.6' in basedir) and ('2_4_ps_5_6'
                                                         in self.conf):
                                slave_conf_file = 'xb_2_4_ps_5_6_slave.conf'
                            elif ('5.6' in basedir) and ('2_3_ps_5_6'
                                                         in self.conf):
                                slave_conf_file = 'xb_2_3_ps_5_6_slave.conf'
                            elif ('5.5' in basedir) and ('2_3_ps_5_5'
                                                         in self.conf):
                                slave_conf_file = 'xb_2_3_ps_5_5_slave.conf'
                            elif ('5.5' in basedir) and ('2_4_ps_5_5'
                                                         in self.conf):
                                slave_conf_file = 'xb_2_4_ps_5_5_slave.conf'

                            cnf_obj.generate_config_files(
                                test_path=self.testpath,
                                conf_file=slave_conf_file,
                                basedir=basedir,
                                datadir="{}/node{}".format(basedir, 1),
                                sock_file="{}/sock{}.sock".format(basedir, 1),
                                backup_path=slave_conf_path)
                            # DO backup here
                            backup_obj_2 = WrapperForBackupTest(
                                config="{}/{}".format(slave_conf_path,
                                                      slave_conf_file),
                                full_dir=full_dir_2,
                                inc_dir=inc_dir_2,
                                basedir=basedir)
                            if backup_obj_2.all_backup():
                                # DO prepare here
                                prepare_obj_2 = WrapperForPrepareTest(
                                    config="{}/{}".format(
                                        slave_conf_path, slave_conf_file),
                                    full_dir=full_dir_2,
                                    inc_dir=inc_dir_2)
                                if prepare_obj_2.run_prepare_backup():
                                    # Removing outside tablespace files
                                    if os.path.isfile(
                                            '{}/out_ts1.ibd'.format(basedir)):
                                        os.remove(
                                            '{}/out_ts1.ibd'.format(basedir))
                                    if os.path.isfile(
                                            '{}/sysbench_test_db/t1.ibd'.
                                            format(basedir)):
                                        os.remove('{}/sysbench_test_db/t1.ibd'.
                                                  format(basedir))
                                    # Creating slave datadir
                                    slave_datadir_2 = self.create_slave_datadir(
                                        basedir=basedir, num=2)
                                    prepare_obj_2.run_xtra_copyback(
                                        datadir=slave_datadir_2)
                                    prepare_obj_2.giving_chown(
                                        datadir=slave_datadir_2)
                                    slave_full_options = self.prepare_start_slave_options(
                                        basedir=basedir,
                                        slave_number=2,
                                        options=options)

                                    prepare_obj_2.start_mysql_func(
                                        start_tool="{}/start_dynamic".format(
                                            basedir),
                                        options=slave_full_options)
                                    # Creating connection file for new node
                                    self.create_slave_connection_file(
                                        basedir=basedir, num=2)
                                    # Creating shutdown file for new node
                                    self.create_slave_shutdown_file(
                                        basedir=basedir, num=2)
                                    logger.debug("Pausing a bit here...")
                                    sleep(10)
                                    check_options_2 = "--user={} --socket={}/sock{}.sock".format(
                                        'root', basedir, 2)
                                    chk_obj.check_mysql_uptime(
                                        options=check_options_2)

                                    mysql_slave_client_cmd_2 = RunBenchmark(
                                        config=self.conf).get_mysql_conn(
                                            basedir=basedir,
                                            file_name="cl_node{}".format(2))
                                    full_backup_dir_2 = prepare_obj_2.recent_full_backup_file(
                                    )
                                    if self.run_change_master(
                                            basedir=basedir,
                                            full_backup_dir="{}/{}".format(
                                                full_dir_2, full_backup_dir_2),
                                            mysql_master_client_cmd=
                                            mysql_master_client_cmd,
                                            mysql_slave_client_cmd=
                                            mysql_slave_client_cmd_2,
                                            is_slave=True):
                                        sleep(10)

                            # Running on master
                            self.run_pt_table_checksum(basedir=basedir)

                            # Shutdown slaves
                            self.slave_shutdown(basedir=basedir, num=1)
                            self.slave_shutdown(basedir=basedir, num=2)
                            sleep(5)

                        else:
                            prepare_obj.copy_back_action(options=options)
Example #9
def __init__(self, config=path_config.config_path_file):
    self.conf = config
    self.home = path_config.home
    super().__init__(config=self.conf)
    # For getting socket file path using RunBenchmark()
    self.benchmark_obj = RunBenchmark()
Example #10
class ConfigGenerator(CloneBuildStartServer):
    def __init__(self, config=path_config.config_path_file):
        self.conf = config
        self.home = path_config.home
        super().__init__(config=self.conf)
        # For getting socket file path using RunBenchmark()
        self.benchmark_obj = RunBenchmark()

    @staticmethod
    def generate_config_files(test_path,
                              conf_file,
                              basedir,
                              datadir,
                              sock_file,
                              home_path,
                              backup_path=None):
        # This method generates a separate config file for each XB version, based on the PS version.
        # There is only one config for PXB 8, because PS 8.0 is the only supported platform for it.
        try:
            if backup_path is None:
                conf_path = "{}/{}".format(test_path, conf_file)
            else:
                conf_path = "{}/{}".format(backup_path, conf_file)
            with open(conf_path, 'w+') as cfgfile:
                config = configparser.ConfigParser(allow_no_value=True)
                section1 = 'MySQL'
                config.add_section(section1)
                config.set(section1, "mysql", "{}/bin/mysql".format(basedir))
                config.set(section1, "mycnf", "")
                config.set(section1, "mysqladmin",
                           "{}/bin/mysqladmin".format(basedir))
                config.set(section1, "mysql_user", "root")
                config.set(section1, "mysql_password", "")
                config.set(section1,
                           "#Use either socket or port + host combination")
                config.set(section1, "mysql_socket", "{}".format(sock_file))
                config.set(section1, "#mysql_host", "127.0.0.1")
                config.set(section1, "#mysql_port", "3306")
                config.set(section1, "datadir",
                           "{}/{}".format(basedir, datadir))

                section2 = 'Backup'
                config.add_section(section2)
                config.set(section2, "#Optional: set pid directory")
                config.set(section2, "pid_dir", "/tmp/MySQL-AutoXtraBackup")
                config.set(section2, "tmpdir",
                           join(home_path, "XB_TEST/mysql_datadirs"))
                config.set(
                    section2,
                    "#Optional: set warning if pid of backup us running for longer than X"
                )
                config.set(section2, "pid_runtime_warning", "2 Hours")

                # Getting PS version from conf_file name
                get_ps_version = conf_file[3:6]
                # Getting XB version from conf_file name
                get_xb_version = conf_file[-7:-4]
                get_xb_version_replaced = get_xb_version.replace('_', '.')
                config.set(
                    section2, "backupdir",
                    join(
                        home_path, "XB_TEST/backup_dir/ps_{}_x_{}".format(
                            get_ps_version, get_xb_version)))

                config.set(
                    section2, "backup_tool",
                    "{}/target/percona-xtrabackup-{}.x-debug/bin/xtrabackup".
                    format(test_path, get_xb_version_replaced))

                config.set(
                    section2,
                    "#Optional: specify different path/version of xtrabackup here for prepare"
                )
                config.set(section2, "#prepare_tool", "")
                config.set(section2, "xtra_prepare", "--apply-log-only")
                config.set(
                    section2,
                    "#Optional: pass additional options for backup stage")
                config.set(section2, "#xtra_backup", "--compact")
                config.set(
                    section2,
                    "#Optional: pass additional options for prepare stage")
                config.set(section2, "#xtra_prepare_options",
                           "--rebuild-indexes")
                config.set(
                    section2,
                    "#Optional: pass general additional options; it will go to both for backup and prepare"
                )
                config.set(section2, "#xtra_options",
                           "--binlog-info=ON --galera-info")
                if '5.7' in basedir or '8.0' in basedir:
                    config.set(
                        section2,
                        "xtra_options",
                        "--slave-info --no-version-check --core-file "
                        "--parallel=10 --throttle=40 --check-privileges "
                        "--ftwrl-wait-timeout=0 "
                        "--ftwrl-wait-query-type=all "
                        "--ftwrl-wait-threshold=1 "
                        #"--lock-wait-timeout=0 "
                        #"--lock-wait-query-type=all "
                        #"--lock-wait-threshold=1 "
                        "--kill-long-queries-timeout=1 "
                        "--kill-wait-query-type=all "
                        "--kill-long-query-type=all "
                        "--no-backup-locks "
                        #"--lock-ddl-per-table "
                        #"--lock-ddl "
                        "--keyring-file-data={}/mysql-keyring/keyring ".format(
                            basedir))
                else:
                    config.set(
                        section2,
                        "xtra_options",
                        "--slave-info --no-version-check --core-file "
                        "--ftwrl-wait-timeout=0 "
                        "--ftwrl-wait-query-type=all "
                        "--ftwrl-wait-threshold=1 "
                        #"--lock-wait-timeout=0 "
                        #"--lock-wait-query-type=all "
                        #"--lock-wait-threshold=1 "
                        "--kill-long-queries-timeout=1 "
                        "--kill-wait-query-type=all "
                        "--kill-long-query-type=all "
                        "--no-backup-locks "
                        "--parallel=10 --throttle=40 --check-privileges ")
                config.set(section2, "#Optional: set archive and rotation")
                config.set(section2, "#archive_dir",
                           join(home_path, "XB_TEST/backup_archives"))
                config.set(section2, "#prepare_archive", "1")
                config.set(section2, "#move_archive", "0")
                config.set(section2, "#full_backup_interval", "1 day")
                config.set(section2, "#max_archive_size", "100GiB")
                config.set(section2, "#max_archive_duration", "4 Days")
                config.set(
                    section2,
                    "#Optional: WARNING(Enable this if you want to take partial backups). "
                    "Specify database names or table names.")
                config.set(section2, "#partial_list", "test.t1 test.t2 dbtest")

                section3 = "Compress"
                config.add_section(section3)
                config.set(section3, "#optional")
                config.set(section3,
                           "#Enable only if you want to use compression.")
                config.set(section3, "compress", "quicklz")
                config.set(section3, "compress_chunk_size", "65536")
                config.set(section3, "compress_threads", "4")
                config.set(section3, "decompress", "TRUE")
                config.set(
                    section3,
                    "#Enable if you want to remove .qp files after decompression."
                    "(Not available yet, will be released with XB 2.3.7 and 2.4.6)"
                )
                config.set(section3, "remove_original", "FALSE")

                section4 = "Encrypt"
                config.add_section(section4)
                config.set(section4, "#Optional")
                config.set(
                    section4,
                    "#Enable only if you want to create encrypted backups")

                config.set(
                    section4, "xbcrypt",
                    "{}/target/percona-xtrabackup-{}.x-debug/bin/xbcrypt".
                    format(test_path, get_xb_version_replaced))
                config.set(section4, "encrypt", "AES256")
                config.set(
                    section4,
                    "#Please note that --encrypt-key and --encrypt-key-file are mutually exclusive"
                )
                config.set(section4, "encrypt_key",
                           'VVTBwgM4UhwkTTV98fhuj+D1zyWoA89K')
                config.set(section4, "#encrypt_key_file",
                           "/path/to/file/with_encrypt_key")
                config.set(section4, "encrypt_threads", "4")
                config.set(section4, "encrypt_chunk_size", "65536")
                config.set(section4, "decrypt", "AES256")
                config.set(
                    section4,
                    "#Enable if you want to remove .qp files after decompression."
                    "(Not available yet, will be released with XB 2.3.7 and 2.4.6)"
                )
                config.set(section4, "remove_original", "FALSE")

                section5 = "Xbstream"
                config.add_section(section5)
                config.set(section5, "#EXPERIMENTAL")
                config.set(section5,
                           "#Enable this, if you want to stream your backups")

                config.set(
                    section5, "xbstream",
                    "{}/target/percona-xtrabackup-{}.x-debug/bin/xbstream".
                    format(test_path, get_xb_version_replaced))
                config.set(section5, "stream", "xbstream")
                config.set(section5, "xbstream_options", "-x --parallel=100")
                config.set(section5, "xbs_decrypt", "1")
                config.set(
                    section5,
                    "# WARN, enable this, if you want to stream your backups to remote host"
                )
                config.set(section5, "#remote_stream", "ssh xxx.xxx.xxx.xxx")

                section6 = "Remote"
                config.add_section(section6)
                config.set(section6, "#Optional remote syncing")
                config.set(section6, "#remote_conn", "*****@*****.**")
                config.set(section6, "#remote_dir", "/home/sh/Documents")

                section7 = "Commands"
                config.add_section(section7)
                config.set(section7, "start_mysql_command",
                           "{}/start".format(basedir))
                config.set(section7, "stop_mysql_command",
                           "{}/stop".format(basedir))
                # Getting the system username (equivalent to running whoami)
                sys_user = getuser()
                config.set(section7, "chown_command",
                           "chown -R {}:{}".format(sys_user, sys_user))

                section8 = "TestConf"
                config.add_section(section8)
                config.set(section8, "ps_branches", "5.5 5.6 5.7 8.0")
                config.set(section8, "pxb_branches", "2.3 2.4 8.0")
                config.set(
                    section8, "gitcmd",
                    "--recursive --depth=1 https://github.com/percona/percona-server.git"
                )
                config.set(
                    section8, "pxb_gitcmd",
                    "https://github.com/percona/percona-xtrabackup.git")
                config.set(section8, "testpath",
                           join(home_path, "XB_TEST/server_dir"))
                config.set(section8, "incremental_count", "3")
                config.set(
                    section8, "xb_configs", "xb_2_4_ps_5_6.cnf "
                    "xb_2_4_ps_5_7.cnf "
                    "xb_2_3_ps_5_6.cnf "
                    "xb_2_3_ps_5_5.cnf "
                    "xb_2_4_ps_5_5.cnf "
                    "xb_8_0_ps_8_0.cnf")
                config.set(section8, "make_slaves", "1")
                if '5_7' in conf_file:
                    config.set(
                        section8, "default_mysql_options",
                        "--early-plugin-load=keyring_file.so,"
                        "--keyring_file_data={}/mysql-keyring/keyring,"
                        "--log-bin=mysql-bin,--log-slave-updates,--server-id={},"
                        "--gtid-mode=ON,--enforce-gtid-consistency,--binlog-format=row,"
                        "--encrypt_binlog=ON,--master_verify_checksum=ON,--binlog_checksum=CRC32,"
                        "--innodb_encrypt_tables=ON,"
                        "--innodb_encrypt_online_alter_logs=ON,"
                        "--innodb_temp_tablespace_encrypt=ON")
                elif '8_0' in conf_file:
                    # For now make similar to 5.7
                    config.set(
                        section8, "default_mysql_options",
                        "--early-plugin-load=keyring_file.so,"
                        "--keyring_file_data={}/mysql-keyring/keyring,"
                        "--log-bin=mysql-bin,--log-slave-updates,--server-id={},"
                        "--gtid-mode=ON,--enforce-gtid-consistency,--binlog-format=row,"
                        "--encrypt_binlog=ON,--master_verify_checksum=ON,--binlog_checksum=CRC32,"
                        "--innodb_encrypt_tables=ON,"
                        "--innodb_encrypt_online_alter_logs=ON,"
                        "--innodb_temp_tablespace_encrypt=ON")
                elif '5_6' in conf_file:
                    config.set(
                        section8, "default_mysql_options",
                        "--log-bin=mysql-bin,--log-slave-updates,--server-id={},"
                        "--gtid-mode=ON,--enforce-gtid-consistency,--binlog-format=row"
                    )
                elif '5_5' in conf_file:
                    config.set(
                        section8, "default_mysql_options",
                        "--log-bin=mysql-bin,--log-slave-updates,--server-id={},"
                        "--binlog-format=row")

                if '5_7' in conf_file or '8_0' in conf_file:
                    config.set(
                        section8, "mysql_options",
                        "--innodb_buffer_pool_size=1G 2G 3G,--innodb_log_file_size=1G 2G 3G,"
                        "--innodb_page_size=4K 8K 16K 32K 64K")
                elif '5_6' in conf_file:
                    config.set(
                        section8, "mysql_options",
                        "--innodb_buffer_pool_size=1G 2G 3G,--innodb_log_file_size=1G 2G 3G,"
                        "--innodb_page_size=4K 8K 16K")
                elif '5_5' in conf_file:
                    config.set(
                        section8, "mysql_options",
                        "--innodb_buffer_pool_size=1G 2G 3G,--innodb_log_file_size=1G 2G 3G,"
                        "--innodb_page_size=4K 8K 16K")

                config.write(cfgfile)

        except Exception as err:
            logger.error("Failed to generate config file...")
            logger.error(err)
            return False
        else:
            logger.debug("Config file generated successfully...")
            return True
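
    # A quick way to sanity-check a file written by generate_config_files() is to
    # read it back with configparser (a hypothetical sketch; the path below is illustrative):
    #
    #   cfg = configparser.ConfigParser(allow_no_value=True)
    #   cfg.read("/home/user/XB_TEST/server_dir/xb_2_4_ps_5_7.cnf")
    #   print(cfg.get("MySQL", "mysql_socket"))
    #   print(cfg.get("Backup", "backup_tool"))
    #   print(cfg.get("TestConf", "mysql_options"))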

    def the_main_generator(self):
        # Method that calls the config generator for each matching basedir / config-file combination
        conf_list = self.xb_configs.split()
        basedirs = self.get_basedir()
        for basedir in basedirs:
            for conf_file in conf_list:
                if ('8.0' in basedir) and ('8_0_ps_8_0' in conf_file):
                    self.generate_config_files(
                        test_path=self.testpath,
                        conf_file=conf_file,
                        basedir=basedir,
                        datadir='data',
                        sock_file=self.benchmark_obj.get_sock(basedir=basedir),
                        home_path=self.home)
                if ('5.7' in basedir) and ('2_4_ps_5_7' in conf_file):
                    self.generate_config_files(
                        test_path=self.testpath,
                        conf_file=conf_file,
                        basedir=basedir,
                        datadir='data',
                        sock_file=self.benchmark_obj.get_sock(basedir=basedir),
                        home_path=self.home)
                elif ('5.6' in basedir) and ('2_4_ps_5_6' in conf_file):
                    self.generate_config_files(
                        test_path=self.testpath,
                        conf_file=conf_file,
                        basedir=basedir,
                        datadir='data',
                        sock_file=self.benchmark_obj.get_sock(basedir=basedir),
                        home_path=self.home)
                elif ('5.6' in basedir) and ('2_3_ps_5_6' in conf_file):
                    self.generate_config_files(
                        test_path=self.testpath,
                        conf_file=conf_file,
                        basedir=basedir,
                        datadir='data',
                        sock_file=self.benchmark_obj.get_sock(basedir=basedir),
                        home_path=self.home)
                elif ('5.5' in basedir) and ('2_4_ps_5_5' in conf_file):
                    self.generate_config_files(
                        test_path=self.testpath,
                        conf_file=conf_file,
                        basedir=basedir,
                        datadir='data',
                        sock_file=self.benchmark_obj.get_sock(basedir=basedir),
                        home_path=self.home)
                elif ('5.5' in basedir) and ('2_3_ps_5_5' in conf_file):
                    self.generate_config_files(
                        test_path=self.testpath,
                        conf_file=conf_file,
                        basedir=basedir,
                        datadir='data',
                        sock_file=self.benchmark_obj.get_sock(basedir=basedir),
                        home_path=self.home)
                else:
                    continue

        return True

    @staticmethod
    def options_combination_generator(initial_str):
        '''
        Option parser method for creating option combinations.
        :param initial_str: mysql_options string from the config file
        :return: list of tuples with option combinations
        '''
        separated_values_list = []

        for i in initial_str.split(','):
            separated_values_list.append(i.split('='))

        all_new_list = []

        for i in separated_values_list:
            k = ["{}={}".format(i[0], j) for j in i[1].split()]
            all_new_list.append(k)

        option_combinations = []

        for i in product(*all_new_list):
            option_combinations.append(i)

        return option_combinations
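
A worked example of what options_combination_generator() produces (a standalone sketch of the same parsing logic; the input string mirrors the mysql_options value written by generate_config_files() above):

from itertools import product

# Hypothetical standalone version of the combination logic above.
initial_str = "--innodb_buffer_pool_size=1G 2G,--innodb_page_size=4K 8K"

separated = [i.split('=') for i in initial_str.split(',')]
expanded = [["{}={}".format(name, value) for value in values.split()]
            for name, values in separated]
combos = list(product(*expanded))
# combos ->
# [('--innodb_buffer_pool_size=1G', '--innodb_page_size=4K'),
#  ('--innodb_buffer_pool_size=1G', '--innodb_page_size=8K'),
#  ('--innodb_buffer_pool_size=2G', '--innodb_page_size=4K'),
#  ('--innodb_buffer_pool_size=2G', '--innodb_page_size=8K')]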
def __init__(self, config='/etc/bck.conf'):
    self.conf = config
    super().__init__(config=self.conf)
    # For getting socket file path using RunBenchmark()
    self.benchmark_obj = RunBenchmark()
Example #12
    def run_all_backup(self):
        # Method for taking backups using master_backup_script.backuper.py::all_backup()
        RunBenchmark().run_sysbench_prepare(basedir=self.basedir)
        # Fix for https://github.com/ShahriyarR/MySQL-AutoXtraBackup/issues/245
        # Disabled for now
        # self.create_million_tables(basedir=self.basedir)
        # with concurrent.futures.ProcessPoolExecutor(max_workers=100) as pool:
        #         for i in range(1000000):
        #             pool.submit(self.create_million_tables(basedir=self.basedir, i=i))

        if '5.7' in self.basedir:
            # Fix for https://github.com/ShahriyarR/MySQL-AutoXtraBackup/issues/205
            # Adding compression column with predefined dictionary.
            sql_create_dictionary = "CREATE COMPRESSION_DICTIONARY numbers('08566691963-88624912351-16662227201-46648573979-64646226163-77505759394-75470094713-41097360717-15161106334-50535565977')"
            RunBenchmark.run_sql_statement(basedir=self.basedir,
                                           sql_statement=sql_create_dictionary)

            # Fix for https://github.com/ShahriyarR/MySQL-AutoXtraBackup/issues/229
            # Creating encrypted general tablespace
            sql_create_tablespace = "create tablespace ts3_enc add datafile 'ts3_enc.ibd' encryption='Y'"
            RunBenchmark.run_sql_statement(basedir=self.basedir,
                                           sql_statement=sql_create_tablespace)

            # Fix for https://github.com/ShahriyarR/MySQL-AutoXtraBackup/issues/271
            # Preparing env here
            self.run_call_innodb_online_alter_encryption_sql_sh(
                basedir=self.basedir,
                sock="{}/socket.sock".format(self.basedir))
            self.run_call_innodb_online_alter_encryption_alters_sh(
                basedir=self.basedir,
                sock="{}/socket.sock".format(self.basedir))

            # Fix for https://github.com/ShahriyarR/MySQL-AutoXtraBackup/issues/268
            # Running create statement
            sql_create_table = "CREATE TABLE sysbench_test_db.t10 (a INT AUTO_INCREMENT PRIMARY KEY, b INT)"
            RunBenchmark.run_sql_statement(basedir=self.basedir,
                                           sql_statement=sql_create_table)
            for _ in range(10):
                insert_rand = "INSERT INTO sysbench_test_db.t10 (b) VALUES (FLOOR(RAND() * 10000)), (FLOOR(RAND() * 10000)), (FLOOR(RAND() * 10000))"
                RunBenchmark.run_sql_statement(basedir=self.basedir,
                                               sql_statement=insert_rand)

            for _ in range(5):
                insert_select = "INSERT INTO sysbench_test_db.t10 (b) SELECT b FROM sysbench_test_db.t10"
                RunBenchmark.run_sql_statement(basedir=self.basedir,
                                               sql_statement=insert_select)

            # Fix for https://github.com/ShahriyarR/MySQL-AutoXtraBackup/issues/268
            self.run_temp_table_test_sh(basedir=self.basedir,
                                        sock="{}/socket.sock".format(
                                            self.basedir))
            self.run_call_create_index_temp_sh(basedir=self.basedir,
                                               sock="{}/socket.sock".format(
                                                   self.basedir))

            for i in range(1, 5):
                sql_encrypt = "alter table sysbench_test_db.sbtest{} encryption='Y'".format(
                    i)
                RunBenchmark.run_sql_statement(basedir=self.basedir,
                                               sql_statement=sql_encrypt)
                # Compression related issue -> https://bugs.launchpad.net/percona-xtrabackup/+bug/1641745
                # Disabling for now
                # TODO: Enable this after #1641745 is fixed, or disable the 64K page size for MySQL (disabled).
                sql_compress = "alter table sysbench_test_db.sbtest{} compression='lz4'".format(
                    i)
                RunBenchmark.run_sql_statement(basedir=self.basedir,
                                               sql_statement=sql_compress)
                sql_optimize = "optimize table sysbench_test_db.sbtest{}".format(
                    i)
                RunBenchmark.run_sql_statement(basedir=self.basedir,
                                               sql_statement=sql_optimize)
                # Fix for https://github.com/ShahriyarR/MySQL-AutoXtraBackup/issues/196
                # Adding JSON + virtual + stored columns here
                sql_virtual_column = "alter table sysbench_test_db.sbtest{} add column json_test_v json generated always as (json_array(k,c,pad)) virtual".format(
                    i)
                RunBenchmark.run_sql_statement(
                    basedir=self.basedir, sql_statement=sql_virtual_column)
                sql_stored_column = "alter table sysbench_test_db.sbtest{} add column json_test_s json generated always as (json_array(k,c,pad)) stored".format(
                    i)
                RunBenchmark.run_sql_statement(basedir=self.basedir,
                                               sql_statement=sql_stored_column)
                sql_create_json_column = "alter table sysbench_test_db.sbtest{} add column json_test_index varchar(255) generated always as (json_array(k,c,pad)) stored".format(
                    i)
                RunBenchmark.run_sql_statement(
                    basedir=self.basedir, sql_statement=sql_create_json_column)
                sql_alter_add_index = "alter table sysbench_test_db.sbtest{} add index(json_test_index)".format(
                    i)
                RunBenchmark.run_sql_statement(
                    basedir=self.basedir, sql_statement=sql_alter_add_index)

            general_tablespace = "create tablespace ts1 add datafile 'ts1.ibd' engine=innodb"
            RunBenchmark.run_sql_statement(basedir=self.basedir,
                                           sql_statement=general_tablespace)

            outside_tablespace_full_path = '{}/out_ts1.ibd'.format(
                self.basedir)
            if os.path.isfile(outside_tablespace_full_path):
                os.remove(outside_tablespace_full_path)
            # Fix for https://github.com/ShahriyarR/MySQL-AutoXtraBackup/issues/219
            general_out_tablespace = "create tablespace out_ts1 add datafile '{}' engine=innodb".format(
                outside_tablespace_full_path)
            RunBenchmark.run_sql_statement(
                basedir=self.basedir, sql_statement=general_out_tablespace)
            # Create general tablespace with relative path
            # TODO: enable this after fix for https://bugs.launchpad.net/percona-xtrabackup/+bug/1736380
            # self.general_tablespace_rel(self.basedir)
            # general_out_relative = "create tablespace out_rel_ts1 add datafile '../relative_path/out_rel_ts1.ibd' engine=innodb"
            # RunBenchmark.run_sql_statement(basedir=self.basedir, sql_statement=general_out_relative)

            for i in range(5, 10):
                sql_compress = "alter table sysbench_test_db.sbtest{} compression='zlib'".format(
                    i)
                RunBenchmark.run_sql_statement(basedir=self.basedir,
                                               sql_statement=sql_compress)
                sql_optimize = "optimize table sysbench_test_db.sbtest{}".format(
                    i)
                RunBenchmark.run_sql_statement(basedir=self.basedir,
                                               sql_statement=sql_optimize)
                sql_alter_compression_dict = "alter table sysbench_test_db.sbtest{} modify c varchar(250) column_format compressed with compression_dictionary numbers".format(
                    i)
                RunBenchmark.run_sql_statement(
                    basedir=self.basedir,
                    sql_statement=sql_alter_compression_dict)

            for i in range(10, 15):
                # Fix for https://github.com/ShahriyarR/MySQL-AutoXtraBackup/issues/206
                # Altering some tables to use general tablespace.
                sql_virtual_column = "alter table sysbench_test_db.sbtest{} add column json_test_v json generated always as (json_array(k,c,pad)) virtual".format(
                    i)
                RunBenchmark.run_sql_statement(
                    basedir=self.basedir, sql_statement=sql_virtual_column)
                sql_stored_column = "alter table sysbench_test_db.sbtest{} add column json_test_s json generated always as (json_array(k,c,pad)) stored".format(
                    i)
                RunBenchmark.run_sql_statement(basedir=self.basedir,
                                               sql_statement=sql_stored_column)
                sql_create_json_column = "alter table sysbench_test_db.sbtest{} add column json_test_index varchar(255) generated always as (json_array(k,c,pad)) stored".format(
                    i)
                RunBenchmark.run_sql_statement(
                    basedir=self.basedir, sql_statement=sql_create_json_column)
                sql_alter_add_index = "alter table sysbench_test_db.sbtest{} add index(json_test_index)".format(
                    i)
                RunBenchmark.run_sql_statement(
                    basedir=self.basedir, sql_statement=sql_alter_add_index)
                # Decrypting tables for -> https://github.com/ShahriyarR/MySQL-AutoXtraBackup/issues/264
                sql_encrypt = "alter table sysbench_test_db.sbtest{} encryption='N'".format(
                    i)
                RunBenchmark.run_sql_statement(basedir=self.basedir,
                                               sql_statement=sql_encrypt)
                sql_alter_tablespace = "alter table sysbench_test_db.sbtest{} tablespace=ts1".format(
                    i)
                RunBenchmark.run_sql_statement(
                    basedir=self.basedir, sql_statement=sql_alter_tablespace)

            for i in range(15, 20):
                sql_virtual_column = "alter table sysbench_test_db.sbtest{} add column json_test_v json generated always as (json_array(k,c,pad)) virtual".format(
                    i)
                RunBenchmark.run_sql_statement(
                    basedir=self.basedir, sql_statement=sql_virtual_column)
                sql_stored_column = "alter table sysbench_test_db.sbtest{} add column json_test_s json generated always as (json_array(k,c,pad)) stored".format(
                    i)
                RunBenchmark.run_sql_statement(basedir=self.basedir,
                                               sql_statement=sql_stored_column)
                sql_create_json_column = "alter table sysbench_test_db.sbtest{} add column json_test_index varchar(255) generated always as (json_array(k,c,pad)) stored".format(
                    i)
                RunBenchmark.run_sql_statement(
                    basedir=self.basedir, sql_statement=sql_create_json_column)
                sql_alter_add_index = "alter table sysbench_test_db.sbtest{} add index(json_test_index)".format(
                    i)
                RunBenchmark.run_sql_statement(
                    basedir=self.basedir, sql_statement=sql_alter_add_index)
                # Decrypting tables for -> https://github.com/ShahriyarR/MySQL-AutoXtraBackup/issues/264
                sql_encrypt = "alter table sysbench_test_db.sbtest{} encryption='N'".format(
                    i)
                RunBenchmark.run_sql_statement(basedir=self.basedir,
                                               sql_statement=sql_encrypt)
                sql_alter_tablespace = "alter table sysbench_test_db.sbtest{} tablespace=out_ts1".format(
                    i)
                RunBenchmark.run_sql_statement(
                    basedir=self.basedir, sql_statement=sql_alter_tablespace)

            for i in range(25, 30):
                # Altering encrypted tables to use encrypted general tablespace
                sql_encrypt = "alter table sysbench_test_db.sbtest{} encryption='Y'".format(
                    i)
                RunBenchmark.run_sql_statement(basedir=self.basedir,
                                               sql_statement=sql_encrypt)

                sql_virtual_column = "alter table sysbench_test_db.sbtest{} add column json_test_v json generated always as (json_array(k,c,pad)) virtual".format(
                    i)
                RunBenchmark.run_sql_statement(
                    basedir=self.basedir, sql_statement=sql_virtual_column)
                sql_stored_column = "alter table sysbench_test_db.sbtest{} add column json_test_s json generated always as (json_array(k,c,pad)) stored".format(
                    i)
                RunBenchmark.run_sql_statement(basedir=self.basedir,
                                               sql_statement=sql_stored_column)
                sql_create_json_column = "alter table sysbench_test_db.sbtest{} add column json_test_index varchar(255) generated always as (json_array(k,c,pad)) stored".format(
                    i)
                RunBenchmark.run_sql_statement(
                    basedir=self.basedir, sql_statement=sql_create_json_column)
                sql_alter_add_index = "alter table sysbench_test_db.sbtest{} add index(json_test_index)".format(
                    i)
                RunBenchmark.run_sql_statement(
                    basedir=self.basedir, sql_statement=sql_alter_add_index)

                sql_alter_tablespace = "alter table sysbench_test_db.sbtest{} tablespace=ts3_enc".format(
                    i)
                RunBenchmark.run_sql_statement(
                    basedir=self.basedir, sql_statement=sql_alter_tablespace)

            # TODO: enable this after fix for https://bugs.launchpad.net/percona-xtrabackup/+bug/1736380
            # for i in range(20, 25):
            #     sql_virtual_column = "alter table sysbench_test_db.sbtest{} add column json_test_v json generated always as (json_array(k,c,pad)) virtual".format(
            #         i)
            #     RunBenchmark.run_sql_statement(basedir=self.basedir, sql_statement=sql_virtual_column)
            #     sql_stored_column = "alter table sysbench_test_db.sbtest{} add column json_test_s json generated always as (json_array(k,c,pad)) stored".format(
            #         i)
            #     RunBenchmark.run_sql_statement(basedir=self.basedir, sql_statement=sql_stored_column)
            #     sql_create_json_column = "alter table sysbench_test_db.sbtest{} add column json_test_index varchar(255) generated always as (json_array(k,c,pad)) stored".format(
            #         i)
            #     RunBenchmark.run_sql_statement(basedir=self.basedir, sql_statement=sql_create_json_column)
            #     sql_alter_add_index = "alter table sysbench_test_db.sbtest{} add index(json_test_index)".format(i)
            #     RunBenchmark.run_sql_statement(basedir=self.basedir, sql_statement=sql_alter_add_index)
            #     sql_alter_tablespace = "alter table sysbench_test_db.sbtest{} tablespace=out_rel_ts1".format(i)
            #     RunBenchmark.run_sql_statement(basedir=self.basedir, sql_statement=sql_alter_tablespace)

            # NOTE: PXB will ignore rocksdb tables, which is going to break pt-table-checksum
            # for i in range(10, 15):
            #     sql_alter = "alter table sysbench_test_db.sbtest{} engine=rocksdb".format(i)
            #     RunBenchmark.run_sql_statement(basedir=self.basedir, sql_statement=sql_alter)
        # NOTE: PXB will ignore tokudb tables, which is going to break pt-table-checksum
        # for i in range(15, 20):
        #     sql_alter = "alter table sysbench_test_db.sbtest{} engine=tokudb".format(i)
        #     RunBenchmark.run_sql_statement(basedir=self.basedir, sql_statement=sql_alter)
        if '5.5' in self.basedir:
            for i in range(1, 5):
                sql_alter = "alter table sysbench_test_db.sbtest{} modify c varchar(120) CHARACTER SET utf8 COLLATE utf8_general50_ci".format(
                    i)
                RunBenchmark.run_sql_statement(basedir=self.basedir,
                                               sql_statement=sql_alter)

        # Altering some of the table engines from innodb to myisam
        # Disabled based on -> https://bugs.mysql.com/bug.php?id=89977
        # for i in range(20, 25):
        #     sql_alter_engine = "alter table sysbench_test_db.sbtest{} engine=myisam".format(i)
        #     RunBenchmark.run_sql_statement(basedir=self.basedir, sql_statement=sql_alter_engine)

        # Fix for https://github.com/ShahriyarR/MySQL-AutoXtraBackup/issues/222
        # Creating table with data directory option
        if '5.6' in self.basedir or '5.7' in self.basedir:
            if os.path.exists('{}/{}'.format(self.basedir,
                                             'sysbench_test_db')):
                shutil.rmtree('{}/{}'.format(self.basedir, 'sysbench_test_db'))
            sql_create_table = "create table sysbench_test_db.t1(c varchar(255)) data directory='{}'".format(
                self.basedir)
            RunBenchmark.run_sql_statement(basedir=self.basedir,
                                           sql_statement=sql_create_table)
            sql_insert_data = "insert into sysbench_test_db.t1 select c from sysbench_test_db.sbtest1"
            RunBenchmark.run_sql_statement(basedir=self.basedir,
                                           sql_statement=sql_insert_data)

        flush_tables = "flush tables"
        RunBenchmark.run_sql_statement(basedir=self.basedir,
                                       sql_statement=flush_tables)

        sleep(10)

        try:
            for _ in range(int(self.incremental_count) + 1):
                # RunBenchmark().run_sysbench_run(basedir=self.basedir)
                # TODO: enable when you pass --lock-ddl-per-table or --lock-ddl; disabled by default
                # Fix for https://github.com/ShahriyarR/MySQL-AutoXtraBackup/issues/243
                # Calling here ddl_test.sh file for running some DDLs.
                # self.run_ddl_test_sh(basedir=self.basedir, sock="{}/socket.sock".format(self.basedir))

                # Disabled based on -> https://bugs.mysql.com/bug.php?id=89977
                # Concurrently running select on myisam based tables.
                # with concurrent.futures.ProcessPoolExecutor(max_workers=50) as pool:
                #     for _ in range(10):
                #         for i in range(20, 25):
                #             pool.submit(
                #                 self.parallel_sleep_queries(basedir=self.basedir,
                #                                             sock="{}/socket.sock".format(self.basedir),
                #                                             sql="select benchmark(9999999, md5(c)) from sysbench_test_db.sbtest{}".format(
                #                                                 i)))

                self.all_backup()
                # self.check_kill_process('call_ddl_test')
        except Exception as err:
            print(err)
            raise
        else:
            if os.path.isfile('{}/out_ts1.ibd'.format(self.basedir)):
                os.remove('{}/out_ts1.ibd'.format(self.basedir))

            if os.path.isfile('{}/sysbench_test_db/t1.ibd'.format(
                    self.basedir)):
                os.remove('{}/sysbench_test_db/t1.ibd'.format(self.basedir))

            # TODO: enable this after fix for https://bugs.launchpad.net/percona-xtrabackup/+bug/1736380
            # self.general_tablespace_rel(self.basedir)
        finally:
            # self.check_kill_process('call_ddl_test')
            self.check_kill_process('call_temp_table_test')
            self.check_kill_process('call_create_index_temp')
            self.check_kill_process('call_innodb_alter_encryption_alters')
            self.check_kill_process('call_innodb_alter_encryption_sql')
            pass

        return True
Example #13
def create_million_tables(basedir):
    for i in range(1000000):
        sql_create = "create table sysbench_test_db.ddl_table{}(id int not null)"
        RunBenchmark.run_sql_statement(basedir=basedir,
                                       sql_statement=sql_create.format(i))