def sysbench_run(self, socket, db):
    """Sysbench load test over increasing thread counts.

    For each thread count the data set is cleaned up and reloaded, then the
    table counts are compared between node1 and node2.

    :param socket: node1 mysql socket path
    :param db: database the sysbench tables are loaded into
    """
    # Sysbench load test
    threads = [32, 64, 128, 256, 1024]
    version = utility_cmd.version_check(BASEDIR)
    checksum = ""
    # pt-table-checksum sanity check applies to pre-8.0 servers only.
    if int(version) < int("080000"):
        checksum = table_checksum.TableChecksum(PT_BASEDIR, BASEDIR, WORKDIR,
                                                NODE, socket, debug)
        checksum.sanity_check()
    for thread in threads:
        sysbench = sysbench_run.SysbenchRun(BASEDIR, WORKDIR, socket, debug)
        if thread == 32:
            # Run the sanity check only once, on the first iteration.
            result = sysbench.sanity_check(db)
            utility_cmd.check_testcase(result, "Sysbench run sanity check")
        result = sysbench.sysbench_cleanup(db, thread, thread,
                                           SYSBENCH_LOAD_TEST_TABLE_SIZE)
        utility_cmd.check_testcase(
            result, "Sysbench data cleanup (threads : " + str(thread) + ")")
        result = sysbench.sysbench_load(db, thread, thread,
                                        SYSBENCH_LOAD_TEST_TABLE_SIZE)
        utility_cmd.check_testcase(
            result, "Sysbench data load (threads : " + str(thread) + ")")
        # Give replication a moment to settle before comparing table counts.
        time.sleep(5)
        result = utility_cmd.check_table_count(
            BASEDIR, db, socket, WORKDIR + '/node2/mysql.sock')
        # BUG FIX: the message previously hard-coded "test"; report the
        # database that was actually checked.
        utility_cmd.check_testcase(result, "Checksum run for DB: " + db)
def sysbench_run(self, socket, db):
    """Random-load sysbench test.

    For each table count the data set is reloaded, an OLTP read/write
    workload is run at a range of thread counts, and then the table counts
    are compared between node1 and node2.

    :param socket: node1 mysql socket path
    :param db: database the sysbench tables are loaded into
    """
    checksum = ""
    # Sysbench load test
    tables = [50, 100, 300, 600, 1000]
    threads = [32, 64, 128, 256, 512, 1024]
    version = utility_cmd.version_check(BASEDIR)
    # pt-table-checksum sanity check applies to pre-8.0 servers only.
    if int(version) < int("080000"):
        checksum = table_checksum.TableChecksum(PT_BASEDIR, BASEDIR, WORKDIR,
                                                NODE, socket, debug)
        checksum.sanity_check()
    sysbench = sysbench_run.SysbenchRun(BASEDIR, WORKDIR, socket, debug)
    result = sysbench.sanity_check(db)
    # BUG FIX: report the sanity check once, next to the call that produced
    # `result` (previously the same stale result was re-reported on every
    # table-count pass inside the loop).
    utility_cmd.check_testcase(result, "Sysbench run sanity check")
    for table_count in tables:
        result = sysbench.sysbench_cleanup(
            db, table_count, table_count, SYSBENCH_RANDOM_LOAD_TABLE_SIZE)
        # BUG FIX: label says "tables" — table_count is a table count, not a
        # thread count.
        utility_cmd.check_testcase(
            result, "Sysbench data cleanup (tables : " + str(table_count) + ")")
        result = sysbench.sysbench_load(db, table_count, table_count,
                                        SYSBENCH_RANDOM_LOAD_TABLE_SIZE)
        utility_cmd.check_testcase(
            result, "Sysbench data load (tables : " + str(table_count) + ")")
        for thread in threads:
            sysbench.sysbench_oltp_read_write(
                db, table_count, thread, SYSBENCH_RANDOM_LOAD_TABLE_SIZE,
                SYSBENCH_RANDOM_LOAD_RUN_TIME)
        # Give replication a moment to settle before comparing table counts.
        time.sleep(5)
        result = utility_cmd.check_table_count(
            BASEDIR, db, socket, WORKDIR + '/node2/mysql.sock')
        utility_cmd.check_testcase(result, "Checksum run for DB: " + db)
def sysbench_run(self, socket, db, port): # Sysbench data load version = utility_cmd.version_check(BASEDIR) checksum = "" if int(version) < int("080000"): checksum = table_checksum.TableChecksum(PT_BASEDIR, BASEDIR, WORKDIR, NODE, socket, debug) checksum.sanity_check() sysbench = sysbench_run.SysbenchRun(BASEDIR, WORKDIR, socket, debug) result = sysbench.sanity_check(db) utility_cmd.check_testcase(result, "Sysbench run sanity check") result = sysbench.sysbench_load(db, 50, 50, SYSBENCH_NORMAL_TABLE_SIZE) utility_cmd.check_testcase( result, "Sysbench data load (threads : " + str(SYSBENCH_THREADS) + ")") # Sysbench OLTP read write run query = "sysbench /usr/share/sysbench/oltp_read_write.lua" \ " --table-size=" + str(SYSBENCH_NORMAL_TABLE_SIZE) + \ " --tables=" + str(50) + \ " --threads=" + str(50) + \ " --mysql-db=test --mysql-user="******" --mysql-password="******" --db-driver=mysql --mysql-host=127.0.0.1 --mysql-port=" + str(port) + \ " --time=300 --db-ps-mode=disable run > " + WORKDIR + "/log/sysbench_read_write.log" if debug == 'YES': print(query) query_status = os.system(query) if int(query_status) != 0: print("ERROR!: sysbench read write run is failed") utility_cmd.check_testcase(result, "Sysbench read write run") utility_cmd.check_testcase(0, "Sysbench read write run")
def sysbench_run(self, socket, db):
    """Load the customized sysbench data set into *db*.

    :param socket: node1 mysql socket path
    :param db: database the sysbench tables are loaded into
    """
    # Sysbench load test
    version = utility_cmd.version_check(BASEDIR)
    # pt-table-checksum sanity check applies to pre-8.0 servers only.
    if int(version) < int("080000"):
        checksum = table_checksum.TableChecksum(PT_BASEDIR, BASEDIR, WORKDIR,
                                                NODE, socket, debug)
        checksum.sanity_check()
    sysbench = sysbench_run.SysbenchRun(BASEDIR, WORKDIR, socket, debug)
    result = sysbench.sanity_check(db)
    utility_cmd.check_testcase(result, "Sysbench run sanity check")
    # CLEANUP: removed the unused local `threads = [32, 64, 128]` — this
    # variant performs a single custom-table data load.
    result = sysbench.sysbench_custom_table(
        db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS,
        SYSBENCH_CUSTOMIZED_DATALOAD_TABLE_SIZE)
    utility_cmd.check_testcase(result, "Sysbench data load")
def sysbench_run(self, socket, db):
    """Perform the sysbench data load for *db* and report each step.

    :param socket: node1 mysql socket path
    :param db: database the sysbench tables are loaded into
    """
    # Sysbench data load
    server_version = utility_cmd.version_check(BASEDIR)
    checksum = ""
    # pt-table-checksum sanity check applies to pre-8.0 servers only.
    if int(server_version) < int("080000"):
        checksum = table_checksum.TableChecksum(
            PT_BASEDIR, BASEDIR, WORKDIR, NODE, socket, debug)
        checksum.sanity_check()
    loader = sysbench_run.SysbenchRun(BASEDIR, WORKDIR, socket, debug)
    utility_cmd.check_testcase(loader.sanity_check(db),
                               "Sysbench run sanity check")
    status = loader.sysbench_load(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS,
                                  SYSBENCH_LOAD_TEST_TABLE_SIZE)
    utility_cmd.check_testcase(
        status, "Sysbench data load (threads : " + str(SYSBENCH_THREADS) + ")")
def sysbench_run(self, socket, db):
    """Run the custom read-QA sysbench workload at several thread counts,
    verifying node1/node2 table counts after each run.

    :param socket: node1 mysql socket path
    :param db: database the workload runs against
    """
    # Sysbench load test
    concurrency_levels = [32, 64, 128]
    # pt-table-checksum sanity check applies to pre-8.0 servers only.
    if int(utility_cmd.version_check(BASEDIR)) < int("080000"):
        checksum = table_checksum.TableChecksum(
            PT_BASEDIR, BASEDIR, WORKDIR, NODE, socket, debug)
        checksum.sanity_check()
    bench = sysbench_run.SysbenchRun(BASEDIR, WORKDIR, socket, debug)
    for level in concurrency_levels:
        utility_cmd.check_testcase(bench.sanity_check(db),
                                   "Sysbench run sanity check")
        bench.sysbench_custom_read_qa(db, 5, level,
                                      SYSBENCH_READ_QA_TABLE_SIZE)
        # Give replication a moment to settle before comparing table counts.
        time.sleep(5)
        status = utility_cmd.check_table_count(
            BASEDIR, db, socket, WORKDIR + '/node2/mysql.sock')
        utility_cmd.check_testcase(status, "Checksum run for DB: " + db)
def sysbench_run(self, socket, db):
    """Sysbench load test over increasing thread counts, with a
    pt-table-checksum data-consistency check after each load.

    :param socket: node1 mysql socket path
    :param db: database the sysbench tables are loaded into
    """
    # Sysbench load test
    threads = [32, 64, 128, 256, 1024]
    checksum = table_checksum.TableChecksum(pt_basedir, basedir, workdir,
                                            node, socket)
    checksum.sanity_check()
    for thread in threads:
        sysbench = sysbench_run.SysbenchRun(basedir, workdir,
                                            sysbench_user, sysbench_pass,
                                            socket, thread,
                                            sysbench_table_size, db,
                                            thread, sysbench_run_time)
        if thread == 32:
            # Run the sanity check only once, on the first iteration.
            result = sysbench.sanity_check()
            utility_cmd.check_testcase(result, "Sysbench run sanity check")
        result = sysbench.sysbench_cleanup()
        utility_cmd.check_testcase(
            result, "Sysbench data cleanup (threads : " + str(thread) + ")")
        result = sysbench.sysbench_load()
        utility_cmd.check_testcase(
            result, "Sysbench data load (threads : " + str(thread) + ")")
        # BUG FIX: verify the database this run actually loaded into, not a
        # hard-coded 'test'.
        checksum.data_consistency(db)
# NOTE(review): this span appears to combine the tail of a data-load helper
# (it references `self.basedir`, `socket`, `db`) with the module-level SSL QA
# driver code; the enclosing `def` is not visible here and the original
# indentation was lost, so the layout below is reconstructed — confirm
# against the full file.
if os.path.isfile(parent_dir + '/util/createsql.py'):
    # Generate /tmp/dataload.sql with a 1000-row sample table.
    generate_sql = createsql.GenerateSQL('/tmp/dataload.sql', 1000)
    generate_sql.OutFile()
    generate_sql.CreateTable()
    # Restore stdout (GenerateSQL presumably redirects it — TODO confirm).
    sys.stdout = sys.__stdout__
# Recreate the sample DB, then bulk-load the generated SQL into it.
create_db = self.basedir + "/bin/mysql --user=root --socket=" + \
    socket + ' -Bse"drop database if exists ' + db + \
    ';create database ' + db + ';" 2>&1'
result = os.system(create_db)
utility_cmd.check_testcase(result, "SSL QA sample DB creation")
data_load_query = self.basedir + "/bin/mysql --user=root --socket=" + \
    socket + ' ' + db + ' -f < /tmp/dataload.sql >/dev/null 2>&1'
result = os.system(data_load_query)
utility_cmd.check_testcase(result, "SSL QA sample data load")
# --- Module-level SSL QA driver ---
print("\nPXC SSL test")
print("--------------")
ssl_run = SSLCheck(basedir, workdir, user, node1_socket, node)
ssl_run.start_pxc()
ssl_run.sysbench_run(node1_socket, 'test')
ssl_run.data_load('pxc_dataload_db', node1_socket)
rqg_dataload = rqg_datagen.RQGDataGen(basedir, workdir, 'examples', user)
rqg_dataload.initiate_rqg('test', node1_socket)
result = utility_cmd.check_table_count(basedir, 'test', 'sbtest1',
                                       node1_socket, node2_socket)
utility_cmd.check_testcase(result,
                           "SSL QA table test.sbtest1 checksum between nodes")
# NOTE(review): the next line re-reports the SAME `result` under a
# pxc_dataload_db.t1 label — a check_table_count call for
# pxc_dataload_db.t1 appears to be missing here; confirm against the
# intended test plan.
utility_cmd.check_testcase(result,
                           "SSL QA table pxc_dataload_db.t1 checksum between nodes")
# Cross-node consistency check via pt-table-checksum.
checksum = table_checksum.TableChecksum(pt_basedir, basedir, workdir, node,
                                        node1_socket)
checksum.sanity_check()
checksum.data_consistency('test,pxc_dataload_db')
# NOTE(review): original indentation was lost; layout reconstructed. The
# first few lines (using `data_load_query`) look like the tail of a
# data-load helper, followed by the module-level SSL QA driver — confirm
# against the full file.
if debug == 'YES':
    print(data_load_query)
result = os.system(data_load_query)
utility_cmd.check_testcase(result, "SSL QA sample data load")
# --- Module-level SSL QA driver ---
print("\nPXC SSL test")
print("--------------")
ssl_run = SSLCheck(BASEDIR, WORKDIR, USER, WORKDIR + '/node1/mysql.sock', NODE)
ssl_run.start_pxc()
ssl_run.sysbench_run(WORKDIR + '/node1/mysql.sock', 'sbtest')
ssl_run.data_load('pxc_dataload_db', WORKDIR + '/node1/mysql.sock')
rqg_dataload = rqg_datagen.RQGDataGen(BASEDIR, WORKDIR, USER, debug)
rqg_dataload.initiate_rqg('examples', 'test', WORKDIR + '/node1/mysql.sock')
version = utility_cmd.version_check(BASEDIR)
if int(version) < int("080000"):
    # Pre-8.0: verify data consistency with pt-table-checksum.
    checksum = table_checksum.TableChecksum(PT_BASEDIR, BASEDIR, WORKDIR, NODE,
                                            WORKDIR + '/node1/mysql.sock',
                                            debug)
    checksum.sanity_check()
    checksum.data_consistency('test,pxc_dataload_db')
else:
    # 8.0+: compare table counts between node1 and node2 directly.
    result = utility_cmd.check_table_count(BASEDIR, 'test',
                                           WORKDIR + '/node1/mysql.sock',
                                           WORKDIR + '/node2/mysql.sock')
    utility_cmd.check_testcase(result, "Checksum run for DB: test")
    result = utility_cmd.check_table_count(BASEDIR, 'pxc_dataload_db',
                                           WORKDIR + '/node1/mysql.sock',
                                           WORKDIR + '/node2/mysql.sock')
    utility_cmd.check_testcase(result, "Checksum run for DB: pxc_dataload_db")
# NOTE(review): original indentation was lost; layout reconstructed. The
# first lines (using `self`, `result`, `i`) look like the tail of an
# upgrade/restart helper method; the rest is the module-level upgrade-test
# driver — confirm against the full file.
utility_cmd.check_testcase(
    result, "Starting cluster node" + str(i) + " after upgrade run")
self.startup_check(i)
# Extract the bare x.y[.z] version string from each mysqld binary.
# NOTE(review): '\.' inside a non-raw Python string is an invalid escape
# sequence (SyntaxWarning on 3.12+); consider raw strings for these shell
# commands.
query = pxc_lower_base + "/bin/mysqld --version 2>&1 | grep -oe '[0-9]\.[0-9][\.0-9]*' | head -n1"
lower_version = os.popen(query).read().rstrip()
query = pxc_upper_base + "/bin/mysqld --version 2>&1 | grep -oe '[0-9]\.[0-9][\.0-9]*' | head -n1"
upper_version = os.popen(query).read().rstrip()
# --- Module-level upgrade-test driver ---
print("\nPXC Upgrade test : Upgrading from PXC-" + lower_version +
      " to PXC-" + upper_version)
print(
    "------------------------------------------------------------------------------"
)
# Checksum helper is built against the UPPER basedir (used after upgrade).
checksum = table_checksum.TableChecksum(pt_basedir, pxc_upper_base, workdir,
                                        node, socket)
upgrade_qa = PXCUpgrade()
upgrade_qa.startup()
# Populate three RQG data sets using the lower-version basedir before the
# upgrade is performed.
rqg_dataload = rqg_datagen.RQGDataGen(pxc_lower_base, workdir, 'galera', user)
rqg_dataload.initiate_rqg('rqg_galera', socket)
rqg_dataload = rqg_datagen.RQGDataGen(pxc_lower_base, workdir,
                                      'transactions', user)
rqg_dataload.initiate_rqg('rqg_transactions', socket)
rqg_dataload = rqg_datagen.RQGDataGen(pxc_lower_base, workdir,
                                      'partitioning', user)
rqg_dataload.initiate_rqg('rqg_partitioning', socket)
upgrade_qa.upgrade()
# Verify data consistency across all loaded schemas after the upgrade.
checksum.sanity_check()
checksum.data_consistency('test,rqg_galera,rqg_transactions,rqg_partitioning')