def test_py_step(self):
    """Wipe the packet, run it, and verify the rows it imports.

    Two passes: the first run carries --wipe to clear previous packet
    state; the second run must finish DONE/SUCCESS and leave the two
    imported files' contents in public.test_tbl_import.
    """
    parser = DBCParams.get_arg_parser()
    args = parser.parse_args([
        '--packet-name=' + self.packet_name,
        '--db-name=' + self.test_dbc_01
    ])
    # First pass with --wipe clears any state left by earlier tests.
    MainRoutine(
        parser.parse_args([
            '--packet-name=' + self.packet_name,
            '--db-name=' + self.test_dbc_01,
            '--wipe'
        ]), self.conf_file).run()
    main = MainRoutine(args, self.conf_file)
    res_2 = main.run()
    # assertEqual reports both values on failure, unlike assertTrue(a == b).
    self.assertEqual(res_2.packet_status[self.test_dbc_01], PacketStatus.DONE)
    self.assertEqual(res_2.result_code[self.test_dbc_01], ResultCode.SUCCESS)
    db_local = postgresql.open(main.sys_conf.dbs_dict[self.test_dbc_01])
    content = get_resultset(
        db_local, """
        SELECT content
        FROM public.test_tbl_import
        WHERE fname in ('data_a.txt', 'data_b.txt')
        ORDER BY id
    """)
    self.assertEqual(content[0][0], 'Some raw data A')
    self.assertEqual(content[1][0], 'Some raw data B')
    db_local.close()
def test_lock(self, mocked_requests_post):
    """A locked packet must come back LOCKED; after unlock it must succeed.

    :param mocked_requests_post: patched requests.post (injected by the
        test decorator; unused directly here).
    """
    parser = DBCParams.get_arg_parser()
    args = parser.parse_args([
        '--packet-name=' + self.packet_name,
        '--db-name=' + self.test_dbc_01
    ])
    dbc = MainRoutine(args, self.conf_file)
    db_conn = postgresql.open(dbc.sys_conf.dbs_dict[self.test_dbc_01])
    # Hold the packet lock so the first run cannot proceed.
    ActionTracker.set_packet_lock(db_conn, dbc.sys_conf.schema_location,
                                  self.packet_name)
    res_1 = dbc.run()
    # assertEqual reports both values on failure, unlike assertTrue(a == b).
    self.assertEqual(res_1.packet_status[self.test_dbc_01],
                     PacketStatus.STARTED)
    self.assertEqual(res_1.result_code[self.test_dbc_01], ResultCode.LOCKED)
    ActionTracker.set_packet_unlock(db_conn, dbc.sys_conf.schema_location,
                                    self.packet_name)
    res_2 = MainRoutine(args, self.conf_file).run()
    self.assertEqual(res_2.packet_status[self.test_dbc_01], PacketStatus.DONE)
    self.assertEqual(res_2.result_code[self.test_dbc_01], ResultCode.SUCCESS)
    db_conn.close()
def test_conn_err(self):
    """The runner must recover when its DB connections are killed mid-run."""
    parser = DBCParams.get_arg_parser()
    args = parser.parse_args([
        '--packet-name=' + self.packet_name,
        '--db-name=' + self.test_dbc_01
    ])
    dbc = MainRoutine(args, self.conf_file)
    db_conn = postgresql.open(dbc.sys_conf.dbs_dict[self.test_dbc_01])
    ActionTracker.init_tbls(db_conn, dbc.sys_conf.schema_location)
    ActionTracker.set_packet_unlock(db_conn, dbc.sys_conf.schema_location,
                                    self.packet_name)
    db_conn.close()
    main = MainRoutine(args, self.conf_file)

    @threaded
    def emulate_conn_error():
        # Give the main run a head start, then terminate its backends.
        time.sleep(2)
        th_db_conn = postgresql.open(
            dbc.sys_conf.dbs_dict[self.test_dbc_01])
        main.terminate_conns(th_db_conn, self.test_dbc_01,
                             main.sys_conf.application_name,
                             self.packet_name)
        th_db_conn.close()

    main.append_thread(self.test_dbc_01 + '_ext', emulate_conn_error())
    res = main.run()
    # assertEqual reports both values on failure, unlike assertTrue(a == b).
    self.assertEqual(res.packet_status[self.test_dbc_01], PacketStatus.DONE)
    self.assertEqual(res.result_code[self.test_dbc_01], ResultCode.SUCCESS)
def run_meta_test(statement_timeout, packet_status, result_code,
                  res_status, exception_descr):
    """Run the packet with a given statement_timeout and verify results.

    Checks both the in-memory DBCResult and the bookkeeping rows in
    <schema>.dbc_packets / <schema>.dbc_steps. Closure over the enclosing
    test's `parser` and `self`.

    :param statement_timeout: value injected via the --conf JSON
    :param packet_status: expected DBCResult packet status
    :param result_code: expected DBCResult result code
    :param res_status: expected status stored in dbc_packets/dbc_steps
    :param exception_descr: expected exception_descr in dbc_steps (or None)
    """
    args = parser.parse_args([
        '--packet-name=' + self.packet_name,
        '--db-name=' + self.test_dbc_01,
        '--conf={"statement_timeout":"%s","schema_location":"%s"}'
        % (statement_timeout, self.schema_location),
        '--skip-step-cancel'
    ])
    main = MainRoutine(args, self.conf_file)
    res = main.run()
    # assertEqual reports both values on failure, unlike assertTrue(a == b).
    self.assertEqual(res.packet_status[self.test_dbc_01], packet_status)
    self.assertEqual(res.result_code[self.test_dbc_01], result_code)
    db_local = postgresql.open(
        main.sys_conf.dbs_dict[self.test_dbc_01])
    dbc_packets_content = get_resultset(
        db_local,
        """SELECT status, meta_data FROM %s.dbc_packets where name = '%s'"""
        % (self.schema_location, self.packet_name))
    self.assertEqual(dbc_packets_content[0][0], res_status)
    dbc_steps_content = get_resultset(
        db_local,
        """SELECT s.status, exception_descr
        FROM %s.dbc_steps s
        JOIN %s.dbc_packets p on p.id = s.packet_id
        WHERE p.name = '%s'"""
        % (self.schema_location, self.schema_location, self.packet_name))
    self.assertEqual(dbc_steps_content[0][0], res_status)
    self.assertEqual(dbc_steps_content[0][1], exception_descr)
    db_local.close()
def test_placeholders(self):
    """--placeholders values must be substituted into the packet's SQL.

    The packet is expected to create role 'dbc_test_user' from the
    USER_NAME placeholder; we verify the role exists afterwards.
    """
    parser = DBCParams.get_arg_parser()
    # Wipe first so the packet actually re-runs.
    MainRoutine(
        parser.parse_args([
            '--packet-name=' + self.packet_name,
            '--db-name=' + self.test_dbc_01,
            '--wipe',
        ]), self.conf_file).run()
    args = parser.parse_args([
        '--packet-name=' + self.packet_name,
        '--db-name=' + self.test_dbc_01,
        '--placeholders={"USER_NAME":"dbc_test_user","PASSW":"1234"}',
    ])
    main = MainRoutine(args, self.conf_file)
    res = main.run()
    # assertEqual reports both values on failure, unlike assertTrue(a == b).
    self.assertEqual(res.packet_status[self.test_dbc_01], PacketStatus.DONE)
    self.assertEqual(res.result_code[self.test_dbc_01], ResultCode.SUCCESS)
    db_local = postgresql.open(main.sys_conf.dbs_dict[self.test_dbc_01])
    dbc_packets_content = get_resultset(
        db_local, """
        SELECT count(1)
        FROM pg_roles
        WHERE rolcanlogin = true
            AND rolname = 'dbc_test_user'
    """)
    self.assertEqual(dbc_packets_content[0][0], 1)
    db_local.close()
def test_lock(self, mocked_requests_post):
    """Wipe -> NOTHING_TODO; locked -> LOCKED; unlocked -> SUCCESS.

    :param mocked_requests_post: patched requests.post (injected by the
        test decorator; unused directly here).
    """
    dbc = MainRoutine(self.wipe_params, self.conf_file)
    res_1 = dbc.run()
    # assertEqual reports both values on failure, unlike assertTrue(a == b).
    self.assertEqual(res_1.packet_status[self.test_dbc_01], PacketStatus.NEW)
    self.assertEqual(res_1.result_code[self.test_dbc_01],
                     ResultCode.NOTHING_TODO)
    db_conn = postgresql.open(dbc.sys_conf.dbs_dict[self.test_dbc_01])
    ActionTracker.set_packet_lock(db_conn, dbc.sys_conf.schema_location,
                                  self.packet_name)
    res_2 = MainRoutine(self.run_params, self.conf_file).run()
    self.assertEqual(res_2.packet_status[self.test_dbc_01],
                     PacketStatus.STARTED)
    self.assertEqual(res_2.result_code[self.test_dbc_01], ResultCode.LOCKED)
    ActionTracker.set_packet_unlock(db_conn, dbc.sys_conf.schema_location,
                                    self.packet_name)
    db_conn.close()
    res_3 = MainRoutine(self.run_params, self.conf_file).run()
    self.assertEqual(res_3.packet_status[self.test_dbc_01], PacketStatus.DONE)
    self.assertEqual(res_3.result_code[self.test_dbc_01], ResultCode.SUCCESS)
def emulate_conn_error():
    """Pause briefly, then kill the runner's backend connections."""
    time.sleep(2)
    killer_conn = postgresql.open(
        dbc.sys_conf.dbs_dict[self.test_dbc_01])
    main.terminate_conns(killer_conn, self.test_dbc_01,
                         main.sys_conf.application_name,
                         self.packet_name)
    killer_conn.close()
def emulate_signal():  # for Windows
    """Windows fallback for SIGINT: raise the interrupt flag, then kill
    the runner's backend connections so it actually stops."""
    time.sleep(5)
    main.external_interrupt = True
    time.sleep(3)
    killer_conn = postgresql.open(
        dbc.sys_conf.dbs_dict[self.test_dbc_01])
    main.terminate_conns(killer_conn, self.test_dbc_01,
                         main.sys_conf.application_name,
                         self.packet_name)
    killer_conn.close()
def emulate_workload():
    """Hold a long-running transaction touching test_wait_tx_tbl."""
    workload_conn = postgresql.open(
        main.sys_conf.dbs_dict[self.test_dbc_01])
    workload_conn.execute("""
        do $$
        begin
            perform pg_sleep(3);
            perform * from public.test_wait_tx_tbl;
            perform pg_sleep(10);
        end$$
    """)
    workload_conn.close()
def emulate_workload(db_name):
    """Insert rows into public.test_tbl until done or the connection dies.

    Best-effort workload generator: any database error (typically the test
    terminating our backend) simply ends the workload early.

    :param db_name: connection string for postgresql.open
    """
    time.sleep(1)
    th_db_conn = postgresql.open(db_name)
    finished = False
    try:
        for i in range(1, 500):
            th_db_conn.execute("""
                INSERT INTO public.test_tbl(fld_1, fld_2)
                VALUES (%d, 'emulate_workload_%d');
            """ % (i, i))
            time.sleep(0.01)
        finished = True
    except Exception:
        # Deliberate best-effort: a killed connection is an expected outcome.
        return
    finally:
        # Bug fix: the connection previously leaked on the exception path.
        try:
            th_db_conn.close()
        except Exception:
            pass  # closing an already-dead connection may itself raise
    if finished:
        print(
            '================> thread emulate_workload finished for DB %s'
            % db_name)
def test_sigint(self, mocked_requests_post):
    """SIGINT (or the Windows flag fallback) must stop the run with
    packet status STARTED and result code TERMINATE.

    :param mocked_requests_post: patched requests.post (injected by the
        test decorator; unused directly here).
    """
    parser = DBCParams.get_arg_parser()
    args = parser.parse_args([
        '--packet-name=' + self.packet_name,
        '--db-name=' + self.test_dbc_01
    ])
    dbc = MainRoutine(args, self.conf_file)
    db_conn = postgresql.open(dbc.sys_conf.dbs_dict[self.test_dbc_01])
    ActionTracker.set_packet_unlock(db_conn, dbc.sys_conf.schema_location,
                                    self.packet_name)
    db_conn.close()
    main = MainRoutine(args, self.conf_file)

    @threaded
    def send_signal():
        # POSIX path: deliver a real SIGINT to our own process.
        time.sleep(5)
        pid = os.getpid()
        os.kill(pid, signal.SIGINT)

    @threaded
    def emulate_signal():  # for Windows
        # Windows has no usable SIGINT here: raise the interrupt flag and
        # terminate the backend connections instead.
        time.sleep(5)
        main.external_interrupt = True
        time.sleep(3)
        th_db_conn = postgresql.open(
            dbc.sys_conf.dbs_dict[self.test_dbc_01])
        main.terminate_conns(th_db_conn, self.test_dbc_01,
                             main.sys_conf.application_name,
                             self.packet_name)
        th_db_conn.close()

    if os.name == 'nt':
        main.append_thread(self.test_dbc_01 + '_ext', emulate_signal())
    else:
        main.append_thread(self.test_dbc_01 + '_ext', send_signal())
    res = main.run()
    # assertEqual reports both values on failure, unlike assertTrue(a == b).
    self.assertEqual(res.packet_status[self.test_dbc_01],
                     PacketStatus.STARTED)
    self.assertEqual(res.result_code[self.test_dbc_01], ResultCode.TERMINATE)
def test_packets(self, mocked_requests_post):
    """Run every prepared packet in self.runs; each must end SUCCESS/DONE.

    :param mocked_requests_post: patched requests.post (injected by the
        test decorator; unused directly here).
    """
    parser = DBCParams.get_arg_parser()
    args = parser.parse_args(
        ['--packet-name=dba_get_version', '--db-name=' + self.test_dbc_01])
    dbc = MainRoutine(args, self.conf_file)
    db_conn = postgresql.open(dbc.sys_conf.dbs_dict[self.test_dbc_01])
    ActionTracker.init_tbls(db_conn, dbc.sys_conf.schema_location)
    # Fix: loop variable renamed from 'args', which shadowed the bootstrap
    # namespace parsed above.
    for run_args in self.runs:
        ActionTracker.set_packet_unlock(db_conn, dbc.sys_conf.schema_location,
                                        run_args.packet_name)
        res = MainRoutine(run_args, self.conf_file).run()
        # assertEqual reports both values on failure, unlike assertTrue.
        self.assertEqual(res.result_code[run_args.db_name],
                         ResultCode.SUCCESS)
        self.assertEqual(res.packet_status[run_args.db_name],
                         PacketStatus.DONE)
    db_conn.close()
def test_skip_action_cancel(self):
    """With --skip-action-cancel a cancelled action must fail the packet
    (EXCEPTION/FAIL) instead of being retried."""
    parser = DBCParams.get_arg_parser()
    # Wipe first so the packet actually re-runs.
    MainRoutine(
        parser.parse_args([
            '--packet-name=' + self.packet_name,
            '--db-name=' + self.test_dbc_01,
            '--wipe'
        ]), self.conf_file).run()
    args = parser.parse_args([
        '--packet-name=' + self.packet_name,
        '--db-name=' + self.test_dbc_01,
        '--skip-action-cancel'
    ])
    dbc = MainRoutine(args, self.conf_file)
    db_conn = postgresql.open(dbc.sys_conf.dbs_dict[self.test_dbc_01])
    ActionTracker.set_packet_unlock(db_conn, dbc.sys_conf.schema_location,
                                    self.packet_name)
    db_conn.close()
    main = MainRoutine(args, self.conf_file)

    @threaded
    def emulate_conn_error():
        # Cancel (terminate=False) the runner's statements mid-run.
        time.sleep(5)
        th_db_conn = postgresql.open(
            dbc.sys_conf.dbs_dict[self.test_dbc_01])
        main.terminate_conns(th_db_conn, self.test_dbc_01,
                             main.sys_conf.application_name,
                             self.packet_name, terminate=False)
        th_db_conn.close()

    main.append_thread(self.test_dbc_01 + '_ext', emulate_conn_error())
    res_2 = main.run()
    # assertEqual reports both values on failure, unlike assertTrue(a == b).
    self.assertEqual(res_2.packet_status[self.test_dbc_01],
                     PacketStatus.EXCEPTION)
    self.assertEqual(res_2.result_code[self.test_dbc_01], ResultCode.FAIL)
def cleanup():
    """Drop and recreate the packets test database, evicting stray sessions."""
    admin_conn = postgresql.open(dbc.sys_conf.dbs_dict['pg_db'])
    # Kick out every other session connected to the target database so the
    # DROP cannot be blocked.
    admin_conn.execute("""
        SELECT pg_terminate_backend(pid)
        FROM pg_stat_activity
        WHERE pid <> pg_backend_pid()
            AND datname = '%s'
    """ % self.test_dbc_packets)
    admin_conn.execute("""DROP DATABASE IF EXISTS %s""" % self.test_dbc_packets)
    admin_conn.execute("""
        CREATE DATABASE %s
            WITH
            OWNER = postgres
            ENCODING = 'UTF8'
            LC_COLLATE = 'en_US.UTF-8'
            LC_CTYPE = 'en_US.UTF-8'
            TABLESPACE = pg_default
            template = template0""" % self.test_dbc_packets)
    admin_conn.close()
def test_create_db(self):
    """One-time fixture: reset tracker state and rebuild the test databases.

    Guarded by the module-level flag so the expensive rebuild only runs
    once per test session.
    """
    global call_TestDBCPrepareDBs
    if call_TestDBCPrepareDBs:
        return  # databases already prepared earlier in this session
    parser = DBCParams.get_arg_parser()
    args = parser.parse_args([
        '--packet-name=' + self.packet_name,
        '--db-name=' + self.pg_db,
        '--wipe'
    ])
    dbc = MainRoutine(args, self.conf_file)
    db_conn = postgresql.open(dbc.sys_conf.dbs_dict[self.pg_db])
    ActionTracker.cleanup(db_conn, dbc.sys_conf.schema_location)
    # Evict sessions holding the databases open, then drop them.
    db_conn.execute("""
        SELECT pg_terminate_backend(pid)
        FROM pg_stat_activity
        WHERE pid <> pg_backend_pid()
            AND datname in ('test_dbc_01', 'test_dbc_02')
    """)
    db_conn.execute("""DROP DATABASE IF EXISTS test_dbc_01""")
    db_conn.execute("""DROP DATABASE IF EXISTS test_dbc_02""")
    db_conn.close()
    # Wipe pass, unlock pass, then the real creation run.
    MainRoutine(args, self.conf_file).run()
    MainRoutine(
        parser.parse_args([
            '--packet-name=' + self.packet_name,
            '--db-name=' + self.pg_db,
            '--unlock'
        ]), self.conf_file).run()
    res_1 = MainRoutine(
        parser.parse_args([
            '--packet-name=' + self.packet_name,
            '--db-name=' + self.pg_db
        ]), self.conf_file).run()
    call_TestDBCPrepareDBs = True
def run(self) -> DBCResult:
    """Execute the configured command on every target database.

    Flow: log incoming args -> optional interactive confirmation /
    --list short-circuit -> run_on_db() per target (spawning worker
    threads) -> wait for all threads -> translate each WorkerResult into
    ResultCode/PacketStatus -> assemble and return a DBCResult snapshot.

    Returns:
        DBCResult with copies of per-database result_code, packet_status
        and result_data.
    """
    self.logger.log('=====> DBC %s started' % VERSION, "Info", do_print=True)
    self.logger.log("#--------------- Incoming parameters", "Info")
    for arg in vars(self.args):
        self.logger.log("# %s = %s" % (arg, getattr(self.args, arg)), "Info")
    self.logger.log("#-----------------------------------", "Info")
    # ========================================================================
    # confirmation
    break_deployment = False
    if not self.args.list and not self.args.status and self.sys_conf.db_name_all_confirmation:
        if len(self.dbs) > 1 and not self.args.force:
            print("Deployment will be performed on these databases:\n")
            for db_name in self.dbs:
                print(" " + db_name)
            cmd_question = input('\nDo you want to continue? Type YES to continue...\n')
            if cmd_question != "YES":
                print('Stopping...')
                break_deployment = True
                # NOTE(review): db_name here is the loop variable left over
                # from the print loop above, so only the LAST listed db gets
                # NOTHING_TODO -- confirm this is intended.
                self.result_code[db_name] = ResultCode.NOTHING_TODO
    # ========================================================================
    if self.args.list:
        # --list only prints the targets; nothing is deployed.
        print("List of targets:")
        for db_name in self.dbs:
            print(" " + db_name)
        break_deployment = True
        # NOTE(review): same leftover-loop-variable pattern as above.
        self.result_code[db_name] = ResultCode.NOTHING_TODO
    if not break_deployment:
        if len(self.dbs) == 0:
            # No resolvable targets: mark every requested db as unknown.
            self.logger.log('No target databases!', "Error", do_print=True)
            for db in self.args.db_name.split(','):
                self.packet_status[db] = PacketStatus.UNKNOWN
                self.result_code[db] = ResultCode.NOTHING_TODO
        for db_name in self.dbs:
            try:
                self.run_on_db(db_name, self.sys_conf.dbs_dict[db_name])
                if self.args.seq:
                    self.wait_threads()  # wait on pair (lock_observer, worker_db_func)
            except (
                postgresql.exceptions.AuthenticationSpecificationError,
                postgresql.exceptions.ClientCannotConnectError,
                TimeoutError
            ) as e:
                # Connection-level failures are logged and the loop moves on
                # to the next database.
                self.logger.log(
                    'Cannot connect to %s: \n%s' % (db_name, exception_helper(self.sys_conf.detailed_traceback)),
                    "Error",
                    do_print=True
                )
    if not break_deployment:
        self.wait_threads()  # wait all threads
        for db, result in self.workers_result.items():
            if db in self.sys_conf.dbs_dict:
                # Release the packet lock now that the worker is done.
                db_conn = postgresql.open(self.sys_conf.dbs_dict[db])
                ActionTracker.set_packet_unlock(db_conn, self.sys_conf.schema_location, self.args.packet_name)
                db_conn.close()
            # Map the worker outcome onto the public result/status pair.
            if result == WorkerResult.SUCCESS:
                self.result_code[db] = ResultCode.SUCCESS
                self.packet_status[db] = PacketStatus.DONE
            if result == WorkerResult.FAIL:
                self.result_code[db] = ResultCode.FAIL
                self.packet_status[db] = PacketStatus.EXCEPTION
            if result == WorkerResult.TERMINATE:
                self.result_code[db] = ResultCode.TERMINATE
                self.packet_status[db] = PacketStatus.STARTED
    self.logger.log('<===== DBC %s finished' % VERSION, "Info", do_print=True)
    self.logger.stop()
    # Snapshot the per-db dicts so the caller's copy is not mutated by
    # any later cleanup.
    result = DBCResult()
    result.command_type = self.command_type
    result.packet_type = self.packet_type
    result.result_code = self.result_code.copy()
    result.packet_status = self.packet_status.copy()
    result.result_data = self.result_data.copy()
    self.cleanup()
    return result
def run_on_db(self, db_name, str_conn):
    """Dispatch the requested command (--stop/--wipe/--status/--unlock/run)
    against a single database.

    For a plain run it acquires the packet lock and spawns the
    lock_observer + worker_db_func thread pair; all other flags are
    handled inline on this connection.

    :param db_name: logical database name (key in sys_conf.dbs_dict)
    :param str_conn: connection string for postgresql.open
    """
    db_conn = postgresql.open(str_conn)
    # ================================================================================================
    # Check 'dbc_packets', 'dbc_steps', 'dbc_actions', 'dbc_locks' tables
    ActionTracker.init_tbls(db_conn, self.sys_conf.schema_location)
    # ================================================================================================
    if self.packet_type == PacketType.DEFAULT or self.args.status:
        self.fill_status(db_name, db_conn)
    if self.packet_type in (
        PacketType.READ_ONLY,
        PacketType.MAINTENANCE,
        PacketType.NO_COMMIT,
        PacketType.EXPORT_DATA
    ) and not self.args.status:
        # These packet types carry no persistent state: always start fresh.
        self.packet_status[db_name] = PacketStatus.NEW
    # ================================================================================================
    if self.args.stop:
        # --stop: terminate this packet's active connections.
        term_conn_res = self.terminate_conns(
            db_conn, db_name, self.sys_conf.application_name, self.args.packet_name
        )
        self.result_code[db_name] = ResultCode.SUCCESS if term_conn_res else ResultCode.NOTHING_TODO
    # ================================================================================================
    if self.args.wipe and self.packet_type not in (
        PacketType.READ_ONLY,
        PacketType.MAINTENANCE,
        PacketType.NO_COMMIT,
        PacketType.EXPORT_DATA
    ):
        # --wipe: clear tracked state for this packet and release its lock.
        wipe_res = ActionTracker.wipe_packet(db_conn, self.sys_conf.schema_location, self.args.packet_name)
        ActionTracker.set_packet_unlock(db_conn, self.sys_conf.schema_location, self.args.packet_name)
        if wipe_res:
            self.result_code[db_name] = ResultCode.SUCCESS
            print("=====> Database '%s', packet '%s' successfully wiped!" % (db_name, self.args.packet_name))
        else:
            self.result_code[db_name] = ResultCode.NOTHING_TODO
            print("=====> Database '%s', packet '%s' data not found!" % (db_name, self.args.packet_name))
        self.packet_status[db_name] = PacketStatus.NEW
    elif self.args.wipe:
        # Stateless packet types have nothing stored to wipe.
        print("=====> Database '%s', packet '%s' nothing to wipe!"
              % (db_name, self.args.packet_name))
        self.result_code[db_name] = ResultCode.NOTHING_TODO
        self.packet_status[db_name] = PacketStatus.NEW
    # ================================================================================================
    if self.args.status:
        # --status: report current packet state and any recorded exception.
        print(
            "=====> Database '%s', packet '%s' status: %s" % (
                db_name,
                self.args.packet_name,
                "new" if db_name not in self.packet_status else self.packet_status[db_name]
            )
        )
        self.result_code[db_name] = ResultCode.SUCCESS
        if "exception_descr" in self.db_packet_status and self.db_packet_status["exception_descr"] is not None:
            print(" Action date time: %s" % str(
                self.db_packet_status["exception_dt"]))
            print("=".join(['=' * 100]))
            print(self.db_packet_status["exception_descr"])
            print("=".join(['=' * 100]))
    # ================================================================================================
    if not self.args.unlock and self.command_type == CommandType.RUN:
        if self.packet_status[db_name] != PacketStatus.DONE:
            # ===========================================
            if ActionTracker.is_packet_locked(db_conn, self.sys_conf.schema_location, self.args.packet_name):
                # Another run holds the lock: report and do not start workers.
                self.logger.log(
                    '=====> Packet %s is locked in DB %s' % (self.args.packet_name, db_name),
                    "Error",
                    do_print=True
                )
                self.result_code[db_name] = ResultCode.LOCKED
                self.packet_status[db_name] = PacketStatus.STARTED
            else:
                # Acquire the lock, then spawn the observer/worker pair.
                ActionTracker.set_packet_lock(db_conn, self.sys_conf.schema_location, self.args.packet_name)
                self.logger.log(
                    '=====> Hold lock for packet %s in DB %s' % (self.args.packet_name, db_name),
                    "Info",
                    do_print=True
                )
                self.append_thread(
                    db_name,
                    self.lock_observer("lock_observer_%s" % str(db_name), str_conn, db_name, self.args.packet_name)
                )
                self.append_thread(
                    db_name,
                    self.worker_db_func(
                        "manager_db_%s" % str(db_name),
                        str_conn,
                        db_name,
                        self.args.packet_name,
                        self.packet_type == PacketType.READ_ONLY
                    )
                )
                self.logger.log(
                    '--------> Packet \'%s\' started for \'%s\' database!' % \
                    (self.args.packet_name, db_name),
                    "Info",
                    do_print=True
                )
        # ===========================================
        if self.packet_status[db_name] == PacketStatus.DONE:
            # Already deployed: nothing to run.
            self.logger.log(
                '<-------- Packet \'%s\' already deployed to \'%s\' database!' % \
                (self.args.packet_name, db_name),
                "Info",
                do_print=True
            )
            self.packet_status[db_name] = PacketStatus.DONE
            self.result_code[db_name] = ResultCode.NOTHING_TODO
    if self.args.unlock:
        # --unlock: drop the lock if present, otherwise report NOTHING_TODO.
        if ActionTracker.is_packet_locked(db_conn, self.sys_conf.schema_location, self.args.packet_name):
            ActionTracker.set_packet_unlock(db_conn, self.sys_conf.schema_location, self.args.packet_name)
            self.result_code[db_name] = ResultCode.SUCCESS
            self.logger.log(
                '--------> Packet \'%s\' has been unlocked in \'%s\' database!' % \
                (self.args.packet_name, db_name),
                "Info",
                do_print=True
            )
        else:
            self.result_code[db_name] = ResultCode.NOTHING_TODO
            self.logger.log(
                '--------> Packet \'%s\' not locked in \'%s\' database!' % \
                (self.args.packet_name, db_name),
                "Info",
                do_print=True
            )
    db_conn.close()
def cluster_specific_execute(func):
    """Run func(db_conn) once against the cluster, with reconnect/failover.

    Retries on transient errors (query cancel, shutdown, server-not-ready)
    by dropping the connection and sleeping; after reconnect_attempt failed
    connect attempts it rotates db_hosts to try the next node.

    :param func: callable taking an open connection; executed exactly once
        on success.
    """
    reconnect_attempt = 0
    thread_name = "Main"
    # Rotating deque of candidate hosts; db_hosts[0] is the current target.
    db_hosts = collections.deque(
        [v for _, v in CLUSTERTEST.sys_conf.dbs_dict.items()])
    db_local = None
    do_work = True
    while do_work:
        try:
            if db_local is None and reconnect_attempt < CLUSTERTEST.sys_conf.reconnect_attempt:
                # Reconnect to the current host.
                CLUSTERTEST.logger.log(
                    "Thread '%s': connecting... reconnect_attempt = %d" %
                    (thread_name, reconnect_attempt),
                    "Info", do_print=True)
                db_local = postgresql.open(db_hosts[0])
                db_local.execute("SET application_name = '%s'" %
                                 (CLUSTERTEST.sys_conf.application_name))
            elif db_local is None and reconnect_attempt >= CLUSTERTEST.sys_conf.reconnect_attempt:
                # change connection host
                CLUSTERTEST.logger.log(
                    "Thread '%s': connecting to another host... " % thread_name,
                    "Info", do_print=True)
                invalid_host = db_hosts.popleft()
                db_hosts.append(invalid_host)
                reconnect_attempt = 0
                db_local = postgresql.open(db_hosts[0])
                db_local.execute("SET application_name = '%s'" %
                                 (CLUSTERTEST.sys_conf.application_name))
            func(db_local)
            do_work = False  # success: leave the retry loop
        except (postgresql.exceptions.QueryCanceledError,
                postgresql.exceptions.AdminShutdownError,
                postgresql.exceptions.CrashShutdownError,
                postgresql.exceptions.ServerNotReadyError) as e:
            # Transient server-side failure: drop the connection and retry
            # the SAME host after a pause.
            CLUSTERTEST.logger.log(
                'Exception in \'%s\': %s. Reconnecting after %d sec...' %
                (thread_name, str(e),
                 CLUSTERTEST.sys_conf.conn_exception_sleep_interval),
                "Error", do_print=True)
            db_local = None
            time.sleep(CLUSTERTEST.sys_conf.conn_exception_sleep_interval)
        except (postgresql.exceptions.ClientCannotConnectError) as e:
            # Cannot reach the host at all: count the attempt so the host
            # rotation above eventually kicks in.
            reconnect_attempt += 1
            CLUSTERTEST.logger.log(
                'Exception in \'%s\': %s. Reconnecting after %d sec... reconnect_attempt = %d' %
                (thread_name, str(e),
                 CLUSTERTEST.sys_conf.conn_exception_sleep_interval,
                 reconnect_attempt),
                "Error", do_print=True)
            db_local = None
            time.sleep(CLUSTERTEST.sys_conf.conn_exception_sleep_interval)
    # db_local is non-None here: the loop only exits after func() succeeds.
    db_local.close()
    CLUSTERTEST.logger.log('Finished %s' % thread_name, "Info", do_print=True)
def worker_func(thread_name):
    """Money-transfer workload thread with reconnect/failover handling.

    Repeatedly performs a two-row SELECT ... FOR UPDATE + two UPDATEs
    inside one transaction against public.accounts, until the shared
    operation counter reaches args.operations or the test is terminated.
    Deadlocks retry the task; read-only/connection errors rotate hosts.

    :param thread_name: label used in log messages.
    """
    # https://www.2ndquadrant.com/en/blog/postgresql-anti-patterns-read-modify-write-cycles/
    CLUSTERTEST.logger.log('Started %s' % thread_name, "Info", do_print=True)
    reconnect_attempt = 0
    # Rotating deque of candidate hosts; db_hosts[0] is the current target.
    db_hosts = collections.deque(
        [v for _, v in CLUSTERTEST.sys_conf.dbs_dict.items()])
    db_local = None
    while CLUSTERTEST.operation_num < CLUSTERTEST.args.operations:
        if CLUSTERTEST.is_terminate:
            break
        CLUSTERTEST.operation_num += 1
        if CLUSTERTEST.operation_num % 10 == 0:
            CLUSTERTEST.logger.log('Progress %s' % str(
                round(
                    float(CLUSTERTEST.operation_num) * 100 /
                    CLUSTERTEST.args.operations, 2)) + "%",
                "Info", do_print=True)
        do_work = True

        def execute_task():
            # One atomic transfer: lock two random accounts, move a random
            # amount between them. Closure over db_local.
            with db_local.xact():
                result = get_resultset(
                    db_local, """
                    SELECT id, balance
                    FROM public.accounts
                    ORDER BY random() LIMIT 2
                    FOR UPDATE
                """)
                from_account = result[0]
                to_account = result[1]
                amount = round(random.uniform(0.01, 100), 2)
                time.sleep(random.uniform(
                    0.1, 0.2))  # emulate billing calculation delay
                db_local.execute("""
                    UPDATE public.accounts
                    SET balance = balance - %s::money
                    WHERE id = %d
                """ % (str(amount), from_account[0]))
                time.sleep(random.uniform(
                    0.1, 0.2))  # emulate billing calculation delay
                db_local.execute("""
                    UPDATE public.accounts
                    SET balance = balance + %s::money + 1::money
                    WHERE id = %d
                """ % (str(amount), to_account[0]))
            # success: next task
            return True

        while do_work:
            try:
                if db_local is None and reconnect_attempt < CLUSTERTEST.sys_conf.reconnect_attempt:
                    # Reconnect to the current host and register the backend
                    # pid so the test harness can find/kill this session.
                    CLUSTERTEST.logger.log(
                        "Thread '%s': connecting... reconnect_attempt = %d" %
                        (thread_name, reconnect_attempt),
                        "Info", do_print=True)
                    db_local = postgresql.open(db_hosts[0])
                    db_local.execute("SET application_name = '%s'" %
                                     (CLUSTERTEST.sys_conf.application_name))
                    current_pid = get_scalar(db_local, "SELECT pg_backend_pid()")
                    CLUSTERTEST.db_conns[current_pid] = db_local
                elif db_local is None and reconnect_attempt >= CLUSTERTEST.sys_conf.reconnect_attempt:
                    # change connection host
                    CLUSTERTEST.logger.log(
                        "Thread '%s': connecting to another host... " % thread_name,
                        "Info", do_print=True)
                    invalid_host = db_hosts.popleft()
                    db_hosts.append(invalid_host)
                    reconnect_attempt = 0
                    db_local = postgresql.open(db_hosts[0])
                    db_local.execute("SET application_name = '%s'" %
                                     (CLUSTERTEST.sys_conf.application_name))
                    current_pid = get_scalar(db_local, "SELECT pg_backend_pid()")
                    CLUSTERTEST.db_conns[current_pid] = db_local
                if execute_task():
                    do_work = False
            except (postgresql.exceptions.DeadlockError) as e:
                # Deadlock: roll back (if a tx is open) and retry the task
                # on the same connection.
                CLUSTERTEST.logger.log(
                    'Exception in \'%s\': %s. DeadlockError' %
                    (thread_name, str(e)),
                    "Error", do_print=True)
                # repeat task on deadlock
                try:
                    db_local.execute("ROLLBACK")
                except (postgresql.exceptions.NoActiveTransactionError):
                    pass
            except (postgresql.exceptions.ReadOnlyTransactionError) as e:
                # We are talking to a replica after switchover: rotate hosts.
                CLUSTERTEST.logger.log(
                    'Exception in \'%s\': %s. ReadOnlyTransactionError' %
                    (thread_name, str(e)),
                    "Error", do_print=True)
                db_local = None
                # switch connection on switchover
                invalid_host = db_hosts.popleft()
                db_hosts.append(invalid_host)
                time.sleep(CLUSTERTEST.sys_conf.conn_exception_sleep_interval)
            except (postgresql.exceptions.QueryCanceledError,
                    postgresql.exceptions.AdminShutdownError,
                    postgresql.exceptions.CrashShutdownError,
                    postgresql.exceptions.ServerNotReadyError) as e:
                if CLUSTERTEST.is_terminate:
                    return
                # Transient server-side failure: drop the connection and
                # retry the same host after a pause.
                CLUSTERTEST.logger.log(
                    'Exception in \'%s\': %s. Reconnecting after %d sec...' %
                    (thread_name, str(e),
                     CLUSTERTEST.sys_conf.conn_exception_sleep_interval),
                    "Error", do_print=True)
                db_local = None
                time.sleep(CLUSTERTEST.sys_conf.conn_exception_sleep_interval)
            except (postgresql.exceptions.ClientCannotConnectError,
                    postgresql.exceptions.ProtocolError,
                    postgresql.exceptions.ConnectionFailureError,
                    ConnectionResetError) as e:
                # Cannot reach the host: count the attempt so host rotation
                # above eventually kicks in.
                reconnect_attempt += 1
                CLUSTERTEST.logger.log(
                    'Exception in \'%s\': %s. Reconnecting after %d sec... reconnect_attempt = %d' %
                    (thread_name, str(e),
                     CLUSTERTEST.sys_conf.conn_exception_sleep_interval,
                     reconnect_attempt),
                    "Error", do_print=True)
                db_local = None
                time.sleep(CLUSTERTEST.sys_conf.conn_exception_sleep_interval)
    db_local.close()
    CLUSTERTEST.logger.log('Finished %s' % thread_name, "Info", do_print=True)
def emulate_workload():
    """After a short delay, run a blocking VACUUM FULL on the blocker table."""
    time.sleep(3)
    vac_conn = postgresql.open(
        main.sys_conf.dbs_dict[self.test_dbc_01])
    vac_conn.execute("""vacuum full public.test_blocker_tx_tbl""")
    vac_conn.close()