def __init__(self, db, host, port, user, password, tables=None):
    """Store database connection settings on the instance.

    Args:
        db: database name.
        host: database host.
        port: database port.
        user: database user.
        password: database password.
        tables: optional iterable of table names; None means all tables.

    Calls logs.critical (which presumably aborts — confirm against the
    logs helper) when any required field is missing.
    """
    # PEP 8: identity comparison with None uses `is`, never `==`.
    if any(v is None for v in (db, host, port, user, password)):
        logs.critical("db or host or port... is nil.")
    self.db = db
    self.host = host
    self.port = port
    self.user = user
    self.password = password
    self.tables = tables
def load(self):
    """Load the previously dumped data into the destination database.

    Triggers a fresh dump first when the dump metadata file is absent,
    reports progress through rest.sync_stat, and escalates a loader
    failure via logs.critical.
    """
    # No dump metadata on disk yet -> produce the dump before loading.
    if not path.isfile(self.src.getDumpedMeta()):
        self.dump()

    rest.sync_stat(self.notice, 'Loading')

    loader_cmds = self.dest.toLoader()
    logs.info("loader: %s", loader_cmds)
    try:
        shell.run('[fatal]', loader_cmds)
    except subprocess.CalledProcessError as err:
        # Surface the loader's captured output to the status endpoint
        # before escalating.
        rest.sync_stat(self.notice, 'LoadError', reason=err.output)
        logs.critical("load error")
def dump(self):
    """Dump the source database to its data directory.

    Clears any stale data directory first, reports progress through
    rest.sync_stat, and escalates a dumper failure via logs.critical.
    """
    # PEP 8: identity comparison with None uses `is`, never `==`.
    if self.src is None:
        logs.critical("src database is nil")

    datadir = self.src.getDataDir()
    # Remove all old data so the new dump starts from a clean directory.
    if path.isdir(datadir):
        shutil.rmtree(datadir)

    rest.sync_stat(self.notice, 'Dumping')

    cmds = self.src.toDumper()
    logs.info("dumper: %s", cmds)
    try:
        shell.run('CRITICAL', cmds)
    except subprocess.CalledProcessError as e:
        # Surface the dumper's captured output to the status endpoint
        # before escalating.
        rest.sync_stat(self.notice, 'DumpError', reason=e.output)
        logs.critical("dump error")
def sync(api, arr):
    """PATCH the migration status to the tidb-operator API, with retries.

    Args:
        api: status endpoint URL; aborts via logs.critical when None.
        arr: extra status fields appended after the module-level
            `operator` value (defined elsewhere in this file).

    Retries up to 60 times, sleeping 60 seconds between attempts, and
    escalates via logs.critical when all attempts are exhausted.
    """
    # PEP 8: identity comparison with None uses `is`, never `==`.
    if api is None:
        logs.critical("sync state api is nil")

    st = [operator]
    st.extend(arr)
    payload = json.dumps(st).strip()

    for _ in range(60):
        try:
            r = requests.patch(api, data=payload)
            if r.status_code == 200:
                return
            # NOTE(review): this message promises a retry, but
            # logs.critical elsewhere appears to be fatal — confirm
            # which behavior is intended.
            logs.critical(
                "can't synchronize the migration status and wait for 1 minute to try again: %s",
                r.reason)
        except requests.exceptions.ConnectionError as ce:
            logs.error("can't connect to tidb-operator, retry after 60s: %s", ce)
        # Sleep between attempts; placed at loop level so both the
        # non-200 and connection-error paths wait before retrying, as
        # the log messages describe.
        time.sleep(60)

    logs.critical("retry 60 times after exiting")
def __init__(self, src, dest, notice=None):
    """Store the migration's source/destination databases and notice hook.

    Args:
        src: source database object (may be None if dest is given).
        dest: destination database object (may be None if src is given).
        notice: optional notification target passed to rest.sync_stat.

    Calls logs.critical (which presumably aborts — confirm against the
    logs helper) only when BOTH endpoints are missing, matching the
    "both src and dest" message.
    """
    # PEP 8: identity comparison with None uses `is`, never `==`.
    if src is None and dest is None:
        logs.critical("both src and dest database is nil")
    self.src = src
    self.dest = dest
    self.notice = notice