def inr_recover(port, day):
    # Run the prepare step for the full backup first.
    full_prepare_cmd = cmd_init + option + cmd_base
    print(full_prepare_cmd)
    rc, stdout, stderr = exec_cmd(full_prepare_cmd)
    if rc != 0:
        print(stderr)
        return
    print('\033[1;31mfullbackup prepare is ok!\033[0m')
    inr_dirs.sort()
    if len(inr_dirs) > 1:
        # Prepare every incremental directory except the newest one.
        if mid(recover_path, inr_dirs[:-1], port) == False:
            return
    # Prepare the newest incremental directory.
    b = inr_dirs[-1]
    cmd = cmd_init + cmd_inrs.format(recover_path / 'base', recover_path / b)
    print(cmd)
    rc, stdout, stderr = exec_cmd(cmd)
    print(rc, stdout, stderr)
    if rc != 0:
        return
    print('\033[1;31mrecover is ok!\033[0m')
    # Copy the prepared backup back into the recovery datadir.
    full_recover_cmd = "xtrabackup --datadir=/data0/recover_work/recover " \
                       "--socket=/tmp/mysql_{}.sock " \
                       "--copy-back --target-dir={}".format(port, recover_path / 'base')
    print(full_recover_cmd)
    rc, stdout, stderr = exec_cmd(full_recover_cmd)
    print(rc, stderr)
    if rc == 0:
        print('\033[1;31mcopyback is complete. path: %s\033[0m' % (recover_path / 'recover'))
def arch_dump():
    # Create the archive directory on the NFS host.
    cmd = "ssh {}".format(NFSHOST) + ' "/bin/mkdir -p {}" '.format(arch_dir)
    rc, stdout, stderr = exec_cmd(cmd)
    if rc != 0:
        sender.send_all("mkdir {} failed. stderr: {}".format(arch_dir, stderr))
        return
    # Push the local dump to the NFS host (bandwidth-limited), then remove the local copy.
    cmd = "rsync -avzP --bwlimit=5000 {} {}:{}".format(local_dir, NFSHOST, arch_dir)
    rc, stdout, stderr = exec_cmd(cmd)
    if rc != 0:
        sender.send_all("rsync of {} failed. stderr: {}".format(local_dir, stderr))
        return
    shutil.rmtree(local_dir)
def week_slow_log():
    for port in ports:
        # dir_bin_all_binlog(port)[0] is the datadir of this instance.
        datadir = dir_bin_all_binlog(port)[0]
        # Rename the current slow log with today's date suffix.
        cmd = "mv {}/slow.log {}/slow-{}.log".format(datadir, datadir, today)
        rc, stdout, stderr = exec_cmd(cmd)
        if rc != 0:
            print("flush_slow_log.py: mv failed. stderr: {}".format(stderr))
        # Reopen the slow log so MySQL starts writing to a fresh slow.log.
        cmd = "/usr/local/mysql/bin/mysqladmin -uroot -S /tmp/mysql_{}.sock flush-logs slow".format(port)
        rc, stdout, stderr = exec_cmd(cmd)
        if rc != 0:
            print("flush_slow_log.py: flush-logs slow failed. stderr: {}".format(stderr))
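# Note: dir_bin_all_binlog() is used by week_slow_log() above and bak_bin() below
# but is defined elsewhere in this repo. A minimal sketch of the assumed interface
# (return the datadir, the binlog basename and the list of binlog files for a port)
# is given here for reference; the query and the globbing below are illustrative,
# not the original implementation.
from pathlib import Path

def dir_bin_all_binlog(port):
    # Ask the instance for its datadir and binlog basename (MySQL 5.6+ variables).
    cmd = ("/usr/local/mysql/bin/mysql -uroot -S /tmp/mysql_{}.sock -N "
           "-e 'select @@datadir, @@log_bin_basename;'".format(port))
    rc, stdout, stderr = exec_cmd(cmd)
    datadir_str, log_bin_basename = stdout.strip().split("\t")
    datadir = Path(datadir_str)
    log_bin = Path(log_bin_basename).name  # e.g. "mysql-bin"
    # Numbered binlog files, excluding the .index file itself.
    all_binlog = sorted(p for p in datadir.glob(log_bin + ".*") if p.suffix != ".index")
    return datadir, log_bin, all_binlog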
def rsync_binlog(binlog, port):
    # Archive directory for this port on the NFS host.
    dest = archive_today_dir / "{}".format(port)
    cmd = "rsync -avzP --bwlimit=5000 {} {}:{}".format(binlog, NFSHOST, dest)
    rc, stdout, stderr = exec_cmd(cmd)
    if rc != 0:
        sender.send_all("rsync binlog failed. cmd: {}. stderr: {}".format(cmd, stderr))
def exec_bash_cmd(cmd):
    # Run a shell command, alert on failure, and return its stripped stdout.
    rc, stdout, stderr = exec_cmd(cmd)
    if rc != 0:
        sender.send_all("exec_bash_cmd failed. cmd: {}. stderr: {}".format(cmd, stderr))
    result = stdout.strip()
    return result
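# Note: exec_cmd() is called throughout this file but defined elsewhere. The
# functions here assume it runs a shell command and returns the exit code plus
# decoded stdout/stderr; a minimal sketch under that assumption:
import subprocess

def exec_cmd(cmd):
    # Run the command through a shell and capture both output streams.
    proc = subprocess.Popen(cmd, shell=True,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    return proc.returncode, stdout.decode('utf-8', 'replace'), stderr.decode('utf-8', 'replace')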
def full_recovery(recovery_base):
    cmd = "/usr/bin/xtrabackup --prepare --target-dir={}".format(recovery_base)
    print(cmd)
    rc, stdout, stderr = exec_cmd(cmd)
    if rc == 0:
        print('\033[1;31m***recover is done!***\033[0m')
    else:
        print(stderr)
def full_recover(port, recover_path):
    # Restore from a full backup: prepare it, then copy it back.
    cmd_pre = "--apply-log --target-dir={}".format(recover_path / 'base')
    full_recover_cmd = "xtrabackup --datadir=/data0/recover_work/recover " \
                       "--socket=/tmp/mysql_{}.sock " \
                       "--copy-back --target-dir={}".format(port, recover_path / 'base')
    full_prepare_cmd = cmd_init + cmd_pre
    print(full_prepare_cmd)
    rc, stdout, stderr = exec_cmd(full_prepare_cmd)
    if rc == 0:
        print('\033[1;31m***prepare is done!***\033[0m')
        print(full_recover_cmd)
        rc, stdout, stderr = exec_cmd(full_recover_cmd)
        if rc == 0:
            print('\033[1;31m***recover is done!***\033[0m')
        else:
            print(stderr)
    else:
        print('prepare failed!')
def sqldump(port):
    # Logical dump with mydumper, excluding the system schemas.
    base_port_dir = backup_today / "{}_{}".format(hostname, port)
    cmd = "{} -u root -S /tmp/mysql_{}.sock " \
          "--regex '^(?!(mysql|test|information_schema|performance_schema|sys))' " \
          "-o {} --triggers --events --routines -c".format(mydumper, port, base_port_dir)
    rc, stdout, stderr = exec_cmd(cmd)
    if rc != 0:
        sender.send_all("mysql_sqldump.py: sqldump failed. port: {}. error: {}".format(port, stderr))
def incr_recovery(last_incr_dir, recovery_base):
    """
    :param last_incr_dir: the incremental backup directory with the newest date
    :param recovery_base: the base (full backup) directory
    """
    # Collect every incremental directory older than the chosen one.
    incr_dirs_except_choose = []
    for d in last_incr_dir.parent.iterdir():
        if d.name != 'base' and d.name < last_incr_dir.name:
            incr_dirs_except_choose.append(d.name)
    incr_dirs_except_choose = sorted(incr_dirs_except_choose)
    # Prepare the base backup with --apply-log-only so incrementals can still be applied.
    cmd_base = "/usr/bin/xtrabackup --prepare " \
               "--apply-log-only --target-dir={}".format(recovery_base)
    print(cmd_base)
    rc, stdout, stderr = exec_cmd(cmd_base)
    if rc != 0:
        print('\033[1;31m***full backup prepare failed!*** {} \033[0m'.format(cmd_base))
        print(stderr)
        return
    print('\033[1;31m***full backup recovery is done!***\033[0m')
    # Apply every incremental directory except the chosen one, still with --apply-log-only.
    for d in incr_dirs_except_choose:
        cmd = cmd_base + " --incremental-dir={}".format(last_incr_dir.parent / d)
        print(cmd)
        rc, stdout, stderr = exec_cmd(cmd)
        if rc != 0:
            print('\033[1;31m***incremental recovery failed!*** {} \033[0m'.format(cmd))
            print(stderr)
            return
        print('\033[1;31m***incremental backup recovery is done!***\033[0m')
    # Apply the chosen (newest) incremental directory without --apply-log-only.
    cmd = "/usr/bin/xtrabackup --prepare " \
          "--target-dir={}".format(recovery_base) + \
          " --incremental-dir={}".format(last_incr_dir)
    print(cmd)
    rc, stdout, stderr = exec_cmd(cmd)
    if rc != 0:
        print('\033[1;31m***incremental recovery failed!*** {} \033[0m'.format(cmd))
        print(stderr)
        return
    print('\033[1;31m***last incremental recovery is done!***\033[0m')
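# Example driver for incr_recovery()/full_recovery(), illustrative only: the
# /data0/recover_work layout with a 'base' full backup next to date-named
# incremental directories is an assumption, not taken from the original code.
if __name__ == '__main__':
    from pathlib import Path
    recover_path = Path('/data0/recover_work')   # assumed working directory
    recovery_base = recover_path / 'base'        # full backup restored into ./base
    # Incremental directories are named by date, so the lexical maximum is the newest.
    incr_dirs = sorted(d for d in recover_path.iterdir() if d.is_dir() and d.name != 'base')
    if incr_dirs:
        incr_recovery(incr_dirs[-1], recovery_base)
    else:
        full_recovery(recovery_base)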
def arch_xtra():
    # Create the financial and non-financial archive directories on the NFS host.
    cmd = "ssh {} ".format(NFSHOST) + \
          ' "/bin/mkdir -p {} && /bin/mkdir -p {}" '.format(no_fina_path, fina_path)
    rc, stdout, stderr = exec_cmd(cmd)
    if rc != 0:
        #sender.send_all("mkdir {} or {} failed. stderr: {}".format(no_fina_path, fina_path, stderr))
        print("mkdir {} or {} failed. stderr: {}".format(no_fina_path, fina_path, stderr))
        return
    # get_mysql_running_ports() returns the ports of all live MySQL instances.
    for port in get_mysql_running_ports():
        nxbd = local_path / str(port)
        if not nxbd.exists():
            #sender.send_all("{} does not exist, maybe this port is a new instance".format(nxbd))
            print("{} does not exist, maybe this port is a new instance".format(nxbd))
            continue
        # Financial instances are archived to a separate path.
        if port in fina_ports:
            copy_cmd = "rsync -avzP --bwlimit=5000 {} {}:{}".format(nxbd, NFSHOST, fina_path)
        else:
            copy_cmd = "rsync -avzP --bwlimit=5000 {} {}:{}".format(nxbd, NFSHOST, no_fina_path)
        rc, stdout, stderr = exec_cmd(copy_cmd)
        if rc != 0:
            #sender.send_all("cmd: {} failed. stderr: {}".format(copy_cmd, stderr))
            print("cmd: {} failed. stderr: {}".format(copy_cmd, stderr))
            continue
        # Remove the local backup directory once it has been copied off-host.
        shutil.rmtree(nxbd)
    # rmdir only succeeds on an empty directory: if any copy above failed, the
    # parent directory is not empty and the error is reported here.
    cmd = "rmdir {}".format(local_path)
    rc, stdout, stderr = exec_cmd(cmd)
    if rc != 0:
        #sender.send_all("rmdir failed. dir: {}, stderr: {}".format(local_path, stderr))
        print("rmdir failed. dir: {}, stderr: {}".format(local_path, stderr))
def mid(recover_path, incr_dirs, port):
    # Prepare every incremental directory in incr_dirs against the base backup.
    cmd_init = "xtrabackup --prepare --socket=/tmp/mysql_{}.sock ".format(port)
    for i in incr_dirs:
        cmd = cmd_init + option + cmd_inrs.format(recover_path / 'base', recover_path / i)
        print(cmd)
        rc, stdout, stderr = exec_cmd(cmd)
        if rc == 0:
            print('\033[1;31minr_prepare is ok\033[0m')
        else:
            print('\033[1;31minr_prepare failed\033[0m')
            return False
    return True
def bak_bin():
    # Find the newest (lexically largest) backup date directory on the archive host.
    cmd1 = "ssh {}".format(NFSHOST) + ' "/bin/ls {}"'.format(archive_base_dir)
    list_date = exec_bash_cmd(cmd1).split("\n")
    max_date = max(list_date)
    # Create today's archive directory per port on the archive host.
    for port in running_ports():
        cmd_mkdir_date = "ssh {}".format(NFSHOST) + ' "/bin/mkdir -p {}/{}" '.format(
            archive_today_dir, port)
        rc, stdout, stderr = exec_cmd(cmd_mkdir_date)
        if rc != 0:
            sender.send_all("mkdir archive date dir failed. cmd: {}. stderr: {}".format(
                cmd_mkdir_date, stderr))
    # Compare the number of ports in the last archive with the number of financial
    # instances: a newly added financial instance must have all of its binlogs archived.
    archive_max_dir = archive_base_dir / "{}".format(max_date)
    cmd1 = "ssh {}".format(NFSHOST) + ' "/bin/ls {}"'.format(archive_max_dir)
    list_port = exec_bash_cmd(cmd1).split("\n")
    if len(list_port) != len(financial_ports):
        for p in running_ports():
            # Get the binlog prefix, the datadir and the path to the binlog index file.
            log_bin = dir_bin_all_binlog(p)[1]
            datadir = dir_bin_all_binlog(p)[0]
            index = log_bin + ".index"
            binlog_index = datadir / "{}".format(index)
            if str(p) not in list_port:
                # Port missing from the last archive: it was never backed up, so copy all binlogs.
                all_binlog = dir_bin_all_binlog(p)[2]
                cmd = "/usr/local/mysql/bin/mysql -uroot -S /tmp/mysql_{}.sock -e 'flush logs;'".format(p)
                rc, stdout, stderr = exec_cmd(cmd)
                if rc != 0:
                    sender.send_all("flush logs failed. cmd: {}. stderr: {}".format(cmd, stderr))
                # flush logs just rotated to a new binlog, so skip the current (newest) one.
                tail_bin = tail_n(binlog_index, 1).lstrip("./")
                for binlog in all_binlog:
                    if str(binlog) != str(datadir / "{}".format(tail_bin)):
                        rsync_binlog(binlog, p)
            else:
                # Port already archived last time: only the latest binlog needs to be copied.
                tail_bin = tail_n(binlog_index, 1).lstrip("./")
                cmd = "/usr/local/mysql/bin/mysql -uroot -S /tmp/mysql_{}.sock -e 'flush logs;'".format(p)
                rc, stdout, stderr = exec_cmd(cmd)
                if rc != 0:
                    sender.send_all("flush logs failed. cmd: {}. stderr: {}".format(cmd, stderr))
                tail_binlog = datadir / "{}".format(tail_bin)
                rsync_binlog(tail_binlog, p)
            # The binlog index file is archived for every port.
            rsync_binlog(binlog_index, p)
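# Note: tail_n() is referenced in bak_bin() above but defined elsewhere. It is
# assumed to return the last n lines of a text file as a stripped string; a
# minimal sketch under that assumption:
def tail_n(path, n):
    with open(str(path)) as f:
        lines = f.readlines()
    return "".join(lines[-n:]).strip()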