def all_procedure(prepare, backup, partial, verbose, log, defaults_file):
    logger.setLevel(log)
    formatter = logging.Formatter(
        fmt='%(asctime)s %(levelname)-8s %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S')

    if verbose:
        ch = logging.StreamHandler()
        ch.setFormatter(formatter)
        logger.addHandler(ch)

    validate_file(defaults_file)
    config = GeneralClass(defaults_file)
    pid_file = pid.PidFile(piddir=config.pid_dir)

    try:
        with pid_file:  # Use PidFile for locking to a single instance
            if (not prepare) and (not backup) and (not partial) and (not defaults_file):
                print("ERROR: you must give an option, run with --help for available options")
            elif prepare:
                a = Prepare(config=defaults_file)
                a.prepare_backup_and_copy_back()
                # print("Prepare")
            elif backup:
                b = Backup(config=defaults_file)
                b.all_backup()
                # print("Backup")
            elif partial:
                c = PartialRecovery(config=defaults_file)
                c.final_actions()
    except pid.PidFileAlreadyLockedError as error:
        if hasattr(config, 'pid_runtime_warning'):
            if time.time() - os.stat(pid_file.filename).st_ctime > config.pid_runtime_warning:
                pid.fh.seek(0)
                pid_str = pid.fh.read(16).split("\n", 1)[0].strip()
                logger.critical(
                    "Backup (pid: " + pid_str + ") has been running for longer than: " +
                    str(humanfriendly.format_timespan(config.pid_runtime_warning)))
        # logger.warning("Pid file already exists: " + str(error))
    except pid.PidFileAlreadyRunningError as error:
        if hasattr(config, 'pid_runtime_warning'):
            if time.time() - os.stat(pid_file.filename).st_ctime > config.pid_runtime_warning:
                pid.fh.seek(0)
                pid_str = pid.fh.read(16).split("\n", 1)[0].strip()
                logger.critical(
                    "Backup (pid: " + pid_str + ") has been running for longer than: " +
                    str(humanfriendly.format_timespan(config.pid_runtime_warning)))
        # logger.warning("Pid already running: " + str(error))
    except pid.PidFileUnreadableError as error:
        logger.warning("Pid file can not be read: " + str(error))
    except pid.PidFileError as error:
        logger.warning("Generic error with pid file: " + str(error))
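# Illustrative sketch (not from the original source): a minimal, standalone example
# of the single-instance locking pattern used above, assuming only the third-party
# `pid` package. The pid name and directory are hypothetical.
import pid

def run_single_instance():
    try:
        with pid.PidFile(pidname='autoxtrabackup_demo', piddir='/tmp'):
            # Critical section: only the process holding the pid file gets here.
            print("lock acquired, running backup work")
    except pid.PidFileAlreadyLockedError:
        # A second invocation while the first is still running lands here.
        print("another instance is already running; exiting")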
def create_backup_archives(self):
    # Create .tar.gz archive files of taken backups
    for i in os.listdir(self.full_dir):
        if len(os.listdir(self.full_dir)) == 1 or i != max(os.listdir(self.full_dir)):
            logger.debug("Preparing backups prior to archiving them...")
            if hasattr(self, 'prepare_archive'):
                logger.debug("Started to prepare backups prior to archiving!")
                prepare_obj = Prepare(config=self.conf, dry_run=self.dry, tag=self.tag)
                prepare_obj.prepare_inc_full_backups()

            if hasattr(self, 'move_archive') and (int(self.move_archive) == 1):
                dir_name = self.archive_dir + '/' + i + '_archive'
                try:
                    shutil.copytree(self.backupdir, dir_name)
                except Exception as err:
                    logger.error("FAILED: Archiving")
                    logger.error(err)
                    raise
                else:
                    return True
            else:
                # Multi-core tar utilizing pigz.
                # pigz defaults to the number of available cores, or 8 if that cannot be determined.
                # Test whether pigz is available.
                try:
                    subprocess.call(["pigz", "-q"])
                    run_tar = "tar cf - %s %s | pigz > %s" % (
                        self.full_dir, self.inc_dir,
                        self.archive_dir + '/' + i + '.tar.gz')
                except OSError as e:
                    if e.errno == errno.ENOENT:  # requires `import errno`
                        # pigz binary not found: fall back to single-core tar.
                        logger.warning("pigz executable not available. Defaulting to single-core tar")
                        run_tar = "tar -zcf %s %s %s" % (
                            self.archive_dir + '/' + i + '.tar.gz',
                            self.full_dir, self.inc_dir)
                    else:
                        # Something else went wrong while trying to run pigz
                        raise RuntimeError("FAILED: Archiving -> {}".format(e))

                logger.debug("Started to archive previous backups")
                logger.debug("The following backup command will be executed {}".format(run_tar))
                status, output = subprocess.getstatusoutput(run_tar)
                if status == 0:
                    logger.debug("OK: Old full backup and incremental backups archived!")
                    return True
                else:
                    logger.error("FAILED: Archiving")
                    logger.error(output)
                    raise RuntimeError("FAILED: Archiving -> {}".format(output))
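# Illustrative sketch (not from the original source): the same "use pigz if present,
# otherwise plain gzip" decision, expressed with shutil.which() instead of probing
# the binary via subprocess.call(). Paths in the example are hypothetical.
import shutil

def build_tar_command(archive_file, *src_dirs):
    dirs = " ".join(src_dirs)
    if shutil.which("pigz"):
        # Multi-core compression: stream tar output through pigz.
        return "tar cf - {} | pigz > {}".format(dirs, archive_file)
    # Single-core fallback: plain gzip via tar -z.
    return "tar -zcf {} {}".format(archive_file, dirs)

# Example (hypothetical paths):
# build_tar_command("/backups/archive/full.tar.gz", "/backups/full", "/backups/inc")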
def all_procedure(prepare, backup, partial):
    if (not prepare) and (not backup) and (not partial):
        print("ERROR: you must give an option, run with --help for available options")
    elif prepare:
        a = Prepare()
        a.prepare_backup_and_copy_back()
        # print("Prepare")
    elif backup:
        b = Backup()
        b.all_backup()
        # print("Backup")
    elif partial:
        c = PartialRecovery()
        c.final_actions()
def create_backup_archives(self):
    # Create .tar.gz archive files of taken backups
    for i in os.listdir(self.full_dir):
        if len(os.listdir(self.full_dir)) == 1 or i != max(os.listdir(self.full_dir)):
            run_tar = "tar -zcf %s %s %s" % (
                self.archive_dir + '/' + i + '.tar.gz',
                self.full_dir, self.inc_dir)
            logger.debug("Preparing backups prior to archiving them...")
            prepare_obj = Prepare(config=self.conf, dry_run=self.dry, tag=self.tag)
            prepare_obj.prepare_inc_full_backups()
            logger.debug("Started to archive previous backups")
            status, output = subprocess.getstatusoutput(run_tar)
            if status == 0:
                logger.debug("OK: Old full backup and incremental backups archived!")
                return True
            else:
                logger.error("FAILED: Archiving")
                logger.error(output)
                raise RuntimeError("FAILED: Archiving -> {}".format(output))
def create_backup_archives(self):
    # Create .tar.gz archive files of taken backups
    for i in os.listdir(self.full_dir):
        if len(os.listdir(self.full_dir)) == 1 or i != max(os.listdir(self.full_dir)):
            logger.debug("Preparing backups prior to archiving them...")
            if hasattr(self, 'prepare_archive'):
                logger.debug("Started to prepare backups prior to archiving!")
                prepare_obj = Prepare(config=self.conf, dry_run=self.dry, tag=self.tag)
                prepare_obj.prepare_inc_full_backups()

            if hasattr(self, 'move_archive') and (int(self.move_archive) == 1):
                dir_name = self.archive_dir + '/' + i + '_archive'
                try:
                    shutil.copytree(self.backupdir, dir_name)
                except Exception as err:
                    logger.error("FAILED: Archiving")
                    logger.error(err)
                    raise
                else:
                    return True
            else:
                run_tar = "tar -zcf %s %s %s" % (
                    self.archive_dir + '/' + i + '.tar.gz',
                    self.full_dir, self.inc_dir)
                logger.debug("Started to archive previous backups")
                status, output = subprocess.getstatusoutput(run_tar)
                if status == 0:
                    logger.debug("OK: Old full backup and incremental backups archived!")
                    return True
                else:
                    logger.error("FAILED: Archiving")
                    logger.error(output)
                    raise RuntimeError("FAILED: Archiving -> {}".format(output))
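# Illustrative sketch (not from the original source): the move_archive branch above
# boils down to copying the whole backup directory tree into a per-backup archive
# directory. The helper name and paths are hypothetical.
import shutil

def move_backup_to_archive(backupdir, archive_dir, backup_name):
    dir_name = "{}/{}_archive".format(archive_dir, backup_name)
    # copytree() creates dir_name and raises if it already exists.
    shutil.copytree(backupdir, dir_name)
    return dir_name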
def all_procedure(ctx, prepare, backup, partial, tag, show_tags,
                  verbose, log_file, log, defaults_file,
                  dry_run, test_mode, log_file_max_bytes,
                  log_file_backup_count, keyring_vault):
    logger.setLevel(log)
    formatter = logging.Formatter(
        fmt='%(asctime)s %(levelname)-8s %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S')

    if verbose:
        ch = logging.StreamHandler()
        ch.setFormatter(formatter)
        logger.addHandler(ch)

    if log_file:
        try:
            file_handler = RotatingFileHandler(
                log_file,
                mode='a',
                maxBytes=log_file_max_bytes,
                backupCount=log_file_backup_count)
            file_handler.setFormatter(formatter)
            logger.addHandler(file_handler)
        except PermissionError as err:
            exit("{} Please consider running as root or with sudo".format(err))

    validate_file(defaults_file)
    config = GeneralClass(defaults_file)
    pid_file = pid.PidFile(piddir=config.pid_dir)

    try:
        with pid_file:  # Use PidFile for locking to a single instance
            if (prepare is False and backup is False and partial is False and
                    verbose is False and dry_run is False and
                    test_mode is False and show_tags is False):
                print_help(ctx, None, value=True)
            elif show_tags and defaults_file:
                b = Backup(config=defaults_file)
                b.show_tags(backup_dir=b.backupdir)
            elif test_mode and defaults_file:
                # TODO: implement the all-in-one logic for running test mode here
                logger.warning("Enabled Test Mode!!!")
                logger.debug("Starting Test Mode")
                test_obj = RunnerTestMode(config=defaults_file)
                for basedir in test_obj.basedirs:
                    if ('5.7' in basedir) and ('2_4_ps_5_7' in defaults_file):
                        if keyring_vault == 1:
                            test_obj.wipe_backup_prepare_copyback(basedir=basedir, keyring_vault=1)
                        else:
                            test_obj.wipe_backup_prepare_copyback(basedir=basedir)
                    elif ('5.6' in basedir) and ('2_4_ps_5_6' in defaults_file):
                        test_obj.wipe_backup_prepare_copyback(basedir=basedir)
                    elif ('5.6' in basedir) and ('2_3_ps_5_6' in defaults_file):
                        test_obj.wipe_backup_prepare_copyback(basedir=basedir)
                    elif ('5.5' in basedir) and ('2_3_ps_5_5' in defaults_file):
                        test_obj.wipe_backup_prepare_copyback(basedir=basedir)
                    elif ('5.5' in basedir) and ('2_4_ps_5_5' in defaults_file):
                        test_obj.wipe_backup_prepare_copyback(basedir=basedir)
                    else:
                        logger.error("Please pass a proper, already generated config file!")
                        logger.error("Please also check that you have run the prepare_env.bats file")
            elif prepare and not test_mode:
                if not dry_run:
                    if tag:
                        a = Prepare(config=defaults_file, tag=tag)
                        a.prepare_backup_and_copy_back()
                    else:
                        a = Prepare(config=defaults_file)
                        a.prepare_backup_and_copy_back()
                else:
                    logger.warning("Dry run enabled!")
                    logger.warning("Do not recover/copy-back in this mode!")
                    if tag:
                        a = Prepare(config=defaults_file, dry_run=1, tag=tag)
                        a.prepare_backup_and_copy_back()
                    else:
                        a = Prepare(config=defaults_file, dry_run=1)
                        a.prepare_backup_and_copy_back()
            elif backup and not test_mode:
                if not dry_run:
                    if tag:
                        b = Backup(config=defaults_file, tag=tag)
                        b.all_backup()
                    else:
                        b = Backup(config=defaults_file)
                        b.all_backup()
                else:
                    logger.warning("Dry run enabled!")
                    if tag:
                        b = Backup(config=defaults_file, dry_run=1, tag=tag)
                        b.all_backup()
                    else:
                        b = Backup(config=defaults_file, dry_run=1)
                        b.all_backup()
            elif partial:
                if not dry_run:
                    c = PartialRecovery(config=defaults_file)
                    c.final_actions()
                else:
                    logger.critical("Dry run is not implemented for partial recovery!")
    except pid.PidFileAlreadyLockedError as error:
        if hasattr(config, 'pid_runtime_warning'):
            if time.time() - os.stat(pid_file.filename).st_ctime > config.pid_runtime_warning:
                pid.fh.seek(0)
                pid_str = pid.fh.read(16).split("\n", 1)[0].strip()
                logger.critical(
                    "Backup (pid: " + pid_str + ") has been running for longer than: " +
                    str(humanfriendly.format_timespan(config.pid_runtime_warning)))
        # logger.warning("Pid file already exists: " + str(error))
    except pid.PidFileAlreadyRunningError as error:
        if hasattr(config, 'pid_runtime_warning'):
            if time.time() - os.stat(pid_file.filename).st_ctime > config.pid_runtime_warning:
                pid.fh.seek(0)
                pid_str = pid.fh.read(16).split("\n", 1)[0].strip()
                logger.critical(
                    "Backup (pid: " + pid_str + ") has been running for longer than: " +
                    str(humanfriendly.format_timespan(config.pid_runtime_warning)))
        # logger.warning("Pid already running: " + str(error))
    except pid.PidFileUnreadableError as error:
        logger.warning("Pid file can not be read: " + str(error))
    except pid.PidFileError as error:
        logger.warning("Generic error with pid file: " + str(error))
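# Illustrative sketch (not from the original source): a minimal, standalone
# configuration of the RotatingFileHandler used above. The log path and rotation
# limits are hypothetical values, not taken from any config file.
import logging
from logging.handlers import RotatingFileHandler

demo_logger = logging.getLogger("autoxtrabackup_demo")
handler = RotatingFileHandler("/tmp/autoxtrabackup_demo.log",
                              mode='a',
                              maxBytes=2 * 1024 * 1024,  # rotate after ~2 MB
                              backupCount=5)             # keep 5 rotated files
handler.setFormatter(logging.Formatter(
    fmt='%(asctime)s %(levelname)-8s %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S'))
demo_logger.addHandler(handler)
demo_logger.setLevel(logging.INFO)
demo_logger.info("rotating file logging configured")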
def create_backup_archives(self):
    # Create .tar.gz archive files of taken backups
    for i in os.listdir(self.full_dir):
        if len(os.listdir(self.full_dir)) == 1 or i != max(os.listdir(self.full_dir)):
            logger.info("Preparing backups prior to archiving them...")

            if hasattr(self, 'prepare_archive'):
                logger.info("Started to prepare backups prior to archiving!")
                prepare_obj = Prepare(config=self.conf, dry_run=self.dry, tag=self.tag)
                status = prepare_obj.prepare_inc_full_backups()
                if status:
                    logger.info("Backups prepared successfully...")

            if hasattr(self, 'move_archive') and (int(self.move_archive) == 1):
                dir_name = self.archive_dir + '/' + i + '_archive'
                logger.info("move_archive enabled. Moving {} to {}".format(self.backupdir, dir_name))
                try:
                    shutil.copytree(self.backupdir, dir_name)
                except Exception as err:
                    logger.error("FAILED: Move Archive")
                    logger.error(err)
                    raise
                else:
                    return True
            else:
                logger.info("move_archive is disabled. Archiving/compressing the current backup.")
                # Multi-core tar utilizing pigz.
                # pigz defaults to the number of available cores, or 8 if that cannot be determined.
                # Test whether pigz is available.
                logger.info("Testing for pigz...")
                status = ProcessRunner.run_command("pigz --version")
                archive_file = self.archive_dir + '/' + i + '.tar.gz'
                if status:
                    logger.info("Found pigz...")
                    run_tar = "tar --use-compress-program=pigz -cvf {} {} {}" \
                        .format(archive_file, self.full_dir, self.inc_dir)
                else:
                    # pigz not found or not runnable: fall back to single-core tar.
                    logger.warning("pigz executable not available. Defaulting to single-core tar")
                    run_tar = "tar -zcf {} {} {}" \
                        .format(archive_file, self.full_dir, self.inc_dir)

                status = ProcessRunner.run_command(run_tar)
                if status:
                    logger.info("OK: Old full backup and incremental backups archived!")
                    return True
                else:
                    logger.error("FAILED: Archiving")
                    raise RuntimeError("FAILED: Archiving -> {}".format(run_tar))
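# Illustrative sketch (not from the original source): the two multi-core compression
# forms that appear across the versions above, shown side by side. Directory and
# archive paths passed in are hypothetical.
def pigz_tar_commands(archive_file, full_dir, inc_dir):
    # Older form: pipe tar's stream through pigz explicitly.
    piped = "tar cf - {} {} | pigz > {}".format(full_dir, inc_dir, archive_file)
    # Newer form: let tar invoke pigz as its compression program.
    direct = "tar --use-compress-program=pigz -cvf {} {} {}".format(
        archive_file, full_dir, inc_dir)
    return piped, direct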
def all_procedure(ctx, prepare, backup, partial, tag, show_tags,
                  verbose, log_file, log, defaults_file,
                  dry_run, test_mode, log_file_max_bytes,
                  log_file_backup_count, keyring_vault):
    config = GeneralClass(defaults_file)
    formatter = logging.Formatter(
        fmt='%(asctime)s %(levelname)s [%(module)s:%(lineno)d] %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S')

    if verbose:
        ch = logging.StreamHandler()
        # Control console output log level
        ch.setLevel(logging.INFO)
        ch.setFormatter(formatter)
        logger.addHandler(ch)

    if log_file:
        try:
            if config.log_file_max_bytes and config.log_file_backup_count:
                file_handler = RotatingFileHandler(
                    log_file,
                    mode='a',
                    maxBytes=int(config.log_file_max_bytes),
                    backupCount=int(config.log_file_backup_count))
            else:
                file_handler = RotatingFileHandler(
                    log_file,
                    mode='a',
                    maxBytes=log_file_max_bytes,
                    backupCount=log_file_backup_count)
            file_handler.setFormatter(formatter)
            logger.addHandler(file_handler)
        except PermissionError as err:
            exit("{} Please consider running as root or with sudo".format(err))

    # Set the log level in order of precedence:
    # 1. user argument, 2. config file, 3. @click default
    if log is not None:
        logger.setLevel(log)
    elif 'log_level' in config.__dict__:
        logger.setLevel(config.log_level)
    else:
        # This is the fallback default log level.
        logger.setLevel('INFO')

    validate_file(defaults_file)
    pid_file = pid.PidFile(piddir=config.pid_dir)

    try:
        with pid_file:  # Use PidFile for locking to a single instance
            if (prepare is False and backup is False and partial is False and
                    verbose is False and dry_run is False and
                    test_mode is False and show_tags is False):
                print_help(ctx, None, value=True)
            elif show_tags and defaults_file:
                b = Backup(config=defaults_file)
                b.show_tags(backup_dir=b.backupdir)
            elif test_mode and defaults_file:
                logger.warning("Enabled Test Mode!!!")
                logger.info("Starting Test Mode")
                test_obj = RunnerTestMode(config=defaults_file)
                for basedir in test_obj.basedirs:
                    if ('5.7' in basedir) and ('2_4_ps_5_7' in defaults_file):
                        if keyring_vault == 1:
                            test_obj.wipe_backup_prepare_copyback(
                                basedir=basedir, keyring_vault=1)
                        else:
                            test_obj.wipe_backup_prepare_copyback(basedir=basedir)
                    elif ('8.0' in basedir) and ('8_0_ps_8_0' in defaults_file):
                        if keyring_vault == 1:
                            test_obj.wipe_backup_prepare_copyback(
                                basedir=basedir, keyring_vault=1)
                        else:
                            test_obj.wipe_backup_prepare_copyback(basedir=basedir)
                    elif ('5.6' in basedir) and ('2_4_ps_5_6' in defaults_file):
                        test_obj.wipe_backup_prepare_copyback(basedir=basedir)
                    elif ('5.6' in basedir) and ('2_3_ps_5_6' in defaults_file):
                        test_obj.wipe_backup_prepare_copyback(basedir=basedir)
                    elif ('5.5' in basedir) and ('2_3_ps_5_5' in defaults_file):
                        test_obj.wipe_backup_prepare_copyback(basedir=basedir)
                    elif ('5.5' in basedir) and ('2_4_ps_5_5' in defaults_file):
                        test_obj.wipe_backup_prepare_copyback(basedir=basedir)
                    else:
                        logger.error("Please pass a proper, already generated config file!")
                        logger.error("Please also check that you have run the prepare_env.bats file")
            elif prepare and not test_mode:
                if not dry_run:
                    if tag:
                        a = Prepare(config=defaults_file, tag=tag)
                        a.prepare_backup_and_copy_back()
                    else:
                        a = Prepare(config=defaults_file)
                        a.prepare_backup_and_copy_back()
                else:
                    logger.warning("Dry run enabled!")
                    logger.warning("Do not recover/copy-back in this mode!")
                    if tag:
                        a = Prepare(config=defaults_file, dry_run=1, tag=tag)
                        a.prepare_backup_and_copy_back()
                    else:
                        a = Prepare(config=defaults_file, dry_run=1)
                        a.prepare_backup_and_copy_back()
            elif backup and not test_mode:
                if not dry_run:
                    if tag:
                        b = Backup(config=defaults_file, tag=tag)
                        b.all_backup()
                    else:
                        b = Backup(config=defaults_file)
                        b.all_backup()
                else:
                    logger.warning("Dry run enabled!")
                    if tag:
                        b = Backup(config=defaults_file, dry_run=1, tag=tag)
                        b.all_backup()
                    else:
                        b = Backup(config=defaults_file, dry_run=1)
                        b.all_backup()
            elif partial:
                if not dry_run:
                    c = PartialRecovery(config=defaults_file)
                    c.final_actions()
                else:
                    logger.critical("Dry run is not implemented for partial recovery!")
    except pid.PidFileAlreadyLockedError as error:
        if hasattr(config, 'pid_runtime_warning'):
            if time.time() - os.stat(pid_file.filename).st_ctime > config.pid_runtime_warning:
                pid.fh.seek(0)
                pid_str = pid.fh.read(16).split("\n", 1)[0].strip()
                logger.critical(
                    "Backup (pid: " + pid_str + ") has been running for longer than: " +
                    str(humanfriendly.format_timespan(config.pid_runtime_warning)))
        # logger.warning("Pid file already exists: " + str(error))
    except pid.PidFileAlreadyRunningError as error:
        if hasattr(config, 'pid_runtime_warning'):
            if time.time() - os.stat(pid_file.filename).st_ctime > config.pid_runtime_warning:
                pid.fh.seek(0)
                pid_str = pid.fh.read(16).split("\n", 1)[0].strip()
                logger.critical(
                    "Backup (pid: " + pid_str + ") has been running for longer than: " +
                    str(humanfriendly.format_timespan(config.pid_runtime_warning)))
        # logger.warning("Pid already running: " + str(error))
    except pid.PidFileUnreadableError as error:
        logger.warning("Pid file can not be read: " + str(error))
    except pid.PidFileError as error:
        logger.warning("Generic error with pid file: " + str(error))

    logger.info("Xtrabackup command history:")
    for i in ProcessRunner.xtrabackup_history_log:
        logger.info(str(i))
    logger.info("Autoxtrabackup completed successfully!")
    return True
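# Illustrative sketch (not from the original source): the log-level precedence
# applied above (CLI argument > config file > hard-coded default), pulled out into
# a small helper. `cli_log_level` and `config` are hypothetical stand-ins.
def resolve_log_level(cli_log_level, config):
    if cli_log_level is not None:
        return cli_log_level          # 1. explicit user argument wins
    if 'log_level' in config.__dict__:
        return config.log_level       # 2. value from the defaults file
    return 'INFO'                     # 3. fallback default

# Usage: logger.setLevel(resolve_log_level(cli_log_level, config))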