def __init__(self, config="/etc/bck.conf", dry_run=0):
    self.conf = config
    self.dry = dry_run
    GeneralClass.__init__(self, self.conf)
    self.check_env_obj = CheckEnv(self.conf)
    # If the prepare_tool option is enabled in the config, make backup_tool use it.
    if hasattr(self, 'prepare_tool'):
        self.backup_tool = self.prepare_tool
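# A minimal usage sketch (hypothetical: the concrete class name below is an
# assumption for illustration; the constructor signature is the one defined above):
#
#     backup_obj = Backup(config="/etc/bck.conf", dry_run=1)
#     backup_obj.all_backup()
#
# With dry_run=1 the assembled backup commands are only logged, not executed.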
def all_backup(self):
    """
    Checks the full backup directory first: if it is empty, takes a full
    backup. If it is not empty, checks the age of the most recent full
    backup; if it is older than the configured full_backup_interval
    (e.g. one day), takes a new full backup. In all other cases it takes
    an incremental backup.
    """
    # Workaround for a circular import dependency error in Python:
    # creating the CheckEnv object here instead of at module level.
    check_env_obj = CheckEnv(self.conf,
                             full_dir=self.full_dir,
                             inc_dir=self.inc_dir)
    if check_env_obj.check_all_env():
        if self.recent_full_backup_file() == 0:
            logger.debug("- - - - You have no backups : Taking very first Full Backup! - - - -")
            # Flushing logs
            if self.mysql_connection_flush_logs():
                # Taking full backup
                if self.full_backup():
                    # Removing old incremental backups
                    self.clean_inc_backup_dir()
                    # Copying backups to remote server
                    if hasattr(self, 'remote_conn') and hasattr(self, 'remote_dir') \
                            and self.remote_conn and self.remote_dir:
                        self.copy_backup_to_remote_host()
            return True
        elif self.last_full_backup_date() == 1:
            logger.debug("- - - - Your full backup has timed out : Taking new Full Backup! - - - -")
            # Archiving backups
            if hasattr(self, 'archive_dir'):
                if (hasattr(self, 'max_archive_duration') and self.max_archive_duration) \
                        or (hasattr(self, 'max_archive_size') and self.max_archive_size):
                    self.clean_old_archives()
                self.create_backup_archives()
            else:
                logger.debug("Archiving disabled. Skipping!")
            # Flushing logs
            if self.mysql_connection_flush_logs():
                # Taking full backup
                if self.full_backup():
                    # Removing full backups
                    self.clean_full_backup_dir()
                    # Removing incremental backups
                    self.clean_inc_backup_dir()
                    # Copying backups to remote server
                    if hasattr(self, 'remote_conn') and hasattr(self, 'remote_dir') \
                            and self.remote_conn and self.remote_dir:
                        self.copy_backup_to_remote_host()
            return True
        else:
            logger.debug("- - - - You have a full backup that is less than {} seconds old. - - - -".format(
                self.full_backup_interval))
            logger.debug("- - - - We will take an incremental one based on recent Full Backup - - - -")
            time.sleep(3)
            # Taking incremental backup
            self.inc_backup()
            # Copying backups to remote server
            if hasattr(self, 'remote_conn') and hasattr(self, 'remote_dir') \
                    and self.remote_conn and self.remote_dir:
                self.copy_backup_to_remote_host()
            return True
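# Illustrative on-disk layout produced by the scheduling logic above.
# Directory names are examples only; the actual names are timestamps created
# by create_backup_directory():
#
#     <backupdir>/full/2023-01-01_12-00-00   <- most recent full backup
#     <backupdir>/inc/2023-01-01_13-00-00    <- incremental, based on the full
#     <backupdir>/inc/2023-01-01_14-00-00    <- incremental, based on the previous inc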
def wipe_backup_prepare_copyback(self, basedir, keyring_vault=0):
    """
    Method for Backup + Prepare and Copy-back actions.
    It will also create a slave server from the backup of the master and start it.
    :param basedir: The basedir path of MySQL
    :return: Success if no exception is raised from the called methods
    """
    c_count = 0
    for options in ConfigGenerator(config=self.conf).options_combination_generator(self.mysql_options):
        c_count = c_count + 1
        options = " ".join(options)
        if '5.7' in basedir:
            if keyring_vault == 0:
                options = options + " " + self.df_mysql_options.format(basedir, c_count)
            elif keyring_vault == 1:
                # The keyring_vault options must be provided manually with a full path in the config, e.g.:
                # --early-plugin-load=keyring_vault=keyring_vault.so,--loose-keyring_vault_config=/sda/vault_server/keyring_vault.cnf
                # There is no need to pass basedir here, so only --server-id=c_count is passed.
                options = options + " " + self.df_mysql_options.format(c_count)
        else:
            options = options + " " + self.df_mysql_options.format(c_count)
        logger.debug("*********************************")
        logger.debug("Starting cycle{}".format(c_count))
        logger.debug("Will start MySQL with {}".format(options))
        # Passing options to start MySQL
        if self.clone_obj.wipe_server_all(basedir_path=basedir, options=options):
            # Specifying directories and passing them to the WrapperForBackupTest class
            full_dir = self.backupdir + "/cycle{}".format(c_count) + "/full"
            inc_dir = self.backupdir + "/cycle{}".format(c_count) + "/inc"
            backup_obj = WrapperForBackupTest(config=self.conf,
                                              full_dir=full_dir,
                                              inc_dir=inc_dir,
                                              basedir=basedir)
            # Take backups
            logger.debug("Started to run run_all_backup()")
            if backup_obj.run_all_backup():
                prepare_obj = WrapperForPrepareTest(config=self.conf,
                                                    full_dir=full_dir,
                                                    inc_dir=inc_dir)
                # Prepare backups
                logger.debug("Started to run run_prepare_backup()")
                if prepare_obj.run_prepare_backup():
                    if hasattr(self, 'make_slaves'):
                        logger.debug("make_slaves is defined, so slaves will be created!")
                        # Creating slave datadir
                        slave_datadir = self.create_slave_datadir(basedir=basedir, num=1)
                        # Preparing the slave server environment
                        prepare_obj.run_xtra_copyback(datadir=slave_datadir)
                        prepare_obj.giving_chown(datadir=slave_datadir)
                        slave_full_options = self.prepare_start_slave_options(
                            basedir=basedir, slave_number=1, options=options)
                        prepare_obj.start_mysql_func(
                            start_tool="{}/start_dynamic".format(basedir),
                            options=slave_full_options)
                        # Creating connection file for the new node
                        self.create_slave_connection_file(basedir=basedir, num=1)
                        # Creating shutdown file for the new node
                        self.create_slave_shutdown_file(basedir=basedir, num=1)
                        # Checking if the node is up
                        logger.debug("Pausing a bit here...")
                        sleep(10)
                        chk_obj = CheckEnv(config=self.conf)
                        check_options = "--user={} --socket={}/sock{}.sock".format(
                            'root', basedir, 1)
                        chk_obj.check_mysql_uptime(options=check_options)
                        # Making this node a slave
                        mysql_master_client_cmd = RunBenchmark(
                            config=self.conf).get_mysql_conn(basedir=basedir)
                        # Creating the replication user on the master server
                        self.run_sql_create_user(mysql_master_client_cmd)
                        # Dropping blank users from the master server if the PS version is 5.6 or 5.5
                        if '5.6' in basedir or '5.5' in basedir:
                            self.drop_blank_mysql_users(mysql_master_client_cmd)
                        full_backup_dir = prepare_obj.recent_full_backup_file()
                        mysql_slave_client_cmd = RunBenchmark(
                            config=self.conf).get_mysql_conn(
                                basedir=basedir, file_name="cl_node{}".format(1))
                        # Creating dsns table
                        self.create_dsns_table(mysql_master_client_cmd)
                        # Running CHANGE MASTER and some other commands here
                        if self.run_change_master(
                                basedir=basedir,
                                full_backup_dir="{}/{}".format(full_dir, full_backup_dir),
                                mysql_master_client_cmd=mysql_master_client_cmd,
                                mysql_slave_client_cmd=mysql_slave_client_cmd):
                            sleep(10)
                            logger.debug("Starting actions for second slave here...")
                            # Actions for the second slave; it is going to be started from the slave's backup
                            full_dir_2 = self.backupdir + "/cycle{}".format(c_count) + "/slave_backup" + "/full"
                            inc_dir_2 = self.backupdir + "/cycle{}".format(c_count) + "/slave_backup" + "/inc"
                            # Create a config for this slave node1 here
                            logger.debug("Generating special config file for second slave")
                            cnf_obj = ConfigGenerator(config=self.conf)
                            slave_conf_path = self.backupdir + "/cycle{}".format(c_count)
                            if ('5.7' in basedir) and ('2_4_ps_5_7' in self.conf):
                                slave_conf_file = 'xb_2_4_ps_5_7_slave.conf'
                            elif ('5.6' in basedir) and ('2_4_ps_5_6' in self.conf):
                                slave_conf_file = 'xb_2_4_ps_5_6_slave.conf'
                            elif ('5.6' in basedir) and ('2_3_ps_5_6' in self.conf):
                                slave_conf_file = 'xb_2_3_ps_5_6_slave.conf'
                            elif ('5.5' in basedir) and ('2_3_ps_5_5' in self.conf):
                                slave_conf_file = 'xb_2_3_ps_5_5_slave.conf'
                            elif ('5.5' in basedir) and ('2_4_ps_5_5' in self.conf):
                                slave_conf_file = 'xb_2_4_ps_5_5_slave.conf'
                            cnf_obj.generate_config_files(
                                test_path=self.testpath,
                                conf_file=slave_conf_file,
                                basedir=basedir,
                                datadir="{}/node{}".format(basedir, 1),
                                sock_file="{}/sock{}.sock".format(basedir, 1),
                                backup_path=slave_conf_path)
                            # Do the backup here
                            backup_obj_2 = WrapperForBackupTest(
                                config="{}/{}".format(slave_conf_path, slave_conf_file),
                                full_dir=full_dir_2,
                                inc_dir=inc_dir_2,
                                basedir=basedir)
                            if backup_obj_2.all_backup():
                                # Do the prepare here
                                prepare_obj_2 = WrapperForPrepareTest(
                                    config="{}/{}".format(slave_conf_path, slave_conf_file),
                                    full_dir=full_dir_2,
                                    inc_dir=inc_dir_2)
                                if prepare_obj_2.run_prepare_backup():
                                    # Removing tablespace files that live outside the datadir
                                    if os.path.isfile('{}/out_ts1.ibd'.format(basedir)):
                                        os.remove('{}/out_ts1.ibd'.format(basedir))
                                    if os.path.isfile('{}/sysbench_test_db/t1.ibd'.format(basedir)):
                                        os.remove('{}/sysbench_test_db/t1.ibd'.format(basedir))
                                    # Creating slave datadir
                                    slave_datadir_2 = self.create_slave_datadir(basedir=basedir, num=2)
                                    prepare_obj_2.run_xtra_copyback(datadir=slave_datadir_2)
                                    prepare_obj_2.giving_chown(datadir=slave_datadir_2)
                                    slave_full_options = self.prepare_start_slave_options(
                                        basedir=basedir, slave_number=2, options=options)
                                    prepare_obj_2.start_mysql_func(
                                        start_tool="{}/start_dynamic".format(basedir),
                                        options=slave_full_options)
                                    # Creating connection file for the new node
                                    self.create_slave_connection_file(basedir=basedir, num=2)
                                    # Creating shutdown file for the new node
                                    self.create_slave_shutdown_file(basedir=basedir, num=2)
                                    logger.debug("Pausing a bit here...")
                                    sleep(10)
                                    check_options_2 = "--user={} --socket={}/sock{}.sock".format(
                                        'root', basedir, 2)
                                    chk_obj.check_mysql_uptime(options=check_options_2)
                                    mysql_slave_client_cmd_2 = RunBenchmark(
                                        config=self.conf).get_mysql_conn(
                                            basedir=basedir, file_name="cl_node{}".format(2))
                                    full_backup_dir_2 = prepare_obj_2.recent_full_backup_file()
                                    if self.run_change_master(
                                            basedir=basedir,
                                            full_backup_dir="{}/{}".format(full_dir_2, full_backup_dir_2),
                                            mysql_master_client_cmd=mysql_master_client_cmd,
                                            mysql_slave_client_cmd=mysql_slave_client_cmd_2,
                                            is_slave=True):
                                        sleep(10)
                                        # Running on master
                                        self.run_pt_table_checksum(basedir=basedir)
                                        # Shutting down slaves
                                        self.slave_shutdown(basedir=basedir, num=1)
                                        self.slave_shutdown(basedir=basedir, num=2)
                                        sleep(5)
                    else:
                        prepare_obj.copy_back_action(options=options)
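# A minimal usage sketch (hypothetical: the driver class name and basedir path
# below are assumptions for illustration; keyring_vault=1 expects the
# keyring_vault plugin options to be present in mysql_options in the config):
#
#     runner = WrapperForInnoBackupTest(config="/etc/bck.conf")
#     runner.wipe_backup_prepare_copyback(
#         basedir="/opt/percona-server-5.7.21", keyring_vault=0)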
def inc_backup(self):
    # Taking an incremental backup
    recent_bck = self.recent_full_backup_file()
    recent_inc = self.recent_inc_backup_file()
    check_env_obj = CheckEnv(self.conf)
    # Creating a time-stamped incremental backup directory
    inc_backup_dir = self.create_backup_directory(self.inc_dir)
    # Checking if there is any incremental backup
    if recent_inc == 0:  # If there is no incremental backup
        # Taking an incremental backup based on the most recent full backup.
        args = "%s --defaults-file=%s --user=%s --password='%s' " \
               "--target-dir=%s --incremental-basedir=%s/%s --backup" % \
               (self.backup_tool,
                self.mycnf,
                self.mysql_user,
                self.mysql_password,
                inc_backup_dir,
                self.full_dir,
                recent_bck)

        if hasattr(self, 'mysql_socket'):
            args += " --socket=%s" % self.mysql_socket
        elif hasattr(self, 'mysql_host') and hasattr(self, 'mysql_port'):
            args += " --host=%s" % self.mysql_host
            args += " --port=%s" % self.mysql_port
        else:
            logger.critical(
                "Neither mysql_socket nor mysql_host and mysql_port are defined in config!")
            return False

        # Adding compression support for incremental backup
        if hasattr(self, 'compress'):
            args += " --compress=%s" % self.compress
        if hasattr(self, 'compress_chunk_size'):
            args += " --compress-chunk-size=%s" % self.compress_chunk_size
        if hasattr(self, 'compress_threads'):
            args += " --compress-threads=%s" % self.compress_threads

        # Adding encryption support for incremental backup
        if hasattr(self, 'encrypt'):
            args += " --encrypt=%s" % self.encrypt
        if hasattr(self, 'encrypt_key'):
            args += " --encrypt-key=%s" % self.encrypt_key
        if hasattr(self, 'encrypt_key_file'):
            args += " --encrypt-key-file=%s" % self.encrypt_key_file
        if hasattr(self, 'encrypt_threads'):
            args += " --encrypt-threads=%s" % self.encrypt_threads
        if hasattr(self, 'encrypt_chunk_size'):
            args += " --encrypt-chunk-size=%s" % self.encrypt_chunk_size

        # Extract and decrypt the streamed full backup prior to executing the incremental backup
        if hasattr(self, 'stream') \
                and hasattr(self, 'encrypt') \
                and hasattr(self, 'xbs_decrypt'):
            logger.debug("Using xbstream to extract and decrypt from full_backup.stream!")
            xbstream_command = "%s %s --decrypt=%s --encrypt-key=%s --encrypt-threads=%s " \
                               "< %s/%s/full_backup.stream -C %s/%s" % \
                               (self.xbstream,
                                self.xbstream_options,
                                self.decrypt,
                                self.encrypt_key,
                                self.encrypt_threads,
                                self.full_dir,
                                recent_bck,
                                self.full_dir,
                                recent_bck)
            logger.debug("The following xbstream command will be executed %s", xbstream_command)
            if self.dry == 0 and isfile("%s/%s/full_backup.stream" % (self.full_dir, recent_bck)):
                status, output = subprocess.getstatusoutput(xbstream_command)
                if status == 0:
                    logger.debug("XBSTREAM command succeeded.")
                else:
                    logger.error("XBSTREAM COMMAND FAILED!")
                    time.sleep(5)
                    logger.error(output)
                    return False

        # Extract the streamed full backup prior to executing the incremental backup
        elif hasattr(self, 'stream'):
            logger.debug("Using xbstream to extract from full_backup.stream!")
            xbstream_command = "%s %s < %s/%s/full_backup.stream -C %s/%s" % \
                               (self.xbstream,
                                self.xbstream_options,
                                self.full_dir,
                                recent_bck,
                                self.full_dir,
                                recent_bck)
            logger.debug("The following xbstream command will be executed %s", xbstream_command)
            if self.dry == 0 and isfile("%s/%s/full_backup.stream" % (self.full_dir, recent_bck)):
                status, output = subprocess.getstatusoutput(xbstream_command)
                if status == 0:
                    logger.debug("XBSTREAM command succeeded.")
                else:
                    logger.error("XBSTREAM COMMAND FAILED!")
                    time.sleep(5)
                    logger.error(output)
                    return False

        elif 'encrypt' in args:
            logger.debug("Applying workaround for LP #1444255")
            xbcrypt_command = "%s -d -k %s -a %s -i %s/%s/xtrabackup_checkpoints.xbcrypt " \
                              "-o %s/%s/xtrabackup_checkpoints" % \
                              (self.xbcrypt,
                               self.encrypt_key,
                               self.encrypt,
                               self.full_dir,
                               recent_bck,
                               self.full_dir,
                               recent_bck)
            logger.debug("The following xbcrypt command will be executed %s", xbcrypt_command)
            if self.dry == 0:
                status, output = subprocess.getstatusoutput(xbcrypt_command)
                if status == 0:
                    logger.debug(output[-27:])
                else:
                    logger.error("XBCRYPT COMMAND FAILED!")
                    time.sleep(5)
                    logger.error(output)
                    return False

        # Checking if extra options were passed
        if hasattr(self, 'xtra_options'):
            args += " "
            args += self.xtra_options

        # Checking if extra backup options were passed
        if hasattr(self, 'xtra_backup'):
            args += " "
            args += self.xtra_backup

        # Checking if a partial backup list is available
        if hasattr(self, 'partial_list'):
            args += " "
            args += '--databases="%s"' % self.partial_list
            logger.warning("Partial Backup is enabled!")

        # Checking if streaming is enabled for backups
        if hasattr(self, 'stream'):
            args += " "
            args += '--stream="%s"' % self.stream
            args += " > %s/inc_backup.stream" % inc_backup_dir
            logger.warning("Streaming is enabled!")

        logger.debug("The following backup command will be executed %s", args)
        if self.dry == 0:
            status, output = subprocess.getstatusoutput(args)
            if status == 0:
                logger.debug(output)
                # logger.debug(output[-27:])
                return True
            else:
                logger.error("INCREMENTAL BACKUP FAILED!")
                time.sleep(5)
                logger.error(output)
                return False

    else:  # If there is already an existing incremental backup
        # Taking an incremental backup based on the most recent incremental backup.
        args = "%s --defaults-file=%s --user=%s --password='%s' " \
               "--target-dir=%s --incremental-basedir=%s/%s --backup" % \
               (self.backup_tool,
                self.mycnf,
                self.mysql_user,
                self.mysql_password,
                inc_backup_dir,
                self.inc_dir,
                recent_inc)

        if hasattr(self, 'mysql_socket'):
            args += " --socket=%s" % self.mysql_socket
        elif hasattr(self, 'mysql_host') and hasattr(self, 'mysql_port'):
            args += " --host=%s" % self.mysql_host
            args += " --port=%s" % self.mysql_port
        else:
            logger.critical(
                "Neither mysql_socket nor mysql_host and mysql_port are defined in config!")
            return False

        # Adding compression support for incremental backup
        if hasattr(self, 'compress'):
            args += " --compress=%s" % self.compress
        if hasattr(self, 'compress_chunk_size'):
            args += " --compress-chunk-size=%s" % self.compress_chunk_size
        if hasattr(self, 'compress_threads'):
            args += " --compress-threads=%s" % self.compress_threads

        # Adding encryption support for incremental backup
        if hasattr(self, 'encrypt'):
            args += " --encrypt=%s" % self.encrypt
        if hasattr(self, 'encrypt_key'):
            args += " --encrypt-key=%s" % self.encrypt_key
        if hasattr(self, 'encrypt_key_file'):
            args += " --encrypt-key-file=%s" % self.encrypt_key_file
        if hasattr(self, 'encrypt_threads'):
            args += " --encrypt-threads=%s" % self.encrypt_threads
        if hasattr(self, 'encrypt_chunk_size'):
            args += " --encrypt-chunk-size=%s" % self.encrypt_chunk_size

        # Extract and decrypt the streamed incremental backup prior to executing the new incremental backup
        if hasattr(self, 'stream') \
                and hasattr(self, 'encrypt') \
                and hasattr(self, 'xbs_decrypt'):
            logger.debug("Using xbstream to extract and decrypt from inc_backup.stream!")
            xbstream_command = "%s %s --decrypt=%s --encrypt-key=%s --encrypt-threads=%s " \
                               "< %s/%s/inc_backup.stream -C %s/%s" % \
                               (self.xbstream,
                                self.xbstream_options,
                                self.decrypt,
                                self.encrypt_key,
                                self.encrypt_threads,
                                self.inc_dir,
                                recent_inc,
                                self.inc_dir,
                                recent_inc)
            logger.debug("The following xbstream command will be executed %s", xbstream_command)
            if self.dry == 0 and isfile("%s/%s/inc_backup.stream" % (self.inc_dir, recent_inc)):
                status, output = subprocess.getstatusoutput(xbstream_command)
                if status == 0:
                    logger.debug("XBSTREAM command succeeded.")
                else:
                    logger.error("XBSTREAM COMMAND FAILED!")
                    time.sleep(5)
                    logger.error(output)
                    return False

        # Extracting the streamed incremental backup prior to executing the new incremental backup
        elif hasattr(self, 'stream'):
            logger.debug("Using xbstream to extract from inc_backup.stream!")
            xbstream_command = "%s %s < %s/%s/inc_backup.stream -C %s/%s" % \
                               (self.xbstream,
                                self.xbstream_options,
                                self.inc_dir,
                                recent_inc,
                                self.inc_dir,
                                recent_inc)
            logger.debug("The following xbstream command will be executed %s", xbstream_command)
            if self.dry == 0 and isfile("%s/%s/inc_backup.stream" % (self.inc_dir, recent_inc)):
                status, output = subprocess.getstatusoutput(xbstream_command)
                if status == 0:
                    logger.debug("XBSTREAM command succeeded.")
                else:
                    logger.error("XBSTREAM COMMAND FAILED!")
                    time.sleep(5)
                    logger.error(output)
                    return False

        elif 'encrypt' in args:
            logger.debug("Applying workaround for LP #1444255")
            xbcrypt_command = "%s -d -k %s -a %s -i %s/%s/xtrabackup_checkpoints.xbcrypt " \
                              "-o %s/%s/xtrabackup_checkpoints" % \
                              (self.xbcrypt,
                               self.encrypt_key,
                               self.encrypt,
                               self.inc_dir,
                               recent_inc,
                               self.inc_dir,
                               recent_inc)
            logger.debug("The following xbcrypt command will be executed %s", xbcrypt_command)
            if self.dry == 0:
                status, output = subprocess.getstatusoutput(xbcrypt_command)
                if status == 0:
                    logger.debug(output[-27:])
                else:
                    logger.error("XBCRYPT COMMAND FAILED!")
                    time.sleep(5)
                    logger.error(output)
                    return False

        # Checking if extra options were passed
        if hasattr(self, 'xtra_options'):
            args += " "
            args += self.xtra_options

        # Checking if extra backup options were passed
        if hasattr(self, 'xtra_backup'):
            args += " "
            args += self.xtra_backup

        # Checking if a partial backup list is available
        if hasattr(self, 'partial_list'):
            args += " "
            args += '--databases="%s"' % self.partial_list
            logger.warning("Partial Backup is enabled!")

        # Checking if streaming is enabled for backups
        if hasattr(self, 'stream'):
            args += " "
            args += '--stream="%s"' % self.stream
            args += " > %s/inc_backup.stream" % inc_backup_dir
            logger.warning("Streaming is enabled!")

        logger.debug("The following backup command will be executed %s", args)
        if self.dry == 0:
            status, output = subprocess.getstatusoutput(args)
            if status == 0:
                logger.debug(output)
                # logger.debug(output[-27:])
                return True
            else:
                logger.error("INCREMENTAL BACKUP FAILED!")
                time.sleep(5)
                logger.error(output)
                return False
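# For reference, an illustrative shape of the command assembled above, assuming
# a socket connection with compression and streaming enabled (all paths and
# values below are examples only):
#
#     xtrabackup --defaults-file=/etc/my.cnf --user=backup --password='***' \
#         --target-dir=/backups/inc/2023-01-01_13-00-00 \
#         --incremental-basedir=/backups/full/2023-01-01_12-00-00 --backup \
#         --socket=/var/run/mysqld/mysqld.sock --compress=quicklz \
#         --stream="xbstream" > /backups/inc/2023-01-01_13-00-00/inc_backup.stream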
def __init__(self, config="/etc/bck.conf"):
    self.conf = config
    GeneralClass.__init__(self, self.conf)
    self.check_env_obj = CheckEnv(self.conf)
    self.result = self.check_env_obj.check_systemd_init()
def all_backup(self):
    """
    Checks the full backup directory first: if it is empty, takes a full
    backup. If it is not empty, checks the age of the most recent full
    backup; if it is older than the configured full_backup_interval
    (e.g. one day), takes a new full backup. In all other cases it takes
    an incremental backup.
    """
    # Workaround for a circular import dependency error in Python:
    # creating the CheckEnv object here instead of at module level.
    check_env_obj = CheckEnv(self.conf)
    if check_env_obj.check_all_env():
        if self.recent_full_backup_file() == 0:
            logger.debug("###############################################################")
            logger.debug("#You have no backups : Taking very first Full Backup! - - - - #")
            logger.debug("###############################################################")
            time.sleep(3)
            # Flushing logs
            if self.mysql_connection_flush_logs():
                # Taking full backup
                if self.full_backup():
                    # Removing old incremental backups
                    self.clean_inc_backup_dir()
                    # Copying backups to remote server
                    if hasattr(self, 'remote_conn') and hasattr(self, 'remote_dir') \
                            and self.remote_conn and self.remote_dir:
                        self.copy_backup_to_remote_host()
            # Exiting after taking the full backup
            exit(0)
        elif self.last_full_backup_date() == 1:
            logger.debug("################################################################")
            logger.debug("Your full backup has timed out : Taking new Full Backup!- - - -#")
            logger.debug("################################################################")
            time.sleep(3)
            # Archiving backups
            if self.archive_dir:
                if (hasattr(self, 'max_archive_duration') and self.max_archive_duration) \
                        or (hasattr(self, 'max_archive_size') and self.max_archive_size):
                    self.clean_old_archives()
                if not self.create_backup_archives():
                    exit(0)
            # Flushing logs
            if self.mysql_connection_flush_logs():
                # Taking full backup
                if self.full_backup():
                    # Removing full backups
                    self.clean_full_backup_dir()
                    # Removing incremental backups
                    self.clean_inc_backup_dir()
                    # Copying backups to remote server
                    if hasattr(self, 'remote_conn') and hasattr(self, 'remote_dir') \
                            and self.remote_conn and self.remote_dir:
                        self.copy_backup_to_remote_host()
            # Exiting after taking the NEW full backup
            exit(0)
        else:
            logger.debug("################################################################")
            logger.debug("You have a full backup that is less than %d seconds old. - -#",
                         self.full_backup_interval)
            logger.debug("We will take an incremental one based on recent Full Backup - -#")
            logger.debug("################################################################")
            time.sleep(3)
            # Taking incremental backup
            self.inc_backup()
            # Copying backups to remote server
            if hasattr(self, 'remote_conn') and hasattr(self, 'remote_dir') \
                    and self.remote_conn and self.remote_dir:
                self.copy_backup_to_remote_host()
            # Exiting after taking the incremental backup
            exit(0)
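# Illustrative scheduling (assumption: this method backs a CLI entry point
# named `autoxtrabackup`; adapt the command name and paths to the actual
# installation). Because the method calls exit(0), it is meant to be driven
# from the command line rather than from other Python code:
#
#     # crontab entry taking a backup every 30 minutes; full vs. incremental
#     # is decided by the logic above based on full_backup_interval
#     */30 * * * * autoxtrabackup --backup >> /var/log/autoxtrabackup.log 2>&1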
def inc_backup(self):
    # Taking an incremental backup
    recent_bck = self.recent_full_backup_file()
    recent_inc = self.recent_inc_backup_file()
    check_env_obj = CheckEnv(self.conf)
    product_type = check_env_obj.check_mysql_product()
    # Creating a time-stamped incremental backup directory
    inc_backup_dir = self.create_backup_directory(self.inc_dir)
    # Checking if there is any incremental backup
    if recent_inc == 0:  # If there is no incremental backup
        # Why check whether MariaDB or MySQL is installed?
        # See BUG -> https://bugs.launchpad.net/percona-xtrabackup/+bug/1444541
        if product_type == 2:
            # Taking incremental backup with MariaDB
            # (--incremental-force-scan is added as a workaround for the BUG above).
            args = "%s --defaults-file=%s --user=%s --password='%s' " \
                   "--incremental-force-scan --incremental %s --incremental-basedir %s/%s" % \
                   (self.backup_tool,
                    self.mycnf,
                    self.mysql_user,
                    self.mysql_password,
                    self.inc_dir,
                    self.full_dir,
                    recent_bck)
        elif product_type == 3:
            # Taking incremental backup with MySQL.
            args = "%s --defaults-file=%s --user=%s --password='%s' " \
                   "--target-dir=%s --incremental-basedir=%s/%s --backup" % \
                   (self.backup_tool,
                    self.mycnf,
                    self.mysql_user,
                    self.mysql_password,
                    inc_backup_dir,
                    self.full_dir,
                    recent_bck)

        if hasattr(self, 'mysql_socket'):
            args += " --socket=%s" % self.mysql_socket
        elif hasattr(self, 'mysql_host') and hasattr(self, 'mysql_port'):
            args += " --host=%s" % self.mysql_host
            args += " --port=%s" % self.mysql_port
        else:
            logger.critical(
                "Neither mysql_socket nor mysql_host and mysql_port are defined in config!")
            return False

        # Adding compression support for incremental backup
        if hasattr(self, 'compress'):
            args += " --compress=%s" % self.compress
        if hasattr(self, 'compress_chunk_size'):
            args += " --compress-chunk-size=%s" % self.compress_chunk_size
        if hasattr(self, 'compress_threads'):
            args += " --compress-threads=%s" % self.compress_threads

        # Adding encryption support for incremental backup
        if hasattr(self, 'encrypt'):
            args += " --encrypt=%s" % self.encrypt
        if hasattr(self, 'encrypt_key'):
            args += " --encrypt-key=%s" % self.encrypt_key
        if hasattr(self, 'encrypt_key_file'):
            args += " --encrypt-key-file=%s" % self.encrypt_key_file
        if hasattr(self, 'encrypt_threads'):
            args += " --encrypt-threads=%s" % self.encrypt_threads
        if hasattr(self, 'encrypt_chunk_size'):
            args += " --encrypt-chunk-size=%s" % self.encrypt_chunk_size

        if 'encrypt' in args:
            logger.debug("Applying workaround for LP #1444255")
            xbcrypt_command = "%s -d -k %s -a %s -i %s/%s/xtrabackup_checkpoints.xbcrypt " \
                              "-o %s/%s/xtrabackup_checkpoints" % \
                              (self.xbcrypt,
                               self.encrypt_key,
                               self.encrypt,
                               self.full_dir,
                               recent_bck,
                               self.full_dir,
                               recent_bck)
            logger.debug("The following xbcrypt command will be executed %s", xbcrypt_command)
            status, output = subprocess.getstatusoutput(xbcrypt_command)
            if status == 0:
                logger.debug(output[-27:])
            else:
                logger.error("XBCRYPT COMMAND FAILED!")
                time.sleep(5)
                logger.error(output)
                return False

        logger.debug("The following backup command will be executed %s", args)
        status, output = subprocess.getstatusoutput(args)
        if status == 0:
            logger.debug(output[-27:])
            return True
        else:
            logger.error("INCREMENTAL BACKUP FAILED!")
            time.sleep(5)
            logger.error(output)
            return False

    else:  # If there is already an existing incremental backup
        if product_type == 2:
            # Taking incremental backup with MariaDB
            # (--incremental-force-scan is added as a workaround for the BUG above).
            args = "%s --defaults-file=%s --user=%s --password='%s' " \
                   "--incremental-force-scan --incremental %s --incremental-basedir %s/%s" % \
                   (self.backup_tool,
                    self.mycnf,
                    self.mysql_user,
                    self.mysql_password,
                    self.inc_dir,
                    self.inc_dir,
                    recent_inc)
        elif product_type == 3:
            # Taking incremental backup with MySQL.
            args = "%s --defaults-file=%s --user=%s --password='%s' " \
                   "--target-dir=%s --incremental-basedir=%s/%s --backup" % \
                   (self.backup_tool,
                    self.mycnf,
                    self.mysql_user,
                    self.mysql_password,
                    inc_backup_dir,
                    self.inc_dir,
                    recent_inc)

        if hasattr(self, 'mysql_socket'):
            args += " --socket=%s" % self.mysql_socket
        elif hasattr(self, 'mysql_host') and hasattr(self, 'mysql_port'):
            args += " --host=%s" % self.mysql_host
            args += " --port=%s" % self.mysql_port
        else:
            logger.critical(
                "Neither mysql_socket nor mysql_host and mysql_port are defined in config!")
            return False

        # Adding compression support for incremental backup
        if hasattr(self, 'compress'):
            args += " --compress=%s" % self.compress
        if hasattr(self, 'compress_chunk_size'):
            args += " --compress-chunk-size=%s" % self.compress_chunk_size
        if hasattr(self, 'compress_threads'):
            args += " --compress-threads=%s" % self.compress_threads

        # Adding encryption support for incremental backup
        if hasattr(self, 'encrypt'):
            args += " --encrypt=%s" % self.encrypt
        if hasattr(self, 'encrypt_key'):
            args += " --encrypt-key=%s" % self.encrypt_key
        if hasattr(self, 'encrypt_key_file'):
            args += " --encrypt-key-file=%s" % self.encrypt_key_file
        if hasattr(self, 'encrypt_threads'):
            args += " --encrypt-threads=%s" % self.encrypt_threads
        if hasattr(self, 'encrypt_chunk_size'):
            args += " --encrypt-chunk-size=%s" % self.encrypt_chunk_size

        if 'encrypt' in args:
            logger.debug("Applying workaround for LP #1444255")
            xbcrypt_command = "%s -d -k %s -a %s -i %s/%s/xtrabackup_checkpoints.xbcrypt " \
                              "-o %s/%s/xtrabackup_checkpoints" % \
                              (self.xbcrypt,
                               self.encrypt_key,
                               self.encrypt,
                               self.inc_dir,
                               recent_inc,
                               self.inc_dir,
                               recent_inc)
            logger.debug("The following xbcrypt command will be executed %s", xbcrypt_command)
            status, output = subprocess.getstatusoutput(xbcrypt_command)
            if status == 0:
                logger.debug(output[-27:])
            else:
                logger.error("XBCRYPT COMMAND FAILED!")
                time.sleep(5)
                logger.error(output)
                return False

        logger.debug("The following backup command will be executed %s", args)
        status, output = subprocess.getstatusoutput(args)
        if status == 0:
            logger.debug(output[-27:])
            return True
        else:
            logger.error("INCREMENTAL BACKUP FAILED!")
            time.sleep(5)
            logger.error(output)
            return False
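# For reference, illustrative shapes of the two commands assembled above
# (paths and values are examples only):
#
#   MariaDB (product_type == 2, innobackupex-style options):
#     innobackupex --defaults-file=/etc/my.cnf --user=backup --password='***' \
#         --incremental-force-scan --incremental /backups/inc \
#         --incremental-basedir /backups/full/2023-01-01_12-00-00
#
#   MySQL (product_type == 3, xtrabackup-style options):
#     xtrabackup --defaults-file=/etc/my.cnf --user=backup --password='***' \
#         --target-dir=/backups/inc/2023-01-01_13-00-00 \
#         --incremental-basedir=/backups/full/2023-01-01_12-00-00 --backup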