def execute(self):
    """Run a backup for the configured mode.

    Best-effort flush of OS buffers via ``sync`` (errors are logged,
    never fatal), prepare the storage backend, then dispatch to the
    mode-specific backup routine.

    :raises ValueError: if ``self.conf.mode`` is not a known backup mode.
    """
    try:
        (out, err) = utils.create_subprocess('sync')
        if err:
            logging.error('Error while sync exec: {0}'.format(err))
    except Exception as error:
        # Deliberately broad: a failed sync must not abort the backup.
        logging.error('Error while sync exec: {0}'.format(error))

    self.conf.storage.prepare()

    # Guard-clause dispatch on the backup mode.
    mode = self.conf.mode
    if mode == 'fs':
        backup.backup(self.conf, self.storage, self.engine)
        return
    if mode == 'mongo':
        backup.backup_mode_mongo(self.conf)
        return
    if mode == 'mysql':
        backup.backup_mode_mysql(self.conf)
        return
    if mode == 'sqlserver':
        backup.backup_mode_sql_server(self.conf)
        return
    raise ValueError('Please provide a valid backup mode')
def execute(self):
    """Run a (possibly incremental) backup against swift storage.

    Ensures the swift containers exist, resolves the backup level from
    the remote manifest metadata (unless --no-incremental), then runs
    the mode-specific backup routine.

    :raises Exception: if --no-incremental is combined with a backup
        level option.
    :raises ValueError: if ``self.conf.mode`` is not a known backup mode.
    """
    containers = swift.check_container_existance(self.conf)
    if containers['main_container'] is not True:
        swift.create_containers(self.conf)

    if self.conf.no_incremental:
        # Backup levels only make sense for incremental backups.
        if self.conf.max_backup_level or \
                self.conf.always_backup_level:
            raise Exception(
                'no-incremental option is not compatible '
                'with backup level options')
        manifest_meta_dict = {}
    else:
        # Get the object list of the remote containers
        # and store it in self.conf.remote_obj_list
        self.conf = swift.get_container_content(self.conf)
        # Check if a backup exist in swift with same name.
        # If not, set backup level to 0
        manifest_meta_dict = \
            utils.check_backup_and_tar_meta_existence(self.conf)

    (self.conf, manifest_meta_dict) = utils.set_backup_level(
        self.conf, manifest_meta_dict)
    self.conf.manifest_meta_dict = manifest_meta_dict

    # Every mode receives the same job start timestamp.
    time_stamp = self.start_time.timestamp
    if self.conf.mode == 'fs':
        backup.backup_mode_fs(
            self.conf, time_stamp, manifest_meta_dict)
    elif self.conf.mode == 'mongo':
        backup.backup_mode_mongo(
            self.conf, time_stamp, manifest_meta_dict)
    elif self.conf.mode == 'mysql':
        backup.backup_mode_mysql(
            self.conf, time_stamp, manifest_meta_dict)
    elif self.conf.mode == 'sqlserver':
        # BUG FIX: previously passed self.time_stamp here while every
        # other mode used self.start_time.timestamp — unified on the
        # latter. NOTE(review): confirm self.time_stamp was not
        # intentionally different for sqlserver.
        backup.backup_mode_sql_server(
            self.conf, time_stamp, manifest_meta_dict)
    else:
        raise ValueError('Please provide a valid backup mode')
def execute(self):
    """Run a swift-backed backup, incremental unless disabled.

    Creates missing containers, works out the backup level from remote
    manifest metadata, then dispatches to the mode-specific backup.

    :raises Exception: when --no-incremental is mixed with backup level
        options.
    :raises ValueError: for an unknown backup mode.
    """
    containers = swift.check_container_existance(self.conf)
    if containers['main_container'] is not True:
        swift.create_containers(self.conf)
    if self.conf.no_incremental:
        # Level options contradict a non-incremental backup.
        if self.conf.max_backup_level or \
                self.conf.always_backup_level:
            raise Exception('no-incremental option is not compatible '
                            'with backup level options')
        manifest_meta_dict = {}
    else:
        # Get the object list of the remote containers
        # and store it in self.conf.remote_obj_list
        self.conf = swift.get_container_content(self.conf)
        # Check if a backup exist in swift with same name.
        # If not, set backup level to 0
        manifest_meta_dict = \
            utils.check_backup_and_tar_meta_existence(self.conf)
    (self.conf, manifest_meta_dict) = utils.set_backup_level(
        self.conf, manifest_meta_dict)
    self.conf.manifest_meta_dict = manifest_meta_dict
    # Single timestamp source for all modes.
    time_stamp = self.start_time.timestamp
    if self.conf.mode == 'fs':
        backup.backup_mode_fs(self.conf, time_stamp, manifest_meta_dict)
    elif self.conf.mode == 'mongo':
        backup.backup_mode_mongo(self.conf, time_stamp, manifest_meta_dict)
    elif self.conf.mode == 'mysql':
        backup.backup_mode_mysql(self.conf, time_stamp, manifest_meta_dict)
    elif self.conf.mode == 'sqlserver':
        # BUG FIX: was self.time_stamp, inconsistent with the other
        # modes above. NOTE(review): confirm the difference was not
        # deliberate before relying on this.
        backup.backup_mode_sql_server(
            self.conf, time_stamp, manifest_meta_dict)
    else:
        raise ValueError('Please provide a valid backup mode')
def test_backup_mode_mysql(self, monkeypatch, tmpdir):
    """Exercise backup_mode_mysql happy and failure paths.

    - missing mysql_conf -> must raise
    - valid conf file with a fake MySQLdb -> returns None
    - failing MySQLdb.connect (FakeMySQLdb2) -> must raise
    """
    backup_opt = BackupOpt1()
    backup_opt.__dict__['storage'] = local.LocalStorage(tmpdir.strpath)
    fakemysql = FakeMySQLdb()
    expanduser = Os()
    fakere = FakeRe()
    fakeswiftclient = FakeSwiftClient()
    fakelvm = Lvm()
    fakesubprocess = FakeSubProcess()
    fakesubprocesspopen = fakesubprocess.Popen()
    fakemultiprocessing = FakeMultiProcessing()
    fakemultiprocessingqueue = fakemultiprocessing.Queue()
    fakemultiprocessingpipe = fakemultiprocessing.Pipe()
    fakemultiprocessinginit = fakemultiprocessing.__init__()
    # Stub out every external dependency touched by backup_mode_mysql.
    monkeypatch.setattr(
        multiprocessing, 'Queue', fakemultiprocessingqueue)
    monkeypatch.setattr(
        multiprocessing, 'Pipe', fakemultiprocessingpipe)
    monkeypatch.setattr(
        multiprocessing, 'Process', fakemultiprocessing.Process)
    monkeypatch.setattr(
        multiprocessing, '__init__', fakemultiprocessinginit)
    monkeypatch.setattr(
        subprocess.Popen, 'communicate', fakesubprocesspopen.communicate)
    monkeypatch.setattr(
        freezer.lvm, 'lvm_snap_remove', fakelvm.lvm_snap_remove)
    monkeypatch.setattr(freezer.lvm, 'lvm_eval', fakelvm.lvm_eval)
    monkeypatch.setattr(re, 'search', fakere.search)
    monkeypatch.setattr(MySQLdb, 'connect', fakemysql.connect)
    monkeypatch.setattr(os.path, 'expanduser', expanduser.expanduser)
    monkeypatch.setattr(os.path, 'isdir', expanduser.isdir)
    monkeypatch.setattr(os, 'makedirs', expanduser.makedirs)
    monkeypatch.setattr(os.path, 'exists', expanduser.exists)
    monkeypatch.setattr(os, 'chdir', lambda x: x)
    monkeypatch.setattr(swiftclient, 'client', fakeswiftclient.client)
    mysql_conf = backup_opt.mysql_conf
    backup_opt.__dict__['mysql_conf'] = None
    # Without a mysql conf file the backup must fail.
    pytest.raises(Exception, backup_mode_mysql, backup_opt)
    # Generate mysql conf test file
    backup_opt.__dict__['mysql_conf'] = mysql_conf
    with open(backup_opt.mysql_conf, 'w') as mysql_conf_fd:
        mysql_conf_fd.write(
            'host=abcd\nport=1234\nuser=abcd\npassword=abcd\n')
    assert backup_mode_mysql(backup_opt) is None
    fakemysql2 = FakeMySQLdb2()
    monkeypatch.setattr(MySQLdb, 'connect', fakemysql2.connect)
    # BUG FIX: this call previously omitted backup_opt
    # (pytest.raises(Exception, backup_mode_mysql)), so the assertion
    # passed on the resulting TypeError instead of the intended
    # connection failure.
    pytest.raises(Exception, backup_mode_mysql, backup_opt)
    os.unlink(backup_opt.mysql_conf)
def test_backup_mode_mysql(self, monkeypatch):
    """backup_mode_mysql: a missing conf file raises, a valid conf file
    succeeds, and a broken MySQLdb connection raises again."""
    test_meta = dict()
    backup_opt = BackupOpt1()
    fake_mysql = FakeMySQLdb()
    fake_os = Os()
    fake_re = FakeRe()
    fake_swift = FakeSwiftClient()
    fake_lvm = Lvm()
    fake_popen = FakeSubProcess().Popen()
    fake_mp = FakeMultiProcessing()
    fake_mp_queue = fake_mp.Queue()
    fake_mp_pipe = fake_mp.Pipe()
    fake_mp_init = fake_mp.__init__()
    # Table of (target, attribute, replacement) stubs for everything
    # backup_mode_mysql reaches outside the test.
    patches = [
        (multiprocessing, 'Queue', fake_mp_queue),
        (multiprocessing, 'Pipe', fake_mp_pipe),
        (multiprocessing, 'Process', fake_mp.Process),
        (multiprocessing, '__init__', fake_mp_init),
        (subprocess.Popen, 'communicate', fake_popen.communicate),
        (freezer.lvm, 'lvm_snap_remove', fake_lvm.lvm_snap_remove),
        (freezer.lvm, 'lvm_eval', fake_lvm.lvm_eval),
        (re, 'search', fake_re.search),
        (MySQLdb, 'connect', fake_mysql.connect),
        (os.path, 'expanduser', fake_os.expanduser),
        (os.path, 'isdir', fake_os.isdir),
        (os, 'makedirs', fake_os.makedirs),
        (os.path, 'exists', fake_os.exists),
        (swiftclient, 'client', fake_swift.client),
    ]
    for target, attr, replacement in patches:
        monkeypatch.setattr(target, attr, replacement)
    mysql_conf_file = backup_opt.mysql_conf_file
    backup_opt.__dict__['mysql_conf_file'] = None
    # No conf file configured: the call must raise.
    pytest.raises(
        Exception, backup_mode_mysql, backup_opt, 123456789, test_meta)
    # Write a throwaway mysql conf file and retry: now it must succeed.
    backup_opt.__dict__['mysql_conf_file'] = mysql_conf_file
    with open(backup_opt.mysql_conf_file, 'w') as conf_fd:
        conf_fd.write('host=abcd\nport=1234\nuser=abcd\npassword=abcd\n')
    assert backup_mode_mysql(backup_opt, 123456789, test_meta) is None
    # A failing connect() must surface as an exception.
    monkeypatch.setattr(MySQLdb, 'connect', FakeMySQLdb2().connect)
    pytest.raises(
        Exception, backup_mode_mysql, backup_opt, 123456789, test_meta)
    os.unlink(backup_opt.mysql_conf_file)
def freezer_main(backup_args):
    '''
    Program Main Execution. This main function is a wrapper for most
    of the other functions. By calling main() the program execution start
    and the respective actions are taken. If you want only use the single
    function is probably better to not import main()
    '''
    # Computing execution start datetime and Timestamp
    (time_stamp, today_start) = start_time()
    # Add timestamp to the arguments namespace
    backup_args.__dict__['time_stamp'] = time_stamp
    # Initialize the swift connector and store it in the same dict passed
    # as argument under the dict.sw_connector namespace. This is helpful
    # so the swift client object doesn't need to be initialized every time
    backup_args = get_client(backup_args)
    # Get the list of the containers
    backup_args = get_containers_list(backup_args)
    if show_containers(backup_args):
        elapsed_time(today_start)
        return True
    # Check if the provided container already exists in swift.
    # If it doesn't exist a new one will be created along with the segments
    # container as container_segments
    backup_args = check_container_existance(backup_args)
    # Get the object list of the remote containers and store it in the
    # same dict passed as argument under the dict.remote_obj_list namespace
    backup_args = get_container_content(backup_args)
    if show_objects(backup_args):
        elapsed_time(today_start)
        return True
    # Check if a backup exists in swift with the same name.
    # If not, set backup level to 0
    manifest_meta_dict = check_backup_existance(backup_args)
    # Set the right backup level for incremental backup
    (backup_args, manifest_meta_dict) = set_backup_level(
        backup_args, manifest_meta_dict)
    backup_args.manifest_meta_dict = manifest_meta_dict
    # File system backup mode selected
    if backup_args.mode == 'fs':
        # If any of the restore options was specified, then a data restore
        # will be executed
        if validate_any_args([
                backup_args.restore_from_date,
                backup_args.restore_from_host,
                backup_args.restore_abs_path]):
            logging.info('[*] Executing FS restore...')
            restore_fs(backup_args)
        else:
            backup_mode_fs(backup_args, time_stamp, manifest_meta_dict)
    elif backup_args.mode == 'mongo':
        backup_mode_mongo(backup_args, time_stamp, manifest_meta_dict)
    elif backup_args.mode == 'mysql':
        backup_mode_mysql(backup_args, time_stamp, manifest_meta_dict)
    else:
        logging.critical('[*] Error: Please provide a valid backup mode')
        # BUG FIX: raise with an explicit message so callers that do not
        # read the log still see why the run failed (was a bare
        # ``raise ValueError``).
        raise ValueError('Please provide a valid backup mode')
    # Prune remote objects past the retention window.
    remove_obj_older_than(backup_args)
    # Elapsed time:
    elapsed_time(today_start)
def freezer_main(backup_args):
    '''
    Program Main Execution. This main function is a wrapper for most
    of the other functions. By calling main() the program execution start
    and the respective actions are taken. If you want only use the single
    function is probably better to not import main()
    '''
    # Computing execution start datetime and Timestamp
    (time_stamp, today_start) = start_time()
    # Add timestamp to the arguments namespace
    backup_args.__dict__['time_stamp'] = time_stamp
    # Initialize the swift connector and store it in the same dict passed
    # as argument under the dict.sw_connector namespace. This is helpful
    # so the swift client object doesn't need to be initialized every time
    backup_args = get_client(backup_args)
    # Get the list of the containers
    backup_args = get_containers_list(backup_args)
    # --show-containers short-circuits the run after listing.
    if show_containers(backup_args):
        elapsed_time(today_start)
        return True
    # Check if the provided container already exists in swift.
    # If it doesn't exist a new one will be created along with the segments
    # container as container_segments
    backup_args = check_container_existance(backup_args)
    # Get the object list of the remote containers and store it in the
    # same dict passed as argument under the dict.remote_obj_list namespace
    backup_args = get_container_content(backup_args)
    # --show-objects also short-circuits the run after listing.
    if show_objects(backup_args):
        elapsed_time(today_start)
        return True
    # Check if a backup exists in swift with the same name.
    # If not, set backup level to 0
    manifest_meta_dict = check_backup_existance(backup_args)
    # Set the right backup level for incremental backup
    (backup_args, manifest_meta_dict) = set_backup_level(
        backup_args, manifest_meta_dict)
    backup_args.manifest_meta_dict = manifest_meta_dict
    # File system backup mode selected
    if backup_args.mode == 'fs':
        # If any of the restore options was specified, then a data restore
        # will be executed instead of a backup
        if validate_any_args([
            backup_args.restore_from_date,
            backup_args.restore_from_host,
            backup_args.restore_abs_path]):
            logging.info('[*] Executing FS restore...')
            restore_fs(backup_args)
        else:
            backup_mode_fs(backup_args, time_stamp, manifest_meta_dict)
    elif backup_args.mode == 'mongo':
        backup_mode_mongo(backup_args, time_stamp, manifest_meta_dict)
    elif backup_args.mode == 'mysql':
        backup_mode_mysql(backup_args, time_stamp, manifest_meta_dict)
    else:
        logging.critical('[*] Error: Please provide a valid backup mode')
        raise ValueError
    # Remove remote objects older than the configured retention period.
    remove_obj_older_than(backup_args)
    # Elapsed time:
    elapsed_time(today_start)