def test_add_host_name_ts_level(self):

    # A missing backup_name must raise an exception
    backup_opt = BackupOpt1()
    backup_opt.__dict__['backup_name'] = False
    pytest.raises(Exception, add_host_name_ts_level, backup_opt)

    # With valid options the generated file name is a unicode string
    backup_opt = BackupOpt1()
    assert type(add_host_name_ts_level(backup_opt)) is unicode
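The test above only pins down the interface of add_host_name_ts_level: it must raise when backup_name is missing and otherwise return a unicode file name. Below is a minimal sketch of such a helper, assuming it joins hostname, backup name, timestamp and backup level with underscores; the field order, the separator and the curr_backup_level attribute are assumptions for illustration, not taken from the source.

import socket
import time


def add_host_name_ts_level_sketch(backup_opt, time_stamp=None):
    # Hypothetical re-implementation for illustration only
    if not backup_opt.backup_name:
        raise Exception('backup_name is required to build the file name')
    time_stamp = time_stamp or int(time.time())
    # Compose <hostname>_<backup_name>_<timestamp>_<level>
    return u'{0}_{1}_{2}_{3}'.format(
        socket.gethostname(), backup_opt.backup_name,
        time_stamp, backup_opt.curr_backup_level)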
def gen_tar_command(
        opt_dict, meta_data_backup_file=False,
        time_stamp=int(time.time()), remote_manifest_meta=False):
    '''
    Generate tar command options.
    '''

    required_list = [
        opt_dict.backup_name,
        opt_dict.src_file,
        os.path.exists(opt_dict.src_file)]
    if not validate_all_args(required_list):
        logging.critical(
            'Error: Please provide ALL of the following options: {0}'.format(
                ','.join([str(req) for req in required_list])))
        raise Exception

    # Change the current working directory to opt_dict.src_file
    os.chdir(os.path.normpath(opt_dict.src_file.strip()))

    logging.info('[*] Changing current working directory to: {0}'.format(
        opt_dict.src_file))
    logging.info('[*] Backup started for: {0}'.format(opt_dict.src_file))

    # Tar options for the default behaviour. Please refer to man tar
    # for a detailed explanation of each option
    tar_command = (
        ' {0} --create -z --warning=none --dereference --hard-dereference'
        ' --no-check-device --one-file-system --preserve-permissions'
        ' --same-owner --seek --ignore-failed-read '.format(
            opt_dict.tar_path))

    file_name = add_host_name_ts_level(opt_dict, time_stamp)
    meta_data_backup_file = u'tar_metadata_{0}'.format(file_name)

    # Incremental backup section
    if not opt_dict.no_incremental:
        (tar_command, opt_dict, remote_manifest_meta) = tar_incremental(
            tar_command, opt_dict, meta_data_backup_file,
            remote_manifest_meta)
    # End incremental backup section

    if opt_dict.exclude:
        tar_command = ' {0} --exclude="{1}" '.format(
            tar_command, opt_dict.exclude)

    tar_command = ' {0} . '.format(tar_command)

    # Encrypt data if a passfile is provided
    if opt_dict.encrypt_pass_file:
        openssl_cmd = "{0} enc -aes-256-cfb -pass file:{1}".format(
            opt_dict.openssl_path, opt_dict.encrypt_pass_file)
        tar_command = '{0} | {1} '.format(tar_command, openssl_cmd)

    return opt_dict, tar_command, remote_manifest_meta
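gen_tar_command proceeds only when validate_all_args approves the required options. That helper is not shown in this section; a minimal sketch, assuming it simply checks that every entry in the list is present and truthy:

def validate_all_args(required_list):
    # Return True only if every required value is truthy; a non-iterable
    # argument is treated as a failed validation rather than an error
    try:
        return all(required_list)
    except TypeError:
        return False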
def backup_mode_fs(backup_opt_dict, time_stamp, manifest_meta_dict):
    '''
    Execute the necessary tasks for file system backup mode
    '''

    logging.info('[*] File System backup is being executed...')
    lvm_snap(backup_opt_dict)

    # Extract the swift client object from the arguments; it is used
    # later on to upload the tar meta data file and the manifest
    sw_connector = backup_opt_dict.sw_connector

    # Generate a string of hostname, backup name, timestamp and backup level
    file_name = add_host_name_ts_level(backup_opt_dict, time_stamp)
    meta_data_backup_file = u'tar_metadata_{0}'.format(file_name)

    # Generate the tar command that streams the specified directory as
    # small chunks (default 128MB) together with the timestamp, backup
    # name, file chunk index and the tar meta-data file
    (backup_opt_dict, tar_command, manifest_meta_dict) = gen_tar_command(
        opt_dict=backup_opt_dict, time_stamp=time_stamp,
        remote_manifest_meta=manifest_meta_dict)

    # Initialize a Queue for a maximum of 2 items
    tar_backup_queue = Queue(maxsize=2)

    tar_backup_stream = Process(
        target=tar_backup,
        args=(backup_opt_dict, tar_command, tar_backup_queue,))
    tar_backup_stream.daemon = True
    tar_backup_stream.start()

    add_object_stream = Process(
        target=add_object,
        args=(backup_opt_dict, tar_backup_queue, file_name, time_stamp))
    add_object_stream.daemon = True
    add_object_stream.start()

    tar_backup_stream.join()
    tar_backup_queue.put({False: False})
    tar_backup_queue.close()
    add_object_stream.join()

    (backup_opt_dict, manifest_meta_dict, tar_meta_to_upload,
        tar_meta_prev) = gen_manifest_meta(
            backup_opt_dict, manifest_meta_dict, meta_data_backup_file)

    manifest_file = u''
    meta_data_abs_path = '{0}/{1}'.format(
        backup_opt_dict.workdir, tar_meta_prev)

    # Upload the swift manifest for the uploaded segments
    if backup_opt_dict.upload:
        if not backup_opt_dict.no_incremental:
            # Upload the tar incremental meta data file and remove it
            logging.info('[*] Uploading tar meta data file: {0}'.format(
                tar_meta_to_upload))
            with open(meta_data_abs_path, 'r') as meta_fd:
                sw_connector.put_object(
                    backup_opt_dict.container, tar_meta_to_upload, meta_fd)
            # Remove the local tar meta data file, so only one
            # authoritative version is kept on swift
            logging.info('[*] Removing tar meta data file: {0}'.format(
                meta_data_abs_path))
            os.remove(meta_data_abs_path)

        # Upload the manifest to swift
        manifest_upload(
            manifest_file, backup_opt_dict, file_name, manifest_meta_dict)

    # Unmount and remove the lvm snapshot volume
    lvm_snap_remove(backup_opt_dict)
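backup_mode_fs wires tar_backup (the producer) and add_object (the consumer) together through a bounded multiprocessing queue. Neither worker is shown in this section; a minimal sketch of the producer side, assuming tar_backup runs the generated pipeline through a shell and pushes fixed-size stdout chunks onto the queue (the 128MB default mirrors the chunk size mentioned in the comments):

import subprocess

CHUNK_SIZE = 128 * 1024 * 1024  # assumed default chunk size


def tar_backup_sketch(backup_opt_dict, tar_command, tar_backup_queue):
    # Hypothetical producer: stream the tar (and optional openssl)
    # pipeline and hand fixed-size chunks to the uploading process
    tar_process = subprocess.Popen(
        tar_command, stdout=subprocess.PIPE, shell=True)
    while True:
        chunk = tar_process.stdout.read(CHUNK_SIZE)
        if not chunk:
            break
        tar_backup_queue.put(chunk)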
def backup_mode_fs(backup_opt_dict, time_stamp, manifest_meta_dict):
    """
    Execute the necessary tasks for file system backup mode
    """

    logging.info('[*] File System backup is being executed...')
    try:
        if is_windows():
            # Create a shadow copy of the volume to be backed up
            backup_opt_dict.shadow_path, backup_opt_dict.shadow = \
                vss_create_shadow_copy(backup_opt_dict.volume)
        else:
            # If lvm_auto_snap is true, the volume group and volume name
            # will be extracted automatically
            if backup_opt_dict.lvm_auto_snap:
                backup_opt_dict = get_lvm_info(backup_opt_dict)

            # Generate the lvm_snap if lvm arguments are available
            lvm_snap(backup_opt_dict)

        # Generate a string of hostname, backup name, timestamp and
        # backup level
        file_name = add_host_name_ts_level(backup_opt_dict, time_stamp)
        meta_data_backup_file = u'tar_metadata_{0}'.format(file_name)
        backup_opt_dict.meta_data_file = meta_data_backup_file

        # Initialize a Queue for a maximum of 2 items
        tar_backup_queue = multiprocessing.Queue(maxsize=2)

        if is_windows():
            backup_opt_dict.absolute_path = backup_opt_dict.src_file
            backup_opt_dict.src_file = use_shadow(
                backup_opt_dict.src_file, backup_opt_dict.volume)

        # Execute a tar gzip of the specified directory and return
        # small chunks (default 128MB), timestamp, backup name,
        # file chunk index and the tar meta-data file
        (backup_opt_dict, tar_command, manifest_meta_dict) = \
            gen_tar_command(opt_dict=backup_opt_dict, time_stamp=time_stamp,
                            remote_manifest_meta=manifest_meta_dict)

        tar_backup_stream = multiprocessing.Process(
            target=tar_backup,
            args=(backup_opt_dict, tar_command, tar_backup_queue,))
        tar_backup_stream.daemon = True
        tar_backup_stream.start()

        add_object_stream = multiprocessing.Process(
            target=add_object,
            args=(backup_opt_dict, tar_backup_queue, file_name, time_stamp))
        add_object_stream.daemon = True
        add_object_stream.start()

        tar_backup_stream.join()
        tar_backup_queue.put({False: False})
        tar_backup_queue.close()
        add_object_stream.join()

        if add_object_stream.exitcode:
            raise Exception('failed to upload object to swift server')

        (backup_opt_dict, manifest_meta_dict, tar_meta_to_upload,
            tar_meta_prev) = gen_manifest_meta(
                backup_opt_dict, manifest_meta_dict, meta_data_backup_file)

        manifest_file = u''
        meta_data_abs_path = os.path.join(
            backup_opt_dict.workdir, tar_meta_prev)

        # Upload the swift manifest for the uploaded segments
        if backup_opt_dict.upload:
            # Request a new auth client in case the current token is
            # expired before uploading the tar meta data or the manifest
            backup_opt_dict = get_client(backup_opt_dict)

            if not backup_opt_dict.no_incremental:
                # Upload the tar incremental meta data file and remove it
                logging.info('[*] Uploading tar meta data file: {0}'.format(
                    tar_meta_to_upload))
                with open(meta_data_abs_path, 'r') as meta_fd:
                    backup_opt_dict.sw_connector.put_object(
                        backup_opt_dict.container, tar_meta_to_upload,
                        meta_fd)
                # Remove the local tar meta data file, so only one
                # authoritative version is kept on swift
                logging.info('[*] Removing tar meta data file: {0}'.format(
                    meta_data_abs_path))
                os.remove(meta_data_abs_path)

            # Upload the manifest to swift
            manifest_upload(
                manifest_file, backup_opt_dict, file_name,
                manifest_meta_dict)

    finally:
        if is_windows():
            # Delete the shadow copy after the backup
            vss_delete_shadow_copy(
                backup_opt_dict.shadow, backup_opt_dict.volume)
        else:
            # Unmount and remove the lvm snapshot volume
            lvm_snap_remove(backup_opt_dict)
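On Windows the source path is rewritten with use_shadow so that tar reads from the VSS shadow copy instead of the live volume. That helper is not shown here; a minimal sketch, assuming the shadow device path is passed in explicitly (the real call site passes only the source path and the volume, so the signature below is an assumption for illustration):

def use_shadow_sketch(src_file, volume, shadow_path):
    # Hypothetical helper: point the source path at the shadow copy by
    # swapping the volume prefix (e.g. 'C:\\') for the shadow device path
    if not src_file.startswith(volume):
        raise Exception(
            'source path {0} is not on volume {1}'.format(src_file, volume))
    return src_file.replace(volume, shadow_path, 1)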