def execute(self):
    """Dispatch a restore based on self.conf.backup_media.

    'fs' restores a filesystem backup through the tar engine; 'nova',
    'cinder' and 'cindernative' delegate to RestoreOs. Raises on any
    other media value.
    """
    conf = self.conf
    logging.info('[*] Executing FS restore...')

    restore_timestamp = None
    restore_abs_path = conf.restore_abs_path
    if conf.restore_from_date:
        restore_timestamp = utils.date_to_timestamp(conf.restore_from_date)

    if conf.backup_media == 'fs':
        tar_builder = tar.TarCommandRestoreBuilder(
            conf.tar_path, restore_abs_path,
            conf.compression, winutils.is_windows())
        if conf.dry_run:
            tar_builder.set_dry_run()
        if winutils.is_windows():
            # tar on windows extracts relative to the current directory
            os.chdir(conf.restore_abs_path)
        if conf.encrypt_pass_file:
            tar_builder.set_encryption(conf.openssl_path,
                                       conf.encrypt_pass_file)
        backup = self.storage.find_one(conf.hostname_backup_name,
                                       restore_timestamp)
        self.engine.restore(backup, restore_abs_path)
        return

    # OpenStack media types are handled by the RestoreOs helper.
    os_restore = restore.RestoreOs(conf.client_manager, conf.container)
    if conf.backup_media == 'nova':
        os_restore.restore_nova(conf.nova_inst_id, restore_timestamp)
    elif conf.backup_media == 'cinder':
        os_restore.restore_cinder_by_glance(conf.cinder, restore_timestamp)
    elif conf.backup_media == 'cindernative':
        os_restore.restore_cinder(conf.cinder_vol_id, restore_timestamp)
    else:
        raise Exception("unknown backup type: %s" % conf.backup_media)
def snapshot_create(backup_opt_dict):
    """Take a filesystem snapshot and return the updated options.

    On Windows a VSS shadow copy is created (when vssadmin is enabled)
    and the backup path is redirected to it; elsewhere an LVM snapshot
    is taken, auto-detecting the volume group/volume when requested.
    """
    on_windows = is_windows()
    if on_windows:
        if backup_opt_dict.vssadmin:
            # Create a shadow copy.
            backup_opt_dict.shadow_path, backup_opt_dict.shadow = \
                vss_create_shadow_copy(backup_opt_dict.windows_volume)
        # execute this after the snapshot creation
        if backup_opt_dict.mode == 'sqlserver':
            start_sql_server(backup_opt_dict)
    else:
        # If lvm_auto_snap is true, the volume group and volume name
        # will be extracted automatically
        if backup_opt_dict.lvm_auto_snap:
            lvm_list = get_lvm_info(backup_opt_dict.lvm_auto_snap)
            backup_opt_dict.lvm_volgroup = lvm_list[0]
            backup_opt_dict.lvm_srcvol = lvm_list[2]
        # Generate the lvm_snap if lvm arguments are available
        lvm_snap(backup_opt_dict)

    if on_windows and backup_opt_dict.vssadmin:
        # Back up from the shadow copy instead of the live volume.
        backup_opt_dict.path_to_backup = use_shadow(
            backup_opt_dict.path_to_backup,
            backup_opt_dict.windows_volume)
    return backup_opt_dict
def snapshot_create(backup_opt_dict):
    """
    Calls the code to take fs snapshots, depending on the platform

    :param backup_opt_dict:
    :return: boolean value, True if snapshot has been taken,
        false otherwise
    """
    if not is_windows():
        return lvm.lvm_snap(backup_opt_dict)

    # vssadmin is to be deprecated in favor of the --snapshot flag
    if backup_opt_dict.snapshot:
        backup_opt_dict.vssadmin = True

    if not backup_opt_dict.vssadmin:
        return False

    # Create a shadow copy and point the backup path at it.
    backup_opt_dict.shadow_path, backup_opt_dict.shadow = \
        vss_create_shadow_copy(backup_opt_dict.windows_volume)
    backup_opt_dict.path_to_backup = use_shadow(
        backup_opt_dict.path_to_backup,
        backup_opt_dict.windows_volume)

    # execute this after the snapshot creation
    if backup_opt_dict.mode == 'sqlserver':
        start_sql_server(backup_opt_dict.sql_server_instance)
    return True
def snapshot_remove(backup_opt_dict, shadow, windows_volume):
    """Tear down the snapshot taken for the backup.

    Deletes the VSS shadow copy on Windows; otherwise unmounts and
    removes the LVM snapshot volume.
    """
    if is_windows():
        # Delete the shadow copy after the backup
        vss_delete_shadow_copy(shadow, windows_volume)
        return
    # Unmount and remove lvm snapshot volume
    lvm.lvm_snap_remove(backup_opt_dict)
def restore_level(self, restore_path, read_pipe):
    """Extract one backup level from read_pipe into restore_path.

    Builds the tar restore command (with decryption when
    self.encrypt_pass_file is set), feeds the pipe's bytes into tar's
    stdin until EOF, then checks the process output for errors.
    """
    builder = tar_builders.TarCommandRestoreBuilder(
        restore_path, self.compression_algo, self.is_windows)
    if self.encrypt_pass_file:
        builder.set_encryption(self.encrypt_pass_file)
    if self.dry_run:
        builder.set_dry_run()
    command = builder.build()

    if winutils.is_windows():
        # on windows, chdir to restore path.
        os.chdir(restore_path)

    tar_process = subprocess.Popen(
        command, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
        stderr=subprocess.PIPE, shell=True)

    # Pump the pipe into tar's stdin; recv_bytes raises EOFError once
    # the writer closes, after which stderr is checked for errors.
    try:
        while True:
            tar_process.stdin.write(read_pipe.recv_bytes())
    except EOFError:
        logging.info('[*] Pipe closed as EOF reached. '
                     'Data transmitted successfully')

    self.check_process_output(tar_process)
def tar_restore(restore_abs_path, tar_command, read_pipe):
    """Pipe archive bytes from read_pipe into a tar restore process.

    :param restore_abs_path: directory the archive is extracted into
    :param tar_command: full shell command line used to run tar
    :param read_pipe: multiprocessing pipe delivering the archive bytes
    Exits the process with status 1 when tar reports an error on stderr.
    """
    if winutils.is_windows():
        # on windows, chdir to restore path.
        os.chdir(restore_abs_path)

    tar_cmd_proc = subprocess.Popen(
        tar_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
        stderr=subprocess.PIPE, shell=True)

    # Start loop reading the pipe and pass the data to the tar std input.
    # If EOFError exception is raised, the loop ends and the std err will
    # be checked for errors.
    try:
        while True:
            tar_cmd_proc.stdin.write(read_pipe.recv_bytes())
    except EOFError:
        # Fixed typo: was "succesfully" (sibling restore_level spells it
        # correctly).
        logging.info(
            '[*] Pipe closed as EOF reached. Data transmitted successfully')

    tar_err = tar_cmd_proc.communicate()[1]

    if 'error' in tar_err.lower():
        logging.exception('[*] Restore error: {0}'.format(tar_err))
        sys.exit(1)
def tar_path():
    """This function returns tar binary path"""
    from freezer import winutils

    if winutils.is_windows():
        # Windows builds ship tar.exe next to this module.
        path_to_binaries = os.path.dirname(os.path.abspath(__file__))
        return '{0}\\bin\\tar.exe'.format(path_to_binaries)

    # Prefer GNU tar variants, falling back to plain 'tar'.
    for candidate in ('gnutar', 'gtar', 'tar'):
        found = get_executable_path(candidate)
        if found:
            return found

    raise Exception('Please install gnu tar (gtar) as it is a '
                    'mandatory requirement to use freezer.')
def tar_restore(backup_opt_dict, read_pipe):
    """
    Restore the provided file into backup_opt_dict.restore_abs_path
    Decrypt the file if backup_opt_dict.encrypt_pass_file key is provided

    Exits the process with status 1 on invalid arguments or tar errors.
    """
    if not tar_restore_args_valid(backup_opt_dict):
        sys.exit(1)

    if backup_opt_dict.dry_run:
        tar_cmd = ' {0} -z --incremental --list \
            --ignore-zeros --warning=none'.format(
            backup_opt_dict.tar_path)
    else:
        tar_cmd = ' {0} -z --incremental --extract \
            --unlink-first --ignore-zeros --warning=none --overwrite \
            --directory {1} '.format(
            backup_opt_dict.tar_path, backup_opt_dict.restore_abs_path)

    if is_windows():
        # on windows, chdir to restore path.
        os.chdir(backup_opt_dict.restore_abs_path)
        tar_cmd = '{0} -x -z --incremental --unlink-first ' \
                  '--ignore-zeros -f - '.format(backup_opt_dict.tar_path)

    # Check if encryption file is provided and set the openssl decrypt
    # command accordingly
    if backup_opt_dict.encrypt_pass_file:
        openssl_cmd = " {0} enc -d -aes-256-cfb -pass file:{1}".format(
            backup_opt_dict.openssl_path, backup_opt_dict.encrypt_pass_file)
        tar_cmd = '{0} | {1} '.format(openssl_cmd, tar_cmd)

    tar_cmd_proc = subprocess.Popen(
        tar_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
        stderr=subprocess.PIPE, shell=True)

    # Start loop reading the pipe and pass the data to the tar std input.
    # If EOFError exception is raised, the loop ends and the std err will
    # be checked for errors.
    try:
        while True:
            tar_cmd_proc.stdin.write(read_pipe.recv_bytes())
    except EOFError:
        # Fixed typo: was "succesfully".
        logging.info(
            '[*] Pipe closed as EOF reached. Data transmitted successfully')

    tar_err = tar_cmd_proc.communicate()[1]

    if 'error' in tar_err.lower():
        logging.exception('[*] Restore error: {0}'.format(tar_err))
        sys.exit(1)
def tar_restore(backup_opt_dict, read_pipe):
    """
    Restore the provided file into backup_opt_dict.restore_abs_path
    Decrypt the file if backup_opt_dict.encrypt_pass_file key is provided

    Exits the process with status 1 on invalid arguments or tar errors.
    """
    if not tar_restore_args_valid(backup_opt_dict):
        sys.exit(1)

    if backup_opt_dict.dry_run:
        tar_cmd = ' {0} -z --incremental --list \
            --ignore-zeros --warning=none'.format(backup_opt_dict.tar_path)
    else:
        tar_cmd = ' {0} -z --incremental --extract \
            --unlink-first --ignore-zeros --warning=none --overwrite \
            --directory {1} '.format(backup_opt_dict.tar_path,
                                     backup_opt_dict.restore_abs_path)

    if is_windows():
        # on windows, chdir to restore path.
        os.chdir(backup_opt_dict.restore_abs_path)
        tar_cmd = '{0} -x -z --incremental --unlink-first ' \
                  '--ignore-zeros -f - '.format(backup_opt_dict.tar_path)

    # Check if encryption file is provided and set the openssl decrypt
    # command accordingly
    if backup_opt_dict.encrypt_pass_file:
        openssl_cmd = " {0} enc -d -aes-256-cfb -pass file:{1}".format(
            backup_opt_dict.openssl_path, backup_opt_dict.encrypt_pass_file)
        tar_cmd = '{0} | {1} '.format(openssl_cmd, tar_cmd)

    tar_cmd_proc = subprocess.Popen(tar_cmd, stdin=subprocess.PIPE,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE, shell=True)

    # Start loop reading the pipe and pass the data to the tar std input.
    # If EOFError exception is raised, the loop ends and the std err will
    # be checked for errors.
    try:
        while True:
            tar_cmd_proc.stdin.write(read_pipe.recv_bytes())
    except EOFError:
        # Fixed typo: was "succesfully".
        logging.info(
            '[*] Pipe closed as EOF reached. Data transmitted successfully')

    tar_err = tar_cmd_proc.communicate()[1]

    if 'error' in tar_err.lower():
        logging.exception('[*] Restore error: {0}'.format(tar_err))
        sys.exit(1)
def ssh_command(ssh_key, ssh_user, ssh_ip, command):
    """Build the ssh command line used to stream backup data.

    Compression is disabled because the payload is already compressed.
    StrictHostKeyChecking and UserKnownHostsFile are set so ssh never
    prompts to accept a host key.

    Returns something like:
    ssh -o Compression=no -o StrictHostKeyChecking=no -o
        UserKnownHostsFile=/dev/null -i mytestpair.pem
        ubuntu@ec2-xx-xx-xxx-xxx.compute-1.amazonaws.com
        "cat > file.tar.gz"
    """
    devnull = "NUL" if winutils.is_windows() else "/dev/null"
    return ('ssh -o Compression=no -o StrictHostKeyChecking=no -o '
            'UserKnownHostsFile={0} -i {1} {2}@{3} "{4}"'.format(
                devnull, ssh_key, ssh_user, ssh_ip, command))
def get_executable_path(binary):
    """
    This function returns the executable path of a given binary
    if it is found in the system.
    :param binary:
    :type binary: str
    :rtype: str
    :return: Absolute Path to the executable file
    """
    from freezer import winutils

    if winutils.is_windows():
        # Windows builds bundle executables under <module dir>\bin.
        base_dir = os.path.dirname(os.path.abspath(__file__))
        return '{0}\\bin\\{1}.exe'.format(base_dir, binary)
    if is_bsd():
        # On BSD also search the sys.path entries as a fallback.
        return (distspawn.find_executable(binary) or
                distspawn.find_executable(binary, path=':'.join(sys.path)))
    return distspawn.find_executable(binary)
def snapshot_create(backup_opt_dict):
    """
    Calls the code to take fs snapshots, depending on the platform

    :param backup_opt_dict:
    :return: boolean value, True if snapshot has been taken,
        false otherwise
    """
    if not winutils.is_windows():
        return lvm.lvm_snap(backup_opt_dict)

    if not backup_opt_dict.snapshot:
        return False

    # Create a shadow copy and back up from it instead of the live
    # volume.
    backup_opt_dict.shadow_path, backup_opt_dict.shadow = \
        vss.vss_create_shadow_copy(backup_opt_dict.windows_volume)
    backup_opt_dict.path_to_backup = winutils.use_shadow(
        backup_opt_dict.path_to_backup,
        backup_opt_dict.windows_volume)
    return True
def freezer_main(backup_args):
    """Freezer main loop for job execution.

    Configures logging, optionally raises process priority, builds the
    storage backend and tar engine from backup_args, then runs the job
    (via trickle bandwidth shaping when configured).
    """

    def configure_log_file_using_defaults():
        """ Configure log file for freezer """
        dry_run_message = ''
        if backup_args.dry_run:
            dry_run_message = '[DRY_RUN] '

        def configure_logging(file_name):
            expanded_file_name = os.path.expanduser(file_name)
            expanded_dir_name = os.path.dirname(expanded_file_name)
            utils.create_dir(expanded_dir_name, do_log=False)
            logging.basicConfig(
                filename=expanded_file_name,
                level=logging.INFO,
                format=('%(asctime)s %(name)s %(levelname)s {0}%(message)s'.
                        format(dry_run_message)))
            return expanded_file_name

        if backup_args.log_file:
            return configure_logging(backup_args.log_file)

        # Fall back to system log dir, then the user's home.
        for file_name in ['/var/log/freezer.log', '~/.freezer/freezer.log']:
            try:
                return configure_logging(file_name)
            except IOError:
                pass
        raise Exception("Unable to write to log file")

    def set_max_process_priority():
        """ Set freezer in max priority on the os """
        # children processes inherit niceness from father
        try:
            logging.warning(
                '[*] Setting freezer execution with high CPU and I/O '
                'priority')
            PID = os.getpid()
            # Set cpu priority
            os.nice(-19)
            # Set I/O Priority to Real Time class with level 0
            subprocess.call([
                u'{0}'.format(utils.find_executable("ionice")),
                u'-c', u'1', u'-n', u'0', u'-t',
                u'-p', u'{0}'.format(PID)
            ])
        except Exception as priority_error:
            logging.warning('[*] Priority: {0}'.format(priority_error))

    try:
        log_file_name = configure_log_file_using_defaults()
    except Exception as err:
        fail(1, err, quiet=backup_args.quiet, do_log=False)

    if not backup_args.quiet:
        logging.info('log file at {0}'.format(log_file_name))

    if backup_args.max_priority:
        set_max_process_priority()

    monkeypatch_socket_bandwidth(backup_args)

    backup_args.__dict__['hostname_backup_name'] = "{0}_{1}".format(
        backup_args.hostname, backup_args.backup_name)

    validator.validate(backup_args)

    if backup_args.storage == "swift":
        options = utils.OpenstackOptions.create_from_env()
        identity_api_version = (backup_args.os_identity_api_version or
                                options.identity_api_version)
        client_manager = ClientManager(
            options=options,
            insecure=backup_args.insecure,
            swift_auth_version=identity_api_version,
            dry_run=backup_args.dry_run)

        storage = swift.SwiftStorage(
            client_manager, backup_args.container, backup_args.work_dir,
            backup_args.max_segment_size)
        backup_args.__dict__['client_manager'] = client_manager
    elif backup_args.storage == "local":
        storage = local.LocalStorage(backup_args.container,
                                     backup_args.work_dir)
    elif backup_args.storage == "ssh":
        storage = ssh.SshStorage(
            backup_args.container, backup_args.work_dir, backup_args.ssh_key,
            backup_args.ssh_username, backup_args.ssh_host,
            backup_args.ssh_port)
    else:
        raise Exception("Not storage found for name " + backup_args.storage)

    backup_args.__dict__['engine'] = tar_engine.TarBackupEngine(
        backup_args.compression,
        backup_args.dereference_symlink,
        backup_args.exclude,
        storage,
        winutils.is_windows(),
        backup_args.encrypt_pass_file,
        backup_args.dry_run)

    if hasattr(backup_args, 'trickle_command'):
        if "tricklecount" in os.environ:
            if int(os.environ.get("tricklecount")) > 1:
                logging.critical("[*] Trickle seems to be not working,"
                                 " Switching to normal mode ")
                run_job(backup_args, storage)

        freezer_command = '{0} {1}'.format(backup_args.trickle_command,
                                           ' '.join(sys.argv))
        process = subprocess.Popen(freezer_command.split(),
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   env=os.environ.copy())
        while process.poll() is None:
            # Fixed: was the Python-2-only "print x" statement form; the
            # parenthesized call works identically on py2 and py3 (the
            # sibling freezer_main in this file already uses it).
            print(process.stdout.readline().rstrip())
        output, error = process.communicate()

        if process.returncode:
            logging.error("[*] Trickle Error: {0}".format(error))
            logging.critical("[*] Switching to work without trickle ...")
            run_job(backup_args, storage)
    else:
        run_job(backup_args, storage)
def backup_mode_fs(backup_opt_dict, time_stamp, manifest_meta_dict):
    """ Execute the necessary tasks for file system backup mode """

    logging.info('[*] File System backup is being executed...')
    try:
        if is_windows():
            # Create a shadow copy.
            backup_opt_dict.shadow_path, backup_opt_dict.shadow = \
                vss_create_shadow_copy(backup_opt_dict.volume)
        else:
            # If lvm_auto_snap is true, the volume group and volume name
            # will be extracted automatically
            if backup_opt_dict.lvm_auto_snap:
                backup_opt_dict = get_lvm_info(backup_opt_dict)
            # Generate the lvm_snap if lvm arguments are available
            lvm_snap(backup_opt_dict)

        # Generate a string hostname, backup name, timestamp and backup
        # level
        file_name = add_host_name_ts_level(backup_opt_dict, time_stamp)
        meta_data_backup_file = u'tar_metadata_{0}'.format(file_name)
        backup_opt_dict.meta_data_file = meta_data_backup_file

        # Initialize a Queue for a maximum of 2 items
        tar_backup_queue = multiprocessing.Queue(maxsize=2)

        if is_windows():
            backup_opt_dict.absolute_path = backup_opt_dict.src_file
            backup_opt_dict.src_file = use_shadow(
                backup_opt_dict.src_file, backup_opt_dict.volume)

        # Execute a tar gzip of the specified directory and return
        # small chunks (default 128MB), timestamp, backup, filename,
        # file chunk index and the tar meta-data file
        (backup_opt_dict, tar_command, manifest_meta_dict) = \
            gen_tar_command(opt_dict=backup_opt_dict,
                            time_stamp=time_stamp,
                            remote_manifest_meta=manifest_meta_dict)

        # Producer: streams tar chunks into the queue.
        tar_backup_stream = multiprocessing.Process(
            target=tar_backup,
            args=(backup_opt_dict, tar_command, tar_backup_queue,))
        tar_backup_stream.daemon = True
        tar_backup_stream.start()

        # Consumer: uploads chunks from the queue to swift.
        add_object_stream = multiprocessing.Process(
            target=add_object,
            args=(backup_opt_dict, tar_backup_queue, file_name, time_stamp))
        add_object_stream.daemon = True
        add_object_stream.start()

        tar_backup_stream.join()
        # Sentinel telling the consumer that no more chunks will arrive.
        tar_backup_queue.put(({False: False}))
        tar_backup_queue.close()
        add_object_stream.join()

        if add_object_stream.exitcode:
            raise Exception('failed to upload object to swift server')

        (backup_opt_dict, manifest_meta_dict, tar_meta_to_upload,
         tar_meta_prev) = gen_manifest_meta(
            backup_opt_dict, manifest_meta_dict, meta_data_backup_file)

        manifest_file = u''
        meta_data_abs_path = os.path.join(backup_opt_dict.workdir,
                                          tar_meta_prev)

        # Upload swift manifest for segments
        if backup_opt_dict.upload:
            # Request a new auth client in case the current token
            # is expired before uploading tar meta data or the swift
            # manifest
            backup_opt_dict = get_client(backup_opt_dict)

            if not backup_opt_dict.no_incremental:
                # Upload tar incremental meta data file and remove it
                logging.info('[*] Uploading tar meta data file: {0}'.format(
                    tar_meta_to_upload))
                with open(meta_data_abs_path, 'r') as meta_fd:
                    backup_opt_dict.sw_connector.put_object(
                        backup_opt_dict.container, tar_meta_to_upload,
                        meta_fd)
                # Removing tar meta data file, so we have only one
                # authoritative version on swift
                logging.info('[*] Removing tar meta data file: {0}'.format(
                    meta_data_abs_path))
                os.remove(meta_data_abs_path)

            # Upload manifest to swift
            manifest_upload(manifest_file, backup_opt_dict, file_name,
                            manifest_meta_dict)
    finally:
        if is_windows():
            # Delete the shadow copy after the backup
            vss_delete_shadow_copy(backup_opt_dict.shadow,
                                   backup_opt_dict.volume)
        else:
            # Unmount and remove lvm snapshot volume
            lvm_snap_remove(backup_opt_dict)
def freezer_main(backup_args):
    """Freezer main loop for job execution.

    Optionally raises process priority, builds the storage backend(s)
    and tar engine from backup_args, then runs the job (via trickle
    bandwidth shaping when configured).
    """

    def set_max_process_priority():
        """ Set freezer in max priority on the os """
        # children processes inherit niceness from father
        try:
            LOG.warning(
                '[*] Setting freezer execution with high CPU and I/O '
                'priority')
            PID = os.getpid()
            # Set cpu priority
            os.nice(-19)
            # Set I/O Priority to Real Time class with level 0
            subprocess.call([
                u'{0}'.format(utils.find_executable("ionice")),
                u'-c', u'1', u'-n', u'0', u'-t',
                u'-p', u'{0}'.format(PID)
            ])
        except Exception as priority_error:
            LOG.warning('[*] Priority: {0}'.format(priority_error))

    if not backup_args.quiet:
        LOG.info('log file at {0}'.format(CONF.get('log_file')))

    if backup_args.max_priority:
        set_max_process_priority()

    bandwidth.monkeypatch_socket_bandwidth(backup_args)

    backup_args.__dict__['hostname_backup_name'] = "{0}_{1}".format(
        backup_args.hostname, backup_args.backup_name)

    validator.validate(backup_args)

    work_dir = backup_args.work_dir
    os_identity = backup_args.os_identity_api_version
    max_segment_size = backup_args.max_segment_size

    # Several storages may be configured; wrap them in MultipleStorage.
    if backup_args.storages:
        storage = multiple.MultipleStorage(
            work_dir,
            [storage_from_dict(x, work_dir, max_segment_size, os_identity)
             for x in backup_args.storages])
    else:
        storage = storage_from_dict(backup_args.__dict__, work_dir,
                                    max_segment_size, os_identity)

    backup_args.__dict__['engine'] = tar_engine.TarBackupEngine(
        backup_args.compression,
        backup_args.dereference_symlink,
        backup_args.exclude,
        storage,
        winutils.is_windows(),
        backup_args.encrypt_pass_file,
        backup_args.dry_run)

    if hasattr(backup_args, 'trickle_command'):
        if "tricklecount" in os.environ:
            if int(os.environ.get("tricklecount")) > 1:
                LOG.critical("[*] Trickle seems to be not working, Switching "
                             "to normal mode ")
                run_job(backup_args, storage)

        freezer_command = '{0} {1}'.format(backup_args.trickle_command,
                                           ' '.join(sys.argv))
        process = subprocess.Popen(freezer_command.split(),
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   env=os.environ.copy())
        while process.poll() is None:
            print(process.stdout.readline().rstrip())
        output, error = process.communicate()

        if process.returncode:
            LOG.error("[*] Trickle Error: {0}".format(error))
            LOG.critical("[*] Switching to work without trickle ...")
            run_job(backup_args, storage)
    else:
        run_job(backup_args, storage)
def test_is_windows(self, monkeypatch):
    """is_windows() must report False when os.name is not 'nt'."""
    fake_os = Os()
    # Replace os.name with a stand-in object so the check cannot match.
    monkeypatch.setattr(os, 'name', fake_os)
    assert is_windows() is False
def backup_arguments(args_dict={}):
    """
    Default arguments and command line options interface. The function
    return a name space called backup_args.

    NOTE(review): the mutable default ``args_dict={}`` is only read,
    never mutated, so it is kept for interface compatibility.
    """
    arg_parser = argparse.ArgumentParser(prog='freezerc')
    arg_parser.add_argument(
        '--action', choices=['backup', 'restore', 'info', 'admin'],
        help=(
            "Set the action to be taken. backup and restore are"
            " self explanatory, info is used to retrieve info from the"
            " storage media, while admin is used to delete old backups"
            " and other admin actions. Default backup."),
        dest='action', default='backup')
    arg_parser.add_argument(
        '-F', '--path-to-backup', '--file-to-backup', action='store',
        help="The file or directory you want to back up to Swift",
        dest='src_file', default=False)
    arg_parser.add_argument(
        '-N', '--backup-name', action='store',
        help="The backup name you want to use to identify your backup \
        on Swift", dest='backup_name', default=False)
    arg_parser.add_argument(
        '-m', '--mode', action='store',
        help="Set the technology to back from. Options are, fs (filesystem),\
        mongo (MongoDB), mysql (MySQL), sqlserver (SQL Server)\
        Default set to fs", dest='mode', default='fs')
    arg_parser.add_argument(
        '-C', '--container', action='store',
        help="The Swift container used to upload files to",
        dest='container', default='freezer_backups')
    arg_parser.add_argument(
        '-L', '--list-containers', action='store_true',
        help='''List the Swift containers on remote Object Storage
        Server''', dest='list_container', default=False)
    arg_parser.add_argument(
        '-l', '--list-objects', action='store_true',
        help='''List the Swift objects stored in a container on remote Object\
        Storage Server.''', dest='list_objects', default=False)
    arg_parser.add_argument(
        '-o', '--get-object', action='store',
        help="The Object name you want to download on the local file system.",
        dest='object', default=False)
    arg_parser.add_argument(
        '-d', '--dst-file', action='store',
        help="The file name used to save the object on your local disk and\
        upload file in swift", dest='dst_file', default=False)
    arg_parser.add_argument(
        '--lvm-auto-snap', action='store',
        help=("Automatically guess the volume group and volume name for "
              "given PATH."),
        dest='lvm_auto_snap', default=False)
    arg_parser.add_argument(
        '--lvm-srcvol', action='store',
        help="Set the lvm volume you want to take a snaphost from. Default\
        no volume", dest='lvm_srcvol', default=False)
    arg_parser.add_argument(
        '--lvm-snapname', action='store',
        help="Set the lvm snapshot name to use. If the snapshot name already\
        exists, the old one will be used a no new one will be created. Default\
        freezer_backup_snap.", dest='lvm_snapname', default=False)
    arg_parser.add_argument(
        '--lvm-snapsize', action='store',
        help="Set the lvm snapshot size when creating a new snapshot.\
        Please add G for Gigabytes or M for Megabytes, i.e. 500M or 8G.\
        Default 5G.", dest='lvm_snapsize', default=False)
    arg_parser.add_argument(
        '--lvm-dirmount', action='store',
        help="Set the directory you want to mount the lvm snapshot to.\
        Default not set", dest='lvm_dirmount', default=False)
    arg_parser.add_argument(
        '--lvm-volgroup', action='store',
        help="Specify the volume group of your logical volume.\
        This is important to mount your snapshot volume. Default not set",
        dest='lvm_volgroup', default=False)
    arg_parser.add_argument(
        '--max-level', action='store',
        help="Set the backup level used with tar to implement incremental \
        backup. If a level 1 is specified but no level 0 is already \
        available, a level 0 will be done and subsequently backs to level 1.\
        Default 0 (No Incremental)", dest='max_backup_level',
        type=int, default=False)
    arg_parser.add_argument(
        '--always-level', action='store', help="Set backup\
        maximum level used with tar to implement incremental backup. If a \
        level 3 is specified, the backup will be executed from level 0 to \
        level 3 and to that point always a backup level 3 will be executed. \
        It will not restart from level 0. This option has precedence over \
        --max-backup-level. Default False (Disabled)",
        dest='always_backup_level', type=int, default=False)
    arg_parser.add_argument(
        '--restart-always-level', action='store', help="Restart the backup \
        from level 0 after n days. Valid only if --always-level option \
        if set. If --always-level is used together with --remove-older-then, \
        there might be the chance where the initial level 0 will be removed \
        Default False (Disabled)",
        dest='restart_always_backup', type=float, default=False)
    arg_parser.add_argument(
        '-R', '--remove-older-then', '--remove-older-than', action='store',
        help=('Checks in the specified container for object older than the '
              'specified days.'
              'If i.e. 30 is specified, it will remove the remote object '
              'older than 30 days. Default False (Disabled) '
              'The option --remove-older-then is deprecated '
              'and will be removed soon'),
        dest='remove_older_than', type=float, default=None)
    arg_parser.add_argument(
        '--remove-from-date', action='store',
        help=('Checks the specified container and removes objects older than '
              'the provided datetime in the form "YYYY-MM-DDThh:mm:ss '
              'i.e. "1974-03-25T23:23:23". '
              'Make sure the "T" is between date and time '),
        dest='remove_from_date', default=False)
    arg_parser.add_argument(
        '--no-incremental', action='store_true',
        help='''Disable incremental feature. By default freezer build the
        meta data even for level 0 backup. By setting this option
        incremental meta data is not created at all. Default disabled''',
        dest='no_incremental', default=False)
    arg_parser.add_argument(
        '--hostname', action='store',
        help='''Set hostname to execute actions. If you are executing
        freezer from one host but you want to delete objects belonging
        to another host then you can set this option that hostname and
        execute appropriate actions. Default current node hostname.''',
        dest='hostname', default=False)
    arg_parser.add_argument(
        '--mysql-conf', action='store',
        help='''Set the MySQL configuration file where freezer retrieve
        important information as db_name, user, password, host, port.
        Following is an example of config file:
        # cat ~/.freezer/backup_mysql_conf
        host = <db-host>
        user = <mysqluser>
        password = <mysqlpass>
        port = <db-port>''',
        dest='mysql_conf_file', default=False)
    if is_windows():
        arg_parser.add_argument(
            '--log-file', action='store',
            help='Set log file. By default logs to ~/freezer.log',
            dest='log_file',
            default=os.path.join(home, '.freezer', 'freezer.log'))
    else:
        arg_parser.add_argument(
            '--log-file', action='store',
            help='Set log file. By default logs to /var/log/freezer.log'
                 'If that file is not writable, freezer tries to log'
                 'to ~/.freezer/freezer.log',
            dest='log_file', default=None)
    arg_parser.add_argument(
        '--exclude', action='store', help="Exclude files,\
        given as a PATTERN.Ex: --exclude '*.log' will exclude any file \
        with name ending with .log. Default no exclude", dest='exclude',
        default=False)
    arg_parser.add_argument(
        '--dereference-symlink', choices=['none', 'soft', 'hard', 'all'],
        help=(
            "Follow hard and soft links and archive and dump the files they "
            " refer to. Default False."),
        dest='dereference_symlink', default='none')
    arg_parser.add_argument(
        '-U', '--upload', action='store_true',
        help="Upload to Swift the destination file passed to the -d option.\
        Default upload the data", dest='upload', default=True)
    arg_parser.add_argument(
        '--encrypt-pass-file', action='store',
        help="Passing a private key to this option, allow you to encrypt the \
        files before to be uploaded in Swift. Default do not encrypt.",
        dest='encrypt_pass_file', default=False)
    arg_parser.add_argument(
        '-M', '--max-segment-size', action='store',
        help="Set the maximum file chunk size in bytes to upload to swift\
        Default 67108864 bytes (64MB)",
        dest='max_seg_size', type=int, default=67108864)
    arg_parser.add_argument(
        '--restore-abs-path', action='store',
        help=('Set the absolute path where you want your data restored. '
              'Default False.'),
        dest='restore_abs_path', default=False)
    arg_parser.add_argument(
        '--restore-from-host', action='store',
        help='''Set the hostname used to identify the data you want to
        restore from. If you want to restore data in the same host where
        the backup was executed just type from your shell: "$ hostname"
        and the output is the value that needs to be passed to this
        option. Mandatory with Restore Default False.''',
        dest='restore_from_host', default=False)
    arg_parser.add_argument(
        '--restore-from-date', action='store',
        help='''Set the absolute path where you want your data restored.
        Please provide datetime in format "YYYY-MM-DDThh:mm:ss"
        i.e. "1979-10-03T23:23:23". Make sure the "T" is between date
        and time Default False.''',
        dest='restore_from_date', default=False)
    arg_parser.add_argument(
        '--max-priority', action='store_true',
        help='''Set the cpu process to the highest priority (i.e. -20 on
        Linux) and real-time for I/O. The process priority will be set
        only if nice and ionice are installed Default disabled.
        Use with caution.''',
        dest='max_priority', default=False)
    arg_parser.add_argument(
        '-V', '--version', action='store_true',
        help='''Print the release version and exit''',
        dest='version', default=False)
    arg_parser.add_argument(
        '-q', '--quiet', action='store_true',
        help='''Suppress error messages''',
        dest='quiet', default=False)
    arg_parser.add_argument(
        '--insecure', action='store_true',
        help='Allow to access swift servers without checking SSL certs.',
        dest='insecure', default=False)
    arg_parser.add_argument(
        '--os-auth-ver', choices=['1', '2', '3'], action='store',
        help='Swift auth version, could be 1, 2 or 3',
        dest='auth_version', default=2)
    arg_parser.add_argument(
        '--proxy', action='store',
        help='''Enforce proxy that alters system HTTP_PROXY and
        HTTPS_PROXY, use \'\' to eliminate all system proxies''',
        dest='proxy', default=False)
    arg_parser.add_argument(
        '--dry-run', action='store_true',
        help='Do everything except writing or removing objects',
        dest='dry_run', default=False)
    arg_parser.add_argument(
        '--upload-limit', action='store',
        help='''Upload bandwidth limit in Bytes per sec.
        Can be invoked with dimensions (10K, 120M, 10G).''',
        dest='upload_limit', type=utils.human2bytes, default=-1)
    arg_parser.add_argument(
        '--download-limit', action='store',
        help='''Download bandwidth limit in Bytes per sec.
        Can be invoked with dimensions (10K, 120M, 10G).''',
        dest='download_limit', type=utils.human2bytes, default=-1)
    arg_parser.add_argument(
        '--sql-server-conf', action='store',
        help='''Set the SQL Server configuration file where freezer
        retrieve the sql server instance.
        Following is an example of config file:
        instance = <db-instance>''',
        dest='sql_server_config', default=False)
    arg_parser.add_argument(
        '--volume', action='store',
        help='Create a snapshot of the selected volume',
        dest='volume', default=False)

    backup_args = arg_parser.parse_args()

    # windows bin
    path_to_binaries = os.path.dirname(os.path.abspath(__file__))

    # Intercept command line arguments if you are not using the CLI
    if args_dict:
        backup_args.__dict__.update(args_dict)

    # Set additional namespace attributes
    backup_args.__dict__['remote_match_backup'] = []
    backup_args.__dict__['remote_objects'] = []
    backup_args.__dict__['remote_obj_list'] = []
    backup_args.__dict__['remote_newest_backup'] = u''
    # Set default workdir to ~/.freezer
    backup_args.__dict__['workdir'] = os.path.join(home, '.freezer')
    # Create a new namespace attribute for container_segments
    backup_args.__dict__['container_segments'] = u'{0}_segments'.format(
        backup_args.container)

    # The containers used by freezer to executed backups needs to have
    # freezer_ prefix in the name. If the user provider container doesn't
    # have the prefix, it is automatically added also to the container
    # segments name. This is done to quickly identify the containers
    # that contain freezer generated backups
    if not backup_args.container.startswith('freezer_'):
        backup_args.container = 'freezer_{0}'.format(
            backup_args.container)
        backup_args.container_segments = 'freezer_{0}'.format(
            backup_args.container_segments)

    # If hostname is not set, hostname of the current node will be used
    if not backup_args.hostname:
        backup_args.__dict__['hostname'] = socket.gethostname()

    # NOTE(review): manifest_meta_dict is set to {} and then immediately
    # overwritten with '' — preserved exactly as in the original.
    backup_args.__dict__['manifest_meta_dict'] = {}
    backup_args.__dict__['curr_backup_level'] = ''
    backup_args.__dict__['manifest_meta_dict'] = ''

    if is_windows():
        backup_args.__dict__['tar_path'] = '{0}\\bin\\tar.exe'.\
            format(path_to_binaries)
    else:
        backup_args.__dict__['tar_path'] = distspawn.find_executable('tar')

    # If freezer is being used under OSX, please install gnutar and
    # rename the executable as gnutar
    if 'darwin' in sys.platform or 'bsd' in sys.platform:
        if distspawn.find_executable('gtar'):
            backup_args.__dict__['tar_path'] = \
                distspawn.find_executable('gtar')
        else:
            raise Exception('Please install gnu tar (gtar) as it is a '
                            'mandatory requirement to use freezer.')

    # If we have provided --proxy then overwrite the system HTTP_PROXY and
    # HTTPS_PROXY
    alter_proxy(backup_args.__dict__)

    # Get absolute path of other commands used by freezer
    backup_args.__dict__['lvcreate_path'] = distspawn.find_executable(
        'lvcreate')
    backup_args.__dict__['lvremove_path'] = distspawn.find_executable(
        'lvremove')
    backup_args.__dict__['bash_path'] = distspawn.find_executable('bash')
    if is_windows():
        backup_args.__dict__['openssl_path'] = 'openssl'
    else:
        backup_args.__dict__['openssl_path'] = \
            distspawn.find_executable('openssl')
    backup_args.__dict__['file_path'] = distspawn.find_executable('file')
    backup_args.__dict__['mount_path'] = distspawn.find_executable('mount')
    backup_args.__dict__['umount_path'] = distspawn.find_executable('umount')
    backup_args.__dict__['ionice'] = distspawn.find_executable('ionice')

    # MySQLdb object
    backup_args.__dict__['mysql_db_inst'] = ''
    # SQL Server object
    backup_args.__dict__['sql_server_instance'] = ''
    # Windows volume
    backup_args.__dict__['shadow'] = ''
    backup_args.__dict__['shadow_path'] = ''
    backup_args.__dict__['file_name'] = ''
    backup_args.__dict__['meta_data'] = {}
    backup_args.__dict__['meta_data_file'] = ''
    backup_args.__dict__['absolute_path'] = ''
    # Freezer version
    backup_args.__dict__['__version__'] = '1.1.3'

    return backup_args, arg_parser
def main():
    """Entry point of the freezer scheduler command line tool.

    Builds the table of "doer" actions from the shell and utils modules,
    parses the command line, and then either dispatches directly to a doer
    or drives the scheduler daemon (start/stop/reload/status).

    :return: a sysexits(3)-style integer exit code, 0 on success.
    """
    doers = _get_doers(shell)
    doers.update(_get_doers(utils))
    # list() so the concatenation also works on Python 3 where keys() is a
    # view.  'reload' is included so the daemon.reload() branch below is
    # actually reachable through argument validation.
    possible_actions = list(doers.keys()) + ['start', 'stop', 'reload',
                                             'status']

    args = arguments.get_args(possible_actions)

    if args.action is None:
        print('No action')
        return 65  # os.EX_DATAERR

    apiclient = None
    verify = True
    if args.insecure:
        verify = False

    if args.no_api is False:
        apiclient = client.Client(opts=args, verify=verify)
        if args.client_id:
            apiclient.client_id = args.client_id
    else:
        if winutils.is_windows():
            print("--no-api mode is not available on windows")
            return 69  # os.EX_UNAVAILABLE

    if args.action in doers:
        try:
            return doers[args.action](apiclient, args)
        except Exception as e:
            print('ERROR {0}'.format(e))
            return 70  # os.EX_SOFTWARE

    freezer_scheduler = FreezerScheduler(apiclient=apiclient,
                                         interval=int(args.interval),
                                         job_path=args.jobs_dir)

    if args.no_daemon:
        print('Freezer Scheduler running in no-daemon mode')
        daemon = NoDaemon(daemonizable=freezer_scheduler)
    else:
        # The Windows daemon implementation needs the scheduler parameters
        # up front because it re-spawns itself as a service.
        if winutils.is_windows():
            daemon = Daemon(daemonizable=freezer_scheduler,
                            interval=int(args.interval),
                            job_path=args.jobs_dir,
                            insecure=args.insecure)
        else:
            daemon = Daemon(daemonizable=freezer_scheduler)

    if args.action == 'start':
        daemon.start(log_file=args.log_file)
    elif args.action == 'stop':
        daemon.stop()
    elif args.action == 'reload':
        daemon.reload()
    elif args.action == 'status':
        daemon.status()

    # os.RETURN_CODES are only available to posix like systems, on windows
    # we need to translate the code to an actual number which is the
    # equivalent
    return 0  # os.EX_OK
def openssl_path():
    """Locate the openssl executable for the current platform.

    On Windows the bare command name is returned (resolved via PATH by the
    caller); on other platforms the absolute path is looked up.
    """
    from freezer import winutils

    running_on_windows = winutils.is_windows()
    return 'openssl' if running_on_windows else find_executable('openssl')
def backup_arguments(args_dict=None):
    """Build the freezer command line / config file interface.

    A first pass parses only ``--config``; values found in the config file
    become argparse defaults, so explicit command line options always take
    precedence.  After the full parse a number of runtime attributes are
    derived and stored on the namespace (helper binary paths, working
    directory, backup media type, windows volume, ...).

    :param args_dict: optional mapping that overrides the parsed options
        programmatically (e.g. functional testing) instead of the CLI.
    :return: tuple ``(backup_args, arg_parser)``: the populated namespace
        and the parser itself.
    """
    # None default avoids the shared-mutable-default pitfall; behaviour is
    # unchanged because an empty dict was already treated as "absent".
    args_dict = args_dict or {}
    conf_parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        add_help=False,
        prog="freezerc"
    )
    conf_parser.add_argument(
        "--config",
        action="store",
        dest="config",
        default=False,
        help=(
            "Config file abs path. Option arguments are provided "
            "from config file. When config file is used any option "
            "from command line provided take precedence."
        ),
    )
    defaults = DEFAULT_PARAMS.copy()
    args, remaining_argv = conf_parser.parse_known_args()
    if args.config:
        config = configparser.SafeConfigParser()
        config.read([args.config])
        section = config.sections()[0]
        for option in config.options(section):
            option_value = config.get(section, option)
            # config files store everything as strings; normalize the
            # "disabled" markers to the boolean argparse defaults expect
            if option_value in ("False", "None"):
                option_value = False
            defaults[option] = option_value

    # Generate a new argparse instance and inherit options from config parse
    arg_parser = argparse.ArgumentParser(parents=[conf_parser])
    arg_parser.add_argument(
        "--action",
        choices=["backup", "restore", "info", "admin", "exec"],
        help=(
            "Set the action to be taken. backup and restore are"
            " self explanatory, info is used to retrieve info from the"
            " storage media, exec is used to execute a script,"
            " while admin is used to delete old backups"
            " and other admin actions. Default backup."
        ),
        dest="action",
        default="backup",
    )
    arg_parser.add_argument(
        "-F", "--path-to-backup", "--file-to-backup",
        action="store",
        help="The file or directory you want to back up to Swift",
        dest="path_to_backup",
        default=False,
    )
    arg_parser.add_argument(
        "-N", "--backup-name",
        action="store",
        help="The backup name you want to use to identify your backup "
             "on Swift",
        dest="backup_name",
        default=False,
    )
    arg_parser.add_argument(
        "-m", "--mode",
        action="store",
        help="Set the technology to back from. Options are, fs (filesystem), "
             "mongo (MongoDB), mysql (MySQL), sqlserver (SQL Server) "
             "Default set to fs",
        dest="mode",
        default="fs",
    )
    arg_parser.add_argument(
        "-C", "--container",
        action="store",
        help="The Swift container (or path to local storage) "
             "used to upload files to",
        dest="container",
        default="freezer_backups",
    )
    arg_parser.add_argument(
        "-L", "--list-containers",
        action="store_true",
        help="""List the Swift containers on remote Object Storage Server""",
        dest="list_containers",
        default=False,
    )
    arg_parser.add_argument(
        "-l", "--list-objects",
        action="store_true",
        help="""List the Swift objects stored in a container on remote Object
        Storage Server.""",
        dest="list_objects",
        default=False,
    )
    arg_parser.add_argument(
        "-o", "--get-object",
        action="store",
        help="The Object name you want to download on the local file system.",
        dest="get_object",
        default=False,
    )
    arg_parser.add_argument(
        "-d", "--dst-file",
        action="store",
        help="The file name used to save the object on your local disk and "
             "upload file in swift",
        dest="dst_file",
        default=False,
    )
    arg_parser.add_argument(
        "--lvm-auto-snap",
        action="store",
        help=("Automatically guess the volume group and volume name for "
              "given PATH."),
        dest="lvm_auto_snap",
        default=False,
    )
    arg_parser.add_argument(
        "--lvm-srcvol",
        action="store",
        help="Set the lvm volume you want to take a snaphost from. Default "
             "no volume",
        dest="lvm_srcvol",
        default=False,
    )
    arg_parser.add_argument(
        "--lvm-snapname",
        action="store",
        help="Set the lvm snapshot name to use. If the snapshot name already "
             "exists, the old one will be used a no new one will be created. "
             "Default freezer_backup_snap.",
        dest="lvm_snapname",
        default=False,
    )
    arg_parser.add_argument(
        "--lvm-snap-perm",
        action="store",
        choices=["ro", "rw"],
        help="Set the lvm snapshot permission to use. If the permission "
             "is set to ro The snapshot will be immutable - read only -. "
             "If the permission is set to rw it will be mutable",
        dest="lvm_snapperm",
        default="ro",
    )
    arg_parser.add_argument(
        "--lvm-snapsize",
        action="store",
        help="Set the lvm snapshot size when creating a new snapshot. "
             "Please add G for Gigabytes or M for Megabytes, i.e. 500M or "
             "8G. Default 5G.",
        dest="lvm_snapsize",
        default=False,
    )
    arg_parser.add_argument(
        "--lvm-dirmount",
        action="store",
        help="Set the directory you want to mount the lvm snapshot to. "
             "Default not set",
        dest="lvm_dirmount",
        default=False,
    )
    arg_parser.add_argument(
        "--lvm-volgroup",
        action="store",
        help="Specify the volume group of your logical volume. "
             "This is important to mount your snapshot volume. "
             "Default not set",
        dest="lvm_volgroup",
        default=False,
    )
    arg_parser.add_argument(
        "--max-level",
        action="store",
        help="Set the backup level used with tar to implement incremental "
             "backup. If a level 1 is specified but no level 0 is already "
             "available, a level 0 will be done and subsequently backs to "
             "level 1. Default 0 (No Incremental)",
        dest="max_level",
        type=int,
        default=False,
    )
    arg_parser.add_argument(
        "--always-level",
        action="store",
        help="Set backup maximum level used with tar to implement "
             "incremental backup. If a level 3 is specified, the backup "
             "will be executed from level 0 to level 3 and to that point "
             "always a backup level 3 will be executed. It will not restart "
             "from level 0. This option has precedence over "
             "--max-backup-level. Default False (Disabled)",
        dest="always_level",
        type=int,
        default=False,
    )
    arg_parser.add_argument(
        "--restart-always-level",
        action="store",
        help="Restart the backup from level 0 after n days. Valid only if "
             "--always-level option if set. If --always-level is used "
             "together with --remove-older-then, there might be the chance "
             "where the initial level 0 will be removed "
             "Default False (Disabled)",
        dest="restart_always_level",
        type=float,
        default=False,
    )
    arg_parser.add_argument(
        "-R", "--remove-older-then", "--remove-older-than",
        action="store",
        help=(
            "Checks in the specified container for object older than the "
            "specified days."
            "If i.e. 30 is specified, it will remove the remote object "
            "older than 30 days. Default False (Disabled) "
            "The option --remove-older-then is deprecated "
            "and will be removed soon"
        ),
        dest="remove_older_than",
        type=float,
        default=None,
    )
    arg_parser.add_argument(
        "--remove-from-date",
        action="store",
        help=(
            "Checks the specified container and removes objects older than "
            'the provided datetime in the form "YYYY-MM-DDThh:mm:ss '
            'i.e. "1974-03-25T23:23:23". '
            'Make sure the "T" is between date and time '
        ),
        dest="remove_from_date",
        default=False,
    )
    arg_parser.add_argument(
        "--no-incremental",
        action="store_true",
        help="""Disable incremental feature. By default freezer build the
        meta data even for level 0 backup. By setting this option
        incremental meta data is not created at all. Default disabled""",
        dest="no_incremental",
        default=False,
    )
    arg_parser.add_argument(
        "--hostname",
        action="store",
        help="""Set hostname to execute actions. If you are executing
        freezer from one host but you want to delete objects belonging to
        another host then you can set this option that hostname and execute
        appropriate actions. Default current node hostname.""",
        dest="hostname",
        default=False,
    )
    arg_parser.add_argument(
        "--mysql-conf",
        action="store",
        help="""Set the MySQL configuration file where freezer retrieve
        important information as db_name, user, password, host, port.
        Following is an example of config file:
        # backup_mysql_conf
        host = <db-host>
        user = <mysqluser>
        password = <mysqlpass>
        port = <db-port>""",
        dest="mysql_conf",
        default=False,
    )
    arg_parser.add_argument(
        "--metadata-out",
        action="store",
        help=(
            "Set the filename to which write the metadata regarding "
            'the backup metrics. Use "-" to output to standard output.'
        ),
        dest="metadata_out",
        default=False,
    )
    # the default log location differs per platform
    if winutils.is_windows():
        arg_parser.add_argument(
            "--log-file",
            action="store",
            help="Set log file. By default logs to ~/freezer.log",
            dest="log_file",
            default=os.path.join(home, ".freezer", "freezer.log"),
        )
    else:
        arg_parser.add_argument(
            "--log-file",
            action="store",
            help="Set log file. By default logs to /var/log/freezer.log"
                 "If that file is not writable, freezer tries to log"
                 "to ~/.freezer/freezer.log",
            dest="log_file",
            default=None,
        )
    arg_parser.add_argument(
        "--exclude",
        action="store",
        help="Exclude files, given as a PATTERN.Ex: --exclude '*.log' will "
             "exclude any file with name ending with .log. "
             "Default no exclude",
        dest="exclude",
        default=False,
    )
    arg_parser.add_argument(
        "--dereference-symlink",
        choices=["none", "soft", "hard", "all"],
        help=("Follow hard and soft links and archive and dump the files they "
              " refer to. Default False."),
        dest="dereference_symlink",
        default="",
    )
    arg_parser.add_argument(
        "-U", "--upload",
        action="store_true",
        help="Upload to Swift the destination file passed to the -d option. "
             "Default upload the data",
        dest="upload",
        default=True,
    )
    arg_parser.add_argument(
        "--encrypt-pass-file",
        action="store",
        help="Passing a private key to this option, allow you to encrypt "
             "the files before to be uploaded in Swift. "
             "Default do not encrypt.",
        dest="encrypt_pass_file",
        default=False,
    )
    arg_parser.add_argument(
        "-M", "--max-segment-size",
        action="store",
        help="Set the maximum file chunk size in bytes to upload to swift "
             "Default 67108864 bytes (64MB)",
        dest="max_segment_size",
        type=int,
        default=67108864,
    )
    arg_parser.add_argument(
        "--restore-abs-path",
        action="store",
        help=("Set the absolute path where you want your data restored. "
              "Default False."),
        dest="restore_abs_path",
        default=False,
    )
    arg_parser.add_argument(
        "--restore-from-host",
        action="store",
        help="""Set the hostname used to identify the data you want to
        restore from. If you want to restore data in the same host where
        the backup was executed just type from your shell: "$ hostname" and
        the output is the value that needs to be passed to this option.
        Mandatory with Restore Default False.""",
        dest="restore_from_host",
        default=False,
    )
    arg_parser.add_argument(
        "--restore-from-date",
        action="store",
        help="""Set the absolute path where you want your data restored.
        Please provide datetime in format "YYYY-MM-DDThh:mm:ss"
        i.e. "1979-10-03T23:23:23". Make sure the "T" is between date and
        time Default None.""",
        dest="restore_from_date",
        default=None,
    )
    arg_parser.add_argument(
        "--max-priority",
        action="store_true",
        help="""Set the cpu process to the highest priority (i.e. -20 on
        Linux) and real-time for I/O. The process priority will be set only
        if nice and ionice are installed Default disabled.
        Use with caution.""",
        dest="max_priority",
        default=False,
    )
    arg_parser.add_argument(
        "-V", "--version",
        action="store_true",
        help="""Print the release version and exit""",
        dest="version",
        default=False,
    )
    arg_parser.add_argument(
        "-q", "--quiet",
        action="store_true",
        help="""Suppress error messages""",
        dest="quiet",
        default=False
    )
    arg_parser.add_argument(
        "--insecure",
        action="store_true",
        help="Allow to access swift servers without checking SSL certs.",
        dest="insecure",
        default=False,
    )
    arg_parser.add_argument(
        "--os-auth-ver", "--os-identity-api-version",
        choices=["1", "2", "2.0", "3"],
        action="store",
        help="Openstack identity api version, can be 1, 2, 2.0 or 3",
        dest="os_identity_api_version",
        default=None,
    )
    arg_parser.add_argument(
        "--proxy",
        action="store",
        help="""Enforce proxy that alters system HTTP_PROXY and
        HTTPS_PROXY, use \'\' to eliminate all system proxies""",
        dest="proxy",
        default=False,
    )
    arg_parser.add_argument(
        "--dry-run",
        action="store_true",
        help="Do everything except writing or removing objects",
        dest="dry_run",
        default=False,
    )
    arg_parser.add_argument(
        "--upload-limit",
        action="store",
        help="""Upload bandwidth limit in Bytes per sec.
        Can be invoked with dimensions (10K, 120M, 10G).""",
        dest="upload_limit",
        type=utils.human2bytes,
        default=-1,
    )
    arg_parser.add_argument(
        "--cinder-vol-id",
        action="store",
        help="Id of cinder volume for backup",
        dest="cinder_vol_id",
        default=""
    )
    arg_parser.add_argument(
        "--nova-inst-id",
        action="store",
        help="Id of nova instance for backup",
        dest="nova_inst_id",
        default=""
    )
    arg_parser.add_argument(
        "--cindernative-vol-id",
        action="store",
        help="Id of cinder volume for native backup",
        dest="cindernative_vol_id",
        default="",
    )
    arg_parser.add_argument(
        "--download-limit",
        action="store",
        help="""Download bandwidth limit in Bytes per sec.
        Can be invoked with dimensions (10K, 120M, 10G).""",
        dest="download_limit",
        type=utils.human2bytes,
        default=-1,
    )
    arg_parser.add_argument(
        "--sql-server-conf",
        action="store",
        help="""Set the SQL Server configuration file where freezer retrieve
        the sql server instance. Following is an example of config file:
        instance = <db-instance>""",
        dest="sql_server_conf",
        default=False,
    )
    arg_parser.add_argument(
        "--vssadmin",
        action="store",
        help="""Create a backup using a snapshot on windows using vssadmin.
        Options are: True and False, default is True""",
        dest="vssadmin",
        default=True,
    )
    arg_parser.add_argument(
        "--command",
        action="store",
        help="Command executed by exec action",
        dest="command",
        default=None
    )
    arg_parser.add_argument(
        "--compression",
        action="store",
        choices=["gzip", "bzip2", "xz"],
        help="compression algorithm to use. gzip is default algorithm",
        dest="compression",
        default="gzip",
    )
    arg_parser.add_argument(
        "--storage",
        action="store",
        choices=["local", "swift", "ssh"],
        help="Storage for backups. Can be Swift or Local now. Swift is "
             "default storage now. Local stores backups on the same defined "
             "path and swift will store files in container.",
        dest="storage",
        default="swift",
    )
    arg_parser.add_argument(
        "--ssh-key",
        action="store",
        help="Path ot ssh-key for ssh storage only",
        dest="ssh_key",
        default=DEFAULT_PARAMS["ssh_key"],
    )
    arg_parser.add_argument(
        "--ssh-username",
        action="store",
        help="Remote username for ssh storage only",
        dest="ssh_username",
        default=DEFAULT_PARAMS["ssh_username"],
    )
    arg_parser.add_argument(
        "--ssh-host",
        action="store",
        help="Remote host for ssh storage only",
        dest="ssh_host",
        default=DEFAULT_PARAMS["ssh_host"],
    )

    arg_parser.set_defaults(**defaults)
    backup_args = arg_parser.parse_args()

    # windows bin
    path_to_binaries = os.path.dirname(os.path.abspath(__file__))

    # Intercept command line arguments if you are not using the CLI
    if args_dict:
        backup_args.__dict__.update(args_dict)

    # Set additional namespace attributes
    backup_args.__dict__["remote_match_backup"] = []
    backup_args.__dict__["remote_obj_list"] = []
    backup_args.__dict__["remote_newest_backup"] = u""

    # Set default working directory to ~/.freezer. If the directory
    # does not exists it is created
    work_dir = os.path.join(home, ".freezer")
    backup_args.__dict__["work_dir"] = work_dir
    if not os.path.exists(work_dir):
        try:
            os.makedirs(work_dir)
        except (OSError, IOError) as err_msg:
            # This avoids freezer-agent to crash if it can't write to
            # ~/.freezer, which may happen on some env (for me,
            # it happens in Jenkins, as freezer-agent can't write to
            # /var/lib/jenkins).
            print(encodeutils.safe_decode("{}".format(err_msg)),
                  file=sys.stderr)

    # The containers used by freezer to executed backups needs to have
    # freezer_ prefix in the name. If the user provided container doesn't
    # have the prefix, it is automatically added also to the container
    # segments name. This is done to quickly identify the containers
    # that contain freezer generated backups
    if not backup_args.container.startswith("freezer_") and \
            backup_args.storage == "swift":
        backup_args.container = "freezer_{0}".format(backup_args.container)

    # If hostname is not set, hostname of the current node will be used
    if not backup_args.hostname:
        backup_args.__dict__["hostname"] = socket.gethostname()

    backup_args.__dict__["curr_backup_level"] = ""
    # NOTE(review): manifest_meta_dict used to be assigned {} and then
    # immediately overwritten with ""; the dead first assignment was
    # removed, the effective value is unchanged.
    backup_args.__dict__["manifest_meta_dict"] = ""

    if winutils.is_windows():
        # On Windows freezer ships its own tar under <package>\bin
        backup_args.__dict__["tar_path"] = "{0}\\bin\\tar.exe".format(
            path_to_binaries)
    else:
        backup_args.__dict__["tar_path"] = distspawn.find_executable("tar")

    # If freezer is being used under OSX, please install gnutar and
    # rename the executable as gnutar
    if "darwin" in sys.platform or "bsd" in sys.platform:
        if distspawn.find_executable("gtar"):
            backup_args.__dict__["tar_path"] = \
                distspawn.find_executable("gtar")
        elif distspawn.find_executable("gnutar"):
            backup_args.__dict__["tar_path"] = \
                distspawn.find_executable("gnutar")
        else:
            raise Exception("Please install gnu tar (gtar) as it is a "
                            "mandatory requirement to use freezer.")

    # If we have provided --proxy then overwrite the system HTTP_PROXY and
    # HTTPS_PROXY
    alter_proxy(backup_args.__dict__)

    # Get absolute path of other commands used by freezer
    backup_args.__dict__["lvcreate_path"] = \
        distspawn.find_executable("lvcreate")
    backup_args.__dict__["lvremove_path"] = \
        distspawn.find_executable("lvremove")
    backup_args.__dict__["bash_path"] = distspawn.find_executable("bash")
    if winutils.is_windows():
        backup_args.__dict__["openssl_path"] = "openssl"
    else:
        backup_args.__dict__["openssl_path"] = \
            distspawn.find_executable("openssl")
    backup_args.__dict__["file_path"] = distspawn.find_executable("file")
    backup_args.__dict__["mount_path"] = distspawn.find_executable("mount")
    backup_args.__dict__["umount_path"] = distspawn.find_executable("umount")
    backup_args.__dict__["ionice"] = distspawn.find_executable("ionice")

    # MySQLdb object
    backup_args.__dict__["mysql_db_inst"] = ""
    # SQL Server object
    backup_args.__dict__["sql_server_instance"] = ""

    # Windows volume
    backup_args.__dict__["shadow"] = ""
    backup_args.__dict__["shadow_path"] = ""
    backup_args.__dict__["file_name"] = ""
    if winutils.is_windows():
        if backup_args.path_to_backup:
            # e.g. "C:\" — drive of the path being backed up
            backup_args.__dict__["windows_volume"] = \
                backup_args.path_to_backup[:3]
        # --vssadmin arrives as a string when given on the command line or
        # in a config file; normalize the textual "false" values
        if backup_args.vssadmin == "False" or backup_args.vssadmin == "false":
            backup_args.vssadmin = False

    backup_args.__dict__["meta_data"] = {}
    backup_args.__dict__["meta_data_file"] = ""
    backup_args.__dict__["absolute_path"] = ""

    # Freezer version
    backup_args.__dict__["__version__"] = "1.1.3"

    # todo(enugaev) move it to new command line param backup_media
    backup_media = "fs"
    if backup_args.cinder_vol_id:
        backup_media = "cinder"
    elif backup_args.cindernative_vol_id:
        backup_media = "cindernative"
    elif backup_args.nova_inst_id:
        backup_media = "nova"

    backup_args.__dict__["backup_media"] = backup_media
    backup_args.__dict__["time_stamp"] = None

    return backup_args, arg_parser
def freezer_main(args=None):
    """Freezer main loop for job execution.

    Configures logging and process priority, selects the storage backend
    (swift/local/ssh), builds the tar engine, then creates and executes the
    job corresponding to the requested action.

    :param args: optional mapping merged into the module-level backup_args
        namespace, used to run Freezer without a command line
        (e.g. functional testing).  None/empty means "use the CLI".
    :return: the (possibly augmented) backup_args namespace.
    """
    global backup_args, arg_parse

    def configure_log_file_using_defaults():
        """Configure the log file for freezer, trying fallback locations."""
        dry_run_message = ""
        if backup_args.dry_run:
            dry_run_message = "[DRY_RUN] "

        def configure_logging(file_name):
            # expand ~ and make sure the containing directory exists before
            # basicConfig tries to open the file
            expanded_file_name = os.path.expanduser(file_name)
            expanded_dir_name = os.path.dirname(expanded_file_name)
            utils.create_dir(expanded_dir_name, do_log=False)
            logging.basicConfig(
                filename=expanded_file_name,
                level=logging.INFO,
                format=("%(asctime)s %(name)s %(levelname)s "
                        "{0}%(message)s".format(dry_run_message)),
            )
            return expanded_file_name

        if backup_args.log_file:
            return configure_logging(backup_args.log_file)

        # no explicit log file: fall back to the system location, then to
        # the per-user location
        for file_name in ["/var/log/freezer.log", "~/.freezer/freezer.log"]:
            try:
                return configure_logging(file_name)
            except IOError:
                pass

        raise Exception("Unable to write to log file")

    def set_max_process_priority():
        """Run freezer at maximum CPU and (real-time) I/O priority."""
        # children processes inherit niceness from father
        try:
            logging.warning(
                "[*] Setting freezer execution with high CPU and I/O "
                "priority")
            PID = os.getpid()
            # Set cpu priority
            os.nice(-19)
            # Set I/O Priority to Real Time class with level 0
            subprocess.call([
                u"{0}".format(backup_args.ionice),
                u"-c", u"1", u"-n", u"0", u"-t",
                u"-p", u"{0}".format(PID)
            ])
        except Exception as priority_error:
            # best-effort: lack of privileges/ionice must not stop the job
            logging.warning("[*] Priority: {0}".format(priority_error))

    # Alternative arguments provision useful to run Freezer without
    # command line e.g. functional testing
    if args:
        backup_args.__dict__.update(args)
    elif len(sys.argv) < 2:
        arg_parse.print_help()
        sys.exit(1)

    if backup_args.version:
        # print() call (not the py2 print statement) so this module stays
        # consistent with the rest of the code base and py3-parseable
        print("freezer version {0}".format(backup_args.__version__))
        sys.exit(1)

    try:
        log_file_name = configure_log_file_using_defaults()
    except Exception as err:
        fail(1, err, do_log=False)

    if not backup_args.quiet:
        logging.info("log file at {0}".format(log_file_name))

    if backup_args.max_priority:
        set_max_process_priority()

    monkeypatch_socket_bandwidth(backup_args)

    backup_args.__dict__["hostname_backup_name"] = "{0}_{1}".format(
        backup_args.hostname, backup_args.backup_name)

    Validator.validate(backup_args)

    if backup_args.storage == "swift":
        options = utils.OpenstackOptions.create_from_env()
        Validator.validate_env(options)
        # the CLI flag wins over the environment-provided version
        identity_api_version = (backup_args.os_identity_api_version or
                                options.identity_api_version)
        client_manager = ClientManager(
            options=options,
            insecure=backup_args.insecure,
            swift_auth_version=identity_api_version,
            dry_run=backup_args.dry_run,
        )

        storage = swift.SwiftStorage(
            client_manager, backup_args.container, backup_args.work_dir,
            backup_args.max_segment_size)
        backup_args.__dict__["client_manager"] = client_manager
    elif backup_args.storage == "local":
        storage = local.LocalStorage(backup_args.container,
                                     backup_args.work_dir)
    elif backup_args.storage == "ssh":
        storage = ssh.SshStorage(
            backup_args.container, backup_args.work_dir,
            backup_args.ssh_key, backup_args.ssh_username,
            backup_args.ssh_host, backup_args.ssh_port,
        )
    else:
        raise Exception("Not storage found for name " + backup_args.storage)

    backup_args.__dict__["storage"] = storage
    backup_args.__dict__["engine"] = tar_engine.TarBackupEngine(
        backup_args.tar_path,
        backup_args.compression,
        backup_args.dereference_symlink,
        backup_args.exclude,
        storage,
        winutils.is_windows(),
        backup_args.openssl_path,
        backup_args.encrypt_pass_file,
        backup_args.dry_run,
    )

    freezer_job = job.create_job(backup_args)
    freezer_job.execute()

    # "-" means: emit the job metadata (metrics) on standard output
    if backup_args.metadata_out == "-":
        metadata = freezer_job.get_metadata()
        if metadata:
            sys.stdout.write(json.dumps(metadata))

    return backup_args
def get_backup_args():
    """Assemble the effective freezer configuration namespace.

    Merges, in increasing precedence: DEFAULT_PARAMS, the oslo.config CLI
    options, and an optional ``--config`` file.  Also prepares logging
    defaults, the ~/.freezer work dir, derived attributes (backup media,
    windows volume, ...) and — when bandwidth limits are requested on a
    non-Windows host — wraps execution with trickle.

    :return: a FreezerConfig namespace with all derived attributes set.
    """
    defaults = DEFAULT_PARAMS.copy()

    class FreezerConfig(object):
        # thin attribute-access wrapper over the merged options dict
        def __init__(self, args):
            self.__dict__.update(args)

    # only CLI options that were actually provided override the defaults
    cli_options = dict([(x, y) for x, y in CONF.iteritems()
                        if y is not None])
    defaults.update(cli_options)

    conf = None
    if CONF.get('config'):
        conf = freezer_config.Config.parse(CONF.get('config'))
        defaults.update(conf.default)

        # TODO: restore_from_host is deprecated and to be removed
        defaults['hostname'] = conf.default.get('hostname') or \
            conf.default.get('restore_from_host')

    # override default oslo values
    levels = {
        'all': log.NOTSET,
        'debug': log.DEBUG,
        'warn': log.WARN,
        'info': log.INFO,
        'error': log.ERROR,
        'critical': log.CRITICAL
    }
    if not CONF.get('log_file'):
        # NOTE(review): this stores a numeric log *level* into the
        # 'log_file' option, which looks suspicious — possibly meant for a
        # level option instead.  Preserved as-is; confirm before changing.
        CONF.set_override('log_file', levels.get(defaults['log_file'],
                                                 log.NOTSET))
    CONF.set_override('default_log_levels', defaults['log_level'])

    if not CONF.get('log_file'):
        log_file = None
        for file_name in ['/var/log/freezer.log', '~/.freezer/freezer.log']:
            try:
                log_file = prepare_logging(file_name)
            except IOError:
                pass
        if log_file:
            CONF.set_default('log_file', log_file)
        else:
            LOG.warn("log file cannot be created. Freezer will proceed with "
                     "default stdout and stderr")

    backup_args = FreezerConfig(defaults)

    # Set default working directory to ~/.freezer. If the directory
    # does not exists it is created
    work_dir = os.path.join(home, '.freezer')
    backup_args.__dict__['work_dir'] = work_dir
    if not os.path.exists(work_dir):
        try:
            os.makedirs(work_dir)
        except (OSError, IOError) as err_msg:
            # This avoids freezer-agent to crash if it can't write to
            # ~/.freezer, which may happen on some env (for me,
            # it happens in Jenkins, as freezer-agent can't write to
            # /var/lib/jenkins).
            print(encodeutils.safe_decode('{}'.format(err_msg)),
                  file=sys.stderr)

    # If hostname is not set, hostname of the current node will be used
    if not backup_args.hostname:
        backup_args.__dict__['hostname'] = socket.gethostname()

    # If we have provided --proxy then overwrite the system HTTP_PROXY and
    # HTTPS_PROXY
    if backup_args.proxy:
        utils.alter_proxy(backup_args.proxy)

    # MySQLdb object
    backup_args.__dict__['mysql_db_inst'] = ''
    backup_args.__dict__['storages'] = None
    if conf and conf.storages:
        backup_args.__dict__['storages'] = conf.storages

    # Windows volume
    backup_args.__dict__['shadow'] = ''
    backup_args.__dict__['shadow_path'] = ''
    backup_args.__dict__['file_name'] = ''
    if winutils.is_windows():
        if backup_args.path_to_backup:
            # e.g. "C:\" — drive of the path being backed up
            backup_args.__dict__['windows_volume'] = \
                backup_args.path_to_backup[:3]

    # todo(enugaev) move it to new command line param backup_media
    backup_media = 'fs'
    if backup_args.cinder_vol_id:
        backup_media = 'cinder'
    elif backup_args.cindernative_vol_id:
        backup_media = 'cindernative'
    elif backup_args.nova_inst_id:
        backup_media = 'nova'

    backup_args.__dict__['backup_media'] = backup_media
    backup_args.__dict__['time_stamp'] = None

    # NOTE: parentheses matter here.  The previous expression
    # `a or b and not windows` bound as `a or (b and not windows)`, so a
    # configured upload limit triggered the trickle path even on Windows.
    if (backup_args.upload_limit or backup_args.download_limit) and \
            not winutils.is_windows():
        if backup_args.config:
            # re-exec will re-read the config: write a copy with the limits
            # disabled so trickle is not applied twice
            conf_file = NamedTemporaryFile(prefix='freezer_job_',
                                           delete=False)
            defaults['upload_limit'] = defaults['download_limit'] = -1
            utils.save_config_to_file(defaults, conf_file)

            conf_index = sys.argv.index('--config') + 1
            sys.argv[conf_index] = conf_file.name

        # strip the limit flags (option + value) from argv for the re-exec
        if '--upload-limit' in sys.argv:
            index = sys.argv.index('--upload-limit')
            sys.argv.pop(index)
            sys.argv.pop(index)
        if '--download-limit' in sys.argv:
            index = sys.argv.index('--download-limit')
            sys.argv.pop(index)
            sys.argv.pop(index)

        trickle_executable = distspawn.find_executable('trickle')
        if trickle_executable is None:
            trickle_executable = distspawn.find_executable(
                'trickle', path=":".join(sys.path))
        if trickle_executable is None:
            # find_executable expects a pathsep-separated string; $PATH
            # already is one.  (The old code ":".join(os.environ.get('PATH'))
            # interleaved ':' between every character of $PATH.)
            trickle_executable = distspawn.find_executable(
                'trickle', path=os.environ.get('PATH'))

        trickle_lib = distspawn.find_executable('trickle-overload.so')
        if trickle_lib is None:
            trickle_lib = distspawn.find_executable(
                'trickle-overload.so', path=":".join(sys.path))
        if trickle_lib is None:
            trickle_lib = distspawn.find_executable(
                'trickle-overload.so', path=os.environ.get('PATH'))

        if trickle_executable and trickle_lib:
            LOG.info("[*] Info: Starting trickle ...")
            os.environ['LD_PRELOAD'] = trickle_lib
            trickle_command = '{0} -d {1} -u {2} '.\
                format(trickle_executable,
                       getattr(backup_args, 'download_limit') or -1,
                       getattr(backup_args, 'upload_limit') or -1)
            backup_args.__dict__['trickle_command'] = trickle_command

            # guard counter so nested re-execs do not stack trickle wrappers
            if "tricklecount" in os.environ:
                tricklecount = int(os.environ.get("tricklecount", 1))
                tricklecount += 1
                os.environ["tricklecount"] = str(tricklecount)
            else:
                os.environ["tricklecount"] = str(1)
        else:
            LOG.critical("[*] Trickle or Trickle library not found. "
                         "Switching to normal mode without limiting "
                         "bandwidth")

    return backup_args
def backup_arguments(args_dict=None):
    """Build the freezer command line / config file options interface.

    A minimal parser that only understands ``--config`` runs first; values
    read from the config file become argparse defaults, then the full
    command line is parsed on top of them (so CLI options take precedence).
    After parsing, a number of derived attributes (paths to external
    binaries, working directory, backup media type, version, ...) are
    attached to the resulting namespace.

    :param args_dict: optional mapping of parsed-argument overrides, used
        when freezer is driven programmatically instead of via the CLI.
        Defaults to no overrides.  (Previously this was a mutable default
        ``{}``; ``None`` avoids the shared-mutable-default pitfall while
        remaining call-compatible.)
    :return: tuple ``(backup_args, arg_parser)``
    """
    conf_parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        add_help=False,
        prog='freezerc')
    conf_parser.add_argument(
        '--config', action='store', dest='config', default=False,
        help=("Config file abs path. Option arguments are provided "
              "from config file. When config file is used any option "
              "from command line provided take precedence."))
    defaults = DEFAULT_PARAMS.copy()
    args, remaining_argv = conf_parser.parse_known_args()
    if args.config:
        # NOTE(review): SafeConfigParser is deprecated on Python 3.2+
        # (renamed ConfigParser); kept as-is for compatibility.
        config = configparser.SafeConfigParser()
        config.read([args.config])
        # Only the first section of the config file is honoured.
        section = config.sections()[0]
        for option in config.options(section):
            option_value = config.get(section, option)
            # Options come back from the config file as strings; map the
            # false-y spellings onto a real False so store_true-style
            # options behave as expected.
            if option_value in ('False', 'None'):
                option_value = False
            defaults[option] = option_value

    # Generate a new argparse istance and inherit options from config parse
    arg_parser = argparse.ArgumentParser(
        parents=[conf_parser])

    arg_parser.add_argument(
        '--action', choices=['backup', 'restore', 'info', 'admin',
                             'exec'],
        help=(
            "Set the action to be taken. backup and restore are"
            " self explanatory, info is used to retrieve info from the"
            " storage media, exec is used to execute a script,"
            " while admin is used to delete old backups"
            " and other admin actions. Default backup."),
        dest='action', default='backup')
    arg_parser.add_argument(
        '-F', '--path-to-backup', '--file-to-backup', action='store',
        help="The file or directory you want to back up to Swift",
        dest='path_to_backup', default=False)
    arg_parser.add_argument(
        '-N', '--backup-name', action='store',
        help="The backup name you want to use to identify your backup \
on Swift", dest='backup_name', default=False)
    arg_parser.add_argument(
        '-m', '--mode', action='store',
        help="Set the technology to back from. Options are, fs (filesystem),\
mongo (MongoDB), mysql (MySQL), sqlserver (SQL Server)\
Default set to fs", dest='mode', default='fs')
    arg_parser.add_argument(
        '-C', '--container', action='store',
        help="The Swift container (or path to local storage) "
             "used to upload files to",
        dest='container', default='freezer_backups')
    arg_parser.add_argument(
        '-L', '--list-containers', action='store_true',
        help='''List the Swift containers on remote Object Storage
        Server''', dest='list_containers', default=False)
    arg_parser.add_argument(
        '-l', '--list-objects', action='store_true',
        help='''List the Swift objects stored in a container on remote Object\
        Storage Server.''', dest='list_objects', default=False)
    arg_parser.add_argument(
        '-o', '--get-object', action='store',
        help="The Object name you want to download on the local file system.",
        dest='get_object', default=False)
    arg_parser.add_argument(
        '-d', '--dst-file', action='store',
        help="The file name used to save the object on your local disk and\
        upload file in swift", dest='dst_file', default=False)
    arg_parser.add_argument(
        '--lvm-auto-snap', action='store',
        help=("Automatically guess the volume group and volume name for "
              "given PATH."),
        dest='lvm_auto_snap', default=False)
    arg_parser.add_argument(
        '--lvm-srcvol', action='store',
        help="Set the lvm volume you want to take a snaphost from. Default\
        no volume", dest='lvm_srcvol', default=False)
    arg_parser.add_argument(
        '--lvm-snapname', action='store',
        help="Set the lvm snapshot name to use. If the snapshot name already\
        exists, the old one will be used a no new one will be created. Default\
        freezer_backup_snap.", dest='lvm_snapname', default=False)
    arg_parser.add_argument(
        '--lvm-snap-perm', action='store', choices=['ro', 'rw'],
        help="Set the lvm snapshot permission to use. If the permission\
        is set to ro The snapshot will be immutable - read only -.\
        If the permission is set to rw it will be mutable",
        dest='lvm_snapperm', default='ro')
    arg_parser.add_argument(
        '--lvm-snapsize', action='store',
        help="Set the lvm snapshot size when creating a new snapshot.\
        Please add G for Gigabytes or M for Megabytes, i.e. 500M or 8G.\
        Default 5G.", dest='lvm_snapsize', default=False)
    arg_parser.add_argument(
        '--lvm-dirmount', action='store',
        help="Set the directory you want to mount the lvm snapshot to.\
        Default not set", dest='lvm_dirmount', default=False)
    arg_parser.add_argument(
        '--lvm-volgroup', action='store',
        help="Specify the volume group of your logical volume.\
        This is important to mount your snapshot volume. Default not set",
        dest='lvm_volgroup', default=False)
    arg_parser.add_argument(
        '--max-level', action='store',
        help="Set the backup level used with tar to implement incremental \
        backup. If a level 1 is specified but no level 0 is already \
        available, a level 0 will be done and subsequently backs to level 1.\
        Default 0 (No Incremental)", dest='max_level', type=int,
        default=False)
    arg_parser.add_argument(
        '--always-level', action='store', help="Set backup\
        maximum level used with tar to implement incremental backup. If a \
        level 3 is specified, the backup will be executed from level 0 to \
        level 3 and to that point always a backup level 3 will be executed. \
        It will not restart from level 0. This option has precedence over \
        --max-backup-level. Default False (Disabled)",
        dest='always_level', type=int, default=False)
    arg_parser.add_argument(
        '--restart-always-level', action='store', help="Restart the backup \
        from level 0 after n days. Valid only if --always-level option \
        if set. If --always-level is used together with --remove-older-then, \
        there might be the chance where the initial level 0 will be removed \
        Default False (Disabled)",
        dest='restart_always_level', type=float, default=False)
    arg_parser.add_argument(
        '-R', '--remove-older-then', '--remove-older-than', action='store',
        help=('Checks in the specified container for object older than the '
              'specified days.'
              'If i.e. 30 is specified, it will remove the remote object '
              'older than 30 days. Default False (Disabled) '
              'The option --remove-older-then is deprecated '
              'and will be removed soon'),
        dest='remove_older_than', type=float, default=None)
    arg_parser.add_argument(
        '--remove-from-date', action='store',
        help=('Checks the specified container and removes objects older than '
              'the provided datetime in the form "YYYY-MM-DDThh:mm:ss '
              'i.e. "1974-03-25T23:23:23". '
              'Make sure the "T" is between date and time '),
        dest='remove_from_date', default=False)
    arg_parser.add_argument(
        '--no-incremental', action='store_true',
        help='''Disable incremental feature. By default freezer build the
        meta data even for level 0 backup. By setting this option incremental
        meta data is not created at all. Default disabled''',
        dest='no_incremental', default=False)
    arg_parser.add_argument(
        '--hostname', action='store',
        help='''Set hostname to execute actions. If you are executing freezer
        from one host but you want to delete objects belonging to another
        host then you can set this option that hostname and execute
        appropriate actions. Default current node hostname.''',
        dest='hostname', default=False)
    arg_parser.add_argument(
        '--mysql-conf', action='store',
        help='''Set the MySQL configuration file where freezer retrieve
        important information as db_name, user, password, host, port.
        Following is an example of config file:
        # backup_mysql_conf
        host     = <db-host>
        user     = <mysqluser>
        password = <mysqlpass>
        port     = <db-port>''',
        dest='mysql_conf', default=False)
    arg_parser.add_argument(
        '--metadata-out', action='store',
        help=('Set the filename to which write the metadata regarding '
              'the backup metrics. Use "-" to output to standard output.'),
        dest='metadata_out', default=False)

    # The log-file default differs per platform; register the appropriate
    # variant of the option.
    if winutils.is_windows():
        arg_parser.add_argument(
            '--log-file', action='store',
            help='Set log file. By default logs to ~/freezer.log',
            dest='log_file',
            default=os.path.join(home, '.freezer', 'freezer.log'))
    else:
        arg_parser.add_argument(
            '--log-file', action='store',
            help='Set log file. By default logs to /var/log/freezer.log'
                 'If that file is not writable, freezer tries to log'
                 'to ~/.freezer/freezer.log',
            dest='log_file', default=None)

    arg_parser.add_argument(
        '--exclude', action='store', help="Exclude files,\
        given as a PATTERN.Ex: --exclude '*.log' will exclude any file \
        with name ending with .log. Default no exclude", dest='exclude',
        default=False)
    arg_parser.add_argument(
        '--dereference-symlink', choices=['none', 'soft', 'hard', 'all'],
        help=(
            "Follow hard and soft links and archive and dump the files they "
            " refer to. Default False."),
        dest='dereference_symlink', default='')
    arg_parser.add_argument(
        '-U', '--upload', action='store_true',
        help="Upload to Swift the destination file passed to the -d option.\
        Default upload the data", dest='upload', default=True)
    arg_parser.add_argument(
        '--encrypt-pass-file', action='store',
        help="Passing a private key to this option, allow you to encrypt the \
        files before to be uploaded in Swift. Default do not encrypt.",
        dest='encrypt_pass_file', default=False)
    arg_parser.add_argument(
        '-M', '--max-segment-size', action='store',
        help="Set the maximum file chunk size in bytes to upload to swift\
        Default 67108864 bytes (64MB)",
        dest='max_segment_size', type=int, default=67108864)
    arg_parser.add_argument(
        '--restore-abs-path', action='store',
        help=('Set the absolute path where you want your data restored. '
              'Default False.'),
        dest='restore_abs_path', default=False)
    arg_parser.add_argument(
        '--restore-from-host', action='store',
        help='''Set the hostname used to identify the data you want to restore
        from. If you want to restore data in the same host where the backup
        was executed just type from your shell: "$ hostname" and the output is
        the value that needs to be passed to this option. Mandatory with
        Restore Default False.''', dest='restore_from_host', default=False)
    # FIX: the help text previously described --restore-abs-path
    # ("Set the absolute path where you want your data restored") —
    # a copy-paste error; this option selects the backup *date*.
    arg_parser.add_argument(
        '--restore-from-date', action='store',
        help='''Set the date of the backup to restore from.
        Please provide datetime in format "YYYY-MM-DDThh:mm:ss"
        i.e. "1979-10-03T23:23:23". Make sure the "T" is between date and time
        Default None.''', dest='restore_from_date', default=None)
    arg_parser.add_argument(
        '--max-priority', action='store_true',
        help='''Set the cpu process to the highest priority (i.e. -20 on
        Linux) and real-time for I/O. The process priority will be set only
        if nice and ionice are installed Default disabled. Use with
        caution.''',
        dest='max_priority', default=False)
    arg_parser.add_argument(
        '-V', '--version', action='store_true',
        help='''Print the release version and exit''',
        dest='version', default=False)
    arg_parser.add_argument(
        '-q', '--quiet', action='store_true',
        help='''Suppress error messages''',
        dest='quiet', default=False)
    arg_parser.add_argument(
        '--insecure', action='store_true',
        help='Allow to access swift servers without checking SSL certs.',
        dest='insecure', default=False)
    arg_parser.add_argument(
        '--os-auth-ver', '--os-identity-api-version',
        choices=['1', '2', '2.0', '3'],
        action='store',
        help='Openstack identity api version, can be 1, 2, 2.0 or 3',
        dest='os_identity_api_version', default=None)
    arg_parser.add_argument(
        '--proxy', action='store',
        help='''Enforce proxy that alters system HTTP_PROXY and HTTPS_PROXY,
        use \'\' to eliminate all system proxies''',
        dest='proxy', default=False)
    arg_parser.add_argument(
        '--dry-run', action='store_true',
        help='Do everything except writing or removing objects',
        dest='dry_run', default=False)
    arg_parser.add_argument(
        '--upload-limit', action='store',
        help='''Upload bandwidth limit in Bytes per sec.
        Can be invoked with dimensions (10K, 120M, 10G).''',
        dest='upload_limit', type=utils.human2bytes, default=-1)
    arg_parser.add_argument(
        "--cinder-vol-id", action='store',
        help='Id of cinder volume for backup',
        dest="cinder_vol_id", default='')
    arg_parser.add_argument(
        "--nova-inst-id", action='store',
        help='Id of nova instance for backup',
        dest="nova_inst_id", default='')
    arg_parser.add_argument(
        "--cindernative-vol-id", action='store',
        help='Id of cinder volume for native backup',
        dest="cindernative_vol_id", default='')
    arg_parser.add_argument(
        '--download-limit', action='store',
        help='''Download bandwidth limit in Bytes per sec.
        Can be invoked with dimensions (10K, 120M, 10G).''',
        dest='download_limit', type=utils.human2bytes, default=-1)
    arg_parser.add_argument(
        '--sql-server-conf', action='store',
        help='''Set the SQL Server configuration file where freezer retrieve
        the sql server instance.
        Following is an example of config file:
        instance = <db-instance>''',
        dest='sql_server_conf', default=False)
    arg_parser.add_argument(
        '--vssadmin', action='store',
        help='''Create a backup using a snapshot on windows
        using vssadmin. Options are: True and False, default is True''',
        dest='vssadmin', default=True)
    arg_parser.add_argument(
        '--command', action='store',
        help='Command executed by exec action',
        dest='command', default=None)
    arg_parser.add_argument(
        '--compression', action='store',
        choices=['gzip', 'bzip2', 'xz'],
        help='compression algorithm to use. gzip is default algorithm',
        dest='compression', default='gzip')
    arg_parser.add_argument(
        '--storage', action='store',
        choices=['local', 'swift', 'ssh'],
        help="Storage for backups. Can be Swift or Local now. Swift is default"
             "storage now. Local stores backups on the same defined path and"
             "swift will store files in container.",
        dest='storage', default='swift')
    arg_parser.add_argument(
        '--ssh-key', action='store',
        help="Path ot ssh-key for ssh storage only",
        dest='ssh_key', default=DEFAULT_PARAMS['ssh_key'])
    arg_parser.add_argument(
        '--ssh-username', action='store',
        help="Remote username for ssh storage only",
        dest='ssh_username', default=DEFAULT_PARAMS['ssh_username'])
    arg_parser.add_argument(
        '--ssh-host', action='store',
        help="Remote host for ssh storage only",
        dest='ssh_host', default=DEFAULT_PARAMS['ssh_host'])
    arg_parser.add_argument(
        '--ssh-port', action='store',
        help="Remote port for ssh storage only (default 22)", type=int,
        dest='ssh_port', default=DEFAULT_PARAMS['ssh_port'])

    # Config-file values act as defaults; explicit CLI flags override them.
    arg_parser.set_defaults(**defaults)
    backup_args = arg_parser.parse_args()

    # windows bin
    path_to_binaries = os.path.dirname(os.path.abspath(__file__))

    # Intercept command line arguments if you are not using the CLI
    if args_dict:
        backup_args.__dict__.update(args_dict)

    # Set additional namespace attributes
    backup_args.__dict__['remote_match_backup'] = []
    backup_args.__dict__['remote_obj_list'] = []
    backup_args.__dict__['remote_newest_backup'] = u''

    # Set default working directory to ~/.freezer. If the directory
    # does not exists it is created
    work_dir = os.path.join(home, '.freezer')
    backup_args.__dict__['work_dir'] = work_dir
    if not os.path.exists(work_dir):
        try:
            os.makedirs(work_dir)
        except (OSError, IOError) as err_msg:
            # This avoids freezer-agent to crash if it can't write to
            # ~/.freezer, which may happen on some env (for me,
            # it happens in Jenkins, as freezer-agent can't write to
            # /var/lib/jenkins).
            print(encodeutils.safe_decode(
                '{}'.format(err_msg)), file=sys.stderr)

    # The containers used by freezer to executed backups needs to have
    # freezer_ prefix in the name. If the user provider container doesn't
    # have the prefix, it is automatically added also to the container
    # segments name. This is done to quickly identify the containers
    # that contain freezer generated backups
    if not backup_args.container.startswith('freezer_') and \
            backup_args.storage == 'swift':
        backup_args.container = 'freezer_{0}'.format(
            backup_args.container)

    # If hostname is not set, hostname of the current node will be used
    if not backup_args.hostname:
        backup_args.__dict__['hostname'] = socket.gethostname()

    backup_args.__dict__['curr_backup_level'] = ''
    # FIX: the original assigned {} to manifest_meta_dict and immediately
    # overwrote it with '' — the dead first store has been dropped; the
    # observable final value ('') is unchanged.
    backup_args.__dict__['manifest_meta_dict'] = ''

    # Locate the tar binary: a bundled tar.exe on Windows, the system tar
    # elsewhere.
    if winutils.is_windows():
        backup_args.__dict__['tar_path'] = '{0}\\bin\\tar.exe'. \
            format(path_to_binaries)
    else:
        backup_args.__dict__['tar_path'] = distspawn.find_executable('tar')

    # If freezer is being used under OSX, please install gnutar and
    # rename the executable as gnutar
    if 'darwin' in sys.platform or 'bsd' in sys.platform:
        if distspawn.find_executable('gtar'):
            backup_args.__dict__['tar_path'] = \
                distspawn.find_executable('gtar')
        elif distspawn.find_executable('gnutar'):
            backup_args.__dict__['tar_path'] = \
                distspawn.find_executable('gnutar')
        else:
            raise Exception('Please install gnu tar (gtar) as it is a '
                            'mandatory requirement to use freezer.')

    # If we have provided --proxy then overwrite the system HTTP_PROXY and
    # HTTPS_PROXY
    alter_proxy(backup_args.__dict__)

    # Get absolute path of other commands used by freezer
    backup_args.__dict__['lvcreate_path'] = distspawn.find_executable(
        'lvcreate')
    backup_args.__dict__['lvremove_path'] = distspawn.find_executable(
        'lvremove')
    backup_args.__dict__['bash_path'] = distspawn.find_executable('bash')
    if winutils.is_windows():
        backup_args.__dict__['openssl_path'] = 'openssl'
    else:
        backup_args.__dict__['openssl_path'] = \
            distspawn.find_executable('openssl')
    backup_args.__dict__['file_path'] = distspawn.find_executable('file')
    backup_args.__dict__['mount_path'] = distspawn.find_executable('mount')
    backup_args.__dict__['umount_path'] = distspawn.find_executable('umount')
    backup_args.__dict__['ionice'] = distspawn.find_executable('ionice')

    # MySQLdb object
    backup_args.__dict__['mysql_db_inst'] = ''
    # SQL Server object
    backup_args.__dict__['sql_server_instance'] = ''

    # Windows volume
    backup_args.__dict__['shadow'] = ''
    backup_args.__dict__['shadow_path'] = ''
    backup_args.__dict__['file_name'] = ''
    if winutils.is_windows():
        if backup_args.path_to_backup:
            # e.g. 'C:\\' — drive prefix of the path being backed up
            backup_args.__dict__['windows_volume'] = \
                backup_args.path_to_backup[:3]

    # --vssadmin arrives from the CLI/config as a string; normalize the
    # textual false-y spellings to a real boolean.
    if backup_args.vssadmin == 'False' or backup_args.vssadmin == 'false':
        backup_args.vssadmin = False

    backup_args.__dict__['meta_data'] = {}
    backup_args.__dict__['meta_data_file'] = ''
    backup_args.__dict__['absolute_path'] = ''

    # Freezer version
    backup_args.__dict__['__version__'] = '1.1.3'

    # todo(enugaev) move it to new command line param backup_media
    backup_media = 'fs'
    if backup_args.cinder_vol_id:
        backup_media = 'cinder'
    elif backup_args.cindernative_vol_id:
        backup_media = 'cindernative'
    elif backup_args.nova_inst_id:
        backup_media = 'nova'

    backup_args.__dict__['backup_media'] = backup_media

    backup_args.__dict__['time_stamp'] = None

    return backup_args, arg_parser
def backup_mode_fs(backup_opt_dict, time_stamp, manifest_meta_dict):
    """
    Execute the necessary tasks for file system backup mode.

    Pipeline: (1) take a filesystem snapshot (VSS shadow copy on Windows,
    LVM snapshot elsewhere), (2) tar the data in one child process while a
    second child process uploads the produced chunks, (3) optionally upload
    the tar incremental metadata and the swift manifest, and (4) always tear
    the snapshot down again in the ``finally`` block.

    :param backup_opt_dict: namespace of backup options (paths, lvm/vss
        settings, swift connector, upload flags, ...)
    :param time_stamp: timestamp used to name this backup
    :param manifest_meta_dict: remote manifest metadata from a previous
        backup level, used to drive incremental behaviour
    :raises Exception: if the upload child process exits with a non-zero
        exit code
    """

    logging.info('[*] File System backup is being executed...')
    try:
        if is_windows():
            # Create a shadow copy.
            backup_opt_dict.shadow_path, backup_opt_dict.shadow = \
                vss_create_shadow_copy(backup_opt_dict.volume)
        else:
            # If lvm_auto_snap is true, the volume group and volume name will
            # be extracted automatically
            if backup_opt_dict.lvm_auto_snap:
                backup_opt_dict = get_lvm_info(backup_opt_dict)

            # Generate the lvm_snap if lvm arguments are available
            lvm_snap(backup_opt_dict)

        # Generate a string hostname, backup name, timestamp and backup level
        file_name = add_host_name_ts_level(backup_opt_dict, time_stamp)
        meta_data_backup_file = u'tar_metadata_{0}'.format(file_name)
        backup_opt_dict.meta_data_file = meta_data_backup_file

        # Initialize a Queue for a maximum of 2 items
        tar_backup_queue = multiprocessing.Queue(maxsize=2)

        if is_windows():
            # Back up through the shadow copy so open files are readable;
            # keep the original path in absolute_path for later use.
            backup_opt_dict.absolute_path = backup_opt_dict.src_file
            backup_opt_dict.src_file = use_shadow(backup_opt_dict.src_file,
                                                  backup_opt_dict.volume)

        # Execute a tar gzip of the specified directory and return
        # small chunks (default 128MB), timestamp, backup, filename,
        # file chunk index and the tar meta-data file
        (backup_opt_dict, tar_command, manifest_meta_dict) = \
            gen_tar_command(opt_dict=backup_opt_dict, time_stamp=time_stamp,
                            remote_manifest_meta=manifest_meta_dict)

        # Producer: runs tar and feeds data chunks into tar_backup_queue.
        tar_backup_stream = multiprocessing.Process(
            target=tar_backup, args=(
                backup_opt_dict, tar_command, tar_backup_queue,))

        tar_backup_stream.daemon = True
        tar_backup_stream.start()

        # Consumer: drains tar_backup_queue and uploads each chunk.
        add_object_stream = multiprocessing.Process(
            target=add_object, args=(
                backup_opt_dict, tar_backup_queue, file_name, time_stamp))
        add_object_stream.daemon = True
        add_object_stream.start()

        # Wait for the producer, then push a {False: False} sentinel so the
        # consumer knows no more chunks are coming, and wait for it too.
        tar_backup_stream.join()
        tar_backup_queue.put(({False: False}))
        tar_backup_queue.close()
        add_object_stream.join()

        if add_object_stream.exitcode:
            raise Exception('failed to upload object to swift server')

        (backup_opt_dict, manifest_meta_dict, tar_meta_to_upload,
            tar_meta_prev) = gen_manifest_meta(
            backup_opt_dict, manifest_meta_dict, meta_data_backup_file)

        manifest_file = u''
        meta_data_abs_path = os.path.join(backup_opt_dict.workdir,
                                          tar_meta_prev)

        # Upload swift manifest for segments
        if backup_opt_dict.upload:
            # Request a new auth client in case the current token
            # is expired before uploading tar meta data or the swift manifest
            backup_opt_dict = get_client(backup_opt_dict)

            if not backup_opt_dict.no_incremental:
                # Upload tar incremental meta data file and remove it
                logging.info('[*] Uploading tar meta data file: {0}'.format(
                    tar_meta_to_upload))
                with open(meta_data_abs_path, 'r') as meta_fd:
                    backup_opt_dict.sw_connector.put_object(
                        backup_opt_dict.container, tar_meta_to_upload,
                        meta_fd)
                # Removing tar meta data file, so we have only one
                # authoritative version on swift
                logging.info('[*] Removing tar meta data file: {0}'.format(
                    meta_data_abs_path))
                os.remove(meta_data_abs_path)

            # Upload manifest to swift
            manifest_upload(
                manifest_file, backup_opt_dict, file_name, manifest_meta_dict)
    finally:
        # Snapshot cleanup must run whether the backup succeeded or not.
        if is_windows():
            # Delete the shadow copy after the backup
            vss_delete_shadow_copy(backup_opt_dict.shadow,
                                   backup_opt_dict.volume)
        else:
            # Unmount and remove lvm snapshot volume
            lvm_snap_remove(backup_opt_dict)
def test_is_windows(self):
    """is_windows() must return False when os.name is not 'nt'.

    The original test overwrote the module-global ``os.name`` with a fake
    object and never put the real value back, leaking the patch into every
    test that ran afterwards in the same process.  The patch is now
    reverted in a ``finally`` block.
    """
    original_name = os.name
    os.name = Os()  # fake value standing in for a non-'nt' platform
    try:
        assert is_windows() is False
    finally:
        # Always restore the real os.name so other tests are unaffected.
        os.name = original_name
def backup_arguments(args_dict={}): """ Default arguments and command line options interface. The function return a name space called backup_args. """ arg_parser = argparse.ArgumentParser(prog='freezerc') arg_parser.add_argument( '--action', choices=['backup', 'restore', 'info', 'admin'], help=("Set the action to be taken. backup and restore are" " self explanatory, info is used to retrieve info from the" " storage media, while admin is used to delete old backups" " and other admin actions. Default backup."), dest='action', default='backup') arg_parser.add_argument( '-F', '--path-to-backup', '--file-to-backup', action='store', help="The file or directory you want to back up to Swift", dest='src_file', default=False) arg_parser.add_argument( '-N', '--backup-name', action='store', help="The backup name you want to use to identify your backup \ on Swift", dest='backup_name', default=False) arg_parser.add_argument( '-m', '--mode', action='store', help="Set the technology to back from. Options are, fs (filesystem),\ mongo (MongoDB), mysql (MySQL), sqlserver (SQL Server)\ Default set to fs", dest='mode', default='fs') arg_parser.add_argument('-C', '--container', action='store', help="The Swift container used to upload files to", dest='container', default='freezer_backups') arg_parser.add_argument( '-L', '--list-containers', action='store_true', help='''List the Swift containers on remote Object Storage Server''', dest='list_container', default=False) arg_parser.add_argument( '-l', '--list-objects', action='store_true', help='''List the Swift objects stored in a container on remote Object\ Storage Server.''', dest='list_objects', default=False) arg_parser.add_argument( '-o', '--get-object', action='store', help="The Object name you want to download on the local file system.", dest='object', default=False) arg_parser.add_argument( '-d', '--dst-file', action='store', help="The file name used to save the object on your local disk and\ upload file in swift", dest='dst_file', 
default=False) arg_parser.add_argument( '--lvm-auto-snap', action='store', help=("Automatically guess the volume group and volume name for " "given PATH."), dest='lvm_auto_snap', default=False) arg_parser.add_argument( '--lvm-srcvol', action='store', help="Set the lvm volume you want to take a snaphost from. Default\ no volume", dest='lvm_srcvol', default=False) arg_parser.add_argument( '--lvm-snapname', action='store', help="Set the lvm snapshot name to use. If the snapshot name already\ exists, the old one will be used a no new one will be created. Default\ freezer_backup_snap.", dest='lvm_snapname', default=False) arg_parser.add_argument( '--lvm-snapsize', action='store', help="Set the lvm snapshot size when creating a new snapshot.\ Please add G for Gigabytes or M for Megabytes, i.e. 500M or 8G.\ Default 5G.", dest='lvm_snapsize', default=False) arg_parser.add_argument( '--lvm-dirmount', action='store', help="Set the directory you want to mount the lvm snapshot to.\ Default not set", dest='lvm_dirmount', default=False) arg_parser.add_argument( '--lvm-volgroup', action='store', help="Specify the volume group of your logical volume.\ This is important to mount your snapshot volume. Default not set", dest='lvm_volgroup', default=False) arg_parser.add_argument( '--max-level', action='store', help="Set the backup level used with tar to implement incremental \ backup. If a level 1 is specified but no level 0 is already \ available, a level 0 will be done and subsequently backs to level 1.\ Default 0 (No Incremental)", dest='max_backup_level', type=int, default=False) arg_parser.add_argument('--always-level', action='store', help="Set backup\ maximum level used with tar to implement incremental backup. If a \ level 3 is specified, the backup will be executed from level 0 to \ level 3 and to that point always a backup level 3 will be executed. \ It will not restart from level 0. This option has precedence over \ --max-backup-level. 
Default False (Disabled)", dest='always_backup_level', type=int, default=False) arg_parser.add_argument('--restart-always-level', action='store', help="Restart the backup \ from level 0 after n days. Valid only if --always-level option \ if set. If --always-level is used together with --remove-older-then, \ there might be the chance where the initial level 0 will be removed \ Default False (Disabled)", dest='restart_always_backup', type=float, default=False) arg_parser.add_argument( '-R', '--remove-older-then', '--remove-older-than', action='store', help=('Checks in the specified container for object older than the ' 'specified days.' 'If i.e. 30 is specified, it will remove the remote object ' 'older than 30 days. Default False (Disabled) ' 'The option --remove-older-then is deprecated ' 'and will be removed soon'), dest='remove_older_than', type=float, default=None) arg_parser.add_argument( '--remove-from-date', action='store', help=('Checks the specified container and removes objects older than ' 'the provided datetime in the form "YYYY-MM-DDThh:mm:ss ' 'i.e. "1974-03-25T23:23:23". ' 'Make sure the "T" is between date and time '), dest='remove_from_date', default=False) arg_parser.add_argument( '--no-incremental', action='store_true', help='''Disable incremental feature. By default freezer build the meta data even for level 0 backup. By setting this option incremental meta data is not created at all. Default disabled''', dest='no_incremental', default=False) arg_parser.add_argument( '--hostname', action='store', help='''Set hostname to execute actions. If you are executing freezer from one host but you want to delete objects belonging to another host then you can set this option that hostname and execute appropriate actions. 
Default current node hostname.''', dest='hostname', default=False) arg_parser.add_argument( '--mysql-conf', action='store', help='''Set the MySQL configuration file where freezer retrieve important information as db_name, user, password, host, port. Following is an example of config file: # cat ~/.freezer/backup_mysql_conf host = <db-host> user = <mysqluser> password = <mysqlpass> port = <db-port>''', dest='mysql_conf_file', default=False) if is_windows(): arg_parser.add_argument( '--log-file', action='store', help='Set log file. By default logs to ~/freezer.log', dest='log_file', default=os.path.join(home, '.freezer', 'freezer.log')) else: arg_parser.add_argument( '--log-file', action='store', help='Set log file. By default logs to /var/log/freezer.log' 'If that file is not writable, freezer tries to log' 'to ~/.freezer/freezer.log', dest='log_file', default=None) arg_parser.add_argument('--exclude', action='store', help="Exclude files,\ given as a PATTERN.Ex: --exclude '*.log' will exclude any file \ with name ending with .log. Default no exclude", dest='exclude', default=False) arg_parser.add_argument( '--dereference-symlink', choices=['none', 'soft', 'hard', 'all'], help=("Follow hard and soft links and archive and dump the files they " " refer to. Default False."), dest='dereference_symlink', default='none') arg_parser.add_argument( '-U', '--upload', action='store_true', help="Upload to Swift the destination file passed to the -d option.\ Default upload the data", dest='upload', default=True) arg_parser.add_argument( '--encrypt-pass-file', action='store', help="Passing a private key to this option, allow you to encrypt the \ files before to be uploaded in Swift. 
Default do not encrypt.", dest='encrypt_pass_file', default=False) arg_parser.add_argument( '-M', '--max-segment-size', action='store', help="Set the maximum file chunk size in bytes to upload to swift\ Default 67108864 bytes (64MB)", dest='max_seg_size', type=int, default=67108864) arg_parser.add_argument( '--restore-abs-path', action='store', help=('Set the absolute path where you want your data restored. ' 'Default False.'), dest='restore_abs_path', default=False) arg_parser.add_argument( '--restore-from-host', action='store', help='''Set the hostname used to identify the data you want to restore from. If you want to restore data in the same host where the backup was executed just type from your shell: "$ hostname" and the output is the value that needs to be passed to this option. Mandatory with Restore Default False.''', dest='restore_from_host', default=False) arg_parser.add_argument( '--restore-from-date', action='store', help='''Set the absolute path where you want your data restored. Please provide datetime in format "YYYY-MM-DDThh:mm:ss" i.e. "1979-10-03T23:23:23". Make sure the "T" is between date and time Default False.''', dest='restore_from_date', default=False) arg_parser.add_argument( '--max-priority', action='store_true', help='''Set the cpu process to the highest priority (i.e. -20 on Linux) and real-time for I/O. The process priority will be set only if nice and ionice are installed Default disabled. 
Use with caution.''', dest='max_priority', default=False) arg_parser.add_argument('-V', '--version', action='store_true', help='''Print the release version and exit''', dest='version', default=False) arg_parser.add_argument('-q', '--quiet', action='store_true', help='''Suppress error messages''', dest='quiet', default=False) arg_parser.add_argument( '--insecure', action='store_true', help='Allow to access swift servers without checking SSL certs.', dest='insecure', default=False) arg_parser.add_argument('--os-auth-ver', choices=['1', '2', '3'], action='store', help='Swift auth version, could be 1, 2 or 3', dest='auth_version', default=2) arg_parser.add_argument( '--proxy', action='store', help='''Enforce proxy that alters system HTTP_PROXY and HTTPS_PROXY, use \'\' to eliminate all system proxies''', dest='proxy', default=False) arg_parser.add_argument( '--dry-run', action='store_true', help='Do everything except writing or removing objects', dest='dry_run', default=False) arg_parser.add_argument('--upload-limit', action='store', help='''Upload bandwidth limit in Bytes per sec. Can be invoked with dimensions (10K, 120M, 10G).''', dest='upload_limit', type=utils.human2bytes, default=-1) arg_parser.add_argument('--download-limit', action='store', help='''Download bandwidth limit in Bytes per sec. Can be invoked with dimensions (10K, 120M, 10G).''', dest='download_limit', type=utils.human2bytes, default=-1) arg_parser.add_argument( '--sql-server-conf', action='store', help='''Set the SQL Server configuration file where freezer retrieve the sql server instance. 
Following is an example of config file: instance = <db-instance>''', dest='sql_server_config', default=False) arg_parser.add_argument('--volume', action='store', help='Create a snapshot of the selected volume', dest='volume', default=False) backup_args = arg_parser.parse_args() # windows bin path_to_binaries = os.path.dirname(os.path.abspath(__file__)) # Intercept command line arguments if you are not using the CLI if args_dict: backup_args.__dict__.update(args_dict) # Set additional namespace attributes backup_args.__dict__['remote_match_backup'] = [] backup_args.__dict__['remote_objects'] = [] backup_args.__dict__['remote_obj_list'] = [] backup_args.__dict__['remote_newest_backup'] = u'' # Set default workdir to ~/.freezer backup_args.__dict__['workdir'] = os.path.join(home, '.freezer') # Create a new namespace attribute for container_segments backup_args.__dict__['container_segments'] = u'{0}_segments'.format( backup_args.container) # The containers used by freezer to executed backups needs to have # freezer_ prefix in the name. If the user provider container doesn't # have the prefix, it is automatically added also to the container # segments name. 
This is done to quickly identify the containers # that contain freezer generated backups if not backup_args.container.startswith('freezer_'): backup_args.container = 'freezer_{0}'.format(backup_args.container) backup_args.container_segments = 'freezer_{0}'.format( backup_args.container_segments) # If hostname is not set, hostname of the current node will be used if not backup_args.hostname: backup_args.__dict__['hostname'] = socket.gethostname() backup_args.__dict__['manifest_meta_dict'] = {} backup_args.__dict__['curr_backup_level'] = '' backup_args.__dict__['manifest_meta_dict'] = '' if is_windows(): backup_args.__dict__['tar_path'] = '{0}\\bin\\tar.exe'.\ format(path_to_binaries) else: backup_args.__dict__['tar_path'] = distspawn.find_executable('tar') # If freezer is being used under OSX, please install gnutar and # rename the executable as gnutar if 'darwin' in sys.platform or 'bsd' in sys.platform: if distspawn.find_executable('gtar'): backup_args.__dict__['tar_path'] = \ distspawn.find_executable('gtar') else: raise Exception('Please install gnu tar (gtar) as it is a ' 'mandatory requirement to use freezer.') # If we have provided --proxy then overwrite the system HTTP_PROXY and # HTTPS_PROXY alter_proxy(backup_args.__dict__) # Get absolute path of other commands used by freezer backup_args.__dict__['lvcreate_path'] = distspawn.find_executable( 'lvcreate') backup_args.__dict__['lvremove_path'] = distspawn.find_executable( 'lvremove') backup_args.__dict__['bash_path'] = distspawn.find_executable('bash') if is_windows(): backup_args.__dict__['openssl_path'] = 'openssl' else: backup_args.__dict__['openssl_path'] = \ distspawn.find_executable('openssl') backup_args.__dict__['file_path'] = distspawn.find_executable('file') backup_args.__dict__['mount_path'] = distspawn.find_executable('mount') backup_args.__dict__['umount_path'] = distspawn.find_executable('umount') backup_args.__dict__['ionice'] = distspawn.find_executable('ionice') # MySQLdb object 
backup_args.__dict__['mysql_db_inst'] = '' # SQL Server object backup_args.__dict__['sql_server_instance'] = '' # Windows volume backup_args.__dict__['shadow'] = '' backup_args.__dict__['shadow_path'] = '' backup_args.__dict__['file_name'] = '' backup_args.__dict__['meta_data'] = {} backup_args.__dict__['meta_data_file'] = '' backup_args.__dict__['absolute_path'] = '' # Freezer version backup_args.__dict__['__version__'] = '1.1.3' return backup_args, arg_parser
See the License for the specific language governing permissions and limitations under the License. """ import os import sys from oslo_config import cfg from oslo_log import log from freezer import __version__ as FREEZER_VERSION from freezer.apiclient import client as api_client from freezer import winutils if winutils.is_windows(): DEFAULT_FREEZER_SCHEDULER_CONF_D = r'C:\.freezer\scheduler\conf.d' else: DEFAULT_FREEZER_SCHEDULER_CONF_D = '/etc/freezer/scheduler/conf.d' CONF = cfg.CONF _LOG = log.getLogger(__name__) def getCommonOpts(): scheduler_conf_d = os.environ.get('FREEZER_SCHEDULER_CONF_D', DEFAULT_FREEZER_SCHEDULER_CONF_D) common_opts = [ cfg.StrOpt('job',
def main():
    """Entry point for the freezer scheduler command line tool.

    Builds the table of sub-command handlers ("doers"), parses the
    command line, optionally creates an API client, then either runs a
    doer directly or hands the scheduler to a daemon for the
    start/stop/reload/status lifecycle actions.

    :return: a POSIX-style exit code (0 on success). When the action is
             a doer, whatever the doer returns is propagated instead.
    """
    doers = _get_doers(shell)
    doers.update(_get_doers(utils))

    # list(...) keeps this working on Python 3, where dict.keys() returns
    # a view object that cannot be concatenated with a list.
    # 'reload' is included because it is dispatched below; previously it
    # was implemented but never accepted by argument validation.
    possible_actions = list(doers) + ['start', 'stop', 'status', 'reload']

    arguments.parse_args(possible_actions)
    arguments.setup_logging()

    if CONF.action is None:
        CONF.print_help()
        return 65  # os.EX_DATAERR

    apiclient = None
    insecure = bool(CONF.insecure)

    if CONF.no_api is False:
        try:
            apiclient = client.Client(opts=CONF, insecure=insecure)
            if CONF.client_id:
                apiclient.client_id = CONF.client_id
        except Exception as e:
            LOG.error(e)
            print(e)
            sys.exit(1)
    else:
        if winutils.is_windows():
            print("--no-api mode is not available on windows")
            return 69  # os.EX_UNAVAILABLE

    # One-shot sub-commands run directly, without daemonizing.
    if CONF.action in doers:
        try:
            return doers[CONF.action](apiclient, CONF)
        except Exception as e:
            LOG.error(e)
            print('ERROR {0}'.format(e))
            return 70  # os.EX_SOFTWARE

    freezer_scheduler = FreezerScheduler(apiclient=apiclient,
                                         interval=int(CONF.interval),
                                         job_path=CONF.jobs_dir)

    if CONF.no_daemon:
        print('Freezer Scheduler running in no-daemon mode')
        LOG.debug('Freezer Scheduler running in no-daemon mode')
        daemon = NoDaemon(daemonizable=freezer_scheduler)
    else:
        if winutils.is_windows():
            # The windows Daemon needs the full configuration so it can
            # re-create the scheduler inside the service process.
            daemon = Daemon(daemonizable=freezer_scheduler,
                            interval=int(CONF.interval),
                            job_path=CONF.jobs_dir,
                            insecure=CONF.insecure)
        else:
            daemon = Daemon(daemonizable=freezer_scheduler)

    if CONF.action == 'start':
        daemon.start(log_file=CONF.log_file)
    elif CONF.action == 'stop':
        daemon.stop()
    elif CONF.action == 'reload':
        daemon.reload()
    elif CONF.action == 'status':
        daemon.status()

    # os.RETURN_CODES are only available to posix like systems, on windows
    # we need to translate the code to an actual number which is the equivalent
    return 0  # os.EX_OK