def main():
    """Entry point for the freezer scheduler.

    Parses the requested action, optionally builds an API client,
    wraps the scheduler in a (no-)daemon wrapper appropriate for the
    platform, and dispatches the action to it.

    :return: shell exit code (0 on success, 65 on bad action,
             69 when --no-api is requested on Windows)
    """
    possible_actions = ['start', 'stop', 'status', 'reload']
    arguments.parse_args(possible_actions)
    arguments.setup_logging()
    # Reject a missing or unknown action before doing any work.
    if CONF.action is None or CONF.action not in possible_actions:
        CONF.print_help()
        return 65  # os.EX_DATAERR
    apiclient = None
    if CONF.no_api is False:
        # API mode: any failure building the client is fatal.
        try:
            apiclient = client.Client(opts=CONF)
            if CONF.client_id:
                apiclient.client_id = CONF.client_id
        except Exception as e:
            LOG.error(e)
            print(e)
            sys.exit(1)
    else:
        # --no-api relies on posix daemon features, so Windows is excluded.
        if winutils.is_windows():
            print("--no-api mode is not available on windows")
            return 69  # os.EX_UNAVAILABLE
    # Make sure the local job directory exists before the scheduler scans it.
    freezer_utils.create_dir(CONF.jobs_dir, do_log=False)
    freezer_scheduler = FreezerScheduler(apiclient=apiclient,
                                         interval=int(CONF.interval),
                                         job_path=CONF.jobs_dir,
                                         concurrent_jobs=CONF.concurrent_jobs)
    if CONF.no_daemon:
        # Foreground mode: same scheduler, no daemonization.
        print('Freezer Scheduler running in no-daemon mode')
        LOG.debug('Freezer Scheduler running in no-daemon mode')
        if winutils.is_windows():
            daemon = win_daemon.NoDaemon(daemonizable=freezer_scheduler)
        else:
            daemon = linux_daemon.NoDaemon(daemonizable=freezer_scheduler)
    else:
        if winutils.is_windows():
            # The Windows daemon needs the scheduler parameters itself
            # because it re-creates the scheduler in the service process.
            daemon = win_daemon.Daemon(daemonizable=freezer_scheduler,
                                       interval=int(CONF.interval),
                                       job_path=CONF.jobs_dir,
                                       insecure=CONF.insecure,
                                       concurrent_jobs=CONF.concurrent_jobs)
        else:
            daemon = linux_daemon.Daemon(daemonizable=freezer_scheduler)
    # Dispatch the validated action to the daemon wrapper.
    if CONF.action == 'start':
        daemon.start()
    elif CONF.action == 'stop':
        daemon.stop()
    elif CONF.action == 'reload':
        daemon.reload()
    elif CONF.action == 'status':
        daemon.status()
    # os.RETURN_CODES are only available to posix like systems, on windows
    # we need to translate the code to an actual number which is the equivalent
    return 0  # os.EX_OK
def snapshot_remove(backup_opt_dict, shadow, windows_volume):
    """Tear down the platform snapshot created for a backup."""
    if not winutils.is_windows():
        # Unmount and remove the LVM snapshot volume.
        lvm.lvm_snap_remove(backup_opt_dict)
        return
    # Delete the VSS shadow copy now that the backup is done.
    vss.vss_delete_shadow_copy(shadow, windows_volume)
def restore_level(self, restore_path, read_pipe):
    """
    Restore the provided file into backup_opt_dict.restore_abs_path
    Decrypt the file if backup_opt_dict.encrypt_pass_file key is provided

    :param restore_path: directory/file to restore into
    :param read_pipe: multiprocessing pipe end providing the archive bytes
    """
    tar_command = tar_builders.TarCommandRestoreBuilder(
        restore_path, self.compression_algo, self.is_windows)

    if self.encrypt_pass_file:
        tar_command.set_encryption(self.encrypt_pass_file)

    if self.dry_run:
        tar_command.set_dry_run()

    command = tar_command.build()

    if winutils.is_windows():
        # on windows, chdir to restore path.
        os.chdir(restore_path)

    tar_process = subprocess.Popen(
        command, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
        stderr=subprocess.PIPE, shell=True)
    # Start loop reading the pipe and pass the data to the tar std input.
    # If EOFError exception is raised, the loop end the std err will be
    # checked for errors.
    try:
        while True:
            tar_process.stdin.write(read_pipe.recv_bytes())
    except EOFError:
        logging.info('[*] Pipe closed as EOF reached. '
                     'Data transmitted successfully')
    finally:
        # Fix: run the output check unconditionally. Previously an
        # unexpected exception from stdin.write (e.g. a broken pipe when
        # tar dies early) skipped this call, leaving the tar child
        # unreaped and its stderr unchecked.
        self.check_process_output(tar_process)
def restore_level(self, restore_resource, read_pipe, backup, except_queue):
    """
    Restore the provided file into backup_opt_dict.restore_abs_path
    Decrypt the file if backup_opt_dict.encrypt_pass_file key is provided

    :param restore_path:
    :param read_pipe:
    :type backup: freezer.storage.base.Backup
    :param backup:
    """
    try:
        metadata = backup.metadata()
        # Refuse to restore an encrypted backup when no key was supplied.
        if (not self.encrypt_pass_file and
                metadata.get("encryption", False)):
            raise Exception("Cannot restore encrypted backup without key")

        # Prefer the compression recorded at backup time over the
        # currently configured algorithm.
        tar_command = tar_builders.TarCommandRestoreBuilder(
            restore_resource,
            metadata.get('compression', self.compression_algo),
            self.is_windows)

        if self.encrypt_pass_file:
            tar_command.set_encryption(self.encrypt_pass_file)

        if self.dry_run:
            tar_command.set_dry_run()

        command = tar_command.build()

        if winutils.is_windows():
            # on windows, chdir to restore path.
            os.chdir(restore_resource)

        # NOTE(review): executable='/bin/bash' will not exist on Windows
        # even though the branch above handles it — confirm the Windows
        # path is exercised elsewhere.
        tar_process = subprocess.Popen(command, stdin=subprocess.PIPE,
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE, shell=True,
                                       executable='/bin/bash')
        # Start loop reading the pipe and pass the data to the tar
        # std input. If EOFError exception is raised, the loop end
        # the std err will be checked for errors.
        try:
            while True:
                tar_process.stdin.write(read_pipe.recv_bytes())
        except EOFError:
            LOG.info('Pipe closed as EOF reached. '
                     'Data transmitted successfully')
        finally:
            # Always reap the child and inspect stderr, even on failure.
            self.check_process_output(tar_process, 'Restore')
    except Exception as e:
        # Report the failure to the parent process, then re-raise so the
        # worker itself also terminates with an error.
        LOG.exception(e)
        except_queue.put(e)
        raise
def restore_level(self, restore_resource, read_pipe, backup, except_queue):
    """
    Restore the provided file into backup_opt_dict.restore_abs_path
    Decrypt the file if backup_opt_dict.encrypt_pass_file key is provided

    :param restore_path:
    :param read_pipe:
    :type backup: freezer.storage.base.Backup
    :param backup:
    """
    try:
        metadata = backup.metadata()
        # Refuse to restore an encrypted backup when no key was supplied.
        if (not self.encrypt_pass_file and
                metadata.get("encryption", False)):
            raise Exception("Cannot restore encrypted backup without key")

        # Prefer the compression recorded at backup time over the
        # currently configured algorithm.
        tar_command = tar_builders.TarCommandRestoreBuilder(
            restore_resource,
            metadata.get('compression', self.compression_algo),
            self.is_windows)

        if self.encrypt_pass_file:
            tar_command.set_encryption(self.encrypt_pass_file)

        if self.dry_run:
            tar_command.set_dry_run()

        command = tar_command.build()

        if winutils.is_windows():
            # on windows, chdir to restore path.
            os.chdir(restore_resource)

        # NOTE(review): executable='/bin/bash' will not exist on Windows
        # even though the branch above handles it — confirm the Windows
        # path is exercised elsewhere.
        tar_process = subprocess.Popen(
            command, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
            stderr=subprocess.PIPE, shell=True, executable='/bin/bash')
        # Start loop reading the pipe and pass the data to the tar
        # std input. If EOFError exception is raised, the loop end
        # the std err will be checked for errors.
        try:
            while True:
                tar_process.stdin.write(read_pipe.recv_bytes())
        except EOFError:
            LOG.info('Pipe closed as EOF reached. '
                     'Data transmitted successfully')
        finally:
            # Always reap the child and inspect stderr, even on failure.
            self.check_process_output(tar_process, 'Restore')
    except Exception as e:
        # Report the failure to the parent process, then re-raise so the
        # worker itself also terminates with an error.
        LOG.exception(e)
        except_queue.put(e)
        raise
def __init__(self, **kwargs):
    """Set up the v2 rsync engine from keyword options."""
    # Packaging options.
    self.compression_algo = kwargs.get('compression')
    self.encrypt_pass_file = kwargs.get('encrypt_key', None)
    # Source-tree traversal options.
    self.dereference_symlink = kwargs.get('symlinks')
    self.exclude = kwargs.get('exclude')
    # Backend and platform.
    self.storage = kwargs.get('storage')
    self.is_windows = winutils.is_windows()
    self.dry_run = kwargs.get('dry_run', False)
    # Transfer tuning.
    self.max_segment_size = kwargs.get('max_segment_size')
    self.rsync_block_size = kwargs.get('rsync_block_size')
    # Per-run block counters, updated while diffing.
    self.fixed_blocks = 0
    self.modified_blocks = 0
    super(Rsyncv2Engine, self).__init__(storage=kwargs.get('storage'))
def tar_path():
    """Locate the tar binary to use and return its path."""
    from freezer.utils import winutils

    if winutils.is_windows():
        # Windows builds ship their own tar.exe next to this module.
        here = os.path.dirname(os.path.abspath(__file__))
        return '{0}\\bin\\tar.exe'.format(here)

    # Prefer GNU tar variants, falling back to the system tar.
    tar = None
    for candidate in ('gnutar', 'gtar', 'tar'):
        tar = get_executable_path(candidate)
        if tar:
            break
    if not tar:
        raise Exception('Please install gnu tar (gtar) as it is a '
                        'mandatory requirement to use freezer.')
    return tar
def __init__(self, storage, **kwargs):
    """Wire up an os-brick backed engine on top of *storage*."""
    super(OsbrickEngine, self).__init__(storage=storage)
    # Cinder access for volume attach/detach during backup.
    self.client = client_manager.get_client_manager(CONF)
    self.cinder = self.client.create_cinder()
    self.volume_info = None
    # Packaging options.
    self.compression_algo = kwargs.get('compression')
    self.encrypt_pass_file = kwargs.get('encrypt_key')
    self.max_segment_size = kwargs.get('max_segment_size')
    # Source-tree traversal options and platform.
    self.dereference_symlink = kwargs.get('symlinks')
    self.exclude = kwargs.get('exclude')
    self.storage = storage
    self.is_windows = winutils.is_windows()
    self.dry_run = kwargs.get('dry_run', False)
def __init__(
        self, compression, symlinks, exclude, storage,
        max_segment_size, encrypt_key=None,
        dry_run=False):
    """Build an rsync engine bound to *storage*."""
    # Source-tree traversal options and platform.
    self.dereference_symlink = symlinks
    self.exclude = exclude
    self.is_windows = winutils.is_windows()
    self.dry_run = dry_run
    # Packaging options.
    self.compression_algo = compression
    self.encrypt_pass_file = encrypt_key
    self.max_segment_size = max_segment_size
    # Backend.
    self.storage = storage
    # Compression and encryption objects, lazily created per run.
    self.compressor = None
    self.cipher = None
    super(RsyncEngine, self).__init__(storage=storage)
def __init__(
        self, compression, symlinks, exclude, storage,
        max_segment_size, encrypt_key=None,
        dry_run=False, **kwargs):
    """Build an rsync engine bound to *storage* (extra kwargs ignored)."""
    # Source-tree traversal options and platform.
    self.dereference_symlink = symlinks
    self.exclude = exclude
    self.is_windows = winutils.is_windows()
    self.dry_run = dry_run
    # Packaging options.
    self.compression_algo = compression
    self.encrypt_pass_file = encrypt_key
    self.max_segment_size = max_segment_size
    # Backend.
    self.storage = storage
    # Compression and encryption objects, lazily created per run.
    self.compressor = None
    self.cipher = None
    super(RsyncEngine, self).__init__(storage=storage)
def __init__(
        self, compression, symlinks, exclude, storage,
        max_segment_size, encrypt_key=None,
        dry_run=False, **kwargs):
    """Build a tar engine bound to *storage* (extra kwargs ignored).

    :type storage: freezer.storage.base.Storage
    :return:
    """
    # Source-tree traversal options and platform.
    self.dereference_symlink = symlinks
    self.exclude = exclude
    self.is_windows = winutils.is_windows()
    self.dry_run = dry_run
    # Packaging options.
    self.compression_algo = compression
    self.encrypt_pass_file = encrypt_key
    self.max_segment_size = max_segment_size
    # Backend.
    self.storage = storage
    super(TarEngine, self).__init__(storage=storage)
def __init__(
        self, compression, symlinks, exclude, storage,
        max_segment_size, encrypt_key=None,
        dry_run=False):
    """Build a tar engine bound to *storage*.

    :type storage: freezer.storage.base.Storage
    :return:
    """
    # Source-tree traversal options and platform.
    self.dereference_symlink = symlinks
    self.exclude = exclude
    self.is_windows = winutils.is_windows()
    self.dry_run = dry_run
    # Packaging options.
    self.compression_algo = compression
    self.encrypt_pass_file = encrypt_key
    self.max_segment_size = max_segment_size
    # Backend.
    self.storage = storage
    super(TarEngine, self).__init__(storage=storage)
def get_executable_path(binary):
    """Return the absolute path of *binary* if found on this system.

    :param binary: executable name without extension
    :type binary: str
    :rtype: str
    :return: Absolute Path to the executable file
    """
    from freezer.utils import winutils

    if winutils.is_windows():
        # Windows builds bundle tools under ./bin next to this module.
        here = os.path.dirname(os.path.abspath(__file__))
        return '{0}\\bin\\{1}.exe'.format(here, binary)

    if is_bsd():
        # NOTE(review): searching sys.path (module dirs) for executables
        # looks odd but is kept for behavioral parity — confirm intent.
        found = distspawn.find_executable(binary)
        return found or distspawn.find_executable(binary,
                                                  path=':'.join(sys.path))

    return distspawn.find_executable(binary)
def restore_level(self, restore_path, read_pipe, backup):
    """
    Restore the provided file into backup_opt_dict.restore_abs_path
    Decrypt the file if backup_opt_dict.encrypt_pass_file key is provided

    :param restore_path: directory/file to restore into
    :param read_pipe: multiprocessing pipe end providing the archive bytes
    :type backup: freezer.storage.base.Backup
    :param backup: backup whose metadata drives compression/encryption
    """
    metadata = backup.metadata()
    # Refuse to restore an encrypted backup when no key was supplied.
    if not self.encrypt_pass_file and metadata.get("encryption", False):
        raise Exception("Cannot restore encrypted backup without key")

    # Prefer the compression recorded at backup time over the currently
    # configured algorithm.
    tar_command = tar_builders.TarCommandRestoreBuilder(
        restore_path,
        metadata.get('compression', self.compression_algo),
        self.is_windows)

    if self.encrypt_pass_file:
        tar_command.set_encryption(self.encrypt_pass_file)

    if self.dry_run:
        tar_command.set_dry_run()

    command = tar_command.build()

    if winutils.is_windows():
        # on windows, chdir to restore path.
        os.chdir(restore_path)

    tar_process = subprocess.Popen(command, stdin=subprocess.PIPE,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE, shell=True)
    # Start loop reading the pipe and pass the data to the tar std input.
    # If EOFError exception is raised, the loop end the std err will be
    # checked for errors.
    try:
        while True:
            tar_process.stdin.write(read_pipe.recv_bytes())
    except EOFError:
        logging.info('[*] Pipe closed as EOF reached. '
                     'Data transmitted successfully')
    finally:
        # Fix: run the output check unconditionally. Previously an
        # unexpected exception from stdin.write (e.g. a broken pipe when
        # tar dies early) skipped this call, leaving the tar child
        # unreaped and its stderr unchecked.
        self.check_process_output(tar_process)
def snapshot_create(backup_opt_dict):
    """
    Calls the code to take fs snapshots, depending on the platform

    :param backup_opt_dict:
    :return: boolean value, True if snapshot has been taken, false otherwise
    """
    # Fix: bail out early when no snapshot was requested. Previously the
    # POSIX branch called lvm.lvm_snap() unconditionally, while the
    # Windows branch honored the flag — this makes both platforms agree
    # (see the sibling variant of this function for the intended shape).
    if not backup_opt_dict.snapshot:
        return False

    if winutils.is_windows():
        # Create a shadow copy and re-point the backup path at it.
        backup_opt_dict.shadow_path, backup_opt_dict.shadow = \
            vss.vss_create_shadow_copy(backup_opt_dict.windows_volume)
        backup_opt_dict.path_to_backup = winutils.use_shadow(
            backup_opt_dict.path_to_backup,
            backup_opt_dict.windows_volume)
        return True
    else:
        # Create and mount an LVM snapshot of the source volume.
        return lvm.lvm_snap(backup_opt_dict)
def snapshot_create(backup_opt_dict):
    """
    Calls the code to take fs snapshots, depending on the platform

    :param backup_opt_dict:
    :return: boolean value, True if snapshot has been taken, false otherwise
    """
    # No snapshot requested: nothing to do on any platform.
    if not backup_opt_dict.snapshot:
        return False

    if winutils.is_windows():
        # Fix: dropped the nested `if backup_opt_dict.snapshot:` check —
        # it is always true after the guard above, and its False path
        # (falling through to `return False`) was unreachable dead code.
        # Create a shadow copy and re-point the backup path at it.
        backup_opt_dict.shadow_path, backup_opt_dict.shadow = \
            vss.vss_create_shadow_copy(backup_opt_dict.windows_volume)
        backup_opt_dict.path_to_backup = winutils.use_shadow(
            backup_opt_dict.path_to_backup,
            backup_opt_dict.windows_volume)
        return True
    else:
        # Create and mount an LVM snapshot of the source volume.
        return lvm.lvm_snap(backup_opt_dict)
def get_backup_args():
    """Merge defaults, CLI options and the optional config file into one
    backup-arguments namespace, set up logging, and configure optional
    trickle bandwidth limiting.

    :return: FreezerConfig instance carrying all resolved options
    """
    defaults = DEFAULT_PARAMS.copy()

    class FreezerConfig(object):
        # Thin attribute-access wrapper over the merged options dict.
        def __init__(self, args):
            self.__dict__.update(args)
    # CLI options win over defaults; None values are treated as unset.
    cli_options = dict([(x, y) for x, y in CONF.iteritems()
                        if y is not None])
    defaults.update(cli_options)
    conf = None
    if CONF.get('config'):
        conf = freezer_config.Config.parse(CONF.get('config'))
        defaults.update(conf.default)
        # TODO: restore_from_host is deprecated and to be removed
        defaults['hostname'] = conf.default.get('hostname') or \
            conf.default.get('restore_from_host')
        # override default oslo values
        levels = {
            'all': log.NOTSET,
            'debug': log.DEBUG,
            'warn': log.WARN,
            'info': log.INFO,
            'error': log.ERROR,
            'critical': log.CRITICAL
        }
        if not CONF.get('log_file'):
            # NOTE(review): this overrides 'log_file' with a numeric log
            # LEVEL looked up from defaults['log_file'] — looks like it
            # was meant to use defaults['log_level']; confirm intent.
            CONF.set_override('log_file',
                              levels.get(defaults['log_file'],
                                         log.NOTSET))
        CONF.set_override('default_log_levels', defaults['log_level'])
    if not CONF.get('log_file'):
        # Try the candidate log locations in order; last writable wins.
        log_file = None
        for file_name in ['/var/log/freezer.log',
                          '~/.freezer/freezer.log']:
            try:
                log_file = prepare_logging(file_name)
            except IOError:
                pass
        if log_file:
            CONF.set_default('log_file', log_file)
        else:
            LOG.warn("log file cannot be created. Freezer will proceed with "
                     "default stdout and stderr")
    backup_args = FreezerConfig(defaults)
    # Set default working directory to ~/.freezer. If the directory
    # does not exists it is created
    work_dir = os.path.join(home, '.freezer')
    backup_args.__dict__['work_dir'] = work_dir
    if not os.path.exists(work_dir):
        try:
            os.makedirs(work_dir)
        except (OSError, IOError) as err_msg:
            # This avoids freezer-agent to crash if it can't write to
            # ~/.freezer, which may happen on some env (for me,
            # it happens in Jenkins, as freezer-agent can't write to
            # /var/lib/jenkins).
            print(encodeutils.safe_decode('{}'.format(err_msg)),
                  file=sys.stderr)
    # If hostname is not set, hostname of the current node will be used
    if not backup_args.hostname:
        backup_args.__dict__['hostname'] = socket.gethostname()
    # If we have provided --proxy then overwrite the system HTTP_PROXY and
    # HTTPS_PROXY
    if backup_args.proxy:
        utils.alter_proxy(backup_args.proxy)
    # MySQLdb object
    backup_args.__dict__['mysql_db_inst'] = ''
    backup_args.__dict__['storages'] = None
    if conf and conf.storages:
        backup_args.__dict__['storages'] = conf.storages
    # Windows volume
    backup_args.__dict__['shadow'] = ''
    backup_args.__dict__['shadow_path'] = ''
    backup_args.__dict__['file_name'] = ''
    if winutils.is_windows():
        if backup_args.path_to_backup:
            # e.g. 'C:\\' — drive portion of the path being backed up.
            backup_args.__dict__['windows_volume'] = \
                backup_args.path_to_backup[:3]
    # todo(enugaev) move it to new command line param backup_media
    if backup_args.lvm_auto_snap:
        raise Exception('lvm-auto-snap is deprecated. '
                        'Please use --snapshot instead')
    # Pick the backup media from the mutually-exclusive id options.
    backup_media = 'fs'
    if backup_args.cinder_vol_id:
        backup_media = 'cinder'
    elif backup_args.cindernative_vol_id:
        backup_media = 'cindernative'
    elif backup_args.nova_inst_id:
        backup_media = 'nova'
    backup_args.__dict__['backup_media'] = backup_media
    backup_args.__dict__['time_stamp'] = None
    # NOTE(review): 'and' binds tighter than 'or', so is_windows() only
    # guards the download_limit test; likely meant
    # (upload != -1 or download != -1) and not is_windows() — confirm.
    if backup_args.upload_limit != -1 or backup_args.download_limit != -1 and \
            not winutils.is_windows():
        # handle --config option with tmp config file
        if backup_args.config:
            conf_file = NamedTemporaryFile(prefix='freezer_job_',
                                           delete=False)
            # remove the limits from the new file
            if 'upload_limit' in conf.default:
                conf.default.pop('upload_limit')
            elif 'download_limit' in conf.default:
                conf.default.pop('download_limit')
            utils.save_config_to_file(conf.default, conf_file, 'default')
            # replace the original file with the tmp one
            conf_index = sys.argv.index('--config') + 1
            sys.argv[conf_index] = conf_file.name
        # if limits provided from cli remove it !
        if '--upload-limit' in sys.argv:
            index = sys.argv.index('--upload-limit')
            sys.argv.pop(index)
            sys.argv.pop(index)
        if '--download-limit' in sys.argv:
            index = sys.argv.index('--download-limit')
            sys.argv.pop(index)
            sys.argv.pop(index)
        # locate trickle
        trickle_executable = distspawn.find_executable('trickle')
        if trickle_executable is None:
            trickle_executable = distspawn.find_executable(
                'trickle', path=":".join(sys.path))
        if trickle_executable is None:
            trickle_executable = distspawn.find_executable(
                'trickle', path=":".join(os.environ.get('PATH')))
        if trickle_executable:
            LOG.info("[*] Info: Starting trickle ...")
            trickle_command = '{0} -d {1} -u {2} '.\
                format(trickle_executable,
                       getattr(backup_args, 'download_limit') or -1,
                       getattr(backup_args, 'upload_limit') or -1)
            backup_args.__dict__['trickle_command'] = trickle_command
            if backup_args.config:
                backup_args.__dict__['tmp_file'] = conf_file.name
            # maintain env variable not to get into infinite loop
            if "tricklecount" in os.environ:
                tricklecount = int(os.environ.get("tricklecount", 1))
                tricklecount += 1
                os.environ["tricklecount"] = str(tricklecount)
            else:
                os.environ["tricklecount"] = str(1)
        else:
            LOG.warn("[*] Trickle not found. Switching to normal mode without "
                     "limiting bandwidth")
            if backup_args.config:
                # remove index tmp_file from backup arguments dict
                backup_args.__dict__.pop('tmp_file')
                utils.delete_file(conf_file.name)
    return backup_args
def freezer_main(backup_args):
    """Freezer main loop for job execution.

    Builds the storage backend and tar engine, then runs the job —
    either directly or re-executed under trickle when bandwidth
    limiting was configured.

    :param backup_args: resolved backup options namespace
    """
    if not backup_args.quiet:
        LOG.info('log file at {0}'.format(CONF.get('log_file')))

    if backup_args.max_priority:
        utils.set_max_process_priority()

    bandwidth.monkeypatch_socket_bandwidth(backup_args)

    backup_args.__dict__['hostname_backup_name'] = "{0}_{1}".format(
        backup_args.hostname, backup_args.backup_name)

    validator.validate(backup_args)

    work_dir = backup_args.work_dir
    os_identity = backup_args.os_identity_api_version
    max_segment_size = backup_args.max_segment_size
    if backup_args.storages:
        storage = multiple.MultipleStorage(
            work_dir,
            [storage_from_dict(x, work_dir, max_segment_size, os_identity)
             for x in backup_args.storages])
    else:
        storage = storage_from_dict(backup_args.__dict__, work_dir,
                                    max_segment_size, os_identity)

    backup_args.__dict__['engine'] = tar_engine.TarBackupEngine(
        backup_args.compression,
        backup_args.dereference_symlink,
        backup_args.exclude,
        storage,
        winutils.is_windows(),
        backup_args.encrypt_pass_file,
        backup_args.dry_run)

    if hasattr(backup_args, 'trickle_command'):
        if "tricklecount" in os.environ:
            if int(os.environ.get("tricklecount")) > 1:
                LOG.critical("[*] Trickle seems to be not working, Switching "
                             "to normal mode ")
                # Fix: return here. Previously execution fell through and
                # relaunched the trickle subprocess even after falling
                # back, re-running the job a second time.
                return run_job(backup_args, storage)
        # Re-exec ourselves under trickle so the whole transfer is
        # bandwidth-limited.
        freezer_command = '{0} {1}'.format(backup_args.trickle_command,
                                           ' '.join(sys.argv))
        process = subprocess.Popen(freezer_command.split(),
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   env=os.environ.copy())
        # Relay the child's stdout while it runs.
        while process.poll() is None:
            print(process.stdout.readline().rstrip())
        output, error = process.communicate()
        if process.returncode:
            LOG.error("[*] Trickle Error: {0}".format(error))
            LOG.critical("[*] Switching to work without trickle ...")
            return run_job(backup_args, storage)
    else:
        return run_job(backup_args, storage)
def test_is_windows(self):
    """is_windows() must be False when os.name is not 'nt'."""
    fake_os = commons.Os()
    # Fix: restore os.name afterwards. The original assignment leaked the
    # fake value into the global os module, polluting every test that
    # runs after this one in the same process.
    saved_name = os.name
    os.name = fake_os
    try:
        assert winutils.is_windows() is False
    finally:
        os.name = saved_name
def get_backup_args():
    """Merge defaults, CLI options and the optional config file into one
    backup-arguments namespace, set up logging, and configure optional
    trickle bandwidth limiting.

    :return: FreezerConfig instance carrying all resolved options
    """
    defaults = DEFAULT_PARAMS.copy()

    class FreezerConfig(object):
        # Thin attribute-access wrapper over the merged options dict.
        def __init__(self, args):
            self.__dict__.update(args)
    # CLI options win over defaults; None values are treated as unset.
    cli_options = dict([(x, y) for x, y in CONF.iteritems()
                        if y is not None])
    defaults.update(cli_options)
    conf = None
    if CONF.get('config'):
        conf = freezer_config.Config.parse(CONF.get('config'))
        # force log_config_append to always exists in defaults even if not
        # provided.
        defaults['log_config_append'] = None
        defaults.update(conf.default)
        if defaults['log_file']:
            CONF.set_override('log_file', defaults['log_file'])
        CONF.set_override('default_log_levels', _DEFAULT_LOG_LEVELS)
    if not CONF.get('log_file'):
        # Try the candidate log locations in order; last writable wins.
        log_file = None
        for file_name in [
            '/var/log/freezer-agent/freezer-agent.log',
            '/var/log/freezer.log'
        ]:
            try:
                log_file = prepare_logging(file_name)
            except IOError:
                pass
        if not log_file:
            # Set default working directory to ~/.freezer. If the directory
            # does not exists it is created
            work_dir = os.path.join(home, '.freezer')
            if not os.path.exists(work_dir):
                try:
                    os.makedirs(work_dir)
                    log_file = prepare_logging(
                        os.path.join(work_dir, 'freezer.log'))
                except (OSError, IOError) as err_msg:
                    # This avoids freezer-agent to crash if it can't write to
                    # ~/.freezer, which may happen on some env (for me,
                    # it happens in Jenkins, as freezer-agent can't write to
                    # /var/lib/jenkins).
                    print(encodeutils.safe_decode('{}'.format(err_msg)),
                          file=sys.stderr)
        if log_file:
            CONF.set_default('log_file', log_file)
        else:
            LOG.warning("log file cannot be created. Freezer will proceed "
                        "with default stdout and stderr")
    backup_args = FreezerConfig(defaults)
    if CONF.get('config'):
        backup_args.__dict__['config'] = CONF.get('config')
    # Set default working directory to ~/.freezer. If the directory
    # does not exists it is created
    work_dir = os.path.join(home, '.freezer')
    backup_args.__dict__['work_dir'] = work_dir
    if not os.path.exists(work_dir):
        try:
            os.makedirs(work_dir)
        except (OSError, IOError) as err_msg:
            # This avoids freezer-agent to crash if it can't write to
            # ~/.freezer, which may happen on some env (for me,
            # it happens in Jenkins, as freezer-agent can't write to
            # /var/lib/jenkins).
            print(encodeutils.safe_decode('{}'.format(err_msg)),
                  file=sys.stderr)
    # If hostname is not set, hostname of the current node will be used
    if not backup_args.hostname:
        backup_args.__dict__['hostname'] = socket.gethostname()
    # If we have provided --proxy then overwrite the system HTTP_PROXY and
    # HTTPS_PROXY
    if backup_args.proxy:
        utils.alter_proxy(backup_args.proxy)
    # MySQLdb object
    backup_args.__dict__['mysql_db_inst'] = ''
    backup_args.__dict__['storages'] = None
    if conf and conf.storages:
        backup_args.__dict__['storages'] = conf.storages
    # Windows volume
    backup_args.__dict__['shadow'] = ''
    backup_args.__dict__['shadow_path'] = ''
    backup_args.__dict__['file_name'] = ''
    if winutils.is_windows():
        if backup_args.path_to_backup:
            # e.g. 'C:\\' — drive portion of the path being backed up.
            backup_args.__dict__['windows_volume'] = \
                backup_args.path_to_backup[:3]
    # Pick the backup media from the mutually-exclusive id options.
    backup_media = 'fs'
    if backup_args.cinder_vol_id:
        backup_media = 'cinder'
    elif backup_args.cindernative_vol_id or \
            backup_args.cindernative_backup_id:
        backup_media = 'cindernative'
    elif backup_args.engine_name == 'nova' and (backup_args.project_id or
                                                backup_args.nova_inst_id):
        backup_media = 'nova'
    elif backup_args.cinderbrick_vol_id:
        backup_media = 'cinderbrick'
    backup_args.__dict__['backup_media'] = backup_media
    backup_args.__dict__['time_stamp'] = None
    # NOTE(review): 'and' binds tighter than 'or', so is_windows() only
    # guards the download_limit test; likely meant
    # (upload != -1 or download != -1) and not is_windows() — confirm.
    if backup_args.upload_limit != -1 or backup_args.download_limit != -1 and \
            not winutils.is_windows():
        # handle --config option with tmp config file
        if backup_args.config:
            conf_file = tempfile.NamedTemporaryFile(prefix='freezer_job_',
                                                    delete=False)
            # remove the limits from the new file
            if 'upload_limit' in conf.default:
                conf.default.pop('upload_limit')
            elif 'download_limit' in conf.default:
                conf.default.pop('download_limit')
            utils.save_config_to_file(conf.default, conf_file, 'default')
            # replace the original file with the tmp one
            conf_index = sys.argv.index('--config') + 1
            sys.argv[conf_index] = conf_file.name
        # if limits provided from cli remove it !
        if '--upload-limit' in sys.argv:
            index = sys.argv.index('--upload-limit')
            sys.argv.pop(index)
            sys.argv.pop(index)
        if '--download-limit' in sys.argv:
            index = sys.argv.index('--download-limit')
            sys.argv.pop(index)
            sys.argv.pop(index)
        # locate trickle
        trickle_executable = distspawn.find_executable('trickle')
        if trickle_executable is None:
            trickle_executable = distspawn.find_executable('trickle',
                                                           path=":".join(
                                                               sys.path))
        if trickle_executable is None:
            trickle_executable = distspawn.find_executable(
                'trickle', path=":".join(os.environ.get('PATH')))
        if trickle_executable:
            LOG.info("Info: Starting trickle ...")
            trickle_command = '{0} -d {1} -u {2} '.\
                format(trickle_executable,
                       getattr(backup_args, 'download_limit') or -1,
                       getattr(backup_args, 'upload_limit') or -1)
            backup_args.__dict__['trickle_command'] = trickle_command
            if backup_args.config:
                backup_args.__dict__['tmp_file'] = conf_file.name
            # maintain env variable not to get into infinite loop
            if "tricklecount" in os.environ:
                tricklecount = int(os.environ.get("tricklecount", 1))
                tricklecount += 1
                os.environ["tricklecount"] = str(tricklecount)
            else:
                os.environ["tricklecount"] = str(1)
        else:
            LOG.warning("Trickle not found. Switching to normal mode without "
                        "limiting bandwidth")
            if backup_args.config:
                # remove index tmp_file from backup arguments dict
                utils.delete_file(conf_file.name)
    return backup_args
def openssl_path():
    """Return the openssl executable to invoke for encryption."""
    from freezer.utils import winutils

    # On Windows rely on PATH resolution; elsewhere resolve the full path.
    if winutils.is_windows():
        return 'openssl'
    return find_executable('openssl')
def main():
    """Entry point for the freezer scheduler (legacy variant).

    Resolves the requested action (a built-in doer or a daemon command),
    optionally builds an API client, and dispatches.

    :return: shell exit code (0 success, 65 bad action, 69 no-api on
             Windows, 70 doer failure)
    """
    doers = _get_doers(shell)
    doers.update(_get_doers(utils))
    # Fix: wrap keys() in list(). On Python 3 dict.keys() returns a view
    # and `view + list` raises TypeError; list() works on both 2 and 3.
    possible_actions = list(doers.keys()) + ['start', 'stop', 'status']

    arguments.parse_args(possible_actions)
    arguments.setup_logging()

    if CONF.action is None:
        CONF.print_help()
        return 65  # os.EX_DATAERR

    apiclient = None
    insecure = False
    if CONF.insecure:
        insecure = True

    if CONF.no_api is False:
        # API mode: any failure building the client is fatal.
        try:
            apiclient = client.Client(opts=CONF, insecure=insecure)
            if CONF.client_id:
                apiclient.client_id = CONF.client_id
        except Exception as e:
            LOG.error(e)
            print(e)
            sys.exit(1)
    else:
        # --no-api relies on posix daemon features, so Windows is excluded.
        if winutils.is_windows():
            print("--no-api mode is not available on windows")
            return 69  # os.EX_UNAVAILABLE

    # Doer actions run inline and return immediately.
    if CONF.action in doers:
        try:
            return doers[CONF.action](apiclient, CONF)
        except Exception as e:
            LOG.error(e)
            print('ERROR {0}'.format(e))
            return 70  # os.EX_SOFTWARE

    freezer_scheduler = FreezerScheduler(apiclient=apiclient,
                                         interval=int(CONF.interval),
                                         job_path=CONF.jobs_dir)

    if CONF.no_daemon:
        print('Freezer Scheduler running in no-daemon mode')
        LOG.debug('Freezer Scheduler running in no-daemon mode')
        daemon = NoDaemon(daemonizable=freezer_scheduler)
    else:
        if winutils.is_windows():
            # The Windows daemon re-creates the scheduler in the service
            # process, so it needs the scheduler parameters itself.
            daemon = Daemon(daemonizable=freezer_scheduler,
                            interval=int(CONF.interval),
                            job_path=CONF.jobs_dir,
                            insecure=CONF.insecure)
        else:
            daemon = Daemon(daemonizable=freezer_scheduler)

    if CONF.action == 'start':
        daemon.start(log_file=CONF.log_file)
    elif CONF.action == 'stop':
        daemon.stop()
    elif CONF.action == 'reload':
        daemon.reload()
    elif CONF.action == 'status':
        daemon.status()

    # os.RETURN_CODES are only available to posix like systems, on windows
    # we need to translate the code to an actual number which is the
    # equivalent
    return 0  # os.EX_OK
def get_backup_args():
    """Merge defaults, CLI options and the optional config file into one
    backup-arguments namespace, set up logging, and configure optional
    trickle bandwidth limiting.

    :return: FreezerConfig instance carrying all resolved options
    """
    defaults = DEFAULT_PARAMS.copy()

    class FreezerConfig(object):
        # Thin attribute-access wrapper over the merged options dict.
        def __init__(self, args):
            self.__dict__.update(args)
    # CLI options win over defaults; None values are treated as unset.
    cli_options = dict([(x, y) for x, y in CONF.iteritems()
                        if y is not None])
    defaults.update(cli_options)
    conf = None
    if CONF.get('config'):
        conf = freezer_config.Config.parse(CONF.get('config'))
        # force log_config_append to always exists in defaults even if not
        # provided.
        defaults['log_config_append'] = None
        defaults.update(conf.default)
        # Push every config-file key into CONF, tolerating options that
        # oslo does not know about or that are deprecated aliases.
        for config_key in conf.default.keys():
            try:
                CONF.get(config_key)
                CONF.set_override(config_key, conf.default[config_key])
            except NoSuchOptError:
                LOG.debug('No such opt, {0}, so set it'.format(config_key))
                setattr(CONF, config_key, conf.default[config_key])
            except KeyError:
                # NOTE(review): relies on the private
                # CONF._find_deprecated_opts helper — fragile across
                # oslo.config versions.
                real_opt, real_group = CONF._find_deprecated_opts(config_key)
                if '-' in real_opt:
                    real_opt = real_opt.replace('-', '_')
                CONF.set_override(real_opt, conf.default[real_opt])
        if defaults['log_file']:
            CONF.set_override('log_file', defaults['log_file'])
        CONF.set_override('default_log_levels', _DEFAULT_LOG_LEVELS)
    if not CONF.get('log_file'):
        # Try the system location first, then fall back to ~/.freezer.
        log_file = None
        file_name = '/var/log/freezer-agent/freezer-agent.log'
        try:
            log_file = prepare_logging(file_name)
        except IOError:
            pass
        if not log_file:
            # Set default working directory to ~/.freezer. If the directory
            # does not exists it is created
            try:
                log_file = prepare_logging()
            except (OSError, IOError) as err_msg:
                # This avoids freezer-agent to crash if it can't write to
                # ~/.freezer, which may happen on some env (for me,
                # it happens in Jenkins, as freezer-agent can't write to
                # /var/lib/jenkins).
                print(encodeutils.safe_decode('{}'.format(err_msg)),
                      file=sys.stderr)
        if log_file:
            CONF.set_default('log_file', log_file)
        else:
            LOG.warning("log file cannot be created. Freezer will proceed "
                        "with default stdout and stderr")
    backup_args = FreezerConfig(defaults)
    if CONF.get('config'):
        backup_args.__dict__['config'] = CONF.get('config')
    # Set default working directory to ~/.freezer. If the directory
    # does not exists it is created
    work_dir = os.path.join(home, '.freezer')
    backup_args.__dict__['work_dir'] = work_dir
    if not os.path.exists(work_dir):
        try:
            os.makedirs(work_dir)
        except (OSError, IOError) as err_msg:
            # This avoids freezer-agent to crash if it can't write to
            # ~/.freezer, which may happen on some env (for me,
            # it happens in Jenkins, as freezer-agent can't write to
            # /var/lib/jenkins).
            print(encodeutils.safe_decode('{}'.format(err_msg)),
                  file=sys.stderr)
    # If hostname is not set, hostname of the current node will be used
    if not backup_args.hostname:
        backup_args.__dict__['hostname'] = socket.gethostname()
    # If we have provided --proxy then overwrite the system HTTP_PROXY and
    # HTTPS_PROXY
    if backup_args.proxy:
        utils.alter_proxy(backup_args.proxy)
    # MySQLdb object
    backup_args.__dict__['mysql_db_inst'] = ''
    backup_args.__dict__['storages'] = None
    if conf and conf.storages:
        backup_args.__dict__['storages'] = conf.storages
    # Windows volume
    backup_args.__dict__['shadow'] = ''
    backup_args.__dict__['shadow_path'] = ''
    backup_args.__dict__['file_name'] = ''
    if winutils.is_windows():
        if backup_args.path_to_backup:
            # e.g. 'C:\\' — drive portion of the path being backed up.
            backup_args.__dict__['windows_volume'] = \
                backup_args.path_to_backup[:3]
    # Pick the backup media from the mutually-exclusive id/name options.
    backup_media = 'fs'
    if backup_args.cinder_vol_id or backup_args.cinder_vol_name:
        backup_media = 'cinder'
    elif backup_args.cindernative_vol_id or \
            backup_args.cindernative_backup_id:
        backup_media = 'cindernative'
    elif backup_args.engine_name == 'nova' and (
            backup_args.project_id or backup_args.nova_inst_id or
            backup_args.nova_inst_name):
        backup_media = 'nova'
    elif backup_args.cinderbrick_vol_id:
        backup_media = 'cinderbrick'
    backup_args.__dict__['backup_media'] = backup_media
    backup_args.__dict__['time_stamp'] = None
    # NOTE(review): 'and' binds tighter than 'or', so is_windows() only
    # guards the download_limit test; likely meant
    # (upload != -1 or download != -1) and not is_windows() — confirm.
    if backup_args.upload_limit != -1 or backup_args.download_limit != -1 and \
            not winutils.is_windows():
        # handle --config option with tmp config file
        if backup_args.config:
            conf_file = tempfile.NamedTemporaryFile(prefix='freezer_job_',
                                                    delete=False)
            # remove the limits from the new file
            if 'upload_limit' in conf.default:
                conf.default.pop('upload_limit')
            elif 'download_limit' in conf.default:
                conf.default.pop('download_limit')
            utils.save_config_to_file(conf.default, conf_file, 'default')
            # replace the original file with the tmp one
            conf_index = sys.argv.index('--config') + 1
            sys.argv[conf_index] = conf_file.name
        # if limits provided from cli remove it !
        if '--upload-limit' in sys.argv:
            index = sys.argv.index('--upload-limit')
            sys.argv.pop(index)
            sys.argv.pop(index)
        if '--download-limit' in sys.argv:
            index = sys.argv.index('--download-limit')
            sys.argv.pop(index)
            sys.argv.pop(index)
        # locate trickle
        trickle_executable = distspawn.find_executable('trickle')
        if trickle_executable is None:
            trickle_executable = distspawn.find_executable(
                'trickle', path=":".join(sys.path))
        if trickle_executable is None:
            trickle_executable = distspawn.find_executable(
                'trickle', path=":".join(os.environ.get('PATH')))
        if trickle_executable:
            LOG.info("Info: Starting trickle ...")
            trickle_command = '{0} -d {1} -u {2} '.\
                format(trickle_executable,
                       getattr(backup_args, 'download_limit') or -1,
                       getattr(backup_args, 'upload_limit') or -1)
            backup_args.__dict__['trickle_command'] = trickle_command
            if backup_args.config:
                backup_args.__dict__['tmp_file'] = conf_file.name
            # maintain env variable not to get into infinite loop
            if "tricklecount" in os.environ:
                tricklecount = int(os.environ.get("tricklecount", 1))
                tricklecount += 1
                os.environ["tricklecount"] = str(tricklecount)
            else:
                os.environ["tricklecount"] = str(1)
        else:
            LOG.warning("Trickle not found. Switching to normal mode without "
                        "limiting bandwidth")
            if backup_args.config:
                # remove index tmp_file from backup arguments dict
                utils.delete_file(conf_file.name)
    return backup_args
def freezer_main(backup_args):
    """Freezer main loop for job execution.

    Builds the storage backend(s) and the tar engine from *backup_args*,
    then runs the job -- either directly, or re-executed under ``trickle``
    when bandwidth limits were requested (``backup_args.trickle_command``).

    :param backup_args: parsed argument namespace; mutated in place
        (``hostname_backup_name``, ``client_manager`` and ``engine`` are set).
    :return: the result of ``run_job`` when the job is executed in this
        process; ``None`` when a trickle-wrapped child already ran the job.
    """
    if not backup_args.quiet:
        LOG.info('log file at {0}'.format(CONF.get('log_file')))
    if backup_args.max_priority:
        utils.set_max_process_priority()

    backup_args.__dict__['hostname_backup_name'] = "{0}_{1}".format(
        backup_args.hostname, backup_args.backup_name)

    validator.validate(backup_args)

    work_dir = backup_args.work_dir
    max_segment_size = backup_args.max_segment_size
    # Only these storage/media types require an OpenStack client manager.
    if (backup_args.storage == 'swift' or
            backup_args.backup_media in ['nova', 'cinder', 'cindernative']):
        backup_args.client_manager = get_client_manager(backup_args.__dict__)

    if backup_args.storages:
        # Fan the backup out to several storage backends at once.
        storage = multiple.MultipleStorage(work_dir, [
            storage_from_dict(x, work_dir, max_segment_size)
            for x in backup_args.storages])
    else:
        storage = storage_from_dict(backup_args.__dict__, work_dir,
                                    max_segment_size)

    backup_args.engine = tar_engine.TarBackupEngine(
        backup_args.compression,
        backup_args.dereference_symlink,
        backup_args.exclude,
        storage,
        winutils.is_windows(),
        backup_args.max_segment_size,
        backup_args.encrypt_pass_file,
        backup_args.dry_run)

    if hasattr(backup_args, 'trickle_command'):
        # "tricklecount" is bumped on every trickle re-exec; a value > 1
        # means the wrapper keeps re-launching without taking effect.
        if "tricklecount" in os.environ:
            if int(os.environ.get("tricklecount")) > 1:
                LOG.critical("[*] Trickle seems to be not working, Switching "
                             "to normal mode ")
                return run_job(backup_args, storage)

        # Re-run the whole freezer command line under trickle.
        freezer_command = '{0} {1}'.format(backup_args.trickle_command,
                                           ' '.join(sys.argv))
        LOG.debug('Trickle command: {0}'.format(freezer_command))
        process = subprocess.Popen(freezer_command.split(),
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   env=os.environ.copy())
        # Relay the child's stdout while it is running.
        while process.poll() is None:
            line = process.stdout.readline().strip()
            if line != '':
                print(line)
        output, error = process.communicate()

        if hasattr(backup_args, 'tmp_file'):
            utils.delete_file(backup_args.tmp_file)

        if process.returncode:
            # Fixed: LOG.warn is a deprecated alias of LOG.warning.
            LOG.warning("[*] Trickle Error: {0}".format(error))
            LOG.info("[*] Switching to work without trickle ...")
            return run_job(backup_args, storage)
        # The trickle-wrapped child already executed the job.
        return

    # Fixed: the original fell through here with a bare expression statement,
    # discarding run_job's result and returning None on the no-trickle path.
    return run_job(backup_args, storage)
import threading import time from apscheduler.schedulers.background import BackgroundScheduler from distutils import spawn from oslo_config import cfg from oslo_log import log from freezer.apiclient import client from freezer.scheduler import arguments from freezer.scheduler import scheduler_job from freezer.scheduler import shell from freezer.scheduler import utils from freezer.utils import winutils if winutils.is_windows(): from win_daemon import Daemon from win_daemon import NoDaemon else: from daemon import Daemon from daemon import NoDaemon CONF = cfg.CONF LOG = log.getLogger(__name__) class FreezerScheduler(object): def __init__(self, apiclient, interval, job_path): # config_manager self.client = apiclient self.freezerc_executable = spawn.find_executable('freezer-agent')
def freezer_main(backup_args):
    """Freezer main loop for job execution.
    """
    if not backup_args.quiet:
        LOG.info('log file at {0}'.format(CONF.get('log_file')))
    if backup_args.max_priority:
        utils.set_max_process_priority()

    backup_args.__dict__['hostname_backup_name'] = "{0}_{1}".format(
        backup_args.hostname, backup_args.backup_name)

    validator.validate(backup_args)

    work_dir = backup_args.work_dir
    max_segment_size = backup_args.max_segment_size
    # Only these storage/media types need an OpenStack client manager.
    if backup_args.storage == 'swift' or (
            backup_args.backup_media in ['nova', 'cinder', 'cindernative']):
        backup_args.client_manager = get_client_manager(backup_args.__dict__)

    if backup_args.storages:
        # Fan the backup out to several storage backends at once.
        storage = multiple.MultipleStorage(
            work_dir,
            [storage_from_dict(x, work_dir, max_segment_size)
             for x in backup_args.storages])
    else:
        storage = storage_from_dict(backup_args.__dict__, work_dir,
                                    max_segment_size)

    backup_args.engine = tar_engine.TarBackupEngine(
        backup_args.compression,
        backup_args.dereference_symlink,
        backup_args.exclude,
        storage,
        winutils.is_windows(),
        backup_args.max_segment_size,
        backup_args.encrypt_pass_file,
        backup_args.dry_run)

    if hasattr(backup_args, 'trickle_command'):
        # "tricklecount" is bumped on every trickle re-exec; a value > 1
        # means the wrapper keeps re-launching without taking effect.
        if "tricklecount" in os.environ:
            if int(os.environ.get("tricklecount")) > 1:
                LOG.critical("[*] Trickle seems to be not working, Switching "
                             "to normal mode ")
                return run_job(backup_args, storage)

        # Re-run the whole freezer command line under trickle.
        freezer_command = '{0} {1}'.format(backup_args.trickle_command,
                                           ' '.join(sys.argv))
        LOG.debug('Trickle command: {0}'.format(freezer_command))
        process = subprocess.Popen(freezer_command.split(),
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   env=os.environ.copy())
        # Relay the child's stdout while it is running.
        while process.poll() is None:
            line = process.stdout.readline().strip()
            if line != '':
                print (line)
        output, error = process.communicate()

        if hasattr(backup_args, 'tmp_file'):
            utils.delete_file(backup_args.tmp_file)

        if process.returncode:
            LOG.warn("[*] Trickle Error: {0}".format(error))
            LOG.info("[*] Switching to work without trickle ...")
            return run_job(backup_args, storage)
        else:
            # The trickle-wrapped child already executed the job.
            return

    # NOTE(review): run_job's return value is discarded here, so this path
    # returns None -- upstream freezer uses ``return run_job(...)``; confirm
    # whether the missing ``return`` is intentional.
    run_job(backup_args, storage)
def main(): doers = _get_doers(shell) doers.update(_get_doers(utils)) possible_actions = doers.keys() + ['start', 'stop', 'status', 'reload'] arguments.parse_args(possible_actions) arguments.setup_logging() if CONF.action is None: CONF.print_help() return 65 # os.EX_DATAERR if CONF.action not in ['start', 'stop', 'status', 'reload']: sys.stderr.write("Using freezer-scheduler as a command line client is " "deprecated. Please use the freezer command line tool" " from python-freezerclient.") apiclient = None insecure = False if CONF.insecure: insecure = True if CONF.no_api is False: try: apiclient = client.Client(opts=CONF, insecure=insecure) if CONF.client_id: apiclient.client_id = CONF.client_id except Exception as e: LOG.error(e) print(e) sys.exit(1) else: if winutils.is_windows(): print("--no-api mode is not available on windows") return 69 # os.EX_UNAVAILABLE if CONF.action in doers: try: return doers[CONF.action](apiclient, CONF) except Exception as e: LOG.error(e) print('ERROR {0}'.format(e)) return 70 # os.EX_SOFTWARE freezer_scheduler = FreezerScheduler(apiclient=apiclient, interval=int(CONF.interval), job_path=CONF.jobs_dir) if CONF.no_daemon: print('Freezer Scheduler running in no-daemon mode') LOG.debug('Freezer Scheduler running in no-daemon mode') daemon = NoDaemon(daemonizable=freezer_scheduler) else: if winutils.is_windows(): daemon = Daemon(daemonizable=freezer_scheduler, interval=int(CONF.interval), job_path=CONF.jobs_dir, insecure=CONF.insecure) else: daemon = Daemon(daemonizable=freezer_scheduler) if CONF.action == 'start': daemon.start() elif CONF.action == 'stop': daemon.stop() elif CONF.action == 'reload': daemon.reload() elif CONF.action == 'status': daemon.status() # os.RETURN_CODES are only available to posix like systems, on windows # we need to translate the code to an actual number which is the equivalent return 0 # os.EX_OK
def test_is_windows(self): fake_os = Os() os.name = fake_os assert winutils.is_windows() is False
""" import os import sys from freezer import __version__ as FREEZER_VERSION from freezer.apiclient import client as api_client from freezer.utils import winutils from oslo_config import cfg from oslo_log import log CONF = cfg.CONF _LOG = log.getLogger(__name__) if winutils.is_windows(): DEFAULT_FREEZER_SCHEDULER_CONF_D = r'C:\.freezer\scheduler\conf.d' else: DEFAULT_FREEZER_SCHEDULER_CONF_D = '/etc/freezer/scheduler/conf.d' def get_common_opts(): scheduler_conf_d = os.environ.get('FREEZER_SCHEDULER_CONF_D', DEFAULT_FREEZER_SCHEDULER_CONF_D) if not os.path.exists(DEFAULT_FREEZER_SCHEDULER_CONF_D): try: os.makedirs(DEFAULT_FREEZER_SCHEDULER_CONF_D) except OSError as err: _LOG.error('OS error: {0}'.format(err)) except IOError: _LOG.error('Cannot create the directory {0}'