def metadata(self):
    """Download and deserialize the backup metadata for this backup.

    Fetches the object at ``self.metadata_path`` through the configured
    storage backend into a temporary file, logs its content, and returns
    it decoded from JSON.

    :return: the deserialized metadata document (typically a dict)
    """
    # delete=False + explicit close(): the storage backend re-opens the
    # file by name, which would race with delete-on-close semantics (and
    # fails outright on platforms where an open NamedTemporaryFile cannot
    # be reopened).  The original used delete=True AND an explicit
    # utils.delete_file(), deleting the same path twice.
    metadata_file = tempfile.NamedTemporaryFile('wb', delete=False)
    try:
        metadata_file.close()
        self.storage.get_file(self.metadata_path, metadata_file.name)
        with open(metadata_file.name) as f:
            # read the whole document: the original parsed only the first
            # line, which breaks on pretty-printed (multi-line) JSON
            metadata_content = f.read()
        LOG.info("metadata content download {0}".format(metadata_content))
        return json.loads(metadata_content)
    finally:
        # single, reliable cleanup path, also taken on errors
        utils.delete_file(metadata_file.name)
def execute_job_action(self, job_action):
    """Run one job action by spawning the freezer agent executable.

    The action's ``freezer_action`` section is written to a temporary
    config file and the agent is launched as a subprocess pointing at
    that file.

    NOTE(review): ``max_retries`` is used here as the *total* number of
    tries, so ``max_retries=0`` means the action is never attempted; a
    later variant in this file uses ``max_retries + 1`` instead.

    :param job_action: dict describing the action; keys read here are
        'max_retries', 'max_retries_interval' and 'freezer_action'.
    :return: ``Job.SUCCESS_RESULT`` on a zero exit code, otherwise
        ``Job.FAIL_RESULT`` once all tries are exhausted.
    """
    max_retries = job_action.get('max_retries', 1)
    tries = max_retries
    freezer_action = job_action.get('freezer_action', {})
    max_retries_interval = job_action.get('max_retries_interval', 60)
    action_name = freezer_action.get('action', '')
    config_file_name = None
    while tries:
        # delete=False: the file must survive the 'with' (which closes
        # it) so the spawned agent can read it; it is removed explicitly
        # after the agent terminates.
        with tempfile.NamedTemporaryFile(delete=False) as config_file:
            self.save_action_to_file(freezer_action, config_file)
            config_file_name = config_file.name
        freezer_command = '{0} --metadata-out - --config {1}'.\
            format(self.executable, config_file.name)
        self.process = subprocess.Popen(freezer_command.split(),
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.PIPE,
                                        env=os.environ.copy())
        # blocks until the agent exits
        output, error = self.process.communicate()
        # ensure the tempfile gets deleted
        utils.delete_file(config_file_name)
        if error:
            # NOTE(review): any stderr output is treated as an error,
            # even when the agent exits with status 0
            logging.error("[*] Freezer client error: {0}".format(error))
        elif output:
            # '--metadata-out -' presumably makes the agent emit backup
            # metadata on stdout; push it to the API
            self.upload_metadata(output)
        if self.process.returncode:
            # ERROR
            tries -= 1
            if tries:
                logging.warning('[*] Job {0} failed {1} action,'
                                ' retrying in {2} seconds'.format(
                                    self.id, action_name,
                                    max_retries_interval))
                # sleeping with the bloody lock, but we don't want other
                # actions to mess with our stuff like fs snapshots, do we ?
                time.sleep(max_retries_interval)
        else:
            # SUCCESS
            logging.info('[*] Job {0} action {1}'
                         ' returned success exit code'.format(
                             self.id, action_name))
            return Job.SUCCESS_RESULT
    logging.error('[*] Job {0} action {1} failed after {2} tries'.format(
        self.id, action_name, max_retries))
    return Job.FAIL_RESULT
def execute_job_action(self, job_action):
    """Run one job action by spawning the freezer agent executable.

    Variant of the same routine found elsewhere in this file: writes the
    ``freezer_action`` section to a temporary config file, launches the
    agent as a subprocess and retries on failure.

    NOTE(review): ``max_retries`` is used as the *total* number of tries
    (``max_retries=0`` means no attempt at all); a later variant in this
    file uses ``max_retries + 1``.

    :param job_action: dict with 'max_retries', 'max_retries_interval'
        and 'freezer_action' keys.
    :return: ``Job.SUCCESS_RESULT`` on exit code 0, otherwise
        ``Job.FAIL_RESULT`` after the tries are exhausted.
    """
    max_retries = job_action.get('max_retries', 1)
    tries = max_retries
    freezer_action = job_action.get('freezer_action', {})
    max_retries_interval = job_action.get('max_retries_interval', 60)
    action_name = freezer_action.get('action', '')
    config_file_name = None
    while tries:
        # delete=False so the closed file survives for the child process;
        # removed explicitly below
        with tempfile.NamedTemporaryFile(delete=False) as config_file:
            self.save_action_to_file(freezer_action, config_file)
            config_file_name = config_file.name
        freezer_command = '{0} --metadata-out - --config {1}'.\
            format(self.executable, config_file.name)
        self.process = subprocess.Popen(freezer_command.split(),
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.PIPE,
                                        env=os.environ.copy())
        # blocks until the agent exits
        output, error = self.process.communicate()
        # ensure the tempfile gets deleted
        utils.delete_file(config_file_name)
        if error:
            # NOTE(review): any stderr output is logged as an error even
            # on a zero exit code
            logging.error("[*] Freezer client error: {0}".format(error))
        elif output:
            # stdout carries the backup metadata ('--metadata-out -')
            self.upload_metadata(output)
        if self.process.returncode:
            # ERROR
            tries -= 1
            if tries:
                logging.warning('[*] Job {0} failed {1} action,'
                                ' retrying in {2} seconds'
                                .format(self.id, action_name,
                                        max_retries_interval))
                # sleeping with the bloody lock, but we don't want other
                # actions to mess with our stuff like fs snapshots, do we ?
                time.sleep(max_retries_interval)
        else:
            # SUCCESS
            logging.info('[*] Job {0} action {1}'
                         ' returned success exit code'.
                         format(self.id, action_name))
            return Job.SUCCESS_RESULT
    logging.error('[*] Job {0} action {1} failed after {2} tries'
                  .format(self.id, action_name, max_retries))
    return Job.FAIL_RESULT
def execute_job_action(self, job_action):
    """Run one job action by spawning the freezer agent executable.

    Writes the ``freezer_action`` section to a temporary config file,
    launches the agent as a subprocess, records the child pid in the API
    (so the scheduler can abort it) and retries on failure.  The action
    is attempted ``max_retries + 1`` times in total.

    :param job_action: dict with 'max_retries', 'max_retries_interval'
        and 'freezer_action' keys.
    :return: ``Job.SUCCESS_RESULT`` on exit code 0;
        ``Job.ABORTED_RESULT`` when the agent was terminated with
        SIGTERM (returncode -15, the scheduler's abort path);
        otherwise ``Job.FAIL_RESULT`` once all tries are exhausted.
    """
    max_tries = (job_action.get('max_retries', 0) + 1)
    tries = max_tries
    freezer_action = job_action.get('freezer_action', {})
    max_retries_interval = job_action.get('max_retries_interval', 60)
    action_name = freezer_action.get('action', '')
    while tries:
        # delete=False so the closed file survives for the child process;
        # removed explicitly below
        with tempfile.NamedTemporaryFile(delete=False) as config_file:
            self.save_action_to_file(freezer_action, config_file)
            config_file_name = config_file.name
        freezer_command = '{0} --metadata-out - --config {1}'.\
            format(self.executable, config_file.name)
        self.process = subprocess.Popen(freezer_command.split(),
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.PIPE,
                                        env=os.environ.copy())
        # store the pid for this process in the api
        try:
            self.job_doc['job_schedule']['current_pid'] = \
                self.process.pid
            self.scheduler.update_job(self.job_doc['job_id'],
                                      self.job_doc)
        except Exception as error:
            # best-effort: a failed pid update must not kill the job run
            LOG.error("Error saving the process id {}".format(error))
        # blocks until the agent exits
        output, error = self.process.communicate()
        # ensure the tempfile gets deleted
        utils.delete_file(config_file_name)
        if error:
            # NOTE(review): any stderr output is logged as an error even
            # on a zero exit code
            LOG.error("Freezer client error: {0}".format(error))
        elif output:
            # stdout carries the backup metadata ('--metadata-out -')
            self.upload_metadata(output)
        if self.process.returncode == -15:
            # This means the job action was aborted by the scheduler
            LOG.warning('Freezer-agent was killed by the scheduler. '
                        'Cleanup should be done manually: container, '
                        'mountpoint and lvm snapshots.')
            return Job.ABORTED_RESULT
        elif self.process.returncode:
            # ERROR
            tries -= 1
            if tries:
                LOG.warning('Job {0} failed {1} action,'
                            ' retrying in {2} seconds'
                            .format(self.id, action_name,
                                    max_retries_interval))
                time.sleep(max_retries_interval)
        else:
            # SUCCESS
            LOG.info('Job {0} action {1}'
                     ' returned success exit code'.
                     format(self.id, action_name))
            return Job.SUCCESS_RESULT
    LOG.error('Job {0} action {1} failed after {2} tries'
              .format(self.id, action_name, max_tries))
    return Job.FAIL_RESULT
def freezer_main(backup_args):
    """Freezer main loop for job execution.

    Validates the parsed arguments, builds the storage backend(s) and the
    tar backup engine, then runs the job -- optionally re-invoking the
    whole command line under ``trickle`` for bandwidth limiting.

    :param backup_args: argument namespace produced by get_backup_args()
    :return: result of run_job() when the job runs in this process,
        otherwise None (the trickle-wrapped child did the work)
    """
    if not backup_args.quiet:
        LOG.info('log file at {0}'.format(CONF.get('log_file')))
    if backup_args.max_priority:
        utils.set_max_process_priority()
    backup_args.__dict__['hostname_backup_name'] = "{0}_{1}".format(
        backup_args.hostname, backup_args.backup_name)
    validator.validate(backup_args)
    work_dir = backup_args.work_dir
    max_segment_size = backup_args.max_segment_size
    # OpenStack clients are only needed for swift storage or when backing
    # up OpenStack resources
    if (backup_args.storage == 'swift' or
            backup_args.backup_media in ['nova', 'cinder', 'cindernative']):
        backup_args.client_manager = get_client_manager(backup_args.__dict__)
    if backup_args.storages:
        storage = multiple.MultipleStorage(
            work_dir,
            [storage_from_dict(x, work_dir, max_segment_size)
             for x in backup_args.storages])
    else:
        storage = storage_from_dict(backup_args.__dict__, work_dir,
                                    max_segment_size)
    backup_args.engine = tar_engine.TarBackupEngine(
        backup_args.compression,
        backup_args.dereference_symlink,
        backup_args.exclude,
        storage,
        winutils.is_windows(),
        backup_args.max_segment_size,
        backup_args.encrypt_pass_file,
        backup_args.dry_run)
    if hasattr(backup_args, 'trickle_command'):
        # 'tricklecount' guards against an endless re-exec loop when the
        # trickle-wrapped child lands back in this function
        if "tricklecount" in os.environ:
            if int(os.environ.get("tricklecount")) > 1:
                LOG.critical("[*] Trickle seems to be not working, Switching "
                             "to normal mode ")
                return run_job(backup_args, storage)
        freezer_command = '{0} {1}'.format(backup_args.trickle_command,
                                           ' '.join(sys.argv))
        LOG.debug('Trickle command: {0}'.format(freezer_command))
        process = subprocess.Popen(freezer_command.split(),
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   env=os.environ.copy())
        # relay the child's stdout while it runs
        while process.poll() is None:
            line = process.stdout.readline().strip()
            if line != '':
                print(line)
        output, error = process.communicate()
        if hasattr(backup_args, 'tmp_file'):
            utils.delete_file(backup_args.tmp_file)
        if process.returncode:
            # fix: LOG.warn is a deprecated alias of LOG.warning (the
            # newer variants of this function already use warning)
            LOG.warning("[*] Trickle Error: {0}".format(error))
            LOG.info("[*] Switching to work without trickle ...")
            return run_job(backup_args, storage)
        else:
            # the trickle-wrapped child already executed the job
            return

    run_job(backup_args, storage)
def freezer_main(backup_args):
    """Freezer main loop for job execution.

    Resolves S3 credentials from the environment when needed, builds the
    storage backend(s), loads the backup engine through the engine
    manager and runs the job -- optionally re-invoking the whole command
    line under ``trickle`` for bandwidth limiting.

    :param backup_args: argument namespace produced by get_backup_args()
    :return: result of run_job() when the job runs in this process,
        otherwise None (the trickle-wrapped child did the work)
    :raises Exception: when S3 storage is selected but access key,
        secret key or endpoint cannot be resolved
    """
    if not backup_args.quiet:
        LOG.info("Begin freezer agent process with args: {0}".format(
            sys.argv))
        LOG.info('log file at {0}'.format(CONF.get('log_file')))
    if backup_args.max_priority:
        utils.set_max_process_priority()
    backup_args.__dict__['hostname_backup_name'] = "{0}_{1}".format(
        backup_args.hostname, backup_args.backup_name)
    max_segment_size = backup_args.max_segment_size
    # OpenStack clients are only needed for swift storage or when backing
    # up OpenStack resources
    if (backup_args.storage == 'swift' or
            backup_args.backup_media in ['nova', 'cinder', 'cindernative',
                                         'cinderbrick']):
        backup_args.client_manager = client_manager.get_client_manager(
            backup_args.__dict__)
    if backup_args.storage == 's3':
        # fall back to environment variables for each missing credential,
        # then fail hard if it is still empty
        if backup_args.__dict__['access_key'] == '' \
                and 'ACCESS_KEY' in os.environ:
            backup_args.__dict__['access_key'] = os.environ.get('ACCESS_KEY')
        if backup_args.__dict__['access_key'] == '':
            raise Exception('No access key found for S3 compatible storage')
        if backup_args.__dict__['secret_key'] == '' \
                and 'SECRET_KEY' in os.environ:
            backup_args.__dict__['secret_key'] = os.environ.get('SECRET_KEY')
        if backup_args.__dict__['secret_key'] == '':
            raise Exception('No secret key found for S3 compatible storage')
        if backup_args.__dict__['endpoint'] == '' \
                and 'ENDPOINT' in os.environ:
            backup_args.__dict__['endpoint'] = os.environ.get('ENDPOINT')
        if backup_args.__dict__['endpoint'] == '':
            raise Exception('No endpoint found for S3 compatible storage')
    if backup_args.storages:
        # pylint: disable=abstract-class-instantiated
        storage = multiple.MultipleStorage(
            [storage_from_dict(x, max_segment_size)
             for x in backup_args.storages])
    else:
        storage = storage_from_dict(backup_args.__dict__, max_segment_size)
    engine_loader = engine_manager.EngineManager()
    backup_args.engine = engine_loader.load_engine(
        compression=backup_args.compression,
        symlinks=backup_args.dereference_symlink,
        exclude=backup_args.exclude,
        storage=storage,
        max_segment_size=backup_args.max_segment_size,
        rsync_block_size=backup_args.rsync_block_size,
        encrypt_key=backup_args.encrypt_pass_file,
        dry_run=backup_args.dry_run)
    if hasattr(backup_args, 'trickle_command'):
        # 'tricklecount' guards against an endless re-exec loop when the
        # trickle-wrapped child lands back in this function
        if "tricklecount" in os.environ:
            if int(os.environ.get("tricklecount")) > 1:
                LOG.critical("Trickle seems to be not working, Switching "
                             "to normal mode ")
                return run_job(backup_args, storage)
        freezer_command = '{0} {1}'.format(backup_args.trickle_command,
                                           ' '.join(sys.argv))
        LOG.debug('Trickle command: {0}'.format(freezer_command))
        process = subprocess.Popen(freezer_command.split(),
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   env=os.environ.copy())
        # relay the child's stdout while it runs
        # NOTE(review): on Python 3 readline() returns bytes, so the
        # comparison with '' never matches empty output -- confirm the
        # target interpreter
        while process.poll() is None:
            line = process.stdout.readline().strip()
            if line != '':
                print(line)
        output, error = process.communicate()
        if hasattr(backup_args, 'tmp_file'):
            utils.delete_file(backup_args.tmp_file)
        if process.returncode:
            LOG.warning("Trickle Error: {0}".format(error))
            LOG.info("Switching to work without trickle ...")
            return run_job(backup_args, storage)
        else:
            # the trickle-wrapped child already executed the job
            return

    run_job(backup_args, storage)
def get_backup_args():
    """Build the complete freezer-agent argument set.

    Merges, in increasing priority, DEFAULT_PARAMS, the --config file (if
    any) and the CLI options; configures logging; derives helper
    attributes (work_dir, hostname, backup_media, ...); and prepares the
    optional ``trickle`` bandwidth-limiting re-exec.

    :return: a FreezerConfig namespace with one attribute per setting
    """
    defaults = DEFAULT_PARAMS.copy()

    class FreezerConfig(object):
        """Plain attribute bag built from a dict of settings."""
        def __init__(self, args):
            self.__dict__.update(args)

    # CLI options that were actually provided override everything else
    cli_options = dict([(x, y) for x, y in CONF.iteritems()
                        if y is not None])
    defaults.update(cli_options)
    conf = None
    if CONF.get('config'):
        conf = freezer_config.Config.parse(CONF.get('config'))
        # force log_config_append to always exists in defaults even if not
        # provided.
        defaults['log_config_append'] = None
        defaults.update(conf.default)
        if defaults['log_file']:
            CONF.set_override('log_file', defaults['log_file'])
        CONF.set_override('default_log_levels', _DEFAULT_LOG_LEVELS)
    if not CONF.get('log_file'):
        log_file = None
        for file_name in ['/var/log/freezer-agent/freezer-agent.log',
                          '/var/log/freezer.log']:
            try:
                log_file = prepare_logging(file_name)
            except IOError:
                pass
        if not log_file:
            # Fall back to ~/.freezer/freezer.log, creating the directory
            # if it does not exist yet
            work_dir = os.path.join(home, '.freezer')
            if not os.path.exists(work_dir):
                try:
                    os.makedirs(work_dir)
                    log_file = prepare_logging(
                        os.path.join(work_dir, 'freezer.log'))
                except (OSError, IOError) as err_msg:
                    # This avoids freezer-agent to crash if it can't write to
                    # ~/.freezer, which may happen on some env (for me,
                    # it happens in Jenkins, as freezer-agent can't write to
                    # /var/lib/jenkins).
                    print(encodeutils.safe_decode('{}'.format(err_msg)),
                          file=sys.stderr)
        if log_file:
            CONF.set_default('log_file', log_file)
        else:
            LOG.warning("log file cannot be created. Freezer will proceed with"
                        " default stdout and stderr")
    backup_args = FreezerConfig(defaults)
    if CONF.get('config'):
        backup_args.__dict__['config'] = CONF.get('config')
    # Set default working directory to ~/.freezer. If the directory
    # does not exists it is created
    work_dir = os.path.join(home, '.freezer')
    backup_args.__dict__['work_dir'] = work_dir
    if not os.path.exists(work_dir):
        try:
            os.makedirs(work_dir)
        except (OSError, IOError) as err_msg:
            # This avoids freezer-agent to crash if it can't write to
            # ~/.freezer, which may happen on some env (for me,
            # it happens in Jenkins, as freezer-agent can't write to
            # /var/lib/jenkins).
            print(encodeutils.safe_decode('{}'.format(err_msg)),
                  file=sys.stderr)
    # If hostname is not set, hostname of the current node will be used
    if not backup_args.hostname:
        backup_args.__dict__['hostname'] = socket.gethostname()
    # If we have provided --proxy then overwrite the system HTTP_PROXY and
    # HTTPS_PROXY
    if backup_args.proxy:
        utils.alter_proxy(backup_args.proxy)
    # MySQLdb object
    backup_args.__dict__['mysql_db_inst'] = ''
    backup_args.__dict__['storages'] = None
    if conf and conf.storages:
        backup_args.__dict__['storages'] = conf.storages
    # Windows volume
    backup_args.__dict__['shadow'] = ''
    backup_args.__dict__['shadow_path'] = ''
    backup_args.__dict__['file_name'] = ''
    if winutils.is_windows():
        if backup_args.path_to_backup:
            # drive prefix (e.g. 'C:\\') of the path being backed up
            backup_args.__dict__['windows_volume'] = \
                backup_args.path_to_backup[:3]
    # derive the backup media type from the identifiers that were provided
    backup_media = 'fs'
    if backup_args.cinder_vol_id:
        backup_media = 'cinder'
    elif backup_args.cindernative_vol_id or \
            backup_args.cindernative_backup_id:
        backup_media = 'cindernative'
    elif backup_args.engine_name == 'nova' and (backup_args.project_id or
                                                backup_args.nova_inst_id):
        backup_media = 'nova'
    elif backup_args.cinderbrick_vol_id:
        backup_media = 'cinderbrick'
    backup_args.__dict__['backup_media'] = backup_media
    backup_args.__dict__['time_stamp'] = None

    # fix: parenthesize the 'or' -- previously 'and' bound tighter than
    # 'or', so the Windows guard applied only to the download_limit test
    # and the trickle setup could run on Windows when upload_limit was set
    if (backup_args.upload_limit != -1 or
            backup_args.download_limit != -1) and \
            not winutils.is_windows():
        # handle --config option with tmp config file
        if backup_args.config:
            conf_file = tempfile.NamedTemporaryFile(prefix='freezer_job_',
                                                    delete=False)
            # remove the limits from the new file
            # fix: these were 'if'/'elif', which stripped only one of the
            # two limits when both were present in the config file
            if 'upload_limit' in conf.default:
                conf.default.pop('upload_limit')
            if 'download_limit' in conf.default:
                conf.default.pop('download_limit')
            utils.save_config_to_file(conf.default, conf_file, 'default')
            # replace the original file with the tmp one
            conf_index = sys.argv.index('--config') + 1
            sys.argv[conf_index] = conf_file.name
        # if limits provided from cli remove it !
        if '--upload-limit' in sys.argv:
            index = sys.argv.index('--upload-limit')
            sys.argv.pop(index)
            sys.argv.pop(index)
        if '--download-limit' in sys.argv:
            index = sys.argv.index('--download-limit')
            sys.argv.pop(index)
            sys.argv.pop(index)
        # locate trickle
        trickle_executable = distspawn.find_executable('trickle')
        if trickle_executable is None:
            trickle_executable = distspawn.find_executable(
                'trickle', path=":".join(sys.path))
            if trickle_executable is None:
                trickle_executable = distspawn.find_executable(
                    'trickle', path=":".join(os.environ.get('PATH')))
        if trickle_executable:
            LOG.info("Info: Starting trickle ...")
            trickle_command = '{0} -d {1} -u {2} '.\
                format(trickle_executable,
                       getattr(backup_args, 'download_limit') or -1,
                       getattr(backup_args, 'upload_limit') or -1)
            backup_args.__dict__['trickle_command'] = trickle_command
            if backup_args.config:
                backup_args.__dict__['tmp_file'] = conf_file.name
            # maintain env variable not to get into infinite loop
            if "tricklecount" in os.environ:
                tricklecount = int(os.environ.get("tricklecount", 1))
                tricklecount += 1
                os.environ["tricklecount"] = str(tricklecount)
            else:
                os.environ["tricklecount"] = str(1)
        else:
            LOG.warning("Trickle not found. Switching to normal mode without "
                        "limiting bandwidth")
            if backup_args.config:
                # the tmp config file is not needed without trickle
                utils.delete_file(conf_file.name)
    return backup_args
def get_backup_args():
    """Build the complete freezer-agent argument set (legacy variant).

    Merges DEFAULT_PARAMS, the --config file (if any) and the CLI
    options, configures logging, derives helper attributes (work_dir,
    hostname, backup_media, ...) and prepares the optional ``trickle``
    bandwidth-limiting re-exec.

    :return: a FreezerConfig namespace with one attribute per setting
    :raises Exception: when the deprecated --lvm-auto-snap option is set
    """
    defaults = DEFAULT_PARAMS.copy()

    class FreezerConfig(object):
        # Plain attribute bag built from a dict of settings
        def __init__(self, args):
            self.__dict__.update(args)

    # CLI options that were actually provided override everything else
    cli_options = dict([(x, y) for x, y in CONF.iteritems()
                        if y is not None])
    defaults.update(cli_options)
    conf = None
    if CONF.get('config'):
        conf = freezer_config.Config.parse(CONF.get('config'))
        defaults.update(conf.default)
        # TODO: restore_from_host is deprecated and to be removed
        defaults['hostname'] = conf.default.get('hostname') or \
            conf.default.get('restore_from_host')
        # override default oslo values
        levels = {
            'all': log.NOTSET,
            'debug': log.DEBUG,
            'warn': log.WARN,
            'info': log.INFO,
            'error': log.ERROR,
            'critical': log.CRITICAL
        }
        # NOTE(review): 'levels' maps level *names*, but it is indexed with
        # defaults['log_file'] (a path) -- this looks like a
        # log_level/log_file mix-up; confirm against the config schema
        if not CONF.get('log_file'):
            CONF.set_override('log_file', levels.get(defaults['log_file'],
                                                     log.NOTSET))
        CONF.set_override('default_log_levels', defaults['log_level'])
    if not CONF.get('log_file'):
        log_file = None
        # NOTE(review): '~' is not expanded here -- presumably
        # prepare_logging handles it; verify
        for file_name in ['/var/log/freezer.log', '~/.freezer/freezer.log']:
            try:
                log_file = prepare_logging(file_name)
            except IOError:
                pass
        if log_file:
            CONF.set_default('log_file', log_file)
        else:
            LOG.warn("log file cannot be created. Freezer will proceed with "
                     "default stdout and stderr")
    backup_args = FreezerConfig(defaults)
    # Set default working directory to ~/.freezer. If the directory
    # does not exists it is created
    work_dir = os.path.join(home, '.freezer')
    backup_args.__dict__['work_dir'] = work_dir
    if not os.path.exists(work_dir):
        try:
            os.makedirs(work_dir)
        except (OSError, IOError) as err_msg:
            # This avoids freezer-agent to crash if it can't write to
            # ~/.freezer, which may happen on some env (for me,
            # it happens in Jenkins, as freezer-agent can't write to
            # /var/lib/jenkins).
            print(encodeutils.safe_decode('{}'.format(err_msg)),
                  file=sys.stderr)
    # If hostname is not set, hostname of the current node will be used
    if not backup_args.hostname:
        backup_args.__dict__['hostname'] = socket.gethostname()
    # If we have provided --proxy then overwrite the system HTTP_PROXY and
    # HTTPS_PROXY
    if backup_args.proxy:
        utils.alter_proxy(backup_args.proxy)
    # MySQLdb object
    backup_args.__dict__['mysql_db_inst'] = ''
    backup_args.__dict__['storages'] = None
    if conf and conf.storages:
        backup_args.__dict__['storages'] = conf.storages
    # Windows volume
    backup_args.__dict__['shadow'] = ''
    backup_args.__dict__['shadow_path'] = ''
    backup_args.__dict__['file_name'] = ''
    if winutils.is_windows():
        if backup_args.path_to_backup:
            # drive prefix (e.g. 'C:\\') of the path being backed up
            backup_args.__dict__['windows_volume'] = \
                backup_args.path_to_backup[:3]
    # todo(enugaev) move it to new command line param backup_media
    if backup_args.lvm_auto_snap:
        raise Exception('lvm-auto-snap is deprecated. '
                        'Please use --snapshot instead')
    # derive the backup media type from the identifiers that were provided
    backup_media = 'fs'
    if backup_args.cinder_vol_id:
        backup_media = 'cinder'
    elif backup_args.cindernative_vol_id:
        backup_media = 'cindernative'
    elif backup_args.nova_inst_id:
        backup_media = 'nova'
    backup_args.__dict__['backup_media'] = backup_media
    backup_args.__dict__['time_stamp'] = None
    # NOTE(review): 'and' binds tighter than 'or', so the Windows guard
    # only applies to the download_limit test -- likely intended as
    # (upload != -1 or download != -1) and not is_windows()
    if backup_args.upload_limit != -1 or backup_args.download_limit != -1 and \
            not winutils.is_windows():
        # handle --config option with tmp config file
        if backup_args.config:
            conf_file = NamedTemporaryFile(prefix='freezer_job_',
                                           delete=False)
            # remove the limits from the new file
            # NOTE(review): 'elif' drops only one of the two limits when
            # both are present in the config file
            if 'upload_limit' in conf.default:
                conf.default.pop('upload_limit')
            elif 'download_limit' in conf.default:
                conf.default.pop('download_limit')
            utils.save_config_to_file(conf.default, conf_file, 'default')
            # replace the original file with the tmp one
            conf_index = sys.argv.index('--config') + 1
            sys.argv[conf_index] = conf_file.name
        # if limits provided from cli remove it !
        if '--upload-limit' in sys.argv:
            index = sys.argv.index('--upload-limit')
            sys.argv.pop(index)
            sys.argv.pop(index)
        if '--download-limit' in sys.argv:
            index = sys.argv.index('--download-limit')
            sys.argv.pop(index)
            sys.argv.pop(index)
        # locate trickle
        trickle_executable = distspawn.find_executable('trickle')
        if trickle_executable is None:
            trickle_executable = distspawn.find_executable(
                'trickle', path=":".join(sys.path))
            if trickle_executable is None:
                trickle_executable = distspawn.find_executable(
                    'trickle', path=":".join(os.environ.get('PATH')))
        if trickle_executable:
            LOG.info("[*] Info: Starting trickle ...")
            trickle_command = '{0} -d {1} -u {2} '.\
                format(trickle_executable,
                       getattr(backup_args, 'download_limit') or -1,
                       getattr(backup_args, 'upload_limit') or -1)
            backup_args.__dict__['trickle_command'] = trickle_command
            if backup_args.config:
                backup_args.__dict__['tmp_file'] = conf_file.name
            # maintain env variable not to get into infinite loop
            if "tricklecount" in os.environ:
                tricklecount = int(os.environ.get("tricklecount", 1))
                tricklecount += 1
                os.environ["tricklecount"] = str(tricklecount)
            else:
                os.environ["tricklecount"] = str(1)
        else:
            LOG.warn("[*] Trickle not found. Switching to normal mode without "
                     "limiting bandwidth")
            if backup_args.config:
                # remove index tmp_file from backup arguments dict
                # NOTE(review): 'tmp_file' is only set in the
                # trickle-found branch, so this pop can raise KeyError; a
                # later variant in this file removed it
                backup_args.__dict__.pop('tmp_file')
                utils.delete_file(conf_file.name)
    return backup_args
def freezer_main(backup_args):
    """Freezer main loop for job execution.

    Validates the parsed arguments, builds the storage backend(s) and the
    tar backup engine, then runs the job -- optionally re-invoking the
    whole command line under ``trickle`` for bandwidth limiting.

    :param backup_args: argument namespace produced by get_backup_args()
    :return: result of run_job() when the job runs in this process,
        otherwise None (the trickle-wrapped child did the work)
    """
    if not backup_args.quiet:
        LOG.info('log file at {0}'.format(CONF.get('log_file')))
    if backup_args.max_priority:
        utils.set_max_process_priority()
    backup_args.__dict__['hostname_backup_name'] = "{0}_{1}".format(
        backup_args.hostname, backup_args.backup_name)
    validator.validate(backup_args)
    work_dir = backup_args.work_dir
    max_segment_size = backup_args.max_segment_size
    # OpenStack clients are only needed for swift storage or when backing
    # up OpenStack resources
    if backup_args.storage == 'swift' or (
            backup_args.backup_media in ['nova', 'cinder', 'cindernative']):
        backup_args.client_manager = get_client_manager(backup_args.__dict__)
    if backup_args.storages:
        storage = multiple.MultipleStorage(
            work_dir,
            [storage_from_dict(x, work_dir, max_segment_size)
             for x in backup_args.storages])
    else:
        storage = storage_from_dict(backup_args.__dict__, work_dir,
                                    max_segment_size)
    backup_args.engine = tar_engine.TarBackupEngine(
        backup_args.compression,
        backup_args.dereference_symlink,
        backup_args.exclude,
        storage,
        winutils.is_windows(),
        backup_args.max_segment_size,
        backup_args.encrypt_pass_file,
        backup_args.dry_run)
    if hasattr(backup_args, 'trickle_command'):
        # 'tricklecount' guards against an endless re-exec loop when the
        # trickle-wrapped child lands back in this function
        if "tricklecount" in os.environ:
            if int(os.environ.get("tricklecount")) > 1:
                LOG.critical("[*] Trickle seems to be not working, Switching "
                             "to normal mode ")
                return run_job(backup_args, storage)
        freezer_command = '{0} {1}'.format(backup_args.trickle_command,
                                           ' '.join(sys.argv))
        LOG.debug('Trickle command: {0}'.format(freezer_command))
        process = subprocess.Popen(freezer_command.split(),
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   env=os.environ.copy())
        # relay the child's stdout while it runs
        while process.poll() is None:
            line = process.stdout.readline().strip()
            if line != '':
                print(line)
        output, error = process.communicate()
        if hasattr(backup_args, 'tmp_file'):
            utils.delete_file(backup_args.tmp_file)
        if process.returncode:
            # NOTE(review): LOG.warn is a deprecated alias of LOG.warning
            LOG.warn("[*] Trickle Error: {0}".format(error))
            LOG.info("[*] Switching to work without trickle ...")
            return run_job(backup_args, storage)
        else:
            # the trickle-wrapped child already executed the job
            return

    run_job(backup_args, storage)
def get_backup_args():
    """Build the complete freezer-agent argument set.

    Merges DEFAULT_PARAMS, the --config file (if any) and the CLI
    options; pushes config-file values into oslo.config (handling
    unknown and deprecated option names); configures logging; derives
    helper attributes (work_dir, hostname, backup_media, ...); and
    prepares the optional ``trickle`` bandwidth-limiting re-exec.

    :return: a FreezerConfig namespace with one attribute per setting
    """
    defaults = DEFAULT_PARAMS.copy()

    class FreezerConfig(object):
        # Plain attribute bag built from a dict of settings
        def __init__(self, args):
            self.__dict__.update(args)

    # CLI options that were actually provided override everything else
    cli_options = dict([(x, y) for x, y in CONF.iteritems()
                        if y is not None])
    defaults.update(cli_options)
    conf = None
    if CONF.get('config'):
        conf = freezer_config.Config.parse(CONF.get('config'))
        # force log_config_append to always exists in defaults even if not
        # provided.
        defaults['log_config_append'] = None
        defaults.update(conf.default)
        # mirror every config-file entry into oslo.config
        for config_key in conf.default.keys():
            try:
                CONF.get(config_key)
                CONF.set_override(config_key, conf.default[config_key])
            except NoSuchOptError:
                # option not registered with oslo -- attach it directly
                LOG.debug('No such opt, {0}, so set it'.format(config_key))
                setattr(CONF, config_key, conf.default[config_key])
            except KeyError:
                # NOTE(review): relies on the private oslo.config API
                # _find_deprecated_opts to map deprecated names
                real_opt, real_group = CONF._find_deprecated_opts(config_key)
                if '-' in real_opt:
                    real_opt = real_opt.replace('-', '_')
                CONF.set_override(real_opt, conf.default[real_opt])
        if defaults['log_file']:
            CONF.set_override('log_file', defaults['log_file'])
        CONF.set_override('default_log_levels', _DEFAULT_LOG_LEVELS)
    if not CONF.get('log_file'):
        log_file = None
        file_name = '/var/log/freezer-agent/freezer-agent.log'
        try:
            log_file = prepare_logging(file_name)
        except IOError:
            pass
        if not log_file:
            # fall back to prepare_logging()'s own default location
            try:
                log_file = prepare_logging()
            except (OSError, IOError) as err_msg:
                # This avoids freezer-agent to crash if it can't write to
                # ~/.freezer, which may happen on some env (for me,
                # it happens in Jenkins, as freezer-agent can't write to
                # /var/lib/jenkins).
                print(encodeutils.safe_decode('{}'.format(err_msg)),
                      file=sys.stderr)
        if log_file:
            CONF.set_default('log_file', log_file)
        else:
            LOG.warning("log file cannot be created. Freezer will proceed with"
                        " default stdout and stderr")
    backup_args = FreezerConfig(defaults)
    if CONF.get('config'):
        backup_args.__dict__['config'] = CONF.get('config')
    # Set default working directory to ~/.freezer. If the directory
    # does not exists it is created
    work_dir = os.path.join(home, '.freezer')
    backup_args.__dict__['work_dir'] = work_dir
    if not os.path.exists(work_dir):
        try:
            os.makedirs(work_dir)
        except (OSError, IOError) as err_msg:
            # This avoids freezer-agent to crash if it can't write to
            # ~/.freezer, which may happen on some env (for me,
            # it happens in Jenkins, as freezer-agent can't write to
            # /var/lib/jenkins).
            print(encodeutils.safe_decode('{}'.format(err_msg)),
                  file=sys.stderr)
    # If hostname is not set, hostname of the current node will be used
    if not backup_args.hostname:
        backup_args.__dict__['hostname'] = socket.gethostname()
    # If we have provided --proxy then overwrite the system HTTP_PROXY and
    # HTTPS_PROXY
    if backup_args.proxy:
        utils.alter_proxy(backup_args.proxy)
    # MySQLdb object
    backup_args.__dict__['mysql_db_inst'] = ''
    backup_args.__dict__['storages'] = None
    if conf and conf.storages:
        backup_args.__dict__['storages'] = conf.storages
    # Windows volume
    backup_args.__dict__['shadow'] = ''
    backup_args.__dict__['shadow_path'] = ''
    backup_args.__dict__['file_name'] = ''
    if winutils.is_windows():
        if backup_args.path_to_backup:
            # drive prefix (e.g. 'C:\\') of the path being backed up
            backup_args.__dict__['windows_volume'] = \
                backup_args.path_to_backup[:3]
    # derive the backup media type from the identifiers that were provided
    backup_media = 'fs'
    if backup_args.cinder_vol_id or backup_args.cinder_vol_name:
        backup_media = 'cinder'
    elif backup_args.cindernative_vol_id or \
            backup_args.cindernative_backup_id:
        backup_media = 'cindernative'
    elif backup_args.engine_name == 'nova' and (backup_args.project_id or
                                                backup_args.nova_inst_id or
                                                backup_args.nova_inst_name):
        backup_media = 'nova'
    elif backup_args.cinderbrick_vol_id:
        backup_media = 'cinderbrick'
    backup_args.__dict__['backup_media'] = backup_media
    backup_args.__dict__['time_stamp'] = None
    # NOTE(review): 'and' binds tighter than 'or', so the Windows guard
    # only applies to the download_limit test -- likely intended as
    # (upload != -1 or download != -1) and not is_windows()
    if backup_args.upload_limit != -1 or backup_args.download_limit != -1 and \
            not winutils.is_windows():
        # handle --config option with tmp config file
        if backup_args.config:
            conf_file = tempfile.NamedTemporaryFile(prefix='freezer_job_',
                                                    delete=False)
            # remove the limits from the new file
            # NOTE(review): 'elif' drops only one of the two limits when
            # both are present in the config file
            if 'upload_limit' in conf.default:
                conf.default.pop('upload_limit')
            elif 'download_limit' in conf.default:
                conf.default.pop('download_limit')
            utils.save_config_to_file(conf.default, conf_file, 'default')
            # replace the original file with the tmp one
            conf_index = sys.argv.index('--config') + 1
            sys.argv[conf_index] = conf_file.name
        # if limits provided from cli remove it !
        if '--upload-limit' in sys.argv:
            index = sys.argv.index('--upload-limit')
            sys.argv.pop(index)
            sys.argv.pop(index)
        if '--download-limit' in sys.argv:
            index = sys.argv.index('--download-limit')
            sys.argv.pop(index)
            sys.argv.pop(index)
        # locate trickle
        trickle_executable = distspawn.find_executable('trickle')
        if trickle_executable is None:
            trickle_executable = distspawn.find_executable(
                'trickle', path=":".join(sys.path))
            if trickle_executable is None:
                trickle_executable = distspawn.find_executable(
                    'trickle', path=":".join(os.environ.get('PATH')))
        if trickle_executable:
            LOG.info("Info: Starting trickle ...")
            trickle_command = '{0} -d {1} -u {2} '.\
                format(trickle_executable,
                       getattr(backup_args, 'download_limit') or -1,
                       getattr(backup_args, 'upload_limit') or -1)
            backup_args.__dict__['trickle_command'] = trickle_command
            if backup_args.config:
                backup_args.__dict__['tmp_file'] = conf_file.name
            # maintain env variable not to get into infinite loop
            if "tricklecount" in os.environ:
                tricklecount = int(os.environ.get("tricklecount", 1))
                tricklecount += 1
                os.environ["tricklecount"] = str(tricklecount)
            else:
                os.environ["tricklecount"] = str(1)
        else:
            LOG.warning("Trickle not found. Switching to normal mode without "
                        "limiting bandwidth")
            if backup_args.config:
                # remove index tmp_file from backup arguments dict
                utils.delete_file(conf_file.name)
    return backup_args
def execute_job_action(self, job_action):
    """Run one job action by spawning the freezer agent executable.

    Writes the ``freezer_action`` section to a temporary config file,
    launches the agent as a subprocess, records the child pid in the API
    (so the scheduler can abort it) and retries on failure.  The action
    is attempted ``max_retries + 1`` times in total.

    :param job_action: dict with 'max_retries', 'max_retries_interval'
        and 'freezer_action' keys.
    :return: ``Job.SUCCESS_RESULT`` on exit code 0;
        ``Job.ABORTED_RESULT`` when the agent was terminated with
        SIGTERM (returncode -15, the scheduler's abort path);
        otherwise ``Job.FAIL_RESULT`` once all tries are exhausted.
    """
    max_tries = (job_action.get('max_retries', 0) + 1)
    tries = max_tries
    freezer_action = job_action.get('freezer_action', {})
    max_retries_interval = job_action.get('max_retries_interval', 60)
    action_name = freezer_action.get('action', '')
    while tries:
        # delete=False so the closed file survives for the child process;
        # removed explicitly below
        with tempfile.NamedTemporaryFile(delete=False) as config_file:
            self.save_action_to_file(freezer_action, config_file)
            config_file_name = config_file.name
        freezer_command = '{0} --metadata-out - --config {1}'.\
            format(self.executable, config_file.name)
        self.process = subprocess.Popen(freezer_command.split(),
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.PIPE,
                                        env=os.environ.copy())
        # store the pid for this process in the api
        try:
            self.job_doc['job_schedule']['current_pid'] = \
                self.process.pid
            self.scheduler.update_job(self.job_doc['job_id'],
                                      self.job_doc)
        except Exception as error:
            # best-effort: a failed pid update must not kill the job run
            LOG.error("Error saving the process id {}".format(error))
        # blocks until the agent exits
        output, error = self.process.communicate()
        # ensure the tempfile gets deleted
        utils.delete_file(config_file_name)
        if error:
            # NOTE(review): any stderr output is logged as an error even
            # on a zero exit code
            LOG.error("Freezer client error: {0}".format(error))
        elif output:
            # stdout carries the backup metadata ('--metadata-out -')
            self.upload_metadata(output)
        if self.process.returncode == -15:
            # This means the job action was aborted by the scheduler
            LOG.warning('Freezer-agent was killed by the scheduler. '
                        'Cleanup should be done manually: container, '
                        'mountpoint and lvm snapshots.')
            return Job.ABORTED_RESULT
        elif self.process.returncode:
            # ERROR
            tries -= 1
            if tries:
                LOG.warning('Job {0} failed {1} action,'
                            ' retrying in {2} seconds'
                            .format(self.id, action_name,
                                    max_retries_interval))
                time.sleep(max_retries_interval)
        else:
            # SUCCESS
            LOG.info('Job {0} action {1}'
                     ' returned success exit code'.
                     format(self.id, action_name))
            return Job.SUCCESS_RESULT
    LOG.error('Job {0} action {1} failed after {2} tries'
              .format(self.id, action_name, max_tries))
    return Job.FAIL_RESULT
def freezer_main(backup_args):
    """Freezer main loop for job execution.

    Resolves S3 credentials from the environment when needed, builds the
    storage backend(s), loads the backup engine through the engine
    manager and runs the job -- optionally re-invoking the whole command
    line under ``trickle`` for bandwidth limiting.

    :param backup_args: argument namespace produced by get_backup_args()
    :return: result of run_job() when the job runs in this process,
        otherwise None (the trickle-wrapped child did the work)
    :raises Exception: when S3 storage is selected but access key,
        secret key or endpoint cannot be resolved
    """
    if not backup_args.quiet:
        LOG.info("Begin freezer agent process with args: {0}".format(
            sys.argv))
        LOG.info('log file at {0}'.format(CONF.get('log_file')))
    if backup_args.max_priority:
        utils.set_max_process_priority()
    backup_args.__dict__['hostname_backup_name'] = "{0}_{1}".format(
        backup_args.hostname, backup_args.backup_name)
    max_segment_size = backup_args.max_segment_size
    # OpenStack clients are only needed for swift storage or when backing
    # up OpenStack resources
    if (backup_args.storage == 'swift' or
            backup_args.backup_media in ['nova', 'cinder', 'cindernative',
                                         'cinderbrick']):
        backup_args.client_manager = client_manager.get_client_manager(
            backup_args.__dict__)
    if backup_args.storage == 's3':
        # fall back to environment variables for each missing credential,
        # then fail hard if it is still empty
        if backup_args.__dict__['access_key'] == '' \
                and 'ACCESS_KEY' in os.environ:
            backup_args.__dict__['access_key'] = os.environ.get('ACCESS_KEY')
        if backup_args.__dict__['access_key'] == '':
            raise Exception('No access key found for S3 compatible storage')
        if backup_args.__dict__['secret_key'] == '' \
                and 'SECRET_KEY' in os.environ:
            backup_args.__dict__['secret_key'] = os.environ.get('SECRET_KEY')
        if backup_args.__dict__['secret_key'] == '':
            raise Exception('No secret key found for S3 compatible storage')
        if backup_args.__dict__['endpoint'] == '' \
                and 'ENDPOINT' in os.environ:
            backup_args.__dict__['endpoint'] = os.environ.get('ENDPOINT')
        if backup_args.__dict__['endpoint'] == '':
            raise Exception('No endpoint found for S3 compatible storage')
    if backup_args.storages:
        # pylint: disable=abstract-class-instantiated
        storage = multiple.MultipleStorage(
            [storage_from_dict(x, max_segment_size)
             for x in backup_args.storages])
    else:
        storage = storage_from_dict(backup_args.__dict__, max_segment_size)
    engine_loader = engine_manager.EngineManager()
    backup_args.engine = engine_loader.load_engine(
        compression=backup_args.compression,
        symlinks=backup_args.dereference_symlink,
        exclude=backup_args.exclude,
        storage=storage,
        max_segment_size=backup_args.max_segment_size,
        rsync_block_size=backup_args.rsync_block_size,
        encrypt_key=backup_args.encrypt_pass_file,
        dry_run=backup_args.dry_run
    )
    if hasattr(backup_args, 'trickle_command'):
        # 'tricklecount' guards against an endless re-exec loop when the
        # trickle-wrapped child lands back in this function
        if "tricklecount" in os.environ:
            if int(os.environ.get("tricklecount")) > 1:
                LOG.critical("Trickle seems to be not working, Switching "
                             "to normal mode ")
                return run_job(backup_args, storage)
        freezer_command = '{0} {1}'.format(backup_args.trickle_command,
                                           ' '.join(sys.argv))
        LOG.debug('Trickle command: {0}'.format(freezer_command))
        process = subprocess.Popen(freezer_command.split(),
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   env=os.environ.copy())
        # relay the child's stdout while it runs
        # NOTE(review): on Python 3 readline() returns bytes, so the
        # comparison with '' never matches empty output -- confirm the
        # target interpreter
        while process.poll() is None:
            line = process.stdout.readline().strip()
            if line != '':
                print(line)
        output, error = process.communicate()
        if hasattr(backup_args, 'tmp_file'):
            utils.delete_file(backup_args.tmp_file)
        if process.returncode:
            LOG.warning("Trickle Error: {0}".format(error))
            LOG.info("Switching to work without trickle ...")
            return run_job(backup_args, storage)
        else:
            # the trickle-wrapped child already executed the job
            return

    run_job(backup_args, storage)