def execute(self):
    """Create a backend marker file containing the holder id.

    Creates the marker's parent directory when needed.  When the
    'skip_errors' param is set, failures are logged and swallowed
    (e.g. when the backend is in RO state); otherwise re-raised.
    """
    holder_id = self.params.get('holder_id', '')
    marker = self.params['mark_backend']
    try:
        dirname = os.path.dirname(marker)
        if not os.path.exists(dirname):
            # 0o755 octal literal is valid on python 2.6+ and python 3
            # (the old 0755 form is a syntax error on python 3)
            os.makedirs(dirname, 0o755)
        with open(marker, 'w') as f:
            f.write(holder_id)
    except Exception as e:
        cmd_logger.error('Failed to create backend marker: {}'.format(e), extra=self.log_extra)
        # stop_marker_on_errors option is used in tasks to restore group
        # if node backend is broken and currently in RO state, we cannot create lock file,
        # so we can only skip exception.
        if self.params.get('skip_errors'):
            pass
        else:
            raise
    cmd_logger.info(
        'Successfully created backend marker: {}'.format(marker),
        extra=self.log_extra)
def collect_artifacts(self):
    """Parse dnet_client json stdout into artifacts.

    Returns the parsed payload only when it carries an 'error' key
    (including the synthetic error built on parse failure); successful
    commands yield no artifacts.
    """
    result = {}
    cmd_logger.info('Parsing output json for artifacts', extra=self.log_extra)
    output = self.watcher.get_stdout()
    try:
        result = json.loads(output)
    except Exception as e:
        # synthetic error artifact so callers see a uniform shape
        result = {
            'error': {
                'message': 'failed to parse dnet_client output, see logs',
                'code': -666,
            }
        }
        cmd_logger.error(
            'Failed to parse dnet_client output: error {}, stdout "{}"'.format(e, output),
            extra=self.log_extra,
        )
        # dead 'pass' statement removed
    if 'error' not in result:
        # do not save artifacts for successfully executed commands
        return {}
    return result
def update_broken_commands(self):
    """Mark db commands without an exit code whose process died as finished."""
    session = Session()
    session.begin()
    try:
        for command in session.query(Command).filter_by(exit_code=None):
            log_extra = {'task_id': command.task_id, 'job_id': command.job_id}
            if self.pid_exists(command.pid):
                cmd_logger.warn(
                    'Command {}, pid {} is considered broken, but process is running'.format(
                        command.uid, command.pid
                    ),
                    extra=log_extra,
                )
                continue
            # process is gone: close out the record with a sentinel exit code
            command.progress = 1.0
            command.exit_code = 666
            command.finish_ts = int(time.time())
            session.add(command)
            cmd_logger.info(
                'Command {}, pid {} is considered broken, will be marked as '
                'finished'.format(command.uid, command.pid),
                extra=log_extra,
            )
        session.commit()
    except Exception:
        logger.exception('Failed to update broken commands')
        session.rollback()
        raise
def execute(self):
    """Rename move_src to move_dst; on failure write a stop-backend marker and re-raise."""
    src = self.params['move_src']
    dst = self.params['move_dst']
    try:
        os.rename(src, dst)
    except Exception as e:
        cmd_logger.error(
            'Failed to execute move path command: {} to {}: {}'.format(
                src,
                dst,
                e,
            ),
            extra=self.log_extra,
        )
        marker = self.params.get('stop_backend')
        if marker:
            # best effort: a failing marker write is only logged
            try:
                with open(marker, 'w') as f:
                    f.write(src)
            except Exception as e:
                cmd_logger.error('Failed to create backend stop file: {}'.format(e), extra=self.log_extra)
        raise
    cmd_logger.info(
        'Successfully performed move task: {} to {}'.format(
            src,
            dst,
        ),
        extra=self.log_extra,
    )
def collect_artifacts(self):
    """Load the commands stats json file and return the parsed per-operation counters."""
    stats_path = self.params.get('commands_stats_path')
    if not stats_path:
        cmd_logger.info('Commands stats path was not supplied', extra=self.log_extra)
        return {}
    cmd_logger.info(
        'Parsing commands stats path: {}'.format(stats_path),
        extra=self.log_extra)
    raw_stats = {}
    try:
        with open(stats_path, 'rb') as stats_file:
            raw_stats = json.load(stats_file).get('commands', {})
    except Exception:
        # unreadable stats are logged and treated as empty
        cmd_logger.exception(
            'Failed to parse commands stats file {}'.format(stats_path),
            extra=self.log_extra,
        )
    parsed_stats = self._parse_commands_stats(raw_stats)
    # NOTE: temporary backward compatibility
    self.commands_stats = parsed_stats
    return parsed_stats
def execute(self):
    """Remove the group file; on failure drop a stop-backend marker (if configured), else re-raise."""
    path = self.params['remove_group_file']
    cmd_logger.info('Removing group file {0}'.format(path), extra=self.log_extra)
    try:
        os.remove(path)
    except Exception as e:
        cmd_logger.error('Failed to remove group file: {}'.format(e), extra=self.log_extra)
        marker = self.params.get('stop_backend')
        if not marker:
            raise
        # writing the marker successfully swallows the removal error
        try:
            with open(marker, 'w') as f:
                f.write(path)
        except Exception as e:
            cmd_logger.error(
                'Failed to create backend stop file: {}'.format(e),
                extra=self.log_extra)
            raise
    cmd_logger.info('Successfully removed group file {}'.format(path), extra=self.log_extra)
def on_command_completed(self):
    """Finalize a command: collect artifacts, update progress, run post processors.

    Post processors run only when the command succeeded (or exited with a
    configured success code); each post processor failure is logged and
    skipped so the remaining ones still run.
    """
    self.finish_ts = int(time.time())
    self.artifacts = self.collect_artifacts()
    self.on_update_progress()
    if not self._apply_postprocessors():
        # TODO: add status codes
        cmd_logger.info('Command failed, no post processors will be applied',
                        extra=self.log_extra)
        return
    for post_processor in self.POST_PROCESSORS:
        params_supplied = all(
            param in self.params
            for param in post_processor.REQUIRED_PARAMS
        )
        if params_supplied:
            # TODO: replace by required params? possibly not
            cmd_logger.info('Running post processor {}'.format(post_processor.__name__),
                            extra=self.log_extra)
            uid = uuid.uuid4().hex
            command = post_processor(uid, params=self.params)
            try:
                # NOTE: when running as a post processor command is not
                # dumped to database, therefore 'execute' method is called
                # instead of 'run'
                command.execute()
            except Exception:
                # was a bare 'except:', which would also swallow
                # SystemExit/KeyboardInterrupt
                cmd_logger.exception(
                    'Post processor {} failed, skipped'.format(
                        post_processor.__name__
                    ),
                    extra=self.log_extra,
                )
                continue
def execute(self):
    """Recursively delete the configured path, re-raising on failure."""
    target = self.params['remove_path']
    try:
        shutil.rmtree(target)
    except Exception:
        cmd_logger.exception('Failed to remove path {}'.format(target), extra=self.log_extra)
        raise
    cmd_logger.info('Successfully removed path {}'.format(target), extra=self.log_extra)
def collect_artifacts(self):
    """Locate the command's tmp dir from its argv and parse its exec_state file.

    Returns the parsed 'status' mapping, or {} when the tmp dir cannot be
    determined or the state file cannot be read.
    """
    tmp_dir = None
    for i, cmd_part in enumerate(self.cmd):
        if cmd_part in ('--tmp', '-t'):
            # guard against the flag being the final argv element
            # (original indexed self.cmd[i + 1] unconditionally -> IndexError)
            if i + 1 < len(self.cmd):
                tmp_dir = self.cmd[i + 1]
            break
    if not tmp_dir:
        cmd_logger.info('Failed to determine tmp directory', extra=self.log_extra)
        return {}
    exec_state_path = os.path.join(tmp_dir, 'exec_state')
    cmd_logger.info('Parsing exec state: {}'.format(exec_state_path), extra=self.log_extra)
    exec_state = {}
    try:
        with open(exec_state_path, 'rb') as f:
            exec_state = json.load(f).get('status', {})
    except Exception:
        cmd_logger.exception(
            'Failed to parse exec state file {}'.format(exec_state_path),
            extra=self.log_extra,
        )
        # dead 'pass' statement removed
    return exec_state
def execute(self):
    """Create a group file marker containing the group id.

    On failure, optionally write a stop-backend marker; a failed marker
    write (or a missing marker param) re-raises the original error.
    """
    group_str = str(self.params.get('group', ''))
    path = self.params['group_file_marker'].format(group_id=group_str)
    try:
        dirname = os.path.dirname(path)
        if not os.path.exists(dirname):
            # 0o755 octal literal is valid on python 2.6+ and python 3
            # (the old 0755 form is a syntax error on python 3)
            os.makedirs(dirname, 0o755)
        with open(path, 'w') as f:
            f.write(group_str)
    except Exception as e:
        cmd_logger.error(
            'Failed to create group file marker: {}'.format(e),
            extra=self.log_extra)
        marker = self.params.get('stop_backend')
        if marker:
            try:
                with open(marker, 'w') as f:
                    f.write(path)
            except Exception as e:
                cmd_logger.error(
                    'Failed to create backend stop file: {}'.format(e),
                    extra=self.log_extra)
                raise
        else:
            raise
    cmd_logger.info(
        'Successfully created group file marker for group {}'.format(
            group_str),
        extra=self.log_extra)
def _prepare_command(self, cmd):
    """Resolve placeholder parameters in the command template and split it into argv."""
    cmd_str = ' '.join(cmd)
    if 'cmd_tpl' in self.params:
        # TODO: remove this backward-compatibility hack
        cmd_str = self.params['cmd_tpl']
    for param in self.PLACEHOLDERS_TPL.findall(cmd_str):
        if param in self.params:
            continue
        if param != 'backend_id':
            raise ValueError('Cannot process command: unknown parameter "{}"'.format(param))
        # backend_id is derivable from the elliptics config + group id
        if 'group' not in self.params:
            raise ValueError('Parameter "group" is required')
        config_path = self.params['config_path']
        cmd_logger.info(
            'backend id not found in request params, '
            'getting backend id from elliptics config {config}, '
            'group {group}'.format(
                config=config_path,
                group=self.params['group'],
            ),
            extra=self.log_extra,
        )
        config = EllipticsConfig(self.params['config_path'])
        self.params['backend_id'] = config.get_group_backend(int(self.params['group']))
    cmd_str = cmd_str.format(**self.params)
    cmd_logger.info('Prepared command: {}'.format(cmd_str), extra=self.log_extra)
    return shlex.split(cmd_str)
def execute(self):
    """Move a path from move_src to move_dst; on failure write a stop marker and re-raise."""
    try:
        os.rename(self.params['move_src'], self.params['move_dst'])
    except Exception as e:
        cmd_logger.error(
            'Failed to execute move path command: {} to {}: {}'.format(
                self.params['move_src'],
                self.params['move_dst'],
                e,
            ),
            extra=self.log_extra,
        )
        marker = self.params.get('stop_backend')
        if marker:
            # best effort: a failing marker write is only logged
            try:
                with open(marker, 'w') as stop_file:
                    stop_file.write(self.params['move_src'])
            except Exception as e:
                cmd_logger.error(
                    'Failed to create backend stop file: {}'.format(e),
                    extra=self.log_extra)
        raise
    cmd_logger.info(
        'Successfully performed move task: {} to {}'.format(
            self.params['move_src'],
            self.params['move_dst'],
        ),
        extra=self.log_extra,
    )
def collect_artifacts(self):
    """Parse dnet_client json stdout into artifacts.

    Returns the parsed payload only when it carries an 'error' key
    (including the synthetic error built on parse failure); successful
    commands yield no artifacts.
    """
    result = {}
    cmd_logger.info('Parsing output json for artifacts', extra=self.log_extra)
    output = self.watcher.get_stdout()
    try:
        result = json.loads(output)
    except Exception as e:
        # synthetic error artifact so callers see a uniform shape
        result = {
            'error': {
                'message': 'failed to parse dnet_client output, see logs',
                'code': -666,
            }
        }
        cmd_logger.error(
            'Failed to parse dnet_client output: error {}, stdout "{}"'.
            format(e, output),
            extra=self.log_extra,
        )
        # dead 'pass' statement removed
    if 'error' not in result:
        # do not save artifacts for successfully executed commands
        return {}
    return result
def execute(self):
    """Rename the group base dir to its 'removed' name.

    A missing source is tolerated when the destination already exists
    (i.e. the rename already happened); any other error is re-raised.
    """
    group_base_path = self.params['group_base_path'].rstrip('/')
    if not os.path.exists(group_base_path):
        raise RuntimeError('Group dir {path} does not exist'.format(
            path=group_base_path,
        ))
    dst_base_path, basename = os.path.split(group_base_path)
    remove_path = os.path.join(dst_base_path, self.removed_basename(basename))
    cmd_logger.info(
        'Renaming group base dir {tmp_dir} to destination dir {dest_dir}'.
        format(
            tmp_dir=group_base_path,
            dest_dir=remove_path,
        ),
        extra=self.log_extra,
    )
    try:
        os.rename(group_base_path, remove_path)
    except OSError as e:
        # symbolic errno.ENOENT instead of the magic number 2,
        # consistent with the unmark_backend handlers in this file
        if e.errno == errno.ENOENT:
            if os.path.exists(remove_path):
                # group_base_path was already renamed, not an error
                pass
            else:
                raise
        else:
            raise
    except Exception:
        cmd_logger.exception('Failed to rename tmp dir to dest dir',
                             extra=self.log_extra)
        raise
def collect_artifacts(self):
    """Read the commands stats json and return the parsed per-operation counters."""
    path = self.params.get('commands_stats_path')
    if not path:
        cmd_logger.info('Commands stats path was not supplied', extra=self.log_extra)
        return {}
    cmd_logger.info('Parsing commands stats path: {}'.format(path), extra=self.log_extra)
    commands_stats = {}
    try:
        with open(path, 'rb') as f:
            commands_stats = json.load(f).get('commands', {})
    except Exception:
        # unreadable stats are logged and treated as empty
        cmd_logger.exception(
            'Failed to parse commands stats file {}'.format(path),
            extra=self.log_extra,
        )
    parsed = self._parse_commands_stats(commands_stats)
    # NOTE: temporary backward compatibility
    self.commands_stats = parsed
    return parsed
def update_broken_commands(self):
    """Finalize db records of commands whose process no longer exists."""
    session = Session()
    session.begin()
    try:
        for cmd in session.query(Command).filter_by(exit_code=None):
            log_extra = {'task_id': cmd.task_id, 'job_id': cmd.job_id}
            if self.pid_exists(cmd.pid):
                cmd_logger.warn(
                    'Command {}, pid {} is considered broken, but process is running'.format(
                        cmd.uid, cmd.pid
                    ),
                    extra=log_extra,
                )
                continue
            # process is gone: close out the record with a sentinel exit code
            cmd.progress = 1.0
            cmd.exit_code = 666
            cmd.finish_ts = int(time.time())
            session.add(cmd)
            cmd_logger.info(
                'Command {}, pid {} is considered broken, will be marked as '
                'finished'.format(cmd.uid, cmd.pid),
                extra=log_extra,
            )
        session.commit()
    except Exception:
        logger.exception('Failed to update broken commands')
        session.rollback()
        raise
def execute(self):
    """Create an empty backend stop marker file, re-raising on failure."""
    stop_file = self.params['create_stop_file']
    try:
        # truncating open is enough; nothing is written
        with open(stop_file, 'w'):
            pass
    except Exception as e:
        cmd_logger.error('Failed to create backend stop marker: {}'.format(e), extra=self.log_extra)
        raise
    cmd_logger.info('Successfully created backend stop marker: {}'.format(stop_file), extra=self.log_extra)
def execute(self):
    """Verify that the backend path can be listed (exists and is readable)."""
    backend_path = self.params['backend_path']
    try:
        os.listdir(backend_path)
    except Exception as e:
        cmd_logger.error('Failed to check path: {}'.format(e), extra=self.log_extra)
        raise
    else:
        cmd_logger.info('Successfully check path: {}'.format(backend_path), extra=self.log_extra)
def execute(self):
    """Remove the backend stop marker file, re-raising on failure."""
    stop_file = self.params['remove_stop_file']
    try:
        os.remove(stop_file)
    except Exception as e:
        cmd_logger.error('Failed to remove backend stop marker: {}'.format(e),
                         extra=self.log_extra)
        raise
    else:
        cmd_logger.info('Successfully removed backend stop marker: {}'.format(stop_file),
                        extra=self.log_extra)
def execute(self):
    """Create an empty backend marker file, re-raising on failure."""
    marker = self.params['mark_backend']
    try:
        # truncating open is enough; nothing is written
        with open(marker, 'w'):
            pass
    except Exception as e:
        cmd_logger.error('Failed to create backend marker: {}'.format(e),
                         extra=self.log_extra)
        raise
    cmd_logger.info('Successfully created backend marker: {}'.format(marker),
                    extra=self.log_extra)
def execute(self):
    """Recursively remove the configured path tree."""
    doomed_path = self.params['remove_path']
    try:
        shutil.rmtree(doomed_path)
    except Exception:
        cmd_logger.exception('Failed to remove path {}'.format(doomed_path),
                             extra=self.log_extra)
        raise
    else:
        cmd_logger.info('Successfully removed path {}'.format(doomed_path),
                        extra=self.log_extra)
def run(self, command, params, env=None, success_codes=None):
    """Start a subprocess for the command unless one is already running for the task.

    Returns the uid of the (new or already-running) subprocess.
    """
    log_extra = {
        'task_id': params.get('task_id'),
        'job_id': params.get('job_id')
    }
    cmd_logger.info('command to execute: {0}'.format(command), extra=log_extra)
    cmd_logger.info(
        'parameters supplied: {params}, env variables: {env}'.format(
            params=params,
            env=env,
        ),
        extra=log_extra,
    )
    # py2: normalize a unicode command to utf-8 bytes before shlex
    if isinstance(command, unicode):
        command = command.encode('utf-8')
    if isinstance(command, basestring):
        cmd = shlex.split(command)
    else:
        cmd = command
    subprocess_uid, status = self.try_find_nonfailed_subprocess_and_status(
        params.get('job_id'), params.get('task_id'))
    if subprocess_uid:
        cmd_logger.info(
            'command execution is not required, process for task {} is already running: '
            '{}'.format(
                params['task_id'],
                status,
            ),
            extra=log_extra,
        )
        return subprocess_uid
    Subprocess = self.get_subprocess(cmd, params)
    uid = uuid.uuid4().hex
    if issubclass(Subprocess, BaseSubprocess):
        sub = Subprocess(uid, cmd, params=params, env=env, success_codes=success_codes)
    else:
        sub = Subprocess(uid, params=params)
    sub.run()
    if sub.error:
        cmd_logger.info('command execution failed: {}: {}'.format(sub, sub.error),
                        extra=log_extra)
    else:
        cmd_logger.info('command execution started successfully: {}'.format(sub),
                        extra=log_extra)
    self.subprocesses[uid] = sub
    return uid
def _parse_commands_stats(self, commands_stats):
    """Group flat '<operation>.<status>' counters into a per-operation status map."""
    op_statuses_count = {}
    for operation_status, count in commands_stats.iteritems():
        operation, status = operation_status.split('.', 1)
        per_op = op_statuses_count.setdefault(operation, {})
        per_op[status] = per_op.get(status, 0) + count
    cmd_logger.info('Parsed command statuses: {}'.format(op_statuses_count),
                    extra=self.log_extra)
    return op_statuses_count
def execute(self):
    """Touch the backend stop marker file into existence."""
    marker_path = self.params['create_stop_file']
    try:
        with open(marker_path, 'w'):
            pass
    except Exception as e:
        cmd_logger.error(
            'Failed to create backend stop marker: {}'.format(e),
            extra=self.log_extra)
        raise
    cmd_logger.info(
        'Successfully created backend stop marker: {}'.format(marker_path),
        extra=self.log_extra)
def execute(self):
    """Delete the backend stop marker file."""
    marker_path = self.params['remove_stop_file']
    try:
        os.remove(marker_path)
    except Exception as e:
        cmd_logger.error(
            'Failed to remove backend stop marker: {}'.format(e),
            extra=self.log_extra)
        raise
    cmd_logger.info(
        'Successfully removed backend stop marker: {}'.format(marker_path),
        extra=self.log_extra)
def _parse_commands_stats(self, commands_stats):
    """Build {operation: {status: count}} from flat '<op>.<status>' counters."""
    parsed = {}
    for operation_status, count in commands_stats.iteritems():
        operation, status = operation_status.split('.', 1)
        statuses = parsed.setdefault(operation, {})
        statuses[status] = statuses.get(status, 0) + count
    cmd_logger.info(
        'Parsed command statuses: {}'.format(parsed),
        extra=self.log_extra)
    return parsed
def terminate(self, uid):
    """Terminate the subprocess identified by uid; raise if termination fails."""
    if uid not in self.subprocesses:
        raise ValueError('Unknown command uid: {0}'.format(uid))
    sub = self.subprocesses[uid]
    cmd_logger.info(
        'terminating command {}, pid: {}'.format(uid, sub.process.pid),
        extra=sub.log_extra,
    )
    # a truthy return code means termination did not succeed
    code = sub.terminate()
    if code:
        raise RuntimeError('Failed to terminate command {}, exit code: {}'.format(uid, code))
def execute(self):
    """Remove the backend marker file; a marker that is already gone is not an error."""
    marker = self.params['unmark_backend']
    try:
        os.remove(marker)
    except OSError as e:
        # tolerate an already-missing marker
        if e.errno != errno.ENOENT:
            raise
    except Exception as e:
        cmd_logger.error('Failed to remove backend marker: {}'.format(e),
                         extra=self.log_extra)
        raise
    cmd_logger.info('Successfully removed backend marker: {}'.format(marker),
                    extra=self.log_extra)
def execute(self):
    """Create (or refresh) the group file containing the group id.

    An existing group file is preserved as '<path>.bak' before rewriting;
    the parent directory is created when needed.
    """
    group = str(int(self.params['group']))
    path = self.params['group_file'].format(group_id=group)
    try:
        if os.path.exists(path):
            os.rename(path, path + '.bak')
        dirname = os.path.dirname(path)
        if not os.path.exists(dirname):
            # 0o755 octal literal is valid on python 2.6+ and python 3
            # (the old 0755 form is a syntax error on python 3)
            os.makedirs(dirname, 0o755)
        with open(path, 'w') as f:
            f.write(group)
    except Exception:
        cmd_logger.exception('Failed to create group file', extra=self.log_extra)
        raise
    cmd_logger.info('Successfully created group file {} for group {}'.format(path, group),
                    extra=self.log_extra)
def _apply_postprocessors(self):
    """Return True when the command finished with exit code 0 or a configured success code."""
    if self.watcher.exit_code == 0:
        return True
    cmd_logger.info(
        'Checking success codes: command code {}, success codes {}'.format(
            self.command_code,
            self.success_codes,
        ),
        extra=self.log_extra,
    )
    return bool(self.success_codes and self.command_code in self.success_codes)
def execute(self):
    """Remove the backend marker; ignore a marker that no longer exists."""
    marker = self.params['unmark_backend']
    try:
        os.remove(marker)
    except OSError as e:
        if e.errno != errno.ENOENT:
            # any OSError other than "No such file or directory" is fatal
            raise
    except Exception as e:
        cmd_logger.error('Failed to remove backend marker: {}'.format(e),
                         extra=self.log_extra)
        raise
    cmd_logger.info(
        'Successfully removed backend marker: {}'.format(marker),
        extra=self.log_extra)
def _complete_if_ready(self):
    """Fire command completion once the process exited and both output streams closed."""
    ready = self._exit and self.output_closed and self.error_output_closed
    if not ready:
        return
    if self._force_complete_cb_timeout:
        # completion happened naturally, so cancel the forced-completion timer
        cmd_logger.debug('pid {0}: removing force complete callback'.format(self.subprocess.pid),
                         extra=self.log_extra)
        self.subprocess.io_loop.remove_timeout(self._force_complete_cb_timeout)
        self._force_complete_cb_timeout = None
    cmd_logger.info('pid {0}: command execution is completed'.format(self.subprocess.pid),
                    extra=self.log_extra)
    self.command.on_command_completed()
def _exit_cb(self, code):
    """Subprocess exit callback.

    Records the exit code, marks progress complete, derives the command
    code, and arms a forced-completion timeout in case the output streams
    never close.
    """
    self._exit = True
    cmd_logger.debug('pid {0}: exit callback'.format(self.subprocess.pid),
                     extra=self.log_extra)
    self.exit_code = code
    self.progress = 1.0
    self.set_command_code()
    cmd_logger.info('pid {0}: exit code {1}, command code {2}'.format(
        self.subprocess.pid, self.exit_code, self.command_code),
        extra=self.log_extra)
    if self._force_complete_cb_timeout is None:
        # force completion after 10s even if stdout/stderr are never closed
        cmd_logger.debug('pid {0}: setting force complete callback '.format(self.subprocess.pid),
                         extra=self.log_extra)
        self._force_complete_cb_timeout = self.subprocess.io_loop.add_timeout(
            datetime.timedelta(seconds=10),
            self._force_complete,
        )
    self._complete_if_ready()
def execute(self):
    """Remove the group file, falling back to a stop-backend marker on failure."""
    path = self.params['remove_group_file']
    cmd_logger.info('Removing group file {0}'.format(path), extra=self.log_extra)
    try:
        os.remove(path)
    except Exception as e:
        cmd_logger.error('Failed to remove group file: {}'.format(e), extra=self.log_extra)
        marker = self.params.get('stop_backend')
        if marker:
            # writing the marker successfully swallows the removal error
            try:
                with open(marker, 'w') as stop_file:
                    stop_file.write(path)
            except Exception as e:
                cmd_logger.error('Failed to create backend stop file: {}'.format(e),
                                 extra=self.log_extra)
                raise
        else:
            raise
    cmd_logger.info('Successfully removed group file {}'.format(path), extra=self.log_extra)
def execute(self):
    """Create (or refresh) the group file containing the group id.

    An existing group file is preserved as '<path>.bak' before rewriting;
    the parent directory is created when needed.
    """
    group = str(int(self.params['group']))
    path = self.params['group_file'].format(group_id=group)
    try:
        if os.path.exists(path):
            os.rename(path, path + '.bak')
        dirname = os.path.dirname(path)
        if not os.path.exists(dirname):
            # 0o755 octal literal is valid on python 2.6+ and python 3
            # (the old 0755 form is a syntax error on python 3)
            os.makedirs(dirname, 0o755)
        with open(path, 'w') as f:
            f.write(group)
    except Exception:
        cmd_logger.exception('Failed to create group file', extra=self.log_extra)
        raise
    cmd_logger.info(
        'Successfully created group file {} for group {}'.format(
            path, group),
        extra=self.log_extra)
def run(self, command, params, env=None, success_codes=None):
    """Start a command subprocess (tornado-style coroutine).

    Skips execution when a non-failed subprocess for the same job/task is
    already known, returning its uid via gen.Return; otherwise starts a new
    subprocess, registers it and returns the new uid.
    """
    log_extra = {'task_id': params.get('task_id'), 'job_id': params.get('job_id')}
    cmd_logger.info('command to execute: {0}'.format(command), extra=log_extra)
    cmd_logger.info(
        'parameters supplied: {params}, env variables: {env}'.format(
            params=params,
            env=env,
        ),
        extra=log_extra,
    )
    # py2: normalize a unicode command to utf-8 bytes before shlex
    if isinstance(command, unicode):
        command = command.encode('utf-8')
    cmd = (shlex.split(command)
           if isinstance(command, basestring) else
           command)
    subprocess_uid, status = self.try_find_nonfailed_subprocess_and_status(
        params.get('job_id'), params.get('task_id'))
    if subprocess_uid:
        cmd_logger.info(
            'command execution is not required, process for task {} is already running: '
            '{}'.format(
                params['task_id'],
                status,
            ),
            extra=log_extra,
        )
        # Note: subprocess_uid can be not in self.subprocesses
        raise gen.Return(subprocess_uid)
    Subprocess = self.get_subprocess(cmd, params)
    uid = uuid.uuid4().hex
    if issubclass(Subprocess, BaseSubprocess):
        sub = Subprocess(uid, cmd, params=params, env=env, success_codes=success_codes)
    else:
        sub = Subprocess(uid, params=params)
    yield sub.run()
    if sub.error:
        cmd_logger.info('command execution failed: {}: {}'.format(sub, sub.error),
                        extra=log_extra)
    else:
        cmd_logger.info('command execution started successfully: {}'.format(sub),
                        extra=log_extra)
    self.subprocesses[uid] = sub
    raise gen.Return(uid)
def collect_artifacts(self):
    """Collect job artifacts either from stdout or file.

    Prefers the output file named by OUTPUT_PARAM; falls back to parsing
    the watcher's stdout as json.  Failures are recorded under
    ResultFields.ERR_MSG instead of raising.
    """
    result = {
        ResultFields.SUB_CMD: self.params[ResultFields.SUB_CMD]
    }
    if OUTPUT_PARAM in self.params:
        file_name = self.params[OUTPUT_PARAM]
        try:
            cmd_logger.info('Collecting artifacts from: {}'.format(file_name),
                            extra=self.log_extra)
            with open(file_name, 'rb') as f:
                result[ResultFields.OUTPUT] = json.load(f)
            return result
        except Exception as e:
            cmd_logger.error(
                'Failed to load eblob_kit artifact: error {}, file name "{}"'.format(e, file_name),
                extra=self.log_extra,
            )
            result[ResultFields.ERR_MSG] = 'error: {}, file {}'.format(e, file_name)
            return result
    if self.watcher is None:
        result[ResultFields.ERR_MSG] = 'no watcher and no resulting json'
        return result
    cmd_logger.info('Collecting artifacts from stdout', extra=self.log_extra)
    # initialized up front so the except clause can reference it even when
    # get_stdout() itself raises (the original hit a NameError there)
    output = ''
    try:
        output = self.watcher.get_stdout()
        result[ResultFields.OUTPUT] = json.loads(output)
        return result
    except Exception as e:
        cmd_logger.error(
            'Failed to parse stdout as json: error {}, output "{}"'.format(e, output),
            extra=self.log_extra,  # was missing, added for consistent log context
        )
        result[ResultFields.ERR_MSG] = 'failed to load json from stdout {}'.format(e)
        return result
def _parse_job_stats(self, job_stats):
    """Aggregate per-operation status counters; add unavailable_groups pseudo-errors."""
    op_statuses_count = {}
    for operation_status, count in job_stats.get('commands', {}).iteritems():
        operation, status = operation_status.split('.', 1)
        per_op = op_statuses_count.setdefault(operation, {})
        per_op[status] = per_op.get(status, 0) + count
    unavailable_groups = job_stats.get('unavailable_groups')
    if unavailable_groups is not None:
        # we imitate artifacts format for such errors
        op_statuses_count.setdefault('unavailable_groups', {})["-6"] = unavailable_groups
    cmd_logger.info(
        'Parsed command statuses: {}'.format(op_statuses_count),
        extra=self.log_extra)
    return op_statuses_count
def _parse_command_code(self):
    """Return the effective command code.

    Zero exit codes pass through; otherwise try to extract the operation
    error code from the json printed on stdout, falling back to the raw
    exit code when the payload is missing or malformed.
    """
    if self.watcher.exit_code == 0:
        return self.watcher.exit_code
    output = self.watcher.get_stdout()
    try:
        data = json.loads(output)
    except Exception:
        cmd_logger.error(
            'pid {}: failed to parse output json: {}'.format(self.pid, output),
            extra=self.log_extra,
        )
        return self.watcher.exit_code
    if 'error' not in data:
        cmd_logger.error(
            'pid {}: no "error" key in response data'.format(self.pid),
            extra=self.log_extra,
        )
        return self.watcher.exit_code
    if 'code' not in data['error']:
        cmd_logger.error(
            'pid {}: no "code" key in response error data'.format(self.pid),
            extra=self.log_extra,
        )
        return self.watcher.exit_code
    error_code = data['error']['code']
    cmd_logger.info(
        'pid {}: operation error code {}'.format(self.pid, error_code),
        extra=self.log_extra,
    )
    return error_code
def execute(self):
    """Create a group file marker containing the group id.

    On failure, optionally write a stop-backend marker; a failed marker
    write (or a missing marker param) re-raises the original error.
    """
    group_str = str(self.params.get('group', ''))
    path = self.params['group_file_marker'].format(group_id=group_str)
    try:
        dirname = os.path.dirname(path)
        if not os.path.exists(dirname):
            # 0o755 octal literal is valid on python 2.6+ and python 3
            # (the old 0755 form is a syntax error on python 3)
            os.makedirs(dirname, 0o755)
        with open(path, 'w') as f:
            f.write(group_str)
    except Exception as e:
        cmd_logger.error('Failed to create group file marker: {}'.format(e),
                         extra=self.log_extra)
        marker = self.params.get('stop_backend')
        if marker:
            try:
                with open(marker, 'w') as f:
                    f.write(path)
            except Exception as e:
                cmd_logger.error('Failed to create backend stop file: {}'.format(e),
                                 extra=self.log_extra)
                raise
        else:
            raise
    cmd_logger.info('Successfully created group file marker for group {}'.format(group_str),
                    extra=self.log_extra)
def _parse_command_code(self):
    """Prefer the json 'error.code' from stdout over a nonzero exit code."""
    if self.watcher.exit_code == 0:
        return self.watcher.exit_code
    output = self.watcher.get_stdout()
    try:
        data = json.loads(output)
    except Exception:
        cmd_logger.error(
            'pid {}: failed to parse output json: {}'.format(
                self.pid,
                output,
            ),
            extra=self.log_extra,
        )
        return self.watcher.exit_code
    if 'error' not in data:
        cmd_logger.error(
            'pid {}: no "error" key in response data'.format(self.pid),
            extra=self.log_extra,
        )
        return self.watcher.exit_code
    if 'code' not in data['error']:
        cmd_logger.error(
            'pid {}: no "code" key in response error data'.format(self.pid),
            extra=self.log_extra,
        )
        return self.watcher.exit_code
    cmd_logger.info(
        'pid {}: operation error code {}'.format(
            self.pid,
            data['error']['code'],
        ),
        extra=self.log_extra,
    )
    return data['error']['code']
def execute(self):
    """Rename the group base dir to its 'removed' name.

    A missing source is tolerated when the destination already exists
    (i.e. the rename already happened); any other error is re-raised.
    """
    group_base_path = self.params['group_base_path'].rstrip('/')
    if not os.path.exists(group_base_path):
        raise RuntimeError(
            'Group dir {path} does not exist'.format(
                path=group_base_path,
            )
        )
    dst_base_path, basename = os.path.split(group_base_path)
    remove_path = os.path.join(
        dst_base_path,
        self.removed_basename(basename)
    )
    cmd_logger.info(
        'Renaming group base dir {tmp_dir} to destination dir {dest_dir}'.format(
            tmp_dir=group_base_path,
            dest_dir=remove_path,
        ),
        extra=self.log_extra,
    )
    try:
        os.rename(group_base_path, remove_path)
    except OSError as e:
        # symbolic errno.ENOENT instead of the magic number 2,
        # consistent with the unmark_backend handlers in this file
        if e.errno == errno.ENOENT:
            if os.path.exists(remove_path):
                # group_base_path was already renamed, not an error
                pass
            else:
                raise
        else:
            raise
    except Exception:
        cmd_logger.exception('Failed to rename tmp dir to dest dir',
                             extra=self.log_extra)
        raise
def execute(self):
    """Create a backend marker file containing the holder id.

    Creates the marker's parent directory when needed.  When the
    'skip_errors' param is set, failures are logged and swallowed
    (e.g. when the backend is in RO state); otherwise re-raised.
    """
    holder_id = self.params.get('holder_id', '')
    marker = self.params['mark_backend']
    try:
        dirname = os.path.dirname(marker)
        if not os.path.exists(dirname):
            # 0o755 octal literal is valid on python 2.6+ and python 3
            # (the old 0755 form is a syntax error on python 3)
            os.makedirs(dirname, 0o755)
        with open(marker, 'w') as f:
            f.write(holder_id)
    except Exception as e:
        cmd_logger.error('Failed to create backend marker: {}'.format(e),
                         extra=self.log_extra)
        # stop_marker_on_errors option is used in tasks to restore group
        # if node backend is broken and currently in RO state, we cannot create lock file,
        # so we can only skip exception.
        if self.params.get('skip_errors'):
            pass
        else:
            raise
    cmd_logger.info('Successfully created backend marker: {}'.format(marker),
                    extra=self.log_extra)
def execute(self):
    """Create a new group directory.

    The directory is first assembled under a temporary name, populated
    with the supplied files, then renamed into place.  The resulting
    backend path is exposed via self.artifacts['backend_path'].
    """
    group_base_path_root_dir = self.params[
        'group_base_path_root_dir'].rstrip('/')
    basename = self.get_vacant_basename(group_base_path_root_dir)
    tmp_basename = self.tmp_basename(basename)
    tmp_dir = os.path.join(group_base_path_root_dir, tmp_basename)
    cmd_logger.info('Creating tmp dir for new group: {}'.format(tmp_dir),
                    extra=self.log_extra)
    try:
        # 0o755 octal literal is valid on python 2.6+ and python 3
        # (the old 0755 form is a syntax error on python 3)
        os.mkdir(tmp_dir, 0o755)
    except Exception:
        cmd_logger.exception('Failed to create tmp dir for new group',
                             extra=self.log_extra)
        raise
    cmd_logger.info('Adding group files', extra=self.log_extra)
    for filename, body in self.params['files'].iteritems():
        cmd_logger.info('Adding file {}'.format(filename),
                        extra=self.log_extra)
        filename = os.path.join(tmp_dir, filename)
        dirname, basefname = os.path.split(filename)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        with open(filename, 'wb') as f:
            f.write(body)
    dest_dir = os.path.join(group_base_path_root_dir, basename)
    cmd_logger.info(
        'Renaming tmp dir {tmp_dir} to destination dir {dest_dir}'.format(
            tmp_dir=tmp_dir,
            dest_dir=dest_dir,
        ),
        extra=self.log_extra,
    )
    try:
        os.rename(tmp_dir, dest_dir)
    except Exception:
        cmd_logger.exception('Failed to rename tmp dir to dest dir',
                             extra=self.log_extra)
        raise
    backend_path = dest_dir
    if not backend_path.endswith('/'):
        backend_path += '/'
    self.artifacts['backend_path'] = backend_path
def _prepare_command(self, cmd):
    """Resolve template placeholders against params and return the argv list."""
    cmd_str = ' '.join(cmd)
    if 'cmd_tpl' in self.params:
        # TODO: remove this backward-compatibility hack
        cmd_str = self.params['cmd_tpl']
    unresolved = self.PLACEHOLDERS_TPL.findall(cmd_str)
    for param in unresolved:
        if param in self.params:
            continue
        if param == 'backend_id':
            # backend_id is derivable from the elliptics config + group id
            if 'group' not in self.params:
                raise ValueError('Parameter "group" is required')
            config_path = self.params['config_path']
            cmd_logger.info(
                'backend id not found in request params, '
                'getting backend id from elliptics config {config}, '
                'group {group}'.format(
                    config=config_path,
                    group=self.params['group'],
                ),
                extra=self.log_extra,
            )
            config = EllipticsConfig(self.params['config_path'])
            self.params['backend_id'] = config.get_group_backend(
                int(self.params['group']))
        else:
            raise ValueError(
                'Cannot process command: unknown parameter "{}"'.format(
                    param))
    cmd_str = cmd_str.format(**self.params)
    cmd_logger.info('Prepared command: {}'.format(cmd_str),
                    extra=self.log_extra)
    return shlex.split(cmd_str)
def execute(self):
    """Create a new group directory.

    The directory is first assembled under a temporary name, populated
    with the supplied files, then renamed into place.  The resulting
    backend path is exposed via self.artifacts['backend_path'].
    """
    group_base_path_root_dir = self.params['group_base_path_root_dir'].rstrip('/')
    basename = self.get_vacant_basename(group_base_path_root_dir)
    tmp_basename = self.tmp_basename(basename)
    tmp_dir = os.path.join(group_base_path_root_dir, tmp_basename)
    cmd_logger.info('Creating tmp dir for new group: {}'.format(tmp_dir),
                    extra=self.log_extra)
    try:
        # 0o755 octal literal is valid on python 2.6+ and python 3
        # (the old 0755 form is a syntax error on python 3)
        os.mkdir(tmp_dir, 0o755)
    except Exception:
        cmd_logger.exception('Failed to create tmp dir for new group',
                             extra=self.log_extra)
        raise
    cmd_logger.info('Adding group files', extra=self.log_extra)
    for filename, body in self.params['files'].iteritems():
        cmd_logger.info('Adding file {}'.format(filename),
                        extra=self.log_extra)
        filename = os.path.join(
            tmp_dir,
            filename
        )
        dirname, basefname = os.path.split(filename)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        with open(filename, 'wb') as f:
            f.write(body)
    dest_dir = os.path.join(group_base_path_root_dir, basename)
    cmd_logger.info(
        'Renaming tmp dir {tmp_dir} to destination dir {dest_dir}'.format(
            tmp_dir=tmp_dir,
            dest_dir=dest_dir,
        ),
        extra=self.log_extra,
    )
    try:
        os.rename(tmp_dir, dest_dir)
    except Exception:
        cmd_logger.exception('Failed to rename tmp dir to dest dir',
                             extra=self.log_extra)
        raise
    backend_path = dest_dir
    if not backend_path.endswith('/'):
        backend_path += '/'
    self.artifacts['backend_path'] = backend_path
def execute(self):
    """Create a 64-byte random ids file unless one already exists."""
    ids_file = self.params['ids']
    cmd_logger.info('Generating ids file {}'.format(ids_file), extra=self.log_extra)
    if os.path.exists(ids_file):
        cmd_logger.info('Ids file {} already exists'.format(ids_file), extra=self.log_extra)
    else:
        try:
            with open(ids_file, 'wb') as ids_fh:
                ids_fh.write(os.urandom(64))
        except Exception:
            cmd_logger.exception(
                'Failed to create ids file {}'.format(ids_file),
                extra=self.log_extra,
            )
            raise
    cmd_logger.info('Successfully created ids file {}'.format(ids_file), extra=self.log_extra)