def get_next_dir(self, cur_dir):
    cur_dir = cur_dir.replace(self._file_tree.get_root_name(), '')
    cur_time = FileUtil.parse_dir_to_timestamp(dir_name=cur_dir)
    if self.PARTITIONER_TYPE == PartitionerStorageType.YEARLY:
        next_time = datetime.datetime(cur_time.year + 1, 1, 1)
    elif self.PARTITIONER_TYPE == PartitionerStorageType.MONTHLY:
        if cur_time.month == 12:
            next_time = datetime.datetime(cur_time.year + 1, 1, 1)
        else:
            next_time = datetime.datetime(cur_time.year, cur_time.month + 1, 1)
    elif self.PARTITIONER_TYPE == PartitionerStorageType.DAILY:
        next_time = cur_time + datetime.timedelta(days=1)
    elif self.PARTITIONER_TYPE == PartitionerStorageType.HOURLY:
        next_time = cur_time + datetime.timedelta(hours=1)
    else:
        next_time = cur_time + datetime.timedelta(minutes=1)
    next_dir_name = FileUtil.parse_timestamp_to_dir(timestamp=next_time).split('/')
    next_dir_name = '/'.join(
        next_dir_name[:self.PARTITIONER_TYPE_TO_HEIGHT_MAP[self.PARTITIONER_TYPE]])
    next_dir_name = FileUtil.join_paths_to_dir(
        root_dir=self._file_tree.get_root_name(), base_name=next_dir_name)
    if FileUtil.does_dir_exist(dir_name=next_dir_name):
        return next_dir_name
    else:
        return None
def get_previous_dir(self, cur_dir):
    self.initialize_from_dir(dir_name=self.get_dir_name())
    cur_dir = cur_dir.replace(self._file_tree.get_root_name(), '')
    cur_time = FileUtil.parse_dir_to_timestamp(dir_name=cur_dir)
    if self.PARTITIONER_TYPE == PartitionerStorageType.YEARLY:
        pre_time = datetime.datetime(cur_time.year - 1, 1, 1)
    elif self.PARTITIONER_TYPE == PartitionerStorageType.MONTHLY:
        if cur_time.month == 1:
            pre_time = datetime.datetime(cur_time.year - 1, 12, 1)
        else:
            pre_time = datetime.datetime(cur_time.year, cur_time.month - 1, 1)
    elif self.PARTITIONER_TYPE == PartitionerStorageType.DAILY:
        pre_time = cur_time - datetime.timedelta(days=1)
    elif self.PARTITIONER_TYPE == PartitionerStorageType.HOURLY:
        pre_time = cur_time - datetime.timedelta(hours=1)
    else:
        pre_time = cur_time - datetime.timedelta(minutes=1)
    last_dir_name = FileUtil.parse_timestamp_to_dir(timestamp=pre_time).split('/')
    last_dir_name = '/'.join(
        last_dir_name[:self.PARTITIONER_TYPE_TO_HEIGHT_MAP[self.PARTITIONER_TYPE]])
    last_dir_name = FileUtil.join_paths_to_dir(
        root_dir=self._file_tree.get_root_name(), base_name=last_dir_name)
    self.increment_rpc_count_by(n=1)
    if FileUtil.does_dir_exist(dir_name=last_dir_name):
        return last_dir_name
    else:
        return None
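# The year and month branches above cannot be expressed with datetime.timedelta,
# so get_next_dir/get_previous_dir roll the year and month by hand. Below is a
# minimal standalone sketch of that rollover plus the truncated directory naming.
# It assumes a 'year/month/day/hour/minute' layout and a granularity-to-depth map,
# which only mirror (and are not) FileUtil.parse_timestamp_to_dir and
# PARTITIONER_TYPE_TO_HEIGHT_MAP.
import datetime

GRANULARITY_TO_DEPTH = {'yearly': 1, 'monthly': 2, 'daily': 3, 'hourly': 4, 'minutely': 5}


def next_partition_start(cur_time, granularity):
    """Return the start timestamp of the partition right after cur_time."""
    if granularity == 'yearly':
        return datetime.datetime(cur_time.year + 1, 1, 1)
    if granularity == 'monthly':
        # There is no "one month" timedelta, so handle the December -> January wrap.
        if cur_time.month == 12:
            return datetime.datetime(cur_time.year + 1, 1, 1)
        return datetime.datetime(cur_time.year, cur_time.month + 1, 1)
    step = {'daily': datetime.timedelta(days=1),
            'hourly': datetime.timedelta(hours=1),
            'minutely': datetime.timedelta(minutes=1)}[granularity]
    return cur_time + step


def partition_dir(timestamp, granularity):
    """Format a timestamp as a partition directory truncated to the granularity depth."""
    parts = [f'{timestamp.year:04d}', f'{timestamp.month:02d}', f'{timestamp.day:02d}',
             f'{timestamp.hour:02d}', f'{timestamp.minute:02d}']
    return '/'.join(parts[:GRANULARITY_TO_DEPTH[granularity]]) + '/'


# Example: the monthly partition after 2020-12-15 is '2021/01/'.
print(partition_dir(next_partition_start(datetime.datetime(2020, 12, 15), 'monthly'), 'monthly'))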
def __init__(self, client_name, server_url):
    super().__init__(client_name=client_name, server_url=server_url)
    self._logger = glogging.get_logger(
        log_name=self.get_client_name(),
        log_dir=EnvUtil.get_pslx_env_variable(var='PSLX_DEFAULT_LOG_DIR') +
        'PSLX_INTERNAL/container_backend_client')
    self._backend_folder = FileUtil.join_paths_to_dir(
        root_dir=EnvUtil.get_pslx_env_variable('PSLX_INTERNAL_METADATA_DIR'),
        base_name='PSLX_CONTAINER_BACKEND_TABLE')
def __init__(self, rpc_storage):
    super().__init__(service_name=self.get_class_name(), rpc_storage=rpc_storage)
    self._logger = LoggingTool(
        name='PSLX_CONTAINER_BACKEND_RPC',
        ttl=EnvUtil.get_pslx_env_variable(var='PSLX_INTERNAL_TTL'))
    self._lru_cache_tool = LRUCacheTool(
        max_capacity=EnvUtil.get_pslx_env_variable(var='PSLX_INTERNAL_CACHE'))
    self._backend_folder = FileUtil.join_paths_to_dir(
        root_dir=EnvUtil.get_pslx_env_variable('PSLX_DATABASE'),
        base_name='PSLX_CONTAINER_BACKEND_TABLE')
def __init__(self, rpc_storage):
    super().__init__(service_name=self.get_class_name(), rpc_storage=rpc_storage)
    self._logger = glogging.get_logger(
        log_name='PSLX_CONTAINER_BACKEND_RPC',
        log_dir=EnvUtil.get_pslx_env_variable(var='PSLX_DEFAULT_LOG_DIR') +
        'PSLX_INTERNAL/container_backend_rpc')
    self._lru_cache_tool = LRUCacheTool(
        max_capacity=int(EnvUtil.get_pslx_env_variable(var='PSLX_INTERNAL_CACHE')))
    self._backend_folder = FileUtil.join_paths_to_dir(
        root_dir=EnvUtil.get_pslx_env_variable('PSLX_INTERNAL_METADATA_DIR'),
        base_name='PSLX_CONTAINER_BACKEND_TABLE')
def get_response_and_status_impl(self, request):
    storage_value = ContainerBackendValue()
    storage_value.container_name = request.container_name
    storage_value.container_status = request.status
    for operator_name, operator_snapshot in dict(request.operator_snapshot_map).items():
        operator_info = ContainerBackendValue.OperatorInfo()
        operator_info.status = operator_snapshot.status
        for parent in operator_snapshot.node_snapshot.parents_names:
            operator_info.parents.append(parent)
        operator_info.start_time = operator_snapshot.start_time
        operator_info.end_time = operator_snapshot.end_time
        storage_value.operator_info_map[operator_name].CopyFrom(operator_info)
    storage_value.mode = request.mode
    storage_value.data_model = request.data_model
    storage_value.updated_time = str(TimezoneUtil.cur_time_in_pst())
    storage_value.start_time = request.start_time
    storage_value.end_time = request.end_time
    storage_value.log_dir = request.log_dir
    for key in request.counters:
        storage_value.counters[key] = request.counters[key]
    partitioner_dir = FileUtil.join_paths_to_dir_with_mode(
        root_dir=FileUtil.join_paths_to_dir(
            root_dir=self._backend_folder,
            base_name=ProtoUtil.get_name_by_value(
                enum_type=DataModelType, value=storage_value.data_model)),
        base_name=storage_value.container_name,
        ttl=EnvUtil.get_pslx_env_variable('PSLX_INTERNAL_TTL'))
    if storage_value.mode == ModeType.TEST:
        partitioner_dir = partitioner_dir.replace('PROD', 'TEST')
    storage = self._lru_cache_tool.get(key=partitioner_dir)
    if not storage:
        self.sys_log("Did not find the storage in cache. Making a new one...")
        storage = DailyPartitionerStorage()
        proto_table = ProtoTableStorage()
        storage.set_underlying_storage(storage=proto_table)
        storage.initialize_from_dir(dir_name=partitioner_dir)
        self._lru_cache_tool.set(key=partitioner_dir, value=storage)
    else:
        self.sys_log("Found key in LRU cache.")
    storage.write(
        data={storage_value.container_name: storage_value},
        params={
            'overwrite': True,
            'make_partition': True,
        })
    return None, Status.SUCCEEDED
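# The handler above keeps one partitioner handle per backend directory and only builds
# a new one on a cache miss. A generic sketch of that pattern follows, with a plain dict
# standing in for LRUCacheTool and a hypothetical make_handle factory standing in for the
# DailyPartitionerStorage/ProtoTableStorage setup (the PSLX API itself is not reproduced).
_handles = {}


def get_or_create_handle(key, make_handle):
    """Return the cached handle for key, constructing and memoizing it on first use."""
    handle = _handles.get(key)
    if handle is None:
        handle = make_handle(key)
        _handles[key] = handle
    return handle


# Example with a trivial factory; repeated lookups reuse the same object.
h1 = get_or_create_handle('backend/PROD/my_container', lambda k: {'dir': k})
h2 = get_or_create_handle('backend/PROD/my_container', lambda k: {'dir': k})
assert h1 is h2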
def make_new_partition(self, timestamp):
    new_dir_list = FileUtil.parse_timestamp_to_dir(timestamp=timestamp).split('/')
    for i in range(1, self.PARTITIONER_TYPE_TO_HEIGHT_MAP[self.PARTITIONER_TYPE] + 1):
        new_dir = '/'.join(new_dir_list[:i])
        child_node_name = FileUtil.join_paths_to_dir(
            root_dir=self._file_tree.get_root_name(), base_name=new_dir)
        if not self._file_tree.find_node(child_node_name):
            parent_node_name = FileUtil.join_paths_to_dir(
                root_dir=self._file_tree.get_root_name(),
                base_name='/'.join(new_dir_list[:i - 1]) if i > 1 else '')
            parent_node = self._file_tree.find_node(node_name=parent_node_name)
            child_node = OrderedNodeBase(node_name=child_node_name)
            assert parent_node is not None, "Parent node at least needs to exist."
            self._file_tree.add_node(
                parent_node=parent_node, child_node=child_node, order=SortOrder.REVERSE)
    self._file_tree.trim_tree(max_capacity=self._max_capacity)
def __init__(self, container_name, logger=DummyUtil.dummy_logger()):
    super().__init__()
    self._container_name = container_name
    self._is_initialized = False
    self._snapshot_file_folder = FileUtil.join_paths_to_dir(
        EnvUtil.get_pslx_env_variable(var='PSLX_SNAPSHOT_DIR'),
        self._container_name)
    self._start_time = None
    self._end_time = None
    self._logger = logger
    self._upstream_ops = []
    self._backend = None
    self._status = Status.IDLE
    self._counter = defaultdict(int)
def get_container_snapshot(self, send_backend=True):
    if not self._is_initialized:
        self._logger.error("Warning: taking snapshot when the container [" +
                           self.get_container_name() + "] is not initialized.")
        self.sys_log("Warning: taking snapshot when the container [" +
                     self.get_container_name() + "] is not initialized.")
    snapshot = ContainerSnapshot()
    snapshot.container_name = self._container_name
    snapshot.is_initialized = self._is_initialized
    snapshot.status = self._status
    snapshot.class_name = self.get_full_class_name()
    snapshot.mode = self._mode
    snapshot.data_model = self.DATA_MODEL
    snapshot.log_dir = self._logger.get_log_dir()
    for key, val in self._counter.items():
        snapshot.counters[key] = val
    if self._start_time:
        snapshot.start_time = str(self._start_time)
    if self._end_time:
        snapshot.end_time = str(self._end_time)
    for op_name, op in self._node_name_to_node_dict.items():
        if 'Dummy' in op.get_class_name():
            continue
        op_output_file = FileUtil.join_paths_to_file(
            root_dir=FileUtil.join_paths_to_dir(
                FileUtil.dir_name(self._snapshot_file_folder), 'operators'),
            base_name='SNAPSHOT_' + str(TimezoneUtil.cur_time_in_pst()) + '_' +
            op_name + '.pb')
        snapshot.operator_snapshot_map[op_name].CopyFrom(
            op.get_operator_snapshot(output_file=op_output_file))
    self.sys_log("Snapshot saved to folder [" + self._snapshot_file_folder + '].')
    self._logger.info("Snapshot saved to folder [" + self._snapshot_file_folder + '].')
    output_file_name = FileUtil.join_paths_to_file(
        root_dir=FileUtil.dir_name(self._snapshot_file_folder),
        base_name='SNAPSHOT_' + str(TimezoneUtil.cur_time_in_pst()) + '_' +
        self._container_name + '.pb')
    with FileLockTool(output_file_name, read_mode=False):
        FileUtil.write_proto_to_file(proto=snapshot, file_name=output_file_name)
    if self._backend and send_backend:
        try:
            self._backend.send_to_backend(snapshot=snapshot)
        except Exception as err:
            self._logger.error("Sending backend failed with error " + str(err) + '.')
    return snapshot
def _get_latest_status_of_operators(self):
    operator_status = {}
    snapshot_files = FileUtil.get_file_names_in_dir(
        dir_name=FileUtil.join_paths_to_dir(
            FileUtil.dir_name(self._snapshot_file_folder), 'operators'))
    for snapshot_file in snapshot_files[::-1]:
        operator_name = snapshot_file.split('_')[1]
        if operator_name not in operator_status:
            self._logger.info("Getting status for operator [" + operator_name + '].')
            self.sys_log("Getting status for operator [" + operator_name + '].')
            operator_status[operator_name] = \
                self._node_name_to_node_dict[operator_name].get_status_from_snapshot(
                    snapshot_file=snapshot_file)
            self.sys_log("Status for operator [" + operator_name + '] is [' +
                         ProtoUtil.get_name_by_value(
                             enum_type=Status,
                             value=operator_status[operator_name]) + '].')
        if len(operator_status) == len(self._node_name_to_node_dict):
            break
    return operator_status
def __init__(self, container_name, logger=DummyUtil.dummy_logging(), ttl=-1):
    super().__init__()
    self._container_name = container_name
    self._is_initialized = False
    self._snapshot_file_folder = FileUtil.join_paths_to_dir_with_mode(
        root_dir=FileUtil.join_paths_to_dir(
            root_dir=EnvUtil.get_pslx_env_variable(var='PSLX_DATABASE'),
            base_name='snapshots'),
        base_name=self.get_class_name() + '__' + container_name,
        ttl=ttl)
    self._start_time = None
    self._end_time = None
    self._logger = logger
    self._upstream_ops = []
    self._backend = None
    self._status = Status.IDLE
    self._counter = defaultdict(int)
def make_new_partition(self, timestamp):
    new_dir_list = FileUtil.parse_timestamp_to_dir(timestamp=timestamp).split('/')
    new_dir = '/'.join(
        new_dir_list[:self.PARTITIONER_TYPE_TO_HEIGHT_MAP[self.PARTITIONER_TYPE]])
    child_node = OrderedNodeBase(node_name=FileUtil.join_paths_to_dir(
        root_dir=self._file_tree.get_root_name(), base_name=new_dir))
    if FileUtil.does_dir_exist(dir_name=child_node.get_node_name()):
        self.sys_log('Node [' + child_node.get_node_name() +
                     "] exist. Don't make new partition.")
        return None
    else:
        self.sys_log('Node [' + child_node.get_node_name() +
                     "] doesn't exist. Make new partition.")
        self._logger.info('Node [' + child_node.get_node_name() +
                          "] doesn't exist. Make new partition.")
        FileUtil.create_dir_if_not_exist(dir_name=child_node.get_node_name())
        self.initialize_from_dir(dir_name=self._file_tree.get_root_name())
        return child_node.get_node_name()
def test_join_paths_to_dir(self):
    root_dir = 'database'
    base_name = 'foo'
    self.assertEqual(FileUtil.join_paths_to_dir(root_dir, base_name), 'database/foo/')
def read_range(self, params):
    def _reformat_time(timestamp):
        if self.PARTITIONER_TYPE == PartitionerStorageType.YEARLY:
            timestamp = timestamp.replace(month=1, day=1, hour=0, minute=0,
                                          second=0, microsecond=0, tzinfo=None)
        elif self.PARTITIONER_TYPE == PartitionerStorageType.MONTHLY:
            timestamp = timestamp.replace(day=1, hour=0, minute=0, second=0,
                                          microsecond=0, tzinfo=None)
        elif self.PARTITIONER_TYPE == PartitionerStorageType.DAILY:
            timestamp = timestamp.replace(hour=0, minute=0, second=0,
                                          microsecond=0, tzinfo=None)
        elif self.PARTITIONER_TYPE == PartitionerStorageType.HOURLY:
            timestamp = timestamp.replace(minute=0, second=0, microsecond=0,
                                          tzinfo=None)
        else:
            timestamp = timestamp.replace(second=0, microsecond=0, tzinfo=None)
        return timestamp

    assert 'start_time' in params and 'end_time' in params and \
        params['start_time'] <= params['end_time']
    while self._writer_status != Status.IDLE:
        self.sys_log("Waiting for writer to finish.")
        time.sleep(TimeSleepObj.ONE_SECOND)
    self._reader_status = Status.RUNNING
    oldest_dir, latest_dir = self.get_oldest_dir(), self.get_latest_dir()
    if not latest_dir or not oldest_dir:
        if self.is_empty():
            self._logger.warning("Current partitioner [" + self.get_dir_name() +
                                 "] is empty, cannot read anything.")
            self.sys_log("Current partitioner [" + self.get_dir_name() +
                         "] is empty, cannot read anything.")
        return {}
    oldest_dir = oldest_dir.replace(self._file_tree.get_root_name(), '')
    latest_dir = latest_dir.replace(self._file_tree.get_root_name(), '')
    oldest_timestamp = FileUtil.parse_dir_to_timestamp(dir_name=oldest_dir)
    latest_timestamp = FileUtil.parse_dir_to_timestamp(dir_name=latest_dir)
    start_time = max(_reformat_time(params['start_time']), oldest_timestamp)
    end_time = min(_reformat_time(params['end_time']), latest_timestamp)
    result = {}
    try:
        while start_time <= end_time:
            dir_list = FileUtil.parse_timestamp_to_dir(timestamp=start_time).split('/')
            dir_name = '/'.join(
                dir_list[:self.PARTITIONER_TYPE_TO_HEIGHT_MAP[self.PARTITIONER_TYPE]])
            dir_name = FileUtil.join_paths_to_dir(
                root_dir=self._file_tree.get_root_name(), base_name=dir_name)
            if FileUtil.does_dir_exist(dir_name=dir_name):
                if self._underlying_storage.get_storage_type() == StorageType.PROTO_TABLE_STORAGE:
                    storage = ProtoTableStorage()
                else:
                    storage = DefaultStorage()
                file_names = FileUtil.list_files_in_dir(dir_name=dir_name)
                for file_name in file_names:
                    storage.initialize_from_file(file_name=file_name)
                    if storage.get_storage_type() == StorageType.PROTO_TABLE_STORAGE:
                        result[file_name] = storage.read_all()
                    else:
                        result[file_name] = storage.read(params={'num_line': -1})
            if self.PARTITIONER_TYPE == PartitionerStorageType.YEARLY:
                start_time = start_time.replace(year=start_time.year + 1, month=1, day=1)
            elif self.PARTITIONER_TYPE == PartitionerStorageType.MONTHLY:
                if start_time.month == 12:
                    start_time = start_time.replace(year=start_time.year + 1, month=1, day=1)
                else:
                    start_time = start_time.replace(month=start_time.month + 1)
            elif self.PARTITIONER_TYPE == PartitionerStorageType.DAILY:
                start_time += datetime.timedelta(days=1)
            elif self.PARTITIONER_TYPE == PartitionerStorageType.HOURLY:
                start_time += datetime.timedelta(hours=1)
            else:
                start_time += datetime.timedelta(minutes=1)
        self._reader_status = Status.IDLE
        return result
    except Exception as err:
        self.sys_log("Read range in dir [" + self.get_dir_name() +
                     "] got exception " + str(err) + '.')
        self._logger.error("Read range in dir [" + self.get_dir_name() +
                           "] got exception " + str(err) + '.')
        raise StorageReadException("Read range in dir [" + self.get_dir_name() +
                                   "] got exception " + str(err) + '.')
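# _reformat_time clamps the requested start/end bounds to partition boundaries before
# the scan loop runs. A standalone sketch of that truncation, assuming naive timestamps
# and the same hypothetical granularity names used in the earlier sketch, might be:
import datetime


def truncate_to_partition(ts, granularity):
    """Zero out the sub-partition fields of a timestamp, mirroring _reformat_time."""
    fields = {'yearly': dict(month=1, day=1, hour=0, minute=0, second=0, microsecond=0),
              'monthly': dict(day=1, hour=0, minute=0, second=0, microsecond=0),
              'daily': dict(hour=0, minute=0, second=0, microsecond=0),
              'hourly': dict(minute=0, second=0, microsecond=0),
              'minutely': dict(second=0, microsecond=0)}
    return ts.replace(tzinfo=None, **fields[granularity])


# Example: an hourly partitioner maps 2020-03-05 13:42:07 onto the 13:00 partition.
print(truncate_to_partition(datetime.datetime(2020, 3, 5, 13, 42, 7), 'hourly'))
# 2020-03-05 13:00:00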
# Tail of a Slack webhook handler (method body fragment; `header` is defined earlier
# in the surrounding class), followed by a standalone consumer entry point.
    slack_payload = ("payload={'text':'" + request.message + "\nCurrent time is " +
                     str(TimezoneUtil.cur_time_in_pst()) + "'}")
    status = Status.SUCCEEDED
    try:
        requests.post(request.webhook_url, data=slack_payload, headers=header)
    except Exception as err:
        self._logger.error("Slack failed to send message with err " + str(err))
        status = Status.FAILED
    return None, status


if __name__ == "__main__":
    consumer = GenericConsumer(
        connection_str='amqp://*****:*****@localhost:5672')
    partitioner_dir = FileUtil.join_paths_to_dir_with_mode(
        root_dir=FileUtil.join_paths_to_dir(
            root_dir=EnvUtil.get_pslx_env_variable(var='PSLX_DATABASE'),
            base_name='msg_queue'),
        base_name='msg_queue_example',
        ttl='1h')
    storage = DailyPartitionerStorage()
    storage.initialize_from_dir(dir_name=partitioner_dir)
    slack_queue = SlackQueue(queue_name='slack_queue', queue_storage=storage)
    consumer.bind_queue(exchange='slack_exchange', queue=slack_queue)
    consumer.start_consumer()
def read_range(self, params):
    self.initialize_from_dir(dir_name=self.get_dir_name())

    def _reformat_time(timestamp):
        if self.PARTITIONER_TYPE == PartitionerStorageType.YEARLY:
            timestamp = timestamp.replace(month=1, day=1, hour=0, minute=0,
                                          second=0, microsecond=0, tzinfo=None)
        elif self.PARTITIONER_TYPE == PartitionerStorageType.MONTHLY:
            timestamp = timestamp.replace(day=1, hour=0, minute=0, second=0,
                                          microsecond=0, tzinfo=None)
        elif self.PARTITIONER_TYPE == PartitionerStorageType.DAILY:
            timestamp = timestamp.replace(hour=0, minute=0, second=0,
                                          microsecond=0, tzinfo=None)
        elif self.PARTITIONER_TYPE == PartitionerStorageType.HOURLY:
            timestamp = timestamp.replace(minute=0, second=0, microsecond=0,
                                          tzinfo=None)
        else:
            timestamp = timestamp.replace(second=0, microsecond=0, tzinfo=None)
        return timestamp

    assert 'start_time' in params and 'end_time' in params and \
        params['start_time'] <= params['end_time']
    oldest_dir, latest_dir = self._get_oldest_dir_in_root_directory_interal(), \
        self._get_latest_dir_internal()
    if not latest_dir or not oldest_dir:
        self._logger.warning("Current partitioner [" + self.get_dir_name() +
                             "] is empty, cannot read anything.")
        self._SYS_LOGGER.warning("Current partitioner [" + self.get_dir_name() +
                                 "] is empty, cannot read anything.")
        return {}
    oldest_dir = oldest_dir.replace(self._file_tree.get_root_name(), '')
    latest_dir = latest_dir.replace(self._file_tree.get_root_name(), '')
    oldest_timestamp = FileUtil.parse_dir_to_timestamp(dir_name=oldest_dir)
    latest_timestamp = FileUtil.parse_dir_to_timestamp(dir_name=latest_dir)
    start_time = max(_reformat_time(params['start_time']), oldest_timestamp)
    end_time = min(_reformat_time(params['end_time']), latest_timestamp)
    result = {}
    try:
        all_file_names = []
        while start_time <= end_time:
            dir_list = FileUtil.parse_timestamp_to_dir(timestamp=start_time).split('/')
            dir_name = '/'.join(
                dir_list[:self.PARTITIONER_TYPE_TO_HEIGHT_MAP[self.PARTITIONER_TYPE]])
            dir_name = FileUtil.join_paths_to_dir(
                root_dir=self._file_tree.get_root_name(), base_name=dir_name)
            try:
                self.increment_rpc_count_by(n=1)
                file_names = FileUtil.list_files_in_dir(dir_name=dir_name)
                all_file_names.extend(file_names)
            except Exception as _:
                pass
            if self.PARTITIONER_TYPE == PartitionerStorageType.YEARLY:
                start_time = start_time.replace(year=start_time.year + 1, month=1, day=1)
            elif self.PARTITIONER_TYPE == PartitionerStorageType.MONTHLY:
                if start_time.month == 12:
                    start_time = start_time.replace(year=start_time.year + 1, month=1, day=1)
                else:
                    start_time = start_time.replace(month=start_time.month + 1)
            elif self.PARTITIONER_TYPE == PartitionerStorageType.DAILY:
                start_time += datetime.timedelta(days=1)
            elif self.PARTITIONER_TYPE == PartitionerStorageType.HOURLY:
                start_time += datetime.timedelta(hours=1)
            else:
                start_time += datetime.timedelta(minutes=1)
        result = {}
        self.increment_rpc_count_by(n=1)
        if self._underlying_storage.get_storage_type() == StorageType.PROTO_TABLE_STORAGE:
            tmp_result = gclient_ext.read_proto_messages(
                paths=all_file_names, message_type=ProtoTable)
            for file_name, v in tmp_result.items():
                result[file_name] = dict(v.data)
        else:
            tmp_result = gclient_ext.read_txts(all_file_names)
            for file_name, v in tmp_result.items():
                result[file_name] = v.rstrip().split('\n')
        return result
    except Exception as err:
        self._SYS_LOGGER.error("Read range in dir [" + self.get_dir_name() +
                               "] got exception " + str(err) + '.')
        self._logger.error("Read range in dir [" + self.get_dir_name() +
                           "] got exception " + str(err) + '.')
        raise StorageReadException("Read range in dir [" + self.get_dir_name() +
                                   "] got exception " + str(err) + '.')
import datetime
from galaxy_py import gclient
from flask import render_template, request
from flask_login import login_required
from pslx.micro_service.frontend import pslx_frontend_ui_app, pslx_frontend_logger
from pslx.schema.enums_pb2 import Status, ModeType, DataModelType
from pslx.schema.storage_pb2 import ContainerBackendValue
from pslx.storage.proto_table_storage import ProtoTableStorage
from pslx.util.env_util import EnvUtil
from pslx.util.file_util import FileUtil
from pslx.util.proto_util import ProtoUtil
from pslx.util.timezone_util import TimezoneUtil

container_backend_config = pslx_frontend_ui_app.config['frontend_config'].container_backend_config
backend_folder = FileUtil.join_paths_to_dir(
    root_dir=EnvUtil.get_pslx_env_variable('PSLX_INTERNAL_METADATA_DIR'),
    base_name='PSLX_CONTAINER_BACKEND_TABLE')
galaxy_viewer_url = pslx_frontend_ui_app.config['frontend_config'].galaxy_viewer_url
if galaxy_viewer_url and galaxy_viewer_url[-1] == '/':
    galaxy_viewer_url = galaxy_viewer_url[:-1]


def get_containers_info():
    containers_info = []
    existing_containers = {}
    all_proto_files = set()
    if not FileUtil.is_local_path(backend_folder):
        all_cells = ['']
    else:
        all_cells = gclient.list_cells()
def get_container_snapshot(self, send_backend=True):
    if not self._is_initialized:
        self._logger.error("Warning: taking snapshot when the container [" +
                           self.get_container_name() + "] is not initialized.")
        self._SYS_LOGGER.error("Warning: taking snapshot when the container [" +
                               self.get_container_name() + "] is not initialized.")
    snapshot = ContainerSnapshot()
    snapshot.container_name = self._container_name
    snapshot.is_initialized = self._is_initialized
    snapshot.status = self._status
    snapshot.class_name = self.get_full_class_name()
    snapshot.mode = self._mode
    snapshot.data_model = self.DATA_MODEL
    snapshot.log_file = FileUtil.convert_local_to_cell_path(
        glogging.get_logger_file(self._logger))
    snapshot.run_cell = EnvUtil.get_other_env_variable(
        var='GALAXY_fs_cell', fallback_value='')
    snapshot.snapshot_cell = FileUtil.get_cell_from_path(
        FileUtil.convert_local_to_cell_path(self._snapshot_file_folder))
    for key, val in self._counter.items():
        snapshot.counters[key] = val
    if self._start_time:
        snapshot.start_time = str(self._start_time)
    if self._end_time:
        snapshot.end_time = str(self._end_time)
    for op_name, op in self._node_name_to_node_dict.items():
        if 'Dummy' in op.get_class_name():
            continue
        op_output_file = FileUtil.join_paths_to_file(
            root_dir=FileUtil.join_paths_to_dir(
                FileUtil.dir_name(self._snapshot_file_folder), 'operators'),
            base_name=op_name + '_SNAPSHOT_' + str(TimezoneUtil.cur_time_in_pst()) + '.pb')
        snapshot.operator_snapshot_map[op_name].CopyFrom(
            op.get_operator_snapshot(output_file=op_output_file))
    self._SYS_LOGGER.info(
        "Snapshot saved to folder [" +
        FileUtil.convert_local_to_cell_path(self._snapshot_file_folder) + '].')
    self._logger.info(
        "Snapshot saved to folder [" +
        FileUtil.convert_local_to_cell_path(self._snapshot_file_folder) + '].')
    output_file_name = FileUtil.join_paths_to_file(
        root_dir=FileUtil.join_paths_to_dir(
            FileUtil.dir_name(self._snapshot_file_folder), 'containers'),
        base_name=self._container_name + '_SNAPSHOT_' +
        str(TimezoneUtil.cur_time_in_pst()) + '.pb')
    FileUtil.write_proto_to_file(proto=snapshot, file_name=output_file_name)
    if self._backend and send_backend:
        try:
            self._backend.send_to_backend(snapshot=snapshot)
        except Exception as err:
            self._logger.error("Sending backend failed with error " + str(err) + '.')
    return snapshot