Code Example #1
File: logging_renderer.py  Project: alpha-hunter/pslx
class FrontendDedicatedLoggingOp(StreamingOperator):

    def __init__(self):
        super().__init__(operator_name='FRONTEND_DEDICATED_LOGGING_OP')
        self._pslx_dedicated_logging_storage = ProtoTableStorage(logger=pslx_frontend_logger)
        self._pslx_dedicated_logging_storage.initialize_from_file(
            file_name=pslx_dedicated_logging_storage_path
        )
        self._cached_logging = {}
        self._logging_storage_capacity = max(int(EnvUtil.get_pslx_env_variable(var='PSLX_INTERNAL_CACHE')), 700)

    def pubsub_msg_parser(self, exchange_name, topic_name, message):
        self._cached_logging[str(TimezoneUtil.cur_time_in_pst())] = message
        if len(self._cached_logging) >= int(EnvUtil.get_pslx_env_variable('PSLX_RPC_FLUSH_RATE')):
            self._pslx_dedicated_logging_storage.write(
                data=self._cached_logging
            )
            self._cached_logging.clear()

            total_entries = self._pslx_dedicated_logging_storage.get_num_entries()
            if total_entries > self._logging_storage_capacity:
                try:
                    all_data = self._pslx_dedicated_logging_storage.read_all()
                    all_sorted_keys = sorted(list(all_data.keys()))
                    for i in range(total_entries - self._logging_storage_capacity):
                        key_to_delete = all_sorted_keys[i]
                        self._pslx_dedicated_logging_storage.delete(key=key_to_delete)
                except Exception as _:
                    # Eviction is best-effort; never let cleanup break logging.
                    pass

    def execute_impl(self):
        pslx_dedicated_subscriber.bind_to_op(self)
        pslx_dedicated_subscriber.start()
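
The flush-and-evict logic above buffers messages in memory, writes them out in batches, and then drops the oldest table entries once capacity is exceeded. A minimal sketch of just the eviction step, using only the ProtoTableStorage calls that appear in these examples (get_num_entries, read_all, delete); the helper name trim_to_capacity is hypothetical:

def trim_to_capacity(storage, capacity):
    # Drop the oldest entries until the table holds at most `capacity` items.
    total_entries = storage.get_num_entries()
    if total_entries <= capacity:
        return
    # Keys are timestamp strings, so lexicographic order is chronological.
    oldest_first = sorted(storage.read_all().keys())
    for key in oldest_first[:total_entries - capacity]:
        storage.delete(key=key)
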
Code Example #2
    def test_write_1(self):
        proto_table_storage = ProtoTableStorage()
        proto_table_storage.initialize_from_file(file_name=self.TEST_DATA_3)
        proto_table_storage.write(data={'test': self.EXAMPLE_PROTO_1})
        result_proto = proto_table_storage.read(params={
            'key': 'test',
            'message_type': NodeSnapshot
        })
        self.assertEqual(result_proto, self.EXAMPLE_PROTO_1)
        # Restore the original fixture so the write above leaves no residue.
        gclient_ext.cp_file(self.TEST_DATA_1, self.TEST_DATA_3)
Code Example #3
File: util.py  Project: kfrancischen/pslx
    def get_response_impl(backend_folder, request, lru_cache=None):
        storage_value = ContainerBackendValue()
        storage_value.container_name = request.container_name
        storage_value.container_status = request.status
        for operator_name, operator_snapshot in dict(
                request.operator_snapshot_map).items():
            operator_info = ContainerBackendValue.OperatorInfo()
            operator_info.status = operator_snapshot.status
            for parent in operator_snapshot.node_snapshot.parents_names:
                operator_info.parents.append(parent)

            operator_info.start_time = operator_snapshot.start_time
            operator_info.end_time = operator_snapshot.end_time
            operator_info.log_file = operator_snapshot.log_file
            storage_value.operator_info_map[operator_name].CopyFrom(
                operator_info)

        storage_value.mode = request.mode
        storage_value.data_model = request.data_model
        storage_value.updated_time = str(TimezoneUtil.cur_time_in_pst())
        storage_value.start_time = request.start_time
        storage_value.end_time = request.end_time
        storage_value.log_file = request.log_file
        storage_value.run_cell = request.run_cell
        storage_value.snapshot_cell = request.snapshot_cell
        for key in request.counters:
            storage_value.counters[key] = request.counters[key]
        storage_value.ttl = int(
            EnvUtil.get_pslx_env_variable('PSLX_BACKEND_CONTAINER_TTL'))

        storage = lru_cache.get(
            key=storage_value.container_name) if lru_cache else None

        if not storage:
            storage = ProtoTableStorage()
            storage.initialize_from_file(file_name=FileUtil.join_paths_to_file(
                root_dir=backend_folder,
                base_name=storage_value.container_name + '.pb'))
            if lru_cache:
                # Cache under the container name, the same key used for the
                # lookup above, so later requests actually hit the cache.
                lru_cache.set(key=storage_value.container_name, value=storage)
        all_data = storage.read_all()
        # Chained comparison: evict the oldest entry only when a positive
        # capacity is configured and the table has reached it.
        if len(all_data) >= int(
                EnvUtil.get_pslx_env_variable('PSLX_INTERNAL_CACHE')) > 0:
            key_to_delete = sorted(all_data.keys())[0]
            storage.delete(key=key_to_delete)

        storage.write(data={storage_value.start_time: storage_value})
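
The lookup above follows a get-or-open pattern: consult the LRU cache first, otherwise open the backing .pb file and cache the handle for later requests. A minimal sketch under the same assumptions (the lru_cache get/set signatures are taken from the snippet itself; open_container_storage is a hypothetical name):

def open_container_storage(backend_folder, container_name, lru_cache=None):
    # Reuse a cached handle when one exists for this container.
    storage = lru_cache.get(key=container_name) if lru_cache else None
    if not storage:
        storage = ProtoTableStorage()
        storage.initialize_from_file(file_name=FileUtil.join_paths_to_file(
            root_dir=backend_folder,
            base_name=container_name + '.pb'))
        if lru_cache:
            # Cache under the same key used for the lookup so later calls hit.
            lru_cache.set(key=container_name, value=storage)
    return storage
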
Code Example #4
    def test_write_2(self):
        proto_table_storage = ProtoTableStorage()
        proto_table_storage.initialize_from_file(file_name=self.TEST_DATA_4)
        proto_table_storage.write(data={'test': self.EXAMPLE_PROTO_1})
        proto_table_storage.write(data={'test_1': self.EXAMPLE_PROTO_2})
        result_proto = proto_table_storage.read(
            params={
                'key': 'test_1',
                'message_type': OperatorSnapshot
            })
        self.assertEqual(result_proto, self.EXAMPLE_PROTO_2)

        proto_table_storage.write(data={'test_1': self.EXAMPLE_PROTO_3})
        result_proto = proto_table_storage.read(
            params={
                'key': 'test_1',
                'message_type': OperatorSnapshot
            })
        self.assertEqual(result_proto, self.EXAMPLE_PROTO_3)

        result = proto_table_storage.read_all()
        self.assertDictEqual(
            result, {
                'test': ProtoUtil.message_to_any(self.EXAMPLE_PROTO_1),
                'test_1': ProtoUtil.message_to_any(self.EXAMPLE_PROTO_3),
            })

        self.assertEqual(proto_table_storage.get_num_entries(), 2)
        gclient_ext.cp_file(self.TEST_DATA_2, self.TEST_DATA_4)
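
Note how read and read_all differ in this test: read unpacks the stored value into the message_type given in params, while read_all returns the values still wrapped as protobuf Any messages, which is why the expected dictionary wraps each proto with ProtoUtil.message_to_any.
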
Code Example #5
    def write(self, data, params=None):
        if not params:
            params = {}
        if 'overwrite' not in params:
            params['overwrite'] = True
        assert isinstance(data, dict)

        existing_shard_to_data_map = defaultdict(dict)
        new_data = {}
        for key, val in data.items():
            if key in self._index_map.index_map:
                # Keys already indexed are routed back to their existing shard.
                existing_shard_to_data_map[self._index_map.index_map[key]][key] = val
            else:
                new_data[key] = val

        try:
            for shard, existing_data in existing_shard_to_data_map.items():
                related_proto_file = self._shard_to_file(shard=shard)
                proto_table = ProtoTableStorage()
                proto_table.initialize_from_file(file_name=related_proto_file)
                proto_table.write(data=existing_data, params=params)
                self.increment_rpc_count_by(n=proto_table.get_rpc_call_count_and_reset())
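            # New keys fill the remaining room in the latest shard first,
            # then spill into freshly created shards (see the loop below).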
            if new_data:
                all_new_keys = list(new_data.keys())
                latest_shard = self.get_latest_shard()
                latest_proto_file = self._shard_to_file(shard=latest_shard)
                proto_table = ProtoTableStorage()
                proto_table.initialize_from_file(file_name=latest_proto_file)
                latest_proto_table_size = proto_table.get_num_entries()
                proto_table.write(
                    data={key: new_data[key] for key in all_new_keys[:self._size_per_shard - latest_proto_table_size]},
                    params=params
                )
                self.increment_rpc_count_by(n=proto_table.get_rpc_call_count_and_reset())
                for key in all_new_keys[:self._size_per_shard - latest_proto_table_size]:
                    self._index_map.index_map[key] = latest_shard

                if len(all_new_keys) > self._size_per_shard - latest_proto_table_size:
                    remaining_new_keys = all_new_keys[self._size_per_shard - latest_proto_table_size:]
                    start_index = 0
                    while start_index < len(remaining_new_keys):
                        data_for_new_shard = remaining_new_keys[start_index:start_index + self._size_per_shard]
                        latest_shard += 1
                        self._index_map.cur_shard += 1
                        proto_file = self._shard_to_file(shard=latest_shard)
                        self._logger.info("Write to new file with name [" + proto_file + '] and shard [' +
                                          str(latest_shard) + '].')
                        proto_table = ProtoTableStorage()
                        proto_table.initialize_from_file(file_name=proto_file)
                        proto_table.write(
                            data={key: new_data[key] for key in data_for_new_shard},
                            params=params
                        )
                        self.increment_rpc_count_by(n=proto_table.get_rpc_call_count_and_reset())
                        for key in data_for_new_shard:
                            self._index_map.index_map[key] = latest_shard

                        start_index += self._size_per_shard
                self._logger.info("Writing the index map to [" + self._index_map_file + '].')
                self.increment_rpc_count_by(n=1)
                FileUtil.write_proto_to_file(
                    proto=self._index_map,
                    file_name=self._index_map_file
                )

        except Exception as err:
            self._SYS_LOGGER.error("Write to dir [" + self.get_dir_name() + "] got exception: " + str(err) + '.')
            self._logger.error("Write to dir [" + self.get_dir_name() + "] got exception: " + str(err) + '.')
            raise StorageWriteException("Write to dir [" + self.get_dir_name() + "] got exception: " + str(err) + '.')
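
The bookkeeping in this write method comes down to one question: how many new keys still fit in the partially filled latest shard, and how the remainder splits into fresh shards of size_per_shard keys each. A minimal sketch of just that chunking, with hypothetical names:

def split_into_shards(new_keys, size_per_shard, room_in_latest_shard):
    # The first chunk fills the remaining room in the latest shard; the rest
    # spill into new shards of at most size_per_shard keys each.
    chunks = [new_keys[:room_in_latest_shard]]
    rest = new_keys[room_in_latest_shard:]
    for start in range(0, len(rest), size_per_shard):
        chunks.append(rest[start:start + size_per_shard])
    return [chunk for chunk in chunks if chunk]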