Пример #1
0
    def put(self, value):
        """
        Put value into event_cache.

        The write happens under `self._lock`: the event is stored at the
        current write index, the index wraps back to 0 at `self.max_limit`,
        and on each wrap the uuid flag pair is rotated so positions from an
        older cache generation can be told apart.

        Args:
            value (dict): The event to be put into cache. Its 'metadata'
                entry is created/updated in place with the next position.

        Raises:
            DebuggerParamValueError: If `value` is not a dict.
        """
        if not isinstance(value, dict):
            log.error("Dict type required when put event message.")
            raise DebuggerParamValueError("Dict type required when put event message.")

        with self._lock:
            log.debug("Put the %d-th message into queue. \n %d requests is waiting.",
                      self._next_idx, len(self._pending_requests))
            cur_pos = self._next_idx
            # Advance the write index, wrapping at max_limit; on wrap, rotate
            # the flags tagging the current/previous cache generation.
            self._next_idx += 1
            if self._next_idx >= self.max_limit:
                self._next_idx = 0
                self._prev_flag = self._cur_flag
                self._cur_flag = str(uuid.uuid4())
            # Stamp the event with the position a client should poll next
            # (presumably consumed by poll/get_data — confirm with callers).
            if not value.get('metadata'):
                value['metadata'] = {}
            value['metadata']['pos'] = self.next_pos
            self._event_cache[cur_pos] = value
            # Feed the new value to any pending requests.
            self.clean_pending_requests(value)
Пример #2
0
    def _continue(self, metadata_stream, params):
        """
        Send RunCMD to MindSpore.

        Args:
            metadata_stream (MetadataHandler): The metadata handler whose
                `state` is checked and updated.
            params (dict): The control params used to build the run event.

        Returns:
            dict, metadata carrying the resulting server state.

        Raises:
            DebuggerContinueError: If MindSpore is not in waiting state, or
                sending the run command fails.
        """
        if metadata_stream.state != ServerStatus.WAITING.value:
            log.error("MindSpore is not ready to run. Current state is: %s",
                      metadata_stream.state)
            raise DebuggerContinueError(
                "MindSpore is not ready to run or is running currently.")
        # Optimistically mark the server as running before queueing the command.
        metadata_stream.state = ServerStatus.RUNNING.value
        current_state = ServerStatus.RUNNING.value
        try:
            event = self._construct_run_event(params)
            self._send_watchpoints()
            self.cache_store.put_command(event)
        except MindInsightException as err:
            log.error("Failed to send run event.")
            log.exception(err)
            # Roll the state back so the client can retry.
            current_state = ServerStatus.WAITING.value
            metadata_stream.state = current_state
            raise DebuggerContinueError("Failed to send run command.")
        else:
            log.debug("Send the RunCMD to command queue.")

        return {'metadata': {'state': current_state}}
Пример #3
0
 def _validate_leaf_name(self, node_name):
     """Raise an error when the given node name refers to a scope node."""
     graph = self.cache_store.get_stream_handler(Streams.GRAPH)
     if is_scope_type(graph.get_node_type(node_name)):
         log.error("Scope type node has no tensor history.")
         raise DebuggerParamValueError("Invalid leaf node name.")
Пример #4
0
    def get_tensor_value_by_shape(self, shape=None):
        """
        Get tensor value by shape.

        Args:
            shape (tuple): The specified shape used to slice the cached value.
                If None or not a tuple, the whole value is returned.

        Returns:
            Union[None, str, numpy.ndarray], the sub-tensor. None when no
            value is cached yet; a placeholder string when the sliced result
            is too large to display on the UI.

        Raises:
            DebuggerParamValueError: If `shape` does not match the tensor shape.
        """
        if self._value is None:
            log.warning("%s has no value yet.", self.name)
            return None
        if shape is None or not isinstance(shape, tuple):
            log.info("Get the whole tensor value with shape is %s", shape)
            return self._value
        if len(shape) != len(self.shape):
            log.error("Invalid shape. Received: %s, tensor shape: %s", shape,
                      self.shape)
            raise DebuggerParamValueError("Invalid shape. Shape unmatched.")
        try:
            value = self._value[shape]
        except IndexError as err:
            log.error("Invalid shape. Received: %s, tensor shape: %s", shape,
                      self.shape)
            log.exception(err)
            raise DebuggerParamValueError("Invalid shape. Shape unmatched.") from err
        if isinstance(value, np.ndarray):
            if value.size > self.max_number_data_show_on_ui:
                # Fix: the original log call had a `%s` placeholder with no
                # argument, which raises a formatting error inside logging.
                # Log the size before replacing `value` with the placeholder.
                log.info(
                    "The tensor size is %s, which is too large to show on UI.",
                    value.size)
                value = "Too large to show."
        else:
            value = np.asarray(value)
        return value
Пример #5
0
    def retrieve(self, mode, filter_condition=None):
        """
        Retrieve data according to mode and params.

        Args:
            mode (str): The type of info message. One of 'all', 'node',
                'watchpoint' or 'watchpoint_hit'.
            filter_condition (dict): The filter condition. Default: None,
                treated as an empty condition.

        Returns:
            dict, the retrieved data.

        Raises:
            DebuggerParamTypeError: If `mode` is not a supported value.
        """
        log.info(
            "receive retrieve request for mode:%s\n, filter_condition: %s",
            mode, filter_condition)

        mode_mapping = {
            'all': self._retrieve_all,
            'node': self._retrieve_node,
            'watchpoint': self._retrieve_watchpoint,
            'watchpoint_hit': self._retrieve_watchpoint_hit
        }
        # validate param <mode>
        if mode not in mode_mapping:
            # Fix: the original logged the whole dispatch table instead of
            # the received mode value.
            log.error(
                "Invalid param <mode>. <mode> should be in ['all', 'node', 'watchpoint', "
                "'watchpoint_hit', 'tensor'], but got %s.", mode)
            raise DebuggerParamTypeError("Invalid mode.")
        filter_condition = {} if filter_condition is None else filter_condition
        return mode_mapping[mode](filter_condition)
Пример #6
0
    def control(self, params=None):
        """
        Control the training process.

        Args:
            params (dict): The control params.

                - mode (str): Acceptable control command, including `continue`,
                    `pause` and `terminate`.

                - level (str): The control granularity, `node` level or `step` level.
                    Default: `step`.

                - steps (int): Specify the steps that training should run.
                    Used when `level` is `step`.

                - name (str): Specify the name of the node. Used when `level` is `node`.

        Returns:
            dict, the response.
        """
        log.info("Receive control request: %s.", params)
        mode = params.get('mode')
        metadata_stream = self.cache_store.get_stream_handler(Streams.METADATA)
        # Dispatch table: each entry handles one control command.
        handlers = {
            'continue': lambda: self._continue(metadata_stream, params),
            'pause': lambda: self._pause(metadata_stream),
            'terminate': lambda: self._terminate(metadata_stream),
        }
        handler = handlers.get(mode)
        if handler is None:
            log.error("Invalid control mode %s", mode)
            raise DebuggerParamValueError("Invalid control mode.")
        return handler()
Пример #7
0
 def remove(self, sub_name):
     """
     Remove a sub node from this node's children.

     Args:
         sub_name (str): The name of the sub node to remove.

     Raises:
         DebuggerParamValueError: If no child named `sub_name` exists.
     """
     try:
         self._children.pop(sub_name)
     except KeyError as err:
         log.error("Failed to find node %s. %s", sub_name, err)
         # Chain the KeyError so the traceback keeps the root cause.
         raise DebuggerParamValueError(
             "Failed to find node {}".format(sub_name)) from err
Пример #8
0
    def get_watchpoint_by_id(self, watchpoint_id):
        """
        Return the watchpoint registered under `watchpoint_id`.

        Raises:
            DebuggerParamValueError: If no watchpoint matches the id.
        """
        watchpoint = self._watchpoints.get(watchpoint_id)
        if watchpoint:
            return watchpoint
        log.error("Invalid watchpoint id %d", watchpoint_id)
        raise DebuggerParamValueError(
            "Invalid watchpoint id {}".format(watchpoint_id))
Пример #9
0
 def validate_tensor_param(name, detail):
     """Check params for a retrieve-tensor request."""
     # The ui tensor name must be a string containing a slot separator.
     name_ok = isinstance(name, str) and ':' in name
     if not name_ok:
         log.error("Invalid tensor name. Received: %s", name)
         raise DebuggerParamValueError("Invalid tensor name.")
     # Only concrete tensor data queries are supported.
     if detail != 'data':
         log.error("Invalid detail value. Received: %s", detail)
         raise DebuggerParamValueError("Invalid detail value.")
Пример #10
0
    def _graph_exists(self):
        """
        Ensure a graph has been loaded into the debugger cache.

        Raises:
            DebuggerGraphNotExistError: If no graph is loaded yet.
        """
        if self._graph is not None:
            return
        log.error('The graph does not exist. Please start the '
                  'training script and try again.')
        raise DebuggerGraphNotExistError
Пример #11
0
 def _validate_node_type(self, node_name):
     """Reject node names whose type cannot be used for node-level control."""
     if not node_name:
         return
     graph_stream = self.cache_store.get_stream_handler(Streams.GRAPH)
     node_type = graph_stream.get_node_type(node_name)
     # Every enum value is rejected for node-level continue.
     blocked = {item.value for item in NodeTypeEnum}
     if node_type in blocked:
         log.error("Invalid node type. %s", node_name)
         raise DebuggerParamValueError(
             f"The type of node {node_name} is unsupported for "
             "continue to command.")
Пример #12
0
def validate_watch_condition(watch_condition):
    """Check that `watch_condition` is a dict with a known condition type."""
    if not isinstance(watch_condition, dict):
        log.error("<watch_condition> should be dict. %s received.",
                  watch_condition)
        raise DebuggerParamTypeError("<watch_condition> should be dict.")
    # The condition type must be one of the supported mappings.
    condition = watch_condition.get('condition')
    if condition not in WATCHPOINT_CONDITION_MAPPING:
        log.error("Invalid watch condition. Acceptable values are <%s>.",
                  str(WATCHPOINT_CONDITION_MAPPING.keys()))
        raise DebuggerParamValueError("Invalid watch condition value.")
    # Delegate per-condition parameter checks.
    validate_watch_condition_params(watch_condition)
Пример #13
0
    def _retrieve_all(self, filter_condition=None):
        """Collect metadata, root graph and watchpoint info into one reply."""
        if filter_condition:
            log.error("No filter condition required for retrieve all request.")
            raise DebuggerParamTypeError("filter_condition should be empty.")
        self._watch_point_id = 0
        self.cache_store.clean_data()
        log.info("Clean data queue cache when retrieve all request.")
        result = {}
        for stream in (Streams.METADATA, Streams.GRAPH, Streams.WATCHPOINT):
            result.update(self.cache_store.get_stream_handler(stream).get())
        return result
Пример #14
0
    def get_tensors_diff(self, tensor_name, shape, tolerance=0):
        """
        Get tensor comparison data for a given name, shape and tolerance.

        Args:
            tensor_name (str): The name of tensor for cache.
            shape (tuple): Specify concrete dimensions of shape.
            tolerance (Union[int, float]): Tolerance of the difference between
                the current step tensor and the previous step tensor.
                Default: 0. It is a percentage: the boundary value is
                max(abs(min), abs(max)) * tolerance, where min/max are taken
                over (current - previous). Differences with absolute value at
                or below the boundary are set to zero.

        Raises:
            DebuggerParamValueError: If the current or previous step tensor
                cannot be retrieved.

        Returns:
            dict, the retrieved comparison data.
        """
        curr_tensor = self.get_tensor_value_by_name(tensor_name)
        prev_tensor = self.get_tensor_value_by_name(tensor_name, prev=True)
        if not (curr_tensor and prev_tensor):
            log.error(
                "Get current step and previous step for this tensor name %s failed.",
                tensor_name)
            raise DebuggerParamValueError(
                f"Get current step and previous step for this tensor name "
                f"{tensor_name} failed.")
        curr_tensor_slice = curr_tensor.get_tensor_value_by_shape(shape)
        prev_tensor_slice = prev_tensor.get_tensor_value_by_shape(shape)
        # Strip fields that are meaningless in a diff reply.
        tensor_info = curr_tensor.get_basic_info()
        if isinstance(tensor_info, dict):
            del tensor_info['has_prev_step']
            del tensor_info['value']
        # the type of curr_tensor_slice is one of None, np.ndarray or str
        if isinstance(curr_tensor_slice, np.ndarray) and isinstance(
                prev_tensor_slice, np.ndarray):
            diff_tensor = TensorUtils.calc_diff_between_two_tensor(
                curr_tensor_slice, prev_tensor_slice, tolerance)
            # Pack (prev, curr, diff) per element along a trailing axis.
            result = np.stack(
                [prev_tensor_slice, curr_tensor_slice, diff_tensor], axis=-1)
            tensor_info['diff'] = result.tolist()
            stats = TensorUtils.get_statistics_from_tensor(diff_tensor)
            tensor_info['statistics'] = TensorUtils.get_statistics_dict(stats)
        elif isinstance(curr_tensor_slice, str):
            # Slice was too large to show; forward the placeholder string.
            tensor_info['diff'] = curr_tensor_slice
        reply = {'tensor_value': tensor_info}
        return reply
Пример #15
0
    def _parse_pos(self, pos):
        """Split a position string into its (flag, index) parts."""
        elements = pos.split(':')
        try:
            idx = int(elements[-1])
        except ValueError:
            log.error("Invalid index. The index in pos should be digit but get pos:%s", pos)
            raise DebuggerParamValueError("Invalid pos.")

        # The index must fall inside the cache window.
        if not 0 <= idx < self.max_limit:
            log.error("Invalid index. The index in pos should between [0, %d)", self.max_limit)
            raise DebuggerParamValueError(f"Invalid pos. {idx}")
        flag = elements[0] if len(elements) == 2 else ''
        return flag, idx
Пример #16
0
    def _pause(self, metadata_stream):
        """
        Pause the training.

        Args:
            metadata_stream (MetadataHandler): The metadata stream handler.

        Returns:
            dict, metadata with the new server state.

        Raises:
            DebuggerPauseError: If MindSpore is not currently running.
        """
        if metadata_stream.state != ServerStatus.RUNNING.value:
            log.error("The MindSpore is not running.")
            raise DebuggerPauseError("The MindSpore is not running.")
        # Use the enum value instead of the hard-coded 'waiting' literal so
        # state handling stays consistent with `_continue`.
        metadata_stream.state = ServerStatus.WAITING.value
        event = get_ack_reply()
        # A zero-step run command asks the backend to stop at the next step.
        event.run_cmd.CopyFrom(RunCMD(run_level='step', run_steps=0))
        self.cache_store.put_command(event)
        log.debug("Send the Pause command")
        return {'metadata': {'state': ServerStatus.WAITING.value}}
Пример #17
0
    def poll_data(self, pos):
        """
        Get the pos-th data from DebuggerCache.

        Args:
            pos (str): The position string of the data to read. (Note: the
                original docstring said `int`, but the code only accepts
                strings — see the isinstance check below.)

        Returns:
            dict, the data to be updated.

        Raises:
            DebuggerParamValueError: If `pos` is not a string.
        """
        if not isinstance(pos, str):
            log.error("Pos should be string. Received: %s", pos)
            raise DebuggerParamValueError("Pos should be string.")

        reply = self.cache_store.get_data(pos)

        return reply
Пример #18
0
 def parse_shape(shape):
     """Convert a '[...]' shape string into a tuple of ints and slices."""
     if shape is None:
         return shape
     bracketed = (isinstance(shape, str) and shape.startswith('[')
                  and shape.endswith(']'))
     if not bracketed:
         log.error("Invalid shape. Received: %s", shape)
         raise DebuggerParamValueError("Invalid shape.")
     shape = shape.strip('[]')
     if shape.count(':') > 2:
         log.error("Invalid shape. At most two dimensions are specified.")
         raise DebuggerParamValueError("Invalid shape.")
     if shape:
         parsed_shape = tuple(
             str_to_slice_or_int(dim) for dim in shape.split(','))
     else:
         parsed_shape = tuple()
     log.info("Parsed shape: %s from %s", parsed_shape, shape)
     return parsed_shape
Пример #19
0
    def tensor_comparisons(self, name, shape, detail='data', tolerance='0'):
        """
        Get tensor comparisons data for given name, detail, shape and tolerance.

        Args:
            name (str): The name of tensor for ui.
            detail (str): Specify which data to query. Current available value is 'data' which means
                          concrete tensor data. Histogram or unique count can be supported in the future.
            shape (str): Specify concrete dimensions of shape.
            tolerance (str): Specify tolerance of difference between current step tensor and previous
                             step tensor. Default value is 0.

        Raises:
            DebuggerParamValueError: If node type is not parameter or value of detail is not support.
            DebuggerCompareTensorError: If MindSpore is not in waiting state.

        Returns:
            dict, the retrieved data.
        """
        # Comparison needs a stable snapshot, so the backend must be paused.
        if self.cache_store.get_stream_handler(
                Streams.METADATA).state != ServerStatus.WAITING.value:
            log.error(
                "Failed to compare tensors as the MindSpore is not in waiting state."
            )
            raise DebuggerCompareTensorError(
                "Failed to compare tensors as the MindSpore is not in waiting state."
            )
        self.validate_tensor_param(name, detail)
        parsed_shape = self.parse_shape(shape)
        node_type, tensor_name = self._get_tensor_name_and_type_by_ui_name(
            name)
        tolerance = to_float(tolerance, 'tolerance')
        tensor_stream = self.cache_store.get_stream_handler(Streams.TENSOR)
        # Only parameter nodes keep a previous-step value to diff against.
        if detail == 'data':
            if node_type == NodeTypeEnum.PARAMETER.value:
                reply = tensor_stream.get_tensors_diff(tensor_name,
                                                       parsed_shape, tolerance)
            else:
                raise DebuggerParamValueError(
                    "The node type must be parameter, but got {}.".format(
                        node_type))
        else:
            raise DebuggerParamValueError(
                "The value of detail: {} is not support.".format(detail))
        return reply
Пример #20
0
    def create_watchpoint(self,
                          watch_condition,
                          watch_nodes=None,
                          watch_point_id=None):
        """
        Create watchpoint.

        Args:
            watch_condition (dict): The watch condition.

                - condition (str): Accept `INF` or `NAN`.

                - param (list[float]): Not defined yet.
            watch_nodes (list[str]): The list of node names.
            watch_point_id (int): The id of watchpoint.

        Returns:
            dict, the id of new watchpoint.

        Raises:
            DebuggerCreateWatchPointError: If MindSpore is not in waiting state.
            DebuggerParamValueError: If an OVERFLOW condition is requested on
                the GPU backend.
        """
        log.info("Received create watchpoint request. WatchCondition: %s",
                 watch_condition)
        metadata_stream = self.cache_store.get_stream_handler(Streams.METADATA)
        if metadata_stream.state != ServerStatus.WAITING.value:
            log.error(
                "Failed to create watchpoint as the MindSpore is not in waiting state."
            )
            raise DebuggerCreateWatchPointError(
                "Failed to create watchpoint as the MindSpore is not in waiting state."
            )
        # The GPU backend does not support the OVERFLOW condition.
        if metadata_stream.backend == 'GPU' and watch_condition.get(
                'condition') == 'OVERFLOW':
            log.error("GPU doesn't support OVERFLOW watch condition.")
            raise DebuggerParamValueError(
                "GPU doesn't support OVERFLOW watch condition.")

        watch_nodes = self._get_node_basic_infos(watch_nodes)
        watch_point_id = self.cache_store.get_stream_handler(
            Streams.WATCHPOINT).create_watchpoint(watch_condition, watch_nodes,
                                                  watch_point_id)
        # NOTE(review): resetting the cached id to 0 right after creating a
        # watchpoint looks suspicious — confirm it should not store
        # `watch_point_id` the way `update_watchpoint` does.
        self._watch_point_id = 0
        log.info("Create watchpoint %d", watch_point_id)
        return {'id': watch_point_id}
Пример #21
0
    def get_default_root(self):
        """
        Get a node as default root for BFS in graph. Uses the leaf node
        with node id '1' as the default root.

        Returns:
            NodeObject, the default root node. (The original docstring said
            `str`; callers read `.name` from the returned object.)

        Raises:
            DebuggerParamValueError: If no leaf node with id '1' exists.
        """
        default_root = None
        # Only the node objects are needed, so iterate values directly.
        for item in self._leaf_nodes.values():
            if item.node_id == '1':
                default_root = item
                break

        if default_root is None:
            log.error("Abnormal graph. Invalid node for BFS.")
            msg = 'Abnormal graph. Invalid node for BFS.'
            raise DebuggerParamValueError(msg)

        return default_root
Пример #22
0
    def get_node_by_bfs_order(self, node_name=None, ascend=True):
        """
        Traverse the graph in breadth-first order starting from a node.

        Args:
            node_name (str): The name of the currently chosen leaf node.
            ascend (bool): If True, traverse towards the input nodes; if
                False, towards the output nodes. Default: True.

        Returns:
            Union[None, dict], the next node object in dict type or None.
        """
        self._graph_exists()
        bfs_order = self.bfs_order
        length = len(bfs_order)

        if not bfs_order:
            log.error('Cannot get the BFS order of the graph!')
            raise DebuggerParamValueError(
                'Cannot get the BFS order of the graph!')

        if node_name is None:
            # No anchor node: start from the front unless explicitly
            # descending (`ascend is False` mirrors the original check).
            return bfs_order[0] if ascend is not False else None

        try:
            index = bfs_order.index(node_name)
            log.debug("The index of the node in BFS list is: %d", index)
        except ValueError as err:
            log.error(
                'Cannot find the node: %s. Please check '
                'the node name: %s', node_name, err)
            msg = f'Cannot find the node: {node_name}. ' \
                  f'Please check the node name {err}.'
            raise DebuggerParamValueError(msg)

        return self.get_next_node_in_bfs(index, length, ascend)
Пример #23
0
    def _construct_run_event(self, params):
        """
        Construct run cmd from input control params.

        Args:
            params (dict): The control params.

                - level (str): The control granularity, `node` level or `step` level.
                    Default: `step`.

                - steps (int): Specify the steps that training should run.
                    Used when `level` is `step`.

                - name (str): Specify the name of the node. Used when `level`
                    is `node`. (The original docstring called this
                    `full_name`, but the code reads the `name` key.)

        Returns:
            EventReply, control event with run command.

        Raises:
            DebuggerParamValueError: If `level` is neither `step` nor `node`.
        """
        level = params.get('level', 'step')
        event = get_ack_reply()
        if level == 'step':
            # Any falsy value (missing key, None, 0) defaults to one step.
            steps = params.get('steps') or 1
            run_cmd = RunCMD(run_level='step', run_steps=steps)
        elif level == 'node':
            self._validate_node_type(params.get('name'))
            name = self.cache_store.get_stream_handler(
                Streams.GRAPH).get_full_name(params['name'])
            if not name:
                name = ''
            run_cmd = RunCMD(run_level='node', node_name=name)
        else:
            log.error(
                "Invalid Value. `level` should be `step` or `node`. Got %s",
                level)
            # Fix: the original message was missing the opening backtick.
            raise DebuggerParamValueError("`level` should be `step` or `node`")

        event.run_cmd.CopyFrom(run_cmd)
        log.debug("Construct run event. %s", event)
        return event
Пример #24
0
def str_to_slice_or_int(input_str):
    """
    Translate param from string to slice or int.

    Args:
        input_str (str): The string to be translated, e.g. "3" or "1:5".

    Returns:
        Union[int, slice], the transformed param.

    Raises:
        DebuggerParamValueError: If the string is not a valid int or slice.
    """
    try:
        if ':' in input_str:
            # Empty segments become None, matching Python slice syntax.
            ret = slice(*map(lambda x: int(x.strip()) if x.strip() else None,
                             input_str.split(':')))
        else:
            ret = int(input_str)
    except ValueError as err:
        log.error("Failed to create slice from %s", input_str)
        log.exception(err)
        # Chain the ValueError so the traceback keeps the root cause.
        raise DebuggerParamValueError("Invalid shape.") from err
    return ret
Пример #25
0
    def remove_node(self, node_name):
        """
        Remove a watch node from the current tree.

        The name is resolved one scope level at a time: the leading scope is
        looked up among direct children and the remainder of the name is
        removed recursively from that subtree.

        Args:
            node_name (str): The (possibly scoped) node name, e.g. 'a/b/c'.

        Raises:
            DebuggerParamValueError: If the leading scope is not a child of
                this tree.
        """
        log.debug("Remove %s", node_name)
        scope_names = node_name.split('/', 1)
        sub_tree_name = scope_names[0]
        sub_tree = self._children.get(sub_tree_name)
        if not sub_tree:
            log.error("Failed to find node %s in WatchNodeTree.",
                      sub_tree_name)
            raise DebuggerParamValueError(
                "Failed to find node {}".format(sub_tree_name))

        if len(scope_names) > 1:
            sub_tree.remove_node(scope_names[1])

        # Drop the child entirely when it was the final path segment or no
        # longer watches anything after the recursive removal.
        if sub_tree.watch_status == WatchNodeTree.NOT_WATCH or len(
                scope_names) == 1:
            self._children.pop(sub_tree_name)

        # Downgrade this tree's own status based on the remaining children.
        self._watch_status = WatchNodeTree.PARTIAL_WATCH if self._children else \
            WatchNodeTree.NOT_WATCH
Пример #26
0
def validate_watch_condition_params(watch_condition):
    """
    Validate the parameters inside a watch condition.

    Args:
        watch_condition (dict): Watch condition.

            - condition (str): Condition type. Should be in WATCHPOINT_CONDITION_MAPPING.

            - param (Union[int, float]): Condition value. Required for
                comparison conditions; it is checked to fit in np.float32.
    """
    condition = watch_condition.get('condition')
    param = watch_condition.get('param')
    if condition in ('NAN', 'INF', 'OVERFLOW'):
        # These conditions are parameterless.
        if param:
            log.error("No param is expected for %s condition.", condition)
            raise DebuggerParamValueError("No param is expected.")
        return
    if not isinstance(param, (float, int)):
        log.error("Number param should be given for condition <%s>.",
                  condition)
        raise DebuggerParamValueError("Number param should be given.")
    # The backend stores the param as float32; reject values that overflow it.
    if np.isinf(np.float32(param)):
        log.error("Condition param should be float32.")
        raise DebuggerParamValueError(
            "The value of condition param should be within float32.")
Пример #27
0
    def update_watchpoint(self, watch_point_id, watch_nodes, mode, name=None):
        """
        Update an existing watchpoint.

        Args:
            watch_point_id (int): The id of watchpoint.
            watch_nodes (list[str]): The list of node names.
            mode (int): The update operator on nodes. 0 for remove nodes from watch nodes.
                1 for add nodes to watch nodes.
            name (str): The search name. Default: None.

        Returns:
            dict, empty response.

        Raises:
            DebuggerUpdateWatchPointError: If MindSpore is not in waiting state.
            DebuggerParamValueError: If the id or node list is missing.
        """
        metadata_state = self.cache_store.get_stream_handler(
            Streams.METADATA).state
        if metadata_state != ServerStatus.WAITING.value:
            log.error(
                "Failed to update watchpoint as the MindSpore is not in waiting state."
            )
            raise DebuggerUpdateWatchPointError(
                "Failed to update watchpoint as the MindSpore is not in waiting state."
            )
        if not (watch_nodes and watch_point_id):
            log.error("Invalid parameter for update watchpoint.")
            raise DebuggerParamValueError(
                "Invalid parameter for update watchpoint.")
        # Resolve the node list, depending on whether a search name was given.
        if name is not None:
            watch_nodes = self._get_watch_nodes_by_search(watch_nodes)
        elif mode == 1:
            watch_nodes = self._get_node_basic_infos(watch_nodes)

        watchpoint_stream = self.cache_store.get_stream_handler(
            Streams.WATCHPOINT)
        watchpoint_stream.update_watchpoint(watch_point_id, watch_nodes, mode)
        self._watch_point_id = watch_point_id
        log.info("Update watchpoint with id: %d", watch_point_id)
        return {}
Пример #28
0
    def delete_watchpoint(self, watch_point_id):
        """
        Delete a watchpoint by id.

        Args:
            watch_point_id (int): The id of watchpoint.

        Returns:
            dict, empty response.

        Raises:
            DebuggerDeleteWatchPointError: If MindSpore is not in waiting state.
        """
        metadata_state = self.cache_store.get_stream_handler(
            Streams.METADATA).state
        if metadata_state != ServerStatus.WAITING.value:
            log.error(
                "Failed to delete watchpoint as the MindSpore is not in waiting state."
            )
            raise DebuggerDeleteWatchPointError(
                "Failed to delete watchpoint as the MindSpore is not in waiting state."
            )
        watchpoint_stream = self.cache_store.get_stream_handler(
            Streams.WATCHPOINT)
        watchpoint_stream.delete_watchpoint(watch_point_id)
        self._watch_point_id = 0
        log.info("Delete watchpoint with id: %d", watch_point_id)
        return {}
Пример #29
0
    def get_bfs_order(self):
        """
        Traverse the graph in order of breadth-first search.

        Returns:
            list, the leaf nodes arranged in BFS order.

        Raises:
            DebuggerParamValueError: If some leaf nodes cannot be traversed.
        """
        root = self.get_default_root()
        log.info('Randomly choose node %s as root to do BFS.', root.name)

        bfs_order = []
        self.get_bfs_graph(root.name, bfs_order)
        length = len(self._leaf_nodes)
        # Traverse the remaining disconnected components. Keep a set in sync
        # with bfs_order so membership tests are O(1) instead of scanning
        # the growing list for every node (was O(n^2)).
        visited = set(bfs_order)
        for node_name in self._leaf_nodes:
            if node_name not in visited:
                self.get_bfs_graph(node_name, bfs_order)
                visited.update(bfs_order)

        if len(bfs_order) != length:
            log.error("The length of bfs and leaf nodes are not equal.")
            msg = "Not all nodes are traversed!"
            raise DebuggerParamValueError(msg)

        return bfs_order
Пример #30
0
    def get(self, filter_condition=None):
        """
        Get full tensor value.

        Args:
            filter_condition (dict): Filter condition. Default: None, treated
                as an empty condition.

                - name (str): The name of tensor.

                - node_type (str): The type of the node.

                - shape (tuple): The shape used to slice the tensor value.

        Returns:
            dict, the tensor_value.

        Raises:
            DebuggerParamValueError: If no tensor matches the given name.
        """
        # Guard the declared default: calling get() with no argument used to
        # crash with AttributeError on None.get(...).
        filter_condition = {} if filter_condition is None else filter_condition
        name = filter_condition.get('name')
        node_type = filter_condition.get('node_type')
        shape = filter_condition.get('shape')
        tensor = self._get_tensor(name, node_type)
        if not tensor:
            log.error("No tensor named %s", name)
            raise DebuggerParamValueError("No tensor named {}".format(name))
        tensor_info = tensor.get_full_info(shape)
        self._update_has_prev_step_field(tensor_info, name, node_type)
        return {'tensor_value': tensor_info}