def test_get_tensor_data_success(self):
        """Get tensor data success."""
        test_tag_name = self._complete_tag_name

        processor = TensorProcessor(self._mock_data_manager)
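        # Query step 1 and slice with dims '[0,0,:-1,:]': fix the first two
        # axes at index 0 and drop the last element along the third axis.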
        results = processor.get_tensors([self._train_id], [test_tag_name],
                                        step='1',
                                        dims='[0,0,:-1,:]',
                                        detail='data')

        recv_metadata = results.get('tensors')[0].get("values")

        for recv_values, expected_values in zip(recv_metadata, self._tensors):
            assert recv_values.get('wall_time') == expected_values.get(
                'wall_time')
            assert recv_values.get('step') == expected_values.get('step')
            dims = expected_values.get('value').get("dims")
            expected_data = np.array(
                expected_values.get('value').get("float_data")).reshape(dims)
            recv_tensor = np.array(recv_values.get('value').get("data"))
            expected_tensor = TensorUtils.get_specific_dims_data(
                expected_data, (0, 0, slice(None, -1, None), slice(None)))
            # Compare tensor shape when recv_tensor shape is not empty.
            if recv_tensor.shape != (0, ):
                assert recv_tensor.shape == expected_tensor.shape
                assert np.allclose(recv_tensor, expected_tensor, rtol=1e-6)

def test_get_tensor_data_with_all_dims_success(self):
        """Get tensor data success with full slices on the last two dims."""
        test_tag_name = self._complete_tag_name

        processor = TensorProcessor(self._mock_data_manager)
        results = processor.get_tensors([self._train_id], [test_tag_name],
                                        step='1',
                                        dims='[0,0,:,:]',
                                        detail='data')

        recv_metadata = results.get('tensors')[0].get("values")

        for recv_values, expected_values in zip(recv_metadata, self._tensors):
            assert recv_values.get('wall_time') == expected_values.get(
                'wall_time')
            assert recv_values.get('step') == expected_values.get('step')
            dims = expected_values.get('value').get("dims")
            expected_data = np.array(
                expected_values.get('value').get("float_data")).reshape(dims)
            recv_tensor = np.array(recv_values.get('value').get("data"))
            expected_tensor = TensorUtils.get_specific_dims_data(
                expected_data, [0, 0, None, None], dims)
            assert np.allclose(recv_tensor, expected_tensor, rtol=1e-6)

    def _get_tensors_data(self, step, dims, tensors):
        """
        Builds a JSON-serializable object with information about tensor dims data.

        Args:
            step (int): Specify the step of the tensor.
            dims (str): Specify the dims of the tensor.
            tensors (list): The list of _Tensor data.

        Returns:
            dict, a dict including the `wall_time`, `step`, and `value` for each tensor.
                    {
                        "wall_time": 0,
                        "step": 0,
                        "value": {
                            "dims": [1],
                            "data_type": "DT_FLOAT32",
                            "data": [[0.1]]
                            "statistics": {
                                "max": 0,
                                "min": 0,
                                "avg": 0,
                                "count": 1,
                                "nan_count": 0,
                                "neg_inf_count": 0,
                                "pos_inf_count": 0
                            }
                        }
                    }

        Raises:
            ResponseDataExceedMaxValueError, If the size of response data exceeds the maximum value.
            StepTensorDataNotInCacheError, If the queried step is not in the cache.
        """
        values = []
        step_in_cache = False
        dims = TensorUtils.convert_array_from_str_dims(dims, limit=2)
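        # For example, the dims string '[0,0,:,:]' is converted to
        # [0, 0, None, None], where None keeps the whole axis (see the tests
        # above); `limit` presumably caps how many axes may be kept in full.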
        for tensor in tensors:
            # This value is an instance of TensorContainer
            value = tensor.value
            if step != tensor.step:
                continue
            step_in_cache = True
            res_data = TensorUtils.get_specific_dims_data(
                value.ndarray, dims, list(value.dims))
            flatten_data = res_data.flatten().tolist()
            if len(flatten_data) > MAX_TENSOR_RESPONSE_DATA_SIZE:
                raise ResponseDataExceedMaxValueError(
                    "the size of response data: {} exceed max value: {}.".
                    format(len(flatten_data), MAX_TENSOR_RESPONSE_DATA_SIZE))

            def transfer(array):
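                # Replace NaN/Inf with string markers so the result stays
                # JSON-serializable, e.g. (illustrative):
                # transfer(np.array([1.0, np.nan, -np.inf])) -> [1.0, 'NAN', '-INF']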
                if not isinstance(array, np.ndarray):
                    # Wrap the scalar in a list so that the len function can be
                    # used when the value of array is `NAN`, `-INF` or `INF`.
                    array = [array]
                transfer_data = [None] * len(array)
                for index, data in enumerate(array):
                    if isinstance(data, np.ndarray):
                        transfer_data[index] = transfer(data)
                    else:
                        if np.isnan(data):
                            transfer_data[index] = 'NAN'
                        elif np.isneginf(data):
                            transfer_data[index] = '-INF'
                        elif np.isposinf(data):
                            transfer_data[index] = 'INF'
                        else:
                            transfer_data[index] = float(data)
                return transfer_data

            stats = TensorUtils.get_statistics_from_tensor(res_data)
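            # NaN and +/-Inf are not valid JSON numbers, so only fall back to
            # the string markers produced by transfer() when such values exist;
            # otherwise the nested float list from tolist() is returned as-is.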
            if stats.nan_count + stats.neg_inf_count + stats.pos_inf_count > 0:
                tensor_data = transfer(res_data)
            else:
                tensor_data = res_data.tolist()
            values.append({
                "wall_time": tensor.wall_time,
                "step": tensor.step,
                "value": {
                    "dims": value.dims,
                    "data_type": anf_ir_pb2.DataType.Name(value.data_type),
                    "data": tensor_data,
                    "statistics": TensorUtils.get_statistics_dict(stats)
                }
            })
            break
        if not step_in_cache:
            raise StepTensorDataNotInCacheError(
                "this step: {} data may has been dropped.".format(step))

        return values