Ejemplo n.º 1
0
 def test_compute_tile_dimensions_buffer(self, tensor_dims, shape, is_rgb,
                                         result_dims):
     """Tile dimensions computed by a buffer projection (is_buffer=True)."""
     view_projection = TensorViewProjection(True)
     # Override the user-configured shape directly (white-box test).
     view_projection._shape = shape
     height, width = view_projection._compute_tile_dimensions(
         tensor_dims, is_rgb)
     assert result_dims == [height, width]
Ejemplo n.º 2
0
class ClusterProjectionsGroupProperties:
    """Observer properties group for cluster projections.

    Exposes an RGB toggle and delegates tensor transformation to a
    TensorViewProjection; get_properties() returns the items grouped
    under a collapsible "Projections" header.
    """
    _prop_builder: ObserverPropertiesBuilder
    _is_rgb: bool = False
    tensor_view_projection: TensorViewProjection

    def __init__(self):
        self._prop_builder = ObserverPropertiesBuilder(self)
        self.tensor_view_projection = TensorViewProjection(is_buffer=False)

    def project_and_scale(self, tensor: torch.Tensor):
        """Transform `tensor` for display; returns (tensor, projection_params)."""
        tensor, projection_params = self.tensor_view_projection.transform_tensor(
            tensor, self.is_rgb)
        return tensor, projection_params

    @property
    def is_rgb(self) -> bool:
        return self._is_rgb

    @is_rgb.setter
    def is_rgb(self, value: bool):
        self._is_rgb = value

    def get_properties(self) -> List[ObserverPropertiesItem]:
        """Build the RGB + projection properties, each name prefixed with the
        group header, preceded by a collapsible header item."""
        properties = [self._prop_builder.auto("RGB",
                                              type(self).is_rgb)
                      ] + self.tensor_view_projection.get_properties()

        # Plain literal: the original f-string had no placeholders (F541).
        header_name = 'Projections'
        for prop in properties:
            prop.name = f"{header_name}.{prop.name}"

        return [
            self._prop_builder.collapsible_header(header_name, True),
            *properties
        ]
Ejemplo n.º 3
0
 def test_transform_tensor_one_channel_rgb(self, t_input_size, t_shape,
                                           items_per_row, result):
     """Single-channel input (trailing dim 1) rendered with is_rgb=True;
     expected values per the parametrized fixtures."""
     t_input = torch.arange(1, t_input_size + 1).float() / 100
     t_input = t_input.view([*t_shape, 1])
     view_projection = TensorViewProjection(False)
     # view_projection._shape = t_shape
     view_projection._items_per_row = items_per_row
     tensor, _ = view_projection.transform_tensor(t_input, True)
     assert same(result, tensor, eps=0.001)
Ejemplo n.º 4
0
class HierarchicalGroupProperties:
    """Per-group observer properties for a HierarchicalObserver.

    Holds the display scale (user-chosen and auto-adjusted), the RGB flag,
    and a TensorViewProjection used to flatten the group's tensor for display.
    """
    _parent: 'HierarchicalObserver'
    scale: int = 4
    scale_set_by_user: int = 1
    is_rgb: bool = False
    tensor_view_projection: TensorViewProjection

    def __init__(self, group_id: int, parent: 'HierarchicalObserver'):
        self._parent = parent
        self.group_id = group_id
        self.tensor_view_projection = TensorViewProjection(is_buffer=False)

    def project_and_scale(self, tensor):
        """Transform `tensor` for display and update `scale` so the rendered
        group respects the parent's minimal group size."""
        tensor, projection_params = self.tensor_view_projection.transform_tensor(
            tensor, self.is_rgb)
        self.scale = update_scale_to_respect_minimum_size(
            tensor, self._parent.minimal_group_size, self.scale_set_by_user)
        return tensor, projection_params

    def get_properties(self):
        """Build scale/RGB/projection properties, each name prefixed with this
        group's header, preceded by a collapsible header item."""
        def update_scale(value):
            # Remember the user's choice; project_and_scale may still override
            # the effective scale to respect the minimum size.
            self.scale_set_by_user = int(value)
            return value

        def update_is_rgb(value):
            self.is_rgb = parse_bool(value)
            return value

        properties = [
            ObserverPropertiesItem("Scale", 'number', self.scale,
                                   update_scale),
            ObserverPropertiesItem("RGB", 'checkbox', self.is_rgb,
                                   update_is_rgb)
        ] + self.tensor_view_projection.get_properties()

        header_name = f'Group {self.group_id}'
        for prop in properties:
            prop.name = f"{header_name}.{prop.name}"

        return [
            self._parent.prop_builder.collapsible_header(header_name, True),
            *properties
        ]
Ejemplo n.º 5
0
    def test_transform_tensor_tiling_padding_rgb(self, t_input_size, t_shape,
                                                 items_per_row, result):
        """Tiling/padding with 3-channel RGB input: values are reshaped into
        t_shape tiles, items_per_row tiles per row, missing cells padded."""
        t_input = torch.arange(1, t_input_size + 1).float() / 100
        # Add dimensions - there should be no effect
        t_input = torch.tensor(g(1)).expand(
            (t_input_size, 3)).mul(t_input.unsqueeze(1))

        view_projection = TensorViewProjection(False)
        view_projection._shape = t_shape
        view_projection._items_per_row = items_per_row
        tensor, _ = view_projection.transform_tensor(t_input, True)
        assert same(result, tensor, eps=0.001)
Ejemplo n.º 6
0
    def test_transform_tensor_tiling_padding(self, t_input_size, t_shape,
                                             items_per_row, result):
        """Flattened input is reshaped into t_shape tiles laid out
        items_per_row per row; missing cells become padding."""
        t_input = torch.arange(1, t_input_size + 1).float() / 100
        # Add dimensions - there should be no effect
        t_input = t_input.expand((1, 1, 1, t_input_size))
        t_input = t_input.transpose(3, 1)

        view_projection = TensorViewProjection(False)
        view_projection._shape = t_shape
        view_projection._items_per_row = items_per_row
        tensor, _ = view_projection.transform_tensor(t_input, False)
        assert same(result, tensor, eps=0.001)
Ejemplo n.º 7
0
    def test_value_at(self, t_input_size, t_shape, items_per_row, result):
        """value_at(tensor, x, y) returns the raw value shown at display
        coordinates; padded cells yield NaN (per fixtures)."""
        t_input = torch.arange(1, t_input_size + 1).float() / 100
        # Add dimensions - there should be no effect
        t_input = t_input.expand((1, 1, 1, t_input_size))
        t_input = t_input.transpose(3, 1)

        view_projection = TensorViewProjection(False)
        view_projection._shape = t_shape
        view_projection._items_per_row = items_per_row

        height, width = result.size()
        for y in range(height):
            for x in range(width):
                value = view_projection.value_at(t_input, x, y)
                expected = float(result[y, x])
                if math.isnan(expected):
                    # NaN never equals itself, so padding needs a special check.
                    assert math.isnan(value), f'Value at {x}, {y}'
                else:
                    assert expected == value, f'Value at {x}, {y}'
Ejemplo n.º 8
0
class BufferObserver(TensorObservable):
    """Buffer observer - observe MemoryBlock and highlight current positions."""
    # Memory block whose buffer (if any) is displayed.
    _buffer_memory_block: 'BufferMemoryBlock'
    _observable_dims: List[int]

    def __init__(self, buffer_memory_block: 'BufferMemoryBlock'):
        super().__init__()
        # TODO not nice to redefine _tensor_view_projection here
        self._tensor_view_projection = TensorViewProjection(is_buffer=True)
        self._buffer_memory_block = buffer_memory_block

    def get_tensor(self) -> Optional[torch.Tensor]:
        """Get tensor to be displayed."""
        # Until the owner is initialized there is nothing meaningful to show.
        if not self._buffer_memory_block.owner.is_initialized():
            return None

        return self._buffer_memory_block.buffer.stored_data if self._buffer_memory_block.buffer is not None else None

    def get_data(self) -> BufferObserverData:
        """Build observer data: the projected tensor plus current-pointer
        positions (one per element of the buffer's current_ptr tensor)."""
        self._tensor = self.get_tensor()
        if self._tensor is not None:
            self._tensor = sanitize_tensor(self._tensor)
            tensor, projection_params = self._tensor_view_projection.transform_tensor(
                self._tensor, self._is_rgb)
            self._update_scale_to_respect_minimum_size(tensor)

            current_ptr = []
            if self._buffer_memory_block.buffer is not None:
                ptr_tensor = self._buffer_memory_block.buffer.current_ptr
                if is_valid_tensor(ptr_tensor):
                    # Each pointer indexes into its member's slice of the
                    # flattened projection, so offset pointer i by
                    # i * buffer_size to get absolute positions.
                    flock_size = ptr_tensor.numel()
                    buffer_size = math.floor(projection_params.count /
                                             flock_size)
                    offsets = torch.arange(0,
                                           buffer_size * flock_size,
                                           buffer_size,
                                           device=ptr_tensor.device)
                    current_ptr = ptr_tensor.add(offsets).to(
                        "cpu").numpy().tolist()

            tensor_data = TensorObservableData(
                tensor,
                TensorObservableParams(scale=self._scale,
                                       projection=projection_params))
            result = BufferObserverData(tensor_data, current_ptr=current_ptr)
        else:
            # No tensor yet: emit dummy data with a single zero pointer.
            result = BufferObserverData(dummy_tensor_observable_data(),
                                        current_ptr=[0])
        return result
Ejemplo n.º 9
0
 def __init__(self):
     """Set up the property builder and a non-buffer view projection."""
     self._prop_builder = ObserverPropertiesBuilder(self)
     self.tensor_view_projection = TensorViewProjection(is_buffer=False)
Ejemplo n.º 10
0
 def __init__(self, buffer_memory_block: 'BufferMemoryBlock'):
     """Observe `buffer_memory_block` using a buffer-mode view projection."""
     super().__init__()
     # TODO not nice to redefine _tensor_view_projection here
     self._tensor_view_projection = TensorViewProjection(is_buffer=True)
     self._buffer_memory_block = buffer_memory_block
Ejemplo n.º 11
0
 def __init__(self, group_id: int, parent: 'HierarchicalObserver'):
     """Per-group properties: remember parent/group id, create projection."""
     self._parent = parent
     self.group_id = group_id
     self.tensor_view_projection = TensorViewProjection(is_buffer=False)
Ejemplo n.º 12
0
 def setup_method(self):
     """pytest hook: fresh TensorViewProjection (buffer flag False) per test."""
     self.instance = TensorViewProjection(False)
Ejemplo n.º 13
0
class TestTensorViewProjection(ABC):
    """Tests for TensorViewProjection: colorization, tile layout/padding,
    tile-dimension computation, per-pixel value lookup, and RGB transform."""
    # Instance under test, recreated before every test method.
    instance: TensorViewProjection

    def setup_method(self):
        # Fresh non-buffer projection for each test.
        self.instance = TensorViewProjection(False)

    @pytest.mark.parametrize("t_input, minimum, maximum, expected_result", [
        (torch.Tensor([[-2.0, -1.0, -0.5, 0], [2.0, 1.0, 0.5, 0]]), 0, 1,
         torch.Tensor([[r(1.0), r(1.0), r(0.5),
                        gr(0.0)], [g(1.0), g(1.0),
                                   g(0.5), gr(0.0)]])),
        (torch.Tensor([[-2.0, -1.0, -0.5, 0], [2.0, 1.0, 0.5, 0]]), 0.5, 1,
         torch.Tensor([[r(1.0), r(1.0), r(0.0),
                        gr(0.0)], [g(1.0), g(1.0),
                                   g(0.0), gr(0.0)]])),
        (torch.Tensor([[-2.0, -1.0, -0.5, -0.2], [2.0, 1.0, 0.5, 0.2]
                       ]), 0.5, 1.5,
         torch.Tensor([[r(1.0), r(0.5), r(0.0),
                        gr(0.0)], [g(1.0), g(0.5),
                                   g(0.0), gr(0.0)]])),
    ])
    def test_colorize(self, t_input, minimum, maximum, expected_result):
        """Per fixtures: negatives map to r(...), positives to g(...), scaled
        by the [minimum, maximum] range and clamped."""
        result = self.instance._colorize(t_input, minimum, maximum)
        assert same(expected_result, result, eps=0.001)

    def test_colorize_big_tensor(self):
        """Check for a tensor coloring problem. Note: torch was crashing hard, before the fix."""
        w, h = 10000, 100
        data = torch.full((h, w), 1.0)
        result = self.instance._colorize(data, minimum=0, maximum=4).numpy()
        assert (h, w, 3) == result.shape
        expected = np.repeat(np.array([[[0, .25, 0]]]), h, axis=0)
        expected = np.repeat(expected, w, axis=1)
        np.testing.assert_array_equal(expected, result)

    def test_colorize_inf_nan(self):
        """NaN, +Inf and -Inf each get a dedicated sentinel color triple."""
        data = torch.Tensor([[float('NaN'), float('Inf'), -float('Inf')]])
        result = self.instance._colorize(data, minimum=-2, maximum=4).numpy()
        assert (1, 3, 3) == result.shape
        np.testing.assert_array_equal(
            np.array([
                [
                    [0, 0, 1],
                    [0, 1, 1],
                    [1, 0, 1],
                ],
            ]), result)

    @pytest.mark.parametrize("t_input_size, t_shape, items_per_row, result", [
        (6, [1, 6], 1,
         torch.Tensor([[
             g(0.01),
             g(0.02),
             g(0.03),
             g(0.04),
             g(0.05),
             g(0.06),
         ]])),
        (6, [2, 3], 1,
         torch.Tensor([[g(0.01), g(0.02), g(0.03)],
                       [g(0.04), g(0.05), g(0.06)]])),
        (12, [2, 3], 2,
         torch.Tensor(
             [[g(0.01), g(0.02),
               g(0.03), g(0.07),
               g(0.08), g(0.09)],
              [g(0.04), g(0.05),
               g(0.06), g(0.10),
               g(0.11), g(0.12)]])),
        (12, [3, 2], 2,
         torch.Tensor(
             [[g(0.01), g(0.02), g(0.07), g(0.08)],
              [g(0.03), g(0.04), g(0.09), g(0.10)],
              [g(0.05), g(0.06), g(0.11), g(0.12)]])),
        (10, [2, 3], 2,
         torch.Tensor(
             [[g(0.01), g(0.02),
               g(0.03), g(0.07),
               g(0.08), g(0.09)],
              [g(0.04), g(0.05),
               g(0.06), g(0.10),
               pad(), pad()]])),
        (9, [2, 2], 2,
         torch.Tensor([[
             g(0.01),
             g(0.02),
             g(0.05),
             g(0.06),
         ], [
             g(0.03),
             g(0.04),
             g(0.07),
             g(0.08),
         ], [g(0.09), pad(), pad(), pad()], [pad(), pad(),
                                             pad(), pad()]])),
    ])
    def test_transform_tensor_tiling_padding(self, t_input_size, t_shape,
                                             items_per_row, result):
        """Flattened input is reshaped into t_shape tiles, items_per_row per
        row; cells without data become padding."""
        t_input = torch.arange(1, t_input_size + 1).float() / 100
        # Add dimensions - there should be no effect
        t_input = t_input.expand((1, 1, 1, t_input_size))
        t_input = t_input.transpose(3, 1)

        view_projection = TensorViewProjection(False)
        view_projection._shape = t_shape
        view_projection._items_per_row = items_per_row
        tensor, _ = view_projection.transform_tensor(t_input, False)
        assert same(result, tensor, eps=0.001)

    @pytest.mark.parametrize(
        "tensor_dims, shape, is_rgb, result_dims",
        [
            ([3, 4, 5], [], False, [4, 5]),
            ([4, 5], [], False, [4, 5]),
            ([5], [], False, [1, 1]),  # Linear tensor should have tile 1x1
            ([5, 6, 3], [], True, [5, 6]),
            ([8, 5, 6, 3], [], True, [5, 6]),
            ([8, 5, 6, 1], [], True, [5, 6]),
            ([5, 3], [], True, [1, 1
                                ]),  # RGB Linear tensor should have tile 1x1
            ([3], [], True, [1, 1]),  # RGB Linear tensor should have tile 1x1
            ([4, 5], [3], False, [4, 5]),  # Incomplete shape has no effect
            ([4, 5], [2, 3], False, [2, 3]),
            ([4, 5], [2, 3], True, [2, 3]),
            ([4, 5], [1, 2, 3], True, [2, 3]),
        ])
    def test_compute_tile_dimensions(self, tensor_dims, shape, is_rgb,
                                     result_dims):
        """Non-buffer tile dims: derived from the trailing tensor dims (1x1
        for linear tensors); a complete explicit _shape overrides them."""
        view_projection = TensorViewProjection(False)
        view_projection._shape = shape
        height, width = view_projection._compute_tile_dimensions(
            tensor_dims, is_rgb)
        assert result_dims == [height, width]

    @pytest.mark.parametrize(
        "tensor_dims, shape, is_rgb, result_dims",
        [
            ([3, 4, 5], [], False, [1, 5]),
            ([4, 5], [], False, [1, 5]),
            ([5], [], False, [1, 1]),  # Linear tensor should have tile 1x1
            ([5, 6, 3], [], True, [1, 6]),
            ([8, 5, 6, 3], [], True, [1, 6]),
            ([5, 3], [], True, [1, 1
                                ]),  # RGB Linear tensor should have tile 1x1
            ([3], [], True, [1, 1]),  # RGB Linear tensor should have tile 1x1
            ([4, 5], [3], False, [1, 5]),  # Incomplete shape has no effect
            ([4, 5], [2, 3], False, [2, 3]),
            ([4, 5], [2, 3], True, [2, 3]),
            ([4, 5], [1, 2, 3], True, [2, 3]),
        ])
    def test_compute_tile_dimensions_buffer(self, tensor_dims, shape, is_rgb,
                                            result_dims):
        """Buffer tile dims: per fixtures, height collapses to 1 unless a
        complete explicit _shape overrides it."""
        view_projection = TensorViewProjection(True)
        view_projection._shape = shape
        height, width = view_projection._compute_tile_dimensions(
            tensor_dims, is_rgb)
        assert result_dims == [height, width]

    @pytest.mark.parametrize("t_input_size, t_shape, items_per_row, result", [
        (6, [1, 6], 1,
         torch.Tensor([[
             g(0.01),
             g(0.02),
             g(0.03),
             g(0.04),
             g(0.05),
             g(0.06),
         ]])),
        (6, [2, 3], 1,
         torch.Tensor([[g(0.01), g(0.02), g(0.03)],
                       [g(0.04), g(0.05), g(0.06)]])),
        (12, [2, 3], 2,
         torch.Tensor(
             [[g(0.01), g(0.02),
               g(0.03), g(0.07),
               g(0.08), g(0.09)],
              [g(0.04), g(0.05),
               g(0.06), g(0.10),
               g(0.11), g(0.12)]])),
        (12, [3, 2], 2,
         torch.Tensor(
             [[g(0.01), g(0.02), g(0.07), g(0.08)],
              [g(0.03), g(0.04), g(0.09), g(0.10)],
              [g(0.05), g(0.06), g(0.11), g(0.12)]])),
        (10, [2, 3], 2,
         torch.Tensor(
             [[g(0.01), g(0.02),
               g(0.03), g(0.07),
               g(0.08), g(0.09)],
              [g(0.04), g(0.05),
               g(0.06), g(0.10),
               pad(), pad()]])),
        (9, [2, 2], 2,
         torch.Tensor([[
             g(0.01),
             g(0.02),
             g(0.05),
             g(0.06),
         ], [
             g(0.03),
             g(0.04),
             g(0.07),
             g(0.08),
         ], [g(0.09), pad(), pad(), pad()], [pad(), pad(),
                                             pad(), pad()]])),
    ])
    def test_transform_tensor_tiling_padding_rgb(self, t_input_size, t_shape,
                                                 items_per_row, result):
        """Same tiling/padding expectations, but with 3-channel RGB input."""
        t_input = torch.arange(1, t_input_size + 1).float() / 100
        # Add dimensions - there should be no effect
        t_input = torch.tensor(g(1)).expand(
            (t_input_size, 3)).mul(t_input.unsqueeze(1))

        view_projection = TensorViewProjection(False)
        view_projection._shape = t_shape
        view_projection._items_per_row = items_per_row
        tensor, _ = view_projection.transform_tensor(t_input, True)
        assert same(result, tensor, eps=0.001)

    @pytest.mark.parametrize("t_input_size, t_shape, items_per_row, result", [
        (6, [2, 3], 1,
         torch.Tensor([[g(0.01), g(0.02), g(0.03)],
                       [g(0.04), g(0.05), g(0.06)]])),
    ])
    def test_transform_tensor_one_channel_rgb(self, t_input_size, t_shape,
                                              items_per_row, result):
        """Input with a trailing singleton channel rendered with is_rgb=True;
        expected values per fixtures."""
        t_input = torch.arange(1, t_input_size + 1).float() / 100
        t_input = t_input.view([*t_shape, 1])
        view_projection = TensorViewProjection(False)
        # view_projection._shape = t_shape
        view_projection._items_per_row = items_per_row
        tensor, _ = view_projection.transform_tensor(t_input, True)
        assert same(result, tensor, eps=0.001)

    @pytest.mark.parametrize("t_input_size, t_shape, items_per_row, result", [
        (12, [2, 3], 2,
         torch.Tensor([[0.01, 0.02, 0.03, 0.07, 0.08, 0.09],
                       [0.04, 0.05, 0.06, 0.10, 0.11, 0.12]])),
        (12, [3, 2], 2,
         torch.Tensor([[0.01, 0.02, 0.07, 0.08], [0.03, 0.04, 0.09, 0.10],
                       [0.05, 0.06, 0.11, 0.12]])),
        (9, [2, 2], 2,
         torch.Tensor([[
             0.01,
             0.02,
             0.05,
             0.06,
         ], [
             0.03,
             0.04,
             0.07,
             0.08,
         ], [0.09, nan(), nan(), nan()], [nan(), nan(),
                                          nan(), nan()]])),
    ])
    def test_value_at(self, t_input_size, t_shape, items_per_row, result):
        """value_at(tensor, x, y) returns the raw value at display coords;
        padded cells yield NaN."""
        t_input = torch.arange(1, t_input_size + 1).float() / 100
        # Add dimensions - there should be no effect
        t_input = t_input.expand((1, 1, 1, t_input_size))
        t_input = t_input.transpose(3, 1)

        view_projection = TensorViewProjection(False)
        view_projection._shape = t_shape
        view_projection._items_per_row = items_per_row

        height, width = result.size()
        for y in range(height):
            for x in range(width):
                value = view_projection.value_at(t_input, x, y)
                expected = float(result[y, x])
                if math.isnan(expected):
                    # NaN != NaN, so padding needs an explicit isnan check.
                    assert math.isnan(value), f'Value at {x}, {y}'
                else:
                    assert expected == value, f'Value at {x}, {y}'

        # tensor, _ = view_projection.transform_tensor(t_input, False)
        # assert same(result, tensor, eps=0.001)

    @pytest.mark.parametrize("t_input, p_min, p_max, expected_result", [
        (torch.Tensor([[
            gr(0.5), gr(1.0), gr(1.5), gr(2.0)
        ]]), 0, 1, torch.Tensor([[gr(0.5), gr(1.0),
                                  gr(1.0), gr(1.0)]])),
        (torch.Tensor([[
            gr(-4.0), gr(-1.0), gr(0), gr(2.0)
        ]]), -2, 0, torch.Tensor([[gr(0.0), gr(0.5),
                                   gr(1.0), gr(1.0)]])),
        (torch.Tensor(
            [[gr(-4.0), gr(-1.0),
              gr(0), gr(2.0),
              gr(6.0), gr(8.0)]]), -2, 6,
         torch.Tensor(
             [[gr(0), gr(0.125),
               gr(0.25), gr(0.5),
               gr(1.0), gr(1.0)]]))
    ])
    def test_rgb_transform(self, t_input, p_min, p_max, expected_result):
        """_rgb_transform rescales [p_min, p_max] linearly onto [0, 1] and
        clamps values outside the range (per fixtures)."""
        result = TensorViewProjection._rgb_transform(t_input, p_min, p_max)
        assert same(expected_result, result, eps=0.001)
Ejemplo n.º 14
0
 def test_rgb_transform(self, t_input, p_min, p_max, expected_result):
     """_rgb_transform rescales [p_min, p_max] onto [0, 1] with clamping."""
     result = TensorViewProjection._rgb_transform(t_input, p_min, p_max)
     assert same(expected_result, result, eps=0.001)