Code example #1
0
File: test_fork_node.py  Project: jvitku/torchsim
def test_fork_node_inverse_0():
    # TODO (Test): add for dim = 1, then refactor.
    creator = AllocatingCreator(device='cpu')
    dtype = creator.float32

    dim = 0

    # Projecting a (2, 4) output back through a [2, 2] fork along dim 0
    # should place the values in the top half of the (4, 4) input space
    # and leave the remainder zeroed.
    expected_results = [
        creator.tensor(
            [[1, 2, 3, 4], [5, 6, 7, 8], [0, 0, 0, 0], [0, 0, 0, 0]],
            dtype=dtype,
            device=creator.device)
    ]

    source_block = MemoryBlock()
    source_block.tensor = creator.zeros((4, 4))

    projected_tensor = creator.tensor([[1, 2, 3, 4], [5, 6, 7, 8]],
                                      dtype=dtype,
                                      device=creator.device)

    node = ForkNode(dim, split_sizes=[2, 2])
    Connector.connect(source_block, node.inputs.input)

    packet = InversePassOutputPacket(projected_tensor, node.outputs[0])

    node.allocate_memory_blocks(creator)
    results = node.recursive_inverse_projection_from_output(packet)

    for want, got in zip(expected_results, results):
        assert same(want, got.tensor)
Code example #2
0
    def test_sample_learning_batch_combinations(self, method, flock_indices,
                                                elements_written):
        """Sampling must overwrite every row of the output tensor."""
        flock_size = 3
        creator = AllocatingCreator('cpu')

        # When a subset of flocks is selected, only that many rows are sampled.
        f_size = flock_size if flock_indices is None else len(flock_indices)

        buffer = SPFlockBuffer(creator,
                               buffer_size=20,
                               n_cluster_centers=3,
                               flock_size=flock_size,
                               input_size=5)
        if flock_indices is not None:
            indices_tensor = creator.tensor(flock_indices, dtype=torch.int64)
            buffer.set_flock_indices(indices_tensor)

        buffer.total_data_written[:] = elements_written
        buffer.clusters.stored_data[:, :elements_written] = creator.tensor(
            [0, 1, 0])
        buffer.inputs.stored_data[:, :elements_written, :] = creator.tensor(
            [1.3, 0.2, 0.6, 0.4, 0.1])

        # Pre-fill the result with a sentinel so rows the sampler failed to
        # overwrite are detectable afterwards.
        sentinel = -2.1
        sampled_data = creator.full((f_size, elements_written, 5),
                                    fill_value=sentinel)
        buffer.sample_learning_batch(elements_written, sampled_data, method)

        assert (sampled_data == sentinel).any().item() == 0
Code example #3
0
File: test_join_node.py  Project: jvitku/torchsim
def test_join_node_inverse_flatten():
    device = 'cpu'
    creator = AllocatingCreator(device)
    dtype = creator.float32

    # The inverse projection should un-flatten the joined vector into one
    # tensor per input, each restored to its input's original shape.
    expected_results = [
        creator.tensor([[1, 2, 3, 4], [5, 6, 7, 8]],
                       dtype=dtype,
                       device=device),
        creator.tensor([9, 10], dtype=dtype, device=device)
    ]

    source_blocks = [MemoryBlock(), MemoryBlock()]
    source_blocks[0].tensor = creator.zeros((2, 4))
    source_blocks[1].tensor = creator.zeros((2, ))

    flat_tensor = creator.tensor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
                                 dtype=dtype,
                                 device=device)

    node = JoinNode(flatten=True)
    for index, block in enumerate(source_blocks):
        Connector.connect(block, node.inputs[index])

    packet = InversePassOutputPacket(flat_tensor, node.outputs.output)

    node.allocate_memory_blocks(creator)
    results = node.recursive_inverse_projection_from_output(packet)

    for want, got in zip(expected_results, results):
        assert same(want, got.tensor)
Code example #4
0
File: test_mse_node.py  Project: jvitku/torchsim
def test_mse_multiple_steps():
    device = 'cpu'
    creator = AllocatingCreator(device=device)
    dtype = creator.float32

    # Three step pairs; per-step sums of squared differences are 12, 18
    # and 24 -> total 54 over 3 * 6 = 18 elements -> MSE = 3.
    step_pairs = [
        (creator.tensor([0, 1, 2, 0, 1, 2], device=device, dtype=dtype),
         creator.tensor([0, 3, 0, 0, 1, 0], device=device, dtype=dtype)),
        (creator.tensor([1, 1, 0, 0, 1, 2], device=device, dtype=dtype),
         creator.tensor([0, 3, 2, 1, 3, 0], device=device, dtype=dtype)),
        (creator.tensor([1, 1, 2, 0, 1, 1], device=device, dtype=dtype),
         creator.tensor([-2, 3, 1, 3, 1, 0], device=device, dtype=dtype)),
    ]

    expected_result = creator.tensor([3], dtype=dtype, device=device)

    mse_unit = MseUnit(creator,
                       input_shape=step_pairs[0][0].shape,
                       buffer_size=3)

    for first, second in step_pairs:
        mse_unit.step(first, second)

    assert same(expected_result, mse_unit._mean_square_error_output)
Code example #5
0
    def _create_expected_images_tensor(creator: AllocatingCreator):
        """Build the doubled stack of expected test images.

        Three all-ones images of shape (2, 3, 3); image i has its [1, 2, :]
        pixel set to the i-th RGB basis colour. The stack of three is then
        concatenated with itself along dim 0.
        """
        device = creator.device
        dtype = get_float(device)

        images = creator.full([3, 2, 3, 3],
                              dtype=dtype,
                              device=device,
                              fill_value=1.0)
        basis_colors = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
        for image_index, color in enumerate(basis_colors):
            images[image_index, 1, 2, :] = creator.tensor(color)

        return creator.cat([images, images])
Code example #6
0
File: test_fork_node.py  Project: jvitku/torchsim
def test_fork_inverse_dim_1():
    creator = AllocatingCreator(device='cpu')
    dtype = creator.float32
    device = creator.device

    dim = 1

    # Splitting a (4, 4) tensor along columns yields two (4, 2) halves.
    left_half = creator.tensor([[1, 2], [5, 6], [9, 10], [13, 14]],
                               dtype=dtype,
                               device=device)
    right_half = creator.tensor([[3, 4], [7, 8], [11, 12], [15, 16]],
                                dtype=dtype,
                                device=device)

    _test_fork_inverse(dim, [left_half, right_half], creator, dtype, device)
Code example #7
0
File: test_fork_node.py  Project: jvitku/torchsim
def test_fork_inverse_dim_0():
    creator = AllocatingCreator(device='cpu')
    dtype = creator.float32
    device = creator.device

    dim = 0

    # Splitting a (4, 4) tensor along rows yields two (2, 4) halves.
    top_half = creator.tensor([[1, 2, 3, 4], [5, 6, 7, 8]],
                              dtype=dtype,
                              device=device)
    bottom_half = creator.tensor([[9, 10, 11, 12], [13, 14, 15, 16]],
                                 dtype=dtype,
                                 device=device)

    _test_fork_inverse(dim, [top_half, bottom_half], creator, dtype, device)
Code example #8
0
File: test_tensor_utils.py  Project: jvitku/torchsim
def test_clamp_tensor(clamped_data, min, max, expected_result, device):
    """clamp_tensor should honour optional per-element min/max bounds."""
    # NOTE: `min`/`max` shadow builtins but are bound by pytest parametrize,
    # so the parameter names must stay as they are.
    dtype = get_float(device)
    creator = AllocatingCreator(device=device)

    def to_tensor(data):
        return creator.tensor(data, dtype=dtype, device=device)

    source = to_tensor(clamped_data)
    # None means "no bound on this side".
    lower = to_tensor(min) if min is not None else None
    upper = to_tensor(max) if max is not None else None
    expected = to_tensor(expected_result)

    result = clamp_tensor(source, lower, upper)

    assert same(expected, result)
Code example #9
0
File: test_join_node.py  Project: jvitku/torchsim
def test_join_inverse_dim_0():
    creator = AllocatingCreator(device='cpu')
    dtype = creator.float32
    device = creator.device

    dim = 0

    input_shapes = [(2, 4), (2, 4)]

    # Joining along rows: the inverse splits the (4, 4) result back into
    # the two original (2, 4) tensors.
    top = creator.tensor([[1, 2, 3, 4], [5, 6, 7, 8]],
                         dtype=dtype,
                         device=device)
    bottom = creator.tensor([[9, 10, 11, 12], [13, 14, 15, 16]],
                            dtype=dtype,
                            device=device)

    _test_join_inverse(dim, input_shapes, [top, bottom], creator, dtype,
                       device)
Code example #10
0
File: test_join_node.py  Project: jvitku/torchsim
def test_join_inverse_dim_1():
    creator = AllocatingCreator(device='cpu')
    dtype = creator.float32
    device = creator.device

    dim = 1

    input_shapes = [(4, 2), (4, 2)]

    # Joining along columns: the inverse splits the (4, 4) result back into
    # the two original (4, 2) tensors.
    left = creator.tensor([[1, 2], [5, 6], [9, 10], [13, 14]],
                          dtype=dtype,
                          device=device)
    right = creator.tensor([[3, 4], [7, 8], [11, 12], [15, 16]],
                           dtype=dtype,
                           device=device)

    _test_join_inverse(dim, input_shapes, [left, right], creator, dtype,
                       device)
Code example #11
0
File: test_join_node.py  Project: jvitku/torchsim
def test_join_node_inverse_0():
    # TODO (Test): Make a dim = 1 variant
    # TODO (Test): Then, refactor tests here, maybe something to match the test class above, but for the backward projection.
    creator = AllocatingCreator(device='cpu')
    dtype = creator.float32
    device = creator.device

    dim = 0

    # The inverse projection should split the joined (4, 4) output back
    # into one (2, 4) tensor per input.
    expected_results = [
        creator.tensor([[1, 2, 3, 4], [5, 6, 7, 8]],
                       dtype=dtype,
                       device=device),
        creator.tensor([[9, 10, 11, 12], [13, 14, 15, 16]],
                       dtype=dtype,
                       device=device)
    ]

    source_blocks = [MemoryBlock(), MemoryBlock()]
    for block in source_blocks:
        block.tensor = creator.zeros((2, 4))

    joined_tensor = creator.tensor(
        [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]],
        dtype=dtype,
        device=device)

    node = JoinNode(dim, n_inputs=2)
    for index, block in enumerate(source_blocks):
        Connector.connect(block, node.inputs[index])

    packet = InversePassOutputPacket(joined_tensor, node.outputs.output)

    node.allocate_memory_blocks(creator)
    results = node.recursive_inverse_projection_from_output(packet)

    for want, got in zip(expected_results, results):
        assert same(want, got.tensor)
Code example #12
0
    def test_sample_learning_batch_smart_sampling(self, method, flock_indices,
                                                  device):
        """Test sampling methods.

        Tests if the balanced sampling chooses the data from each cluster equally,
        and if uniform sampling uniformly according to their prevalence.
        """

        buffer_size = 3000
        sixth = buffer_size // 6
        batch_size = 1800
        flock_size = 4
        n_cluster_centers = 4
        float_dtype = get_float(device)
        creator = AllocatingCreator(device)

        # Number of flocks sampled from: a subset when indices are given,
        # otherwise all of them.
        f_size = len(
            flock_indices) if flock_indices is not None else flock_size

        buffer = SPFlockBuffer(creator,
                               buffer_size=buffer_size,
                               n_cluster_centers=n_cluster_centers,
                               flock_size=flock_size,
                               input_size=5)
        if flock_indices is not None:
            buffer.set_flock_indices(
                creator.tensor(flock_indices, dtype=torch.int64,
                               device=device))

        buffer.data_since_last_sample[:] = 3  # just some value to check if it was correctly updated

        data0 = creator.tensor([1, 0, 0, 0, 0],
                               dtype=float_dtype,
                               device=device)
        data1 = creator.tensor([0, 0.2, -15, 0, 0],
                               dtype=float_dtype,
                               device=device)
        data2 = creator.tensor([1, 2, 3, 4, 5],
                               dtype=float_dtype,
                               device=device)

        buffer.total_data_written[:] = buffer_size
        # One-hot cluster assignments: the first sixth of the buffer belongs
        # to cluster 0, the next sixth to cluster 2, and the remaining four
        # sixths to cluster 3; cluster 1 holds no data at all.
        buffer.clusters.stored_data[:, :sixth] = creator.tensor(
            [1, 0, 0, 0], dtype=float_dtype, device=device)
        buffer.clusters.stored_data[:, sixth:(sixth * 2)] = creator.tensor(
            [0, 0, 1, 0], dtype=float_dtype, device=device)
        buffer.clusters.stored_data[:, (sixth * 2):] = creator.tensor(
            [0, 0, 0, 1], dtype=float_dtype, device=device)
        buffer.inputs.stored_data[:, :sixth] = data0
        buffer.inputs.stored_data[:, sixth:(sixth * 2)] = data1
        buffer.inputs.stored_data[:, (sixth * 2):] = data2

        # use some dummy value here to check that it is rewriting all the lines in res
        dummy_value = -2.1
        sampled_data = torch.full((f_size, batch_size, 5),
                                  fill_value=dummy_value,
                                  dtype=float_dtype,
                                  device=device)
        sampled_indices = buffer.sample_learning_batch(batch_size,
                                                       sampled_data, method)
        # Expand the sampled buffer positions so gather can pull the one-hot
        # cluster row stored at each sampled position.
        sampled_indices = sampled_indices.view(f_size, batch_size, 1).expand(
            f_size, batch_size, n_cluster_centers)
        sampled_clusters = torch.gather(buffer.clusters.get_stored_data(),
                                        dim=1,
                                        index=sampled_indices)

        # all data are taken from the buffer
        assert (sampled_data == dummy_value).any().item() == 0

        # Summing one-hot rows over the batch dim counts, per flock, how many
        # sampled points came from each cluster.
        n_points_from_each_cluster = sampled_clusters.sum(dim=1)

        # Its impossible to test the exact values so we check if it falls into reasonable boundaries around the
        # expected values.
        # mean number of points from clusters 0, 1, 2 and 3 should be;
        # UNIFORM:   300, 0, 300, 1200
        # BALANCED:  it is not 600, 0, 600, 600, because there is not enough data in the buffer and it is sampled
        # without replacement

        if method == SamplingMethod.UNIFORM:
            assert check_values_close(n_points_from_each_cluster[:, 0], 300,
                                      0.2)
            assert check_values_close(n_points_from_each_cluster[:, 1], 0, 0)
            assert check_values_close(n_points_from_each_cluster[:, 2], 300,
                                      0.2)
            assert check_values_close(n_points_from_each_cluster[:, 3], 1200,
                                      0.1)

        else:
            # BALANCED: cluster 1 is empty, clusters 0 and 2 should be drawn
            # roughly equally, and cluster 3 below its uniform share.
            assert check_values_close(n_points_from_each_cluster[:, 1], 0, 0)

            assert check_values_close(n_points_from_each_cluster[:, 0] -
                                      n_points_from_each_cluster[:, 2],
                                      0,
                                      tolerance_perc=0,
                                      tolerance_abs_value=100)
            assert (n_points_from_each_cluster[:, 3] < 1200).all()

        # check that all indices we sampled for are reset to zero
        # NOTE(review): when flock_indices is None this indexes with None,
        # which unsqueezes the tensor rather than selecting rows — the
        # assertion still covers every flock, but `[:]` was presumably
        # intended; verify against the parametrize cases.
        assert (buffer.data_since_last_sample[flock_indices] == 0).all()