Example No. 1
    def test_to_one_hot_speed(self, capsys):
        @measure_time(iterations=100, function_repetitions=100)
        def measured_step():
            mb.tensor.copy_(
                torch.rand(input_shape, dtype=get_float(device),
                           device=device))
            to_one_hot.step()

        input_shape = (150, )
        device = 'cuda'

        vector = torch.zeros(input_shape,
                             dtype=get_float(device),
                             device=device)

        mb = MemoryBlock()
        mb.tensor = torch.tensor(vector,
                                 device=device,
                                 dtype=get_float(device))

        to_one_hot = ToOneHotNode(mode=ToOneHotMode.RANDOM)
        Connector.connect(mb, to_one_hot.inputs.input)
        to_one_hot.allocate_memory_blocks(AllocatingCreator(device))

        with capsys.disabled():
            measured_step()
Example No. 2
def get_data(device: str):
    """Define test data for the landmarks."""

    positions = torch.tensor([
        0, 0, 40, 0, 70, 0, 99, 0, 0, 55, 30, 55, 60, 55, 99.9, 55, 0, 99, 30,
        99, 60, 98, 97, 98
    ],
                             device=device,
                             dtype=get_float(device)).view(-1, 2) / 100

    results = torch.tensor([0, 3, 6, 9, 1, 4, 7, 10, 2, 5, 8, 11],
                           device=device,
                           dtype=get_float(device))
    return positions, results
Example No. 3
    def __init__(self, indices: torch.Tensor, do_subflocking: bool,
                 buffer: TPFlockBuffer, cluster_data: torch.Tensor,
                 context_data: torch.Tensor, reward_data: torch.Tensor,
                 projection_outputs: torch.Tensor,
                 action_outputs: torch.Tensor, n_frequent_seqs,
                 n_cluster_centers, device):
        super().__init__(indices, do_subflocking)
        float_dtype = get_float(device)

        self.n_cluster_centers = n_cluster_centers

        self._buffer = self._get_buffer(buffer)

        self._cluster_data = self._read(cluster_data)
        self._context_data = self._read(context_data)
        self._reward_data = self._read(reward_data)
        self._projection_outputs = self._read_write(projection_outputs)
        self._action_outputs = self._read_write(action_outputs)

        self.dummy_explore = torch.zeros((self._flock_size, 1),
                                         dtype=float_dtype,
                                         device=device)
        self.dummy_seq_probs = torch.zeros((self._flock_size, n_frequent_seqs),
                                           dtype=float_dtype,
                                           device=device)
Example No. 4
def test_node_group_inverse_projection_fork_inside(device):
    float_dtype = get_float(device)

    graph = Topology(device)

    source_node = ConstantNode(shape=(2, 4), constant=0)
    fork_node = ForkNode(dim=1, split_sizes=[1, 3])
    join_node = JoinNode(dim=1, n_inputs=2)
    group_node = graph.create_generic_node_group('group', 1, 2)

    graph.add_node(source_node)
    group_node.add_node(fork_node)
    graph.add_node(join_node)

    Connector.connect(source_node.outputs.output, group_node.inputs[0])

    Connector.connect(group_node.inputs[0].output, fork_node.inputs.input)

    Connector.connect(fork_node.outputs[0], group_node.outputs[0].input)
    Connector.connect(fork_node.outputs[1], group_node.outputs[1].input)

    Connector.connect(group_node.outputs[0], join_node.inputs[0])
    Connector.connect(group_node.outputs[1], join_node.inputs[1])

    graph.step()

    output_tensor = torch.rand((2, 4), device=device, dtype=float_dtype)
    inverse_pass_packet = InversePassOutputPacket(output_tensor, join_node.outputs.output)
    results = join_node.recursive_inverse_projection_from_output(inverse_pass_packet)

    assert 1 == len(results)
    assert same(results[0].tensor, output_tensor)
Example No. 5
    def test_inverse_projection(self, data, seqs, likelihoods, n_top_sequences,
                                expected_output, device):
        float_type = get_float(device)
        t_data = torch.tensor(data, dtype=float_type, device=device)
        t_seqs = torch.tensor(seqs, dtype=torch.int64, device=device)
        t_likelihoods = torch.tensor(likelihoods,
                                     dtype=float_type,
                                     device=device)
        t_expected_output = torch.tensor(expected_output,
                                         dtype=float_type,
                                         device=device)
        flock_size, n_frequent_seq, seq_length = t_seqs.shape
        n_cluster_centers = t_data.shape[-1]

        flock = create_tp_flock(flock_size=flock_size,
                                seq_length=seq_length,
                                seq_lookahead=1,
                                n_frequent_seq=n_frequent_seq,
                                n_cluster_centers=n_cluster_centers,
                                device=device)

        flock.frequent_seqs = t_seqs
        flock.frequent_seq_likelihoods_priors_clusters_context = t_likelihoods
        output = flock.inverse_projection(t_data,
                                          n_top_sequences=n_top_sequences)
        assert same(t_expected_output, output, eps=1e-3)
Example No. 6
def test_forward_learn_enable_learning(enable_learning):
    device = 'cuda'
    float_dtype = get_float(device)
    flock = create_tp_flock(flock_size=1,
                            seq_length=3,
                            buffer_size=1000,
                            batch_size=4,
                            max_encountered_seq=800,
                            incoming_context_size=1,
                            exploration_probability=0,
                            enable_learning=enable_learning)

    seqs = torch.tensor([[[1, 0, 0]], [[0, 1, 0]], [[0, 0, 1]], [[0, 1, 0]]],
                        dtype=float_dtype,
                        device=device)

    initial_seq_occurrences = flock.all_encountered_seq_occurrences.clone()

    iterations = 4
    for k in range(iterations):
        cluster_data = seqs[k % 4]
        flock.forward_learn(cluster_data,
                            input_context=None,
                            input_rewards=None)

    # should be different if enable_learning == True
    assert (not same(initial_seq_occurrences,
                     flock.all_encountered_seq_occurrences)) == enable_learning
Example No. 7
    def test_rf_unit(self, device):
        float_dtype = get_float(device)
        parent_rf_size_x = parent_rf_size_y = 4
        n_channels = 4
        image_grid_size_x = image_grid_size_y = 16
        dimensions = (image_grid_size_y, image_grid_size_x, n_channels)
        parent_rf_dims = Size2D(parent_rf_size_y, parent_rf_size_x)
        unit = ReceptiveFieldUnit(AllocatingCreator(device),
                                  dimensions,
                                  parent_rf_dims,
                                  flatten_output_grid_dimensions=True)

        input_image = torch.zeros(image_grid_size_y,
                                  image_grid_size_x,
                                  n_channels,
                                  dtype=float_dtype,
                                  device=device)
        input_image[0, parent_rf_size_x, 0] = 1

        unit.step(input_image)
        node_output = unit.output_tensor

        n_parent_rfs = (image_grid_size_x // parent_rf_size_x) * (
            image_grid_size_y // parent_rf_size_y)
        assert node_output.shape == torch.Size(
            [n_parent_rfs, parent_rf_size_y, parent_rf_size_x, n_channels])
        assert node_output[1, 0, 0, 0] == 1
        # assert node_output.interpret_shape == [n_parent_rfs, parent_rf_size_y, parent_rf_size_x, n_channels]

        back_projection = unit.inverse_projection(node_output)
        # assert back_projection.interpret_shape == input_image.shape
        assert back_projection.equal(input_image)
Example No. 8
    def __init__(self, flock_size: int, n_frequent_seqs: int,
                 n_cluster_centers: int, seq_length: int, seq_lookahead: int,
                 device: str):
        super().__init__(device)

        self._flock_size = flock_size
        self._float_dtype = get_float(device)
        self.device = device

        self.n_frequent_seqs = n_frequent_seqs
        self.n_cluster_centers = n_cluster_centers

        self.seq_length = seq_length
        self.seq_lookahead = seq_lookahead
        self.seq_lookbehind = self.seq_length - self.seq_lookahead

        output_prob_scaling = self._generate_prob_scaling(
            seq_length, seq_lookahead)
        # tensor = torch.from_numpy(np.array(output_prob_scaling)).to(dtype=self._float_dtype, device=device)
        tensor = torch.tensor(output_prob_scaling,
                              dtype=self._float_dtype,
                              device=device)
        self._output_prob_scaling = self._expand_output_prob_scaling(tensor)

        self.frequent_seqs_scaled = torch.zeros(
            (self._flock_size, self.n_frequent_seqs, self.n_cluster_centers),
            dtype=self._float_dtype,
            device=device)
Example No. 9
def test_whole_flock_flock_sizes(flock_size, n_providers):
    input_size = 1
    context_size = 5
    device = 'cuda'
    float_dtype = get_float(device)

    iterations = 10  # Needs to be high enough to also run the learning of the TP.

    flock = create_flock(flock_size=flock_size,
                         input_size=input_size,
                         context_size=context_size,
                         n_providers=n_providers,
                         device=device)

    for i in range(iterations):
        data = torch.rand(flock_size,
                          input_size,
                          dtype=float_dtype,
                          device=device)
        context = torch.rand(flock_size,
                             n_providers,
                             NUMBER_OF_CONTEXT_TYPES,
                             context_size,
                             dtype=float_dtype,
                             device=device)
        flock.run(data, context)
Example No. 10
    def test_inverse_projection(self, device):
        float_dtype = get_float(device)

        params = ExpertParams()
        params.flock_size = 2
        params.n_cluster_centers = 4

        params.spatial.input_size = 5
        params.spatial.buffer_size = 7
        params.spatial.batch_size = 6

        flock = SPFlock(params, AllocatingCreator(device))

        flock.cluster_centers = torch.tensor(
            [[[1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 0.5, 0.5, 0],
              [0, 0, 0.5, 0, 0.5]],
             [[1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 1, 0, 0],
              [0, 0, 0, 1, 0]]],
            dtype=float_dtype,
            device=device)

        data = torch.tensor([[0, 0, 1, 0], [0.2, 0.3, 0.4, 0.1]],
                            dtype=float_dtype,
                            device=device)

        result = flock.inverse_projection(data)

        expected_projection = torch.tensor(
            [[0, 0, 0.5, 0.5, 0], [0.2, 0.3, 0.4, 0.1, 0]],
            dtype=float_dtype,
            device=device)

        assert same(expected_projection, result)
Example No. 11
    def test_compute_squared_distances(self):
        input_size = 2
        flock_size = 2
        device = 'cuda'
        float_dtype = get_float(device)
        all_indices = torch.arange(flock_size,
                                   dtype=torch.int64,
                                   device=device)
        process = SPProcessStub(all_indices,
                                do_subflocking=True,
                                n_cluster_centers=3,
                                input_size=input_size,
                                device=device)
        cluster_centers = torch.tensor(
            [[[-1, 2], [1, 2], [2, 2]], [[4, 0], [5, 0], [6, 0]]],
            dtype=float_dtype,
            device=device)
        data = torch.tensor([[0.4, 1], [1.1, -1]],
                            dtype=float_dtype,
                            device=device).unsqueeze_(1)

        dist = process._compute_squared_distances(cluster_centers, data)
        expected_result = torch.tensor(
            [[[2.9600, 1.3600, 3.5600]], [[9.4100, 16.2100, 25.0100]]],
            dtype=float_dtype,
            device=device)
        assert same(expected_result, dist, eps=1e-1)
Example No. 12
    def test_forward_learn_enable_learning(self, enable_learning, SP_type):
        device = 'cuda'
        float_dtype = get_float(device)

        params = ExpertParams()
        params.flock_size = 1
        params.n_cluster_centers = 4

        params.spatial.input_size = 2
        params.spatial.cluster_boost_threshold = 2
        params.spatial.learning_rate = 0.1
        params.spatial.learning_period = 1
        params.spatial.batch_size = 4
        params.spatial.max_boost_time = 10
        assert params.spatial.enable_learning  # True should be the default value
        params.spatial.enable_learning = enable_learning

        flock = SP_type(params, AllocatingCreator(device))

        data = torch.tensor([[0., 0], [1., 0], [0., 1], [1, 1]],
                            dtype=float_dtype,
                            device=device)
        initial_cluster_centers = flock.cluster_centers.clone()

        for input in data:
            flock.forward_learn(input.view(1, -1))

        # should be different if enable_learning == True
        assert (not same(initial_cluster_centers,
                         flock.cluster_centers)) == enable_learning
Example No. 13
def id_to_one_hot(data: torch.Tensor,
                  vector_len: int,
                  dtype: Optional[torch.dtype] = None):
    """Converts ID to one-hot representation.

    Each element of `data` is converted into a one-hot representation: a vector of
    length `vector_len` that is all zeros except for a single 1 at the index given by the element's value.

    Args:
        data: IDs of classes; each ID must satisfy 0 <= ID < vector_len
        vector_len: length of the output tensor for each one-hot-representation
        dtype: data type of the output tensor

    Returns:
        Tensor of shape data.shape + (vector_len,) with the one-hot encoding.
        For example, integer cluster indices of shape [flock_size, batch_size] are converted into
        the one-hot representation of shape [flock_size, batch_size, n_cluster_centers].
    """
    device = data.device
    dtype = dtype or get_float(device)

    data_a = data.view(-1, 1)
    n_samples = data_a.shape[0]
    output = torch.zeros(n_samples, vector_len, device=device, dtype=dtype)
    output.scatter_(1, data_a, 1)
    output_dims = data.size() + (vector_len, )
    return output.view(output_dims)
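
A minimal usage sketch of `id_to_one_hot` as defined above, assuming only that `torch` is importable and that the function is in scope; the input values and `vector_len` are illustrative and not taken from the project's tests.

import torch

# Hypothetical IDs for two flocks with two samples each, shape [2, 2].
ids = torch.tensor([[0, 2], [1, 1]], dtype=torch.int64)

# One-hot encode with vector_len=3 and an explicit dtype; the result has
# shape ids.shape + (vector_len,) == [2, 2, 3], as described in the docstring.
one_hot = id_to_one_hot(ids, vector_len=3, dtype=torch.float32)

# one_hot[0, 1] is tensor([0., 0., 1.]) because ids[0, 1] == 2.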
Example No. 14
    def test_to_one_hot_in_process(self, device):
        flock_size = 2
        n_cluster_centers = 3
        float_dtype = get_float(device)
        all_indices = torch.arange(flock_size,
                                   dtype=torch.int64,
                                   device=device)
        process = SPProcessStub(all_indices,
                                do_subflocking=True,
                                n_cluster_centers=n_cluster_centers,
                                input_size=1,
                                device=device)
        closest_cluster_centers = torch.tensor([[0, 1], [2, 2], [1, 2]],
                                               dtype=torch.int64,
                                               device=device)

        result = id_to_one_hot(closest_cluster_centers,
                               process._n_cluster_centers,
                               dtype=float_dtype)
        expected_result = torch.tensor(
            [[[1, 0, 0], [0, 1, 0]], [[0, 0, 1], [0, 0, 1]],
             [[0, 1, 0], [0, 0, 1]]],
            dtype=float_dtype,
            device=device)

        assert same(expected_result, result)
Example No. 15
    def test_overlapping_rfs(self, device):
        float_dtype = get_float(device)
        x = 3
        y = 4
        input_image_1 = torch.arange(0, x * y, dtype=float_dtype, device=device).view(y, x)
        input_image_2 = torch.rand_like(input_image_1, dtype=float_dtype, device=device)
        grids = Grids(Size2D(y, x), parent_rf_dims=Size2D(2, 2), parent_rf_stride=Stride(1, 1),
                      flatten_output_grid_dimensions=True)
        mapping = Mapping.from_default_input(grids, chunk_size=1, device=device)

        expert_input = mapping.map(input_image_1)
        assert expert_input.equal(torch.tensor(
            [[0, 1, 3, 4],
             [1, 2, 4, 5],
             [3, 4, 6, 7],
             [4, 5, 7, 8],
             [6, 7, 9, 10],
             [7, 8, 10, 11]],
            dtype=float_dtype,
            device=device
        ).view(6, 2, 2, 1))

        back_projection_1 = mapping.inverse_map(expert_input)
        assert input_image_1.equal(back_projection_1)
        back_projection_2 = mapping.inverse_map(mapping.map(input_image_2))
        assert input_image_2.equal(back_projection_2)
Example No. 16
def test_compute_squared_distances(capsys):
    @measure_time(iterations=10)
    def measured_function():
        sp_process_kernels.compute_squared_distances(data, cluster_centers,
                                                     distances,
                                                     n_cluster_centers,
                                                     batch_size, input_size,
                                                     flock_size)

    input_size = 64 * 64 * 3
    flock_size = 20
    batch_size = 3000
    n_cluster_centers = 20
    device = 'cuda'
    float_dtype = get_float(device)

    cluster_centers = torch.rand((flock_size, n_cluster_centers, input_size),
                                 dtype=float_dtype,
                                 device=device)
    # cluster_centers_expanded = cluster_centers.unsqueeze(dim=1).expand(flock_size, batch_size, n_cluster_centers,
    #                                                                    input_size)

    data = torch.rand((flock_size, batch_size, input_size),
                      dtype=float_dtype,
                      device=device)
    # data_expanded = data.unsqueeze(dim=2).expand(flock_size, batch_size, n_cluster_centers, input_size)

    distances = torch.full((flock_size, batch_size, n_cluster_centers),
                           fill_value=-2.3,
                           dtype=float_dtype,
                           device=device)
    with capsys.disabled():
        measured_function()
Example No. 17
    def __init__(self, params: ExpertParams, creator: TensorCreator):
        super().__init__(creator.device)
        float_dtype = get_float(self._device)

        self.params = params
        flock_size = params.flock_size
        self.n_cluster_centers = params.n_cluster_centers
        self.seq_lookbehind = params.temporal.seq_lookbehind
        # self.context_size = self.n_cluster_centers * 2
        self.n_providers = self.params.temporal.n_providers

        # Context is: <SP_output>
        #             <Rewards>
        #             <Punishments>
        #
        #             <Pred_clusters for next step>
        #             <NaNs>
        #             <NaNs>

        # With optional NaN Padding depending on the context size in the params
        self.output_context = creator.full(
            (flock_size, 2, NUMBER_OF_CONTEXT_TYPES, self.n_cluster_centers),
            fill_value=float("nan"),
            device=self._device,
            dtype=float_dtype)

        self.index_tensor = creator.arange(
            start=0, end=flock_size,
            device=self._device).view(-1, 1).expand(flock_size,
                                                    self.n_cluster_centers)

        self.create_flocks(params, creator)
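
For orientation, a minimal sketch of the `output_context` tensor allocated above; the sizes are assumed placeholder values, and NUMBER_OF_CONTEXT_TYPES is a project constant taken here to be 3, matching the three rows listed per block in the comment (SP output, rewards, punishments).

import torch

# Assumed illustrative sizes (not taken from the project).
flock_size, n_cluster_centers = 2, 4
NUMBER_OF_CONTEXT_TYPES = 3

# Same shape and NaN fill as the creator.full(...) call in __init__ above.
output_context = torch.full(
    (flock_size, 2, NUMBER_OF_CONTEXT_TYPES, n_cluster_centers),
    fill_value=float("nan"))

print(output_context.shape)  # torch.Size([2, 2, 3, 4])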
Example No. 18
    def test_average_buffer(self, device, buffer, expected_result):
        float_type = get_float(device)
        t_buffer = torch.tensor(buffer, dtype=torch.uint8, device=device)
        t_expected = torch.tensor(expected_result,
                                  dtype=float_type,
                                  device=device)
        result = AccuracyUnit._average_buffer(t_buffer)
        assert same(t_expected, result, eps=SMALL_CONSTANT)
Example No. 19
    def __init__(self, grids: Grids, device: str, data_size: int):
        self._data_size = data_size
        self._device = device
        self._grids = grids
        self._float_dtype = get_float(self._device)

        # compute indices
        self._parent_map = self._grids.gen_positioned_parent_child_map()
Example No. 20
def test_move_probabilities_towards_50_inplace_vs_not(device):
    float_type = get_float(device)

    input = torch.rand((1, 2, 3, 4), dtype=float_type, device=device)

    not_inplace = move_probs_towards_50(input)
    move_probs_towards_50_(input)

    assert same(input, not_inplace, eps=1e-4)
Example No. 21
def test_forward_subflock_integration(device):
    flock_size = 10
    subflock_size = 6
    n_cluster_centers = 4
    context_size = n_cluster_centers
    buffer_size = 7
    float_dtype = get_float(device)
    n_providers = 1

    params = ExpertParams()
    params.flock_size = flock_size
    params.n_cluster_centers = n_cluster_centers

    params.temporal.n_providers = n_providers
    params.temporal.incoming_context_size = context_size
    params.temporal.buffer_size = buffer_size
    params.temporal.batch_size = buffer_size

    flock, indices, indices_np = get_subflock_integration_testing_flock(
        params, subflock_size, device)

    cluster_data = torch.rand((flock_size, n_cluster_centers),
                              dtype=float_dtype,
                              device=device)
    context_data = torch.rand(
        (flock_size, n_providers, NUMBER_OF_CONTEXT_TYPES, n_cluster_centers),
        dtype=float_dtype,
        device=device)
    reward_data = torch.rand((flock_size, 2), dtype=float_dtype, device=device)

    forward_factory = TrainedForwardProcessFactory()
    forward = forward_factory.create(flock, cluster_data, context_data,
                                     reward_data, indices, device)
    # TODO (Test): add other tensors from the process, check this also for the untrained_forward_process
    should_update = [
        (flock.projection_outputs, forward._projection_outputs),
        (flock.action_rewards, forward._action_rewards),
        (flock.action_outputs, forward._action_outputs),
    ]

    should_not_update = [
        (flock.frequent_seqs, forward._frequent_seqs),
        (flock.frequent_seq_occurrences, forward._frequent_seq_occurrences),
        (flock.frequent_context_likelihoods,
         forward._frequent_context_likelihoods),
    ]

    randomize_subflock(should_update, should_not_update)

    expected_results = calculate_expected_results(should_update,
                                                  should_not_update,
                                                  flock_size, indices_np)

    forward.integrate()

    check_integration_results(expected_results, should_update,
                              should_not_update)
Example No. 22
    def test_compute_accuracy(self, device, input_a, input_b, expected_result):
        float_type = get_float(device)
        t_a = torch.tensor(input_a, dtype=float_type, device=device)
        t_b = torch.tensor(input_b, dtype=float_type, device=device)
        t_expected = torch.tensor(expected_result,
                                  dtype=torch.uint8,
                                  device=device)
        # assert expected_result - AccuracyUnit._compute_accuracy(t_a, t_b) < SMALL_CONSTANT
        result = AccuracyUnit._compute_accuracy(t_a, t_b)
        assert same(t_expected, result)
Example No. 23
def test_kl_divergence(device, p, q, expected_result):
    float_type = get_float(device)
    t_p = torch.tensor(p, dtype=float_type, device=device)
    t_q = torch.tensor(q, dtype=float_type, device=device)
    t_expected_result = torch.tensor(expected_result,
                                     dtype=float_type,
                                     device=device)
    t_result = torch.zeros((t_p.shape[0], ), device=device)
    kl_divergence(t_p, t_q, t_result, dim=1)
    assert same(t_expected_result, t_result, eps=1e-4)
Example No. 24
        def setup_class(cls, device: str = 'cuda'):
            super().setup_class()
            device = cls._creator.device
            dtype = get_float(device)

            indices = cls._creator.tensor([2, 0, 1, 0, 0, 0],
                                          dtype=torch.long,
                                          device=device)
            cls._expected_images_tensor.copy_(
                gather_from_dim(cls._expected_images_tensor, indices))
            cls._skip_checking = [0]
Example No. 25
    def test_compute_output_projection_per_sequence(self, device, seqs, expected_output_projection):
        seqs_tensor = torch.tensor(seqs, device=device, dtype=torch.int64)
        expected_output_projection_tensor = torch.tensor(expected_output_projection, device=device,
                                                         dtype=get_float(device))
        flock_size, n_frequent_seqs, seq_length = seqs_tensor.shape
        n_cluster_centers = 4
        seq_lookahead = 1

        output_projection = TPOutputProjection(flock_size, n_frequent_seqs, n_cluster_centers, seq_length,
                                               seq_lookahead, device)

        projection_outputs = torch.zeros((flock_size, n_frequent_seqs, n_cluster_centers), device=device,
                                         dtype=get_float(device))

        output_projection.compute_output_projection_per_sequence(
            seqs_tensor,
            projection_outputs
        )

        assert same(expected_output_projection_tensor, projection_outputs)
Example No. 26
    def __init__(self,
                 indices: torch.Tensor,
                 do_subflocking: bool,
                 n_cluster_centers: int,
                 input_size: int,
                 device='cuda'):
        super().__init__(indices, do_subflocking)

        self._device = device
        self._n_cluster_centers = n_cluster_centers
        self._input_size = input_size
        self._float_dtype = get_float(self._device)
Example No. 27
    def test_scatter_node(self, device, input, mask, output_shape, dimension, expected_result):
        float_dtype = get_float(device)
        input_mb = MemoryBlock()
        input_mb.tensor = torch.tensor(input, device=device, dtype=float_dtype)
        expected = torch.tensor(expected_result, device=device, dtype=float_dtype)
        sn = ScatterNode(mapping=mask, output_shape=output_shape, dimension=dimension, device=device)

        Connector.connect(input_mb, sn.inputs.input)
        sn.allocate_memory_blocks(AllocatingCreator(device))
        sn.step()

        assert same(sn.outputs.output.tensor, expected)
Example No. 28
    def setup_class(cls, device: str = 'cpu'):
        super().setup_class(device)
        cls.float_dtype = get_float(cls._device)
        cls.parent_rf_size_x = 2
        cls.parent_rf_size_y = 3
        cls.n_channels = 5
        cls.image_grid_size_x = 4
        cls.image_grid_size_y = 6
        cls.dimensions = (cls.image_grid_size_y, cls.image_grid_size_x,
                          cls.n_channels)
        cls.parent_rf_dims = Size2D(cls.parent_rf_size_y, cls.parent_rf_size_x)
        cls.parent_rf_stride_dims = Stride(3, 1)
Example No. 29
def get_subflock_creation_testing_flock(
        flock_size=10,
        subflock_size=6,
        input_size=5,
        buffer_size=7,
        batch_size=5,
        n_cluster_centers=4,
        device='cpu',
        sampling_method=SamplingMethod.LAST_N) -> Tuple[SPFlock, torch.Tensor]:
    # Generate a flock with some data
    float_dtype = get_float(device)
    params = ExpertParams()
    params.flock_size = flock_size
    params.n_cluster_centers = n_cluster_centers

    params.spatial.input_size = input_size
    params.spatial.buffer_size = buffer_size
    params.spatial.batch_size = batch_size
    params.spatial.sampling_method = sampling_method

    flock = SPFlock(params, AllocatingCreator(device))

    flock.buffer.inputs.stored_data = torch.rand(
        (flock_size, buffer_size, input_size),
        dtype=float_dtype,
        device=device)
    flock.buffer.clusters.stored_data = torch.rand(
        (flock_size, buffer_size, n_cluster_centers),
        dtype=float_dtype,
        device=device)  # NOTE: This is not one-hot and thus not real data
    flock.buffer.current_ptr = torch.rand(flock_size,
                                          device=device).type(torch.int64)
    flock.buffer.total_data_written = torch.rand(
        flock_size, device=device).type(torch.int64)

    # The bookkeeping values which are copied across
    flock.cluster_boosting_durations = torch.rand(
        (flock_size, n_cluster_centers), device=device).type(torch.int64)
    flock.prev_boosted_clusters = torch.clamp(
        torch.round(torch.rand((flock_size, n_cluster_centers),
                               device=device)), 0, 1).type(torch.uint8)
    flock.boosting_targets = torch.rand((flock_size, n_cluster_centers),
                                        device=device).type(torch.int64)

    # Indices which are to be subflocked
    indices = torch.tensor(np.random.choice(flock_size,
                                            subflock_size,
                                            replace=False),
                           dtype=torch.int64,
                           device=device)

    return flock, indices
Example No. 30
    def _create_expected_images_tensor(creator: AllocatingCreator):
        device = creator.device
        dtype = get_float(device)

        expected_images_tensor = creator.full([3, 2, 3, 3],
                                              dtype=dtype,
                                              device=device,
                                              fill_value=1.0)
        expected_images_tensor[0, 1, 2, :] = creator.tensor([1.0, 0.0, 0.0])
        expected_images_tensor[1, 1, 2, :] = creator.tensor([0.0, 1.0, 0.0])
        expected_images_tensor[2, 1, 2, :] = creator.tensor([0.0, 0.0, 1.0])

        return creator.cat([expected_images_tensor, expected_images_tensor])