Code example #1
    def test_determine_learning(self, mask, total_data_written,
                                data_since_last_sample, expected_learn_tensor):
        """Test for the mechanism which determines if the convolutional spatial pooler should learn.

        Args:
              mask (torch.Tensor): Mask of which experts performed a forward pass.
              total_data_written (int): How much data has been written to the buffer.
              data_since_last_sample (int): How much data has been written since the last learning pass.
              expected_learn_tensor (torch.Tensor): What mask we expect for the learning pass.

        - No data stored, insufficient learning period, so no learning can run
        - Correct learning period, but insufficient data for batch
        - Sufficient data, incorrect learning period
        - Sufficient data and learn period and all forwarded
        - Sufficient data and learn period, but none forwarded
        """
        device = 'cpu'
        params = ExpertParams()
        params.flock_size = 7
        params.n_cluster_centers = 5

        s_pooler = params.spatial
        s_pooler.batch_size = 10
        s_pooler.learning_period = 5

        flock = ConvSPFlock(params, AllocatingCreator(device))

        flock.common_buffer.total_data_written.fill_(total_data_written)
        flock.common_buffer.data_since_last_sample.fill_(
            data_since_last_sample)
        learn_tensor = flock._determine_learning(mask)

        assert same(expected_learn_tensor, learn_tensor)
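A minimal sketch (not from the torchsim sources) of the gating rule described by the docstring above: learning runs only for experts that performed a forward pass, and only once at least one full batch has been written and the learning period has elapsed. The function name, the scalar counters and the defaults are assumptions for illustration; the real SPFlock._determine_learning may differ in detail.

import torch


def determine_learning_sketch(mask: torch.Tensor,
                              total_data_written: int,
                              data_since_last_sample: int,
                              batch_size: int = 10,
                              learning_period: int = 5) -> torch.Tensor:
    # Hypothetical approximation: a full batch must be available and the
    # learning period must have elapsed; only forwarded experts then learn.
    enough_data = total_data_written >= batch_size
    period_elapsed = data_since_last_sample >= learning_period
    if enough_data and period_elapsed:
        return mask.clone()
    return torch.zeros_like(mask)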
Code example #2
    def test_determine_learning(self, mask, total_data_written,
                                data_since_last_sample, expected_learn_tensor,
                                device):
        """TODO: one-line summary here.

        - No data stored,  insufficient learning period, so no learning can run
        - Correct learning period, but insufficient data for batch
        - Sufficient data, incorrect learning period
        - Sufficient data and learn period and all forwarded
        - Sufficient data and learn period, but none forwarded
        - Sufficient data and learn period, and some forward
        - Different combinations of all three
        """
        params = ExpertParams()
        params.flock_size = 7
        params.n_cluster_centers = 5
        mask = mask.to(device)
        expected_learn_tensor = expected_learn_tensor.to(device)

        s_pooler = params.spatial
        s_pooler.batch_size = 10
        s_pooler.learning_period = 5

        flock = SPFlock(params, creator=AllocatingCreator(device))
        copy_or_fill_tensor(flock.buffer.total_data_written,
                            total_data_written)
        copy_or_fill_tensor(flock.buffer.data_since_last_sample,
                            data_since_last_sample)
        learn_tensor = flock._determine_learning(mask)
        assert same(expected_learn_tensor, learn_tensor)
Code example #3
    def test_forward_learn_enable_learning(self, enable_learning, SP_type):
        device = 'cuda'
        float_dtype = get_float(device)

        params = ExpertParams()
        params.flock_size = 1
        params.n_cluster_centers = 4

        params.spatial.input_size = 2
        params.spatial.cluster_boost_threshold = 2
        params.spatial.learning_rate = 0.1
        params.spatial.learning_period = 1
        params.spatial.batch_size = 4
        params.spatial.max_boost_time = 10
        assert params.spatial.enable_learning  # True should be the default value
        params.spatial.enable_learning = enable_learning

        flock = SP_type(params, AllocatingCreator(device))

        data = torch.tensor([[0., 0], [1., 0], [0., 1], [1, 1]],
                            dtype=float_dtype,
                            device=device)
        initial_cluster_centers = flock.cluster_centers.clone()

        for input in data:
            flock.forward_learn(input.view(1, -1))

        # should be different if enable_learning == True
        assert (not same(initial_cluster_centers,
                         flock.cluster_centers)) == enable_learning
Code example #4
    def test_inverse_projection(self, device):
        float_dtype = get_float(device)

        params = ExpertParams()
        params.flock_size = 2
        params.n_cluster_centers = 4

        params.spatial.input_size = 5
        params.spatial.buffer_size = 7
        params.spatial.batch_size = 6

        flock = SPFlock(params, AllocatingCreator(device))

        flock.cluster_centers = torch.tensor(
            [[[1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 0.5, 0.5, 0],
              [0, 0, 0.5, 0, 0.5]],
             [[1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 1, 0, 0],
              [0, 0, 0, 1, 0]]],
            dtype=float_dtype,
            device=device)

        data = torch.tensor([[0, 0, 1, 0], [0.2, 0.3, 0.4, 0.1]],
                            dtype=float_dtype,
                            device=device)

        result = flock.inverse_projection(data)

        expected_projection = torch.tensor(
            [[0, 0, 0.5, 0.5, 0], [0.2, 0.3, 0.4, 0.1, 0]],
            dtype=float_dtype,
            device=device)

        assert same(expected_projection, result)
Code example #5
    def create_flock_params(
            num_cluster_centers: Sequence[int], learning_rate: Sequence[float],
            buffer_size: Sequence[int], batch_size: Sequence[int],
            sampling_method: SamplingMethod,
            cluster_boost_threshold: Sequence[int], flock_size: int,
            max_boost_time: int, num_layers: int):

        params_list = []

        assert len(num_cluster_centers) == num_layers
        assert len(buffer_size) == num_layers
        assert len(batch_size) == num_layers
        assert len(learning_rate) == num_layers

        for layer_id in range(0, num_layers):
            params = ExpertParams()

            params.n_cluster_centers = num_cluster_centers[layer_id]
            params.flock_size = flock_size

            params.spatial.buffer_size = buffer_size[layer_id]
            params.spatial.batch_size = batch_size[layer_id]
            params.spatial.cluster_boost_threshold = cluster_boost_threshold[
                layer_id]
            params.spatial.max_boost_time = max_boost_time  # should be bigger than any cluster_boost_threshold
            params.spatial.learning_rate = learning_rate[layer_id]
            params.spatial.sampling_method = sampling_method
            # params.compute_reconstruction = True

            params_list.append(params)

        # just the top expert computes reconstruction
        params_list[-1].compute_reconstruction = True

        return params_list
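Hypothetical call of the helper above (assuming it is reachable as a plain function or staticmethod); the per-layer lists must match num_layers, and the concrete values here are purely illustrative:

params_list = create_flock_params(num_cluster_centers=[100, 20],
                                  learning_rate=[0.1, 0.05],
                                  buffer_size=[3000, 1000],
                                  batch_size=[300, 100],
                                  sampling_method=SamplingMethod.BALANCED,
                                  cluster_boost_threshold=[100, 100],
                                  flock_size=1,
                                  max_boost_time=1000,
                                  num_layers=2)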
Code example #6
    def _create_and_connect_agent(self, join_node: JoinNode, fork_node: ForkNode):
        params = ExpertParams()
        params.flock_size = 1
        params.n_cluster_centers = 28
        params.compute_reconstruction = True
        params.spatial.cluster_boost_threshold = 1000
        params.spatial.buffer_size = 500
        params.spatial.batch_size = 500
        params.spatial.learning_rate = 0.3
        params.spatial.learning_period = 50

        # conv_expert = ConvExpertFlockNode(params, name="Conv. expert")
        # conv_expert = SpatialPoolerFlockNode(params, name=" SP")
        conv_expert = ExpertFlockNode(params, name=" expert")

        self.add_node(conv_expert)

        unsqueeze_node_0 = UnsqueezeNode(0)
        self.add_node(unsqueeze_node_0)

        Connector.connect(join_node.outputs.output, unsqueeze_node_0.inputs.input)
        Connector.connect(unsqueeze_node_0.outputs.output, conv_expert.inputs.sp.data_input)

        def squeeze(inputs, outputs):
            outputs[0].copy_(inputs[0].squeeze(0))

        squeeze_node = LambdaNode(squeeze, 1, [(sum(fork_node._split_sizes),)],
                                  name="squeeze lambda node")

        self.add_node(squeeze_node)

        Connector.connect(conv_expert.outputs.sp.predicted_reconstructed_input, squeeze_node.inputs[0])
        Connector.connect(squeeze_node.outputs[0], fork_node.inputs.input)
Code example #7
def create_flock(flock_size=2,
                 input_size=7,
                 context_size=1,
                 n_providers=1,
                 device='cuda'):

    params = ExpertParams()
    params.flock_size = flock_size
    params.n_cluster_centers = 5

    sp_pooler = params.spatial
    sp_pooler.input_size = input_size
    sp_pooler.buffer_size = 10
    sp_pooler.batch_size = 3
    sp_pooler.learning_rate = 0.1
    sp_pooler.cluster_boost_threshold = 1
    sp_pooler.max_boost_time = 2
    sp_pooler.learning_period = 1

    tp_pooler = params.temporal
    tp_pooler.incoming_context_size = context_size
    tp_pooler.buffer_size = 15
    tp_pooler.batch_size = 10
    tp_pooler.learning_period = 1
    tp_pooler.seq_length = 4
    tp_pooler.seq_lookahead = 2
    tp_pooler.n_frequent_seqs = 20
    tp_pooler.max_encountered_seqs = 120
    tp_pooler.forgetting_limit = 5
    tp_pooler.n_providers = n_providers

    return ExpertFlock(params, AllocatingCreator(device))
Code example #8
 def _create_expert_params() -> ExpertParams:
     expert_params = ExpertParams()
     expert_params.flock_size = 1
     expert_params.n_cluster_centers = 200
     expert_params.compute_reconstruction = True
     expert_params.spatial.batch_size = 1000
     expert_params.spatial.buffer_size = 1010
     expert_params.spatial.cluster_boost_threshold = 200
     return expert_params
Code example #9
File: test_integration.py Project: jvitku/torchsim
def test_forward_subflock_integration(device):
    flock_size = 10
    subflock_size = 6
    n_cluster_centers = 4
    context_size = n_cluster_centers
    buffer_size = 7
    float_dtype = get_float(device)
    n_providers = 1

    params = ExpertParams()
    params.flock_size = flock_size
    params.n_cluster_centers = n_cluster_centers

    params.temporal.n_providers = n_providers
    params.temporal.incoming_context_size = context_size
    params.temporal.buffer_size = buffer_size
    params.temporal.batch_size = buffer_size

    flock, indices, indices_np = get_subflock_integration_testing_flock(
        params, subflock_size, device)

    cluster_data = torch.rand((flock_size, n_cluster_centers),
                              dtype=float_dtype,
                              device=device)
    context_data = torch.rand(
        (flock_size, n_providers, NUMBER_OF_CONTEXT_TYPES, n_cluster_centers),
        dtype=float_dtype,
        device=device)
    reward_data = torch.rand((flock_size, 2), dtype=float_dtype, device=device)

    forward_factory = TrainedForwardProcessFactory()
    forward = forward_factory.create(flock, cluster_data, context_data,
                                     reward_data, indices, device)
    # TODO (Test): add other tensors from the process, check this also for the untrained_forward_process
    should_update = [
        (flock.projection_outputs, forward._projection_outputs),
        (flock.action_rewards, forward._action_rewards),
        (flock.action_outputs, forward._action_outputs),
    ]

    should_not_update = [
        (flock.frequent_seqs, forward._frequent_seqs),
        (flock.frequent_seq_occurrences, forward._frequent_seq_occurrences),
        (flock.frequent_context_likelihoods,
         forward._frequent_context_likelihoods),
    ]

    randomize_subflock(should_update, should_not_update)

    expected_results = calculate_expected_results(should_update,
                                                  should_not_update,
                                                  flock_size, indices_np)

    forward.integrate()

    check_integration_results(expected_results, should_update,
                              should_not_update)
Code example #10
    def __init__(self):
        super().__init__(device='cuda')

        self.sp_params = ExpertParams()
        self.sp_params.n_cluster_centers = 4
        self.sp_params.spatial.input_size = 28 * 28
        self.sp_params.flock_size = 4
        self.sp_params.spatial.buffer_size = 100
        self.sp_params.spatial.batch_size = 45

        mnist_params0 = DatasetMNISTParams(class_filter=[0],
                                           one_hot_labels=False)
        mnist_params1 = DatasetMNISTParams(class_filter=[1],
                                           one_hot_labels=False)
        mnist_params2 = DatasetMNISTParams(class_filter=[2],
                                           one_hot_labels=False)
        mnist_params3 = DatasetMNISTParams(class_filter=[3],
                                           one_hot_labels=False)

        expert = ConvExpertFlockNode(self.sp_params.clone())
        mnist0 = DatasetMNISTNode(params=mnist_params0)
        mnist1 = DatasetMNISTNode(params=mnist_params1)
        mnist2 = DatasetMNISTNode(params=mnist_params2)
        mnist3 = DatasetMNISTNode(params=mnist_params3)

        expand0 = ExpandNode(dim=0, desired_size=1)
        expand1 = ExpandNode(dim=0, desired_size=1)
        expand2 = ExpandNode(dim=0, desired_size=1)
        expand3 = ExpandNode(dim=0, desired_size=1)

        join = JoinNode(dim=0, n_inputs=4)

        self.add_node(mnist0)
        self.add_node(mnist1)
        self.add_node(mnist2)
        self.add_node(mnist3)

        self.add_node(expand0)
        self.add_node(expand1)
        self.add_node(expand2)
        self.add_node(expand3)

        self.add_node(expert)
        self.add_node(join)

        Connector.connect(mnist0.outputs.data, expand0.inputs.input)
        Connector.connect(mnist1.outputs.data, expand1.inputs.input)
        Connector.connect(mnist2.outputs.data, expand2.inputs.input)
        Connector.connect(mnist3.outputs.data, expand3.inputs.input)

        Connector.connect(expand0.outputs.output, join.inputs[0])
        Connector.connect(expand1.outputs.output, join.inputs[1])
        Connector.connect(expand2.outputs.output, join.inputs[2])
        Connector.connect(expand3.outputs.output, join.inputs[3])

        Connector.connect(join.outputs.output, expert.inputs.sp.data_input)
Code example #11
File: test_integration.py Project: jvitku/torchsim
def test_learning_subflock_integration():
    flock_size = 10
    subflock_size = 6
    n_cluster_centers = 4
    context_size = n_cluster_centers
    buffer_size = 7
    device = 'cpu'

    params = ExpertParams()
    params.flock_size = flock_size
    params.n_cluster_centers = n_cluster_centers

    params.temporal.incoming_context_size = context_size
    params.temporal.buffer_size = buffer_size
    params.temporal.batch_size = buffer_size

    flock, indices, indices_np = get_subflock_integration_testing_flock(
        params, subflock_size, device)

    forward = flock._create_learning_process(indices)

    should_update = [
        (flock.all_encountered_seqs, forward._all_encountered_seqs),
        (flock.all_encountered_seq_occurrences,
         forward._all_encountered_seq_occurrences),
        (flock.all_encountered_context_occurrences,
         forward._all_encountered_context_occurrences),
        (flock.frequent_seqs, forward._frequent_seqs),
        (flock.frequent_seq_occurrences, forward._frequent_seq_occurrences),
        (flock.frequent_context_likelihoods,
         forward._frequent_context_likelihoods),
    ]

    should_not_update = [
        (flock.buffer.seq_probs.stored_data,
         forward._buffer.seq_probs.stored_data),
        (flock.buffer.outputs.stored_data,
         forward._buffer.outputs.stored_data),
        (flock.buffer.contexts.stored_data,
         forward._buffer.contexts.stored_data),
        (flock.buffer.clusters.stored_data,
         forward._buffer.clusters.stored_data),
        (flock.buffer.current_ptr, forward._buffer.current_ptr),
        (flock.buffer.total_data_written, forward._buffer.total_data_written),
    ]

    randomize_subflock(should_update, should_not_update)

    expected_results = calculate_expected_results(should_update,
                                                  should_not_update,
                                                  flock_size, indices_np)

    forward.integrate()

    check_integration_results(expected_results, should_update,
                              should_not_update)
Code example #12
 def get_expert_params(self):
     expert_params = ExpertParams()
     expert_params.flock_size = EOX * EOY
     expert_params.n_cluster_centers = 30
     expert_params.spatial.buffer_size = 500
     expert_params.temporal.buffer_size = 100
     expert_params.temporal.incoming_context_size = 10
     expert_params.spatial.batch_size = 50
     expert_params.temporal.batch_size = 50
     expert_params.spatial.input_size = 3 * self.SX // EOX * self.SY // EOY
     return expert_params
Code example #13
 def _make_top_layer(
     num_labels: int, input_data_size: int, n_cluster_centers: int
 ) -> (SpReconstructionLayer, InputSlot, OutputSlotBase):
     sp_params = ExpertParams()
     sp_params.flock_size = 1
     sp_params.n_cluster_centers = n_cluster_centers
     sp_params.compute_reconstruction = True
     layer = SpReconstructionLayer(input_data_size=input_data_size,
                                   labels_size=num_labels,
                                   sp_params=sp_params,
                                   name='TOP')
     return layer, layer.inputs.data, None
Code example #14
File: test_process.py Project: jvitku/torchsim
def get_subflock_creation_testing_flock(
        flock_size=10,
        subflock_size=6,
        input_size=5,
        buffer_size=7,
        batch_size=5,
        n_cluster_centers=4,
        device='cpu',
        sampling_method=SamplingMethod.LAST_N) -> Tuple[SPFlock, torch.Tensor]:
    # Generate a flock with some data
    float_dtype = get_float(device)
    params = ExpertParams()
    params.flock_size = flock_size
    params.n_cluster_centers = n_cluster_centers

    params.spatial.input_size = input_size
    params.spatial.buffer_size = buffer_size
    params.spatial.batch_size = batch_size
    params.spatial.sampling_method = sampling_method

    flock = SPFlock(params, AllocatingCreator(device))

    flock.buffer.inputs.stored_data = torch.rand(
        (flock_size, buffer_size, input_size),
        dtype=float_dtype,
        device=device)
    flock.buffer.clusters.stored_data = torch.rand(
        (flock_size, buffer_size, n_cluster_centers),
        dtype=float_dtype,
        device=device)  # NOTE: This is not one-hot and thus not real data
    flock.buffer.current_ptr = torch.rand(flock_size,
                                          device=device).type(torch.int64)
    flock.buffer.total_data_written = torch.rand(
        flock_size, device=device).type(torch.int64)

    # The bookkeeping values which are copied across
    flock.cluster_boosting_durations = torch.rand(
        (flock_size, n_cluster_centers), device=device).type(torch.int64)
    flock.prev_boosted_clusters = torch.clamp(
        torch.round(torch.rand((flock_size, n_cluster_centers),
                               device=device)), 0, 1).type(torch.uint8)
    flock.boosting_targets = torch.rand((flock_size, n_cluster_centers),
                                        device=device).type(torch.int64)

    # Indices which are to be subflocked
    indices = torch.tensor(np.random.choice(flock_size,
                                            subflock_size,
                                            replace=False),
                           dtype=torch.int64,
                           device=device)

    return flock, indices
Code example #15
 def _create_expert_params(self) -> ExpertParams:
     expert_params = ExpertParams()
     expert_params.flock_size = 1
     expert_params.n_cluster_centers = self._num_ccs
     expert_params.compute_reconstruction = True
     expert_params.spatial.batch_size = 990
     expert_params.spatial.buffer_size = self._buffer_size
     expert_params.spatial.cluster_boost_threshold = self._num_ccs * 2
     expert_params.spatial.learning_rate = SeT0BasicTopologyRT211.LEARNING_RATE
     expert_params.spatial.sampling_method = self._sampling_method
     expert_params.spatial.learning_period = 10
     expert_params.spatial.max_boost_time = 5000
     return expert_params
Code example #16
    def test_conv_forward(self):
        params = ExpertParams()
        params.n_cluster_centers = 10
        params.flock_size = 5
        params.spatial.input_size = 3
        params.spatial.buffer_size = 30
        device = 'cuda'
        float_dtype = get_float(device)

        creator = AllocatingCreator(device)

        sp_flock = ConvSPFlock(params, creator)

        input_data = torch.tensor([[2, 0, 4], [1, 0.3, -1], [2, 0.1, 0.5],
                                   [0.7, 0.9, 0.8], [2, 0, 4]],
                                  dtype=float_dtype,
                                  device=device)

        forward_mask = sp_flock.forward(input_data)

        expected_forward_mask = torch.tensor([1, 1, 1, 1, 1],
                                             dtype=torch.uint8,
                                             device=device)

        assert same(expected_forward_mask, forward_mask)

        expected_common_buffer = torch.full(
            (1, params.spatial.buffer_size, params.spatial.input_size),
            fill_value=FLOAT_NAN,
            dtype=float_dtype,
            device=device)
        expected_common_buffer[0, 0] = input_data[0]
        expected_common_buffer[0, 1] = input_data[1]
        expected_common_buffer[0, 2] = input_data[2]
        expected_common_buffer[0, 3] = input_data[3]
        expected_common_buffer[0, 4] = input_data[4]

        assert same(expected_common_buffer,
                    sp_flock.common_buffer.inputs.stored_data)
        assert sp_flock.forward_clusters.sum() == params.flock_size
        assert same(sp_flock.forward_clusters[0], sp_flock.forward_clusters[4])
        assert sp_flock.common_buffer.current_ptr == 4
        assert sp_flock.common_buffer.total_data_written == 5

        assert (sp_flock.buffer.current_ptr == 0).all()
        assert (sp_flock.buffer.total_data_written == 1).all()

        for member in range(params.flock_size):
            assert same(input_data[member],
                        sp_flock.buffer.inputs.stored_data[member, 0])
Code example #17
    def test_inverse_projection(self, device):
        dtype = get_float(device)
        params = ExpertParams()
        params.flock_size = 2
        params.n_cluster_centers = 4

        params.spatial.input_size = 6
        params.spatial.buffer_size = 7
        params.spatial.batch_size = 3
        params.temporal.n_frequent_seqs = 2
        params.temporal.seq_length = 3
        input_size = (3, 2)

        graph = Topology(device)
        node = ExpertFlockNode(params=params)

        graph.add_node(node)

        input_block = MemoryBlock()
        input_block.tensor = torch.rand((params.flock_size, ) + input_size,
                                        dtype=dtype,
                                        device=device)
        Connector.connect(input_block, node.inputs.sp.data_input)

        graph.prepare()

        node._unit.flock.sp_flock.cluster_centers = torch.tensor(
            [[[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], [0, 0, 0.5, 0.5, 0, 0],
              [0, 0, 0.5, 0, 0.5, 0]],
             [[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0],
              [0, 0, 0, 1, 0, 0]]],
            dtype=dtype,
            device=device)

        # Just SP inverse projection
        data = torch.tensor([[0, 0, 1, 0], [0.2, 0.3, 0.4, 0.1]],
                            dtype=dtype,
                            device=device)

        packet = InversePassOutputPacket(data,
                                         node.outputs.tp.projection_outputs)
        projected = node.recursive_inverse_projection_from_output(packet)

        # The result of the projection itself would be [[0, 0, 0.5, 0.5, 0, 0], ...], and it should be viewed as (2, 3, 2).
        expected_projection = torch.tensor(
            [[[0, 0], [0.5, 0.5], [0, 0]], [[0.2, 0.3], [0.4, 0.1], [0, 0]]],
            dtype=dtype,
            device=device)

        assert same(expected_projection, projected[0].tensor)
Code example #18
 def test_validate_context_input_error_message(self, sp_data_input_shape,
                                               flock_size, context_size,
                                               context_input_shape,
                                               exception_message):
     params = ExpertParams()
     params.flock_size = flock_size
     params.temporal.incoming_context_size = context_size
     expert = ExpertFlockNode(params)
     Connector.connect(self.memory_block(torch.zeros(sp_data_input_shape)),
                       expert.inputs.sp.data_input)
     Connector.connect(self.memory_block(torch.zeros(context_input_shape)),
                       expert.inputs.tp.context_input)
     with raises(NodeValidationException, match=exception_message):
         expert.validate()
Code example #19
    def topology_simple_dual_loop(self):
        dataset_node = DatasetSequenceMNISTNode(
            DatasetMNISTParams(one_hot_labels=False, examples_per_class=1),
            DatasetSequenceMNISTNodeParams(seqs=[[0, 1, 2], [0, 1, 3]]))
        flock_size = 1

        unsqueeze_node_child = UnsqueezeNode(0)
        unsqueeze_node_parent = UnsqueezeNode(0)
        expand_node_child = ExpandNode(0, flock_size)
        expand_node_parent = ExpandNode(0, flock_size)

        expert_node_child = ExpertFlockNode(
            ExpertParams(flock_size=flock_size,
                         n_cluster_centers=4,
                         spatial=SpatialPoolerParams(),
                         temporal=TemporalPoolerParams(incoming_context_size=2,
                                                       n_providers=2,
                                                       n_frequent_seqs=8,
                                                       seq_length=3,
                                                       seq_lookahead=1)))
        expert_node_parent = ExpertFlockNode(
            ExpertParams(flock_size=flock_size,
                         n_cluster_centers=2,
                         spatial=SpatialPoolerParams()))

        self.add_node(dataset_node)
        self.add_node(unsqueeze_node_child)
        self.add_node(unsqueeze_node_parent)
        self.add_node(expand_node_child)
        self.add_node(expand_node_parent)
        self.add_node(expert_node_child)
        self.add_node(expert_node_parent)

        Connector.connect(dataset_node.outputs.data,
                          unsqueeze_node_child.inputs.input)
        Connector.connect(dataset_node.outputs.sequence_id,
                          unsqueeze_node_parent.inputs.input)
        Connector.connect(unsqueeze_node_child.outputs.output,
                          expand_node_child.inputs.input)
        Connector.connect(unsqueeze_node_parent.outputs.output,
                          expand_node_parent.inputs.input)
        Connector.connect(expand_node_child.outputs.output,
                          expert_node_child.inputs.sp.data_input)
        Connector.connect(expand_node_parent.outputs.output,
                          expert_node_parent.inputs.sp.data_input)

        Connector.connect(expert_node_parent.outputs.output_context,
                          expert_node_child.inputs.tp.context_input,
                          is_backward=True)
Code example #20
def create_flock(input_size,
                 flock_size,
                 incoming_context_size,
                 seq_length=4,
                 n_cluster_centers=20,
                 max_encountered_seqs=1000,
                 n_frequent_seqs=500,
                 sp_buffer_size=3000,
                 sp_batch_size=300,
                 sp_learning_period=20,
                 convolutional: bool = False,
                 sp_max_boost_time=1000,
                 sp_boost_threshold=100,
                 sampling_method: SamplingMethod = SamplingMethod.BALANCED,
                 n_context_providers=1,
                 device='cuda'):
    params = ExpertParams()
    params.n_cluster_centers = n_cluster_centers
    params.flock_size = flock_size
    params.compute_reconstruction = True

    sp_params = params.spatial
    sp_params.input_size = input_size
    sp_params.buffer_size = sp_buffer_size
    sp_params.batch_size = sp_batch_size
    sp_params.learning_rate = 0.1
    sp_params.cluster_boost_threshold = sp_boost_threshold
    sp_params.max_boost_time = sp_max_boost_time
    sp_params.learning_period = sp_learning_period
    sp_params.sampling_method = sampling_method

    tp_params = params.temporal
    tp_params.buffer_size = 100
    tp_params.seq_length = seq_length
    tp_params.batch_size = 50 + tp_params.seq_length - 1
    tp_params.learning_period = 50
    tp_params.seq_lookahead = 2
    tp_params.n_frequent_seqs = n_frequent_seqs
    tp_params.max_encountered_seqs = max_encountered_seqs
    tp_params.forgetting_limit = 5000
    tp_params.incoming_context_size = incoming_context_size
    tp_params.n_providers = n_context_providers

    if convolutional:
        print("Created convolutional flock.")
        return ConvExpertFlock(params, AllocatingCreator(device))
    else:
        print("Created normal flock.")
        return ExpertFlock(params, AllocatingCreator(device))
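Illustrative usage of the factory above; the convolutional flag picks ConvExpertFlock over ExpertFlock, and the argument values below are assumptions rather than settings taken from the project:

conv_flock = create_flock(input_size=16,
                          flock_size=4,
                          incoming_context_size=8,
                          convolutional=True,
                          device='cpu')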
Code example #21
    def test_forward_learn_streams(self, use_default_stream, sampling_method):
        params = ExpertParams()
        params.flock_size = 1
        params.n_cluster_centers = 4

        params.spatial.input_size = 2
        params.spatial.cluster_boost_threshold = 2
        params.spatial.learning_rate = 0.1
        params.spatial.learning_period = 1
        params.spatial.batch_size = 4
        params.spatial.buffer_size = 6
        params.spatial.max_boost_time = 10
        params.spatial.sampling_method = sampling_method

        device = 'cuda'
        float_dtype = get_float(device)

        flock = SPFlock(params, AllocatingCreator(device))

        data = torch.tensor([[0., 0], [1., 0], [0., 1], [1, 1]],
                            dtype=float_dtype,
                            device=device)

        iters = 20

        def run():
            for itr in range(iters):
                for k in data:
                    flock.forward_learn(k.view(1, -1))

        if use_default_stream:
            run()
        else:
            with torch.cuda.stream(torch.cuda.Stream()):
                run()

        expected_cluster_centers = data

        # Cluster centers have no guarantee of order - so we have to order them manually
        rounded_cluster_centers = np.around(
            flock.cluster_centers.cpu().data.numpy()[0], decimals=2)
        cc_indices = []
        for cc in rounded_cluster_centers:
            cc_indices.append(cc[0] * 2 + cc[1] * 4)

        sorted_indices = np.argsort(cc_indices)
        sorted_ccs = flock.cluster_centers[0, sorted_indices, :]

        assert same(expected_cluster_centers, sorted_ccs, eps=5e-2)
Code example #22
File: test_process.py Project: jvitku/torchsim
    def test_integrate_learning_subflock(self, device):
        flock_size = 10
        subflock_size = 6
        input_size = 5
        n_cluster_centers = 4
        buffer_size = 7

        params = ExpertParams()
        params.flock_size = flock_size
        params.n_cluster_centers = n_cluster_centers

        params.spatial.input_size = input_size
        params.spatial.buffer_size = buffer_size
        params.spatial.batch_size = buffer_size

        flock, indices, indices_np = get_subflock_integration_testing_flock(
            params, subflock_size, device)

        learning = flock._create_learning_process(indices)

        should_update = [
            (flock.cluster_boosting_durations,
             learning._cluster_boosting_durations),
            (flock.prev_boosted_clusters, learning._prev_boosted_clusters),
            (flock.boosting_targets, learning._boosting_targets),
            (flock.cluster_centers, learning._cluster_centers),
            (flock.cluster_center_deltas, learning._cluster_center_deltas)
        ]

        should_not_update = [(flock.buffer.inputs.stored_data,
                              learning._buffer.inputs.stored_data),
                             (flock.buffer.clusters.stored_data,
                              learning._buffer.clusters.stored_data),
                             (flock.buffer.current_ptr,
                              learning._buffer.current_ptr),
                             (flock.buffer.total_data_written,
                              learning._buffer.total_data_written)]

        randomize_subflock(should_update, should_not_update)

        expected_results = calculate_expected_results(should_update,
                                                      should_not_update,
                                                      flock_size, indices_np)

        # Run the integration
        learning.integrate()

        check_integration_results(expected_results, should_update,
                                  should_not_update)
Code example #23
    def get_sp_params(num_cluster_centers: int, cluster_boost_threshold: int,
                      learning_rate: float, buffer_size: int, batch_size: int,
                      input_size: int, flock_size: int, max_boost_time):
        params = ExpertParams()
        params.n_cluster_centers = num_cluster_centers
        params.spatial.input_size = input_size
        params.flock_size = flock_size

        params.spatial.buffer_size = buffer_size
        params.spatial.batch_size = batch_size
        params.spatial.cluster_boost_threshold = cluster_boost_threshold
        params.spatial.max_boost_time = max_boost_time  # should be bigger than any cluster_boost_threshold
        params.spatial.learning_rate = learning_rate

        return params
Code example #24
    def convert_to_expert_params(self):
        """Parse from the MultipleLayerParams to list of ExpertParams"""
        self.validate_params_for_n_layers()

        params_list = []

        for layer_id in range(self.num_conv_layers):
            params = ExpertParams()
            params.flock_size = 1

            # spatial
            params.n_cluster_centers = self.read_param('n_cluster_centers',
                                                       layer_id)

            params.spatial.buffer_size = self.read_param(
                'sp_buffer_size', layer_id)
            params.spatial.batch_size = self.read_param(
                'sp_batch_size', layer_id)

            params.spatial.cluster_boost_threshold = self.read_param(
                'cluster_boost_threshold', layer_id)
            params.spatial.max_boost_time = self.read_param(
                'max_boost_time', layer_id)

            params.spatial.learning_rate = self.read_param(
                'learning_rate', layer_id)
            params.spatial.sampling_method = self.read_param(
                'sampling_method', layer_id)

            params.compute_reconstruction = self.read_param(
                'compute_reconstruction', layer_id)

            # temporal
            params.temporal.seq_length = self.read_param(
                'seq_length', layer_id)
            params.temporal.seq_lookahead = self.read_param(
                'seq_lookahead', layer_id)
            params.temporal.max_encountered_seqs = self.read_param(
                'max_encountered_seqs', layer_id)
            params.temporal.n_frequent_seqs = self.read_param(
                'max_frequent_seqs', layer_id)
            params.temporal.exploration_probability = self.read_param(
                'exploration_probability', layer_id)

            # done
            params_list.append(params)

        return params_list
Code example #25
    def __init__(self):
        super().__init__('cuda')

        noise_amplitude: float = 0
        env_size: Tuple[int, int] = (27, 27)
        ball_radius: int = 5
        switch_shape_after = 200

        sp_n_cluster_centers = 200  # free

        ball_env_params = BallEnvironmentParams(
            switch_shape_after=switch_shape_after,
            noise_amplitude=noise_amplitude,
            ball_radius=ball_radius,
            env_size=env_size)

        ball_env = BallEnvironment(ball_env_params)
        self.add_node(ball_env)
        self.ball_env = ball_env

        # topmost layer
        ep_sp = ExpertParams()
        ep_sp.flock_size = 1
        ep_sp.n_cluster_centers = sp_n_cluster_centers
        sp_reconstruction_layer = SpReconstructionLayer(
            env_size[0] * env_size[1],
            ball_env_params.n_shapes,
            sp_params=ep_sp,
            name="L0")
        self.add_node(sp_reconstruction_layer)
        self.sp_reconstruction_layer = sp_reconstruction_layer

        switch_node = SwitchNode(2)
        self.add_node(switch_node)
        self.switch_node = switch_node

        nan_node = ConstantNode(ball_env_params.n_shapes, math.nan)
        self.add_node(nan_node)

        Connector.connect(ball_env.outputs.data,
                          sp_reconstruction_layer.inputs.data)

        Connector.connect(ball_env.outputs.label, switch_node.inputs[0])
        Connector.connect(nan_node.outputs.output, switch_node.inputs[1])
        Connector.connect(switch_node.outputs.output,
                          sp_reconstruction_layer.inputs.label)

        self.is_training = True
Code example #26
def test_max_new_seqs_default_autocalculate():
    params = ExpertParams()

    expected_max_new_seqs = params.temporal.batch_size - (
        params.temporal.seq_length - 1)

    assert expected_max_new_seqs == params.temporal.max_new_seqs
Code example #27
 def test_validate_context_input(self, sp_data_input_shape, flock_size,
                                 context_size, context_input_shape,
                                 is_valid):
     params = ExpertParams()
     params.flock_size = flock_size
     params.temporal.incoming_context_size = context_size
     expert = ExpertFlockNode(params)
     Connector.connect(self.memory_block(torch.zeros(sp_data_input_shape)),
                       expert.inputs.sp.data_input)
     Connector.connect(self.memory_block(torch.zeros(context_input_shape)),
                       expert.inputs.tp.context_input)
     if is_valid:
         expert.validate()
     else:
         with raises(NodeValidationException):
             expert.validate()
Code example #28
 def setup_class(cls, device: str = 'cuda'):
     super().setup_class()
     cls._dim = 1
     cls._device = 'cuda'
     cls.params = ExpertParams()
     cls.params.flock_size = 3
     cls.params.spatial.input_size = 5
Code example #29
    def __init__(self,
                 params: DatasetAlphabetNodeGroupParams,
                 name: str = "DatasetAlphabetNodeGroup"):
        super().__init__(name,
                         inputs=EmptyInputs(self),
                         outputs=DatasetAlphabetNodeGroupOutputs(self))
        self._params = params

        dataset_node = self.create_node_dataset()

        sp_dataset_node = SpatialPoolerFlockNode(
            ExpertParams(flock_size=self._params.flock_size,
                         n_cluster_centers=len(self._params.symbols),
                         spatial=SpatialPoolerParams(enable_learning=False)),
            name="SP_dataset")

        # Dataset nodes
        self.add_node(dataset_node)
        self.add_node(sp_dataset_node)
        # Connect data output
        Connector.connect(dataset_node.outputs.outputs,
                          sp_dataset_node.inputs.sp.data_input)
        # Connect sequence_id output
        Connector.connect(dataset_node.outputs.sequence_ids_one_hot,
                          self.outputs.sequence_id_one_hot.input)
        Connector.connect(sp_dataset_node.outputs.sp.forward_clusters,
                          self.outputs.output.input)
        Connector.connect(dataset_node.outputs.sequence_ids,
                          self.outputs.scalar_sequence_ids.input)

        self._dataset_node = dataset_node
        self._sp_dataset_node = sp_dataset_node
Code example #30
 def _make_middle_layer_expert(use_temporal_pooler: bool, n_middle_layer_cluster_centers: int) \
         -> (NodeBase, InputSlot, OutputSlotBase):
     params = ExpertParams()
     params.flock_size = 1
     params.n_cluster_centers = n_middle_layer_cluster_centers
     if use_temporal_pooler:
         expert_node: ExpertFlockNode = ExpertFlockNode(params)
         input_slot = expert_node.inputs.sp.data_input
         output_slot = expert_node.outputs.tp.projection_outputs
         node = expert_node
     else:
         sp_node: SpatialPoolerFlockNode = SpatialPoolerFlockNode(params)
         input_slot = sp_node.inputs.sp.data_input
         output_slot = sp_node.outputs.sp.forward_clusters
         node = sp_node
     return node, input_slot, output_slot