def test_default_top_params():
    """Default params should be set for 1 layer (i.e. the top-level one)"""

    expert_params = MultipleLayersParams().convert_to_expert_params()

    assert 1 == len(expert_params)
# --- Esempio n. 2 (scraped example separator) ---
    def __init__(
            self,
            top_layer_params: Optional[
                MultipleLayersParams] = MultipleLayersParams(),
            conv_layers_params: MultipleLayersParams = MultipleLayersParams(),
            model_seed: int = 321,

            # DATASET
            image_size: SeDatasetSize = SeDatasetSize.SIZE_24,
            baseline_seed: int = 123,
            class_filter: Optional[List[int]] = None,
            random_order: bool = False,
            noise_amp: float = 0.0):
        """
        Constructor of the TA topology which should solve the Task0.

        NOTE(review): the default `MultipleLayersParams()` instances are created
        once at definition time and shared across all calls (mutable-default
        pitfall) — callers should not mutate them.

        Args:
            top_layer_params: params of the top layer; if None, the model is
                built without the top layer (NCMGroup instead of
                Nc1r1GroupWithAdapter)
            conv_layers_params: params of the convolutional layers
            model_seed: seed of the model
            image_size: size of the dataset image
            baseline_seed: seed for the baseline nodes
            class_filter: filters the classes in the dataset
            random_order: whether the dataset presents samples in random order
            noise_amp: noise amplitude passed to the SE node group
        """

        super().__init__('cuda')

        # collect n_cluster_centers per layer: conv layers first, then
        # (optionally) the top layer
        layer_sizes = conv_layers_params.read_list_of_params(
            'n_cluster_centers')
        if top_layer_params is not None:
            layer_sizes += top_layer_params.read_list_of_params(
                'n_cluster_centers')

        # dataset + baseline nodes
        self.se_group = SeNodeGroup(baseline_seed=baseline_seed,
                                    layer_sizes=layer_sizes,
                                    class_filter=class_filter,
                                    image_size=image_size,
                                    random_order=random_order,
                                    noise_amp=noise_amp)

        self.add_node(self.se_group)

        # without top_layer_params: plain N-conv-layer model;
        # with it: conv layers + classification top layer (with adapter)
        if top_layer_params is None:
            self.model = NCMGroup(conv_layers_params=conv_layers_params,
                                  image_size=(image_size.value,
                                              image_size.value, 3),
                                  model_seed=model_seed)
        else:
            self.model = Nc1r1GroupWithAdapter(
                conv_layers_params=conv_layers_params,
                top_layer_params=top_layer_params,
                num_labels=20,
                image_size=(image_size.value, image_size.value, 3),
                model_seed=model_seed)

        self.add_node(self.model)

        # dataset image -> model
        Connector.connect(self.se_group.outputs.image, self.model.inputs.image)

        # labels are only needed when the classification top layer is present
        if isinstance(self.model, Nc1r1GroupWithAdapter):
            Connector.connect(self.se_group.outputs.labels,
                              self.model.inputs.label)
def multiple_runs_class_filter_example(args, debug: bool = False):
    """Example: several measurement runs differing only in the class filter."""
    name = "example_multiple_runs"

    conv_params = MultipleLayersParams()
    conv_params.compute_reconstruction = True
    conv_params.conv_classes = [ConvLayer, ConvLayer]
    conv_params.num_conv_layers = 2
    conv_params.rf_stride = (8, 8)
    conv_params.rf_size = (8, 8)

    top_params = MultipleLayersParams()
    top_params.n_cluster_centers = 250

    # one run per class-filter configuration
    params = [{'class_filter': cf} for cf in ([1, 2], [1, 2, 3], [1])]

    common_params = {
        'conv_layers_params': conv_params,
        'top_layer_params': top_params,
        'image_size': SeDatasetSize.SIZE_64,
        'noise_amp': 0.0,
        'model_seed': None,
        'baseline_seed': None
    }

    # merge the per-run params with the shared ones
    merged = ExperimentTemplateBase.add_common_params(params, common_params)

    run_measurement(name, merged, args, debug=debug)
def run_debug_comparison(args, debug: bool = True):
    """Two debug runs comparing learning rates, batch sizes and cluster counts."""
    name = "Learning-rate-debug"

    # both runs use the same dataset class filter
    se_setup = {'class_filter': [1, 2, 3, 4]}

    run_a = {
        'se_group': dict(se_setup),
        'model': {
            'conv_layers_params': MultipleLayersParams(learning_rate=0.79,
                                                       sp_batch_size=300),
            'top_layer_params': MultipleLayersParams(learning_rate=0.79,
                                                     sp_batch_size=150)
        }
    }
    run_b = {
        'se_group': dict(se_setup),
        'model': {
            'conv_layers_params': MultipleLayersParams(learning_rate=0.7,
                                                       sp_batch_size=30,
                                                       n_cluster_centers=300),
            'top_layer_params': MultipleLayersParams(learning_rate=0.7,
                                                     sp_batch_size=15,
                                                     n_cluster_centers=200)
        }
    }

    run_measurement(name, [run_a, run_b], args, debug)
    def __init__(self,
                 top_layer_params: Optional[MultipleLayersParams] = None,
                 model_seed: Optional[int] = None,
                 baseline_seed: Optional[int] = None,
                 class_filter: Optional[List[int]] = None,
                 random_order: bool = False,
                 num_labels: int = 20,
                 image_size: SeDatasetSize = SeDatasetSize.SIZE_64,
                 fof_fixed_size: Optional[int] = None):
        """Topology: SE dataset -> bottom-up attention -> SP reconstruction layer.

        Args:
            top_layer_params: params of the reconstruction layer
                (defaults are used when None)
            model_seed: seed of the reconstruction layer
            baseline_seed: seed of the dataset
            class_filter: filters the classes in the dataset
            random_order: whether the dataset presents samples in random order
            num_labels: size of the label vector
            image_size: size of the dataset image
            fof_fixed_size: if set, the attention group uses a fixed
                focus-of-fovea region of this size as the layer input
        """
        super().__init__(device='cuda')

        # parse params here
        self._dataset_params = DatasetSeObjectsParams()
        self._dataset_params.dataset_config = DatasetConfig.TRAIN_ONLY
        self._dataset_params.dataset_size = image_size
        self._dataset_params.class_filter = class_filter
        self._dataset_params.random_order = random_order
        self._dataset_params.seed = baseline_seed

        # fall back to defaults, then convert to ExpertParams (single layer)
        if top_layer_params is None:
            top_layer_params = MultipleLayersParams()
        self._top_params = top_layer_params.convert_to_expert_params()[0]

        self._label_size = num_labels
        self._fof_fixed_size = fof_fixed_size

        # create and add nodes here
        self._node_se_dataset = DatasetSeObjectsNode(self._dataset_params)
        self.add_node(self._node_se_dataset)

        self._attention_group = BottomUpAttentionGroup()
        self.add_node(self._attention_group)

        # the reconstruction layer sees either the fixed fof region
        # or the full image
        if self._fof_fixed_size is not None:
            input_size = self._fof_fixed_size
            self._attention_group.fixed_region_size = self._fof_fixed_size
            self._attention_group.use_fixed_region = True
        else:
            input_size = image_size.value
            self._attention_group.use_fixed_region = False
        self._input_data_size = input_size * input_size * 3  # RGB

        self._sp_reconstruction_layer = SpReconstructionLayer(
            self._input_data_size,
            self._label_size,
            self._top_params,
            "ReconstructionLayer",
            model_seed)
        self.add_node(self._sp_reconstruction_layer)

        # connect nodes here: image -> attention -> reconstruction; labels -> reconstruction
        Connector.connect(self._node_se_dataset.outputs.image_output,
                          self._attention_group.inputs.image)
        Connector.connect(self._attention_group.outputs.fof,
                          self._sp_reconstruction_layer.inputs.data)
        Connector.connect(self._node_se_dataset.outputs.task_to_agent_label,
                          self._sp_reconstruction_layer.inputs.label)
def test_group_constructor():
    """One conv layer plus the top layer should give a 2-layer group."""
    conv_params = MultipleLayersParams()
    conv_params.n_cluster_centers = [20]

    group = NCMR1Group(conv_params, MultipleLayersParams())

    assert 2 == group._num_layers
def test_validate_params_for_n_layers():
    """Create default params, set custom values expecting 3-layer network, check validation"""
    p = MultipleLayersParams()
    p.num_conv_layers = 3
    p.seq_length = [1, 2, 3]
    p.n_cluster_centers = [1, 2, 3]
    p.cluster_boost_threshold = 123

    p.validate_params_for_n_layers()

    # unknown attributes must be rejected
    with pytest.raises(AttributeError):
        p.nonexisting_field = 23
# --- Esempio n. 8 (scraped example separator) ---
    def __init__(self,
                 inputs: TNCMGroupInputs,
                 outputs: TOutputs,
                 conv_layers_params: MultipleLayersParams,
                 model_seed: Optional[int] = 321,
                 image_size: Tuple[int, int, int] = (24, 24, 3),
                 name: str = "TA Model"):
        """ Create the multilayer topology of configuration: N conv layers

        Args:
            inputs: group inputs (must expose `image`)
            outputs: group outputs
            conv_layers_params: parameters for each layer (or default ones)
            model_seed: seed of the topology
            image_size: small resolution by default
            name: name of the node group
        """
        super().__init__(name=name,
                         inputs=inputs,
                         outputs=outputs)

        # conv layers plus one top layer
        self._num_layers = conv_layers_params.num_conv_layers + 1
        self._input_dims = image_size

        # parse conv layers to ExpertParams
        self._conv_params_list = conv_layers_params.convert_to_expert_params()

        # other parameters for the convolution (one value per layer)
        conv_classes = conv_layers_params.read_list_of_params('conv_classes')
        rf_sizes = conv_layers_params.read_list_of_params('rf_size')
        rf_strides = conv_layers_params.read_list_of_params('rf_stride')
        num_flocks = conv_layers_params.read_list_of_params('n_flocks')

        self._conv_params = self.create_conv_layer_params(param_list=self._conv_params_list,
                                                          rf_sizes=rf_sizes,
                                                          rf_strides=rf_strides,
                                                          num_flocks=num_flocks,
                                                          conv_classes=conv_classes,
                                                          model_seed=model_seed)

        # build the chained conv stack and remember its output projection sizes
        self.conv_layers, self.output_projection_sizes = create_connected_conv_layers(self._conv_params,
                                                                                      self._input_dims)
        for layer in self.conv_layers:
            self.add_node(layer)

        # image -> Conv0
        Connector.connect(
            self.inputs.image.output,
            self.conv_layers[0].inputs.data)
def test_add_common_params():
    """Test the helper function that adds default parameters to all experiment runs"""

    conv = MultipleLayersParams()
    conv.compute_reconstruction = True
    conv.conv_classes = [ConvLayer, ConvLayer]
    conv.num_conv_layers = 2

    top = MultipleLayersParams()
    top.n_cluster_centers = 250

    # per-run overrides; the last run overrides nothing
    filters = [[1, 2], [1, 2, 3], [1]]
    params = [{'class_filter': f} for f in filters] + [{}]

    common_params = {
        'conv_layers_params': conv,
        'top_layer_params': top,
        'image_size': SeDatasetSize.SIZE_64,
        'noise_amp': 0.12,
        'model_seed': None,
        'baseline_seed': None,
        'class_filter': [9]
    }

    merged = ExperimentTemplateBase.add_common_params(params, common_params)

    assert len(merged) == 4
    assert len(merged[0]) == 7

    # a per-run class_filter wins over the common one; values missing from the
    # run are taken from common_params
    expected_filters = filters + [[9]]
    for run, expected in zip(merged, expected_filters):
        assert run['class_filter'] == expected
        assert run['noise_amp'] == 0.12
def test_empty_change():
    """change() without arguments should produce an independent copy."""
    source = MultipleLayersParams()
    source.learning_rate = 0.345
    source.n_cluster_centers = [1, 2, 3]

    copy = source.change()
    assert copy.learning_rate == source.learning_rate
    assert source.n_cluster_centers == copy.n_cluster_centers

    # mutating either instance must not affect the other
    copy.learning_rate = 0.1
    source.n_cluster_centers = [1, 2]

    assert copy.learning_rate != source.learning_rate
    assert source.n_cluster_centers != copy.n_cluster_centers
def test_change_params():
    """change(**kwargs) should override the given fields on a copy only."""
    base = MultipleLayersParams()
    base.learning_rate = 0.997
    base.n_cluster_centers = [2, 3]
    base.seq_length = 5

    sequence = [
        {'conv_layers_params': base.change(n_cluster_centers=[1, 2])},
        {'conv_layers_params': base.change(n_cluster_centers=[1, 2, 3],
                                           learning_rate=0.11)},
    ]

    # the source instance is untouched
    assert base.n_cluster_centers == [2, 3]

    # overridden fields take the new values...
    assert sequence[0]['conv_layers_params'].n_cluster_centers == [1, 2]
    assert sequence[1]['conv_layers_params'].n_cluster_centers == [1, 2, 3]

    # ...while untouched fields keep the source's values
    assert sequence[0]['conv_layers_params'].learning_rate == 0.997
    assert sequence[1]['conv_layers_params'].learning_rate == 0.11
# --- Esempio n. 12 (scraped example separator) ---
    def __init__(self,
                 conv_layers_params: MultipleLayersParams,
                 top_layer_params: MultipleLayersParams,
                 model_seed: Optional[int] = 321,
                 num_labels: int = 20,
                 image_size: Tuple[int, int, int] = (24, 24, 3),
                 name: str = "Nc1r1Group"):
        """Conv stack (built by the parent) plus an SP reconstruction top layer.

        Args:
            conv_layers_params: parameters of the conv layers (passed to super)
            top_layer_params: parameters of the (single) top layer
            model_seed: seed of the model
            num_labels: size of the label vector; must not be None
            image_size: input image dimensions
            name: name of the node group
        """
        super().__init__(ClassificationTaskInputs(self),
                         ClassificationTaskOutputs(self), conv_layers_params,
                         model_seed, image_size, name)

        self._num_labels = num_labels

        validate_predicate(
            lambda: self._num_labels is not None,
            "num_labels cannot be None if top layer is used (top_layer_params is not None)."
        )

        # parse to expert params
        self._top_params = top_layer_params.convert_to_expert_params()[0]

        # fully-connected reconstruction layer on top of the conv stack
        self.top_layer = SpReconstructionLayer(self.output_projection_sizes,
                                               self._num_labels,
                                               sp_params=self._top_params,
                                               name='TOP',
                                               seed=model_seed)
        self.add_node(self.top_layer)

        # Conv[-1] -> Fully
        Connector.connect(self.conv_layers[-1].outputs.data,
                          self.top_layer.inputs.data)

        # Label -> Fully
        Connector.connect(self.inputs.label.output,
                          self.top_layer.inputs.label)

        # Fully -> Reconstructed label
        Connector.connect(self.top_layer.outputs.label,
                          self.outputs.reconstructed_label.input)
def make_test_params():
    """Build and return (merged, common, changing) experiment parameter sets."""

    conv = MultipleLayersParams()
    conv.compute_reconstruction = True
    conv.conv_classes = ConvLayer
    conv.sp_buffer_size = 6000
    conv.sp_batch_size = 4000
    conv.learning_rate = 0.02
    conv.cluster_boost_threshold = 1000
    conv.max_encountered_seqs = 10000
    conv.max_frequent_seqs = 1000
    conv.seq_length = 4
    conv.seq_lookahead = 1
    conv.num_conv_layers = 1
    conv.n_cluster_centers = 400
    conv.rf_size = (8, 8)
    conv.rf_stride = (8, 8)

    top = MultipleLayersParams()
    top.n_cluster_centers = 250
    top.sp_buffer_size = 4000
    top.sp_batch_size = 500
    top.learning_rate = 0.2
    top.cluster_boost_threshold = 1000
    top.compute_reconstruction = True

    common_params = {
        'conv_layers_params': conv,
        'top_layer_params': top,
        'image_size': SeDatasetSize.SIZE_64,
        'class_filter': [1, 2, 3],
        'noise_amp': 0.0,
        'model_seed': None,
        'baseline_seed': None
    }

    # three runs differing in the learning rate(s)
    changing_params = [
        {'conv_layers_params': conv.change(learning_rate=0.1)},
        {'conv_layers_params': conv.change(learning_rate=0.2),
         'top_layer_params': top.change(learning_rate=0.1)},
        {'conv_layers_params': conv.change(learning_rate=0.3)},
    ]

    params = ExperimentTemplateBase.add_common_params(changing_params,
                                                      common_params)

    return params, common_params, changing_params
        Connector.connect(self._node_se_dataset.outputs.image_output,
                          self._attention_group.inputs.image)
        Connector.connect(self._attention_group.outputs.fof,
                          self._sp_reconstruction_layer.inputs.data)
        Connector.connect(self._node_se_dataset.outputs.task_to_agent_label,
                          self._sp_reconstruction_layer.inputs.label)

    def switch_learning(self, on: bool):
        """Toggle training of the dataset node and the reconstruction layer together."""
        self._node_se_dataset.switch_training(training_on=on, just_hide_labels=False)
        self._sp_reconstruction_layer.switch_learning(on)


if __name__ == '__main__':
    """Just an example configuration for GUI"""

    expert_params = MultipleLayersParams()
    expert_params.n_cluster_centers = 200
    expert_params.sp_buffer_size = 3000
    expert_params.sp_batch_size = 1000
    expert_params.learning_rate = 0.05
    expert_params.cluster_boost_threshold = 1000
    expert_params.compute_reconstruction = True

    class_f = None

    params = [
        {
            'top_layer_params': expert_params,
            'image_size': SeDatasetSize.SIZE_64,
            'class_filter': class_f,
            'model_seed': None,
# --- Esempio n. 15 (scraped example separator) ---
            graph_classes,
            discover_child_classes(
                f'torchsim.research.research_topics.{topic}.topologies',
                Topology, skip_classes))

    # TODO: Not testing because they need SpaceEngineers - they are now tested separately in test_se_tasks_topologies
    # graph_classes = set.union(graph_classes, discover_topology_classes(f'torchsim.research.se_tasks.topologies'))

    return [x for x in graph_classes if 'torchsim.research' in x.__module__]


# Put factories here if your topology needs parameters to be constructed.
# Maps topology class -> zero-argument factory used by instantiate_graph.
topology_factories = {
    ClassificationAccuracyModularTopology:
    lambda: ClassificationAccuracyModularTopology(
        SeNodeGroup(), Nc1r1ClassificationGroup(MultipleLayersParams())),
    Task0TaAnalysisTopology:
    lambda: Task0TaAnalysisTopology(
        SeNodeGroup(), TaMultilayerClassificationGroup(MultipleLayersParams())
    ),
    # NOTE(review): returns None — presumably this topology cannot be built
    # here and is skipped by the caller; TODO confirm
    GoalDirectedTemplateTopology:
    lambda: None
}


def instantiate_graph(topology_class):
    """Instantiate the topology.

    Use the factory from topology_factories, or try instantiating the topology without any parameters.
    """
    factory = topology_factories.get(topology_class, topology_class)
    def _create_and_connect_agent(self, input_image: MemoryBlock,
                                  input_size: Tuple[int, int, int]):
        """Create a 4-conv-layer NCMGroup agent and feed it the input image.

        Args:
            input_image: memory block carrying the input image
            input_size: image dimensions — assumed (height, width, channels);
                TODO confirm the order against the caller
        """
        params = MultipleLayersParams()
        params.num_conv_layers = 4
        params.n_flocks = [5, 5, 1, 1]
        params.n_cluster_centers = [30, 60, 60, 9]
        params.compute_reconstruction = True
        params.conv_classes = SpConvLayer
        params.sp_buffer_size = 5000
        params.sp_batch_size = 500
        params.learning_rate = 0.1
        params.cluster_boost_threshold = 1000
        params.max_encountered_seqs = 1000
        params.max_frequent_seqs = 500
        params.seq_lookahead = 2
        params.seq_length = 4
        params.exploration_probability = 0
        params.rf_size = (2, 2)
        # NOTE(review): None presumably means the stride is derived elsewhere
        # (e.g. defaults to non-overlapping fields); TODO confirm
        params.rf_stride = None
        ta_group = NCMGroup(conv_layers_params=params,
                            model_seed=None,
                            image_size=input_size)

        self.add_node(ta_group)

        Connector.connect(input_image, ta_group.inputs.image)
# --- Esempio n. 17 (scraped example separator) ---
def run_opp(args, num_conv_layers: int, exp_params, top_cc: int = 150):
    """Sweep the conv-layer `opp` parameter and measure each setting."""
    name = f"OPP-influence_num_cc{top_cc}"

    conv = MultipleLayersParams()
    conv.compute_reconstruction = False
    conv.conv_classes = ConvLayer
    conv.sp_buffer_size = 6000
    conv.sp_batch_size = 1500  # original 4000
    conv.learning_rate = 0.10001
    conv.cluster_boost_threshold = 1000
    conv.max_encountered_seqs = 2000
    conv.max_frequent_seqs = 1000
    conv.seq_lookahead = 1
    conv.seq_length = 4  # note: might use also 5 (as in older experiments)

    # cluster counts / receptive fields depend on the number of conv layers
    if num_conv_layers == 2:
        conv.n_cluster_centers = [100, 230]
        conv.rf_size = [(8, 8), (4, 4)]
        conv.rf_stride = [(8, 8), (1, 1)]
        conv.num_conv_layers = 2
    else:
        conv.n_cluster_centers = 200
        conv.rf_size = (8, 8)
        conv.rf_stride = (8, 8)
        conv.num_conv_layers = 1

    top = MultipleLayersParams()
    top.n_cluster_centers = top_cc
    top.sp_buffer_size = 3000
    top.sp_batch_size = 1500
    top.learning_rate = 0.15
    top.cluster_boost_threshold = 1000
    top.compute_reconstruction = True

    cf_easy = [1, 2, 3, 4]
    size = SeDatasetSize.SIZE_64
    size_int = (size.value, size.value, 3)

    # one run per opp value; everything else is held constant
    params = [
        {
            'se_group': {'class_filter': cf_easy,
                         'image_size': size},
            'model': {'conv_layers_params': conv.change(opp=opp),
                      'top_layer_params': top,
                      'image_size': size_int}
        }
        for opp in (0, 0.05, 0.5, 0.95, 1)
    ]

    exp_params.experiment_params.num_layers = num_conv_layers + 1  # needs to be there

    run_measurement(name, params, args, exp_params)
# --- Esempio n. 18 (scraped example separator) ---
def multiple_runs_lr_example(args, debug: bool = False):
    """Example: several runs differing in conv/top learning rates."""
    name = "example_multiple_runs"

    conv = MultipleLayersParams()
    conv.compute_reconstruction = True
    conv.conv_classes = [ConvLayer, SpConvLayer]
    conv.num_conv_layers = 2
    conv.rf_stride = (8, 8)
    conv.rf_size = (8, 8)

    top = MultipleLayersParams()
    top.n_cluster_centers = 250

    # per-run overrides; only the second run also overrides the top layer
    params = [
        {'conv_layers_params': conv.change(learning_rate=0.1)},
        {'conv_layers_params': conv.change(learning_rate=0.2),
         'top_layer_params': top.change(learning_rate=0.11)},
        {'conv_layers_params': conv.change(learning_rate=0.3)},
    ]

    common_params = {
        'top_layer_params': top,
        'image_size': SeDatasetSize.SIZE_64,
        'noise_amp': 0.0,
        'model_seed': None,
        'baseline_seed': None
    }

    # merge the params and common params
    merged = ExperimentTemplateBase.add_common_params(params, common_params)

    run_measurement(name, merged, args, debug=debug)
# --- Esempio n. 19 (scraped example separator) ---
def run_rf_size(args, exp_params, opp: float = 1.0):
    """Sweep the conv-layer rf_stride and measure each setting."""
    name = "rf_size"

    conv = MultipleLayersParams()
    conv.compute_reconstruction = False
    conv.conv_classes = ConvLayer
    conv.sp_buffer_size = 6000
    conv.sp_batch_size = 4000
    conv.learning_rate = 0.1
    conv.cluster_boost_threshold = 1000
    conv.max_encountered_seqs = 2000
    conv.max_frequent_seqs = 1000
    conv.seq_lookahead = 1
    conv.seq_length = 5
    conv.opp = opp
    conv.n_cluster_centers = 200
    conv.rf_size = (8, 8)
    conv.rf_stride = (8, 8)
    conv.num_conv_layers = 1

    top = MultipleLayersParams()
    top.n_cluster_centers = 150
    top.sp_buffer_size = 3000
    top.sp_batch_size = 2000
    top.learning_rate = 0.15
    top.cluster_boost_threshold = 1000
    top.compute_reconstruction = True

    cf_easy = [1, 2, 3, 4]
    size = SeDatasetSize.SIZE_64
    size_int = (size.value, size.value, 3)

    # one run per stride (each wrapped in a single-element list)
    params = [
        {
            'se_group': {'class_filter': cf_easy,
                         'image_size': size},
            'model': {'conv_layers_params': conv.change(rf_stride=[stride]),
                      'top_layer_params': top,
                      'image_size': size_int}
        }
        for stride in ((8, 8), (4, 4), (2, 2))
    ]

    exp_params.experiment_params.num_layers = 2  # needs to be there

    run_measurement(name, params, args, exp_params)
# --- Esempio n. 20 (scraped example separator) ---
def run_debug_base(args, num_conv_layers: int, exp_params):
    """Single debug run with a 1- or 2-conv-layer configuration."""
    name = "Learning-rate-debug_ncl_" + str(num_conv_layers)

    conv = MultipleLayersParams()
    conv.compute_reconstruction = False
    conv.conv_classes = ConvLayer
    conv.sp_buffer_size = 6000
    conv.sp_batch_size = 4000
    conv.learning_rate = 0.10005
    conv.cluster_boost_threshold = 1000
    conv.max_encountered_seqs = 2000
    conv.max_frequent_seqs = 1000
    conv.seq_lookahead = 1
    conv.seq_length = 5

    # cluster counts / receptive fields depend on the number of conv layers
    if num_conv_layers == 2:
        conv.n_cluster_centers = [100, 230]
        conv.rf_size = [(8, 8), (4, 4)]
        conv.rf_stride = [(8, 8), (1, 1)]
        conv.num_conv_layers = 2
    else:
        conv.n_cluster_centers = 200
        conv.rf_size = (8, 8)
        conv.rf_stride = (8, 8)
        conv.num_conv_layers = 1

    top = MultipleLayersParams()
    top.n_cluster_centers = 100
    top.sp_buffer_size = 3000
    top.sp_batch_size = 2000
    top.learning_rate = 0.01
    top.cluster_boost_threshold = 1000
    top.compute_reconstruction = True

    cf_easy = [1, 2, 3, 4]
    size = SeDatasetSize.SIZE_64
    size_int = (size.value, size.value, 3)

    params = [{
        'se_group': {'class_filter': cf_easy,
                     'image_size': size},
        'model': {'conv_layers_params': conv,
                  'top_layer_params': top,
                  'image_size': size_int}
    }]

    exp_params.experiment_params.num_layers = num_conv_layers + 1  # needs to be there

    run_measurement(name, params, args, exp_params)
# --- Esempio n. 21 (scraped example separator) ---
def good_one_layer_config_for_four_objects() -> List[Dict[str, Any]]:
    """A topology which might achieve 100% SE accuracy on 4 objects"""

    conv = MultipleLayersParams()
    conv.compute_reconstruction = False
    conv.conv_classes = ConvLayer
    conv.sp_buffer_size = 6000
    conv.sp_batch_size = 4000
    conv.learning_rate = 0.1
    conv.cluster_boost_threshold = 1000
    conv.max_encountered_seqs = 2000
    conv.max_frequent_seqs = 1000
    conv.seq_lookahead = 1
    conv.seq_length = 5
    conv.n_cluster_centers = 200
    conv.rf_size = (8, 8)
    conv.rf_stride = (8, 8)
    conv.num_conv_layers = 1

    top = MultipleLayersParams()
    top.n_cluster_centers = 150
    top.sp_buffer_size = 3000
    top.sp_batch_size = 2000
    top.learning_rate = 0.15
    top.cluster_boost_threshold = 1000
    top.compute_reconstruction = True

    easy_filter = [1, 2, 3, 4]
    size = SeDatasetSize.SIZE_64
    size_int = (size.value, size.value, 3)

    # a single run; the conv layer additionally gets opp=0.5
    return [{
        'se_group': {'class_filter': easy_filter,
                     'image_size': size},
        'model': {'conv_layers_params': conv.change(opp=0.5),
                  'top_layer_params': top,
                  'image_size': size_int}
    }]
# --- Esempio n. 22 (scraped example separator) ---
def run_two_layer_net_new(args, debug: bool = False):
    """An example of the new experiment configuration"""

    name = "two_layer_new"

    # class filters of increasing size; only `six` is used below, the others
    # are kept for quick switching
    two = [2, 15]
    four = [2, 15, 4, 7]
    six = [2, 15, 4, 7, 10, 19]

    conv = MultipleLayersParams()
    conv.compute_reconstruction = True
    conv.conv_classes = ConvLayer
    conv.sp_buffer_size = 6000
    conv.sp_batch_size = 4000
    conv.learning_rate = 0.02
    conv.cluster_boost_threshold = 1000
    conv.max_encountered_seqs = 10000
    conv.max_frequent_seqs = 1000
    conv.seq_length = 4
    conv.seq_lookahead = 1
    conv.num_conv_layers = 1
    conv.n_cluster_centers = 400
    conv.rf_size = (8, 8)
    conv.rf_stride = (8, 8)

    top = MultipleLayersParams()
    top.n_cluster_centers = 250
    top.sp_buffer_size = 4000
    top.sp_batch_size = 500
    top.learning_rate = 0.2
    top.cluster_boost_threshold = 1000
    top.compute_reconstruction = True

    params = [{
        'conv_layers_params': conv,
        'top_layer_params': top,
        'image_size': SeDatasetSize.SIZE_64,
        'class_filter': six,
        'noise_amp': 0.0,
        'model_seed': None,
        'baseline_seed': None
    }]

    run_measurement(name, params, args, debug=debug, num_layers=2)
# --- Esempio n. 23 (scraped example separator) ---
        Connector.connect(self.se_group.outputs.image, self.model.inputs.image)

        if isinstance(self.model, Nc1r1GroupWithAdapter):
            Connector.connect(self.se_group.outputs.labels,
                              self.model.inputs.label)

    def restart(self):
        # intentionally a no-op — presumably required by the topology
        # interface but nothing needs resetting here; TODO confirm
        pass


if __name__ == '__main__':

    num_conv_layers = 1
    use_top_layer = False

    cp = MultipleLayersParams()
    cp.compute_reconstruction = True
    cp.conv_classes = ConvLayer
    cp.sp_buffer_size = 6000
    cp.sp_batch_size = 4000
    cp.learning_rate = 0.1
    cp.cluster_boost_threshold = 1000
    cp.max_encountered_seqs = 2000
    cp.max_frequent_seqs = 1000
    cp.seq_lookahead = 1
    cp.seq_length = 5

    if num_conv_layers == 2:
        cp.n_cluster_centers = [100, 230]

        cp.rf_size = (8, 8)
    def _create_and_connect_agent(self, input_image: MemoryBlock,
                                  output_reconstruction: InputSlot,
                                  input_size: Tuple[int, int, int]):
        """Create a 3-conv-layer R1NCMGroup agent, feed it the input image and
        wire its predicted reconstruction to the given output slot.

        Args:
            input_image: memory block carrying the input image
            output_reconstruction: slot receiving the predicted reconstructed input
            input_size: image dimensions — assumed (height, width, channels);
                TODO confirm the order against the caller
        """
        params = MultipleLayersParams()
        params.num_conv_layers = 3
        params.n_cluster_centers = [28, 14, 7]
        params.compute_reconstruction = True
        params.conv_classes = ConvLayer
        params.sp_buffer_size = 5000
        params.sp_batch_size = 500
        params.learning_rate = 0.2
        params.cluster_boost_threshold = 1000
        params.max_encountered_seqs = 1000
        params.max_frequent_seqs = 500
        params.seq_lookahead = 2
        params.seq_length = 4
        params.exploration_probability = 0
        # first layer covers the whole image; upper layers use 1x1 fields
        params.rf_size = [(input_size[0], input_size[1]), (1, 1), (1, 1)]
        # NOTE(review): None presumably means the stride is derived elsewhere;
        # TODO confirm
        params.rf_stride = None
        ta_group = R1NCMGroup(conv_layers_params=params,
                              model_seed=None,
                              image_size=input_size)

        self.add_node(ta_group)

        Connector.connect(input_image, ta_group.inputs.image)

        Connector.connect(ta_group.outputs.predicted_reconstructed_input,
                          output_reconstruction)
def test_create_flock_params():
    """Read from the params class and convert to list of params class instances"""

    p = MultipleLayersParams()
    p.seq_length = [1, 2, 3]
    p.n_cluster_centers = [1, 2, 3]
    p.cluster_boost_threshold = 123
    p.num_conv_layers = 3

    # one ExpertParams instance per configured layer
    assert len(p.convert_to_expert_params()) == 3

    # scalar attributes are expanded to one value per layer
    classes = p.read_list_of_params('conv_classes')
    assert len(classes) == 3
    classes = classes + [p.read_param('conv_classes', 0)]
    assert len(classes) == 4
    classes = classes + p.read_list_of_params('conv_classes')
    assert len(classes) == 7

    assert len(p.read_list_of_params('n_cluster_centers')) == 3
    assert p.read_param('n_cluster_centers', 1) == 2

    thresholds = p.read_list_of_params('cluster_boost_threshold')
    assert isinstance(thresholds, list)
    assert len(thresholds) == 3

    # defaults describe a single (top) layer
    defaults = MultipleLayersParams().read_list_of_params('cluster_boost_threshold')
    assert isinstance(defaults, list)
    assert len(defaults) == 1