def __init__(self,
             n,
             m,
             l,
             w,
             p,
             cap,
             options=Options(),
             update_scheme=UpdateScheme()):
    # Convenience constructor: build the table-based plasticity rules requested
    # by the options and delegate everything else to the parent __init__.
    rule_class = TableRule_PostCount
    hl_rule = rule_class() if options.use_graph_rule else None
    output_rule = rule_class() if options.use_output_rule else None
    super().__init__(n=n,
                     m=m,
                     l=l,
                     w=w,
                     p=p,
                     cap=cap,
                     hl_rules=hl_rule,
                     output_rule=output_rule,
                     options=options,
                     update_scheme=update_scheme)
dim = 10  # Dimension of datasets
N = 10000  # Size of datasets

# Feed-forward brain config
m = 2  # Output layer size.
w = 32  # Width of hidden layers.
p = 0.5  # Connectivity probability.
cap = 16  # Number of nodes firing per layer.

# Training config
num_retrain = 5
num_rule_epochs = 10
num_epochs_upstream = 1
num_epochs_downstream = 1
scheme = UpdateScheme(cross_entropy_loss=True,
                      mse_loss=False,
                      update_misclassified_only=False,
                      update_all_edges=True)
rules_to_skip = []
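
# Aside (illustration only, not part of the original script): UpdateScheme is
# defined elsewhere in this codebase. A minimal stand-in that would accept the
# same keyword arguments as the UpdateScheme(...) call above -- the field names
# come from that call, while the defaults chosen here are assumptions:
from dataclasses import dataclass


@dataclass
class UpdateSchemeSketch:
    # Defaults mirror the values passed to UpdateScheme() in the config above.
    cross_entropy_loss: bool = True
    mse_loss: bool = False
    update_misclassified_only: bool = False
    update_all_edges: bool = True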

# ----------------------------------------------------------------------------------------------------------------------

# Compare rules for this configuration
for plas_rules in ['output', 'hidden-layer']:
    compare_rules(dataset=dataset,
                  dim=dim,
                  N=N,
                  m=m,
                  w=w,
                  p=p,
                  cap=cap,
                  plas_rules=plas_rules,
                  # The remaining keyword arguments pass along the training
                  # config defined above; their names are assumed here.
                  num_retrain=num_retrain,
                  num_rule_epochs=num_rule_epochs,
                  num_epochs_upstream=num_epochs_upstream,
                  num_epochs_downstream=num_epochs_downstream,
                  update_scheme=scheme,
                  rules_to_skip=rules_to_skip)
Example #3
    def __init__(self,
                 n,
                 m,
                 l,
                 w,
                 p,
                 cap,
                 hl_rules=None,
                 output_rule=None,
                 options=Options(),
                 update_scheme=UpdateScheme(),
                 use_gpu=False):
        super().__init__(
            n=n,
            m=m,
            l=l,
            w=w,
            p=p,
            cap=cap,
            gd_input=options.gd_input,
            gd_output=options.gd_output,
            use_softmax=options.use_softmax,
        )

        # Make sure the options are consistent
        assert not options.use_input_rule, "There is currently no support for an input layer plasticity rule"
        assert options.gd_input, "If we don't use GD on the input weights, they will never be learned"

        assert options.use_graph_rule == (
            l > 1
        ), "A graph rule should be used iff there is more than 1 hidden layer"
        assert options.use_graph_rule or not options.gd_graph_rule, "gd_graph_rule is not applicable when use_graph_rule is False"

        assert options.use_output_rule or not options.gd_output_rule, "gd_output_rule is not applicable when use_output_rule is False"
        assert options.use_output_rule != options.gd_output, "use_output_rule and gd_output should be mutually exclusive"

        # Store additional params
        self.options = options
        self.update_scheme = update_scheme
        self.use_gpu = use_gpu
        self.step_sz = 0.01

        # Define our plasticity rules:

        # Hidden Layer rules
        # Make sure a hidden-layer rule was supplied if needed
        hl_rules_supplied = bool(hl_rules)
        hl_rule_needed = options.use_graph_rule and (l > 1)
        assert hl_rules_supplied == hl_rule_needed, "The hl_rules parameter does not agree with the other parameters"

        # Convert to a list if a single rule was supplied
        if not hl_rules_supplied:
            hl_rules = [None]  # First hidden layer never uses a plasticity rule
        elif isinstance(hl_rules, PlasticityRule):
            # Common plasticity rule for all hidden layers
            hl_rules = [None] + [hl_rules] * (l - 1)
        else:
            assert len(hl_rules) == (l - 1), "hl_rules list must have length (l-1)"
            hl_rules = [None] + hl_rules

        # Initialize the rules
        unique_rules = set(hl_rules) - {None}
        for rule in unique_rules:
            # Assign basic params
            rule.ff_net = self
            rule.isOutputRule = False

            # Ask the rule to initialize itself
            layers = [i for i, r in enumerate(hl_rules) if r == rule]
            rule.initialize(layers=layers)

        # Store these rules
        self.hidden_layer_rules = hl_rules

        # Output rule
        # Make sure an output rule was supplied if needed
        output_rule_supplied = bool(output_rule)
        assert output_rule_supplied == options.use_output_rule, "The output_rule parameter does not agree with options.use_output_rule"

        # Initialize the rule
        if output_rule_supplied:
            # Make sure the output rule is distinct from all hidden-layer rules
            assert output_rule not in hl_rules, "The output rule must be distinct from all hidden-layer rules"

            # Assign basic params
            output_rule.ff_net = self
            output_rule.isOutputRule = True

            # Ask the rule to initialize itself
            output_rule.initialize()
        else:
            output_rule = None

        # Store the rule
        self.output_rule = output_rule
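
        # Summary of the state this constructor leaves behind:
        #   self.hidden_layer_rules -- a list of length l whose first entry is
        #       always None (the first hidden layer never uses a plasticity rule)
        #       and whose remaining entries are the (possibly shared) hidden-layer
        #       rules supplied to this constructor.
        #   self.output_rule -- the output-layer plasticity rule, or None when
        #       gradient descent is used on the output weights instead.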