Example 1
    def __init__(self, config, input_shape=None):
        super(Net, self).__init__(config, input_shape)

        self.NUM_CLASSES = P.GLB_PARAMS[P.KEY_DATASET_METADATA][
            P.KEY_DS_NUM_CLASSES]
        self.ALPHA = config.CONFIG_OPTIONS.get(P.KEY_ALPHA, 1.)

        # Here we define the layers of our network

        self.fc = H.HebbianConv2d(
            in_channels=self.get_input_shape()[0],
            out_size=self.NUM_CLASSES,
            kernel_size=(self.get_input_shape()[1], self.get_input_shape()[2])
            if len(self.get_input_shape()) >= 3 else 1,
            reconstruction=H.HebbianConv2d.REC_QNT_SGN,
            reduction=H.HebbianConv2d.RED_W_AVG,
            lrn_sim=HF.raised_cos2d_pow(2),
            lrn_act=HF.identity,
            out_sim=HF.vector_proj2d,
            out_act=HF.identity,
            weight_upd_rule=H.HebbianConv2d.RULE_BASE,
            alpha=self.ALPHA,
        )  # input_shape-shaped input, NUM_CLASSES-dimensional output (one per class)
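The kernel_size expression above adapts to the input layout: when the input shape has spatial dimensions, the kernel spans the whole feature map; otherwise a 1x1 kernel is used. A minimal stand-alone sketch of that conditional (the shapes below are hypothetical, not taken from any configuration):

# Stand-alone illustration of the kernel_size conditional above (hypothetical shapes).
def kernel_for(input_shape):
    # Full-map kernel for (C, H, W) inputs, 1x1 kernel for flat (C,) inputs.
    return (input_shape[1], input_shape[2]) if len(input_shape) >= 3 else 1

print(kernel_for((96, 8, 8)))  # (8, 8): kernel covers the entire 8x8 feature map
print(kernel_for((4096,)))     # 1: flat input handled as a 1x1 spatial map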
Example 2
    def __init__(self, config, input_shape=None):
        super(Net, self).__init__(config, input_shape)

        self.NUM_CLASSES = P.GLB_PARAMS[P.KEY_DATASET_METADATA][
            P.KEY_DS_NUM_CLASSES]
        self.DEEP_TEACHER_SIGNAL = config.CONFIG_OPTIONS.get(
            P.KEY_DEEP_TEACHER_SIGNAL, False)
        self.COMPETITIVE = False
        self.K = 0
        self.RECONSTR = H.HebbianConv2d.REC_LIN_CMB
        self.RED = H.HebbianConv2d.RED_AVG
        self.LRN_SIM = HF.kernel_mult2d
        self.LRN_ACT = F.relu
        self.OUT_SIM = HF.kernel_mult2d
        self.OUT_ACT = F.relu
        self.WEIGHT_UPD_RULE = H.HebbianConv2d.RULE_HEBB
        self.LOC_LRN_RULE = config.CONFIG_OPTIONS.get(P.KEY_LOCAL_LRN_RULE,
                                                      'hpca')
        if self.LOC_LRN_RULE == 'hwta':
            self.COMPETITIVE = True
            self.K = config.CONFIG_OPTIONS.get(PP.KEY_WTA_K, 1)
            self.RECONSTR = H.HebbianConv2d.REC_QNT_SGN
            self.RED = H.HebbianConv2d.RED_W_AVG
            self.LRN_SIM = HF.raised_cos2d_pow(2)
            self.LRN_ACT = HF.identity
            self.OUT_SIM = HF.vector_proj2d
            self.OUT_ACT = F.relu
            self.WEIGHT_UPD_RULE = H.HebbianConv2d.RULE_BASE
        self.ALPHA = config.CONFIG_OPTIONS.get(P.KEY_ALPHA, 1.)

        # Here we define the layers of our network

        # Fourth convolutional layer
        self.conv4 = H.HebbianConv2d(
            in_channels=self.get_input_shape()[0],
            out_size=(12, 16),
            kernel_size=3,
            competitive=self.COMPETITIVE,
            reconstruction=self.RECONSTR,
            reduction=self.RED,
            lrn_sim=self.LRN_SIM,
            lrn_act=self.LRN_ACT,
            out_sim=self.OUT_SIM,
            out_act=self.OUT_ACT,
            weight_upd_rule=self.WEIGHT_UPD_RULE,
            alpha=self.ALPHA,
        )  # 192 input channels, 12x16=192 output channels, 3x3 convolutions
        self.bn4 = nn.BatchNorm2d(192)  # Batch Norm layer

        # Fifth convolutional layer
        self.conv5 = H.HebbianConv2d(
            in_channels=192,
            out_size=(16, 16),
            kernel_size=3,
            competitive=self.COMPETITIVE,
            reconstruction=self.RECONSTR,
            reduction=self.RED,
            lrn_sim=self.LRN_SIM,
            lrn_act=self.LRN_ACT,
            out_sim=self.OUT_SIM,
            out_act=self.OUT_ACT,
            weight_upd_rule=self.WEIGHT_UPD_RULE,
            alpha=self.ALPHA,
        )  # 192 input channels, 16x16=256 output channels, 3x3 convolutions
        self.bn5 = nn.BatchNorm2d(256)  # Batch Norm layer

        # Sixth convolutional layer
        self.conv6 = H.HebbianConv2d(
            in_channels=256,
            out_size=(16, 16),
            kernel_size=3,
            competitive=self.COMPETITIVE,
            reconstruction=self.RECONSTR,
            reduction=self.RED,
            lrn_sim=self.LRN_SIM,
            lrn_act=self.LRN_ACT,
            out_sim=self.OUT_SIM,
            out_act=self.OUT_ACT,
            weight_upd_rule=self.WEIGHT_UPD_RULE,
            alpha=self.ALPHA,
        )  # 256 input channels, 16x16=256 output channels, 3x3 convolutions
        self.bn6 = nn.BatchNorm2d(256)  # Batch Norm layer

        # Seventh convolutional layer
        self.conv7 = H.HebbianConv2d(
            in_channels=256,
            out_size=(16, 24),
            kernel_size=3,
            competitive=self.COMPETITIVE,
            reconstruction=self.RECONSTR,
            reduction=self.RED,
            lrn_sim=self.LRN_SIM,
            lrn_act=self.LRN_ACT,
            out_sim=self.OUT_SIM,
            out_act=self.OUT_ACT,
            weight_upd_rule=self.WEIGHT_UPD_RULE,
            alpha=self.ALPHA,
        )  # 256 input channels, 16x24=384 output channels, 3x3 convolutions
        self.bn7 = nn.BatchNorm2d(384)  # Batch Norm layer

        # Eighth convolutional layer
        self.conv8 = H.HebbianConv2d(
            in_channels=384,
            out_size=(16, 32),
            kernel_size=3,
            competitive=self.COMPETITIVE,
            reconstruction=self.RECONSTR,
            reduction=self.RED,
            lrn_sim=self.LRN_SIM,
            lrn_act=self.LRN_ACT,
            out_sim=self.OUT_SIM,
            out_act=self.OUT_ACT,
            weight_upd_rule=self.WEIGHT_UPD_RULE,
            alpha=self.ALPHA,
        )  # 384 input channels, 16x32=512 output channels, 3x3 convolutions
        self.bn8 = nn.BatchNorm2d(512)  # Batch Norm layer

        self.CONV_OUTPUT_SHAPE = self.get_output_fmap_shape(self.CONV_OUTPUT)

        # FC layers (a convolution whose kernel size equals the entire feature-map size acts like a fully connected layer)

        self.fc9 = H.HebbianConv2d(
            in_channels=self.CONV_OUTPUT_SHAPE[0],
            out_size=(64, 64),
            kernel_size=(self.CONV_OUTPUT_SHAPE[1], self.CONV_OUTPUT_SHAPE[2]),
            competitive=self.COMPETITIVE,
            reconstruction=self.RECONSTR,
            reduction=self.RED,
            lrn_sim=self.LRN_SIM,
            lrn_act=self.LRN_ACT,
            out_sim=self.OUT_SIM,
            out_act=self.OUT_ACT,
            weight_upd_rule=self.WEIGHT_UPD_RULE,
            alpha=self.ALPHA,
        )  # conv_output_shape-shaped input, 64x64=4096 output channels
        self.bn9 = nn.BatchNorm2d(4096)  # Batch Norm layer

        self.fc10 = H.HebbianConv2d(
            in_channels=4096,
            out_size=self.NUM_CLASSES,
            kernel_size=1,
            reconstruction=H.HebbianConv2d.REC_QNT_SGN,
            reduction=H.HebbianConv2d.RED_W_AVG,
            lrn_sim=HF.raised_cos2d_pow(2),
            lrn_act=HF.identity,
            out_sim=HF.vector_proj2d,
            out_act=HF.identity,
            weight_upd_rule=H.HebbianConv2d.RULE_BASE,
            alpha=self.ALPHA,
        )  # 4096-dimensional input, NUM_CLASSES-dimensional output (one per class)
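The comments on conv4 through fc10 track the channel bookkeeping: each out_size tuple multiplies out to the channel count consumed by the next layer, and fc9 uses a kernel as large as the remaining feature map so it behaves like a fully connected layer. A rough shape trace using plain torch.nn.Conv2d as a stand-in for H.HebbianConv2d (the 16x16 input resolution, the absence of pooling, and the 10-class head are assumptions for illustration only):

# Shape trace of the conv4..fc10 stack with standard Conv2d stand-ins for
# H.HebbianConv2d; input resolution and class count are assumptions.
import torch
import torch.nn as nn

x = torch.randn(1, 192, 16, 16)                  # assumed 192-channel, 16x16 input
conv4 = nn.Conv2d(192, 192, 3)                   # 12x16 = 192 output channels
conv5 = nn.Conv2d(192, 256, 3)                   # 16x16 = 256
conv6 = nn.Conv2d(256, 256, 3)                   # 16x16 = 256
conv7 = nn.Conv2d(256, 384, 3)                   # 16x24 = 384
conv8 = nn.Conv2d(384, 512, 3)                   # 16x32 = 512

y = conv8(conv7(conv6(conv5(conv4(x)))))
print(y.shape)                                   # torch.Size([1, 512, 6, 6])

fc9 = nn.Conv2d(512, 4096, kernel_size=tuple(y.shape[2:]))  # full-map kernel ~ fc layer
fc10 = nn.Conv2d(4096, 10, kernel_size=1)                   # assumed 10 classes
print(fc10(fc9(y)).shape)                        # torch.Size([1, 10, 1, 1])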
Example 3
    def __init__(self, config, input_shape=None):
        super(Net, self).__init__(config, input_shape)

        self.NUM_CLASSES = P.GLB_PARAMS[P.KEY_DATASET_METADATA][
            P.KEY_DS_NUM_CLASSES]
        self.DEEP_TEACHER_SIGNAL = config.CONFIG_OPTIONS.get(
            P.KEY_DEEP_TEACHER_SIGNAL, False)
        self.COMPETITIVE = False
        self.K = 0
        self.RECONSTR = H.HebbianConv2d.REC_LIN_CMB
        self.RED = H.HebbianConv2d.RED_AVG
        self.LRN_SIM = HF.kernel_mult2d
        self.LRN_ACT = F.relu
        self.OUT_SIM = HF.kernel_mult2d
        self.OUT_ACT = F.relu
        self.WEIGHT_UPD_RULE = H.HebbianConv2d.RULE_HEBB
        self.LOC_LRN_RULE = config.CONFIG_OPTIONS.get(P.KEY_LOCAL_LRN_RULE,
                                                      'hpca')
        if self.LOC_LRN_RULE == 'hwta':
            self.COMPETITIVE = True
            self.K = config.CONFIG_OPTIONS.get(PP.KEY_WTA_K, 1)
            self.RECONSTR = H.HebbianConv2d.REC_QNT_SGN
            self.RED = H.HebbianConv2d.RED_W_AVG
            self.LRN_SIM = HF.raised_cos2d_pow(2)
            self.LRN_ACT = HF.identity
            self.OUT_SIM = HF.vector_proj2d
            self.OUT_ACT = F.relu
            self.WEIGHT_UPD_RULE = H.HebbianConv2d.RULE_BASE
        self.ALPHA = config.CONFIG_OPTIONS.get(P.KEY_ALPHA, 1.)

        # Here we define the layers of our network

        # First convolutional layer
        self.conv1 = H.HebbianConv2d(
            in_channels=3,
            out_size=(8, 12),
            kernel_size=5,
            competitive=self.COMPETITIVE,
            reconstruction=self.RECONSTR,
            reduction=self.RED,
            lrn_sim=self.LRN_SIM,
            lrn_act=self.LRN_ACT,
            out_sim=self.OUT_SIM,
            out_act=self.OUT_ACT,
            weight_upd_rule=self.WEIGHT_UPD_RULE,
            alpha=self.ALPHA,
        )  # 3 input channels, 8x12=96 output channels, 5x5 convolutions
        self.bn1 = nn.BatchNorm2d(96)  # Batch Norm layer

        self.CONV_OUTPUT_SHAPE = self.get_output_fmap_shape(self.CONV_OUTPUT)

        # FC layers (a convolution whose kernel size equals the entire feature-map size acts like a fully connected layer)

        self.fc2 = H.HebbianConv2d(
            in_channels=self.CONV_OUTPUT_SHAPE[0],
            out_size=self.NUM_CLASSES,
            kernel_size=(self.CONV_OUTPUT_SHAPE[1], self.CONV_OUTPUT_SHAPE[2]),
            reconstruction=H.HebbianConv2d.REC_QNT_SGN,
            reduction=H.HebbianConv2d.RED_W_AVG,
            lrn_sim=HF.raised_cos2d_pow(2),
            lrn_act=HF.identity,
            out_sim=HF.vector_proj2d,
            out_act=HF.identity,
            weight_upd_rule=H.HebbianConv2d.RULE_BASE,
            alpha=self.ALPHA,
        )  # conv_output_shape-shaped input, NUM_CLASSES-dimensional output (one per class)
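As the comment above notes, a convolution whose kernel covers the whole feature map computes the same function as a fully connected layer applied to the flattened input. A quick numerical check with standard PyTorch layers (the sizes are illustrative, not taken from the configuration):

# Sanity check of the conv-as-fc equivalence with standard PyTorch layers.
import torch
import torch.nn as nn

C, H_in, W_in, NUM_CLASSES = 96, 8, 8, 10        # illustrative sizes
x = torch.randn(4, C, H_in, W_in)

fc_as_conv = nn.Conv2d(C, NUM_CLASSES, kernel_size=(H_in, W_in))
fc = nn.Linear(C * H_in * W_in, NUM_CLASSES)

# Share parameters so the two layers compute the same function.
with torch.no_grad():
    fc.weight.copy_(fc_as_conv.weight.view(NUM_CLASSES, -1))
    fc.bias.copy_(fc_as_conv.bias)

out_conv = fc_as_conv(x).flatten(1)              # (4, NUM_CLASSES, 1, 1) -> (4, NUM_CLASSES)
out_fc = fc(x.flatten(1))
print(torch.allclose(out_conv, out_fc, atol=1e-5))  # True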