Example #1
    def __init__(self, config, input_shape=None):
        super(Net, self).__init__(config, input_shape)

        self.NUM_CLASSES = P.GLB_PARAMS[P.KEY_DATASET_METADATA][
            P.KEY_DS_NUM_CLASSES]
        self.ALPHA_L = config.CONFIG_OPTIONS.get(P.KEY_ALPHA_L, 1.)
        self.ALPHA_G = config.CONFIG_OPTIONS.get(P.KEY_ALPHA_G, 0.)

        # Here we define the layers of our network

        self.fc = H.HebbianConv2d(
            in_channels=self.get_input_shape()[0],
            out_channels=self.NUM_CLASSES,
            kernel_size=1,
            lrn_sim=HF.get_affine_sim(HF.raised_cos_sim2d, p=2),
            lrn_act=HF.identity,
            lrn_cmp=True,
            lrn_t=True,
            out_sim=HF.vector_proj2d
            if self.ALPHA_G == 0. else HF.kernel_mult2d,
            out_act=HF.identity,
            competitive=H.Competitive(),
            gating=H.HebbianConv2d.GATE_BASE,
            upd_rule=H.HebbianConv2d.UPD_RECONSTR
            if self.ALPHA_G == 0. else None,
            reconstruction=H.HebbianConv2d.REC_QNT_SGN,
            reduction=H.HebbianConv2d.RED_W_AVG,
            alpha_l=self.ALPHA_L,
            alpha_g=self.ALPHA_G if self.ALPHA_G == 0. else 1.,
        )  # input_shape-shaped input, NUM_CLASSES-dimensional output (one per class)
Example #2
    def __init__(self, config, input_shape=None):
        super(Net, self).__init__(config, input_shape)

        self.NUM_CLASSES = P.GLB_PARAMS[P.KEY_DATASET_METADATA][
            P.KEY_DS_NUM_CLASSES]
        self.ALPHA = config.CONFIG_OPTIONS.get(P.KEY_ALPHA, 1.)

        # Here we define the layers of our network

        self.fc = H.HebbianConv2d(
            in_channels=self.get_input_shape()[0],
            out_size=self.NUM_CLASSES,
            kernel_size=(self.get_input_shape()[1], self.get_input_shape()[2])
            if len(self.get_input_shape()) >= 3 else 1,
            reconstruction=H.HebbianConv2d.REC_QNT_SGN,
            reduction=H.HebbianConv2d.RED_W_AVG,
            lrn_sim=HF.raised_cos2d_pow(2),
            lrn_act=HF.identity,
            out_sim=HF.vector_proj2d,
            out_act=HF.identity,
            weight_upd_rule=H.HebbianConv2d.RULE_BASE,
            alpha=self.ALPHA,
        )  # input_shape-shaped input, NUM_CLASSES-dimensional output (one per class)
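In the example above, the final HebbianConv2d uses a kernel_size equal to the full spatial size of its input, which (as the comments in the later examples note) makes the convolution behave like a fully connected layer. A minimal plain-PyTorch sketch of that equivalence, using a standard nn.Conv2d and hypothetical shapes rather than the HebbianConv2d API:

import torch
import torch.nn as nn

# Hypothetical feature map: batch of 2, 4 channels, 6x6 spatial size.
x = torch.randn(2, 4, 6, 6)

# A convolution whose kernel covers the entire feature map...
conv = nn.Conv2d(in_channels=4, out_channels=10, kernel_size=(6, 6))
# ...is the same linear map as a fully connected layer on the flattened input.
fc = nn.Linear(4 * 6 * 6, 10)
fc.weight.data = conv.weight.data.view(10, -1)
fc.bias.data = conv.bias.data

y_conv = conv(x).view(2, 10)   # conv output is (2, 10, 1, 1); flatten it
y_fc = fc(x.view(2, -1))
print(torch.allclose(y_conv, y_fc, atol=1e-6))  # True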
Example #3
    def __init__(self, config, input_shape=None):
        super(Net, self).__init__(config, input_shape)

        self.NUM_CLASSES = P.GLB_PARAMS[P.KEY_DATASET_METADATA][
            P.KEY_DS_NUM_CLASSES]
        self.DEEP_TEACHER_SIGNAL = config.CONFIG_OPTIONS.get(
            P.KEY_DEEP_TEACHER_SIGNAL, False)
        LRN_SIM = config.CONFIG_OPTIONS.get(PP.KEY_LRN_SIM, None)
        LRN_ACT = config.CONFIG_OPTIONS.get(PP.KEY_LRN_ACT, None)
        OUT_SIM = config.CONFIG_OPTIONS.get(PP.KEY_OUT_SIM, None)
        OUT_ACT = config.CONFIG_OPTIONS.get(PP.KEY_OUT_ACT, None)
        self.lrn_sim = utils.retrieve(
            LRN_SIM) if LRN_SIM is not None else HF.kernel_mult2d
        self.lrn_act = utils.retrieve(
            LRN_ACT) if LRN_ACT is not None else F.relu
        self.out_sim = utils.retrieve(
            OUT_SIM) if OUT_SIM is not None else HF.kernel_mult2d
        self.out_act = utils.retrieve(
            OUT_ACT) if OUT_ACT is not None else F.relu
        self.competitive_act = config.CONFIG_OPTIONS.get(
            PP.KEY_COMPETITIVE_ACT, None)
        if self.competitive_act is not None:
            self.competitive_act = utils.retrieve(self.competitive_act)
        self.K = config.CONFIG_OPTIONS.get(PP.KEY_COMPETITIVE_K, 1)
        self.LRN_SIM_B = config.CONFIG_OPTIONS.get(PP.KEY_LRN_SIM_B, 0.)
        self.LRN_SIM_S = config.CONFIG_OPTIONS.get(PP.KEY_LRN_SIM_S, 1.)
        self.LRN_SIM_P = config.CONFIG_OPTIONS.get(PP.KEY_LRN_SIM_P, 1.)
        self.LRN_SIM_EXP = config.CONFIG_OPTIONS.get(PP.KEY_LRN_SIM_EXP, None)
        self.LRN_ACT_SCALE_IN = config.CONFIG_OPTIONS.get(
            PP.KEY_LRN_ACT_SCALE_IN, 1)
        self.LRN_ACT_SCALE_OUT = config.CONFIG_OPTIONS.get(
            PP.KEY_LRN_ACT_SCALE_OUT, 1)
        self.LRN_ACT_OFFSET_IN = config.CONFIG_OPTIONS.get(
            PP.KEY_LRN_ACT_OFFSET_IN, 0)
        self.LRN_ACT_OFFSET_OUT = config.CONFIG_OPTIONS.get(
            PP.KEY_LRN_ACT_OFFSET_OUT, 0)
        self.LRN_ACT_P = config.CONFIG_OPTIONS.get(PP.KEY_LRN_ACT_P, 1)
        self.OUT_SIM_B = config.CONFIG_OPTIONS.get(PP.KEY_OUT_SIM_B, 0.)
        self.OUT_SIM_S = config.CONFIG_OPTIONS.get(PP.KEY_OUT_SIM_S, 1.)
        self.OUT_SIM_P = config.CONFIG_OPTIONS.get(PP.KEY_OUT_SIM_P, 1.)
        self.OUT_SIM_EXP = config.CONFIG_OPTIONS.get(PP.KEY_OUT_SIM_EXP, None)
        self.OUT_ACT_SCALE_IN = config.CONFIG_OPTIONS.get(
            PP.KEY_OUT_ACT_SCALE_IN, 1)
        self.OUT_ACT_SCALE_OUT = config.CONFIG_OPTIONS.get(
            PP.KEY_OUT_ACT_SCALE_OUT, 1)
        self.OUT_ACT_OFFSET_IN = config.CONFIG_OPTIONS.get(
            PP.KEY_OUT_ACT_OFFSET_IN, 0)
        self.OUT_ACT_OFFSET_OUT = config.CONFIG_OPTIONS.get(
            PP.KEY_OUT_ACT_OFFSET_OUT, 0)
        self.OUT_ACT_P = config.CONFIG_OPTIONS.get(PP.KEY_OUT_ACT_P, 1)
        self.ACT_COMPLEMENT_INIT = None
        self.ACT_COMPLEMENT_RATIO = 0.
        self.ACT_COMPLEMENT_ADAPT = None
        self.ACT_COMPLEMENT_GRP = False
        self.GATING = H.HebbianConv2d.GATE_HEBB
        self.UPD_RULE = H.HebbianConv2d.UPD_RECONSTR
        self.RECONSTR = H.HebbianConv2d.REC_LIN_CMB
        self.RED = H.HebbianConv2d.RED_AVG
        self.VAR_ADAPTIVE = False
        self.LOC_LRN_RULE = config.CONFIG_OPTIONS.get(P.KEY_LOCAL_LRN_RULE,
                                                      'hpca')
        if self.LOC_LRN_RULE in ['hpcat', 'hpcat_ada']:
            if LRN_ACT is None: self.lrn_act = HF.tanh
            if OUT_ACT is None: self.out_act = HF.tanh
            if self.LOC_LRN_RULE == 'hpcat_ada': self.VAR_ADAPTIVE = True
        if self.LOC_LRN_RULE == 'hwta':
            if LRN_SIM is None:
                self.lrn_sim = HF.raised_cos_sim2d
                self.LRN_SIM_P = config.CONFIG_OPTIONS.get(
                    PP.KEY_LRN_SIM_P, 2.
                )  # NB: In hwta the default lrn_sim is squared raised cosine
            if LRN_ACT is None: self.lrn_act = HF.identity
            if OUT_SIM is None: self.out_sim = HF.vector_proj2d
            if OUT_ACT is None: self.out_act = F.relu
            self.GATING = H.HebbianConv2d.GATE_BASE
            self.RECONSTR = H.HebbianConv2d.REC_QNT_SGN
            self.RED = H.HebbianConv2d.RED_W_AVG
        if self.LOC_LRN_RULE in ['ica', 'hica', 'ica_nrm', 'hica_nrm']:
            if LRN_ACT is None: self.lrn_act = HF.tanh
            if OUT_ACT is None: self.out_act = HF.tanh
            self.ACT_COMPLEMENT_INIT = config.CONFIG_OPTIONS.get(
                PP.KEY_ACT_COMPLEMENT_INIT, None)
            self.ACT_COMPLEMENT_RATIO = config.CONFIG_OPTIONS.get(
                PP.KEY_ACT_COMPLEMENT_RATIO, 0.)
            self.ACT_COMPLEMENT_ADAPT = config.CONFIG_OPTIONS.get(
                PP.KEY_ACT_COMPLEMENT_ADAPT, None)
            self.ACT_COMPLEMENT_GRP = config.CONFIG_OPTIONS.get(
                PP.KEY_ACT_COMPLEMENT_GRP, False)
            self.UPD_RULE = H.HebbianConv2d.UPD_ICA
            if self.LOC_LRN_RULE == 'hica':
                self.UPD_RULE = H.HebbianConv2d.UPD_HICA
            if self.LOC_LRN_RULE == 'ica_nrm':
                self.UPD_RULE = H.HebbianConv2d.UPD_ICA_NRM
            if self.LOC_LRN_RULE == 'hica_nrm':
                self.UPD_RULE = H.HebbianConv2d.UPD_HICA_NRM
            if self.LOC_LRN_RULE in ['ica_nrm', 'hica_nrm']:
                self.VAR_ADAPTIVE = True
            self.GATING = H.HebbianConv2d.GATE_BASE
        if self.LRN_SIM_EXP is not None:
            self.lrn_sim = HF.get_exp_sim(
                HF.get_affine_sim(self.lrn_sim, p=self.LRN_SIM_EXP),
                HF.get_pow_nc(
                    utils.retrieve(
                        config.CONFIG_OPTIONS.get(PP.KEY_LRN_SIM_NC, None)),
                    self.LRN_SIM_EXP))
        self.lrn_sim = HF.get_affine_sim(self.lrn_sim, self.LRN_SIM_B,
                                         self.LRN_SIM_S, self.LRN_SIM_P)
        self.lrn_act = HF.get_affine_act(self.lrn_act, self.LRN_ACT_SCALE_IN,
                                         self.LRN_ACT_SCALE_OUT,
                                         self.LRN_ACT_OFFSET_IN,
                                         self.LRN_ACT_OFFSET_OUT,
                                         self.LRN_ACT_P)
        if self.OUT_SIM_EXP is not None:
            self.out_sim = HF.get_exp_sim(
                HF.get_affine_sim(self.out_sim, p=self.OUT_SIM_EXP),
                HF.get_pow_nc(
                    utils.retrieve(
                        config.CONFIG_OPTIONS.get(PP.KEY_OUT_SIM_NC, None)),
                    self.OUT_SIM_EXP))
        self.out_sim = HF.get_affine_sim(self.out_sim, self.OUT_SIM_B,
                                         self.OUT_SIM_S, self.OUT_SIM_P)
        self.out_act = HF.get_affine_act(self.out_act, self.OUT_ACT_SCALE_IN,
                                         self.OUT_ACT_SCALE_OUT,
                                         self.OUT_ACT_OFFSET_IN,
                                         self.OUT_ACT_OFFSET_OUT,
                                         self.OUT_ACT_P)
        self.ALPHA_L = config.CONFIG_OPTIONS.get(P.KEY_ALPHA_L, 1.)
        self.ALPHA_G = config.CONFIG_OPTIONS.get(P.KEY_ALPHA_G, 0.)

        # Here we define the layers of our network

        # Fourth convolutional layer
        self.conv4 = H.HebbianConv2d(
            in_channels=self.get_input_shape()[0],
            out_channels=256,
            kernel_size=3,
            lrn_sim=self.lrn_sim,
            lrn_act=self.lrn_act,
            lrn_cmp=True,
            lrn_t=True,
            out_sim=self.out_sim,
            out_act=self.out_act,
            competitive=H.Competitive(out_size=(16, 16),
                                      competitive_act=self.competitive_act,
                                      k=self.K),
            act_complement_init=self.ACT_COMPLEMENT_INIT,
            act_complement_ratio=self.ACT_COMPLEMENT_RATIO,
            act_complement_adapt=self.ACT_COMPLEMENT_ADAPT,
            act_complement_grp=self.ACT_COMPLEMENT_GRP,
            var_adaptive=self.VAR_ADAPTIVE,
            gating=self.GATING,
            upd_rule=self.UPD_RULE,
            reconstruction=self.RECONSTR,
            reduction=self.RED,
            alpha_l=self.ALPHA_L,
            alpha_g=self.ALPHA_G,
        )  # 192 input channels, 16x16=256 output channels, 3x3 convolutions
        self.bn4 = nn.BatchNorm2d(256)  # Batch Norm layer

        self.CONV_OUTPUT_SHAPE = utils.tens2shape(
            self.get_dummy_fmap()[self.CONV_OUTPUT])

        # FC Layers (convolution with kernel size equal to the entire feature map size is like a fc layer)

        self.fc5 = H.HebbianConv2d(
            in_channels=self.CONV_OUTPUT_SHAPE[0],
            out_channels=4096,
            kernel_size=(self.CONV_OUTPUT_SHAPE[1], self.CONV_OUTPUT_SHAPE[2]),
            lrn_sim=self.lrn_sim,
            lrn_act=self.lrn_act,
            lrn_cmp=True,
            lrn_t=True,
            out_sim=self.out_sim,
            out_act=self.out_act,
            competitive=H.Competitive(out_size=(64, 64),
                                      competitive_act=self.competitive_act,
                                      k=self.K),
            act_complement_init=self.ACT_COMPLEMENT_INIT,
            act_complement_ratio=self.ACT_COMPLEMENT_RATIO,
            act_complement_adapt=self.ACT_COMPLEMENT_ADAPT,
            act_complement_grp=self.ACT_COMPLEMENT_GRP,
            var_adaptive=self.VAR_ADAPTIVE,
            gating=self.GATING,
            upd_rule=self.UPD_RULE,
            reconstruction=self.RECONSTR,
            reduction=self.RED,
            alpha_l=self.ALPHA_L,
            alpha_g=self.ALPHA_G,
        )  # conv_output_shape-shaped input, 64x64=4096 output channels
        self.bn5 = nn.BatchNorm2d(4096)  # Batch Norm layer

        self.fc6 = H.HebbianConv2d(
            in_channels=4096,
            out_channels=self.NUM_CLASSES,
            kernel_size=1,
            lrn_sim=HF.get_affine_sim(HF.raised_cos_sim2d, p=2),
            lrn_act=HF.identity,
            lrn_cmp=True,
            lrn_t=True,
            out_sim=HF.vector_proj2d
            if self.ALPHA_G == 0. else HF.kernel_mult2d,
            out_act=HF.identity,
            competitive=H.Competitive(),
            gating=H.HebbianConv2d.GATE_BASE,
            upd_rule=H.HebbianConv2d.UPD_RECONSTR
            if self.ALPHA_G == 0. else None,
            reconstruction=H.HebbianConv2d.REC_QNT_SGN,
            reduction=H.HebbianConv2d.RED_W_AVG,
            alpha_l=self.ALPHA_L,
            alpha_g=self.ALPHA_G if self.ALPHA_G == 0. else 1.,
        )  # 4096-dimensional input, NUM_CLASSES-dimensional output (one per class)
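Example #3 wraps the chosen similarity and activation functions with HF.get_affine_sim and HF.get_affine_act, parameterized by the *_SIM_B/S/P and *_ACT_SCALE/OFFSET/P options. The exact HF implementations are not reproduced here; the following is only a generic sketch of the wrapping pattern (hypothetical signature and semantics), showing how a higher-order function can attach bias, scale, and power terms to an existing activation:

import torch

def affine_act(act, scale_in=1.0, scale_out=1.0,
               offset_in=0.0, offset_out=0.0, p=1.0):
    # Rescale the input, apply the base activation, rescale the output,
    # and optionally apply a sign-preserving power.
    def wrapped(x):
        y = act(scale_in * (x + offset_in)) * scale_out + offset_out
        return y if p == 1.0 else y.sign() * y.abs().pow(p)
    return wrapped

# Usage with the defaults used above (plain wrapping, no rescaling):
act = affine_act(torch.tanh)
print(act(torch.tensor([-1.0, 0.0, 1.0])))  # same values as torch.tanh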
Example #4
    def __init__(self, config, input_shape=None):
        super(Net, self).__init__(config, input_shape)

        self.NUM_CLASSES = P.GLB_PARAMS[P.KEY_DATASET_METADATA][
            P.KEY_DS_NUM_CLASSES]
        self.DEEP_TEACHER_SIGNAL = config.CONFIG_OPTIONS.get(
            P.KEY_DEEP_TEACHER_SIGNAL, False)
        self.COMPETITIVE = False
        self.K = 0
        self.RECONSTR = H.HebbianConv2d.REC_LIN_CMB
        self.RED = H.HebbianConv2d.RED_AVG
        self.LRN_SIM = HF.kernel_mult2d
        self.LRN_ACT = F.relu
        self.OUT_SIM = HF.kernel_mult2d
        self.OUT_ACT = F.relu
        self.WEIGHT_UPD_RULE = H.HebbianConv2d.RULE_HEBB
        self.LOC_LRN_RULE = config.CONFIG_OPTIONS.get(P.KEY_LOCAL_LRN_RULE,
                                                      'hpca')
        if self.LOC_LRN_RULE == 'hwta':
            self.COMPETITIVE = True
            self.K = config.CONFIG_OPTIONS.get(PP.KEY_WTA_K, 1)
            self.RECONSTR = H.HebbianConv2d.REC_QNT_SGN
            self.RED = H.HebbianConv2d.RED_W_AVG
            self.LRN_SIM = HF.raised_cos2d_pow(2)
            self.LRN_ACT = HF.identity
            self.OUT_SIM = HF.vector_proj2d
            self.OUT_ACT = F.relu
            self.WEIGHT_UPD_RULE = H.HebbianConv2d.RULE_BASE
        self.ALPHA = config.CONFIG_OPTIONS.get(P.KEY_ALPHA, 1.)

        # Here we define the layers of our network

        # Fourth convolutional layer
        self.conv4 = H.HebbianConv2d(
            in_channels=self.get_input_shape()[0],
            out_size=(12, 16),
            kernel_size=3,
            competitive=self.COMPETITIVE,
            reconstruction=self.RECONSTR,
            reduction=self.RED,
            lrn_sim=self.LRN_SIM,
            lrn_act=F.relu,
            out_sim=self.OUT_SIM,
            out_act=F.relu,
            weight_upd_rule=self.WEIGHT_UPD_RULE,
            alpha=self.ALPHA,
        )  # 192 input channels, 12x16=192 output channels, 3x3 convolutions
        self.bn4 = nn.BatchNorm2d(192)  # Batch Norm layer

        # Fifth convolutional layer
        self.conv5 = H.HebbianConv2d(
            in_channels=192,
            out_size=(16, 16),
            kernel_size=3,
            competitive=self.COMPETITIVE,
            reconstruction=self.RECONSTR,
            reduction=self.RED,
            lrn_sim=self.LRN_SIM,
            lrn_act=F.relu,
            out_sim=self.OUT_SIM,
            out_act=F.relu,
            weight_upd_rule=self.WEIGHT_UPD_RULE,
            alpha=self.ALPHA,
        )  # 192 input channels, 16x16=256 output channels, 3x3 convolutions
        self.bn5 = nn.BatchNorm2d(256)  # Batch Norm layer

        # Sixth convolutional layer
        self.conv6 = H.HebbianConv2d(
            in_channels=256,
            out_size=(16, 16),
            kernel_size=3,
            competitive=self.COMPETITIVE,
            reconstruction=self.RECONSTR,
            reduction=self.RED,
            lrn_sim=self.LRN_SIM,
            lrn_act=F.relu,
            out_sim=self.OUT_SIM,
            out_act=F.relu,
            weight_upd_rule=self.WEIGHT_UPD_RULE,
            alpha=self.ALPHA,
        )  # 256 input channels, 16x16=256 output channels, 3x3 convolutions
        self.bn6 = nn.BatchNorm2d(256)  # Batch Norm layer

        # Seventh convolutional layer
        self.conv7 = H.HebbianConv2d(
            in_channels=256,
            out_size=(16, 24),
            kernel_size=3,
            competitive=self.COMPETITIVE,
            reconstruction=self.RECONSTR,
            reduction=self.RED,
            lrn_sim=self.LRN_SIM,
            lrn_act=F.relu,
            out_sim=self.OUT_SIM,
            out_act=F.relu,
            weight_upd_rule=self.WEIGHT_UPD_RULE,
            alpha=self.ALPHA,
        )  # 256 input channels, 16x24=384 output channels, 3x3 convolutions
        self.bn7 = nn.BatchNorm2d(384)  # Batch Norm layer

        # Eighth convolutional layer
        self.conv8 = H.HebbianConv2d(
            in_channels=384,
            out_size=(16, 32),
            kernel_size=3,
            competitive=self.COMPETITIVE,
            reconstruction=self.RECONSTR,
            reduction=self.RED,
            lrn_sim=self.LRN_SIM,
            lrn_act=F.relu,
            out_sim=self.OUT_SIM,
            out_act=F.relu,
            weight_upd_rule=self.WEIGHT_UPD_RULE,
            alpha=self.ALPHA,
        )  # 384 input channels, 16x32=512 output channels, 3x3 convolutions
        self.bn8 = nn.BatchNorm2d(512)  # Batch Norm layer

        self.CONV_OUTPUT_SHAPE = self.get_output_fmap_shape(self.CONV_OUTPUT)

        # FC Layers (convolution with kernel size equal to the entire feature map size is like a fc layer)

        self.fc9 = H.HebbianConv2d(
            in_channels=self.CONV_OUTPUT_SHAPE[0],
            out_size=(64, 64),
            kernel_size=(self.CONV_OUTPUT_SHAPE[1], self.CONV_OUTPUT_SHAPE[2]),
            competitive=self.COMPETITIVE,
            reconstruction=self.RECONSTR,
            reduction=self.RED,
            lrn_sim=self.LRN_SIM,
            lrn_act=F.relu,
            out_sim=self.OUT_SIM,
            out_act=F.relu,
            weight_upd_rule=self.WEIGHT_UPD_RULE,
            alpha=self.ALPHA,
        )  # conv_output_shape-shaped input, 64x64=4096 output channels
        self.bn9 = nn.BatchNorm2d(4096)  # Batch Norm layer

        self.fc10 = H.HebbianConv2d(
            in_channels=4096,
            out_size=self.NUM_CLASSES,
            kernel_size=1,
            reconstruction=H.HebbianConv2d.REC_QNT_SGN,
            reduction=H.HebbianConv2d.RED_W_AVG,
            lrn_sim=HF.raised_cos2d_pow(2),
            lrn_act=HF.identity,
            out_sim=HF.vector_proj2d,
            out_act=HF.identity,
            weight_upd_rule=H.HebbianConv2d.RULE_BASE,
            alpha=self.ALPHA,
        )  # 4096-dimensional input, NUM_CLASSES-dimensional output (one per class)
Example #5
    def __init__(self, config, input_shape=None):
        super(Net, self).__init__(config, input_shape)

        self.NUM_CLASSES = P.GLB_PARAMS[P.KEY_DATASET_METADATA][
            P.KEY_DS_NUM_CLASSES]
        self.DEEP_TEACHER_SIGNAL = config.CONFIG_OPTIONS.get(
            P.KEY_DEEP_TEACHER_SIGNAL, False)
        self.COMPETITIVE = False
        self.K = 0
        self.RECONSTR = H.HebbianConv2d.REC_LIN_CMB
        self.RED = H.HebbianConv2d.RED_AVG
        self.LRN_SIM = HF.kernel_mult2d
        self.LRN_ACT = F.relu
        self.OUT_SIM = HF.kernel_mult2d
        self.OUT_ACT = F.relu
        self.WEIGHT_UPD_RULE = H.HebbianConv2d.RULE_HEBB
        self.LOC_LRN_RULE = config.CONFIG_OPTIONS.get(P.KEY_LOCAL_LRN_RULE,
                                                      'hpca')
        if self.LOC_LRN_RULE == 'hwta':
            self.COMPETITIVE = True
            self.K = config.CONFIG_OPTIONS.get(PP.KEY_WTA_K, 1)
            self.RECONSTR = H.HebbianConv2d.REC_QNT_SGN
            self.RED = H.HebbianConv2d.RED_W_AVG
            self.LRN_SIM = HF.raised_cos2d_pow(2)
            self.LRN_ACT = HF.identity
            self.OUT_SIM = HF.vector_proj2d
            self.OUT_ACT = F.relu
            self.WEIGHT_UPD_RULE = H.HebbianConv2d.RULE_BASE
        self.ALPHA = config.CONFIG_OPTIONS.get(P.KEY_ALPHA, 1.)

        # Here we define the layers of our network

        # First convolutional layer
        self.conv1 = H.HebbianConv2d(
            in_channels=3,
            out_size=(8, 12),
            kernel_size=5,
            competitive=self.COMPETITIVE,
            reconstruction=self.RECONSTR,
            reduction=self.RED,
            lrn_sim=self.LRN_SIM,
            lrn_act=F.relu,
            out_sim=self.OUT_SIM,
            out_act=F.relu,
            weight_upd_rule=self.WEIGHT_UPD_RULE,
            alpha=self.ALPHA,
        )  # 3 input channels, 8x12=96 output channels, 5x5 convolutions
        self.bn1 = nn.BatchNorm2d(96)  # Batch Norm layer

        self.CONV_OUTPUT_SHAPE = self.get_output_fmap_shape(self.CONV_OUTPUT)

        # FC Layers (convolution with kernel size equal to the entire feature map size is like a fc layer)

        self.fc2 = H.HebbianConv2d(
            in_channels=self.CONV_OUTPUT_SHAPE[0],
            out_size=self.NUM_CLASSES,
            kernel_size=(self.CONV_OUTPUT_SHAPE[1], self.CONV_OUTPUT_SHAPE[2]),
            reconstruction=H.HebbianConv2d.REC_QNT_SGN,
            reduction=H.HebbianConv2d.RED_W_AVG,
            lrn_sim=HF.raised_cos2d_pow(2),
            lrn_act=HF.identity,
            out_sim=HF.vector_proj2d,
            out_act=HF.identity,
            weight_upd_rule=H.HebbianConv2d.RULE_BASE,
            alpha=self.ALPHA,
        )  # conv_output_shape-shaped input, NUM_CLASSES-dimensional output (one per class)
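Examples #3 through #5 obtain CONV_OUTPUT_SHAPE from the base class (get_output_fmap_shape, or utils.tens2shape on a dummy feature map) so that the final "fc" convolution can use the full feature map as its kernel size. A minimal sketch of that pattern with plain nn.Conv2d, using hypothetical layer sizes and a hypothetical 32x32 RGB input rather than the HebbianConv2d API:

import torch
import torch.nn as nn

conv_part = nn.Sequential(
    nn.Conv2d(3, 96, kernel_size=5),  # stands in for conv1 in Example #5
    nn.BatchNorm2d(96),
    nn.ReLU(),
)

# One dummy forward pass to discover the conv output shape (C, H, W).
with torch.no_grad():
    c, h, w = conv_part(torch.zeros(1, 3, 32, 32)).shape[1:]

# The final "fc" layer is a convolution whose kernel spans the whole map.
fc = nn.Conv2d(c, 10, kernel_size=(h, w))  # 10 = hypothetical NUM_CLASSES
print(c, h, w)  # 96 28 28 for a 5x5 convolution with no padding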