Example 1
    def __init__(self, input_shape=P.INPUT_SHAPE):
        super(Net, self).__init__()

        # Shape of the tensors that we expect to receive as input
        self.input_shape = input_shape

        # Here we define the layers of our network

        # First convolutional layer
        self.conv1 = H.HebbianMap2d(
            in_channels=3,
            out_size=(8, 12),
            kernel_size=5,
            out=H.clp_cos_sim2d,
            eta=0.1,
        )  # 3 input channels, 8x12=96 output channels, 5x5 convolutions
        self.bn1 = nn.BatchNorm2d(96)  # Batch Norm layer

        self.conv_output_shape = utils.get_conv_output_shape(self)

        # FC Layers (convolution with kernel size equal to the entire feature map size is like a fc layer)

        self.fc2 = H.HebbianMap2d(
            in_channels=self.conv_output_shape[0],
            out_size=P.NUM_CLASSES,
            kernel_size=(self.conv_output_shape[1], self.conv_output_shape[2]),
            competitive=False,
            eta=0.1,
        )  # conv_output_shape-shaped input, 10-dimensional output (one per class)
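
The comment on fc2 relies on the fact that a convolution whose kernel covers the entire feature map computes the same mapping as a fully connected layer. A minimal sketch in plain PyTorch makes the equivalence concrete (shapes are chosen for illustration and are independent of H.HebbianMap2d):

import torch
import torch.nn as nn

feat = torch.randn(1, 96, 4, 6)                     # a 96-channel 4x6 feature map (illustrative size)

conv_fc = nn.Conv2d(96, 10, kernel_size=(4, 6))     # kernel spans the whole feature map
linear = nn.Linear(96 * 4 * 6, 10)

with torch.no_grad():                               # share weights so both layers compute the same map
    linear.weight.copy_(conv_fc.weight.view(10, -1))
    linear.bias.copy_(conv_fc.bias)

out_conv = conv_fc(feat).view(1, 10)                # (1, 10, 1, 1) squeezed to (1, 10)
out_lin = linear(feat.view(1, -1))
print(torch.allclose(out_conv, out_lin, atol=1e-5))  # True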
Example 2
    def __init__(self, input_shape=P.INPUT_SHAPE):
        super(Net, self).__init__()

        # Shape of the tensors that we expect to receive as input
        self.input_shape = input_shape
        if len(input_shape) != 3: self.input_shape = (input_shape[0], 1, 1)

        # Here we define the layers of our network

        # FC Layers (convolution with kernel size equal to the entire feature map size is like a fc layer)

        self.fc5 = H.HebbianMap2d(
            in_channels=self.input_shape[0],
            out_size=(15, 20),
            kernel_size=(self.input_shape[1], self.input_shape[2]),
            out=H.clp_cos_sim2d,
            eta=0.1,
        )  # input_shape-shaped input, 15x20=300 output channels
        self.bn5 = nn.BatchNorm2d(300)  # Batch Norm layer

        self.fc6 = H.HebbianMap2d(
            in_channels=300,
            out_size=P.NUM_CLASSES,
            kernel_size=1,
            competitive=False,
            eta=0.1,
        )  # 300-dimensional input, 10-dimensional output (one per class)
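
The `if len(input_shape) != 3` check above promotes a flat feature vector to a (channels, 1, 1) tensor, so the "fully connected" layers can still be expressed as convolutions. A small illustration with made-up sizes (the real P.INPUT_SHAPE comes from the project's parameter module):

import torch

input_shape = (784,)                          # e.g. a flat 784-dimensional feature vector (illustrative)
if len(input_shape) != 3:
    input_shape = (input_shape[0], 1, 1)      # treat it as a 784-channel 1x1 "image"

batch = torch.randn(32, *input_shape)         # shape (32, 784, 1, 1)
# fc5 above then acts as a 784-in / 300-out convolution with a 1x1 kernel,
# i.e. a fully connected layer applied at the single spatial position.
print(batch.shape)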
Example 3
    def __init__(self, input_shape=P.INPUT_SHAPE):
        super(Net, self).__init__()

        # Shape of the tensors that we expect to receive as input
        self.input_shape = input_shape

        # Here we define the layers of our network

        # Second convolutional layer
        self.conv2 = H.HebbianMap2d(
            in_channels=96,
            out_size=(8, 16),
            kernel_size=3,
            out=H.clp_cos_sim2d,
            eta=0.1,
        )  # 96 input channels, 8x16=128 output channels, 3x3 convolutions
        self.bn2 = nn.BatchNorm2d(128)  # Batch Norm layer

        # Third convolutional layer
        self.conv3 = H.HebbianMap2d(
            in_channels=128,
            out_size=(12, 16),
            kernel_size=3,
            out=H.clp_cos_sim2d,
            eta=0.1,
        )  # 128 input channels, 12x16=192 output channels, 3x3 convolutions
        self.bn3 = nn.BatchNorm2d(192)  # Batch Norm layer

        # Fourth convolutional layer
        self.conv4 = H.HebbianMap2d(
            in_channels=192,
            out_size=(16, 16),
            kernel_size=3,
            out=H.clp_cos_sim2d,
            eta=0.1,
        )  # 192 input channels, 16x16=256 output channels, 3x3 convolutions
        self.bn4 = nn.BatchNorm2d(256)  # Batch Norm layer

        self.conv_output_shape = utils.get_conv_output_shape(self)

        # FC Layers (convolution with kernel size equal to the entire feature map size is like a fc layer)

        self.fc5 = H.HebbianMap2d(
            in_channels=self.conv_output_shape[0],
            out_size=(15, 20),
            kernel_size=(self.conv_output_shape[1], self.conv_output_shape[2]),
            out=H.clp_cos_sim2d,
            eta=0.1,
        )  # conv_output_shape-shaped input, 15x20=300 output channels
        self.bn5 = nn.BatchNorm2d(300)  # Batch Norm layer

        self.fc6 = H.HebbianMap2d(
            in_channels=300,
            out_size=P.NUM_CLASSES,
            kernel_size=1,
            competitive=False,
            eta=0.1,
        )  # 300-dimensional input, 10-dimensional output (one per class)
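
utils.get_conv_output_shape is called to size the first FC layer, but its code is not shown in these snippets. A common way to obtain that shape, and presumably what the helper does (this is an assumption, not the project's actual implementation), is to push a dummy tensor through the convolutional stack:

import torch
import torch.nn as nn

def conv_output_shape(input_shape, conv_stack):
    # Hypothetical stand-in for utils.get_conv_output_shape: run one zero tensor
    # of the expected input shape through the conv layers and return (C, H, W).
    with torch.no_grad():
        x = torch.zeros(1, *input_shape)
        x = conv_stack(x)
    return tuple(x.shape[1:])

print(conv_output_shape((3, 32, 32), nn.Sequential(nn.Conv2d(3, 96, 5), nn.BatchNorm2d(96))))  # (96, 28, 28)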
Example 4
	def __init__(self, input_shape=P.INPUT_SHAPE):
		super(Net, self).__init__()
		
		# Shape of the tensors that we expect to receive as input
		self.input_shape = input_shape
		if len(input_shape) != 3: self.input_shape = (input_shape[0], 1, 1)
		
		# Here we define the layers of our network
		
		# FC Layers
		self.fc = H.HebbianMap2d(
			in_channels=self.input_shape[0],
			out_size=P.NUM_CLASSES,
			kernel_size=(self.input_shape[1], self.input_shape[2]),
			competitive=False,
			eta=0.1,
		)  # conv kernels with the same height, width, and depth as the input (equivalent to a FC layer), 10 kernels (one per class)
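
As background on the eta arguments used throughout: eta is the Hebbian learning rate, and a plain (non-competitive) Hebbian update for a linear response y = W x takes the form ΔW = η · y xᵀ. The toy sketch below shows that generic rule only; it is not necessarily the update implemented inside H.HebbianMap2d:

import torch

eta = 0.1
W = 0.01 * torch.randn(10, 300)    # 10 output units, 300 inputs (illustrative sizes)
x = torch.randn(300)
y = W @ x                          # linear response of the 10 units
W += eta * torch.outer(y, x)       # Hebbian update: strengthen weights toward co-active inputs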