Code example #1
0
    def __init__(self, input_shape=P.INPUT_SHAPE):
        """Build the network layers for the given expected input shape."""
        super().__init__()

        # Remember the shape of the tensors this network expects as input.
        self.input_shape = input_shape

        # Convolutional stage: each block is a convolution followed by batch norm.
        # Third conv block: 128 -> 192 channels, 3x3 kernels.
        self.conv3 = nn.Conv2d(in_channels=128, out_channels=192, kernel_size=3)
        self.bn3 = nn.BatchNorm2d(num_features=192)
        # Fourth conv block: 192 -> 256 channels, 3x3 kernels.
        self.conv4 = nn.Conv2d(in_channels=192, out_channels=256, kernel_size=3)
        self.bn4 = nn.BatchNorm2d(num_features=256)

        # Flattened size of the convolutional output; computed here, after the
        # conv layers are registered, so it can size the first FC layer below.
        self.conv_output_size = utils.shape2size(
            utils.get_conv_output_shape(self))

        # Fully-connected stage.
        # fc5: conv_output_size features -> 300 features, batch-normalized.
        self.fc5 = nn.Linear(in_features=self.conv_output_size, out_features=300)
        self.bn5 = nn.BatchNorm1d(num_features=300)
        # fc6: final classifier, 300 features -> one output per class.
        self.fc6 = nn.Linear(in_features=300, out_features=P.NUM_CLASSES)
Code example #2
0
    def __init__(self, input_shape=P.INPUT_SHAPE):
        """Build a Hebbian net: one conv block followed by a fc-style classifier."""
        super().__init__()

        # Remember the shape of the tensors this network expects as input.
        self.input_shape = input_shape

        # First conv block: 3 input channels, 8x12 = 96 output maps,
        # 5x5 kernels, batch-normalized.
        self.conv1 = H.HebbianMap2d(
            in_channels=3,
            out_size=(8, 12),
            kernel_size=5,
            out=H.clp_cos_sim2d,
            eta=0.1,
        )
        self.bn1 = nn.BatchNorm2d(num_features=96)

        # Output shape of the conv stage, computed after conv1 is registered.
        # Assumed layout is (channels, height, width) — as used below.
        self.conv_output_shape = utils.get_conv_output_shape(self)
        channels, height, width = self.conv_output_shape

        # A convolution whose kernel covers the whole feature map acts as a
        # fully-connected layer: conv output -> one value per class
        # (non-competitive, eta=0.1).
        self.fc2 = H.HebbianMap2d(
            in_channels=channels,
            out_size=P.NUM_CLASSES,
            kernel_size=(height, width),
            competitive=False,
            eta=0.1,
        )
Code example #3
0
    def __init__(self, input_shape=P.INPUT_SHAPE):
        """Build three conv blocks plus a single fully-connected classifier."""
        super().__init__()

        # Remember the shape of the tensors this network expects as input.
        self.input_shape = input_shape

        # Convolutional stage: each block is a convolution followed by batch norm.
        # First conv block: 3 -> 96 channels, 5x5 kernels.
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=96, kernel_size=5)
        self.bn1 = nn.BatchNorm2d(num_features=96)
        # Second conv block: 96 -> 128 channels, 3x3 kernels.
        self.conv2 = nn.Conv2d(in_channels=96, out_channels=128, kernel_size=3)
        self.bn2 = nn.BatchNorm2d(num_features=128)
        # Third conv block: 128 -> 192 channels, 3x3 kernels.
        self.conv3 = nn.Conv2d(in_channels=128, out_channels=192, kernel_size=3)
        self.bn3 = nn.BatchNorm2d(num_features=192)

        # Flattened size of the convolutional output; computed here, after the
        # conv layers are registered, so it can size the classifier below.
        self.conv_output_size = utils.shape2size(
            utils.get_conv_output_shape(self))

        # Classifier: conv_output_size features -> one output per class.
        self.fc4 = nn.Linear(in_features=self.conv_output_size,
                             out_features=P.NUM_CLASSES)
Code example #4
0
File: top1.py  Project: TanmDL/HebbianLearningThesis
    def __init__(self, input_shape=P.INPUT_SHAPE):
        """Build Hebbian conv blocks 2-4 plus two fc-style Hebbian layers."""
        super().__init__()

        # Remember the shape of the tensors this network expects as input.
        self.input_shape = input_shape

        # Hyper-parameters shared by every Hebbian layer below.
        eta = 0.1                      # Hebbian learning rate
        similarity = H.clp_cos_sim2d   # output/similarity function

        # Convolutional stage: each block is a Hebbian map plus batch norm.
        # Second conv block: 96 -> 8x16 = 128 output maps, 3x3 kernels.
        self.conv2 = H.HebbianMap2d(
            in_channels=96,
            out_size=(8, 16),
            kernel_size=3,
            out=similarity,
            eta=eta,
        )
        self.bn2 = nn.BatchNorm2d(num_features=128)

        # Third conv block: 128 -> 12x16 = 192 output maps, 3x3 kernels.
        self.conv3 = H.HebbianMap2d(
            in_channels=128,
            out_size=(12, 16),
            kernel_size=3,
            out=similarity,
            eta=eta,
        )
        self.bn3 = nn.BatchNorm2d(num_features=192)

        # Fourth conv block: 192 -> 16x16 = 256 output maps, 3x3 kernels.
        self.conv4 = H.HebbianMap2d(
            in_channels=192,
            out_size=(16, 16),
            kernel_size=3,
            out=similarity,
            eta=eta,
        )
        self.bn4 = nn.BatchNorm2d(num_features=256)

        # Output shape of the conv stage, computed after the conv layers are
        # registered. Assumed layout is (channels, height, width).
        self.conv_output_shape = utils.get_conv_output_shape(self)
        channels, height, width = self.conv_output_shape

        # A convolution whose kernel covers the whole feature map acts as a
        # fully-connected layer.
        # fc5: conv output -> 15x20 = 300 output maps, batch-normalized.
        self.fc5 = H.HebbianMap2d(
            in_channels=channels,
            out_size=(15, 20),
            kernel_size=(height, width),
            out=similarity,
            eta=eta,
        )
        self.bn5 = nn.BatchNorm2d(num_features=300)

        # fc6: 300 features -> one output per class (non-competitive).
        self.fc6 = H.HebbianMap2d(
            in_channels=300,
            out_size=P.NUM_CLASSES,
            kernel_size=1,
            competitive=False,
            eta=eta,
        )
Code example #5
0
    def __init__(self, input_shape=P.INPUT_SHAPE):
        """Build one bias-free conv block plus a bias-free linear classifier."""
        super().__init__()

        # Remember the shape of the tensors this network expects as input.
        self.input_shape = input_shape

        # Conv block: 3 -> 96 channels, 5x5 kernels, no bias term; the batch
        # norm layer carries no learnable affine parameters either.
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=96, kernel_size=5,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(num_features=96, affine=False)

        # Flattened size of the convolutional output; computed here, after the
        # conv layer is registered, so it can size the classifier below.
        self.conv_output_size = utils.shape2size(
            utils.get_conv_output_shape(self))

        # Classifier: conv_output_size features -> one output per class, no bias.
        self.fc2 = nn.Linear(in_features=self.conv_output_size,
                             out_features=P.NUM_CLASSES, bias=False)