Example #1
    def __init__(self,
                 features,
                 num_classes=1000,
                 init_weights=True,
                 smaller_head=False):
        super().__init__(num_classes)

        self.features = features
        if smaller_head:
            # self.avgpool = nn.Identity()
            self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
            self.classifier = nn.Sequential(
                mc_dropout.MCDropout(),
                nn.Linear(512 * 1 * 1, 512),
                nn.BatchNorm1d(512),
                nn.ReLU(True),
                nn.Linear(512, num_classes),
            )
        else:
            # self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
            self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
            self.classifier = nn.Sequential(
                # nn.Linear(512 * 7 * 7, 4096),
                nn.Linear(512 * 1 * 1, 4096),
                nn.ReLU(True),
                mc_dropout.MCDropout(),
                nn.Linear(4096, 4096),
                nn.ReLU(True),
                mc_dropout.MCDropout(),
                nn.Linear(4096, num_classes),
            )

        if init_weights:
            self.apply(self.initialize_weights)
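This snippet only shows the constructor. A forward pass in the usual torchvision-VGG style (features, then avgpool, then flatten, then classifier) is a reasonable guess at how the module is used; the method below is a sketch under that assumption, not part of the original example:

    def forward(self, x):
        x = self.features(x)        # convolutional backbone
        x = self.avgpool(x)         # pool to a 1x1 spatial map
        x = torch.flatten(x, 1)     # flatten everything but the batch dim
        return self.classifier(x)   # head containing the MCDropout layers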
Example #2
    def __init__(self, num_classes):
        super().__init__(num_classes)

        self.num_classes = num_classes
        self.conv1 = nn.Conv2d(3, 32, kernel_size=3, padding=1)
        self.conv1_drop = mc_dropout.MCDropout2d()
        self.bn1 = nn.BatchNorm2d(32)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1)
        self.conv2_drop = mc_dropout.MCDropout2d()
        self.bn2 = nn.BatchNorm2d(64)
        self.conv3 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
        self.conv3_drop = mc_dropout.MCDropout2d()
        self.bn3 = nn.BatchNorm2d(128)
        self.conv4 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
        self.conv4_drop = mc_dropout.MCDropout2d()
        self.bn4 = nn.BatchNorm2d(128)
        self.conv5 = nn.Conv2d(128, 256, kernel_size=3, padding=1)
        self.conv5_drop = mc_dropout.MCDropout2d()
        self.bn5 = nn.BatchNorm2d(256)
        self.conv6 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.conv6_drop = mc_dropout.MCDropout2d()
        self.bn6 = nn.BatchNorm2d(256)
        self.conv7 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.conv7_drop = mc_dropout.MCDropout2d()
        self.bn7 = nn.BatchNorm2d(256)
        self.conv8 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.conv8_drop = mc_dropout.MCDropout2d()
        self.bn8 = nn.BatchNorm2d(256)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        #self.avgpool = nn.AvgPool2d(kernel_size=1, stride=1)
        self.fc1 = nn.Linear(256, 512)
        self.fc1_drop = mc_dropout.MCDropout()
        self.fc2 = nn.Linear(512, 512)
        self.fc2_drop = mc_dropout.MCDropout()
        self.fc3 = nn.Linear(512, num_classes)
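Again only the constructor is given, so a matching forward pass has to choose where to downsample. The sketch below assumes a conv, dropout, batch-norm, ReLU ordering and max-pooling after every second block (both are assumptions), and that torch.nn.functional is imported as F as in typical PyTorch code:

    def forward(self, x):
        blocks = [
            (self.conv1, self.conv1_drop, self.bn1),
            (self.conv2, self.conv2_drop, self.bn2),
            (self.conv3, self.conv3_drop, self.bn3),
            (self.conv4, self.conv4_drop, self.bn4),
            (self.conv5, self.conv5_drop, self.bn5),
            (self.conv6, self.conv6_drop, self.bn6),
            (self.conv7, self.conv7_drop, self.bn7),
            (self.conv8, self.conv8_drop, self.bn8),
        ]
        for i, (conv, drop, bn) in enumerate(blocks):
            x = F.relu(bn(drop(conv(x))))
            if i % 2 == 1:               # assumed: downsample after each pair
                x = F.max_pool2d(x, 2)
        x = self.avgpool(x)              # adaptive pool to 1x1
        x = torch.flatten(x, 1)          # 256 features into the dense head
        x = F.relu(self.fc1_drop(self.fc1(x)))
        x = F.relu(self.fc2_drop(self.fc2(x)))
        return self.fc3(x)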
Example #3
    def __init__(self, num_classes):
        super().__init__(num_classes)

        self.num_classes = num_classes
        self.resnet = resnet.ResNet18(num_classes=512)
        self.fc1_drop = mc_dropout.MCDropout()
        self.fc1 = nn.Linear(512, 256)
        self.fc2_drop = mc_dropout.MCDropout()
        self.fc2 = nn.Linear(256, num_classes)
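Here the layer names suggest dropout is applied to the ResNet features before each linear layer. A forward pass consistent with that naming might look as follows (a sketch; the ReLU between the two linear layers is an assumption, and F is torch.nn.functional):

    def forward(self, x):
        x = self.resnet(x)                       # 512-dim backbone features
        x = F.relu(self.fc1(self.fc1_drop(x)))   # dropout before the linear layer
        return self.fc2(self.fc2_drop(x))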
    def cnn(self):
        self._network['input'] = pelu(batch_norm(lasagne.layers.InputLayer(shape=(None, self._number_of_channel,
                                                                                   8, 14),
                                                                            input_var=self._x)))
        print(self._network['input'].output_shape)

        first_part_input = SliceLayer(self._network['input'], indices=slice(0, 2), axis=1)
        print(first_part_input.output_shape)
        second_part_input = SliceLayer(self._network['input'], indices=slice(2, 4), axis=1)
        print(second_part_input.output_shape)
        first_dropout_2 = self.cnn_separate_convolutions(first_part_input, first_part=True)
        second_dropout_2 = self.cnn_separate_convolutions(second_part_input, first_part=False)

        self._network['sumwise_layer'] = ElemwiseSumLayer([first_dropout_2, second_dropout_2,
                                                           ScaleLayer(self._network['sumwise_layer_pre_training'])])

        self._network['conv3'] = pelu(batch_norm(lasagne.layers.Conv2DLayer(self._network['sumwise_layer'],
                                                                             num_filters=48,
                                                                             filter_size=(3, 3),
                                                                             W=lasagne.init.HeNormal(gain='relu'))))

        print(self._network['conv3'].output_shape)

        self._network['dropout_3'] = mc_dropout.MCDropout(self._network['conv3'], p=self._percentage_dropout_cnn_layers)

        self._network['merge_with_pre_training_dense_layer_1'] = ElemwiseSumLayer(
            [ScaleLayer(self._network['dropout_3_pre_training']), self._network['dropout_3']])
        print(np.shape(self._network['pre_training_fc1_full'].W.get_value()))
        self._network['fc1'] = mc_dropout.MCDropout(pelu(batch_norm(lasagne.layers.DenseLayer(
            self._network['merge_with_pre_training_dense_layer_1'], num_units=100, W=lasagne.init.HeNormal(gain='relu')))),
            p=self._percentage_dropout_dense_layers)

        print(self._network['fc1'].output_shape)

        self._network['merge_with_pre_training_dense_layer_2'] = ElemwiseSumLayer(
            [ScaleLayer(self._network['fc1_pre_training']), self._network['fc1']])

        self._network['fc2'] = mc_dropout.MCDropout(pelu(batch_norm(
            lasagne.layers.DenseLayer(self._network['merge_with_pre_training_dense_layer_2'], num_units=100,
                                      W=lasagne.init.HeNormal(gain='relu')))),
            p=self._percentage_dropout_dense_layers)

        print(self._network['fc2'].output_shape)

        self._network['merge_with_pre_training_output'] = ElemwiseSumLayer(
            [ScaleLayer(self._network['fc2_pre_training']), self._network['fc2']])
        self._network['output'] = lasagne.layers.DenseLayer(self._network['merge_with_pre_training_output'],
                                                            num_units=self._number_of_class,
                                                            nonlinearity=lasagne.nonlinearities.softmax,
                                                            W=lasagne.init.HeNormal(gain='relu'))

        print(self._network['output'].output_shape)
    def pre_training_cnn(self):
        self._network = {}
        self._network['input_pre_training'] = lasagne.layers.InputLayer(shape=(None, self._number_of_channel,
                                                                               8, 14),
                                                                        input_var=self._x)
        self._network['input_normalized'] = prelu(batch_norm(self._network['input_pre_training']))
        print(self._network['input_normalized'].output_shape)

        first_part_input = SliceLayer(self._network['input_normalized'], indices=slice(0, 2), axis=1)
        print(first_part_input.output_shape)
        second_part_input = SliceLayer(self._network['input_normalized'], indices=slice(2, 4), axis=1)
        print(second_part_input.output_shape)
        first_network = self.cnn_separate_convolutions_pre_training(first_part_input, first_part=True)
        second_network = self.cnn_separate_convolutions_pre_training(second_part_input, first_part=False)

        self._network['sumwise_layer_pre_training'] = ElemwiseSumLayer([first_network, second_network])

        self._network['conv3_pre_training_cnn'] = lasagne.layers.Conv2DLayer(self._network['sumwise_layer_pre_training'],
                                                                      num_filters=48,
                                                                      filter_size=(3, 3),
                                                                      W=lasagne.init.HeNormal(gain='relu'))

        self._network['conv3_pre_training'] = prelu(batch_norm(self._network['conv3_pre_training_cnn']))
        print(self._network['conv3_pre_training'].output_shape)

        self._network['dropout_3_pre_training'] = mc_dropout.MCDropout(self._network['conv3_pre_training'],
                                                                p=self._percentage_dropout_cnn_layers)

        self._network['pre_training_fc1_full'] = lasagne.layers.DenseLayer(self._network['dropout_3_pre_training'], num_units=100,
                                                                    W=lasagne.init.HeNormal(gain='relu'))

        self._network['fc1_pre_training'] = mc_dropout.MCDropout(prelu(batch_norm(self._network['pre_training_fc1_full'])),
                                                          p=self._percentage_dropout_dense_layers)

        print(self._network['fc1_pre_training'].output_shape)

        self._network['pre_training_fc2_full'] = lasagne.layers.DenseLayer(self._network['fc1_pre_training'], num_units=100,
                                                                    W=lasagne.init.HeNormal(gain='relu'))

        self._network['fc2_pre_training'] = mc_dropout.MCDropout(prelu(batch_norm(self._network['pre_training_fc2_full'])), p=self._percentage_dropout_dense_layers)

        print(self._network['fc2_pre_training'].output_shape)

        self._network['output_gesture_pre_training'] = lasagne.layers.DenseLayer(self._network['fc2_pre_training'],
                                                                         num_units=self._number_of_class,
                                                                         nonlinearity=lasagne.nonlinearities.softmax,
                                                                         W=lasagne.init.HeNormal(gain='relu'))
        print(self._network['output_gesture_pre_training'].output_shape)

        print("Pre-Training done printing")
Example #6
    def __init__(self, num_classes):
        super().__init__(num_classes)

        self.conv1 = nn.Conv2d(1, 32, kernel_size=5)
        self.conv1_drop = mc_dropout.MCDropout2d()
        self.conv2 = nn.Conv2d(32, 64, kernel_size=5)
        self.conv2_drop = mc_dropout.MCDropout2d()
        self.fc1 = nn.Linear(1024, 128)
        self.fc1_drop = mc_dropout.MCDropout()
        self.fc2 = nn.Linear(128, num_classes)
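With 28x28 single-channel inputs (MNIST-sized, which the 1-channel conv1 suggests), two conv/pool stages leave a 64 x 4 x 4 map, matching fc1's 1024-dim input. A forward pass under those assumptions (F is torch.nn.functional) might be:

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1_drop(self.conv1(x)), 2))  # 32 x 12 x 12
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))  # 64 x 4 x 4
        x = x.view(x.size(0), -1)                                    # 1024 features
        x = F.relu(self.fc1(x))
        x = self.fc1_drop(x)
        return self.fc2(x)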
    def cnn_separate_convolutions(self, input, first_part):
        if first_part:
            conv1_first_part = pelu(batch_norm(lasagne.layers.Conv2DLayer(input, num_filters=12,
                                                                           filter_size=(4, 3), stride=(1, 1),
                                                                           W=lasagne.init.HeNormal(gain='relu'))))
            print(conv1_first_part.output_shape)

            first_dropout_1 = mc_dropout.MCDropout(conv1_first_part, p=self._percentage_dropout_cnn_layers)


            sumwise_first_part = ElemwiseSumLayer([ScaleLayer(self._network['first_part_dropout1_pre_training']),
                                                   first_dropout_1])


            conv2_first_part = pelu(batch_norm(lasagne.layers.Conv2DLayer(
                sumwise_first_part, num_filters=24,
                filter_size=(3, 3), W=lasagne.init.HeNormal(gain='relu'))))
            print(conv2_first_part.output_shape)

            first_dropout_2 = mc_dropout.MCDropout(conv2_first_part, p=self._percentage_dropout_cnn_layers)
            return first_dropout_2
        else:
            conv1_second_part = pelu(batch_norm(lasagne.layers.Conv2DLayer(input, num_filters=12,
                                                                            filter_size=(4, 3), stride=(1, 1),
                                                                            W=lasagne.init.HeNormal(gain='relu'))))
            print(conv1_second_part.output_shape)

            second_dropout_1 = mc_dropout.MCDropout(conv1_second_part, p=self._percentage_dropout_cnn_layers)

            sumwise_second_part = ElemwiseSumLayer([ScaleLayer(self._network['second_part_dropout1_pre_training']),
                                                    second_dropout_1])

            conv2_second_part = pelu(batch_norm(lasagne.layers.Conv2DLayer(
                sumwise_second_part, num_filters=24,
                filter_size=(3, 3), W=lasagne.init.HeNormal(gain='relu'))))

            print(conv2_second_part.output_shape)

            second_dropout_2 = mc_dropout.MCDropout(conv2_second_part, p=self._percentage_dropout_cnn_layers)
            return second_dropout_2
    def cnn_separate_convolutions_pre_training(self, input, first_part):
        if first_part:
            self._network['input_first_part'] = input
            self._network['conv1_first_part_pre_training'] = lasagne.layers.Conv2DLayer(self._network['input_first_part'], num_filters=12,
                filter_size=(4, 3), stride=(1, 1), W=lasagne.init.HeNormal(gain='relu'))
            conv1_first_part_pre_training = pelu(batch_norm(self._network['conv1_first_part_pre_training']))
            print(conv1_first_part_pre_training.output_shape)

            self._network['first_part_dropout1_pre_training'] = mc_dropout.MCDropout(conv1_first_part_pre_training, p=self._percentage_dropout_cnn_layers)

            # Do the convolutional layer for dimension reduction of the frequency

            self._network['conv2_first_part_pre_training'] = lasagne.layers.Conv2DLayer(self._network['first_part_dropout1_pre_training'], num_filters=24,
                filter_size=(3, 3), stride=(1, 1), W=lasagne.init.HeNormal(gain='relu'))

            conv2_first_part = pelu(batch_norm(self._network['conv2_first_part_pre_training']))
            print(conv2_first_part.output_shape)

            first_dropout_2 = mc_dropout.MCDropout(conv2_first_part, p=self._percentage_dropout_cnn_layers)
            return first_dropout_2
        else:
            self._network['input_second_part'] = input
            self._network['conv1_second_part_pre_training'] = lasagne.layers.Conv2DLayer(self._network['input_second_part'], num_filters=12,
                                                                                 filter_size=(4, 3), stride=(1, 1),
                                                                                 W=lasagne.init.HeNormal(gain='relu'))
            conv1_second_part_pre_training = pelu(batch_norm(self._network['conv1_second_part_pre_training']))
            print(conv1_second_part_pre_training.output_shape)

            self._network['second_part_dropout1_pre_training'] = mc_dropout.MCDropout(conv1_second_part_pre_training, p=self._percentage_dropout_cnn_layers)

            self._network['conv2_second_part_pre_training'] = lasagne.layers.Conv2DLayer(self._network['second_part_dropout1_pre_training'], num_filters=24,
                                                                                 filter_size=(3, 3), stride=(1, 1),
                                                                                 W=lasagne.init.HeNormal(gain='relu'))

            conv2_second_part = pelu(batch_norm(self._network['conv2_second_part_pre_training']))
            print(conv2_second_part.output_shape)

            second_dropout_2 = mc_dropout.MCDropout(conv2_second_part, p=self._percentage_dropout_cnn_layers)
            return second_dropout_2
    def cnn_separate_convolutions_pre_training(self, input, first_part, time=0):
        if first_part:
            self._network['input_first_part'] = input
            self._network['conv1_first_part_pre_training'] = pelu(batch_norm(lasagne.layers.Conv2DLayer(self._network['input_first_part'], num_filters=8,
                                                                           filter_size=(3, 3), stride=(1, 1),
                                                                           W=lasagne.init.HeNormal(gain='relu'))))
            # prelu v2
            print(self._network['conv1_first_part_pre_training'].output_shape)


            if time == 0:
                name = 'first_part_dropout1_pre_training_0'
            elif time == 1:
                name = 'first_part_dropout1_pre_training_1'
            elif time == 2:
                name = 'first_part_dropout1_pre_training_2'
            else:
                name = 'first_part_dropout1_pre_training_3'
            self._network[name] = mc_dropout.MCDropout(
                self._network['conv1_first_part_pre_training'], p=self._percentage_dropout_training)

            self._network['conv2_first_part_pre_training'] = pelu(batch_norm(lasagne.layers.Conv2DLayer(
                self._network[name], num_filters=12,
                filter_size=(3, 3), W=lasagne.init.HeNormal(gain='relu'))))
            print(self._network['conv2_first_part_pre_training'].output_shape)

            first_dropout_2 = mc_dropout.MCDropout(self._network['conv2_first_part_pre_training'],
                                                   p=self._percentage_dropout_training)
            return first_dropout_2
        else:
            self._network['input_second_part'] = input
            self._network['conv1_second_part_pre_training'] = pelu(batch_norm(
                lasagne.layers.Conv2DLayer(self._network['input_second_part'], num_filters=24, filter_size=(3, 2),
                                           stride=(1, 1), W=lasagne.init.HeNormal(gain='relu'))))
            print(self._network['conv1_second_part_pre_training'].output_shape)

            second_dropout_1 = mc_dropout.MCDropout(self._network['conv1_second_part_pre_training'],
                                                    p=self._percentage_dropout_training)
            return second_dropout_1
Example #10
    def __init__(self, num_classes):
        super().__init__(num_classes)

        self.num_classes = num_classes
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3)
        self.conv1_drop = mc_dropout.MCDropout2d()
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
        self.conv2_drop = mc_dropout.MCDropout2d()
        self.conv3 = nn.Conv2d(64, 128, kernel_size=3)
        self.conv3_drop = mc_dropout.MCDropout2d()
        self.fc1 = nn.Linear(128 * 4 * 4, 512)
        self.fc1_drop = mc_dropout.MCDropout()
        self.fc2 = nn.Linear(512, num_classes)
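Again only the constructor is shown. Assuming 28x28 single-channel inputs, pooling after conv1 and conv3 yields the 128 x 4 x 4 map that fc1 expects; a forward pass under that assumption (F is torch.nn.functional):

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1_drop(self.conv1(x)), 2))  # 32 x 13 x 13
        x = F.relu(self.conv2_drop(self.conv2(x)))                   # 64 x 11 x 11
        x = F.relu(F.max_pool2d(self.conv3_drop(self.conv3(x)), 2))  # 128 x 4 x 4
        x = x.view(x.size(0), -1)                                    # 128 * 4 * 4 features
        x = F.relu(self.fc1_drop(self.fc1(x)))
        return self.fc2(x)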
    def cnn_separate_convolutions(self, input_signal, first_part, time=0):
        if first_part:
            conv1_first_part = pelu(batch_norm(lasagne.layers.Conv2DLayer(input_signal, num_filters=8,
                                                                           filter_size=(3, 3), stride=(1, 1),
                                                                           W=lasagne.init.HeNormal(gain='relu'))))
            print(conv1_first_part.output_shape)

            first_dropout_1 = mc_dropout.MCDropout(
                conv1_first_part, p=self._percentage_dropout_training)

            if time == 0:
                name = 'first_part_dropout1_pre_training_0'
            elif time == 1:
                name = 'first_part_dropout1_pre_training_1'
            elif time == 2:
                name = 'first_part_dropout1_pre_training_2'
            else:
                name = 'first_part_dropout1_pre_training_3'
            concat_first_part = ElemwiseSumLayer([lasagne.layers.ScaleLayer(self._network[name]), first_dropout_1])

            conv2_first_part = pelu(batch_norm(lasagne.layers.Conv2DLayer(
                concat_first_part, num_filters=12,
                filter_size=(3, 3), W=lasagne.init.HeNormal(gain='relu'))))
            print(conv2_first_part.output_shape)

            first_dropout_2 = mc_dropout.MCDropout(conv2_first_part, p=self._percentage_dropout_training)
            return first_dropout_2
        else:
            conv1_second_part = pelu(batch_norm(
                lasagne.layers.Conv2DLayer(input_signal, num_filters=24, filter_size=(3, 2),
                                           stride=(1, 1), W=lasagne.init.HeNormal(gain='relu'))))
            print(conv1_second_part.output_shape)

            second_dropout_1 = mc_dropout.MCDropout(conv1_second_part,
                                                    p=self._percentage_dropout_training)
            return second_dropout_1
Example #12
    def cnn(self):
        self._network = {}
        self._network['input_pre_training'] = lasagne.layers.InputLayer(shape=(None, self._number_of_channel,
                                                                               8, 7),
                                                                        input_var=self._x)
        self._network['input_normalized'] = pelu(batch_norm(self._network['input_pre_training']))

        print(self._network['input_normalized'].output_shape)

        first_part_input = SliceLayer(self._network['input_normalized'], indices=slice(0, 3), axis=1)
        print(first_part_input.output_shape)
        second_part_input = SliceLayer(self._network['input_normalized'], indices=slice(3, 6), axis=1)
        print(second_part_input.output_shape)

        third_part_input = SliceLayer(self._network['input_normalized'], indices=slice(6, 9), axis=1)
        print(third_part_input.output_shape)
        fourth_part_input = SliceLayer(self._network['input_normalized'], indices=slice(9, 12), axis=1)
        print(fourth_part_input.output_shape)

        first_dropout_2 = self.cnn_separate_convolutions_pre_training(first_part_input, first_part=True, time=0)
        second_dropout_2 = self.cnn_separate_convolutions_pre_training(second_part_input, first_part=True, time=1)

        third_dropout_2 = self.cnn_separate_convolutions_pre_training(third_part_input, first_part=True, time=2)
        fourth_dropout_2 = self.cnn_separate_convolutions_pre_training(fourth_part_input, first_part=True, time=3)

        self._network['concat_layer_1_pre_training'] = ElemwiseSumLayer([first_dropout_2, second_dropout_2])
        self._network['concat_layer_2_pre_training'] = ElemwiseSumLayer([third_dropout_2, fourth_dropout_2])

        self._network['last_fusion_layer1_pre_training'] = self.cnn_separate_convolutions_pre_training(
            self._network['concat_layer_1_pre_training'], first_part=False)
        self._network['last_fusion_layer2_pre_training'] = self.cnn_separate_convolutions_pre_training(
            self._network['concat_layer_2_pre_training'], first_part=False)

        self._network['concat_layer_final_pre_training'] = ElemwiseSumLayer(
            [self._network['last_fusion_layer1_pre_training'],
             self._network['last_fusion_layer2_pre_training']])

        self._network['conv3_pre_training_cnn'] = lasagne.layers.Conv2DLayer(
            self._network['concat_layer_final_pre_training'],
            num_filters=48,
            filter_size=(2, 2),
            W=lasagne.init.HeNormal(gain='relu'))

        self._network['conv3_pre_training'] = pelu(batch_norm(self._network['conv3_pre_training_cnn']))
        print(self._network['conv3_pre_training'].output_shape)

        self._network['dropout_3_pre_training'] = mc_dropout.MCDropout(self._network['conv3_pre_training'],
                                                                       p=self._percentage_dropout_cnn_layers)

        self._network['pre_training_fc1_full'] = lasagne.layers.DenseLayer(self._network['dropout_3_pre_training'],
                                                                           num_units=100,
                                                                           W=lasagne.init.HeNormal(gain='relu'))

        self._network['fc1_pre_training'] = mc_dropout.MCDropout(
            pelu(batch_norm(self._network['pre_training_fc1_full'])),
            p=self._percentage_dropout_dense_layers)

        print(self._network['fc1_pre_training'].output_shape)

        self._network['pre_training_fc2_full'] = lasagne.layers.DenseLayer(self._network['fc1_pre_training'],
                                                                           num_units=100,
                                                                           W=lasagne.init.HeNormal(gain='relu'))

        self._network['fc2_pre_training'] = mc_dropout.MCDropout(
            pelu(batch_norm(self._network['pre_training_fc2_full'])), p=self._percentage_dropout_dense_layers)

        print(self._network['fc2_pre_training'].output_shape)

        self._network['output'] = lasagne.layers.DenseLayer(self._network['fc2_pre_training'],
                                                            num_units=self._number_of_class,
                                                            nonlinearity=lasagne.nonlinearities.softmax,
                                                            W=lasagne.init.HeNormal(gain='relu'))
        print(self._network['output'].output_shape)

        print("ConvNet done printing")