def __init__(self, num_classes=3):
        """Build the pretraining autoencoder.

        An EfficientNet-B3 encoder compresses the input to a latent code;
        a deconvolutional decoder reconstructs a 3-channel image from it.

        Args:
            num_classes: number of target classes (stored for later use).
        """
        super(AutoPretrainNet, self).__init__()
        self.latent = 1000          # latent code size (the _fc emits 2x this)
        self.num_classes = num_classes

        # Encoder: EfficientNet-B3 with its classifier head replaced by a
        # linear projection to twice the latent size.
        self.efficientNet = EfficientNet.from_name('efficientnet-b3')
        in_feats = self.efficientNet._fc.in_features
        self.efficientNet._fc = nn.Sequential(
            nn.Linear(in_features=in_feats, out_features=2 * self.latent), )

        # Expand the latent vector to a 4x5 spatial map with 256 channels.
        self.fc2 = nn.Sequential(
            nn.Linear(1000, 4 * 5 * 256),
            nn.BatchNorm1d(4 * 5 * 256),
            nn.ReLU(inplace=True))

        # Decoder: transposed-conv upsampling stages interleaved with
        # residual blocks, halving channels each stage (256->128->64->32->3).
        self.deconv0 = self._make_deconv_layer(256, 128)
        self.inplanes = 128
        self.conv0 = self._make_layer(BasicBlock, 128, 2)
        self.deconv1 = self._make_deconv_layer(128, 64)
        self.inplanes = 64
        self.conv1 = self._make_layer(BasicBlock, 64, 2)
        self.deconv2 = self._make_deconv_layer(64, 32)
        self.inplanes = 32
        self.conv2 = self._make_layer(BasicBlock, 32, 2)
        self.deconv3 = self._make_deconv_layer(32, 3, last=True)
        self.upSample = nn.Upsample(scale_factor=2)

        # He init for (transposed) convolutions, constants for batch norms.
        for module in self.modules():
            if isinstance(module, (nn.Conv2d, nn.ConvTranspose2d)):
                nn.init.kaiming_normal_(module.weight,
                                        mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)
Example #2
0
    def __init__(self, freeze=False, device=None):
        """Assemble the lane-map AutoNet.

        EfficientNet-B3 encodes each view, per-view FC stacks compress the
        codes, six views are fused into a 25x25x32 map, and a residual
        deconvolution decoder emits a 2-channel map via a final 1x1 conv.

        Args:
            freeze: if True, freeze the EfficientNet backbone.
            device: torch device (stored for later use).
        """
        super(AutoNet, self).__init__()
        self.latent = 1000   # encoder latent size (half of the _fc output)
        self.fc_num = 400    # per-view fully connected width
        self.device = device

        # Backbone with its classifier swapped for a latent*2 projection.
        self.efficientNet = EfficientNet.from_name('efficientnet-b3',
                                                   freeze=freeze)
        in_feats = self.efficientNet._fc.in_features
        self.efficientNet._fc = nn.Sequential(
            nn.Linear(in_features=in_feats, out_features=self.latent * 2),
            nn.BatchNorm1d(self.latent * 2), nn.ReLU(inplace=True),
            nn.Dropout(0.25))

        # Per-view compression, then fusion of 6 views into a 25x25x32 grid.
        self.fc1 = nn.Sequential(
            nn.Linear(self.latent, self.fc_num, bias=False),
            nn.BatchNorm1d(self.fc_num), nn.ReLU(inplace=True),
            nn.Dropout(0.25))
        self.fc2 = nn.Sequential(
            nn.Linear(self.fc_num * 6, 25 * 25 * 32, bias=False),
            nn.BatchNorm1d(25 * 25 * 32), nn.ReLU(inplace=True),
            nn.Dropout(0.25))

        # Decoder: residual blocks with channel-halving deconvolutions.
        self.inplanes = 32
        self.conv0 = self._make_layer(BasicBlock, 32, 2)
        self.deconv0 = self._make_deconv_layer(32, 16)
        self.inplanes = 16
        self.conv1 = self._make_layer(BasicBlock, 16, 2)
        self.deconv1 = self._make_deconv_layer(16, 8)
        self.inplanes = 8
        self.conv2 = self._make_layer(BasicBlock, 8, 2)
        self.deconv2 = self._make_deconv_layer(8, 4, last=True)
        self.inplanes = 4
        self.conv3 = self._make_layer(BasicBlock, 4, 2)
        self.deconv3 = self._make_deconv_layer(4, 2, last=True)
        self.convfinal = nn.Conv2d(2, 2, 1)

        # He init for (transposed) convolutions, constants for batch norms.
        for module in self.modules():
            if isinstance(module, (nn.Conv2d, nn.ConvTranspose2d)):
                nn.init.kaiming_normal_(module.weight,
                                        mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)
Example #3
0
    def __init__(self, anchors, detection_classes, freeze=False, device=None):
        """Build the multi-scale detection AutoNet.

        An EfficientNet-B3 backbone provides features at three resolutions
        (4x5, 8x10, 16x20); per-scale FC stacks fuse six views into a
        road-map decoder branch and two YOLO detection heads.

        Args:
            anchors: anchor boxes; row 0 drives the fine-scale head, rows
                1:5 the coarse head. Indexed with numpy-style slicing, so a
                numpy array is expected — TODO confirm against callers.
            detection_classes: number of object classes for the YOLO heads.
            freeze: if True, freeze the EfficientNet backbone.
            device: torch device forwarded to the YOLO layers.
        """
        self.fc_num1 = 200
        self.fc_num2 = 120
        self.device = device
        self.anchors = anchors
        self.anchors1 = np.reshape(anchors[0], [1, 2])  # single fine anchor
        self.anchors0 = anchors[1:5, :]                 # four coarse anchors
        self.detection_classes = detection_classes
        super(AutoNet, self).__init__()
        # Backbone is used purely as a feature extractor: its head is
        # replaced by an identity (empty Sequential).
        self.efficientNet = EfficientNet.from_name('efficientnet-b3',
                                                   freeze=freeze)
        self.efficientNet._fc = nn.Sequential()
        # First FC group (road-map branch): one stack per feature scale.
        self.fc1_1_1 = nn.Sequential(
            nn.Linear(384 * 4 * 5, self.fc_num1 * 3, bias=False),
            nn.BatchNorm1d(self.fc_num1 * 3),
            nn.ReLU(inplace=True),
            nn.Dropout(0.25),
        )
        self.fc1_1_2 = nn.Sequential(
            nn.Linear(136 * 8 * 10, self.fc_num1 * 3, bias=False),
            nn.BatchNorm1d(self.fc_num1 * 3),
            nn.ReLU(inplace=True),
            nn.Dropout(0.25),
        )
        self.fc1_1_3 = nn.Sequential(
            nn.Linear(48 * 16 * 20, self.fc_num1 * 3, bias=False),
            nn.BatchNorm1d(self.fc_num1 * 3),
            nn.ReLU(inplace=True),
            nn.Dropout(0.25),
        )
        # Fusion of 6 views x 3 scales into spatial grids of three sizes.
        self.fc2_1_1 = nn.Sequential(
            nn.Linear(self.fc_num1 * 6 * 3, 25 * 25 * 32, bias=False),
            nn.BatchNorm1d(25 * 25 * 32),
            nn.ReLU(inplace=True),
            nn.Dropout(0.25),
        )
        self.fc2_1_2 = nn.Sequential(
            nn.Linear(self.fc_num1 * 6 * 3, 50 * 50 * 8, bias=False),
            nn.BatchNorm1d(50 * 50 * 8),
            nn.ReLU(inplace=True),
            nn.Dropout(0.25),
        )
        self.fc2_1_3 = nn.Sequential(
            nn.Linear(self.fc_num1 * 6 * 3, 100 * 100 * 2, bias=False),
            nn.BatchNorm1d(100 * 100 * 2),
            nn.ReLU(inplace=True),
            nn.Dropout(0.25),
        )
        # Second FC group (detection branch): same pattern, narrower width.
        self.fc1_2_1 = nn.Sequential(
            nn.Linear(384 * 4 * 5, self.fc_num2 * 2, bias=False),
            nn.BatchNorm1d(self.fc_num2 * 2),
            nn.ReLU(inplace=True),
            nn.Dropout(0.25),
        )
        self.fc1_2_2 = nn.Sequential(
            nn.Linear(136 * 8 * 10, self.fc_num2 * 2, bias=False),
            nn.BatchNorm1d(self.fc_num2 * 2),
            nn.ReLU(inplace=True),
            nn.Dropout(0.25),
        )
        self.fc1_2_3 = nn.Sequential(
            nn.Linear(48 * 16 * 20, self.fc_num2 * 2, bias=False),
            nn.BatchNorm1d(self.fc_num2 * 2),
            nn.ReLU(inplace=True),
            nn.Dropout(0.25),
        )
        self.fc2_2_1 = nn.Sequential(
            nn.Linear(self.fc_num2 * 6 * 3, 25 * 25 * 64, bias=False),
            nn.BatchNorm1d(25 * 25 * 64),
            nn.ReLU(inplace=True),
            nn.Dropout(0.25),
        )
        self.fc2_2_2 = nn.Sequential(
            nn.Linear(self.fc_num2 * 6 * 3, 50 * 50 * 8, bias=False),
            nn.BatchNorm1d(50 * 50 * 8),
            nn.ReLU(inplace=True),
            nn.Dropout(0.25),
        )

        # Road-map decoder: residual blocks with deconvolution upsampling.
        self.inplanes = 32
        self.conv0 = self._make_layer(BasicBlock, 32, 2)
        self.deconv0 = self._make_deconv_layer(32, 8)
        self.inplanes = 16
        self.conv1 = self._make_layer(BasicBlock, 16, 2)
        self.deconv1 = self._make_deconv_layer(16, 2)
        self.inplanes = 4
        self.conv2 = self._make_layer(BasicBlock, 4, 2)
        self.deconv2 = self._make_deconv_layer(4, 4)
        self.inplanes = 4
        self.conv3 = self._make_layer(BasicBlock, 4, 2)
        self.deconv3 = self._make_deconv_layer(4, 2)
        self.convfinal = nn.Conv2d(2, 2, 1)

        # Coarse detection head (4 anchors).
        self.inplanes = 64
        self.conv0_1_detect = self._make_layer(BasicBlock, 64, 2)
        self.convfinal_0 = nn.Conv2d(
            64,
            len(self.anchors0) * (self.detection_classes + 5), 1)
        self.yolo0 = YOLOLayer(self.anchors0,
                               self.detection_classes,
                               800,
                               device=self.device)
        self.conv0_1 = self._make_layer(BasicBlock, 64, 2)
        self.deconv0_1 = self._make_deconv_layer(64, 8)
        # NOTE(fix): the original assigned self.conv0_1 a second time here
        # with identical arguments, discarding the module created above; the
        # redundant reassignment has been removed.

        # Fine detection head (1 anchor).
        self.inplanes = 16
        self.conv1_1_detect = self._make_layer(BasicBlock, 16, 2)
        self.convfinal_1 = nn.Conv2d(
            16,
            len(self.anchors1) * (self.detection_classes + 5), 1)
        self.yolo1 = YOLOLayer(self.anchors1,
                               self.detection_classes,
                               800,
                               device=self.device)
        self.conv1_1 = self._make_layer(BasicBlock, 16, 2)

        # He init for (transposed) convolutions, constants for batch norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight,
                                        mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.ConvTranspose2d):
                nn.init.kaiming_normal_(m.weight,
                                        mode='fan_out',
                                        nonlinearity='relu')
    def __init__(self,
                 batch_size,
                 step_size,
                 anchors,
                 detection_classes,
                 freeze=False,
                 device=None):
        """Sequence-aware AutoNet.

        An EfficientNet-B3 encoder produces latent codes that two 2-layer
        LSTMs fuse over views/steps before a road-map decoder and two YOLO
        detection heads.

        Args:
            batch_size: mini-batch size (stored for later use).
            step_size: number of steps per sample (stored for later use).
            anchors: anchor boxes; row 0 feeds the fine head, the remaining
                rows the coarse head.
            detection_classes: number of object classes for the YOLO heads.
            freeze: if True, freeze the EfficientNet backbone.
            device: torch device handed to the YOLO layers.
        """
        self.latent = 1000
        self.fc_num = 400
        self.batch_size = batch_size
        self.step_size = step_size
        self.device = device
        self.anchors = anchors
        self.anchors1 = np.reshape(anchors[0], [1, 2])  # single fine anchor
        self.anchors2 = anchors[1:]                     # coarse anchors
        self.detection_classes = detection_classes
        super(AutoNet, self).__init__()

        # Backbone with its classifier replaced by a latent*2 projection.
        self.efficientNet = EfficientNet.from_name('efficientnet-b3',
                                                   freeze=freeze)
        in_feats = self.efficientNet._fc.in_features
        self.efficientNet._fc = nn.Sequential(
            nn.Linear(in_features=in_feats, out_features=2 * self.latent), )

        # Road-map branch: LSTM over latent codes, then fusion to 25x25x32.
        self.rnn1 = nn.LSTM(self.latent,
                            self.fc_num,
                            2,
                            batch_first=True,
                            dropout=0.25)
        self.fc2 = nn.Sequential(
            nn.Linear(self.fc_num * 6, 25 * 25 * 32, bias=False),
            nn.BatchNorm1d(25 * 25 * 32),
            nn.ReLU(inplace=True),
            nn.Dropout(0.25),
        )
        # Detection branch: its own LSTM, then fusion to 25x25x64.
        self.rnn1_1 = nn.LSTM(self.latent,
                              self.fc_num,
                              2,
                              batch_first=True,
                              dropout=0.25)
        self.fc2_1 = nn.Sequential(
            nn.Linear(self.fc_num * 6, 25 * 25 * 64, bias=False),
            nn.BatchNorm1d(25 * 25 * 64),
            nn.ReLU(inplace=True),
            nn.Dropout(0.25),
        )

        # Road-map decoder: residual blocks with channel-halving deconvs.
        self.inplanes = 32
        self.conv0 = self._make_layer(BasicBlock, 32, 2)
        self.deconv0 = self._make_deconv_layer(32, 16)
        self.inplanes = 16
        self.conv1 = self._make_layer(BasicBlock, 16, 2)
        self.deconv1 = self._make_deconv_layer(16, 8)
        self.inplanes = 8
        self.conv2 = self._make_layer(BasicBlock, 8, 2)
        self.deconv2 = self._make_deconv_layer(8, 4)
        self.inplanes = 4
        self.conv3 = self._make_layer(BasicBlock, 4, 2)
        self.deconv3 = self._make_deconv_layer(4, 2)
        self.convfinal = nn.Conv2d(2, 2, 1)

        # Coarse detection head.
        self.inplanes = 64
        self.conv0_1 = self._make_layer(BasicBlock, 64, 2)
        self.deconv0_1 = self._make_deconv_layer(64, 16)
        self.conv0_1_detect = self._make_layer(BasicBlock, 64, 2)
        self.convfinal_0 = nn.Conv2d(
            64,
            len(self.anchors2) * (self.detection_classes + 5), 1)
        self.yolo0 = YOLOLayer(self.anchors2,
                               self.detection_classes,
                               800,
                               device=device)
        # Fine detection head.
        self.inplanes = 16
        self.conv1_1_detect = self._make_layer(BasicBlock, 16, 2)
        self.convfinal_1 = nn.Conv2d(
            16,
            len(self.anchors1) * (self.detection_classes + 5), 1)
        self.yolo1 = YOLOLayer(self.anchors1,
                               self.detection_classes,
                               800,
                               device=device)
        self.conv1_1 = self._make_layer(BasicBlock, 16, 2)

        # He init for (transposed) convs, constants for batch norms, and
        # Xavier init for the LSTM weight matrices.
        for module in self.modules():
            if isinstance(module, (nn.Conv2d, nn.ConvTranspose2d)):
                nn.init.kaiming_normal_(module.weight,
                                        mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.LSTM):
                # Xavier-init the input-hidden and hidden-hidden weights of
                # both stacked layers; biases keep their default init.
                for layer_weights in module.all_weights:
                    nn.init.xavier_normal_(layer_weights[0])
                    nn.init.xavier_normal_(layer_weights[1])
Example #5
0
    def __init__(self, hparams):
        """Build the detection AutoNet from a hyperparameter namespace.

        An EfficientNet-B2 backbone feeds two compressed feature maps
        through per-view FC grids into a road-map decoder and two YOLO
        detection heads.

        Args:
            hparams: namespace providing at least `learning_rate`,
                `anchors_file`, `detection_classes` and `freeze`.
        """
        super().__init__()
        # NOTE(fix): the original called super().__init__() a second time
        # below (as super(AutoNet, self).__init__()), needlessly re-running
        # base-class initialisation; the redundant call has been removed.
        self.fc_num1 = 200
        self.fc_num2 = 300
        self.hparams = hparams
        self.learning_rate = hparams.learning_rate
        self.anchors = get_anchors(hparams.anchors_file)
        self.anchors1 = np.reshape(self.anchors[0], [1, 2])  # fine anchor
        self.anchors0 = self.anchors[1:]                     # coarse anchors
        self.detection_classes = hparams.detection_classes
        # Backbone feature extractor plus 1x1 channel compressions for the
        # two feature scales (352ch @ 8x10 and 120ch @ 16x20).
        self.efficientNet = EfficientNet.from_name('efficientnet-b2', freeze=hparams.freeze)
        self.compressed = nn.Sequential(
            nn.Conv2d(352, 16, 1, bias=False),
            nn.BatchNorm2d(16),
            nn.ReLU(inplace=True),
            nn.Dropout(0.2),
        )
        self.compressed_1 = nn.Sequential(
            nn.Conv2d(120, 8, 1, bias=False),
            nn.BatchNorm2d(8),
            nn.ReLU(inplace=True),
            nn.Dropout(0.2),
        )
        # Per-scale FC stacks for the road-map (fc_num1) and detection
        # (fc_num2) branches.
        self.fc1_1_1 = nn.Sequential(
            nn.Linear(16 * 8 * 10, self.fc_num1 * 2, bias=False),
            nn.BatchNorm1d(self.fc_num1 * 2),
            nn.ReLU(inplace=True),
            nn.Dropout(0.25),
        )
        self.fc1_1_2 = nn.Sequential(
            nn.Linear(8 * 16 * 20, self.fc_num1 * 2, bias=False),
            nn.BatchNorm1d(self.fc_num1 * 2),
            nn.ReLU(inplace=True),
            nn.Dropout(0.25),
        )
        self.fc1_2_1 = nn.Sequential(
            nn.Linear(16 * 8 * 10, self.fc_num2 * 2, bias=False),
            nn.BatchNorm1d(self.fc_num2 * 2),
            nn.ReLU(inplace=True),
            nn.Dropout(0.25),
        )
        self.fc1_2_2 = nn.Sequential(
            nn.Linear(8 * 16 * 20, self.fc_num2 * 2, bias=False),
            nn.BatchNorm1d(self.fc_num2 * 2),
            nn.ReLU(inplace=True),
            nn.Dropout(0.25),
        )

        def view_fcs(in_features, main_out, alt_out):
            # One FC stack per camera view; views 1 and 4 use a different
            # output grid (presumably differently oriented views — TODO
            # confirm against the forward pass).
            fcs = nn.ModuleList([])
            for i in range(6):
                out_features = alt_out if i in (1, 4) else main_out
                fcs.append(nn.Sequential(
                    nn.Linear(in_features, out_features, bias=False),
                    nn.BatchNorm1d(out_features),
                    nn.ReLU(inplace=True),
                    nn.Dropout(0.2),
                ))
            return fcs

        # The four copy-pasted ModuleList loops of the original collapse to
        # one helper; module creation order is unchanged.
        self.fc2_0_1 = view_fcs(self.fc_num1 * 2, 14 * 13 * 32, 13 * 18 * 32)
        self.fc2_0_2 = view_fcs(self.fc_num1 * 2, 28 * 26 * 8, 26 * 36 * 8)
        self.fc2_1_1 = view_fcs(self.fc_num2 * 2, 14 * 13 * 64, 13 * 18 * 64)
        self.fc2_1_2 = view_fcs(self.fc_num2 * 2, 28 * 26 * 8, 26 * 36 * 8)

        # Road-map decoder: residual blocks with deconvolution upsampling.
        self.inplanes = 32
        self.conv0 = self._make_layer(BasicBlock, 32, 2)
        self.deconv0 = self._make_deconv_layer(32, 8)
        self.inplanes = 16
        self.conv1 = self._make_layer(BasicBlock, 16, 2)
        self.deconv1 = self._make_deconv_layer(16, 8)
        self.inplanes = 8
        self.conv2 = self._make_layer(BasicBlock, 8, 2)
        self.deconv2 = self._make_deconv_layer(8, 4)
        self.inplanes = 4
        self.conv3 = self._make_layer(BasicBlock, 4, 2)
        self.deconv3 = self._make_deconv_layer(4, 2)
        self.convfinal = nn.Conv2d(2, 2, 1)

        # Coarse detection head.
        self.inplanes = 64
        self.conv0_1_detect = self._make_layer(BasicBlock, 64, 2)
        self.convfinal_0 = nn.Conv2d(64, len(self.anchors0) * (self.detection_classes + 5), 1)
        self.yolo0 = YOLOLayer(self.anchors0, self.detection_classes, 800)
        self.conv0_1 = self._make_layer(BasicBlock, 64, 2)
        self.deconv0_1 = self._make_deconv_layer(64, 8)

        # Fine detection head.
        self.inplanes = 16
        self.conv1_1_detect = self._make_layer(BasicBlock, 16, 2)
        self.convfinal_1 = nn.Conv2d(16, len(self.anchors1) * (self.detection_classes + 5), 1)
        self.yolo1 = YOLOLayer(self.anchors1, self.detection_classes, 800)
        self.conv1_1 = self._make_layer(BasicBlock, 16, 2)

        # He init for (transposed) convolutions, constants for batch norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.ConvTranspose2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
Example #6
0
    def __init__(self, hparams):
        """Build the latent-fusion AutoNet from a hyperparameter namespace.

        An EfficientNet-B1 encoder produces a latent code per view; FC
        stacks fuse six views into a road-map decoder branch and two YOLO
        detection heads.

        Args:
            hparams: namespace providing at least `learning_rate`,
                `anchors_file`, `detection_classes` and `freeze`.
        """
        super().__init__()
        # NOTE(fix): the original called super().__init__() a second time
        # below (as super(AutoNet, self).__init__()), needlessly re-running
        # base-class initialisation; the redundant call has been removed.
        self.latent = 600
        self.fc_num = 300
        self.hparams = hparams
        self.learning_rate = hparams.learning_rate
        self.anchors = get_anchors(hparams.anchors_file)
        self.anchors1 = np.reshape(self.anchors[0], [1, 2])  # fine anchor
        self.anchors0 = self.anchors[1:]                     # coarse anchors
        self.detection_classes = hparams.detection_classes
        # Backbone with its classifier replaced by a latent projection.
        self.efficientNet = EfficientNet.from_name('efficientnet-b1',
                                                   freeze=hparams.freeze)
        feature = self.efficientNet._fc.in_features
        self.efficientNet._fc = nn.Sequential(
            nn.Linear(in_features=feature, out_features=self.latent),
            nn.BatchNorm1d(self.latent),
            nn.ReLU(inplace=True),
            nn.Dropout(0.25),
        )
        # Road-map branch: per-view compression then 6-view fusion.
        self.fc1 = nn.Sequential(
            nn.Linear(self.latent, self.fc_num, bias=False),
            nn.BatchNorm1d(self.fc_num),
            nn.ReLU(inplace=True),
            nn.Dropout(0.25),
        )
        self.fc2 = nn.Sequential(
            nn.Linear(self.fc_num * 6, 25 * 25 * 32, bias=False),
            nn.BatchNorm1d(25 * 25 * 32),
            nn.ReLU(inplace=True),
            nn.Dropout(0.25),
        )
        # Detection branch: same shape with a wider fused grid (64 channels).
        self.fc1_1 = nn.Sequential(
            nn.Linear(self.latent, self.fc_num, bias=False),
            nn.BatchNorm1d(self.fc_num),
            nn.ReLU(inplace=True),
            nn.Dropout(0.25),
        )
        self.fc2_1 = nn.Sequential(
            nn.Linear(self.fc_num * 6, 25 * 25 * 64, bias=False),
            nn.BatchNorm1d(25 * 25 * 64),
            nn.ReLU(inplace=True),
            nn.Dropout(0.25),
        )
        # Road-map decoder: residual blocks with channel-halving deconvs.
        self.inplanes = 32
        self.conv0 = self._make_layer(BasicBlock, 32, 2)
        self.deconv0 = self._make_deconv_layer(32, 16)
        self.inplanes = 16
        self.conv1 = self._make_layer(BasicBlock, 16, 2)
        self.deconv1 = self._make_deconv_layer(16, 8)
        self.inplanes = 8
        self.conv2 = self._make_layer(BasicBlock, 8, 2)
        self.deconv2 = self._make_deconv_layer(8, 4)
        self.inplanes = 4
        self.conv3 = self._make_layer(BasicBlock, 4, 2)
        self.deconv3 = self._make_deconv_layer(4, 2)
        self.convfinal = nn.Conv2d(2, 2, 1)

        # Coarse detection head.
        self.inplanes = 64
        self.conv0_1_detect = self._make_layer(BasicBlock, 64, 2)
        self.convfinal_0 = nn.Conv2d(
            64,
            len(self.anchors0) * (self.detection_classes + 5), 1)
        self.yolo0 = YOLOLayer(self.anchors0, self.detection_classes, 800)
        self.conv0_1 = self._make_layer(BasicBlock, 64, 2)
        self.deconv0_1 = self._make_deconv_layer(64, 16)
        # NOTE(fix): the original assigned self.conv0_1 a second time here
        # with identical arguments, discarding the module created above; the
        # redundant reassignment has been removed.

        # Fine detection head.
        self.inplanes = 16
        self.conv1_1_detect = self._make_layer(BasicBlock, 16, 2)
        self.convfinal_1 = nn.Conv2d(
            16,
            len(self.anchors1) * (self.detection_classes + 5), 1)
        self.yolo1 = YOLOLayer(self.anchors1, self.detection_classes, 800)
        self.conv1_1 = self._make_layer(BasicBlock, 16, 2)

        # He init for (transposed) convolutions, constants for batch norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight,
                                        mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.ConvTranspose2d):
                nn.init.kaiming_normal_(m.weight,
                                        mode='fan_out',
                                        nonlinearity='relu')