Code Example #1
from torchsummary import summary
from torchvision.models import mobilenet_v3_small, mobilenet_v3_large

if __name__ == '__main__':
    model = mobilenet_v3_small()
    summary(model, (3, 160, 160), batch_size=1)
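torchsummary's summary expects a CUDA device by default; as a fallback when that package (or a GPU) is unavailable, a minimal sketch that just counts parameters with plain PyTorch gives at least the model size:

# Fallback sketch (assumption: torchsummary is unavailable): count parameters
# directly instead of printing a per-layer table.
from torchvision.models import mobilenet_v3_small

model = mobilenet_v3_small()
total = sum(p.numel() for p in model.parameters())
trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f"total params: {total:,}  trainable: {trainable:,}")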
Code Example #2
from torch import nn
from torchvision import models


def mobilenetv3(num_classes, pretrained=True):
    net = models.mobilenet_v3_small(pretrained=pretrained)
    # Swap the final classifier layer (1024 -> num_classes) for fine-tuning.
    net.classifier[-1] = nn.Linear(1024, num_classes)
    return net
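A hypothetical call to the helper above (the 10-class count and the 224x224 dummy input are placeholders, not from the original project):

import torch

net = mobilenetv3(num_classes=10, pretrained=True)
net.eval()
with torch.no_grad():
    logits = net(torch.randn(1, 3, 224, 224))
print(logits.shape)  # torch.Size([1, 10])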
Code Example #3
from torchvision import models


def mobilenet_v3_small():
    return models.mobilenet_v3_small(pretrained=True)
Code Example #4
File: vision_models.py  Project: llvm/mlir-npcomp
def __init__(self):
    super().__init__()
    # Reset seed to make model deterministic.
    torch.manual_seed(0)
    self.mobilenetv3 = models.mobilenet_v3_small()
    self.train(False)
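The snippet shows only __init__; a minimal self-contained sketch of such a deterministic wrapper module might look like the following (the class name and the forward method are assumptions, not taken from vision_models.py):

import torch
from torchvision import models


class MobilenetV3Module(torch.nn.Module):  # hypothetical name
    def __init__(self):
        super().__init__()
        # Reset seed to make model deterministic.
        torch.manual_seed(0)
        self.mobilenetv3 = models.mobilenet_v3_small()
        self.train(False)

    def forward(self, img):
        # Assumed plain inference pass over an (N, 3, H, W) float tensor.
        return self.mobilenetv3(img)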
Code Example #5
    def __init__(self,
                 device,
                 cnn_type='new',
                 cnn_freeze=False,
                 rnn_type='lstm',
                 bidirection='False',
                 regression='last_only',
                 input_sequence_length=2,
                 hidden_size=100,
                 num_layers=2,
                 learning_rate=0.0001):

        super().__init__()

        ### Deep Neural Network Setup ###
        self.cnn_type = cnn_type

        if cnn_type == 'new':
            self.CNN = nn.Sequential(
                nn.Conv2d(in_channels=3,
                          out_channels=64,
                          kernel_size=(7, 7),
                          stride=(2, 2),
                          padding=(3, 3),
                          bias=False),
                nn.BatchNorm2d(64),
                nn.LeakyReLU(0.1),
                nn.Conv2d(in_channels=64,
                          out_channels=128,
                          kernel_size=(5, 5),
                          stride=(2, 2),
                          padding=(2, 2),
                          bias=False),
                nn.BatchNorm2d(128),
                nn.LeakyReLU(0.1),
                nn.Conv2d(in_channels=128,
                          out_channels=256,
                          kernel_size=(5, 5),
                          stride=(2, 2),
                          padding=(2, 2),
                          bias=False),
                nn.BatchNorm2d(256),
                nn.LeakyReLU(0.1),
                nn.Conv2d(in_channels=256,
                          out_channels=256,
                          kernel_size=(3, 3),
                          stride=(1, 1),
                          padding=(1, 1),
                          bias=False),
                nn.BatchNorm2d(256),
                nn.LeakyReLU(0.1),
                nn.Conv2d(in_channels=256,
                          out_channels=512,
                          kernel_size=(3, 3),
                          stride=(2, 2),
                          padding=(1, 1),
                          bias=False),
                nn.BatchNorm2d(512),
                nn.LeakyReLU(0.1),
                nn.MaxPool2d(kernel_size=3, stride=1),
                nn.Conv2d(in_channels=512,
                          out_channels=512,
                          kernel_size=(3, 3),
                          stride=(1, 1),
                          padding=(1, 1),
                          bias=False),
                nn.BatchNorm2d(512),
                nn.LeakyReLU(0.1),
                nn.Conv2d(in_channels=512,
                          out_channels=512,
                          kernel_size=(3, 3),
                          stride=(2, 2),
                          padding=(1, 1),
                          bias=False),
                nn.BatchNorm2d(512),
                nn.LeakyReLU(0.1),
                nn.Conv2d(in_channels=512,
                          out_channels=512,
                          kernel_size=(3, 3),
                          stride=(1, 1),
                          padding=(1, 1),
                          bias=False),
                nn.BatchNorm2d(512),
                nn.LeakyReLU(0.1),

                # nn.AvgPool2d(kernel_size=3, stride=1),
                nn.Conv2d(in_channels=512,
                          out_channels=1024,
                          kernel_size=(3, 3),
                          stride=(2, 2),
                          padding=(1, 1),
                          bias=False),
                nn.BatchNorm2d(1024),
                nn.LeakyReLU(0.1),
                nn.Conv2d(in_channels=1024,
                          out_channels=1024,
                          kernel_size=(3, 3),
                          stride=(1, 1),
                          padding=(1, 1),
                          bias=False),
                nn.BatchNorm2d(1024),
                nn.LeakyReLU(0.1),
                nn.AvgPool2d(kernel_size=3, stride=1),
                nn.Conv2d(in_channels=1024,
                          out_channels=2048,
                          kernel_size=(3, 3),
                          stride=(1, 1),
                          padding=(1, 1),
                          bias=False),
                nn.BatchNorm2d(2048),
                nn.LeakyReLU(0.1),
            )

            CNN_flat_output_size = 16384
            self.CNN.to(device)

        elif cnn_type == 'mobilenetV3_large':

            self.CNN = models.mobilenet_v3_large(pretrained=True)

            CNN_flat_output_size = 1000

            if cnn_freeze == True:
                print('Freeze ' + cnn_type)
                for name, module in self.CNN.named_children():
                    for layer in module.children():
                        for param in layer.parameters():
                            param.requires_grad = False
                            # print(param)

        elif cnn_type == 'mobilenetV3_small':

            self.CNN = models.mobilenet_v3_small(pretrained=True)

            CNN_flat_output_size = 1000

            if cnn_freeze == True:
                print('Freeze ' + cnn_type)
                for name, module in self.CNN.named_children():
                    for layer in module.children():
                        for param in layer.parameters():
                            param.requires_grad = False
                            # print(param)

        elif cnn_type == 'vgg16':

            self.CNN = models.vgg16(pretrained=True)

            CNN_flat_output_size = 1000

            if cnn_freeze == True:
                print('Freeze ' + cnn_type)
                for name, module in self.CNN.named_children():
                    for layer in module.children():
                        for param in layer.parameters():
                            param.requires_grad = False
                            # print(param)

        self.hidden_size = hidden_size
        self.num_layers = num_layers

        self.rnn_type = rnn_type
        self.regression = regression

        if rnn_type == 'rnn':

            if num_layers > 1:
                self.RNN = nn.RNN(input_size=CNN_flat_output_size,
                                  hidden_size=hidden_size,
                                  num_layers=num_layers,
                                  batch_first=True,
                                  dropout=0.5)

            else:
                self.RNN = nn.RNN(input_size=CNN_flat_output_size,
                                  hidden_size=hidden_size,
                                  num_layers=num_layers,
                                  batch_first=True)

        elif rnn_type == 'lstm':

            if num_layers > 1:
                self.RNN = nn.LSTM(input_size=CNN_flat_output_size,
                                   hidden_size=hidden_size,
                                   num_layers=num_layers,
                                   batch_first=True,
                                   dropout=0.5)

            else:
                self.RNN = nn.LSTM(input_size=CNN_flat_output_size,
                                   hidden_size=hidden_size,
                                   num_layers=num_layers,
                                   batch_first=True)

        # self.linear = nn.Linear(in_features=hidden_size, out_features=6)

        if regression == 'last_only':
            self.linear = nn.Linear(
                in_features=hidden_size,
                out_features=3)  # For last sequence only case

        elif regression == 'full_sequence':
            in_features_num = input_sequence_length * hidden_size
            out_features_num = (num_layers * hidden_size) // 2
            self.linear1 = nn.Linear(
                in_features=in_features_num,
                out_features=out_features_num)  # For full sequence
            self.batchnorm_linear1 = nn.BatchNorm1d(out_features_num)
            self.leakyrelu_linear1 = nn.LeakyReLU(0.1)
            self.dropout_linear1 = nn.Dropout(p=0.5)

            in_features_num = out_features_num
            out_features_num = out_features_num // 2
            self.linear2 = nn.Linear(
                in_features=in_features_num,
                out_features=out_features_num)  # For full sequence
            self.batchnorm_linear2 = nn.BatchNorm1d(out_features_num)
            self.leakyrelu_linear2 = nn.LeakyReLU(0.1)
            self.dropout_linear2 = nn.Dropout(p=0.5)

            in_features_num = out_features_num
            out_features_num = 3
            self.linear3 = nn.Linear(
                in_features=in_features_num,
                out_features=out_features_num)  # For full sequence

        ### Training Setup ###
        self.device = device
        self.to(self.device)

        # self.optimizer = optim.RMSprop(self.parameters(), lr=learning_rate)
        self.optimizer = optim.Adam(self.parameters(), lr=learning_rate)

        self.translation_loss = nn.MSELoss()
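The freeze branches above walk named_children -> children -> parameters(); a shorter, functionally equivalent sketch (assuming the intent is to freeze the whole backbone) iterates self.CNN.parameters() directly, which also covers any parameter attached directly to a child module:

if cnn_freeze:
    print('Freeze ' + cnn_type)
    # Freeze every backbone parameter in one pass.
    for param in self.CNN.parameters():
        param.requires_grad = False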
Code Example #6
def __init__(self):
    super().__init__()
    self.model = models.mobilenet_v3_small(pretrained=True)
    # Normalization parameters
    self.mean = torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1)
    self.std = torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)
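Only the constructor is shown; a plausible forward method (an assumption, not taken from the original file) would apply the stored ImageNet statistics before the backbone:

def forward(self, x):
    # Normalize with the buffered mean/std, then run the pretrained backbone.
    x = (x - self.mean.to(x.device)) / self.std.to(x.device)
    return self.model(x)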
Code Example #7
File: training.py  Project: bcaitech1/p1-img-PJHgh
import torch
from torchvision import models
from efficientnet_pytorch import EfficientNet  # assumed source of EfficientNet.from_pretrained


def set_model(model_name, num_classes):
    if model_name == 'resnext50_32x4d':
        model = models.resnext50_32x4d(pretrained=True)
        model.fc = torch.nn.Sequential(torch.nn.Linear(in_features=2048, out_features=num_classes))
    elif model_name == 'resnet18':
        model = models.resnet18(pretrained=True)
        model.fc = torch.nn.Linear(in_features=512, out_features=num_classes)
    elif model_name == 'resnet34':
        model = models.resnet34(pretrained=True)
        model.fc = torch.nn.Linear(in_features=512, out_features=num_classes)
    elif model_name == 'resnet50':
        model = models.resnet50(pretrained=True)
        model.fc = torch.nn.Linear(in_features=2048, out_features=num_classes)
    elif model_name == 'vgg16':
        model = models.vgg16(pretrained=True)
        model.classifier[6] = torch.nn.Linear(in_features=4096, out_features=num_classes)
    elif model_name == 'densenet121':
        model = models.densenet121(pretrained=True)
        model.classifier = torch.nn.Linear(in_features=1024, out_features=num_classes)
    elif model_name == 'densenet161':
        model = models.densenet161(pretrained=True)
        model.classifier = torch.nn.Linear(in_features=2208, out_features=num_classes)
    elif model_name == 'inception':
        model = models.inception_v3(pretrained=True)
        model.fc = torch.nn.Linear(in_features=2048, out_features=num_classes)
    elif model_name == 'googlenet':
        model = models.googlenet(pretrained=True)
        model.fc = torch.nn.Linear(in_features=1024, out_features=num_classes)
    elif model_name == 'shufflenet_v2_x0_5':
        model = models.shufflenet_v2_x0_5(pretrained=True)
        model.fc = torch.nn.Linear(in_features=1024, out_features=num_classes)
    elif model_name == 'shufflenet_v2_x1_0':
        model = models.shufflenet_v2_x1_0(pretrained=True)
        model.fc = torch.nn.Linear(in_features=1024, out_features=num_classes)
    elif model_name == 'mobilenet_v2':
        model = models.mobilenet_v2(pretrained=True)
        model.classifier[1] = torch.nn.Linear(in_features=1280, out_features=num_classes)
    elif model_name == 'mobilenet_v3_large':
        model = models.mobilenet_v3_large(pretrained=True)
        model.classifier[3] = torch.nn.Linear(in_features=1280, out_features=num_classes)
    elif model_name == 'mobilenet_v3_small':
        model = models.mobilenet_v3_small(pretrained=True)
        # mobilenet_v3_small's classifier ends in Linear(1024, num_classes),
        # so the replacement head takes 1024 input features (1280 is the large variant).
        model.classifier[3] = torch.nn.Linear(in_features=1024, out_features=num_classes)
    elif model_name == 'wide_resnet50_2':
        model = models.wide_resnet50_2(pretrained=True)
        model.fc = torch.nn.Linear(in_features=2048, out_features=num_classes)
    elif model_name == 'mnasnet0_5':
        model = models.mnasnet0_5(pretrained=True)
        model.classifier[1] = torch.nn.Linear(in_features=1280, out_features=num_classes)
    elif model_name == 'mnasnet1_0':
        model = models.mnasnet1_0(pretrained=True)
        model.classifier[1] = torch.nn.Linear(in_features=1280, out_features=num_classes)
    elif model_name == 'alexnet':
        model = models.alexnet(pretrained=True)
        model.classifier[6] = torch.nn.Linear(in_features=4096, out_features=num_classes)
    elif model_name == 'vgg19_bn':
        model = models.vgg19_bn(pretrained=True)
        model.classifier[6] = torch.nn.Linear(in_features=4096, out_features=num_classes)    
    elif model_name == 'efficientnet-b0':
        model = EfficientNet.from_pretrained(model_name, num_classes=num_classes)
    elif model_name == 'efficientnet-b1':
        model = EfficientNet.from_pretrained(model_name, num_classes=num_classes)
    elif model_name == 'efficientnet-b2':
        model = EfficientNet.from_pretrained(model_name, num_classes=num_classes)
    elif model_name == 'efficientnet-b3':
        model = EfficientNet.from_pretrained(model_name, num_classes=num_classes)
    elif model_name == 'efficientnet-b4':
        model = EfficientNet.from_pretrained(model_name, num_classes=num_classes)
    elif model_name == 'efficientnet-b5':
        model = EfficientNet.from_pretrained(model_name, num_classes=num_classes)
    elif model_name == 'efficientnet-b6':
        model = EfficientNet.from_pretrained(model_name, num_classes=num_classes)
    elif model_name == 'efficientnet-b7':
        model = EfficientNet.from_pretrained(model_name, num_classes=num_classes)
    else:
        raise NameError(f'!!!!! Model ERROR : {model_name} !!!!!')
    return model
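A hypothetical call (the 18-class count is a placeholder, not taken from the project):

model = set_model('mobilenet_v3_small', num_classes=18)
print(model.classifier[3])  # Linear(in_features=1024, out_features=18, bias=True)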
Code Example #8
    net = models.vgg16(pretrained=True)
    num_fc = net.classifier[6].in_features
    net.classifier[6] = torch.nn.Linear(num_fc, 2)
    for param in net.parameters():
        param.requires_grad = False
    # With every parameter frozen nothing can be learned, so keep the last
    # (fully connected) layer trainable.
    for param in net.classifier[6].parameters():
        param.requires_grad = True
elif args.model == "Inception_v3":
    net = models.inception_v3()
    net.AuxLogits.fc = nn.Linear(768, 2)
    net.fc = nn.Linear(2048, 2)
    net.aux_logits = False
    # net = net.cuda()
elif args.model == "mobilenet_v3_small":
    net = models.mobilenet_v3_small()
    net.classifier[3] = nn.Linear(1024, 2)
    # net.fc = nn.Linear(num_fc, 2)
elif args.model == "mobilenet_v3_large":
    net = models.mobilenet_v3_large()
    net.classifier[3] = nn.Linear(1280, 2)
elif args.model == "ShuffleNet_v2":
    net = models.shufflenet_v2_x1_0()
    net.fc = nn.Linear(1024, 2)
elif args.model == "mobilenet_v2":
    net = models.mobilenet_v2()
    net.classifier[1] = nn.Linear(1280, 2)
elif args.model == 'stn_shuf':
    net = stn_shufflenet()
elif args.model == 'stn_trans_shuf':
    net = stn_trans_shufflenet()
Code Example #9
def __init__(self, dims=512):
    super().__init__()
    model = models.mobilenet_v3_small(pretrained=True)
    self.features = model.features
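The fragment ends with the extracted backbone; a hypothetical standalone use of the same features module (the 224x224 input is an assumption, not from the original project) is:

import torch
from torchvision import models

features = models.mobilenet_v3_small(pretrained=True).features
features.eval()
with torch.no_grad():
    fmap = features(torch.randn(1, 3, 224, 224))
print(fmap.shape)  # expected to be roughly torch.Size([1, 576, 7, 7]) for a 224x224 input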