def __init__(self, device=None, jit=False):
    """Set up paired train/eval MobileNetV3-Large models plus example inputs.

    Args:
        device: target device for the models and example batch (or None).
        jit: when True, TorchScript both models and run
            ``optimize_for_inference`` on the eval copy.
    """
    super().__init__()
    self.device = device
    self.jit = jit
    train_net = models.mobilenet_v3_large().to(device)
    infer_net = models.mobilenet_v3_large().to(device)
    if jit:
        train_net = torch.jit.script(train_net)
        infer_net = torch.jit.script(infer_net)
        # optimize_for_inference requires the scripted module to be in
        # eval mode first.
        infer_net.eval()
        infer_net = torch.jit.optimize_for_inference(infer_net)
    self.model = train_net
    self.eval_model = infer_net
    # One fixed batch of 32 ImageNet-shaped inputs.
    self.example_inputs = (torch.randn((32, 3, 224, 224)).to(device),)
def __init__(self, num_classes, pretrain_backbone: bool = False):
    """U-Net style decoder on top of a MobileNetV3-Large encoder.

    Args:
        num_classes: number of output segmentation classes.
        pretrain_backbone: load ImageNet weights for the encoder when True.
    """
    super(MobileV3Unet, self).__init__()
    net = mobilenet_v3_large(pretrained=pretrain_backbone).features

    # Feature-map taps used as skip connections (shallow -> deep).
    stage_indices = [1, 3, 6, 12, 15]
    self.stage_out_channels = [net[i].out_channels for i in stage_indices]
    return_layers = {str(idx): f"stage{pos}" for pos, idx in enumerate(stage_indices)}
    self.backbone = IntermediateLayerGetter(net, return_layers=return_layers)

    # Each Up block fuses the previous decoder output with one skip tap.
    ch = self.stage_out_channels
    self.up1 = Up(ch[4] + ch[3], ch[3])
    self.up2 = Up(ch[3] + ch[2], ch[2])
    self.up3 = Up(ch[2] + ch[1], ch[1])
    self.up4 = Up(ch[1] + ch[0], ch[0])
    self.conv = OutConv(ch[0], num_classes=num_classes)
def __init__(self):
    """MobileNetV3-Large backbone with its last classifier layer stripped,
    plus a fresh 3-way linear head."""
    super().__init__()
    net = models.mobilenet_v3_large(pretrained=True)
    # Everything except the original classifier module.
    self.features = nn.Sequential(*list(net.children())[:-1])
    # Original classifier minus its final 1000-way linear layer.
    self.classifier = nn.Sequential(*list(net.classifier.children())[:-1])
    self.in_features = 960
    # New task head on the 1280-dim penultimate representation.
    self.fc_res = nn.Linear(1280, 3)
def get_mobilenet_v3_large(class_num):
    """Return (model, input_size) for a pretrained MobileNetV3-Large whose
    final classifier layer is replaced with a `class_num`-way linear head.

    The backbone is frozen via ``set_parameter_requires_grad`` before the
    new head is attached, so only the replacement layer trains.
    """
    net = models.mobilenet_v3_large(pretrained=True)
    set_parameter_requires_grad(net)
    net.name = 'mobilenet_v3_large'
    head_in = net.classifier[3].in_features
    net.classifier[3] = nn.Linear(head_in, class_num)
    # MobileNetV3 expects 224x224 inputs.
    return net, 224
def main():
    """Render a Grad-CAM heatmap for MobileNetV3-Large on a local image.

    Loads ``both.png``, normalizes it with ImageNet statistics, computes the
    class-activation map for ImageNet class 281 (tabby cat) on the last
    feature block, and shows the overlay with matplotlib.

    Fix: the assertion message previously read "dose not exist".
    """
    model = models.mobilenet_v3_large(pretrained=True)
    # CAM is taken from the deepest convolutional feature block.
    target_layers = [model.features[-1]]

    data_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    # load image
    img_path = "both.png"
    assert os.path.exists(img_path), "file: '{}' does not exist.".format(
        img_path)
    img = Image.open(img_path).convert('RGB')
    img = np.array(img, dtype=np.uint8)

    # [C, H, W] -> [N, C, H, W]: model expects a batch dimension.
    img_tensor = data_transform(img)
    input_tensor = torch.unsqueeze(img_tensor, dim=0)

    cam = GradCAM(model=model, target_layers=target_layers, use_cuda=False)
    target_category = 281  # tabby, tabby cat
    # target_category = 254  # pug, pug-dog

    grayscale_cam = cam(input_tensor=input_tensor, target_category=target_category)
    grayscale_cam = grayscale_cam[0, :]
    # Overlay expects the base image scaled to [0, 1] floats.
    visualization = show_cam_on_image(img.astype(dtype=np.float32) / 255.,
                                      grayscale_cam,
                                      use_rgb=True)
    plt.imshow(visualization)
    plt.show()
def __init__(self, args):
    """Image aesthetic quality model: three MobileNetV3 feature branches
    feeding several MLP heads.

    Bug fix: ``nn.Module.apply`` mutates the module in place and returns the
    SAME object, so the original code left ``features1``, ``features2`` and
    ``features3`` all aliasing one module with BOTH ``insert_inplate`` hooks
    applied. Each branch now operates on its own deep copy, making the three
    feature extractors independent as the ``576 * 3`` head widths imply.
    """
    import copy  # local import: only needed to clone the backbone features

    super(IAQA_model, self).__init__()
    self.args = args
    MobileNet = model_template.mobilenet_v3_large(pretrained=True)
    # Independent copies — apply() would otherwise mutate a shared module.
    self.features1 = copy.deepcopy(MobileNet.features).apply(insert_inplate_1_2)
    self.features2 = copy.deepcopy(MobileNet.features)
    self.features3 = MobileNet.features.apply(insert_inplate_2_1)
    # NOTE(review): nn.LeakyReLU(True) sets negative_slope=True (i.e. 1.0),
    # not inplace=True — probably intended nn.LeakyReLU(inplace=True).
    # Left unchanged to preserve trained-model behavior; confirm with owner.
    self.classifer1 = nn.Sequential(nn.Linear(576 * 3, args.layer_num),
                                    nn.LeakyReLU(True),
                                    nn.Dropout(0.5))
    # nn.Linear(args.layer_num,1))
    self.classifer2 = nn.Sequential(nn.Linear(576 * 3, args.layer_num),
                                    nn.LeakyReLU(True),
                                    nn.Dropout(0.5),
                                    nn.Linear(args.layer_num, 3))
    self.classifer_att = nn.Sequential(nn.Linear(576 * 3, args.layer_num),
                                       nn.LeakyReLU(True),
                                       nn.Dropout(0.5))
    self.linear = nn.Linear(args.layer_num, 3)
    self.linear2 = nn.Linear(args.layer_num * 2, 1)
def get_model(model_str, device):
    """Build a 10-class torchvision model by name, loading a saved checkpoint
    for it if one exists.

    Args:
        model_str: architecture name; must match a torchvision factory below.
        device: device the model is moved to.

    Raises:
        Exception: if `model_str` is not a supported architecture.
    """
    # Every supported CLI name is exactly the torchvision factory name.
    supported = (
        'squeezenet1_1', 'resnet50', 'squeezenet1_0', 'resnet18', 'alexnet',
        'vgg16', 'densenet161', 'inception_v3', 'googlenet',
        'shufflenet_v2_x1_0', 'mobilenet_v2', 'mobilenet_v3_large',
        'mobilenet_v3_small', 'resnext50_32x4d', 'wide_resnet50_2',
        'mnasnet1_0',
    )
    if model_str not in supported:
        raise Exception(f"Model {model_str} is not supported yet!")
    model = getattr(models, model_str)(num_classes=10).to(device)

    # Resume from a previously saved checkpoint when one is present.
    checkpoint = get_checkpoint_folder(model_str, device)
    if exists(checkpoint):
        model.load_state_dict(torch.load(checkpoint))
    return model
def __init__(self):
    """Dog-breed classifier wrapper: frozen MobileNetV3-Large with a
    133-way head, breed names from a 'dogs' file and weights from
    'mobilenet_model.pt'. Missing files are recorded in ``self.error``
    instead of raising."""
    net = mobilenet_v3_large(pretrained=True)
    # Freeze the whole backbone; only the replaced head layers differ.
    for p in net.parameters():
        p.requires_grad = False
    net.classifier[2] = BatchNorm1d(1280)
    net.classifier[3] = Linear(1280, 133, bias=True)
    self.model = net

    self.error = None
    try:
        with open('dogs', 'r') as fh:
            self.names = fh.read().split('\n')
    except FileNotFoundError:
        self.error = 'No dogs file'
    else:
        try:
            self.model.load_state_dict(
                load('mobilenet_model.pt', map_location='cpu'))
            self.model.eval()
        except FileNotFoundError:
            self.error = 'No model file'
import os
import torch.nn as nn
import torch
from torchvision import models

# Cache downloaded pretrained weights under ./models instead of the
# default torch hub cache directory.
os.environ['TORCH_HOME'] = 'models'

# alexnet_model = models.alexnet(pretrained=True)

# Module-level ImageNet-pretrained backbones, instantiated at import time.
# NOTE(review): presumably consumed by model classes elsewhere in this
# file — verify before removing.
mobilenet_v3l_model = models.mobilenet_v3_large(pretrained=True)
resnet50_model = models.resnet50(pretrained=True)

# class AlexNetPlusLatent(nn.Module):
#     def __init__(self, n_classes, bits=128):
#         super(AlexNetPlusLatent, self).__init__()
#         self.bits = bits
#         self.features = nn.Sequential(*list(alexnet_model.features.children()))
#         self.remain = nn.Sequential(*list(alexnet_model.classifier.children())[:-1])
#         self.Linear1 = nn.Linear(4096, self.bits)
#         self.sigmoid = nn.Sigmoid()
#         self.Linear2 = nn.Linear(self.bits, n_classes)
#
#     def forward(self, x, predict=False):
#         x = self.features(x)
#         x = x.view(x.size(0), 256 * 6 * 6)
#         x = self.remain(x)
#         x = self.Linear1(x)
#         features = self.sigmoid(x)
#         if predict:
#             return features
#         result = self.Linear2(features)
#         return features, result
def mobilenet_v3_large():
    """Return an ImageNet-pretrained torchvision MobileNetV3-Large."""
    net = models.mobilenet_v3_large(pretrained=True)
    return net
def __init__(self, device, cnn_type='new', cnn_freeze=False, rnn_type='lstm',
             bidirection='False', regression='last_only',
             input_sequence_length=2, hidden_size=100, num_layers=2,
             learning_rate=0.0001):
    """CNN + RNN regression model.

    A per-frame CNN (custom stack or a pretrained torchvision backbone)
    feeds an RNN/LSTM whose output drives either a single linear head
    ('last_only') or a 3-layer MLP over the whole sequence
    ('full_sequence'). Also builds the Adam optimizer and MSE loss.

    Args:
        device: device the whole module is moved to.
        cnn_type: 'new', 'mobilenetV3_large', 'mobilenetV3_small' or 'vgg16'.
        cnn_freeze: freeze all pretrained CNN parameters when True.
        rnn_type: 'rnn' or 'lstm'.
        bidirection: unused here — TODO confirm it is consumed elsewhere.
        regression: 'last_only' or 'full_sequence' head layout.
        input_sequence_length: frames per sample (full_sequence head only).
        hidden_size: RNN hidden width.
        num_layers: RNN depth; dropout 0.5 is enabled only when > 1.
        learning_rate: Adam learning rate.
    """
    super().__init__()

    ### Deep Neural Network Setup ###
    self.cnn_type = cnn_type
    if cnn_type == 'new':
        # Hand-built conv stack: 3 -> 2048 channels with BN + LeakyReLU.
        self.CNN = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=64, kernel_size=(7, 7),
                      stride=(2, 2), padding=(3, 3), bias=False),
            nn.BatchNorm2d(64),
            nn.LeakyReLU(0.1),
            nn.Conv2d(in_channels=64, out_channels=128, kernel_size=(5, 5),
                      stride=(2, 2), padding=(2, 2), bias=False),
            nn.BatchNorm2d(128),
            nn.LeakyReLU(0.1),
            nn.Conv2d(in_channels=128, out_channels=256, kernel_size=(5, 5),
                      stride=(2, 2), padding=(2, 2), bias=False),
            nn.BatchNorm2d(256),
            nn.LeakyReLU(0.1),
            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=(3, 3),
                      stride=(1, 1), padding=(1, 1), bias=False),
            nn.BatchNorm2d(256),
            nn.LeakyReLU(0.1),
            nn.Conv2d(in_channels=256, out_channels=512, kernel_size=(3, 3),
                      stride=(2, 2), padding=(1, 1), bias=False),
            nn.BatchNorm2d(512),
            nn.LeakyReLU(0.1),
            nn.MaxPool2d(kernel_size=3, stride=1),
            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=(3, 3),
                      stride=(1, 1), padding=(1, 1), bias=False),
            nn.BatchNorm2d(512),
            nn.LeakyReLU(0.1),
            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=(3, 3),
                      stride=(2, 2), padding=(1, 1), bias=False),
            nn.BatchNorm2d(512),
            nn.LeakyReLU(0.1),
            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=(3, 3),
                      stride=(1, 1), padding=(1, 1), bias=False),
            nn.BatchNorm2d(512),
            nn.LeakyReLU(0.1),
            # nn.AvgPool2d(kernel_size=3, stride=1),
            nn.Conv2d(in_channels=512, out_channels=1024, kernel_size=(3, 3),
                      stride=(2, 2), padding=(1, 1), bias=False),
            nn.BatchNorm2d(1024),
            nn.LeakyReLU(0.1),
            nn.Conv2d(in_channels=1024, out_channels=1024, kernel_size=(3, 3),
                      stride=(1, 1), padding=(1, 1), bias=False),
            nn.BatchNorm2d(1024),
            nn.LeakyReLU(0.1),
            nn.AvgPool2d(kernel_size=3, stride=1),
            nn.Conv2d(in_channels=1024, out_channels=2048, kernel_size=(3, 3),
                      stride=(1, 1), padding=(1, 1), bias=False),
            nn.BatchNorm2d(2048),
            nn.LeakyReLU(0.1),
        )
        # Flattened CNN feature width fed to the RNN.
        # NOTE(review): assumes a fixed input resolution — confirm with caller.
        CNN_flat_output_size = 16384
        self.CNN.to(device)
    elif cnn_type == 'mobilenetV3_large':
        self.CNN = models.mobilenet_v3_large(pretrained=True)
        # Pretrained backbones end in the 1000-way ImageNet head.
        CNN_flat_output_size = 1000
        if cnn_freeze == True:
            print('Freeze ' + cnn_type)
            # Walk children -> layers -> params to disable all gradients.
            for name, module in self.CNN.named_children():
                for layer in module.children():
                    for param in layer.parameters():
                        param.requires_grad = False
                        # print(param)
    elif cnn_type == 'mobilenetV3_small':
        self.CNN = models.mobilenet_v3_small(pretrained=True)
        CNN_flat_output_size = 1000
        if cnn_freeze == True:
            print('Freeze ' + cnn_type)
            for name, module in self.CNN.named_children():
                for layer in module.children():
                    for param in layer.parameters():
                        param.requires_grad = False
                        # print(param)
    elif cnn_type == 'vgg16':
        self.CNN = models.vgg16(pretrained=True)
        CNN_flat_output_size = 1000
        if cnn_freeze == True:
            print('Freeze ' + cnn_type)
            for name, module in self.CNN.named_children():
                for layer in module.children():
                    for param in layer.parameters():
                        param.requires_grad = False
                        # print(param)

    self.hidden_size = hidden_size
    self.num_layers = num_layers
    self.rnn_type = rnn_type
    self.regression = regression
    # Inter-layer dropout is only valid for stacked RNNs (num_layers > 1).
    if rnn_type == 'rnn':
        if num_layers > 1:
            self.RNN = nn.RNN(input_size=CNN_flat_output_size,
                              hidden_size=hidden_size, num_layers=num_layers,
                              batch_first=True, dropout=0.5)
        else:
            self.RNN = nn.RNN(input_size=CNN_flat_output_size,
                              hidden_size=hidden_size, num_layers=num_layers,
                              batch_first=True)
    elif rnn_type == 'lstm':
        if num_layers > 1:
            self.RNN = nn.LSTM(input_size=CNN_flat_output_size,
                               hidden_size=hidden_size, num_layers=num_layers,
                               batch_first=True, dropout=0.5)
        else:
            self.RNN = nn.LSTM(input_size=CNN_flat_output_size,
                               hidden_size=hidden_size, num_layers=num_layers,
                               batch_first=True)

    # self.linear = nn.Linear(in_features=hidden_size, out_features=6)
    if regression == 'last_only':
        self.linear = nn.Linear(
            in_features=hidden_size,
            out_features=3)  # For last sequence only case
    elif regression == 'full_sequence':
        # 3-layer MLP over the concatenated per-step RNN outputs,
        # halving the width at each stage down to 3 outputs.
        in_features_num = input_sequence_length * hidden_size
        out_features_num = (num_layers * hidden_size) // 2
        self.linear1 = nn.Linear(
            in_features=in_features_num,
            out_features=out_features_num)  # For full sequence
        self.batchnorm_linear1 = nn.BatchNorm1d(out_features_num)
        self.leakyrelu_linear1 = nn.LeakyReLU(0.1)
        self.dropout_linear1 = nn.Dropout(p=0.5)
        in_features_num = out_features_num
        out_features_num = out_features_num // 2
        self.linear2 = nn.Linear(
            in_features=in_features_num,
            out_features=out_features_num)  # For full sequence
        self.batchnorm_linear2 = nn.BatchNorm1d(out_features_num)
        self.leakyrelu_linear2 = nn.LeakyReLU(0.1)
        self.dropout_linear2 = nn.Dropout(p=0.5)
        in_features_num = out_features_num
        out_features_num = 3
        self.linear3 = nn.Linear(
            in_features=in_features_num,
            out_features=out_features_num)  # For full sequence

    ### Training Setup ###
    self.device = device
    self.to(self.device)
    # self.optimizer = optim.RMSprop(self.parameters(), lr=learning_rate)
    self.optimizer = optim.Adam(self.parameters(), lr=learning_rate)
    self.translation_loss = nn.MSELoss()
def set_model(model_name, num_classes):
    """Build an ImageNet-pretrained backbone with its classification head
    replaced by a fresh `num_classes`-way linear layer.

    Args:
        model_name: torchvision architecture name or 'efficientnet-b0'..'b7'.
        num_classes: output width of the new head.

    Raises:
        NameError: for any unrecognized model name.
    """
    # Recipe per torchvision backbone: (factory attribute on `models`,
    # which attribute holds the head, head input width).
    recipes = {
        'resnext50_32x4d': ('resnext50_32x4d', 'fc_seq', 2048),
        'resnet18': ('resnet18', 'fc', 512),
        'resnet34': ('resnet34', 'fc', 512),
        'resnet50': ('resnet50', 'fc', 2048),
        'vgg16': ('vgg16', 'classifier6', 4096),
        'densenet121': ('densenet121', 'classifier', 1024),
        'densenet161': ('densenet161', 'classifier', 2208),
        'inception': ('inception_v3', 'fc', 2048),
        'googlenet': ('googlenet', 'fc', 1024),
        'shufflenet_v2_x0_5': ('shufflenet_v2_x0_5', 'fc', 1024),
        'shufflenet_v2_x1_0': ('shufflenet_v2_x1_0', 'fc', 1024),
        'mobilenet_v2': ('mobilenet_v2', 'classifier1', 1280),
        'mobilenet_v3_large': ('mobilenet_v3_large', 'classifier3', 1280),
        'mobilenet_v3_small': ('mobilenet_v3_small', 'classifier3', 1280),
        'wide_resnet50_2': ('wide_resnet50_2', 'fc', 2048),
        'mnasnet0_5': ('mnasnet0_5', 'classifier1', 1280),
        'mnasnet1_0': ('mnasnet1_0', 'classifier1', 1280),
        'alexnet': ('alexnet', 'classifier6', 4096),
        'vgg19_bn': ('vgg19_bn', 'classifier6', 4096),
    }
    if model_name in recipes:
        factory_name, slot, width = recipes[model_name]
        model = getattr(models, factory_name)(pretrained=True)
        new_head = torch.nn.Linear(in_features=width, out_features=num_classes)
        if slot == 'fc':
            model.fc = new_head
        elif slot == 'fc_seq':
            model.fc = torch.nn.Sequential(new_head)
        elif slot == 'classifier':
            model.classifier = new_head
        elif slot == 'classifier6':
            model.classifier[6] = new_head
        elif slot == 'classifier1':
            model.classifier[1] = new_head
        else:  # 'classifier3'
            model.classifier[3] = new_head
        return model

    # EfficientNet variants take num_classes directly.
    if model_name in {f'efficientnet-b{i}' for i in range(8)}:
        return EfficientNet.from_pretrained(model_name, num_classes=num_classes)

    raise NameError(f'!!!!! Model ERROR : {model_name} !!!!!')
def __init__(self):
    """MobileNetV3-Large with its 1000-way ImageNet head replaced by a
    10-class linear layer."""
    super(MobileNetV3, self).__init__()
    num_classes = 10
    backbone = models.mobilenet_v3_large(pretrained=True)
    backbone.classifier[3] = nn.Linear(in_features=1280,
                                       out_features=num_classes)
    self.model = backbone
param.requires_grad = False # 但是参数全部固定了,也没法进行学习,所以我们不固定最后一层,即全连接层 for param in net.classifier[6].parameters(): param.requires_grad = True elif args.model=="Inception_v3": net=models.inception_v3() net.AuxLogits.fc=nn.Linear(768,2) net.fc = nn.Linear(2048, 2) net.aux_logits=False # net=net.cuda() elif args.model=="mobilenet_v3_small": net=models.mobilenet_v3_small() net.classifier[3]=nn.Linear(1024,2) # net.fc = nn.Linear(num_fc, 2) elif args.model=="mobilenet_v3_large": net = models.mobilenet_v3_large() net.classifier[3] = nn.Linear(1280, 2) elif args.model=="ShuffleNet_v2": net=models.shufflenet_v2_x1_0() net.fc=nn.Linear(1024,2) elif args.model == "mobilenet_v2": net = models.mobilenet_v2() net.classifier[1] = nn.Linear(1280, 2) elif args.model=='stn_shuf': net=stn_shufflenet() elif args.model=='stn_trans_shuf': net=stn_trans_shufflenet() elif args.model=='shuf': net=torchvision.models.shufflenet_v2_x1_0(pretrained=False,num_classes=2)