def __init__(self, model_name, num_classes=5, pretrained=False, aux_logits=False, freeze_features=False):
    """Build a DR classifier on top of a torchvision / EfficientNet backbone.

    Args:
        model_name: one of 'inception-v3', 'densenet-161', 'efficientnet-b7',
            'efficientnet-b5', 'wideresnet'.
        num_classes: number of output classes (stored on the instance; the
            actual head is produced by ``self.createLayer``).
        pretrained: load ImageNet weights for the backbone.
        aux_logits: inception-v3 only — also replace the auxiliary head.
        freeze_features: accepted for API compatibility but currently unused.
            TODO(review): confirm whether backbone freezing should happen here.

    Raises:
        ValueError: if ``model_name`` is not a supported backbone name.
    """
    super(DR_Classifier, self).__init__()
    self.model_name = model_name
    self.num_classes = num_classes
    self.aux_logits = aux_logits
    if self.model_name == 'inception-v3':
        self.model = models.inception_v3(pretrained=pretrained, aux_logits=aux_logits)
        num_ftrs = self.model.fc.in_features
        self.model.fc = self.createLayer(num_ftrs)
        if self.aux_logits:
            # Inception's auxiliary classifier gets the same replacement head.
            num_ftrs = self.model.AuxLogits.fc.in_features
            self.model.AuxLogits.fc = self.createLayer(num_ftrs)
    elif self.model_name == "densenet-161":
        self.model = models.densenet161(pretrained=pretrained)
        num_ftrs = self.model.classifier.in_features
        self.model.classifier = self.createLayer(num_ftrs)
    elif self.model_name in ("efficientnet-b7", "efficientnet-b5"):
        # NOTE(review): from_pretrained always loads ImageNet weights, so the
        # `pretrained` flag is effectively ignored for EfficientNet — confirm.
        self.model = EfficientNet.from_pretrained(model_name)
        num_ftrs = self.model._fc.in_features
        self.model._fc = self.createLayer(num_ftrs)
    elif self.model_name == "wideresnet":
        # BUG FIX: previously hard-coded pretrained=True, ignoring the argument.
        self.model = models.wide_resnet101_2(pretrained=pretrained)
        num_ftrs = self.model.fc.in_features
        self.model.fc = self.createLayer(num_ftrs)
    else:
        # Fail loudly: the old print-and-return left a half-built module that
        # crashed later with a confusing AttributeError.
        raise ValueError("Invalid model name: {}".format(model_name))
def feature_wide_resnet101(pretrained=True, **kwargs):
    """Construct a FeatureResNet with the Wide-ResNet-101-2 layout.

    Args:
        pretrained: when True, copy ImageNet weights from torchvision's
            reference wide_resnet101_2.
        **kwargs: forwarded to FeatureResNet; width_per_group is forced.
    """
    # The "wide" variant doubles the per-group channel width.
    kwargs['width_per_group'] = 64 * 2
    net = FeatureResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
    if not pretrained:
        return net
    # Transfer ImageNet weights from the reference implementation.
    reference = models.wide_resnet101_2(pretrained=True)
    net.load_state_dict(reference.state_dict())
    return net
def wide_resnet101_2(pretrained: bool, progress: bool = True, requires_grad: bool = True):
    """Return torchvision's wide_resnet101_2 with requires_grad set on every parameter.

    Args:
        pretrained: load ImageNet weights.
        progress: show the download progress bar.
        requires_grad: value assigned to ``requires_grad`` of all parameters
            (False freezes the whole network).
    """
    net = models.wide_resnet101_2(pretrained=pretrained, progress=progress)
    for parameter in net.parameters():
        parameter.requires_grad = requires_grad
    return net
def __init__(self, args: Namespace) -> None:
    """Set up a GPU-only embedding extractor around Wide-ResNet-101-2.

    Stores run settings from ``args``, loads the pretrained backbone in eval
    mode, and registers a forward hook that pushes pooled features onto a
    thread-safe queue.

    Raises:
        RuntimeError: when CUDA is unavailable (CPU execution is not supported).
    """
    # Save parameters
    self.data_dir = args.data_dir
    self.tsvname = args.tsvname
    self.embed_dir = args.embed_dir
    self.timeout = args.timeout
    self.log_every = args.log_every
    # Turn PIL warnings into exceptions to filter out bad images.
    # NOTE(review): this mutates the process-wide warnings filter.
    simplefilter('error', Image.DecompressionBombWarning)
    simplefilter('error', UserWarning)
    # Automatically use GPU if available
    if not cuda.is_available():
        raise RuntimeError(
            'Must have CUDA installed in order to run this program.')
    self.device = device('cuda')
    self.model = wide_resnet101_2(pretrained=True, progress=True)
    # Move model to device
    self.model.to(self.device)
    self.model.eval()  # Don't forget to put model in evaluation mode!
    # Use recommended sequence of transforms for ImageNet pretrained models
    self.transforms = Compose([Resize(256, interpolation=Image.BICUBIC),  # Default is bilinear
                               CenterCrop(224),
                               ToTensor(),
                               Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])])
    self.embeddings = Queue(self.log_every)  # Thread-safe
    # Every forward pass pushes the pooled (pre-fc) activation onto the
    # queue as a detached CPU numpy array.
    self.model.avgpool.register_forward_hook(
        lambda m, m_in, m_out: self.embeddings.put(
            m_out.data.detach().cpu().squeeze().numpy()))
def get_model(name, pretrained=True):
    """Return a torchvision backbone and its per-stage output channel counts.

    Args:
        name: backbone identifier ('resnet18' ... 'wide_resnet101').
        pretrained: load ImageNet weights.

    Returns:
        (model, out_channels): the backbone and the channel count after each
        of the four residual stages.

    Raises:
        NotImplementedError: for an unrecognized backbone name.
    """
    if name == 'resnet18':
        model = models.resnet18(pretrained=pretrained)
        out_channels = [64, 128, 256, 512]
    elif name == 'resnet34':
        model = models.resnet34(pretrained=pretrained)
        out_channels = [64, 128, 256, 512]
    elif name == 'resnet50':
        model = models.resnet50(pretrained=pretrained)
        out_channels = [256, 512, 1024, 2048]
    elif name == 'resnet101':
        model = models.resnet101(pretrained=pretrained)
        out_channels = [256, 512, 1024, 2048]
    # ResNeXt
    elif name == 'resnext50':
        model = models.resnext50_32x4d(pretrained=pretrained)
        out_channels = [256, 512, 1024, 2048]
    elif name == 'resnext101':
        model = models.resnext101_32x8d(pretrained=pretrained)
        out_channels = [256, 512, 1024, 2048]
    elif name == 'wide_resnet50':
        model = models.wide_resnet50_2(pretrained=pretrained)
        out_channels = [256, 512, 1024, 2048]
    elif name == 'wide_resnet101':
        model = models.wide_resnet101_2(pretrained=pretrained)
        out_channels = [256, 512, 1024, 2048]
    # Error
    else:
        # BUG FIX: was `raise NotImplemented(...)` — NotImplemented is a
        # sentinel value, not an exception class, so raising it produced a
        # TypeError instead of the intended error.
        raise NotImplementedError('{} backbone model is not implemented so far.'.format(name))
    return model, out_channels
def __init__(self, c_in, model_name):
    """Wrap a truncated ImageNet-pretrained backbone as a frozen feature extractor.

    Args:
        c_in: input channel count. NOTE(review): currently unused here —
            confirm whether an input adapter was intended.
        model_name: one of 'resnet101', 'densenet121', 'densenet161',
            'squeezenet', 'resnext101_32x8d', 'inception_v3'.

    Raises:
        ValueError: for an unsupported model_name (previously this fell
        through and crashed with UnboundLocalError).
    """
    super(PretrainedNet, self).__init__()
    if model_name == 'resnet101':
        model_ft = models.resnet101(pretrained=True)
    elif model_name == 'densenet121':
        model_ft = models.densenet121(pretrained=True)
    elif model_name == 'densenet161':
        model_ft = models.densenet161(pretrained=True)
    elif model_name == 'squeezenet':
        model_ft = models.squeezenet1_0(pretrained=True)
    elif model_name == 'resnext101_32x8d':
        # BUG FIX: this branch previously loaded models.wide_resnet101_2,
        # returning the wrong architecture for the requested name.
        model_ft = models.resnext101_32x8d(pretrained=True)
    elif model_name == 'inception_v3':
        model_ft = models.inception_v3(pretrained=True)
    else:
        raise ValueError('Unsupported model_name: {}'.format(model_name))
    self._set_parameter_requires_grad(model_ft)
    # Truncate each family at a mid-level stage and collapse the height axis
    # with a fixed max-pool.
    if model_name.startswith('res'):
        layers = [i for j, i in enumerate(model_ft.children()) if j < 7]
        self.model = nn.Sequential(*layers, nn.MaxPool2d((4, 1)))
    elif model_name.startswith('dense'):
        layers = model_ft.features[:-3]
        self.model = nn.Sequential(*layers, nn.MaxPool2d((4, 1)))
    elif model_name.startswith('squeeze'):
        layers = model_ft.features[:-2]
        self.model = nn.Sequential(*layers, nn.MaxPool2d((4, 1)))
    elif model_name.startswith('incep'):
        layers = [i for j, i in enumerate(model_ft.children()) if j < 13]
        self.model = nn.Sequential(*layers, nn.MaxPool2d((8, 4)))
def __init__(self, device: Union[str, torch.device] = "cpu", load_from_path: str = None, augmentations: nn.Sequential = None) -> None:
    """Initialise model and learner setup.

    Args:
        device: device the backbone is moved to.
        load_from_path: optional path to a state_dict to restore.
        augmentations: optional augmentation pipeline passed to BYOL; when
            None, BYOL's default augmentations are used.
    """
    self.device = device
    self.model = models.wide_resnet101_2(pretrained=False).to(self.device)
    self.timestamp = datetime.now().strftime("%m-%d-%Y_%H-%M-%S")
    if load_from_path is not None:
        print("Loading model...")
        state_dict = torch.load(load_from_path)
        self.model.load_state_dict(state_dict)
    # Idiom fix: build BYOL once with conditional kwargs instead of two
    # mutually exclusive `if` blocks duplicating the call.
    byol_kwargs = {}
    if augmentations is not None:
        byol_kwargs["augment_fn"] = augmentations
    self.learner = BYOL(self.model, image_size=64, hidden_layer="avgpool",
                        **byol_kwargs)
    self.opt = torch.optim.Adam(self.learner.parameters(), lr=0.0001,
                                betas=(0.9, 0.999))
    self.loss_history: list[float] = []
def extract_resnet_features():
    """Extract 2048-d Wide-ResNet-101-2 features for every album cover.

    Runs the truncated backbone (classifier head removed) over the dataset on
    CUDA, streaming each batch's features to 'album_resnet.csv' and finally
    saving the full matrix to 'album_resnet.npy'.
    """
    # NOTE(review): hard-coded Windows paths — consider parameterizing.
    dataset = MetalAlbumsDataset("D:\\metal_data\\album_id_titles.json",
                                 "D:\\metal_data\\covers_128")
    data_loader = DataLoader(dataset, 50, False, collate_fn=pack_collate_images)
    # Drop the final fc layer; output of the remaining trunk is the pooled
    # 2048-channel feature map.
    resnet = torch.nn.Sequential(
        *list(models.wide_resnet101_2(
            pretrained=True).children())[:-1]).to("cuda")
    resnet.eval()
    all_features = np.zeros((len(dataset), 2048))
    with open('album_resnet.csv', 'w', newline='') as f:
        writer = csv.writer(f, delimiter=',', quoting=csv.QUOTE_NONNUMERIC)
        index = 0  # running row offset into all_features
        for i, (images, ids) in enumerate(data_loader):
            images = images.to("cuda")
            with torch.no_grad():
                features = resnet(images).cpu().numpy()
            # features is (batch, 2048, 1, 1); flatten per image.
            all_features[index:index + len(ids), :] = features.reshape(
                len(ids), 2048)
            index += len(ids)
            for j, album_id in enumerate(ids):
                writer.writerow(
                    [album_id, features[j, :, :].reshape(-1).tolist()])
    np.save("album_resnet.npy", all_features)
def __init__(self, num_classes=60, model_name='resnet50'):
    """View-adaptation (VA) module: a small conv subnetwork predicting 6
    transform parameters, followed by a selectable pretrained classifier.

    Args:
        num_classes: stored for downstream use (not read here).
        model_name: which pretrained backbone to use as the classifier.
    """
    super(VA, self).__init__()
    self.num_classes = num_classes
    # Two strided 5x5 conv blocks reduce the input before regression.
    self.conv1 = nn.Conv2d(3, 128, kernel_size=5, stride=2, bias=False)
    self.bn1 = nn.BatchNorm2d(128)
    self.relu1 = nn.ReLU(inplace=True)
    self.conv2 = nn.Conv2d(128, 128, kernel_size=5, stride=2, bias=False)
    self.bn2 = nn.BatchNorm2d(128)
    self.relu2 = nn.ReLU(inplace=True)
    # NOTE(review): named 'avepool' but is actually a MaxPool2d — the name
    # is kept because it is part of the module's attribute interface.
    self.avepool = nn.MaxPool2d(7)
    # 6 outputs — presumably view-transform parameters; confirm with forward().
    self.fc = nn.Linear(6272, 6)
    if model_name=='resnet50':
        self.classifier = models.resnet50(pretrained=True)
    elif model_name=='resnext50_32x4d':
        self.classifier = models.resnext50_32x4d(pretrained=True)
    elif model_name=='resnext101_32x8d':
        self.classifier = models.resnext101_32x8d(pretrained=True)
    elif model_name=='wide_resnet50_2':
        self.classifier = models.wide_resnet50_2(pretrained=True)
    elif model_name=='wide_resnet101_2':
        self.classifier = models.wide_resnet101_2(pretrained=True)
    elif model_name=='xception':
        self.classifier = pretrainedmodels.xception(num_classes=1000, pretrained='imagenet')
    # Report the number of trainable parameters in the chosen classifier.
    total_params = 0
    for nname, pparameter in self.classifier.named_parameters():
        if not pparameter.requires_grad:
            continue
        param = pparameter.numel()
        total_params+=param
    print(f"Total Trainable Params: {total_params}")
    self.init_weight()
def wide_resnet101(pretrained=False, imagenet_weight=False):
    """Constructs a Wide_ResNet-101 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        imagenet_weight: optional checkpoint path; when given (and
            ``pretrained``), its 'state_dict' (after key renaming) is loaded
            instead of the torchvision default weights.
    """
    print('=== Using wide_resnet 101 ===')
    if pretrained and imagenet_weight:
        print('=== use {} as backbone'.format(imagenet_weight))
        model = models.wide_resnet101_2()
        state_dict = torch.load(imagenet_weight)['state_dict']
        state_dict = exchange_weightkey_in_state_dict(state_dict)
        model.load_state_dict(state_dict)
    elif pretrained:
        print('=== use pytorch default backbone')
        # Perf fix: previously an un-initialized model was constructed first
        # and then thrown away when this second, pretrained one was built.
        model = models.wide_resnet101_2(pretrained=True)
    else:
        model = models.wide_resnet101_2()
    return model
def get_wide_resnet101_2(class_num):
    """ImageNet-pretrained Wide-ResNet-101-2 with a fresh ``class_num``-way head.

    Returns:
        (model, input_size): the network and its expected input resolution (224).
    """
    net = models.wide_resnet101_2(pretrained=True)
    # Freeze/unfreeze policy is delegated to the shared helper.
    set_parameter_requires_grad(net)
    net.name = 'wide_resnet101_2'
    net.fc = nn.Linear(net.fc.in_features, class_num)
    return net, 224
def get_model(model_name):
    """ Get specific modified pre-trained models by name

    Each returned model has its classification head replaced with a fresh
    26-class layer; all other weights keep their ImageNet initialization.
    Returns None for an unknown name.
    """
    model = None
    if model_name == "AlexNet":
        model = models.alexnet(pretrained=True)
        # BUG FIX: AlexNet has no `.fc` — its head is classifier[6].
        in_features = model.classifier[6].in_features
        model.classifier[6] = nn.Linear(in_features, 26)
    elif model_name == "VGG":
        model = models.vgg16(pretrained=True)
        # BUG FIX: VGG has no `.fc` — its head is classifier[6].
        in_features = model.classifier[6].in_features
        model.classifier[6] = nn.Linear(in_features, 26)
    elif model_name == "ResNet":
        model = models.resnet18(pretrained=True)
        in_features = model.fc.in_features
        model.fc = nn.Linear(in_features, 26)
    elif model_name == "SqueezNet":
        model = models.squeezenet1_1(pretrained=True)
        # BUG FIX: SqueezeNet's head is a 1x1 conv in classifier[1], not `.fc`.
        model.classifier[1] = nn.Conv2d(512, 26, kernel_size=1)
        model.num_classes = 26
    elif model_name == "DenseNet":
        model = models.densenet121(pretrained=True)
        in_features = model.classifier.in_features
        model.classifier = nn.Linear(in_features, 26)
    elif model_name == "Inception":
        model = models.inception_v3(pretrained=True)
        in_features = model.fc.in_features
        model.fc = nn.Linear(in_features, 26)
    elif model_name == "GoogleNet":
        model = models.googlenet(pretrained=True)
        in_features = model.fc.in_features
        model.fc = nn.Linear(in_features, 26)
    elif model_name == "ShuffleNet":
        model = models.shufflenet_v2_x1_0(pretrained=True)
        in_features = model.fc.in_features
        model.fc = nn.Linear(in_features, 26)
    elif model_name == "MobileNet":
        model = models.mobilenet_v2(pretrained=True)
        # BUG FIX: MobileNetV2's head is classifier[1], not `.fc`.
        in_features = model.classifier[1].in_features
        model.classifier[1] = nn.Linear(in_features, 26)
    elif model_name == "ResNext":
        model = models.resnext101_32x8d(pretrained=True)
        in_features = model.fc.in_features
        model.fc = nn.Linear(in_features, 26)
    elif model_name == "WResNet":
        model = models.wide_resnet101_2(pretrained=True)
        in_features = model.fc.in_features
        model.fc = nn.Linear(in_features, 26)
    elif model_name == "MNASNet":
        model = models.mnasnet1_0(pretrained=True)
        # BUG FIX: MNASNet's head is classifier[1], not `.fc`.
        in_features = model.classifier[1].in_features
        model.classifier[1] = nn.Linear(in_features, 26)
    return model
def __init__(self):
    """Wide-ResNet-101-2 feature extractor: the pretrained backbone with its
    final fc layer removed, ending at global average pooling."""
    super(feature_extractor, self).__init__()
    backbone = models.wide_resnet101_2(pretrained=True)
    trunk = list(backbone.children())[:-1]  # drop the classification layer
    self.net = nn.Sequential(*trunk)
    self.ft_size = 2048  # channel count of the pooled trunk output
def create_poseresnet_model(enable_binary=False):
    """Build a pose_resnet: Wide-ResNet-101-2 backbone plus a 5-layer classifier.

    Args:
        enable_binary: use a 2-class head instead of the default 3-class one.

    Returns:
        The assembled pose_resnet module.
    """
    # TODO: Test trained vs. pretrained
    backbone_layer = models.wide_resnet101_2(pretrained=True)
    # Build the head once with the right class count — previously a 3-class
    # head was constructed and immediately discarded in the binary case.
    num_classes = 2 if enable_binary else 3
    classification = five_layer_classification(512, 256, 512, 256, num_classes)
    return pose_resnet(backbone_layer, classification)
def getValueFromJson(file_json, model_path_load):
    """Read a gauge's digits with a Wide-ResNet checkpoint and return the value.

    Loads all digit crops described by ``file_json`` as a single batch, runs
    the restored classifier, concatenates the per-digit argmax predictions
    into an integer, and applies the decimal shift from 'digitFractNo'.
    """
    global is_cuda, optimizer, train_transform
    is_cuda = False
    if torch.cuda.is_available():
        is_cuda = True
    gaugedataset = DigitGaugeDataset(file_json, train_transform)
    len_digit = len(gaugedataset)
    # One batch containing every digit of the gauge, in order.
    gauge_data_loader = torch.utils.data.DataLoader(gaugedataset, batch_size=len_digit, shuffle=False)
    data, target = next(iter(gauge_data_loader))
    resnet = models.wide_resnet101_2(pretrained=False)
    # NOTE(review): assigning out_features does NOT resize the Linear layer —
    # fc still outputs 1000 logits. If the checkpoint below was trained with
    # this same construction it loads consistently, but this looks like a
    # latent bug; confirm against the training script.
    resnet.fc.out_features = 10
    if is_cuda == True:
        resnet = resnet.cuda()
    if is_cuda == True:
        dev = torch.device('cuda')
    else:
        dev = torch.device('cpu')
    checkpoint = torch.load(model_path_load, map_location=dev)
    resnet.load_state_dict(checkpoint['model_state_dict'])
    # for layer in resnet.classifier.children():
    #     if (type(layer) == nn.Dropout):
    #         layer.train(False)
    #         layer.p = 0
    resnet.eval()
    # resnet.train()
    # NOTE(review): Variable/volatile are deprecated no-ops on modern PyTorch.
    volatile = True
    if is_cuda:
        data, target = data.cuda(), target.cuda()
    data, target = Variable(data, volatile), Variable(target)
    output = resnet(data)
    # Per-digit class prediction (argmax over logits).
    preds = output.data.max(dim=1, keepdim=True)[1]
    count_correct = preds.eq(target.data.view_as(preds)).cpu().sum()
    list_preds = preds.view(-1).tolist()
    print(
        f'target:{target.view(-1).tolist()}, preds:{list_preds}, accuracy :{count_correct / len_digit}'
    )
    # Concatenate digit predictions into one integer reading.
    npred = int(''.join([str(aa) for aa in list_preds]))
    # Shift the decimal point for gauges with fractional digits.
    if gaugedataset.dict_json_info['digitFractNo'] > 0:
        npred = npred / (10**gaugedataset.dict_json_info['digitFractNo'])
    return npred
def get_model(model_name, train_set=None):
    """Instantiate an experiment model by name.

    Args:
        model_name: identifier of the architecture to build.
        train_set: required only for 'linear'/'logistic', where the input
            dimension is derived from the first sample.

    Raises:
        ValueError: for an unknown model_name (previously this fell through
        and crashed with UnboundLocalError on ``return model``).
    """
    if model_name in ["linear", "logistic"]:
        batch = train_set[0]
        # NOTE(review): input_dim comes from shape[0] of the sample's images
        # tensor — confirm this is the feature dimension, not a batch axis.
        model = LinearRegression(input_dim=batch['images'].shape[0], output_dim=1)
    elif model_name == "mlp":
        model = Mlp(n_classes=10, dropout=False)
    elif model_name == "mlp_dropout":
        model = Mlp(n_classes=10, dropout=True)
    elif model_name == "resnet34":
        model = ResNet([3, 4, 6, 3], num_classes=10)
    elif model_name == "resnet34_100":
        model = ResNet([3, 4, 6, 3], num_classes=100)
    elif model_name == "resnet18":
        model = models.resnet18()
    elif model_name == "resnet50":
        model = models.resnet50()
    elif model_name == "resnet101":
        model = models.resnet101()
    elif model_name == "resnet152":
        model = models.resnet152()
    elif model_name == "wide_resnet101":
        model = models.wide_resnet101_2()
    elif model_name == "densenet121":
        model = DenseNet121(num_classes=10)
    elif model_name == "densenet121_100":
        model = DenseNet121(num_classes=100)
    elif model_name == "matrix_fac_1":
        model = LinearNetwork(6, [1], 10, bias=False)
    elif model_name == "matrix_fac_4":
        model = LinearNetwork(6, [4], 10, bias=False)
    elif model_name == "matrix_fac_10":
        model = LinearNetwork(6, [10], 10, bias=False)
    elif model_name == "linear_fac":
        model = LinearNetwork(6, [], 10, bias=False)
    else:
        raise ValueError("Unknown model_name: {}".format(model_name))
    return model
def __init__(self, in_channels, pretrained=False, version='resnet101', clf=None):
    """ResNet-family backbone with a configurable input stem and tap points.

    Args:
        in_channels: channel count of the input images (stem conv is rebuilt).
        pretrained: load pretrained weights for the chosen backbone.
        version: backbone identifier (resnet/resnext/wide_resnet/resnest).
        clf: downstream head ('deeplabv3Plus', 'PointRend', or None) —
            selects which intermediate layers are exposed.
    """
    super(ResNet_BB, self).__init__()
    version = version.strip()
    # Dispatch table: every builder takes the `pretrained` flag positionally.
    builders = {
        'resnet18': models.resnet18,
        'resnet34': models.resnet34,
        'resnet50': models.resnet50,
        'resnet101': models.resnet101,
        'resnet152': models.resnet152,
        'resnext50_32x4d': models.resnext50_32x4d,
        'resnext101_32x8d': models.resnext101_32x8d,
        'wide_resnet50_2': models.wide_resnet50_2,
        'wide_resnet101_2': models.wide_resnet101_2,
        'resnest50': resnest50,
        'resnest101': resnest101,
        'resnest200': resnest200,
        'resnest269': resnest269,
    }
    if version not in builders:
        raise NotImplementedError(
            'version {} is not supported as of now'.format(version))
    resnet = builders[version](pretrained)
    # Rebuild the stem so the backbone accepts `in_channels` inputs.
    resnet.conv1 = nn.Conv2d(in_channels, 64, kernel_size=(7, 7),
                             stride=(2, 2), padding=(3, 3), bias=False)
    # Choose which intermediate activations the head needs.
    if clf == 'deeplabv3Plus':
        return_layers = {'layer4': 'out', 'layer1': 'low_level'}
    elif clf == 'PointRend':
        return_layers = {'layer4': 'out', 'layer2': 'fine_grained'}
    else:
        return_layers = {'layer4': 'out'}
    self.backbone = IntermediateLayerGetter(resnet, return_layers)
    self.out_channels = 2048
    self.low_channels = 256
    self.fine_grained_channels = 512
def get_Oracle(args, chk=False):
    """Return an ImageNet-pretrained Wide-ResNet-101-2 oracle in eval mode.

    Args:
        args: namespace providing .device (and dataloader settings when chk).
        chk: when truthy, evaluate the oracle on the ImageNet test split and
            print its accuracy before returning.
    """
    model = models.wide_resnet101_2(pretrained=True)
    model = model.to(args.device)
    model.eval()
    # Idiom fix: `chk is True` rejected truthy non-bool values (e.g. 1).
    if chk:
        _, test_loader = ImageNet_dataloader(args)
        best_acc = test(args, model, args.device, test_loader)
        print("\nOracle Acc for all classes=%.2f%%" % (best_acc * 100))
    return model
def __init__(self, embed_size, use_images):
    """Image encoder: Wide-ResNet trunk plus a Linear->BatchNorm->Dropout projection.

    Args:
        embed_size: dimensionality of the output embedding.
        use_images: flag stored for the forward pass (not read here).
    """
    super(Encoder, self).__init__()
    self.use_images = use_images
    resnet = models.wide_resnet101_2(pretrained=True)
    # Projects the 2048-d pooled features down to embed_size.
    self.connection_layer = torch.nn.Sequential(
        torch.nn.Linear(resnet.fc.in_features, embed_size),
        torch.nn.BatchNorm1d(embed_size),
        torch.nn.Dropout(0.5)
    )
    # NOTE(review): init_weights is CALLED and its result handed to .apply();
    # this is correct only if init_weights is a factory returning the
    # per-module initializer — if it IS the initializer, this should be
    # .apply(init_weights). Confirm against its definition.
    self.connection_layer.apply(init_weights())
    # Backbone without the final fc layer; ends at global average pooling.
    self.resnet = torch.nn.Sequential(*list(resnet.children())[:-1])
def load_wide_resnet(num_classes, device):
    """Pretrained Wide-ResNet-101-2 with only children 7 (layer4) and 9 (fc)
    left trainable, plus the matching train/valid transforms.

    Returns:
        (model, train_transform, valid_transform, run_name)
    """
    net = models.wide_resnet101_2(pretrained=True).to(device)
    trainable = {7, 9}  # child indices kept trainable
    for idx, child in enumerate(net.children()):
        if idx in trainable:
            continue
        for param in child.parameters():
            param.requires_grad = False
    # Fresh classification head sized for this task.
    net.fc = nn.Linear(net.fc.in_features, num_classes).to(device)
    train_transform, valid_transform = load_transform(image_size=256, crop_size=224)
    return net, train_transform, valid_transform, "pretrained-wide-resnet"
def __init__(self: 'Encoder', z_size: int) -> None:
    """Initialize the encoder.

    A frozen, pretrained Wide-ResNet-101-2 trunk whose classification layer
    is replaced by a trainable projection into the latent space.

    Parameters
    ----------
    z_size: int
        size of the latent space ( output size )
    """
    super(Encoder, self).__init__()
    self.z_size = z_size
    self.resnet = models.wide_resnet101_2(pretrained=True)
    self._disable_grad()  # freeze the pretrained trunk
    # Swap the 1000-way ImageNet head for a z_size-dimensional projection.
    self.resnet.fc = nn.Linear(self.resnet.fc.in_features, z_size)
def load_model(model_name):
    """Return a pretrained backbone by name.

    Torchvision model names map to ``models.<name>(pretrained=True)``; the
    '_swsl'/'_ssl' names are fetched from the facebookresearch
    semi-supervised hub. Unknown names return None.
    """
    torchvision_names = {
        'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152',
        'resnext50_32x4d', 'resnext101_32x8d',
        'wide_resnet50_2', 'wide_resnet101_2',
    }
    hub_names = {
        'resnet18_swsl', 'resnet50_swsl', 'resnext50_32x4d_swsl',
        'resnext101_32x4d_swsl', 'resnext101_32x8d_swsl',
        'resnext101_32x16d_swsl',
        'resnet18_ssl', 'resnet50_ssl', 'resnext50_32x4d_ssl',
        'resnext101_32x4d_ssl', 'resnext101_32x8d_ssl',
        'resnext101_32x16d_ssl',
    }
    if model_name in torchvision_names:
        # The torchvision factory function is named exactly like the key.
        return getattr(models, model_name)(pretrained=True)
    if model_name in hub_names:
        # The hub entry point is named exactly like the key.
        return torch.hub.load(
            'facebookresearch/semi-supervised-ImageNet1K-models', model_name)
    return None
def __init__(self, args: Namespace, dim: int = 2048) -> None:
    """Build a FAISS nearest-neighbour index over precomputed embeddings
    plus a Wide-ResNet-101-2 query encoder.

    Args:
        args: namespace providing data_dir, images_dir, captions, embeddings,
            k, and metric.
        dim: embedding dimensionality.
    """
    self.data_dir = args.data_dir
    self.images_dir = args.images_dir
    with open(path.join(args.data_dir, args.captions)) as infile:
        self.captions = infile.readlines()
    self.embeddings = np.load(path.join(args.data_dir, args.embeddings))
    # BUG FIX: `k` and `metric` were read as bare names that do not exist in
    # this scope (NameError at runtime); take them from args like every
    # other setting.
    self.k = args.k
    self.metric = args.metric
    if self.metric == -1:  # Cosine similarity
        self.index = faiss.IndexFlatIP(dim)
        # Normalizing makes inner product equivalent to cosine similarity.
        faiss.normalize_L2(self.embeddings)
        self.index.add(self.embeddings)
    elif self.metric == 1:  # Euclidean distance (no square root)
        self.index = faiss.IndexFlatL2(dim)
        self.index.add(self.embeddings)
    elif self.metric == 23:  # Mahalanobis distance
        self.index = faiss.IndexFlatL2(dim)
        x_centered = self.embeddings - self.embeddings.mean(0)
        # Whitening transform from the Cholesky factor of the covariance;
        # L2 distance in the whitened space equals Mahalanobis distance.
        self.transform = np.linalg.inv(np.linalg.cholesky(
            np.dot(x_centered.T, x_centered) / x_centered.shape[0])).T
        self.index.add(
            np.dot(self.embeddings, self.transform).astype(np.float32))
    elif self.metric == 0:  # Inner product
        self.index = faiss.IndexFlatIP(dim)
        self.index.add(self.embeddings)
    else:
        # Any other value is passed straight through to faiss.IndexFlat.
        self.index = faiss.IndexFlat(dim, self.metric)
        self.index.add(self.embeddings)
    self.model = wide_resnet101_2(pretrained=True, progress=True)
    self.model.eval()  # Don't forget to put model in evaluation mode!
    self.model.fc = Identity()  # expose pooled features instead of logits
    # Use recommended sequence of transforms for ImageNet pretrained models
    self.transforms = Compose([Resize(256, interpolation=Image.BICUBIC),  # Default is bilinear
                               CenterCrop(224),
                               ToTensor(),
                               Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])])
def process_network(network_type, model_file, num_classes):
    """Load a trained checkpoint and run it over the (module-level) img_loader.

    Args:
        network_type: 'densenet', 'resnet', or 'shuffle'.
        model_file: path to a saved state_dict.
        num_classes: output size the network was trained with.

    Returns:
        (confidences, class_predictions, raw_softmax) as numpy arrays, one
        row per image in iteration order.
    """
    if network_type == 'densenet':
        model = models.densenet121(num_classes=num_classes)
    elif network_type == 'resnet':
        # NOTE(review): 'resnet' maps to wide_resnet101_2 — confirm this
        # matches the architecture the checkpoint was trained with.
        model = models.wide_resnet101_2(num_classes=num_classes)
    elif network_type == 'shuffle':
        model = models.shufflenet_v2_x2_0(num_classes=num_classes)
    print(network_type)
    model_params = torch.load(model_file, map_location='cpu')
    model.load_state_dict(model_params)
    model.eval()
    model.cpu()
    s = nn.Softmax(dim=-1)
    class_pred_all = None
    conf_all = None
    all_raw_preds = None
    with torch.no_grad():
        start = time.time()
        # NOTE(review): img_loader is not a parameter — this relies on a
        # module-level global dataloader.
        for i, imgs in enumerate(img_loader):
            if i % 2 == 0:
                print(f"{i}/{len(img_loader)}")
            pred = model(imgs.cpu())
            pred = s(pred)
            confidence, class_pred = torch.max(pred, 1)
            # Accumulate batch results; first batch initializes the tensors.
            if conf_all is None:
                conf_all = confidence
                class_pred_all = class_pred
                all_raw_preds = pred
            else:
                conf_all = torch.cat((conf_all, confidence))
                class_pred_all = torch.cat((class_pred_all, class_pred))
                all_raw_preds = torch.cat((all_raw_preds, pred))
    print(time.time() - start)
    return conf_all.numpy(), class_pred_all.numpy(), all_raw_preds.numpy()
def load_model(name : str):
    """Return an ImageNet-pretrained backbone whose head is replaced with an
    N_CLASSES-way Linear layer. Matching is by name prefix, in declaration order.

    Raises:
        ValueError: when no known prefix matches.
    """
    # (prefix, lazy backbone factory, attribute holding the classification head)
    candidates = [
        ('resnext101', lambda: torch.hub.load('pytorch/vision:v0.5.0', 'resnext101_32x8d', pretrained=True), 'fc'),
        ('resnet152', lambda: models.resnet152(pretrained=True), 'fc'),
        ('resnet101', lambda: models.resnet101(pretrained=True), 'fc'),
        ('wide_resnet101', lambda: models.wide_resnet101_2(pretrained=True), 'fc'),
        ('densenet161', lambda: models.densenet161(pretrained=True), 'classifier'),
        ('densenet169', lambda: models.densenet169(pretrained=True), 'classifier'),
        ('densenet201', lambda: models.densenet201(pretrained=True), 'classifier'),
    ]
    for prefix, build, head_attr in candidates:
        if name.startswith(prefix):
            model = build()
            head = getattr(model, head_attr)
            setattr(model, head_attr, nn.Linear(head.in_features, N_CLASSES))
            return model
    raise ValueError('Select another model')
def get_pretrained_ResNet(rn_type, classes, mode):
    """Load a pretrained (Wide-)ResNet and attach a `classes`-way sigmoid head.

    Args:
        rn_type: one of "18", "34", "50", "101", "152", "50Wide", "101Wide".
        classes: number of output units.
        mode: "feature" (freeze the backbone) or "finetuning" (train all).

    Raises:
        RuntimeError: for an invalid mode.
        ValueError: for an unknown rn_type (previously model stayed None and
            the function crashed on model.parameters()).
    """
    # Validate mode BEFORE downloading weights (was checked only afterwards).
    if mode not in ["feature", "finetuning"]:
        raise RuntimeError("mode must be 'feature' or 'finetuning' ")
    builders = {
        "18": models.resnet18,
        "34": models.resnet34,
        "50": models.resnet50,
        "101": models.resnet101,
        "152": models.resnet152,
        "50Wide": models.wide_resnet50_2,
        "101Wide": models.wide_resnet101_2,
    }
    if rn_type not in builders:
        raise ValueError("unknown rn_type: {}".format(rn_type))
    model = builders[rn_type](pretrained=True)
    if mode == "feature":
        # Feature-extraction mode: freeze everything except the new head.
        for param in model.parameters():
            param.requires_grad = False
    model.fc = torch.nn.Sequential(
        nn.Linear(model.fc.in_features, classes, bias=True),
        nn.Sigmoid())
    return model
def __init__(self, model='ResNet-50'):
    """Feature trunk selected by name.

    The last two children (pooling and classifier head) of the chosen
    ImageNet-pretrained torchvision backbone are dropped; unknown names fall
    back to ResNet-50.
    """
    super(CNNModel, self).__init__()
    if model == 'AlexNet':
        print("use AlexNet")
        backbone = models.alexnet(pretrained=True)
    elif model == 'ResNet-152':
        print("use ResNet-152")
        backbone = models.resnet152(pretrained=True)
    elif model == 'ResNeXt-101-32x8d':
        print("use ResNetXt-101-32x8d")
        backbone = models.resnext101_32x8d(pretrained=True)
    elif model == 'Wide ResNet-101-2':
        print("use Wide ResNet-101-2")
        backbone = models.wide_resnet101_2(pretrained=True)
    else:
        print("use default ResNet-50")
        backbone = models.resnet50(pretrained=True)
    # Common tail: keep everything except the final two (head) children.
    self.features = nn.Sequential(*list(backbone.children())[:-2])
# NOTE(review): continuation of a TopModelName dispatch chain — the opening
# `if` branch (and the code consuming pre_model) lies outside this chunk, so
# this fragment is kept byte-identical.
elif TopModelName=='resnet34':
    pre_model = models.resnet34(pretrained=True, progress=True)
elif TopModelName=='resnet50':
    pre_model = models.resnet50(pretrained=True, progress=True)
elif TopModelName=='resnet101':
    pre_model = models.resnet101(pretrained=True, progress=True)
elif TopModelName=='resnet152':
    pre_model = models.resnet152(pretrained=True, progress=True)
elif TopModelName=="resnext50_32x4d":
    pre_model = models.resnext50_32x4d(pretrained=True, progress=True)
elif TopModelName=='resnext101_32x8d':
    pre_model = models.resnext101_32x8d(pretrained=True, progress=True)
elif TopModelName=="wide_resnet50_2":
    pre_model = models.wide_resnet50_2(pretrained=True, progress=True)
elif TopModelName=='wide_resnet101_2':
    pre_model = models.wide_resnet101_2(pretrained=True, progress=True)
##############################
elif TopModelName=="mnasnet0_5":
    pre_model = models.mnasnet0_5(pretrained=True, progress=True)
elif TopModelName=="mnasnet0_75":
    pre_model = models.mnasnet0_75(pretrained=True, progress=True)
elif TopModelName=="mnasnet1_0":
    pre_model = models.mnasnet1_0(pretrained=True, progress=True)
elif TopModelName=="mnasnet1_3":
    pre_model = models.mnasnet1_3(pretrained=True, progress=True)
elif TopModelName=="efficientnet-b0":
    # Third-party dependency imported lazily, only when this branch runs.
    from efficientnet_pytorch import EfficientNet
    pre_model = EfficientNet.from_pretrained('efficientnet-b0')
######################### Testing Models #################################
## Test for model_name in model_name_lst: # define model: with torch.no_grad(): if model_name is 'vgg16bn': model = models.vgg16_bn(pretrained=True) elif model_name is 'vgg19bn': model = models.vgg19_bn(pretrained=True) elif model_name is 'resnet34': model = models.resnet34(pretrained=True) elif model_name is 'resnet101': model = models.resnet101(pretrained=True) elif model_name is 'resnet152': model = models.resnet152(pretrained=True) elif model_name is 'wrn-101-2': model = models.wide_resnet101_2(pretrained=True) elif model_name is 'resnext101_32x4d': model = pretrainedmodels.__dict__[model_name](num_classes=1000, pretrained='imagenet') elif model_name is 'se_resnet101': model = pretrainedmodels.__dict__[model_name](num_classes=1000, pretrained='imagenet') elif model_name is 'senet154': model = pretrainedmodels.__dict__[model_name](num_classes=1000, pretrained='imagenet') elif model_name is 'nasnetalarge': model = pretrainedmodels.__dict__[model_name](num_classes=1000, pretrained='imagenet') elif model_name is 'pnasnet5large': model = pretrainedmodels.__dict__[model_name](num_classes=1000, pretrained='imagenet') elif model_name is 'resnext101_32x48d_wsl': model = torch.hub.load('facebookresearch/WSL-Images', 'resnext101_32x48d_wsl') elif model_name is 'effnetE7': from efficientnet_pytorch import EfficientNet model = EfficientNet.from_pretrained('efficientnet-b7')
def __init__(self, out_size, pretrained=True):
    """Wide-ResNet-101-2 trunk (fc removed) followed by a 1x1 conv that
    projects the 2048-channel features down to out_size channels."""
    super().__init__()
    backbone = models.wide_resnet101_2(pretrained=pretrained)
    layers = list(backbone.children())[:-1]  # drop the final fc layer
    layers.append(nn.Conv2d(2048, out_size, 1))
    self.net = nn.Sequential(*layers)