def net(model_name, pretrained, is_local, NUM_CLASS):
    """Build a ResNeSt classifier with a fresh Dropout+Linear head.

    Args:
        model_name: one of 'resnest50', 'resnest101', 'resnest200'.
        pretrained: download pretrained weights (only honored when not
            loading a local checkpoint).
        is_local: load weights from the local checkpoint path in PATH.
        NUM_CLASS: number of output classes for the replacement fc head.

    Returns:
        The configured model.

    Raises:
        ValueError: if model_name is not a supported ResNeSt variant.
    """
    builders = {
        'resnest50': resnest50,
        'resnest101': resnest101,
        'resnest200': resnest200,
    }
    if model_name not in builders:
        # BUG FIX: previously this only printed 'Error model name' and then
        # crashed later with a NameError on the undefined `model`.
        raise ValueError('Error model name: {}'.format(model_name))
    # Download pretrained weights only when not loading a local checkpoint
    # (equivalent to the original per-branch if/else).
    model = builders[model_name](pretrained=pretrained and not is_local)
    if is_local:
        model_path = PATH[model_name]
        # strict=False tolerates head-shape mismatches in the checkpoint.
        model.load_state_dict(torch.load(model_path), strict=False)
    model.fc = nn.Sequential(
        nn.Dropout(0.2),
        nn.Linear(in_features[model_name], NUM_CLASS)
    )
    return model
def __init__(self, nclass, backbone, dilated=True, norm_layer=None, pretrained=True):
    """Construct the shared segmentation base: a classification trunk
    exposed as ``self.pretrained`` plus the class count.

    Args:
        nclass: number of output classes, stored for subclasses.
        backbone: backbone identifier ('resnet50/101/152', 'resnext50/101',
            'resnest50/101'); unknown names fall back to a plain
            torchvision resnet101 with no weights loaded.
        dilated: use dilated convolutions in the resnet/resnest trunks.
        norm_layer: normalization layer class handed to the backbone.
        pretrained: load weights from a local ./<checkpoint>.pth file.
    """
    super(BaseNet, self).__init__()
    self.nclass = nclass
    # copying modules from pretrained HRNet+OCR
    # builder -> local checkpoint file for each supported backbone
    builders = {
        'resnet50': (lambda: resnet.resnet50(pretrained=False, dilated=dilated, norm_layer=norm_layer),
                     "./resnet50-19c8e357.pth"),
        'resnet101': (lambda: resnet.resnet101(pretrained=False, dilated=dilated, norm_layer=norm_layer),
                      "./resnet101-5d3b4d8f.pth"),
        'resnet152': (lambda: resnet.resnet152(pretrained=False, dilated=dilated, norm_layer=norm_layer),
                      "./resnet152-b121ed2d.pth"),
        'resnext50': (lambda: models.resnext50_32x4d(pretrained=False, progress=True,
                                                     replace_stride_with_dilation=[0, 1, 1],
                                                     norm_layer=norm_layer),
                      "./resnext50_32x4d-7cdf4587.pth"),
        'resnext101': (lambda: models.resnext101_32x8d(pretrained=False, progress=True,
                                                       replace_stride_with_dilation=[0, 1, 1],
                                                       norm_layer=norm_layer),
                       "./resnext101_32x8d-8ba56ff5.pth"),
        # BUG FIX: the resnest branches previously hard-coded dilated=True,
        # silently ignoring the `dilated` argument; they now honor it the
        # same way the resnet branches do.
        'resnest50': (lambda: resnest50(pretrained=False, dilated=dilated, norm_layer=norm_layer),
                      "./resnest50-528c19ca.pth"),
        'resnest101': (lambda: resnest101(pretrained=False, dilated=dilated, norm_layer=norm_layer),
                       "./resnest101-22405ba7.pth"),
    }
    if backbone in builders:
        build, checkpoint = builders[backbone]
        self.pretrained = build()
        if pretrained:
            self.pretrained.load_state_dict(torch.load(checkpoint))
    else:
        # Unknown name: plain torchvision resnet101, no dilation, no weights.
        self.pretrained = models.resnet101(pretrained=False)
def __init__(self, enet_type, out_dim):
    """ResNeSt-101 trunk feeding a 512-d embedding and a sub-center
    ArcFace head with ``out_dim`` classes.

    ``enet_type`` is accepted for interface compatibility but unused here.
    """
    super(ResNest101_Landmark, self).__init__()
    backbone = resnest101(pretrained=True)
    feat_dim = backbone.fc.in_features
    # Registration order of submodules is kept identical to the original.
    self.enet = backbone
    self.feat = nn.Linear(feat_dim, 512)
    self.swish = Swish_module()
    self.metric_classify = ArcMarginProduct_subcenter(512, out_dim)
    # Strip the stock classifier so the trunk yields pooled features.
    self.enet.fc = nn.Identity()
def __init__(self, enet_type, out_dim, n_meta_features=0, n_meta_dim=[512, 128], pretrained=False):
    """ResNeSt-101 image model with an optional metadata (tabular) branch.

    Args:
        enet_type: accepted for interface compatibility; unused here.
        out_dim: number of output classes.
        n_meta_features: width of the metadata vector; 0 disables the branch.
        n_meta_dim: hidden sizes of the metadata MLP (read-only here, so the
            mutable default is not mutated across calls).
        pretrained: load pretrained weights for the trunk.
    """
    super(Resnest_MMC, self).__init__()
    self.n_meta_features = n_meta_features
    self.enet = resnest101(pretrained=pretrained)
    self.dropouts = nn.ModuleList([
        nn.Dropout(0.5) for _ in range(5)
    ])
    in_ch = self.enet.fc.in_features
    if n_meta_features > 0:
        self.meta = nn.Sequential(
            nn.Linear(n_meta_features, n_meta_dim[0]),
            nn.BatchNorm1d(n_meta_dim[0]),
            Swish_Module(),
            nn.Dropout(p=0.3),
            nn.Linear(n_meta_dim[0], n_meta_dim[1]),
            nn.BatchNorm1d(n_meta_dim[1]),
            Swish_Module(),
        )
        # Metadata embedding is concatenated to the image features.
        in_ch += n_meta_dim[1]
    # BUG FIX: myfc previously chained BatchNorm1d(n_meta_dim[0]) and two
    # more Linear layers (copy-pasted from the meta branch) after
    # Linear(in_ch, out_dim). Those layers expect n_meta_dim[0] features but
    # receive out_dim, so the head crashed at runtime unless
    # out_dim == n_meta_dim[0] -- and even then it emitted n_meta_dim[1]
    # outputs instead of out_dim. A single linear classifier matches the
    # sibling Resnest_Melanoma model.
    self.myfc = nn.Linear(in_ch, out_dim)
    self.enet.fc = nn.Identity()
def __init__(self, backbone):
    """Mask/other-input encoder built on a ResNet/ResNeSt trunk.

    Two extra single-channel stem convs (conv1_m, conv1_o) are created whose
    output width matches the trunk's stem (128 for resnest101, 64 for plain
    resnets) -- presumably fused with conv1's output by the caller; TODO
    confirm against the forward pass.

    Raises:
        ValueError: for an unsupported backbone name (previously the chain
            fell through and crashed with a NameError on `resnet`).
    """
    super(Encoder_M, self).__init__()
    stem_width = 128 if backbone == 'resnest101' else 64
    self.conv1_m = nn.Conv2d(1, stem_width, kernel_size=7, stride=2, padding=3, bias=False)
    self.conv1_o = nn.Conv2d(1, stem_width, kernel_size=7, stride=2, padding=3, bias=False)
    if backbone == 'resnet50':
        resnet = models.resnet50(pretrained=True)
    elif backbone == 'resnet18':
        resnet = models.resnet18(pretrained=True)
    elif backbone == 'resnest101':
        resnet = resnest101()
    else:
        raise ValueError('unsupported backbone: {}'.format(backbone))
    # Re-expose the stem and the first three stages (through layer3).
    self.conv1 = resnet.conv1
    self.bn1 = resnet.bn1
    self.relu = resnet.relu  # 1/2, 64
    self.maxpool = resnet.maxpool
    self.res2 = resnet.layer1  # 1/4, 256
    self.res3 = resnet.layer2  # 1/8, 512
    self.res4 = resnet.layer3  # 1/8, 1024
    # ImageNet normalization constants, stored as non-trainable buffers.
    self.register_buffer('mean', torch.FloatTensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1))
    self.register_buffer('std', torch.FloatTensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1))
def __init__(self, enet_type, out_dim, n_meta_features=0, n_meta_dim=[512, 128], pretrained=False):
    """ResNeSt-101 image model with an optional metadata (CSV) branch.

    ``enet_type`` is accepted for interface compatibility but unused here.
    When ``n_meta_features > 0`` a small MLP embeds the metadata and its
    output width is added to the classifier's input width.
    """
    super(Resnest_Melanoma, self).__init__()
    self.n_meta_features = n_meta_features
    # CNN trunk of type resnest101: consumes the images.
    self.enet = resnest101(pretrained=pretrained)
    drop_layers = [nn.Dropout(0.5) for _ in range(5)]
    self.dropouts = nn.ModuleList(drop_layers)
    in_ch = self.enet.fc.in_features
    # FNN branch: consumes the CSV metadata.
    if n_meta_features > 0:
        meta_layers = [
            nn.Linear(n_meta_features, n_meta_dim[0]),
            nn.BatchNorm1d(n_meta_dim[0]),
            Swish_Module(),
            nn.Dropout(p=0.3),
            nn.Linear(n_meta_dim[0], n_meta_dim[1]),
            nn.BatchNorm1d(n_meta_dim[1]),
            Swish_Module(),
        ]
        self.meta = nn.Sequential(*meta_layers)
        in_ch += n_meta_dim[1]
    # define a classifier
    self.myfc = nn.Linear(in_ch, out_dim)
    self.enet.fc = nn.Identity()
def __init__(self, backbone="resnest101", norm_layer=nn.BatchNorm2d, pretrained=True):
    """Wrap a dilated ResNeSt trunk as ``self.pretrained``.

    Args:
        backbone: 'resnest50' or 'resnest101'.
        norm_layer: normalization layer class for the trunk.
        pretrained: load weights from the matching local ./<name>.pth file.

    Raises:
        ValueError: for an unsupported backbone name (previously an unknown
            name left ``self.pretrained`` unset, deferring the failure to an
            AttributeError at first use).
    """
    super(ResNeSt, self).__init__()
    builders = {
        'resnest50': (resnest50, "./resnest50-528c19ca.pth"),
        'resnest101': (resnest101, "./resnest101-22405ba7.pth"),
    }
    if backbone not in builders:
        raise ValueError('unsupported backbone: {}'.format(backbone))
    build, checkpoint = builders[backbone]
    self.pretrained = build(pretrained=False, dilated=True, norm_layer=norm_layer)
    if pretrained:
        self.pretrained.load_state_dict(torch.load(checkpoint))
def __init__(self, in_channels, pretrained=False, version='resnet101', clf=None):
    """ResNet-family backbone wrapper for segmentation heads.

    Builds the requested trunk, swaps its stem conv so ``in_channels``
    inputs are accepted, and exposes the intermediate layers needed by the
    chosen classifier head via IntermediateLayerGetter.
    """
    super(ResNet_BB, self).__init__()
    version = version.strip()
    # Name -> constructor dispatch replaces the original if/elif ladder.
    constructors = {
        'resnet18': models.resnet18,
        'resnet34': models.resnet34,
        'resnet50': models.resnet50,
        'resnet101': models.resnet101,
        'resnet152': models.resnet152,
        'resnext50_32x4d': models.resnext50_32x4d,
        'resnext101_32x8d': models.resnext101_32x8d,
        'wide_resnet50_2': models.wide_resnet50_2,
        'wide_resnet101_2': models.wide_resnet101_2,
        'resnest50': resnest50,
        'resnest101': resnest101,
        'resnest200': resnest200,
        'resnest269': resnest269,
    }
    if version not in constructors:
        raise NotImplementedError(
            'version {} is not supported as of now'.format(version))
    resnet = constructors[version](pretrained)
    # Replace the stem conv so arbitrary input channel counts are accepted.
    resnet.conv1 = nn.Conv2d(in_channels, 64, kernel_size=(7, 7),
                             stride=(2, 2), padding=(3, 3), bias=False)
    # Each head needs a different set of intermediate feature maps.
    if clf == 'deeplabv3Plus':
        return_layers = {'layer4': 'out', 'layer1': 'low_level'}
    elif clf == 'PointRend':
        return_layers = {'layer4': 'out', 'layer2': 'fine_grained'}
    else:
        return_layers = {'layer4': 'out'}
    self.backbone = IntermediateLayerGetter(resnet, return_layers)
    self.out_channels = 2048
    self.low_channels = 256
    self.fine_grained_channels = 512
def __init__(self, backbone):
    """Query encoder: re-exposes the stem and first three stages of a
    ResNet/ResNeSt trunk (through layer3, i.e. 1/8-resolution features).

    Raises:
        ValueError: for an unsupported backbone name (previously the chain
            fell through and crashed with a NameError on `resnet`).
    """
    super(Encoder_Q, self).__init__()
    if backbone == 'resnet50':
        resnet = models.resnet50(pretrained=True)
    elif backbone == 'resnet18':
        resnet = models.resnet18(pretrained=True)
    elif backbone == 'resnest101':
        resnet = resnest101()
    else:
        raise ValueError('unsupported backbone: {}'.format(backbone))
    self.conv1 = resnet.conv1
    self.bn1 = resnet.bn1
    self.relu = resnet.relu  # 1/2, 64
    self.maxpool = resnet.maxpool
    self.res2 = resnet.layer1  # 1/4, 256
    self.res3 = resnet.layer2  # 1/8, 512
    self.res4 = resnet.layer3  # 1/8, 1024
    # ImageNet normalization constants, stored as non-trainable buffers.
    self.register_buffer('mean', torch.FloatTensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1))
    self.register_buffer('std', torch.FloatTensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1))
def initialize_model(model_name, num_classes, feature_extract, use_pretrained=True):
    """Create a torchvision/ResNeSt/EfficientNet model with a fresh head.

    Args:
        model_name: e.g. 'resnet-18/34/50/152', 'resnest50/101/200/269',
            'alexnet', 'vgg', 'squeezenet', 'densenet', 'inception',
            'efficientnet-bN'.
        num_classes: output width of the replacement classifier.
        feature_extract: if True, freeze the pretrained trunk via
            set_parameter_requires_grad.
        use_pretrained: load pretrained weights.

    Returns:
        (model_ft, input_size): the model and the expected square input size.
    """
    # These variables are model specific and set in the chain below.
    model_ft = None
    input_size = 0
    family = model_name.split('-')[0]
    if family == "resnet":
        resnet_builders = {
            "18": models.resnet18,
            "34": models.resnet34,
            "50": models.resnet50,
            "152": models.resnet152,
        }
        depth = model_name.split('-')[1]
        if depth not in resnet_builders:
            print("Invalid resnet model name, exiting...")
            exit()
        model_ft = resnet_builders[depth](pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)
        input_size = 224
    elif model_name in ("resnest50", "resnest101", "resnest200", "resnest269"):
        resnest_builders = {
            "resnest50": resnest50,
            "resnest101": resnest101,
            "resnest200": resnest200,
            "resnest269": resnest269,
        }
        # BUG FIX: previously pretrained=True was hard-coded (ignoring
        # use_pretrained) and only the resnest50 branch set input_size, so
        # resnest101/200/269 returned input_size == 0.
        model_ft = resnest_builders[model_name](pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)
        input_size = 224
    elif model_name == "alexnet":
        # Alexnet
        model_ft = models.alexnet(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier[6].in_features
        model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)
        input_size = 224
    elif model_name == "vgg":
        # VGG11_bn
        model_ft = models.vgg11_bn(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier[6].in_features
        model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)
        input_size = 224
    elif model_name == "squeezenet":
        # Squeezenet: classifier is a 1x1 conv, not a Linear layer.
        model_ft = models.squeezenet1_0(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        model_ft.classifier[1] = nn.Conv2d(512, num_classes, kernel_size=(1, 1), stride=(1, 1))
        model_ft.num_classes = num_classes
        input_size = 224
    elif model_name == "densenet":
        # Densenet
        model_ft = models.densenet161(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier.in_features
        model_ft.classifier = nn.Linear(num_ftrs, num_classes)
        input_size = 224
    elif model_name == "inception":
        # Inception v3: expects (299, 299) inputs and has an auxiliary output.
        model_ft = models.inception_v3(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        # Handle the auxiliary network.
        num_ftrs = model_ft.AuxLogits.fc.in_features
        model_ft.AuxLogits.fc = nn.Linear(num_ftrs, num_classes)
        # Handle the primary network.
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)
        input_size = 299
    elif family == "efficientnet":
        model_ft = EfficientNet.from_pretrained(model_name)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft._fc.in_features
        model_ft._fc = nn.Sequential(
            nn.Linear(num_ftrs, 1000, bias=True),
            nn.ReLU(),
            nn.Dropout(p=0.5),
            nn.Linear(1000, num_classes, bias=True),
        )
        input_size = 299
    else:
        print("Invalid model name, exiting...")
        exit()
    return model_ft, input_size
elif args.model == "res_cbam_nd": model = resnext101_32x8d(pretrained=True, progress=True) model.fc = nn.Linear(2048, 3) elif args.model == "res_wsl": model = resnext101_32x8d_wsl(progress=True) # model = resnext101_32x16d_wsl(progress=True) model.fc = nn.Sequential(nn.Dropout(0.2), nn.Linear(2048, 3)) elif args.model == "res_18": model = resnet18(pretrained=True, progress=True) model.fc = nn.Sequential(nn.Dropout(0.5), nn.Linear(512, 3)) elif args.model == "senet": model = se_resnext101(num_classes=3) elif args.model == "efficient": model = efficientnet(size='b4', num_classes=3) # too big elif args.model == "resnest": model = resnest101(pretrained=True) # 50, 101, 200, 269 model.fc = nn.Sequential(nn.Dropout(0.2), nn.Linear(2048, 3)) elif args.model == "hrnet": from models.hrnet.lib.config import config from models.hrnet.lib.config import update_config from models.hrnet.lib.models.cls_hrnet import get_cls_net update_config(config, args) model = get_cls_net(config) model.fc = nn.Sequential(nn.Dropout(0.2), nn.Linear(2048, 3)) else: raise ValueError model.cuda() if args.mode == "train": train_dataset = data_loader.build_dataset_train(args.data_path, (args.h, args.w)) train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size,
def resnest101(config):
    """Factory wrapper returning a pretrained ResNeSt-101.

    ``config`` is accepted to match the factory interface but is not used
    by this builder.
    """
    model = resnest.resnest101(pretrained=True)
    return model