Example #1
import os

import torch
import torch.nn as nn
import pretrainedmodels


def build_model(model_name):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # load the pretrained backbone; supported names: senet154, se_resnet152,
    # se_resnext101_32x4d, resnet152, resnet101, densenet201

    if model_name == 'senet154':
        model = pretrainedmodels.senet154(pretrained='imagenet')
    elif model_name == 'se_resnet152':
        model = pretrainedmodels.se_resnet152(pretrained='imagenet')
    elif model_name == 'se_resnext101_32x4d':
        model = pretrainedmodels.se_resnext101_32x4d(pretrained='imagenet')
    elif model_name == 'resnet152':
        model = pretrainedmodels.resnet152(pretrained='imagenet')
    elif model_name == 'resnet101':
        model = pretrainedmodels.resnet101(pretrained='imagenet')
    elif model_name == 'densenet201':
        model = pretrainedmodels.densenet201(pretrained='imagenet')
    else:
        raise ValueError('unsupported model_name: %s' % model_name)

    model.to(device)
    for param in model.parameters():
        param.requires_grad = False

    num_ftrs = model.last_linear.in_features

    class CustomModel(nn.Module):
        def __init__(self, model):
            super(CustomModel, self).__init__()
            self.features = nn.Sequential(*list(model.children())[:-1])
            self.classifier = nn.Sequential(
                torch.nn.Linear(num_ftrs, 128),
                torch.nn.Dropout(0.3),  # drop 30% of the activations
                torch.nn.Linear(128, 7))

        def forward(self, x):
            x = self.features(x)
            x = x.view(x.size(0), -1)
            x = self.classifier(x)
            return x

    model = CustomModel(model)
    freeze_layer(model.features)
    num_ftrs = list(model.classifier.children())[-1].out_features

    model.to(device)
    model.name = model_name
    PATH = os.path.abspath(os.path.dirname(__file__))

    PATH_par = os.path.abspath(os.path.join(PATH, os.pardir))
    path_to_model = os.path.join(PATH_par, 'pretrained_model', '128_7')

    model.load_state_dict(
        torch.load(os.path.join(path_to_model, '%s.pth' % (model_name))))
    model.to(device)
    for param in model.parameters():
        param.requires_grad = False

    return model, num_ftrs
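
A minimal usage sketch for the snippet above (illustrative, not part of the original example). It assumes pretrainedmodels is installed, the project's freeze_layer helper is importable, a matching '<model_name>.pth' checkpoint exists under pretrained_model/128_7, and inputs are 224x224 RGB images.

import torch

model, num_ftrs = build_model('se_resnet152')   # num_ftrs is the head's output size (7)
model.eval()

x = torch.randn(4, 3, 224, 224)                 # dummy batch; 224x224 input size assumed
x = x.to(next(model.parameters()).device)       # keep the batch on the model's device
with torch.no_grad():
    logits = model(x)                           # shape: (4, 7)
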
Example #2
    def __init__(self, num_classes):
        super(SENet, self).__init__()
        self.name = "SENET"
        pretrained = model_zoo.se_resnet152()

        self.features = nn.Sequential(pretrained.layer0, pretrained.layer1,
                                      pretrained.layer2, pretrained.layer3,
                                      pretrained.layer4)
        self.fc = nn.Linear(2048, num_classes)
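
The example shows only the constructor; a plausible forward pass (a sketch, not from the original source) would pool the 2048-channel SE-ResNet152 feature maps and apply the linear head defined above:

    def forward(self, x):
        x = self.features(x)     # assumed to yield (N, 2048, H, W) feature maps
        x = x.mean(dim=[2, 3])   # global average pooling -> (N, 2048)
        return self.fc(x)        # (N, num_classes)
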
Example #3
    def __init__(self, model_path):
        super(seresnet152, self).__init__()
        se152 = se_resnet152(pretrained=None)
        checkpoint = torch.load(model_path)
        se152.load_state_dict(checkpoint)
        self.norm = L2N()
        self.backbone = nn.Sequential(*list(se152.children())[:-2])
        self.rmac = Rmac_Pooling()
        self.ramac = Ramac_Pooling()
        self.Grmac = Grmac_Pooling(p=3.5)
        self.Mac = Mac_Pooling()
        self.SPoC = SPoC_pooling()
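
Again only __init__ is shown. A hedged sketch of a matching forward, assuming each pooling module maps an (N, C, H, W) feature map to an (N, C) descriptor and L2N performs L2 normalisation:

    def forward(self, x, pooling='rmac'):
        feats = self.backbone(x)                    # (N, 2048, H, W) for SE-ResNet152
        heads = {'rmac': self.rmac, 'ramac': self.ramac, 'grmac': self.Grmac,
                 'mac': self.Mac, 'spoc': self.SPoC}
        desc = heads[pooling](feats)                # assumed to return (N, 2048)
        return self.norm(desc)                      # L2-normalised global descriptor
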
Example #4
def se_resnet152(num_classes=1000, pretrained=None):
    model = pretrainedmodels.se_resnet152(num_classes=num_classes, pretrained=pretrained)
    return model
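
A brief usage note for this factory (illustrative): with pretrainedmodels, the ImageNet weights are tied to the 1000-class head, while pretrained=None permits an arbitrary class count.

model = se_resnet152(num_classes=1000, pretrained='imagenet')  # ImageNet-pretrained weights
model = se_resnet152(num_classes=10, pretrained=None)          # randomly initialised, 10 classes
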
Example #5
    def __init__(self, num_classes):
        super(model_seresnet, self).__init__()
        self.senet = pretrainedmodels.se_resnet152()
        self.avgp = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Linear(2048, num_classes)
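
Only the constructor is shown; a sketch of a matching forward pass (an assumption, not from the source), relying on pretrainedmodels' SE-ResNet152 exposing a features() method that returns 2048-channel maps:

    def forward(self, x):
        x = self.senet.features(x)   # (N, 2048, H, W)
        x = self.avgp(x)             # (N, 2048, 1, 1)
        x = x.view(x.size(0), -1)    # (N, 2048)
        return self.fc(x)            # (N, num_classes)
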
Example #6
def build_model(model_name):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # load the pretrained backbone; supported names: senet154, se_resnet152,
    # se_resnext101_32x4d, resnet152, resnet101, densenet201

    if model_name == 'senet154':
        model = pretrainedmodels.senet154(pretrained='imagenet')
    elif model_name == 'se_resnet152':
        model = pretrainedmodels.se_resnet152(pretrained='imagenet')
    elif model_name == 'se_resnext101_32x4d':
        model = pretrainedmodels.se_resnext101_32x4d(pretrained='imagenet')
    elif model_name == 'resnet152':
        model = pretrainedmodels.resnet152(pretrained='imagenet')
    elif model_name == 'resnet101':
        model = pretrainedmodels.resnet101(pretrained='imagenet')
    elif model_name == 'densenet201':
        model = pretrainedmodels.densenet201(pretrained='imagenet')
    else:
        raise ValueError('unsupported model_name: %s' % model_name)

    model.to(device)
    for param in model.parameters():
        param.requires_grad = False

    num_ftrs = model.last_linear.in_features

    class CustomModel(nn.Module):
        def __init__(self, model):
            super(CustomModel, self).__init__()
            self.features = nn.Sequential(*list(model.children())[:-1])
            self.classifier = nn.Sequential(
                torch.nn.Linear(num_ftrs, 128),
                torch.nn.Dropout(0.3),  # drop 30% of the activations
                torch.nn.Linear(128, 7)
            )

        def forward(self, x):
            x = self.features(x)
            x = x.view(x.size(0), -1)
            x = self.classifier(x)
            return x

    model = CustomModel(model)
    freeze_layer(model.features)
    model.to(device)
    for param in model.parameters():
        param.requires_grad = False

    
    class CustomModel1(nn.Module):
        def __init__(self, model):
            super(CustomModel1, self).__init__()
            self.features = nn.Sequential(*list(model.children())[:-1])
            # keep only the first Linear layer of the old head, so this model
            # emits 128-d features instead of 7-way logits
            self.classifier = nn.Sequential(
                *list(model.classifier.children())[:1]
            )

        def forward(self, x):
            x = self.features(x)
            x = x.view(x.size(0), -1)
            x = self.classifier(x)
            return x

    model = CustomModel1(model)
    num_ftrs = list(model.classifier.children())[-1].out_features  # 128
    model.to(device)
    return model, num_ftrs
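
Unlike Example #1, this variant keeps only the first Linear layer of the head, so the returned model emits 128-dimensional embeddings rather than 7-way logits. A minimal usage sketch (illustrative; assumes pretrainedmodels is installed and 224x224 inputs):

import torch

extractor, emb_dim = build_model('se_resnet152')   # emb_dim == 128
extractor.eval()

x = torch.randn(2, 3, 224, 224)
x = x.to(next(extractor.parameters()).device)
with torch.no_grad():
    embeddings = extractor(x)                      # shape: (2, 128)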