Code example #1
import types

import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models


def densenet121(input_size=(3, 224, 224), num_classes=1000, pretrained=None):
    model = models.densenet121(num_classes=num_classes, pretrained=pretrained)
    # add_instances_to_torchvisionmodel is a project-level helper (not shown here),
    # expected to attach pretrainedmodels-style attributes such as last_linear.
    model = add_instances_to_torchvisionmodel(model)
    # Replace the first Conv2d layer to match the new input shape
    if input_size != (3, 224, 224):
        model.features[0] = nn.Conv2d(input_size[0],
                                      64,
                                      kernel_size=(7, 7),
                                      stride=(2, 2),
                                      padding=(3, 3),
                                      bias=False)
        model.input_size = input_size

    # Determine the kernel size for the new average-pooling layer by passing
    # a dummy tensor through the feature extractor
    test_tensor = torch.randn((1, input_size[0], input_size[1], input_size[2]))
    features = model.features(test_tensor)
    avg_pool2d_kernel_size = (features.shape[2], features.shape[3])

    # Compute the flattened feature size feeding the last linear layer
    x = F.avg_pool2d(features, kernel_size=avg_pool2d_kernel_size, stride=1)
    x = x.view(x.size(0), -1).shape[1]
    model.last_linear = nn.Linear(in_features=x, out_features=num_classes)

    # Redefine logits/forward so they use the new pooling kernel and last_linear
    def logits(self, features):
        x = F.relu(features, inplace=True)
        x = F.avg_pool2d(x, kernel_size=avg_pool2d_kernel_size, stride=1)
        x = x.view(x.size(0), -1)
        x = self.last_linear(x)
        return x

    def forward(self, input):
        x = self.features(input)
        x = self.logits(x)
        return x

    # Bind the redefined methods to this model instance
    model.logits = types.MethodType(logits, model)
    model.forward = types.MethodType(forward, model)
    return model
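A hypothetical usage sketch for the factory above (it assumes the project-level helper add_instances_to_torchvisionmodel is importable; the input size and class count are arbitrary illustrations):

model = densenet121(input_size=(1, 128, 128), num_classes=10, pretrained=None)
out = model(torch.randn(2, 1, 128, 128))
print(out.shape)  # expected: torch.Size([2, 10])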
Code example #2
File: models.py  Project: dodler/kgl
import torch
import pretrainedmodels as pm
from efficientnet_pytorch import EfficientNet


def get_model(name):
    if name == 'effb3':
        model = EfficientNet.from_pretrained('efficientnet-b3')
        model._fc = torch.nn.Linear(1536, 2)
        return model

    if name == 'effb1':
        model = EfficientNet.from_pretrained('efficientnet-b1')
        model._fc = torch.nn.Linear(1280, 2)
        return model

    if name == 'dnet161':
        model = pm.densenet161()
        # densenet161 exposes 2208-dimensional features before the classifier
        model.last_linear = torch.nn.Linear(2208, 2)
        return model

    if name == 'effb0':
        model = EfficientNet.from_pretrained('efficientnet-b0')
        model._fc = torch.nn.Linear(1280, 2)
        return model

    if name == 'dnet121':
        model = pm.densenet121(pretrained='imagenet')
        model.last_linear = torch.nn.Linear(1024, 2)
        return model

    if name == 'se_resnext50_32x4d':
        model = pm.se_resnext50_32x4d()
        model.last_linear = torch.nn.Linear(2048, 2)
        return model

    if name == 'effb7':
        model = EfficientNet.from_pretrained('efficientnet-b7')
        # efficientnet-b7 produces 2560-dimensional features before _fc
        model._fc = torch.nn.Linear(2560, 2)
        return model

    raise Exception('model {} is not supported'.format(name))
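A hypothetical usage sketch for get_model: the hard-coded two-unit heads suggest binary classification, so a forward pass on an ImageNet-sized batch might look like this (the model name and input resolution are illustrative choices):

model = get_model('effb0')
logits = model(torch.randn(4, 3, 224, 224))
print(logits.shape)  # expected: torch.Size([4, 2])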
Code example #3
    def __init__(self):
        super().__init__()

        model = pm.densenet121()
        # pretrainedmodels' densenet121 exposes a 1024-feature last_linear head
        # (not the EfficientNet-style _fc attribute)
        model.last_linear = torch.nn.Linear(1024, 2)
        self.model = model
Code example #4
import torch.nn as nn
import pretrainedmodels


class DenseNet121(nn.Module):
    def __init__(self, num_classes):
        super(DenseNet121, self).__init__()
        self.base = pretrainedmodels.densenet121()
        self.backbone = nn.Sequential(*list(self.base.children())[:-1])
        self.head = nn.Linear(self.base.last_linear.in_features, num_classes)
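The fragment above defines only the constructor. A forward method that would typically pair with this backbone/head split might look like the sketch below; this is an assumption rather than part of the original file, and it presumes `import torch.nn.functional as F` at module level:

    def forward(self, x):
        # Hypothetical forward pass, following the common ReLU + global-average-pool head
        x = self.backbone(x)             # DenseNet feature maps, e.g. [B, 1024, 7, 7]
        x = F.relu(x, inplace=True)
        x = F.adaptive_avg_pool2d(x, 1)  # global average pooling
        x = x.view(x.size(0), -1)        # flatten to [B, 1024]
        return self.head(x)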
Code example #5
import torch
import torch.nn as nn
import pretrainedmodels as PM
# MobileNetV2 and the module-level `device` used below are assumed to be
# provided elsewhere in the project, e.g.:
# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


def Model_builder(configer):

    model_name = configer.model['name']
    No_classes = configer.dataset_cfg["id_cfg"]["num_classes"]
    model_pretrained = configer.model['pretrained']
    model_dataparallel = configer.model["DataParallel"]
    model_gpu_replica = configer.model["Multi_GPU_replica"]
    gpu_ids = configer.train_cfg["gpu"]

    if model_name == "Inceptionv3":
        model = PM.inceptionv3(num_classes=1000, pretrained=model_pretrained)
        d = model.last_linear.in_features
        model.last_linear = nn.Linear(d, No_classes)

    elif model_name == "Xception":
        model = PM.xception(num_classes=1000, pretrained=model_pretrained)
        d = model.last_linear.in_features
        model.last_linear = nn.Linear(d, No_classes)

    elif model_name == "VGG_19":
        model = PM.vgg19(num_classes=1000, pretrained=model_pretrained)
        d = model.last_linear.in_features
        model.last_linear = nn.Linear(d, No_classes)

    elif model_name == "Resnet18":
        model = PM.resnet18(num_classes=1000, pretrained=model_pretrained)
        d = model.last_linear.in_features
        model.last_linear = nn.Linear(d, No_classes)

    elif model_name == "Resnet50":
        model = PM.resnet50(num_classes=1000, pretrained=model_pretrained)
        d = model.last_linear.in_features
        model.last_linear = nn.Linear(d, No_classes)

    elif model_name == "Resnet101":
        model = PM.resnet101(num_classes=1000, pretrained=model_pretrained)
        d = model.last_linear.in_features
        model.last_linear = nn.Linear(d, No_classes)

    elif model_name == "Resnet152":
        model = PM.resnet152(num_classes=1000, pretrained=model_pretrained)
        d = model.last_linear.in_features
        model.last_linear = nn.Linear(d, No_classes)

    elif model_name == "Resnet34":
        model = PM.resnet34(num_classes=1000, pretrained=model_pretrained)
        d = model.last_linear.in_features
        model.last_linear = nn.Linear(d, No_classes)

    elif model_name == "Densenet121":
        model = PM.densenet121(num_classes=1000, pretrained=model_pretrained)
        d = model.last_linear.in_features
        model.last_linear = nn.Linear(d, No_classes)

    elif model_name == "ResNeXt101-32":
        model = PM.resnext101_32x4d(num_classes=1000,
                                    pretrained=model_pretrained)
        d = model.last_linear.in_features
        model.last_linear = nn.Linear(d, No_classes)

    elif model_name == "ResNeXt101-64":
        model = PM.resnext101_64x4d(num_classes=1000,
                                    pretrained=model_pretrained)
        d = model.last_linear.in_features
        model.last_linear = nn.Linear(d, No_classes)

    elif model_name == "MobilenetV2":
        model = MobileNetV2(n_class=No_classes)

    else:
        raise ValueError("Model architecture '{}' is not supported".format(model_name))

    # Wrap the model for data parallelism if configured

    if model_dataparallel:

        model = torch.nn.DataParallel(model.to(device), device_ids=gpu_ids)

    elif model_gpu_replica:

        # A single-process group must use rank 0 (rank < world_size)
        torch.distributed.init_process_group(backend='nccl',
                                             world_size=1,
                                             rank=0)
        model = torch.nn.parallel.DistributedDataParallel(model.to(device),
                                                          device_ids=gpu_ids)

    else:
        model = model.to(device)

    print('---------- Model Loaded')

    return model
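A minimal, hypothetical usage sketch for Model_builder: DummyConfig mirrors the attribute accesses made inside the function, while the real project presumably builds an equivalent `configer` object from its configuration files.

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

class DummyConfig:  # hypothetical stand-in for the project's configer object
    model = {'name': 'Resnet18', 'pretrained': None,
             'DataParallel': False, 'Multi_GPU_replica': False}
    dataset_cfg = {'id_cfg': {'num_classes': 2}}
    train_cfg = {'gpu': [0]}

model = Model_builder(DummyConfig())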