Example #1
def classification_model_resnet34(**kwargs):
    base_model = pretrainedmodels.resnet34()
    return ClassificationModelResnet(base_model,
                                     base_model_features=512,
                                     nb_features=6,
                                     base_model_l1_outputs=64,
                                     **kwargs)
Example #2
def SK_resnet34(num_classes=1000, pretrained=True):
    model = pretrainedmodels.resnet34(pretrained='imagenet')
    model.last_linear = nn.Sequential(
        nn.Dropout(),
        nn.Linear(512, num_classes, bias=True)
    )
    return model
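A minimal usage sketch for SK_resnet34 above (assuming the pretrainedmodels package is installed; note that the pretrained argument is accepted but never consulted, ImageNet weights are always loaded). The wrapper's logits() method calls self.last_linear, so the replaced head is used transparently:

import torch

# Hedged usage sketch: a 224x224 RGB batch through the modified model.
model = SK_resnet34(num_classes=10)
model.eval()
with torch.no_grad():
    out = model(torch.randn(2, 3, 224, 224))  # expected shape: (2, 10)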
Example #3
 def __init__(self, classes=2):
     super(Resnet, self).__init__()
     model = resnet34()
     features = list(model.children())[:-1]
     features[0] = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)
     self.features = nn.Sequential(*features)
     self.classifier = nn.Linear(512, classes, bias=True)
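The replaced 1-channel conv1 above starts from random weights. A common variant, sketched here under the assumption that resnet34 is the torchvision model (this is not part of the example itself), seeds it with the mean of the pretrained RGB kernels:

import torch
import torch.nn as nn
from torchvision.models import resnet34

# Variant sketch: initialise a 1-channel conv1 from pretrained RGB weights.
pretrained = resnet34(pretrained=True)
conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)
with torch.no_grad():
    conv1.weight.copy_(pretrained.conv1.weight.mean(dim=1, keepdim=True))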
Example #4
def segmentation_model_resnet34_combine_last_var2_dec2(**kwargs):
    base_model = pretrainedmodels.resnet34()
    return ClassificationModelResnetCombineLastVariable2(
        base_model,
        DecoderBlock=DecoderBlock2,
        nb_features=6,
        base_model_l1_outputs=64,
        **kwargs)
Example #5
def classification_model_resnet34_combine_l3(**kwargs):
    base_model = pretrainedmodels.resnet34()
    return ClassificationModelResnetCombineL3(base_model,
                                              base_model_features=512,
                                              base_model_l3_features=256,
                                              nb_features=6,
                                              base_model_l1_outputs=64,
                                              **kwargs)
Example #6
def classification_model_resnet34_combine_last_var(**kwargs):
    base_model = pretrainedmodels.resnet34()
    return ClassificationModelResnetCombineLastVariable(
        base_model,
        base_model_features=512,
        nb_features=6,
        base_model_l1_outputs=64,
        **kwargs)
Example #7
    def __init__(self, freeze=True):
        import pretrainedmodels
        super(PretrConvNet, self).__init__()

        self.backbone = pretrainedmodels.resnet34(pretrained='imagenet')

        if freeze:
            for param in self.backbone.parameters():
                param.requires_grad = False

        self.backbone.last_linear = torch.nn.Linear(self.backbone.last_linear.in_features, 256)
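With the backbone frozen, only the freshly created last_linear still has requires_grad=True. A minimal sketch of building an optimizer over just those parameters (assuming PretrConvNet as defined above):

import torch

# Only the new 256-unit head is trainable when freeze=True.
net = PretrConvNet(freeze=True)
trainable = [p for p in net.parameters() if p.requires_grad]
optimizer = torch.optim.Adam(trainable, lr=1e-3)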
Example #8
def resnet34(input_size=(3, 224, 224), num_classes=1000, pretrained=None):
    model = models.resnet34(pretrained=pretrained)
    model = add_instances_to_torchvisionmodel(model)
    # Change the first Conv2d layer to match the new input shape
    if input_size != (3, 224, 224):
        model.conv1 = nn.Conv2d(input_size[0],
                                64,
                                kernel_size=(7, 7),
                                stride=(2, 2),
                                padding=(3, 3),
                                bias=False)
        model.input_size = input_size

    del model.fc
    del model.avgpool

    # calc kernel_size on new_avgpool2d layer
    test_tensor = torch.randn((1, input_size[0], input_size[1], input_size[2]))
    features = model.features(test_tensor)
    # print(features, features.shape[2], features.shape[3])
    avg_pool2d_kernel_size = (features.shape[2], features.shape[3])

    # calc last linear size
    x = F.avg_pool2d(features, kernel_size=avg_pool2d_kernel_size)
    x = x.view(x.size(0), -1).shape[1]
    model.last_linear = nn.Linear(in_features=x, out_features=num_classes)

    #del model.logits
    #del model.forward
    def logits(self, features):
        x = F.relu(features, inplace=False)
        x = F.avg_pool2d(x, kernel_size=avg_pool2d_kernel_size, stride=1)
        x = x.view(x.size(0), -1)
        x = self.last_linear(x)
        return x

    def forward(self, input):
        x = self.features(input)
        x = self.logits(x)
        return x

    model.logits = types.MethodType(logits, model)
    model.forward = types.MethodType(forward, model)
    return model
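A hedged usage sketch for the factory above. It relies on add_instances_to_torchvisionmodel attaching a features() method, which is assumed here; a 1-channel 256x256 input exercises both the conv1 replacement and the recomputed pooling kernel:

import torch

# Assumes the example's imports (models, nn, F, types) and helpers are available.
model = resnet34(input_size=(1, 256, 256), num_classes=10)
out = model(torch.randn(2, 1, 256, 256))  # expected shape: (2, 10)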
Example #9
def get_resnet34():
    model = resnet34(pretrained=True)
    w = model.conv1.weight
    model.conv1 = nn.Conv2d(4,
                            64,
                            kernel_size=7,
                            stride=2,
                            padding=3,
                            bias=False)
    w = torch.nn.Parameter(
        torch.cat((w, torch.mean(w, dim=1).unsqueeze(1)), dim=1))
    model.conv1.weight = w

    model.avgpool = nn.Sequential(nn.AvgPool2d(7, stride=1), nn.Dropout2d(),
                                  nn.AvgPool2d(7, stride=1))

    model.fc = nn.Sequential(nn.Linear(8192, 2048), nn.Dropout(),
                             nn.Linear(2048, 28))

    return model
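A usage sketch for get_resnet34, assuming resnet34 here is the torchvision constructor. With the two chained 7x7 average pools, the 8192-unit head implies a 512x512 input (16x16 features are pooled down to 4x4, and 512 * 4 * 4 = 8192):

import torch

# 4-channel input; the 4th conv1 kernel is the mean of the pretrained RGB kernels.
model = get_resnet34()
out = model(torch.randn(2, 4, 512, 512))  # expected shape: (2, 28)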
Example #10
    print('Model: %s' % params['model'])
    print('The extracted features will be saved to --> %s' %
          params['feat_dir'])

    if params['model'] == 'resnet101':
        C, H, W = 3, 224, 224
        model = pretrainedmodels.resnet101(pretrained='imagenet')
    elif params['model'] == 'resnet152':
        C, H, W = 3, 224, 224
        model = pretrainedmodels.resnet152(pretrained='imagenet')
    elif params['model'] == 'resnet18':
        C, H, W = 3, 224, 224
        model = pretrainedmodels.resnet18(pretrained='imagenet')
    elif params['model'] == 'resnet34':
        C, H, W = 3, 224, 224
        model = pretrainedmodels.resnet34(pretrained='imagenet')
    elif params['model'] == 'inceptionresnetv2':
        C, H, W = 3, 299, 299
        model = pretrainedmodels.inceptionresnetv2(
            num_classes=1001, pretrained='imagenet+background')
    elif params['model'] == 'googlenet':
        C, H, W = 3, 224, 224
        model = googlenet(pretrained=True)
        print(model)
    else:
        print("doesn't support %s" % (params['model']))

    if params['model'] != 'googlenet':
        load_image_fn = utils.LoadTransformImage(model)
        model.last_linear = utils.Identity()
    else:
Example #11
def get_base_model(config):
    model_name = config.backbone
    pretrained = config.pretrained

    if pretrained is not None and pretrained != 'imagenet':
        weights_path = pretrained
        pretrained = None
    else:
        weights_path = None

    if config.multibranch:
        input_channels = config.multibranch_input_channels
    else:
        input_channels = config.num_slices
        if hasattr(config, 'append_masks') and config.append_masks:
            input_channels *= 2

    _available_models = ['senet154', 'se_resnext50', 'resnet34', 'resnet18']

    if model_name == 'senet154':
        cut_point = -3
        model = nn.Sequential(
            *list(pretrainedmodels.senet154(
                pretrained=pretrained).children())[:cut_point])
        num_features = 2048
    elif model_name == 'se_resnext50':
        cut_point = -2
        model = nn.Sequential(*list(
            pretrainedmodels.se_resnext50_32x4d(
                pretrained=pretrained).children())[:cut_point])
        num_features = 2048
    elif model_name == 'resnet34':
        cut_point = -2
        model = nn.Sequential(
            *list(pretrainedmodels.resnet34(
                pretrained=pretrained).children())[:cut_point])
        num_features = 512
    elif model_name == 'resnet18':
        cut_point = -2
        model = nn.Sequential(
            *list(pretrainedmodels.resnet18(
                pretrained=pretrained).children())[:cut_point])
        num_features = 512
    else:
        raise ValueError('Unavailable backbone, choose one from {}'.format(
            _available_models))

    if model_name in ['senet154', 'se_resnext50']:
        conv1 = model[0].conv1
    else:
        conv1 = model[0]

    if input_channels != 3:
        conv1_weights = deepcopy(conv1.weight)
        new_conv1 = nn.Conv2d(input_channels,
                              conv1.out_channels,
                              kernel_size=conv1.kernel_size,
                              stride=conv1.stride,
                              padding=conv1.padding,
                              bias=conv1.bias is not None)

        if weights_path is None:
            if input_channels == 1:
                new_conv1.weight.data.fill_(0.)
                new_conv1.weight[:, 0, :, :].data.copy_(conv1_weights[:,
                                                                      0, :, :])
            elif input_channels > 3:
                diff = (input_channels - 3) // 2

                new_conv1.weight.data.fill_(0.)
                new_conv1.weight[:,
                                 diff:diff + 3, :, :].data.copy_(conv1_weights)

        if model_name in ['senet154', 'se_resnext50']:
            model[0].conv1 = new_conv1
        else:
            model[0] = new_conv1

    if weights_path is not None:
        if model_name in ['senet154', 'se_resnext50']:
            conv1_str = '0.conv1.weight'
        else:
            conv1_str = '0.weight'
        weights = load_base_weights(weights_path, input_channels, conv1_str)
        model.load_state_dict(weights)

    return model, num_features
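A sketch of calling get_base_model with a minimal config. The field names are taken from the code above, but the real config class is not shown, so SimpleNamespace is only a stand-in and the example's own imports (pretrainedmodels, nn, deepcopy) are assumed:

import torch
from types import SimpleNamespace

# Stand-in config; pretrained=None keeps the backbone randomly initialised.
config = SimpleNamespace(backbone='resnet34', pretrained=None,
                         multibranch=False, num_slices=1)
backbone, num_features = get_base_model(config)  # conv1 rebuilt for 1 input channel
feats = backbone(torch.randn(2, 1, 224, 224))    # expected shape: (2, 512, 7, 7)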
Example #12
for train_index, test_index in skf.split(train_data['id'].values, train_data['label'].values, train_data['wsi'].values):
    folds_id_train.append(train_data['id'].values[train_index])
    folds_id_val.append(train_data['id'].values[test_index])
    folds_label_train.append(train_data['label'].values[train_index])
    folds_label_val.append(train_data['label'].values[test_index])
test_id = test_data['id'].values
test_label = test_data['label'].values



val_preds = []
val_labels = []
test_preds = []
scores_CV = []
for valid_idx in range(n_groups):
    base_model = pretrainedmodels.resnet34(num_classes=1000, pretrained='imagenet').to(device)  # load pretrained as base
    model = Net(base_model, 512).to(device)  # create model
    model.load_state_dict(torch.load(model_dir+'/resnet34.fold{}.best.pt'.format(valid_idx)))  # loading weights
    model.eval()
    valid_preds_idx = np.zeros((len(folds_id_val[valid_idx])))
    valid_target_idx = np.zeros((len(folds_id_val[valid_idx])))
    test_preds_idx = np.zeros((len(test_label)))
    val_loader = torch.utils.data.DataLoader(DataGenerator(folds_id_val[valid_idx], folds_label_val[valid_idx], validation_aug, train_im_dir),
                                             shuffle=False, pin_memory=False, num_workers=1, batch_size=1)
    test_loader = torch.utils.data.DataLoader(DataGenerator(test_id, test_label, validation_aug, test_im_dir),
                                              shuffle=False, pin_memory=False, num_workers=1, batch_size=1)
    # prediction for validation data
    with torch.no_grad():
        for batch_idx, (x, target) in enumerate(tqdm_notebook(val_loader)):
            #output = protein_model(x.to(device, dtype=torch.float))
            image = np.rollaxis(x.numpy()[0], 0, 3)
Example #13
 def __init__(self, num_class):
     super(Resnet34Classification, self).__init__()
     models = pretrainedmodels.resnet34()
     self.resnet = nn.Sequential(*list(models.children())[:-2])
     self.feature = nn.Conv2d(512, 32, kernel_size=1)
     self.out = nn.Conv2d(32, num_class, kernel_size=1)
Example #14
def Model_builder(configer):

    model_name = configer.model['name']
    No_classes = configer.dataset_cfg["id_cfg"]["num_classes"]
    model_pretrained = configer.model['pretrained']
    model_dataparallel = configer.model["DataParallel"]
    model_gpu_replica = configer.model["Multi_GPU_replica"]
    gpu_ids = configer.train_cfg["gpu"]

    if model_name == "Inceptionv3":
        model = PM.inceptionv3(num_classes=1000, pretrained=model_pretrained)
        d = model.last_linear.in_features
        model.last_linear = nn.Linear(d, No_classes)

    elif model_name == "Xception":
        model = PM.xception(num_classes=1000, pretrained=model_pretrained)
        d = model.last_linear.in_features
        model.last_linear = nn.Linear(d, No_classes)

    elif model_name == "VGG_19":
        model = PM.vgg19(num_classes=1000, pretrained=model_pretrained)
        d = model.last_linear.in_features
        model.last_linear = nn.Linear(d, No_classes)

    elif model_name == "Resnet18":
        model = PM.resnet18(num_classes=1000, pretrained=model_pretrained)
        d = model.last_linear.in_features
        model.last_linear = nn.Linear(d, No_classes)

    elif model_name == "Resnet50":
        model = PM.resnet50(num_classes=1000, pretrained=model_pretrained)
        d = model.last_linear.in_features
        model.last_linear = nn.Linear(d, No_classes)

    elif model_name == "Resnet101":
        model = PM.resnet101(num_classes=1000, pretrained=model_pretrained)
        d = model.last_linear.in_features
        model.last_linear = nn.Linear(d, No_classes)

    elif model_name == "Resnet152":
        model = PM.resnet152(num_classes=1000, pretrained=model_pretrained)
        d = model.last_linear.in_features
        model.last_linear = nn.Linear(d, No_classes)

    elif model_name == "Resnet34":
        model = PM.resnet34(num_classes=1000, pretrained=model_pretrained)
        d = model.last_linear.in_features
        model.last_linear = nn.Linear(d, No_classes)

    elif model_name == "Densenet121":
        model = PM.densenet121(num_classes=1000, pretrained=model_pretrained)
        d = model.last_linear.in_features
        model.last_linear = nn.Linear(d, No_classes)

    elif model_name == "ResNeXt101-32":
        model = PM.resnext101_32x4d(num_classes=1000,
                                    pretrained=model_pretrained)
        d = model.last_linear.in_features
        model.last_linear = nn.Linear(d, No_classes)

    elif model_name == "ResNeXt101-64":
        model = PM.resnext101_64x4d(num_classes=1000,
                                    pretrained=model_pretrained)
        d = model.last_linear.in_features
        model.last_linear = nn.Linear(d, No_classes)

    elif model_name == "MobilenetV2":
        model = MobileNetV2(n_class=No_classes)

    else:
        raise ImportError("Model Architecture not supported")

    # Performing Data Parallelism if configured

    if model_dataparallel:

        model = torch.nn.DataParallel(model.to(device), device_ids=gpu_ids)

    elif model_gpu_replica:

        torch.distributed.init_process_group(backend='nccl',
                                             world_size=1,
                                             rank=0)  # rank must be < world_size
        model = torch.nn.parallel.DistributedDataParallel(model.to(device),
                                                          device_ids=gpu_ids)

    else:
        model = model.to(device)

    print('---------- Model Loaded')

    return model
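The three-line head replacement in Model_builder is repeated for every pretrainedmodels branch; a sketch of factoring it out (replace_head is a hypothetical name, not part of the original code):

import torch.nn as nn

# Hypothetical helper: swap the pretrainedmodels classifier head for a new class count.
def replace_head(model, num_classes):
    d = model.last_linear.in_features
    model.last_linear = nn.Linear(d, num_classes)
    return model

# e.g. model = replace_head(PM.resnet34(num_classes=1000, pretrained=model_pretrained), No_classes)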
Example #15
def classification_model_se_preresnext26b(**kwargs):
    base_model = SE_PreResNet_BC_26b()
    return BaseClassificationModel(base_model,
                                   base_model_features=2048,
                                   combine_conv_features=128,
                                   nb_features=6,
                                   **kwargs)


def classification_model_resnext50(**kwargs):
    base_model = ResNext50()
    return BaseClassificationModel(base_model,
                                   base_model_features=2048,
                                   nb_features=6,
                                   **kwargs)


if __name__ == '__main__':
    base_model = pretrainedmodels.resnet34()
    model = ClassificationModelResnetCombineLastVariable(
        base_model,
        base_model_features=512,
        nb_features=6,
        base_model_l1_outputs=64)

    x5 = torch.zeros((4, 5, 384, 384))
    print(model(x5).shape)

    x1 = torch.zeros((4, 1, 384, 384))
    print(model(x1).shape)
Example #16
    def __init__(self,
                 backbone,
                 heads,
                 head_conv=128,
                 num_filters=[256, 256, 256],
                 pretrained=True,
                 dcn=False,
                 gn=False,
                 ws=False,
                 freeze_bn=False,
                 after_non_local='layer1',
                 non_local_hidden_channels=None):
        super().__init__()

        self.heads = heads

        if backbone == 'resnet18':
            pretrained = 'imagenet' if pretrained else None
            self.backbone = pretrainedmodels.resnet18(pretrained=pretrained)
            num_bottleneck_filters = 512
        elif backbone == 'resnet34':
            pretrained = 'imagenet' if pretrained else None
            self.backbone = pretrainedmodels.resnet34(pretrained=pretrained)
            num_bottleneck_filters = 512
        elif backbone == 'resnet50':
            pretrained = 'imagenet' if pretrained else None
            self.backbone = pretrainedmodels.resnet50(pretrained=pretrained)
            num_bottleneck_filters = 2048
        elif backbone == 'resnet101':
            pretrained = 'imagenet' if pretrained else None
            self.backbone = pretrainedmodels.resnet101(pretrained=pretrained)
            num_bottleneck_filters = 2048
        elif backbone == 'resnet152':
            pretrained = 'imagenet' if pretrained else None
            self.backbone = pretrainedmodels.resnet152(pretrained=pretrained)
            num_bottleneck_filters = 2048
        elif backbone == 'se_resnext50_32x4d':
            pretrained = 'imagenet' if pretrained else None
            self.backbone = pretrainedmodels.se_resnext50_32x4d(
                pretrained=pretrained)
            num_bottleneck_filters = 2048
        elif backbone == 'se_resnext101_32x4d':
            pretrained = 'imagenet' if pretrained else None
            self.backbone = pretrainedmodels.se_resnext101_32x4d(
                pretrained=pretrained)
            num_bottleneck_filters = 2048
        elif backbone == 'resnet34_v1b':
            self.backbone = timm.create_model('gluon_resnet34_v1b',
                                              pretrained=pretrained)
            convert_to_inplace_relu(self.backbone)
            num_bottleneck_filters = 512
        elif backbone == 'resnet50_v1d':
            self.backbone = timm.create_model('gluon_resnet50_v1d',
                                              pretrained=pretrained)
            convert_to_inplace_relu(self.backbone)
            num_bottleneck_filters = 2048
        elif backbone == 'resnet101_v1d':
            self.backbone = timm.create_model('gluon_resnet101_v1d',
                                              pretrained=pretrained)
            convert_to_inplace_relu(self.backbone)
            num_bottleneck_filters = 2048
        elif backbone == 'resnext50_32x4d':
            self.backbone = timm.create_model('resnext50_32x4d',
                                              pretrained=pretrained)
            convert_to_inplace_relu(self.backbone)
            num_bottleneck_filters = 2048
        elif backbone == 'resnext50d_32x4d':
            self.backbone = timm.create_model('resnext50d_32x4d',
                                              pretrained=pretrained)
            convert_to_inplace_relu(self.backbone)
            num_bottleneck_filters = 2048
        elif backbone == 'seresnext26_32x4d':
            self.backbone = timm.create_model('seresnext26_32x4d',
                                              pretrained=pretrained)
            convert_to_inplace_relu(self.backbone)
            num_bottleneck_filters = 2048
        elif backbone == 'resnet18_ctdet':
            self.backbone = models.resnet18()
            state_dict = torch.load(
                'pretrained_weights/ctdet_coco_resdcn18.pth')['state_dict']
            self.backbone.load_state_dict(state_dict, strict=False)
            num_bottleneck_filters = 512
        elif backbone == 'resnet50_maskrcnn':
            self.backbone = models.detection.maskrcnn_resnet50_fpn(
                pretrained=pretrained).backbone.body
            print(self.backbone)
            num_bottleneck_filters = 2048
        else:
            raise NotImplementedError

        if after_non_local is not None:
            self.after_non_local = after_non_local
            in_channels = getattr(self.backbone,
                                  after_non_local)[0].conv1.in_channels
            if non_local_hidden_channels is None:
                non_local_hidden_channels = in_channels // 2
            self.non_local = NonLocal2d(in_channels, non_local_hidden_channels)

        if freeze_bn:
            for m in self.backbone.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.weight.requires_grad = False
                    m.bias.requires_grad = False

        self.lateral4 = nn.Sequential(
            Conv2d(num_bottleneck_filters,
                   num_filters[0],
                   kernel_size=1,
                   bias=False,
                   ws=ws),
            nn.GroupNorm(32, num_filters[0])
            if gn else nn.BatchNorm2d(num_filters[0]), nn.ReLU(inplace=True))
        self.lateral3 = nn.Sequential(
            Conv2d(num_bottleneck_filters // 2,
                   num_filters[0],
                   kernel_size=1,
                   bias=False,
                   ws=ws),
            nn.GroupNorm(32, num_filters[0])
            if gn else nn.BatchNorm2d(num_filters[0]), nn.ReLU(inplace=True))
        self.lateral2 = nn.Sequential(
            Conv2d(num_bottleneck_filters // 4,
                   num_filters[1],
                   kernel_size=1,
                   bias=False,
                   ws=ws),
            nn.GroupNorm(32, num_filters[1])
            if gn else nn.BatchNorm2d(num_filters[1]), nn.ReLU(inplace=True))
        self.lateral1 = nn.Sequential(
            Conv2d(num_bottleneck_filters // 8,
                   num_filters[2],
                   kernel_size=1,
                   bias=False,
                   ws=ws),
            nn.GroupNorm(32, num_filters[2])
            if gn else nn.BatchNorm2d(num_filters[2]), nn.ReLU(inplace=True))

        self.decode3 = nn.Sequential(
            DCN(num_filters[0], num_filters[1],
                kernel_size=3, padding=1, stride=1) if dcn else \
            Conv2d(num_filters[0], num_filters[1],
                   kernel_size=3, padding=1, bias=False, ws=ws),
            nn.GroupNorm(32, num_filters[1]) if gn else nn.BatchNorm2d(num_filters[1]),
            nn.ReLU(inplace=True))
        self.decode2 = nn.Sequential(
            Conv2d(num_filters[1],
                   num_filters[2],
                   kernel_size=3,
                   padding=1,
                   bias=False,
                   ws=ws),
            nn.GroupNorm(32, num_filters[2])
            if gn else nn.BatchNorm2d(num_filters[2]), nn.ReLU(inplace=True))
        self.decode1 = nn.Sequential(
            Conv2d(num_filters[2],
                   num_filters[2],
                   kernel_size=3,
                   padding=1,
                   bias=False,
                   ws=ws),
            nn.GroupNorm(32, num_filters[2])
            if gn else nn.BatchNorm2d(num_filters[2]), nn.ReLU(inplace=True))

        for head in sorted(self.heads):
            num_output = self.heads[head]
            fc = nn.Sequential(
                Conv2d(num_filters[2],
                       head_conv,
                       kernel_size=3,
                       padding=1,
                       bias=False,
                       ws=ws),
                nn.GroupNorm(32, head_conv)
                if gn else nn.BatchNorm2d(head_conv), nn.ReLU(inplace=True),
                nn.Conv2d(head_conv, num_output, kernel_size=1))
            if 'hm' in head:
                fc[-1].bias.data.fill_(-2.19)
            else:
                fill_fc_weights(fc)
            self.__setattr__(head, fc)
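The -2.19 bias fill for the heatmap ('hm') heads appears consistent with initialising a sigmoid output to a prior probability of roughly 0.1, as is common in CenterNet-style detectors; a quick check:

import math

# -log((1 - pi) / pi) with pi = 0.1 gives approximately -2.197.
pi = 0.1
print(-math.log((1 - pi) / pi))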