Example #1
def classification_model_resnet18_combine_last_var(**kwargs):
    base_model = pretrainedmodels.resnet18()
    return ClassificationModelResnetCombineLastVariable(
        base_model,
        base_model_features=512,
        nb_features=6,
        base_model_l1_outputs=64,
        **kwargs)
Example #2
def segmentation_model_resnet18_bn_filters8_masked(**kwargs):
    base_model = pretrainedmodels.resnet18()
    return ResnetWeightedSegmentatation(base_model,
                                        DecoderBlock=DecoderBlockBN,
                                        nb_features=6,
                                        base_model_l1_outputs=64,
                                        filters=8,
                                        **kwargs)
Example #3
def segmentation_model_resnet18_bn_filters8(**kwargs):
    base_model = pretrainedmodels.resnet18()
    return ClassificationModelResnetCombineLastVariable3(
        base_model,
        DecoderBlock=DecoderBlockBN,
        nb_features=6,
        base_model_l1_outputs=64,
        filters=8,
        **kwargs)
Example #4
    def __init__(self, weight='imagenet'):
        super(DiscriminatorModel, self).__init__()
        self.layer = nn.Conv2d(1, 3, (2, 2), padding=2)

        model = cm.resnet18(num_classes=1000, pretrained=weight)
        model.last_linear = nn.Linear(in_features=512,
                                      out_features=1,
                                      bias=True)

        self.model = model
Example #5
 def __init__(self, num_classes, pretrained="imagenet"):
     super().__init__()
     self.model = resnet18(pretrained=pretrained)
     self.model.avg_pool = nn.AdaptiveAvgPool2d(1)
     new_last_linear = nn.Linear(self.model.last_linear.in_features,
                                 num_classes)
     new_last_linear.weight.data = self.model.last_linear.weight.data[:
                                                                      num_classes]
     new_last_linear.bias.data = self.model.last_linear.bias.data[:
                                                                  num_classes]
     self.model.last_linear = new_last_linear
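Example #6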
 def __init__(self):
     # Extended VGG
     super(VGG, self).__init__()
     self.model = pretrainedmodels.resnet18()
     in_dim = self.model.last_linear.in_features
     classifier = torch.nn.Sequential(torch.nn.Linear(in_dim, 448),
                                      torch.nn.ReLU(),
                                      torch.nn.Dropout(0.2),
                                      torch.nn.Linear(448, 15))
     self.model.last_linear = classifier
     for param in self.parameters():
         param.requires_grad = True
Example #7
 def __init__(self):
     super().__init__()
     resnet = pretrainedmodels.resnet18()
     self.a = nn.Sequential(
         resnet.conv1,
         resnet.bn1,
         resnet.relu,
     )
     self.b = nn.Sequential(
         resnet.maxpool,
         resnet.layer1,
     )
     self.c = resnet.layer2
     self.d = resnet.layer3
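
For resnet18 these four stages expose 64, 64, 128, and 256 channels at 1/2, 1/4, 1/8, and 1/16 of the input resolution. A standalone shape check mirroring the slicing above (a sketch, not part of the original class):

import torch
import torch.nn as nn
import pretrainedmodels

resnet = pretrainedmodels.resnet18(pretrained=None)
x = torch.randn(1, 3, 224, 224)
a = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu)(x)  # [1, 64, 112, 112]
b = nn.Sequential(resnet.maxpool, resnet.layer1)(a)          # [1, 64, 56, 56]
c = resnet.layer2(b)                                         # [1, 128, 28, 28]
d = resnet.layer3(c)                                         # [1, 256, 14, 14]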
Example #8
def load_models(conf_file):
    out = {}
    with open(conf_file) as f:
        doc = yaml.full_load(f)
    conf = doc['config']
    for k, stat in conf.items():
        if stat['net'] == 'inception_v4':
            out[k] = (pretrainedmodels.inceptionv4(pretrained=None),
                      stat['sparisity'])
        elif stat['net'] == 'resnet18':
            out[k] = (pretrainedmodels.resnet18(pretrained=None),
                      stat['sparisity'])
        elif stat['net'] == 'mobilenet_v2':
            out[k] = (models.mobilenet_v2(), stat['sparisity'])
        else:
            raise "Unknown net"
    return out
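
load_models expects a YAML file whose top-level 'config' mapping holds one entry per model, each carrying a 'net' name and a sparsity value. A minimal sketch of a compatible file, written and loaded from Python (the file name and entry names are hypothetical; the 'sparisity' key is spelled exactly as the loop reads it):

import yaml

conf = {
    'config': {
        'model_a': {'net': 'resnet18', 'sparisity': 0.5},
        'model_b': {'net': 'mobilenet_v2', 'sparisity': 0.8},
    }
}
with open('models.yaml', 'w') as f:
    yaml.safe_dump(conf, f)

models = load_models('models.yaml')
# models['model_a'] -> (resnet18 instance, 0.5); models['model_b'] -> (MobileNetV2 instance, 0.8)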
Example #9
 def _get_branch_net(self, channels, num_classes):
     model = resnet18(pretrained="imagenet")
     model.global_pool = nn.AdaptiveAvgPool2d(1)
     model.conv1_7x7_s2 = nn.Conv2d(channels,
                                    64,
                                    kernel_size=(7, 7),
                                    stride=(2, 2),
                                    padding=(3, 3))
     model.last_linear = nn.Sequential(
         nn.BatchNorm1d(1024),
         nn.Dropout(0.5),
         nn.Linear(1024, num_classes),
     )
     # print('Model architecture:')
     # print(model)
     # total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
     # print(f'\n\n\n\nModel trainable parameters {total_params}')
     return model
Example #10
def resnet18(input_size=(3, 224, 224), num_classes=1000, pretrained=None):
    model = models.resnet18(pretrained=pretrained)
    model = add_instances_to_torchvisionmodel(model)
    # Change the First Convol2D layer into new input shape
    if input_size != (3, 224, 224):
        model.conv1 = nn.Conv2d(input_size[0],
                                64,
                                kernel_size=(7, 7),
                                stride=(2, 2),
                                padding=(3, 3),
                                bias=False)
        model.input_size = input_size

    del model.fc
    del model.avgpool

    # calc kernel_size on new_avgpool2d layer
    test_tensor = torch.randn((1, input_size[0], input_size[1], input_size[2]))
    features = model.features(test_tensor)
    # print(features, features.shape[2], features.shape[3])
    avg_pool2d_kernel_size = (features.shape[2], features.shape[3])

    # calc last linear size
    x = F.avg_pool2d(features, kernel_size=avg_pool2d_kernel_size)
    x = x.view(x.size(0), -1).shape[1]
    model.last_linear = nn.Linear(in_features=x, out_features=num_classes)

    ##del model.logits
    ##del model.forward
    def logits(self, features):
        x = F.relu(features, inplace=False)
        x = F.avg_pool2d(x, kernel_size=avg_pool2d_kernel_size, stride=1)
        x = x.view(x.size(0), -1)
        x = self.last_linear(x)
        return x

    def forward(self, input):
        x = self.features(input)
        x = self.logits(x)
        return x

    model.logits = types.MethodType(logits, model)
    model.forward = types.MethodType(forward, model)
    return model
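
A usage sketch for the wrapper above. It assumes add_instances_to_torchvisionmodel (not shown here) attaches the .features method that the size probe relies on; the input size and class count below are arbitrary:

import torch

# Hypothetical call: single-channel 96x96 inputs, 10 output classes, no pretrained weights.
model = resnet18(input_size=(1, 96, 96), num_classes=10)
out = model(torch.randn(2, 1, 96, 96))
print(out.shape)  # expected: torch.Size([2, 10])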
Example #11
def get_resnet18():
    model = resnet18()
    w = model.conv1.weight
    w = torch.nn.Parameter(
        torch.cat((w, torch.mean(w, dim=1).unsqueeze(1)), dim=1))

    model.conv1 = nn.Conv2d(4,
                            64,
                            kernel_size=7,
                            stride=2,
                            padding=3,
                            bias=False)
    model.conv1.weight = w

    model.avgpool = nn.Sequential(nn.AvgPool2d(9, stride=1),
                                  nn.AvgPool2d(7, stride=1))

    model.fc = nn.Sequential(nn.Dropout(), nn.Linear(2048, 28))

    return model
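
The stacked average pools and the Linear(2048, 28) head pin down the expected geometry: with 512x512 inputs the layer4 map is 16x16, the 9x9 and 7x7 pools reduce it to 2x2, and flattening 512 channels over 2x2 gives exactly 2048 features. A quick check under that assumption (four-channel 512x512 inputs, and resnet18 taken from torchvision, whose forward uses the .avgpool and .fc modules modified above):

import torch

model = get_resnet18()
out = model(torch.randn(2, 4, 512, 512))
print(out.shape)  # expected: torch.Size([2, 28])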
Example #12
 def __init__(self, nb_embeddings=config.NB_EMBEDDINGS):
     super().__init__()
     self.base_model = pretrainedmodels.resnet18()
     self.fc = nn.Linear(2048, nb_embeddings)
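Example #13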
        params['feat_path'], params['logit_name'] +
        ('' if '.hdf5' in params['logit_name'] else '.hdf5'))

    print('Model: %s' % params['model'])
    print('The extracted features will be saved to --> %s' %
          params['feat_dir'])

    if params['model'] == 'resnet101':
        C, H, W = 3, 224, 224
        model = pretrainedmodels.resnet101(pretrained='imagenet')
    elif params['model'] == 'resnet152':
        C, H, W = 3, 224, 224
        model = pretrainedmodels.resnet152(pretrained='imagenet')
    elif params['model'] == 'resnet18':
        C, H, W = 3, 224, 224
        model = pretrainedmodels.resnet18(pretrained='imagenet')
    elif params['model'] == 'resnet34':
        C, H, W = 3, 224, 224
        model = pretrainedmodels.resnet34(pretrained='imagenet')
    elif params['model'] == 'inceptionresnetv2':
        C, H, W = 3, 299, 299
        model = pretrainedmodels.inceptionresnetv2(
            num_classes=1001, pretrained='imagenet+background')
    elif params['model'] == 'googlenet':
        C, H, W = 3, 224, 224
        model = googlenet(pretrained=True)
        print(model)
    else:
        print("doesn't support %s" % (params['model']))

    if params['model'] != 'googlenet':
Example #14
    def __init__(self,
                 backbone,
                 heads,
                 head_conv=128,
                 num_filters=[256, 256, 256],
                 pretrained=True,
                 dcn=False,
                 gn=False,
                 ws=False,
                 freeze_bn=False,
                 after_non_local='layer1',
                 non_local_hidden_channels=None):
        super().__init__()

        self.heads = heads

        if backbone == 'resnet18':
            pretrained = 'imagenet' if pretrained else None
            self.backbone = pretrainedmodels.resnet18(pretrained=pretrained)
            num_bottleneck_filters = 512
        elif backbone == 'resnet34':
            pretrained = 'imagenet' if pretrained else None
            self.backbone = pretrainedmodels.resnet34(pretrained=pretrained)
            num_bottleneck_filters = 512
        elif backbone == 'resnet50':
            pretrained = 'imagenet' if pretrained else None
            self.backbone = pretrainedmodels.resnet50(pretrained=pretrained)
            num_bottleneck_filters = 2048
        elif backbone == 'resnet101':
            pretrained = 'imagenet' if pretrained else None
            self.backbone = pretrainedmodels.resnet101(pretrained=pretrained)
            num_bottleneck_filters = 2048
        elif backbone == 'resnet152':
            pretrained = 'imagenet' if pretrained else None
            self.backbone = pretrainedmodels.resnet152(pretrained=pretrained)
            num_bottleneck_filters = 2048
        elif backbone == 'se_resnext50_32x4d':
            pretrained = 'imagenet' if pretrained else None
            self.backbone = pretrainedmodels.se_resnext50_32x4d(
                pretrained=pretrained)
            num_bottleneck_filters = 2048
        elif backbone == 'se_resnext101_32x4d':
            pretrained = 'imagenet' if pretrained else None
            self.backbone = pretrainedmodels.se_resnext101_32x4d(
                pretrained=pretrained)
            num_bottleneck_filters = 2048
        elif backbone == 'resnet34_v1b':
            self.backbone = timm.create_model('gluon_resnet34_v1b',
                                              pretrained=pretrained)
            convert_to_inplace_relu(self.backbone)
            num_bottleneck_filters = 512
        elif backbone == 'resnet50_v1d':
            self.backbone = timm.create_model('gluon_resnet50_v1d',
                                              pretrained=pretrained)
            convert_to_inplace_relu(self.backbone)
            num_bottleneck_filters = 2048
        elif backbone == 'resnet101_v1d':
            self.backbone = timm.create_model('gluon_resnet101_v1d',
                                              pretrained=pretrained)
            convert_to_inplace_relu(self.backbone)
            num_bottleneck_filters = 2048
        elif backbone == 'resnext50_32x4d':
            self.backbone = timm.create_model('resnext50_32x4d',
                                              pretrained=pretrained)
            convert_to_inplace_relu(self.backbone)
            num_bottleneck_filters = 2048
        elif backbone == 'resnext50d_32x4d':
            self.backbone = timm.create_model('resnext50d_32x4d',
                                              pretrained=pretrained)
            convert_to_inplace_relu(self.backbone)
            num_bottleneck_filters = 2048
        elif backbone == 'seresnext26_32x4d':
            self.backbone = timm.create_model('seresnext26_32x4d',
                                              pretrained=pretrained)
            convert_to_inplace_relu(self.backbone)
            num_bottleneck_filters = 2048
        elif backbone == 'resnet18_ctdet':
            self.backbone = models.resnet18()
            state_dict = torch.load(
                'pretrained_weights/ctdet_coco_resdcn18.pth')['state_dict']
            self.backbone.load_state_dict(state_dict, strict=False)
            num_bottleneck_filters = 512
        elif backbone == 'resnet50_maskrcnn':
            self.backbone = models.detection.maskrcnn_resnet50_fpn(
                pretrained=pretrained).backbone.body
            print(self.backbone)
            num_bottleneck_filters = 2048
        else:
            raise NotImplementedError

        if after_non_local is not None:
            self.after_non_local = after_non_local
            in_channels = getattr(self.backbone,
                                  after_non_local)[0].conv1.in_channels
            if non_local_hidden_channels is None:
                non_local_hidden_channels = in_channels // 2
            self.non_local = NonLocal2d(in_channels, non_local_hidden_channels)

        if freeze_bn:
            for m in self.backbone.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.weight.requires_grad = False
                    m.bias.requires_grad = False

        self.lateral4 = nn.Sequential(
            Conv2d(num_bottleneck_filters,
                   num_filters[0],
                   kernel_size=1,
                   bias=False,
                   ws=ws),
            nn.GroupNorm(32, num_filters[0])
            if gn else nn.BatchNorm2d(num_filters[0]), nn.ReLU(inplace=True))
        self.lateral3 = nn.Sequential(
            Conv2d(num_bottleneck_filters // 2,
                   num_filters[0],
                   kernel_size=1,
                   bias=False,
                   ws=ws),
            nn.GroupNorm(32, num_filters[0])
            if gn else nn.BatchNorm2d(num_filters[0]), nn.ReLU(inplace=True))
        self.lateral2 = nn.Sequential(
            Conv2d(num_bottleneck_filters // 4,
                   num_filters[1],
                   kernel_size=1,
                   bias=False,
                   ws=ws),
            nn.GroupNorm(32, num_filters[1])
            if gn else nn.BatchNorm2d(num_filters[1]), nn.ReLU(inplace=True))
        self.lateral1 = nn.Sequential(
            Conv2d(num_bottleneck_filters // 8,
                   num_filters[2],
                   kernel_size=1,
                   bias=False,
                   ws=ws),
            nn.GroupNorm(32, num_filters[2])
            if gn else nn.BatchNorm2d(num_filters[2]), nn.ReLU(inplace=True))

        self.decode3 = nn.Sequential(
            DCN(num_filters[0], num_filters[1],
                kernel_size=3, padding=1, stride=1) if dcn else \
            Conv2d(num_filters[0], num_filters[1],
                   kernel_size=3, padding=1, bias=False, ws=ws),
            nn.GroupNorm(32, num_filters[1]) if gn else nn.BatchNorm2d(num_filters[1]),
            nn.ReLU(inplace=True))
        self.decode2 = nn.Sequential(
            Conv2d(num_filters[1],
                   num_filters[2],
                   kernel_size=3,
                   padding=1,
                   bias=False,
                   ws=ws),
            nn.GroupNorm(32, num_filters[2])
            if gn else nn.BatchNorm2d(num_filters[2]), nn.ReLU(inplace=True))
        self.decode1 = nn.Sequential(
            Conv2d(num_filters[2],
                   num_filters[2],
                   kernel_size=3,
                   padding=1,
                   bias=False,
                   ws=ws),
            nn.GroupNorm(32, num_filters[2])
            if gn else nn.BatchNorm2d(num_filters[2]), nn.ReLU(inplace=True))

        for head in sorted(self.heads):
            num_output = self.heads[head]
            fc = nn.Sequential(
                Conv2d(num_filters[2],
                       head_conv,
                       kernel_size=3,
                       padding=1,
                       bias=False,
                       ws=ws),
                nn.GroupNorm(32, head_conv)
                if gn else nn.BatchNorm2d(head_conv), nn.ReLU(inplace=True),
                nn.Conv2d(head_conv, num_output, kernel_size=1))
            if 'hm' in head:
                fc[-1].bias.data.fill_(-2.19)
            else:
                fill_fc_weights(fc)
            self.__setattr__(head, fc)
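Example #15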
def Model_builder(configer):

    model_name = configer.model['name']
    No_classes = configer.dataset_cfg["id_cfg"]["num_classes"]
    model_pretrained = configer.model['pretrained']
    model_dataparallel = configer.model["DataParallel"]
    model_gpu_replica = configer.model["Multi_GPU_replica"]
    gpu_ids = configer.train_cfg["gpu"]

    if model_name == "Inceptionv3":
        model = PM.inceptionv3(num_classes=1000, pretrained=model_pretrained)
        d = model.last_linear.in_features
        model.last_linear = nn.Linear(d, No_classes)

    elif model_name == "Xception":
        model = PM.xception(num_classes=1000, pretrained=model_pretrained)
        d = model.last_linear.in_features
        model.last_linear = nn.Linear(d, No_classes)

    elif model_name == "VGG_19":
        model = PM.vgg19(num_classes=1000, pretrained=model_pretrained)
        d = model.last_linear.in_features
        model.last_linear = nn.Linear(d, No_classes)

    elif model_name == "Resnet18":
        model = PM.resnet18(num_classes=1000, pretrained=model_pretrained)
        d = model.last_linear.in_features
        model.last_linear = nn.Linear(d, No_classes)

    elif model_name == "Resnet50":
        model = PM.resnet50(num_classes=1000, pretrained=model_pretrained)
        d = model.last_linear.in_features
        model.last_linear = nn.Linear(d, No_classes)

    elif model_name == "Resnet101":
        model = PM.resnet101(num_classes=1000, pretrained=model_pretrained)
        d = model.last_linear.in_features
        model.last_linear = nn.Linear(d, No_classes)

    elif model_name == "Resnet152":
        model = PM.resnet152(num_classes=1000, pretrained=model_pretrained)
        d = model.last_linear.in_features
        model.last_linear = nn.Linear(d, No_classes)

    elif model_name == "Resnet34":
        model = PM.resnet34(num_classes=1000, pretrained=model_pretrained)
        d = model.last_linear.in_features
        model.last_linear = nn.Linear(d, No_classes)

    elif model_name == "Densenet121":
        model = PM.densenet121(num_classes=1000, pretrained=model_pretrained)
        d = model.last_linear.in_features
        model.last_linear = nn.Linear(d, No_classes)

    elif model_name == "ResNeXt101-32":
        model = PM.resnext101_32x4d(num_classes=1000,
                                    pretrained=model_pretrained)
        d = model.last_linear.in_features
        model.last_linear = nn.Linear(d, No_classes)

    elif model_name == "ResNeXt101-64":
        model = PM.resnext101_64x4d(num_classes=1000,
                                    pretrained=model_pretrained)
        d = model.last_linear.in_features
        model.last_linear = nn.Linear(d, No_classes)

    elif model_name == "MobilenetV2":
        model = MobileNetV2(n_class=No_classes)

    else:
        raise ImportError("Model Architecture not supported")

    # Performing Data Parallelism if configured

    if model_dataparallel:

        model = torch.nn.DataParallel(model.to(device), device_ids=gpu_ids)

    elif model_gpu_replica:

        torch.distributed.init_process_group(backend='nccl',
                                             world_size=1,
                                             rank=0)
        model = torch.nn.parallel.DistributedDataParallel(model.to(device),
                                                          device_ids=gpu_ids)

    else:
        model = model.to(device)

    print('---------- Model Loaded')

    return model
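Example #16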
def get_base_model(config):
    model_name = config.backbone
    pretrained = config.pretrained

    if pretrained is not None and pretrained != 'imagenet':
        weights_path = pretrained
        pretrained = None
    else:
        weights_path = None

    if config.multibranch:
        input_channels = config.multibranch_input_channels
    else:
        input_channels = config.num_slices
        if hasattr(config, 'append_masks') and config.append_masks:
            input_channels *= 2

    _available_models = ['senet154', 'se_resnext50', 'resnet34', 'resnet18']

    if model_name == 'senet154':
        cut_point = -3
        model = nn.Sequential(
            *list(pretrainedmodels.senet154(
                pretrained=pretrained).children())[:cut_point])
        num_features = 2048
    elif model_name == 'se_resnext50':
        cut_point = -2
        model = nn.Sequential(*list(
            pretrainedmodels.se_resnext50_32x4d(
                pretrained=pretrained).children())[:cut_point])
        num_features = 2048
    elif model_name == 'resnet34':
        cut_point = -2
        model = nn.Sequential(
            *list(pretrainedmodels.resnet34(
                pretrained=pretrained).children())[:cut_point])
        num_features = 512
    elif model_name == 'resnet18':
        cut_point = -2
        model = nn.Sequential(
            *list(pretrainedmodels.resnet18(
                pretrained=pretrained).children())[:cut_point])
        num_features = 512
    else:
        raise ValueError('Unavailable backbone, choose one from {}'.format(
            _available_models))

    if model_name in ['senet154', 'se_resnext50']:
        conv1 = model[0].conv1
    else:
        conv1 = model[0]

    if input_channels != 3:
        conv1_weights = deepcopy(conv1.weight)
        new_conv1 = nn.Conv2d(input_channels,
                              conv1.out_channels,
                              kernel_size=conv1.kernel_size,
                              stride=conv1.stride,
                              padding=conv1.padding,
                              bias=conv1.bias)

        if weights_path is None:
            if input_channels == 1:
                new_conv1.weight.data.fill_(0.)
                new_conv1.weight[:, 0, :, :].data.copy_(conv1_weights[:,
                                                                      0, :, :])
            elif input_channels > 3:
                diff = (input_channels - 3) // 2

                new_conv1.weight.data.fill_(0.)
                new_conv1.weight[:,
                                 diff:diff + 3, :, :].data.copy_(conv1_weights)

        if model_name in ['senet154', 'se_resnext50']:
            model[0].conv1 = new_conv1
        else:
            model[0] = new_conv1

    if weights_path is not None:
        if model_name in ['senet154', 'se_resnext50']:
            conv1_str = '0.conv1.weight'
        else:
            conv1_str = '0.weight'
        weights = load_base_weights(weights_path, input_channels, conv1_str)
        model.load_state_dict(weights)

    return model, num_features
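
get_base_model only reads a handful of attributes from config, so a minimal compatible config can be sketched with a SimpleNamespace (all values below are hypothetical):

from types import SimpleNamespace

config = SimpleNamespace(
    backbone='resnet18',           # one of _available_models
    pretrained='imagenet',         # or a path to custom weights, or None
    multibranch=False,
    multibranch_input_channels=3,  # only read when multibranch is True
    num_slices=5,                  # becomes the number of input channels
    append_masks=False,
)
model, num_features = get_base_model(config)  # num_features == 512 for resnet18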
Example #17
    def __init__(self,
                 config_file: Optional[str] = None,
                 override_list: List[Any] = []):
        _C = CN()
        _C.VALID_IMAGES = [
            'CXR1576_IM-0375-2001.png', 'CXR1581_IM-0378-2001.png',
            'CXR3177_IM-1497-2001.png', 'CXR2585_IM-1082-1001.png',
            'CXR1125_IM-0082-1001.png', 'CXR3_IM-1384-2001.png',
            'CXR1565_IM-0368-1001.png', 'CXR1105_IM-0072-1001-0001.png',
            'CXR2874_IM-1280-1001.png', 'CXR1886_IM-0574-1001.png'
        ]

        _C.MODELS = [{
            'resnet18': (pretrainedmodels.resnet18(pretrained=None), 512, 224),
            'resnet50':
            (pretrainedmodels.resnet50(pretrained=None), 2048, 224),
            'resnet101':
            (pretrainedmodels.resnet101(pretrained=None), 2048, 224),
            'resnet152':
            (pretrainedmodels.resnet152(pretrained=None), 2048, 224),
            'inception_resnet_v2':
            (pretrainedmodels.inceptionresnetv2(pretrained=None), 1536, 299)
        }]

        # _C.MODELS_FEATURE_SIZE = {'resnet18':512, 'resnet50':2048, 'resnet101':2048, 'resnet152':2048,
        #                           'inception_v3':2048, 'inception_resnet_v2':1536}

        # Random seed for NumPy and PyTorch, important for reproducibility.
        _C.RANDOM_SEED = 42
        # Opt level for mixed precision training using NVIDIA Apex. This can be
        # one of {0, 1, 2}. Refer NVIDIA Apex docs for their meaning.
        _C.FP16_OPT = 2

        # Path to the dataset root, which structure as per README. Path is
        # assumed to be relative to project root.
        _C.IMAGE_PATH = '/netscratch/gsingh/MIMIC_CXR/DataSet/Indiana_Chest_XRay/Images_2'
        _C.TRAIN_JSON_PATH = '/netscratch/gsingh/MIMIC_CXR/DataSet/Indiana_Chest_XRay/iu_xray_train_2.json'
        _C.VAL_JSON_PATH = '/netscratch/gsingh/MIMIC_CXR/DataSet/Indiana_Chest_XRay/iu_xray_val_2.json'
        _C.TEST_JSON_PATH = '/netscratch/gsingh/MIMIC_CXR/DataSet/Indiana_Chest_XRay/iu_xray_test_2.json'
        _C.PRETRAINED_EMDEDDING = False
        # Path to .vocab file generated by ``sentencepiece``.
        _C.VOCAB_FILE_PATH = "/netscratch/gsingh/MIMIC_CXR/DataSet/Indiana_Chest_XRay/Vocab/indiana.vocab"
        # Path to .model file generated by ``sentencepiece``.
        _C.VOCAB_MODEL_PATH = "/netscratch/gsingh/MIMIC_CXR/DataSet/Indiana_Chest_XRay/Vocab/indiana.model"
        _C.VOCAB_SIZE = 3000
        _C.EPOCHS = 1024
        _C.BATCH_SIZE = 10
        _C.TEST_BATCH_SIZE = 100
        _C.ITERATIONS_PER_EPOCHS = 1
        _C.WEIGHT_DECAY = 1e-5
        _C.NUM_LABELS = 41
        _C.IMAGE_SIZE = 299
        _C.MAX_SEQUENCE_LENGTH = 130
        _C.DROPOUT_RATE = 0.1
        _C.D_HEAD = 64

        _C.TRAIN_DATASET_LENGTH = 25000
        _C.INFERENCE_TIME = False
        _C.COMBINED_N_LAYERS = 1
        _C.BEAM_SIZE = 50
        _C.PADDING_INDEX = 0
        _C.EOS_INDEX = 3
        _C.SOS_INDEX = 2
        _C.USE_BEAM_SEARCH = True
        _C.EXTRACTED_FEATURES = False
        _C.IMAGE_MODEL_PATH = '/netscratch/gsingh/MIMIC_CXR/Results/Image_Feature_Extraction/MIMIC_CXR_No_ES/model.pth'

        _C.EMBEDDING_DIM = 8192
        _C.CONTEXT_SIZE = 1024
        _C.LR_COMBINED = 1e-4
        _C.MAX_LR = 1e-1
        _C.SAVED_DATASET = False
        _C.MODEL_NAME = 'inception_resnet_v2'
        INIT_PATH = '/netscratch/gsingh/MIMIC_CXR/Results/Modified_Transformer/Indiana_15_10_2020_2/'
        _C.SAVED_DATASET_PATH_TRAIN = INIT_PATH + 'DataSet/train_dataloader.pth'
        _C.SAVED_DATASET_PATH_VAL = INIT_PATH + 'DataSet/val_dataloader.pth'
        _C.SAVED_DATASET_PATH_TEST = INIT_PATH + 'DataSet/test_dataloader.pth'

        _C.CHECKPOINT_PATH = INIT_PATH + 'CheckPoints'
        _C.MODEL_PATH = INIT_PATH + 'combined_model.pth'
        _C.MODEL_STATE_DIC = INIT_PATH + 'combined_model_state_dic.pth'
        _C.FIGURE_PATH = INIT_PATH + 'Graphs'
        _C.CSV_PATH = INIT_PATH
        _C.TEST_CSV_PATH = INIT_PATH + 'test_output_image_feature_input.json'
        self._C = _C
        if config_file is not None:
            self._C.merge_from_file(config_file)
        self._C.merge_from_list(override_list)

        self.add_derived_params()

        # Make an instantiated object of this class immutable.
        self._C.freeze()
Example #18
# Normalize the image channels with the specified per-channel RGB mean and std
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
train_augs = transforms.Compose([
    transforms.RandomResizedCrop(size=224),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(), normalize
])

test_augs = transforms.Compose([
    transforms.Resize(size=256),
    transforms.CenterCrop(size=224),
    transforms.ToTensor(), normalize
])

pretrained_net = pretrainedmodels.resnet18()
# The classification head: torchvision defines it as
#   self.fc = nn.Linear(512 * block.expansion, num_classes)
# and pretrainedmodels re-exposes it as last_linear, which is what its forward() calls.
pretrained_net.last_linear = nn.Linear(512, 2)

output_params = list(map(id, pretrained_net.last_linear.parameters()))
feature_params = filter(lambda p: id(p) not in output_params,
                        pretrained_net.parameters())

lr = 0.01
optimizer = optim.SGD([{
    'params': feature_params
}, {
    'params': pretrained_net.last_linear.parameters(),
    'lr': lr * 10
}],
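
The optimizer call above is cut off after the parameter-group list. A conventional completion (an illustration only, not the original continuation) passes the base learning rate, so the pretrained features train at lr and the new head at ten times that:

optimizer = optim.SGD([
    {'params': feature_params},
    {'params': pretrained_net.last_linear.parameters(), 'lr': lr * 10},
], lr=lr)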