def __init__(self):
    """Frozen DenseNet-169 feature encoder."""
    super().__init__()

    # Load the ImageNet-pretrained encoder, then freeze every weight so
    # only downstream layers receive gradient updates.
    self.backbone = models.densenet169(pretrained=True)
    for p in self.backbone.parameters():
        p.requires_grad = False
def densenet169(num_classes=1000, pretrained='imagenet'):
    r"""Densenet-169 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
    """
    model = models.densenet169(pretrained=False)
    if pretrained is None:
        return model
    # Look up the weight settings for the requested source and load them.
    return load_pretrained(model, num_classes,
                           pretrained_settings['densenet169'][pretrained])
Esempio n. 3
0
def create_dense169(load_weights=False):
    """Build a pretrained DenseNet-169 with a 17-way head, moved to the GPU."""
    net = models.densenet169(pretrained=True)
    # Swap the ImageNet classifier for a 17-class linear layer.
    net.classifier = nn.Linear(net.classifier.in_features, 17)
    net = net.cuda()
    net.name = 'dense169'
    return net
Esempio n. 4
0
def get_backbone(name, pretrained=True):
    """Loading backbone, defining names for skip-connections and encoder output.

    Args:
        name: backbone identifier — 'resnet18'/'34'/'50'/'101'/'152',
            'vgg16', 'vgg19', or 'densenet121'/'161'/'169'/'201'.
        pretrained: load ImageNet weights for the backbone.

    Returns:
        (backbone, feature_names, backbone_output): the backbone module, the
        named submodules whose activations feed skip connections, and the
        name of the module producing the encoder's final feature map.

    Raises:
        NotImplementedError: for an unsupported backbone name.
    """

    # TODO: More backbones

    if name == 'resnet18':
        # BUGFIX: this branch previously built resnet50 for the 'resnet18' name.
        backbone = models.resnet18(pretrained=pretrained)
    elif name == 'resnet34':
        backbone = models.resnet34(pretrained=pretrained)
    elif name == 'resnet50':
        backbone = models.resnet50(pretrained=pretrained)
    elif name == 'resnet101':
        backbone = models.resnet101(pretrained=pretrained)
    elif name == 'resnet152':
        backbone = models.resnet152(pretrained=pretrained)
    elif name == 'vgg16':
        backbone = models.vgg16_bn(pretrained=pretrained).features
    elif name == 'vgg19':
        backbone = models.vgg19_bn(pretrained=pretrained).features
    # elif name == 'inception_v3':
    #     backbone = models.inception_v3(pretrained=pretrained, aux_logits=False)
    elif name == 'densenet121':
        # BUGFIX: densenet branches previously hard-coded pretrained=True,
        # ignoring the caller's `pretrained` argument.
        backbone = models.densenet121(pretrained=pretrained).features
    elif name == 'densenet161':
        backbone = models.densenet161(pretrained=pretrained).features
    elif name == 'densenet169':
        backbone = models.densenet169(pretrained=pretrained).features
    elif name == 'densenet201':
        backbone = models.densenet201(pretrained=pretrained).features
    else:
        # BUGFIX: `raise NotImplemented(...)` raised TypeError because
        # NotImplemented is a sentinel constant, not an exception class.
        raise NotImplementedError(
            '{} backbone model is not implemented so far.'.format(name))

    if name.startswith('resnet'):
        feature_names = [None, 'relu', 'layer1', 'layer2', 'layer3']
        backbone_output = 'layer4'
    elif name == 'vgg16':
        # TODO: consider using a 'bridge' for VGG models, there is just a MaxPool between last skip and backbone output
        feature_names = ['5', '12', '22', '32', '42']
        backbone_output = '43'
    elif name == 'vgg19':
        feature_names = ['5', '12', '25', '38', '51']
        backbone_output = '52'
    # elif name == 'inception_v3':
    #     feature_names = [None, 'Mixed_5d', 'Mixed_6e']
    #     backbone_output = 'Mixed_7c'
    elif name.startswith('densenet'):
        feature_names = [
            None, 'relu0', 'denseblock1', 'denseblock2', 'denseblock3'
        ]
        backbone_output = 'denseblock4'
    else:
        raise NotImplementedError(
            '{} backbone model is not implemented so far.'.format(name))

    return backbone, feature_names, backbone_output
Esempio n. 5
0
 def __init__(self,
              layers=None,
              pretrained=True,
              memory_efficient=False,
              first_avg_pool=False):
     """Wrap a DenseNet-169 trunk with the base feature-extractor setup."""
     net = densenet169(pretrained=pretrained,
                       memory_efficient=memory_efficient)
     # Per-stage output strides and channel widths of DenseNet-169.
     super().__init__(net,
                      [2, 4, 8, 16, 32],
                      [64, 128, 256, 640, 1664],
                      layers,
                      first_avg_pool)
def densenet169():
    """Export DenseNet-169 to ONNX once per fixed batch size (1..32)."""
    net = models.densenet169(pretrained=True)
    net.eval()  # inference mode: deterministic batch-norm / dropout
    for batch in (1, 2, 4, 8, 16, 32):
        filename = 'densenet169i' + str(batch) + '.onnx'
        print(filename)
        # Trace with a dummy ImageNet-sized input for this batch size.
        torch.onnx.export(net,
                          torch.randn(batch, 3, 224, 224),
                          filename,
                          keep_initializers_as_inputs=True)
Esempio n. 7
0
def densenet169(num_classes=1000, pretrained='imagenet'):
    r"""Densenet-169 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
    """
    model = models.densenet169(pretrained=False)
    if pretrained is not None:
        # Fetch weight metadata for the requested source and load it.
        cfg = pretrained_settings['densenet169'][pretrained]
        model = load_pretrained(model, num_classes, cfg)
    # Rewire the classifier/pooling to this project's conventions.
    return modify_densenets(model)
Esempio n. 8
0
    def __init__(self,
                 sample_rate: int,
                 window_size: int,
                 hop_size: int,
                 mel_bins: int,
                 fmin: int,
                 fmax: int,
                 classes_num: int,
                 apply_aug: bool,
                 top_db=None):
        """Audio tagger: log-mel frontend + DenseNet-169 features + attention head.

        Args:
            sample_rate: audio sample rate fed to the mel filterbank.
            window_size: STFT window / FFT size in samples.
            hop_size: STFT hop length in samples.
            mel_bins: number of mel bands.
            fmin: lower mel filterbank frequency bound (Hz).
            fmax: upper mel filterbank frequency bound (Hz).
            classes_num: number of output classes for the attention block.
            apply_aug: stored flag; presumably gates SpecAugment in forward() — confirm there.
            top_db: dynamic-range clipping for the log-mel; None disables it.
        """
        super().__init__()

        # Fixed STFT / log-mel configuration.
        window = 'hann'
        center = True
        pad_mode = 'reflect'
        ref = 1.0
        amin = 1e-10
        self.interpolate_ratio = 32  # Downsampled ratio
        self.apply_aug = apply_aug

        # Spectrogram extractor (frozen: not trained)
        self.spectrogram_extractor = Spectrogram(n_fft=window_size,
                                                 hop_length=hop_size,
                                                 win_length=window_size,
                                                 window=window,
                                                 center=center,
                                                 pad_mode=pad_mode,
                                                 freeze_parameters=True)

        # Logmel feature extractor
        self.logmel_extractor = LogmelFilterBank(sr=sample_rate,
                                                 n_fft=window_size,
                                                 n_mels=mel_bins,
                                                 fmin=fmin,
                                                 fmax=fmax,
                                                 ref=ref,
                                                 amin=amin,
                                                 top_db=top_db,
                                                 freeze_parameters=True)

        # Spec augmenter
        self.spec_augmenter = SpecAugmentation(time_drop_width=64,
                                               time_stripes_num=2,
                                               freq_drop_width=8,
                                               freq_stripes_num=2)

        self.bn0 = nn.BatchNorm2d(mel_bins)

        # 1664 is the DenseNet-169 final feature width.
        self.fc1 = nn.Linear(1664, 1024, bias=True)
        self.att_block = AttBlock(1024, classes_num, activation='sigmoid')

        # NOTE(review): init_weight() runs before densenet_features is
        # attached — confirm it only initializes fc1 / att_block / bn0.
        self.init_weight()

        self.densenet_features = models.densenet169(pretrained=True).features
    def build_model(self, model_path, num_classes=40):
        """Create a 40-class DenseNet-169 and load a DataParallel checkpoint.

        Checkpoints saved from nn.DataParallel prefix every key with
        'module.'; the first seven characters of each key are stripped
        before loading.
        """
        model = models.densenet169(num_classes=40)
        state = torch.load(model_path, map_location='cpu')
        stripped = OrderedDict((key[7:], value) for key, value in state.items())
        model.load_state_dict(stripped)
        return model
Esempio n. 10
0
def initialize_model(model_name,
                     num_classes,
                     feature_extract,
                     use_pretrained=True):
    """Construct a torchvision model with its head resized to num_classes.

    feature_extract=True lets set_parameter_requires_grad freeze the
    pretrained trunk, so only the freshly created head trains.

    Returns:
        (model, input_size): the model and the square input resolution it expects.
    """
    net, input_size = None, 0

    if model_name == "resnet":
        # Resnet18, resnet34, resnet50, resnet101
        net = models.resnet50(pretrained=use_pretrained)
        set_parameter_requires_grad(net, feature_extract)
        net.fc = nn.Linear(net.fc.in_features, num_classes)
        input_size = 224

    elif model_name == "vgg":
        # VGG11_bn
        net = models.vgg11_bn(pretrained=use_pretrained)
        set_parameter_requires_grad(net, feature_extract)
        net.classifier[6] = nn.Linear(net.classifier[6].in_features, num_classes)
        input_size = 224

    elif model_name == "densenet":
        # Densenet169
        net = models.densenet169(pretrained=use_pretrained)
        set_parameter_requires_grad(net, feature_extract)
        net.classifier = nn.Linear(net.classifier.in_features, num_classes)
        input_size = 224

    elif model_name == "inception":
        # Inception v3: expects (299, 299) inputs and has an auxiliary output,
        # so both the auxiliary and the primary classifier are resized.
        net = models.inception_v3(pretrained=use_pretrained)
        set_parameter_requires_grad(net, feature_extract)
        net.AuxLogits.fc = nn.Linear(net.AuxLogits.fc.in_features, num_classes)
        net.fc = nn.Linear(net.fc.in_features, num_classes)
        input_size = 299

    else:
        print("Invalid model name, exiting...")
        exit()
    return net, input_size
Esempio n. 11
0
def get_net(net_name, weight_path=None):
    """
    Build a model by network name.
    :param net_name: network name
    :param weight_path: path to a weight file; None loads default pretrained weights
    :return: the constructed model
    """
    # With no explicit weight file, fall back to the default pretrained weights.
    pretrain = weight_path is None
    if net_name == 'vgg16':
        net = models.vgg16(pretrained=pretrain)
    elif net_name == 'vgg19':
        net = models.vgg19(pretrained=pretrain)
    elif net_name == 'resnet18':
        net = models.resnet18(pretrained=pretrain)
    elif net_name == 'frcnn':
        net = models.frcnn(pretrained=pretrain)
    elif net_name == 'resnet50':
        net = models.resnet50(pretrained=pretrain)
    elif net_name == 'resnet101':
        net = models.resnet101(pretrained=pretrain)
    elif net_name == 'densenet121':
        net = models.densenet121(pretrained=pretrain)
    elif net_name == 'densenet169':
        net = models.densenet169(pretrained=pretrain)
    elif net_name == 'inception':
        net = models.inception_v3(pretrained=pretrain)
    elif net_name == 'mobilenet_v2':
        net = models.mobilenet_v2(pretrained=pretrain)
    elif net_name == 'shufflenet_v2':
        net = models.shufflenet_v2_x1_0(pretrained=pretrain)
    elif net_name == 'efficientnet':
        net = EfficientNet.from_name('efficientnet-b0')
        # Replace the classification head with a 2-way output, then load
        # parameters trained on the CT dataset.
        net._fc = nn.Linear(in_features=net._fc.in_features, out_features=2,
                            bias=True)
        net.load_state_dict(torch.load("efficientNet-b0.pt"))
    else:
        raise ValueError('invalid network name:{}'.format(net_name))
    # Load weights from the given path; densenet checkpoints need their
    # legacy 'norm.1'-style keys rewritten to the current naming scheme.
    if weight_path is not None and net_name.startswith('densenet'):
        pattern = re.compile(
            r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$'
        )
        state_dict = torch.load(weight_path)
        for key in list(state_dict.keys()):
            match = pattern.match(key)
            if match:
                # e.g. 'norm.1.weight' -> 'norm1.weight' (dot removed).
                state_dict[match.group(1) + match.group(2)] = state_dict.pop(key)
        net.load_state_dict(state_dict)
    elif weight_path is not None:
        net.load_state_dict(torch.load(weight_path))
    return net
def model_fn(model_dir):
    """Hosting entry point: restore a DenseNet-169 checkpoint on the CPU."""
    logger.info('Loading the model.')
    # The sidecar JSON records the class count used at training time.
    with open(os.path.join(model_dir, 'model_data.json')) as json_file:
        data = json.load(json_file)
    model = models.densenet169(pretrained=False, num_classes=data['num_classes'])
    state = torch.load(os.path.join(model_dir, 'model.pth'),
                       map_location=torch.device('cpu'))
    model.load_state_dict(state)
    model.to('cpu')
    logger.info('Done loading model')

    return {'model': model, 'data': data}
Esempio n. 13
0
    def __init__(self, pretrained=True):
        """DenseNet-169 backbone plus an MLP head over its 1000 logits.

        The head input is 1000 + 1 wide — presumably the backbone logits
        concatenated with one auxiliary scalar in forward(); confirm there.
        """
        super(MyModel_dnet169, self).__init__()
        self.cnn = models.densenet169(pretrained=pretrained)

        # Head: BN -> dropout -> FC(1001->512) -> ReLU -> BN -> dropout -> FC(512->3).
        self.bn1 = nn.BatchNorm1d(1000)
        self.dr1 = nn.Dropout(p=0.25, inplace=False)
        self.fc1 = nn.Linear(1000 + 1, 512)
        self.r1 = nn.ReLU(inplace=True)
        self.bn2 = nn.BatchNorm1d(512)
        self.dr2 = nn.Dropout(p=0.5, inplace=False)
        self.fc2 = nn.Linear(512, 3)
def densenet169(num_classes, pretrained=True, freeze=False):
    """Return (model, name): DenseNet-169 with a num_classes head behind a sigmoid."""
    model = models.densenet169(pretrained=pretrained)
    if freeze:
        model = freeze_all_layers(model)
    # Replace the ImageNet head with a num_classes-way linear layer.
    model.classifier = nn.Linear(model.classifier.in_features, num_classes)
    return Add_Sigmoid(model), 'Densenet169'
def init_delf_pca():
    """DenseNet-169 + attention pooling, DataParallel-wrapped, projected to 768 dims."""
    backbone = models.densenet169(pretrained=True)
    # Drop the classifier, then append ReLU and attention pooling over the
    # 1664-dim DenseNet-169 feature map.
    layers = list(backbone.children())[:-1]
    layers.append(nn.ReLU(inplace=True))
    layers.append(AttentionPool(1664))
    trunk = nn.DataParallel(nn.Sequential(*layers))
    # Final linear layer maps the 1664-dim descriptor down to 768 (PCA-like).
    return nn.Sequential(*list(trunk.children()), nn.Linear(1664, 768))
Esempio n. 16
0
    def __init__(self, num_channel=21, classCount=2, isTrained=True):
        """DenseNet-169 classifier for num_channel-channel inputs.

        A 3x3 conv + BatchNorm first projects the input down to the 3
        channels the (optionally pretrained) DenseNet stem expects.
        """
        super(DenseNet169, self).__init__()

        self.first_conv = nn.Sequential(
            nn.Conv2d(num_channel, 3, kernel_size=3, padding=1),
            nn.BatchNorm2d(3))
        densenet = models.densenet169(pretrained=isTrained)
        self.features = densenet.features
        # Reuse the backbone's feature width for the replacement head.
        kernelCount = densenet.classifier.in_features
        self.classifier = nn.Sequential(nn.Linear(kernelCount, classCount))
Esempio n. 17
0
 def __init__(self, num_class=1):
     """Frozen DenseNet-169 features over 2-channel input, sigmoid output head."""
     super().__init__()
     # DenseNet-169 final feature width.
     self.channels = 1664
     densenet_169 = models.densenet169(pretrained=True)
     # Freeze the entire pretrained backbone; only the layers defined below train.
     for params in densenet_169.parameters():
         params.requires_grad_(False)
     # Map 2-channel inputs to the 3 channels the backbone expects.
     self.conv1 = nn.Conv2d(in_channels=2, out_channels=3, kernel_size=4)
     self.features = nn.Sequential(*list(densenet_169.features.children()))
     self.relu = nn.ReLU(inplace=True)
     self.fc1 = nn.Linear(self.channels, num_class)
     self.sigmoid = nn.Sigmoid()
Esempio n. 18
0
def create_model(device, ema=False):
    """DenseNet-169 with a 7-way head on `device`.

    With ema=True every parameter is detached so the copy is never updated
    by the optimizer (its weights are maintained externally).
    """
    model = models.densenet169(pretrained=True)
    model.classifier = torch.nn.Linear(model.classifier.in_features, 7)
    model = model.to(device)

    if ema:
        for p in model.parameters():
            p.detach_()

    return model
def delf_densenet169():
    """DenseNet-169 trunk (classifier removed) + ReLU + attention pooling."""
    backbone = models.densenet169(pretrained=True)
    # All children except the final classifier, then the DELF-style tail.
    layers = list(backbone.children())[:-1]
    layers += [nn.ReLU(inplace=True), AttentionPool(1664)]
    return nn.Sequential(*layers)
Esempio n. 20
0
def create_dense169(load_weights=False):
    """3-class DenseNet-169 on the GPU; optionally restore saved weights.

    Returns:
        (model, weight_file_path)
    """
    net = models.densenet169(pretrained=True)
    in_features = net.classifier.in_features
    net.classifier = nn.Linear(in_features, 3)
    print(in_features)
    net = net.cuda()
    w_file = MODEL_DIR + '/dense169.pth'

    if load_weights:
        load_weights_file(net, w_file)
    net.name = 'dense169'
    return net, w_file
Esempio n. 21
0
 def __init__(self):
     """DenseNet-169 trunk + 1x1-conv head with global average pooling, 2-way output."""
     super(MydenseNet169, self).__init__()
     model = models.densenet169(pretrained=True)
     # Everything except the final classifier (attribute name kept from a
     # ResNet-based variant of this class).
     self.resnet_lay = nn.Sequential(*list(model.children())[:-1])
     # Reduce the 1664 DenseNet-169 features to 512 channels.
     self.conv1_lay = nn.Conv2d(1664,
                                512,
                                kernel_size=(1, 1),
                                stride=(1, 1))
     self.relu1_lay = nn.ReLU(inplace=True)
     self.drop_lay = nn.Dropout2d(0.5)
     self.global_average = nn.AdaptiveAvgPool2d((1, 1))
     self.fc_Linear_lay2 = nn.Linear(512, 2)
Esempio n. 22
0
def _load_model(name='squeezenet'):
    """Return a cached pretrained model with a forward hook on its features.

    Unknown names fall back to squeezenet (with a console warning).
    """
    if name in _models_to_use:
        return _models_to_use[name]
    if name == 'densenet':
        net = models.densenet169(pretrained=True)
    elif name == 'squeezenet':
        net = models.squeezenet1_1(pretrained=True)
    else:
        print('Unknown model name, using squeezenet')
        net = models.squeezenet1_1(pretrained=True)
    # Cache under the requested name, then hook the feature extractor.
    _models_to_use[name] = net
    net._modules.get('features').register_forward_hook(_hook_feature)
    return net
Esempio n. 23
0
def main(opt):
    """Train a DenseNet-169 binary classifier on MURA-style data.

    Checkpoints separately on best validation loss (out_dir + '.th') and best
    kappa (out_dir + '_kappa.th'), pickling running metrics every epoch.
    opt keys used: train_path, test_path, batch_size, lr, decay_epochs,
    num_epochs, out_dir.
    """
    train_dataset = mura_data(lists =opt['train_path'],
                     transform = tf_train)
    train_loader = torch.utils.data.DataLoader(train_dataset,
                            batch_size=opt['batch_size'], shuffle=True,num_workers=3)

    test_dataset = mura_data(lists =opt['test_path'],
                     train = False,     
                     transform = tf_test)
    test_loader = torch.utils.data.DataLoader(test_dataset,
                            batch_size=opt['batch_size'], shuffle=False,num_workers=3)
    
    model = densenet169(pretrained='/root/workspace/pre_models/densenet169.pth')
    # NOTE(review): torchvision DenseNet exposes its head as `.classifier`,
    # not `.fc`; unless the local densenet169 wrapper defines/uses `fc`,
    # this attaches an unused module — confirm against the wrapper's forward().
    model.fc = torch.nn.Linear(1664,1)
    model.to(device)
    
    optimizer = torch.optim.Adam(
            model.parameters(),lr=opt['lr'], weight_decay=1e-3)
#     optimizer = torch.optim.SGD(
#             model.parameters(),lr=opt['lr'], weight_decay=1e-4)
    binary_loss_fn = torch.nn.BCEWithLogitsLoss(weight=None)
    
    start_epoch = 1
    # Best (lowest) validation loss / best (highest) kappa so far, plus the
    # per-sample output lists that produced each best.
    results = {'loss':10,'kappa':0,'train':[],'test':[],'loss_list':[],'kappa_list':[]}
    for epoch in range(start_epoch, opt['num_epochs']):
        #utils.normalize_batch(x)
        if opt['decay_epochs']:
            adjust_learning_rate(optimizer, epoch, initial_lr=opt['lr'], decay_epochs=opt['decay_epochs'])
        train_metrics = train_epoch(epoch, model, train_loader, optimizer, binary_loss_fn)
        eval_metrics,out_list = val(model, test_loader, binary_loss_fn,opt['test_path'])
        results['train'].append(train_metrics)
        results['test'].append(eval_metrics)
        
        # eval_metrics[1] drives the 'loss' best — presumably validation loss;
        # confirm the metric layout in val().
        if eval_metrics[1]<results['loss']:
            results['loss'] = eval_metrics[1]
            results['loss_list'] = out_list
            if epoch>1:
                # Move to CPU before saving so the checkpoint is device-neutral.
                model.cpu()
                torch.save(model.state_dict(), opt['out_dir']+'.th') 
                print('save to %s'%opt['out_dir'])
                model.to(device)
                
        # eval_metrics[-2] drives the 'kappa' best — presumably Cohen's kappa.
        if eval_metrics[-2]>results['kappa']:
            results['kappa'] = eval_metrics[-2]
            results['kappa_list'] = out_list
            if epoch>1:
                model.cpu()
                torch.save(model.state_dict(), opt['out_dir']+'_kappa.th') 
                print('save to %s'%opt['out_dir'])
                model.to(device)
        # Persist the running results after every epoch.
        x = open(opt['out_dir']+'.pkl','wb')
        pickle.dump(results,x)
        x.close()
Esempio n. 24
0
def densenet169(input_shape=(1, 128, 128)):
    """DenseNet-169 adapted to small inputs, with an 80-way classifier.

    The stem's 7x7/stride-2 conv is swapped for a 3x3/stride-1 conv so the
    given channel count is accepted and low-resolution inputs are not
    downsampled immediately.
    """
    net = models.densenet169(pretrained=False)
    net.features.conv0 = nn.Conv2d(
        in_channels=input_shape[0],
        out_channels=64,
        kernel_size=(3, 3),
        stride=(1, 1),
        padding=(1, 1),
        bias=False,
    )
    # 1664 is the DenseNet-169 final feature width.
    net.classifier = nn.Linear(in_features=1664, out_features=80)
    return net
Esempio n. 25
0
    def __init__(
        self,
        in_channels,
        out_channels,
    ):
        """Encoder-decoder network built on DenseNet-169 blocks.

        The encoder reuses the torchvision DenseNet-169 stem and dense/
        transition blocks; the decoder is a stack of Upproject modules
        ending in a single-channel 3x3 conv.
        """
        super(DenseNet_pytorch, self).__init__()
        # self.model = models.resnet34(pretrained=False)
        self.model = models.densenet169(pretrained=False)
        # self.model.load_state_dict(torch.load(Windows_filepath+'densenet169-b2777c0a.pth'))
        # Stem: conv -> norm -> relu -> maxpool, taken straight from DenseNet.
        self.conv0 = self.model.features.conv0
        self.norm0 = self.model.features.norm0
        self.relu0 = self.model.features.relu0
        self.pool0 = self.model.features.pool0

        ############# Block1-down 64-64  ##############
        self.dense_block1 = self.model.features.denseblock1
        self.trans_block1 = self.model.features.transition1

        ############# Block2-down 32-32  ##############
        self.dense_block2 = self.model.features.denseblock2
        self.trans_block2 = self.model.features.transition2

        ############# Block3-down  16-16 ##############
        self.dense_block3 = self.model.features.denseblock3
        self.trans_block3 = self.model.features.transition3

        ############# Block4-down  16-16 ##############
        self.dense_block4 = self.model.features.denseblock4

        self.model_out = self.model.features.norm5
        self.model_relu = F.relu

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.model_out_channels = 1664
        # 1x1 conv at the bottleneck, keeping the 1664-channel width.
        self.midconv = nn.Conv2d(in_channels=self.model_out_channels,
                                 out_channels=self.model_out_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0,
                                 bias=True)
        # self.midrelu = nn.LeakyReLU(0.2,inplace=True)
        # encoder output width: 1664
        # NOTE(review): Upproject input widths (1920, 960, 480, 272) exceed the
        # previous stage's output — presumably skip connections are concatenated
        # in forward(); confirm against Upproject's definition.
        self.up1 = Upproject(1920, 832)
        self.up2 = Upproject(960, 416)
        self.up3 = Upproject(480, 208)
        self.up4 = Upproject(272, 104)
        self.finalconv = nn.Conv2d(in_channels=104,
                                   out_channels=1,
                                   kernel_size=3,
                                   stride=1,
                                   padding=1,
                                   bias=True)
Esempio n. 26
0
def create_dense169(load_weights=False, freeze=False):
    """DenseNet-169 with a 1-unit sigmoid head, moved to the GPU.

    Note: load_weights is accepted for interface parity but unused here.
    """
    net = models.densenet169(pretrained=True)
    if freeze:
        # Freeze the pretrained trunk; the head replaced below is created
        # afterwards with fresh parameters, so it remains trainable.
        for p in net.parameters():
            p.requires_grad = False
    net.classifier = nn.Sequential(
        nn.Linear(net.classifier.in_features, 1), nn.Sigmoid())
    net = net.cuda()

    net.name = 'dense169'
    return net
Esempio n. 27
0
    def __init__(self,
                 backbone,
                 expert_dims,
                 backbone_pretrain,
                 same_dim=512,
                 feat2d=False):
        """Image encoder: CNN backbone + per-modality embedding layers.

        Args:
            backbone: 'resnet', 'resnet152' or 'densenet'.
            expert_dims: dict mapping modality name -> feature dimension.
            backbone_pretrain: 'deepfashion' overlays a fine-tuned checkpoint
                on top of the ImageNet weights.
            same_dim: common embedding size for all modalities.
            feat2d: stored flag; presumably controls 2-D vs pooled features
                in forward() — confirm there.

        Raises:
            ValueError: for an unsupported backbone name.
        """
        super(ImageCEModule, self).__init__()

        modalities = list(expert_dims.keys())
        self.expert_dims = expert_dims
        self.modalities = modalities
        self.backbone_name = backbone
        self.feat2d = feat2d

        # -------- Backbone ---------------------------------------------------
        # ResNets drop their last two children (avgpool + fc); DenseNet drops
        # only its classifier.
        if backbone == 'resnet':
            resnet = models.resnet50(pretrained=True)
            modules = list(resnet.children())[:-2]
            self.backbone = nn.Sequential(*modules)
        elif backbone == 'resnet152':
            resnet = models.resnet152(pretrained=True)
            modules = list(resnet.children())[:-2]
            self.backbone = nn.Sequential(*modules)
        elif backbone == 'densenet':
            densenet = models.densenet169(pretrained=True)
            modules = list(densenet.children())[:-1]
            self.backbone = nn.Sequential(*modules)
        else:
            raise ValueError
        ## Load deepfashion pretrained backbone
        if backbone_pretrain == "deepfashion":
            print("Load %s_%s as backbone" % (backbone_pretrain, backbone))
            param_dict = torch.load(
                'deepfashion/logdir/%s_%s/best_ckpt.pth.tar' %
                (backbone_pretrain, backbone))['state_dict']
            # Copy checkpoint tensors whose keys carry the 'backbone.' prefix
            # (i[9:] strips that prefix) into this module's backbone.
            for i in param_dict:
                if "backbone." in i:
                    self.backbone.state_dict()[i[9:]].copy_(param_dict[i])
        if backbone == 'densenet':
            # Unwrap the Sequential so the DenseNet features module is used directly.
            self.backbone = self.backbone[0]
        self.dropout = nn.Dropout(p=0.2)
        self.avg_pooling = nn.AdaptiveAvgPool2d((1, 1))

        # ------ Embedding for modalities -------------------------------------
        # One Linear+ReLU projection per modality, mapping its expert
        # dimension to the shared same_dim space.
        vis_embed_dict = {}
        for key in modalities:
            vis_embed_dict[key] = nn.Sequential(
                nn.Linear(expert_dims[key], same_dim),
                nn.ReLU()  # NoReLU
            )
            #vis_embed_dict[key] = ReduceDim(expert_dims[key], same_dim)
        self.vis_embed = nn.ModuleDict(vis_embed_dict)
Esempio n. 28
0
 def get_feat_model(self, feature_model, read_type):
     """Build the feature extractor named by feature_model.

     read_type 'RGB' implies 3 input channels, anything else 1 (used only
     by the attention network). Returns None for unknown model names.
     """
     num_input_features = 3 if read_type == 'RGB' else 1
     #copy_opt = Namespace(**vars(self.opt))
     feat_model = None
     if feature_model == 'attention':
         feat_model = AttentionNetwork(self.opt, num_input_features)
     elif feature_model == 'densenet169':
         feat_model = models.densenet169(pretrained=not self.opt.no_densenet_pretrain)
         # Resize the classifier head to the configured feature size.
         feat_model.classifier = nn.Linear(feat_model.classifier.in_features,
                                           self.opt.feat_size)
     return feat_model
    def __init__(self, num_classes, pretrained=True, fine_tune=False):
        """DenseNet-169 classifier adapted to 6-channel inputs.

        NOTE(review): densenet169(pretrained) passes `pretrained` as the
        first positional argument — confirm the in-scope densenet169 takes
        the pretrained flag first (other definitions in this codebase take
        num_classes first). `fine_tune` is accepted but unused here.
        """
        super(DenseNetClassifier, self).__init__()

        self.densenet = densenet169(pretrained)
        self._num_classes = num_classes
        # Presumably toggles requires_grad across the backbone — see
        # set_parameter_requires_grad for the exact semantics.
        set_parameter_requires_grad(self.densenet, True)
        self._build_last_layer()
        # Replace the stem conv to accept 6 input channels instead of 3.
        # NOTE(review): this Conv2d defaults to bias=True, unlike the stock
        # DenseNet stem (bias=False) — confirm that is intended.
        self.densenet.features.conv0 = torch.nn.Conv2d(6,
                                                       64,
                                                       kernel_size=7,
                                                       stride=2,
                                                       padding=3)
        torch.nn.init.xavier_uniform_(self.densenet.features.conv0.weight)
Esempio n. 30
0
def init_densenet169():
    """DenseNet-169 trunk (classifier dropped) with ReLU and pooling appended."""
    backbone = models.densenet169(pretrained=True)
    # Keep every child except the final classifier, then add the pooling tail
    # over the 1664-dim feature map.
    layers = list(backbone.children())[:-1]
    layers += [nn.ReLU(inplace=True), Pool(1664)]
    return nn.Sequential(*layers)
Esempio n. 31
0
def prepare_densenet(is_pre_trained, fine_tune, num_classes):
    """DenseNet-169 whose classifier outputs num_classes.

    Pretrained weights require the stock 1000-way head at construction time;
    the head is replaced afterwards in either case. fine_tune=True freezes
    the four stem modules (conv/norm/relu/pool).
    """
    model = models.densenet169(
        pretrained=is_pre_trained,
        num_classes=1000 if is_pre_trained else num_classes)

    if fine_tune:
        stem = nn.Sequential(*list(model.features)[:4])
        set_parameter_requires_grad(stem)

    model.classifier = nn.Linear(model.classifier.in_features, num_classes)

    return model
Esempio n. 32
0
def dn169(pre):
    """DenseNet-169 body (first child after the fastai model split)."""
    return children(densenet169(pre))[0]
def dn201(pre):
    """DenseNet-201 body (first child after the fastai model split)."""
    return children(densenet201(pre))[0]
Esempio n. 33
0
def dn169(pre):
    """DenseNet-169 body (first child after the fastai model split)."""
    return children(densenet169(pre))[0]

@_fastai_model('Densenet-201', 'Densely Connected Convolutional Networks',