Beispiel #1
0
 def get_densenet201(self, pretrained=True, file_path=None):
     """Build a DenseNet-201 classifier sized to this wrapper's task.

     Replaces the stock ImageNet head with a Linear layer of
     ``self._num_classes`` outputs, moves the model to ``self._device``
     and, if ``file_path`` is given, loads a saved state dict from it.

     Args:
         pretrained: load ImageNet weights for the backbone.
         file_path: optional path to a ``state_dict`` checkpoint.

     Returns:
         The configured model (a ``torch.nn.Module``).
     """
     model = models.densenet201(pretrained=pretrained)
     in_features = model.classifier.in_features
     # Swap the 1000-way ImageNet head for the task-specific head.
     model.classifier = t.nn.Linear(in_features, self._num_classes)
     model.to(self._device)
     if file_path:
         # NOTE(review): t.load uses the device the tensors were saved
         # from — consider map_location=self._device. TODO confirm.
         model.load_state_dict(t.load(file_path))
     return model
Beispiel #2
0
def get_model():
    """Load the food classifier: DenseNet-201 with a 101-way head."""
    net = models.densenet201(pretrained=True)
    # Two-layer head: 1920 -> 1024 -> 101 food classes.
    head = nn.Sequential(
        nn.Linear(1920, 1024),
        nn.LeakyReLU(),
        nn.Linear(1024, 101),
    )
    net.classifier = head
    checkpoint = torch.load('food_classifier.pt', map_location='cpu')
    net.load_state_dict(checkpoint, strict=False)
    net.eval()
    return net
Beispiel #3
0
def create_dense201(load_weights=False):
    """Create a CUDA DenseNet-201 with a 3-class head.

    NOTE(review): ``load_weights`` is currently ignored — the backbone
    is always built with ``pretrained=True``. Confirm intent before
    relying on this parameter.

    Returns:
        The model, moved to the default CUDA device, tagged with
        ``.name = 'dense201'``.
    """
    desnet_ft = models.densenet201(pretrained=True)
    num_ftrs = desnet_ft.classifier.in_features
    # Replace the ImageNet head with a 3-way classifier.
    desnet_ft.classifier = nn.Linear(num_ftrs, 3)
    desnet_ft = desnet_ft.cuda()

    # Tag the model so callers can identify the architecture.
    desnet_ft.name = 'dense201'
    return desnet_ft
Beispiel #4
0
def model_setter(model_name,
                 learning_rate=0.001,
                 output_size=2,
                 usePretrained=True,
                 isTest=False,
                 dropouts=True):
    """Build a torchvision backbone with a resized output head.

    Supports resnet{18,34,50,101,152} and densenet{121,161,169,201}.
    The model is wrapped in DataParallel and moved to CUDA. Unless
    ``isTest`` is True, an Adam optimizer, StepLR scheduler and
    CrossEntropyLoss criterion are also built; in test mode those three
    are returned as 0 (preserving the original contract).

    Args:
        model_name: architecture name (see supported list above).
        learning_rate: Adam learning rate.
        output_size: number of output classes.
        usePretrained: load ImageNet weights.
        isTest: skip optimizer/criterion/scheduler creation.
        dropouts: resnet18 only — prepend an EnsembleDropout to the head.

    Returns:
        (model, optimizer, criterion, scheduler)

    Raises:
        ValueError: for an unsupported ``model_name`` (previously this
            fell through and crashed later with a NameError).
    """
    supported = ('resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152',
                 'densenet121', 'densenet161', 'densenet169', 'densenet201')
    if model_name not in supported:
        raise ValueError('Unsupported model_name: {}'.format(model_name))

    # All supported factories share the torchvision naming scheme, so the
    # nine copy-pasted branches collapse to one getattr dispatch.
    model = getattr(models, model_name)(pretrained=usePretrained)
    if model_name.startswith('resnet'):
        num_ftrs = model.fc.in_features
        if model_name == 'resnet18' and dropouts:
            # The dropout head was only ever applied to resnet18.
            en = EnsembleDropout()
            model.fc = nn.Sequential(en, nn.Linear(num_ftrs, output_size))
        else:
            model.fc = nn.Linear(num_ftrs, output_size)
    else:
        num_ftrs = model.classifier.in_features
        model.classifier = nn.Linear(num_ftrs, output_size)

    model = torch.nn.DataParallel(model).cuda()
    if not isTest:
        optimizer = optim.Adam(model.parameters(), lr=learning_rate)
        scheduler = lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.5)
        criterion = nn.CrossEntropyLoss().cuda()
    else:
        optimizer = 0
        criterion = 0
        scheduler = 0
    return model, optimizer, criterion, scheduler
Beispiel #5
0
 def __init__(self):
     """Load model #3: a DenseNet-201 restored from ``Model3_path``.

     NOTE(review): the checkpoint is expected to be a bare state dict
     for an unmodified DenseNet-201 — confirm against the saver.
     """
     # ### # Initialize model
     model = models.densenet201(pretrained=False)
     # ### Load state_dict
     state_dict = torch.load(Model3_path)
     model.load_state_dict(state_dict)
     # model.eval()
     #self.model1 = torch.load(Model1_path)['state_dict']
     self.model3 = model
 def __init__(self, n_classes = 54):
     """DenseNet-201 feature trunk + linear head with a BN bottleneck.

     Args:
         n_classes: number of output classes (default 54).
     """
     super(Dense201, self).__init__()
     # Channel width of the DenseNet-201 final feature map.
     planes = 1920
     self.features = densenet201(pretrained=False).features
     self.classifier = nn.Linear(planes,n_classes)
     nn.init.constant_(self.classifier.bias, 0)
     # nn.init.normal_(self.classifier.weight, std=0.001)
     self.bottleneck_g = nn.BatchNorm1d(planes)
     self.bottleneck_g.bias.requires_grad_(False)  # no shift
def get_model_from_arch(arch, hidden_units):
    ''' Load an existing PyTorch model, freeze parameters and subsitute classifier.
    Refer https://pytorch.org/docs/stable/torchvision/models.html

    Args:
        arch: one of the densenet/vgg*_bn/resnet names below.
        hidden_units: width of the hidden layer in the new classifier.

    Returns:
        The model with all backbone parameters frozen and a fresh
        102-way LogSoftmax classifier attached.

    Raises:
        RuntimeError: for an unknown ``arch``.
    '''
    # Architecture families share how the classifier input width is found
    # and where the new classifier is attached, so the nine copy-pasted
    # branches collapse to a getattr dispatch per family.
    densenets = ('densenet121', 'densenet161', 'densenet201')
    vggs = ('vgg13_bn', 'vgg16_bn', 'vgg19_bn')
    resnets = ('resnet18', 'resnet34', 'resnet50')

    if arch not in densenets + vggs + resnets:
        raise RuntimeError("Unknown model")

    model = getattr(models, arch)(pretrained=True)
    if arch in densenets:
        classifier_input_size = model.classifier.in_features
    elif arch in vggs:
        classifier_input_size = model.classifier[0].in_features
    else:
        classifier_input_size = model.fc.in_features

    # Freeze parameters so we don't backprop through them
    for param in model.parameters():
        param.requires_grad = False

    # Replace classifier, ensure input and output sizes match
    classifier_output_size = 102

    classifier = nn.Sequential(OrderedDict([
        ('fc1', nn.Linear(classifier_input_size, hidden_units)),
        ('relu', nn.ReLU()),
        ('fc2', nn.Linear(hidden_units, classifier_output_size)),
        ('output', nn.LogSoftmax(dim=1))
    ]))

    # ResNets expose the head as .fc; the others as .classifier.
    if arch in resnets:
        model.fc = classifier
    else:
        model.classifier = classifier

    return model
def get_densenet201(class_num):
    """DenseNet-201 with a fresh ``class_num``-way head.

    Returns:
        (model, input_size) where input_size is 224.
    """
    net = models.densenet201(pretrained=True)
    set_parameter_requires_grad(net)
    net.name = 'densenet201'

    # Size the new head to the backbone's feature width.
    in_features = net.classifier.in_features
    net.classifier = nn.Linear(in_features, class_num)

    return net, 224
Beispiel #9
0
 def __init__(self):
     """Custom 2-class head on top of DenseNet-201 features."""
     super(MydenseNet201, self).__init__()
     model = models.densenet201(pretrained = True)
     # NOTE(review): named "resnet_lay" but actually holds the
     # DenseNet-201 children minus the final classifier.
     self.resnet_lay=nn.Sequential(*list(model.children())[:-1])
     # 1x1 conv reduces the 1920 DenseNet feature channels to 512.
     self.conv1_lay = nn.Conv2d(1920, 512, kernel_size = (1,1),stride=(1,1))
     self.relu1_lay = nn.ReLU(inplace = True)
     self.drop_lay = nn.Dropout2d(0.5)
     self.global_average = nn.AdaptiveAvgPool2d((1,1))
     self.fc_Linear_lay2 = nn.Linear(512,2)
Beispiel #10
0
 def __init__(self, num_classes, pretrained=True):
     """FCN-32s-style head over DenseNet-201 features.

     Args:
         num_classes: number of output classes.
         pretrained: if True, load weights from ``dense201_path``
             (a module-level checkpoint path). TODO confirm format.
     """
     super(FCN32DenseNet, self).__init__()
     dense = models.densenet201()
     if pretrained:
         dense.load_state_dict(torch.load(dense201_path))
     self.features5 = dense.features
     # 7x7 conv maps the 1920 DenseNet feature channels to class scores.
     self.fconv5 = nn.Sequential(
         nn.ReLU(inplace=True), nn.Conv2d(1920, num_classes, kernel_size=7))
     initialize_weights(self.fconv5)
def densenet201(num_classes=1000, pretrained='imagenet'):
    r"""Densenet-201 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
    """
    # Always build the bare architecture; weights come from our own
    # pretrained_settings table rather than torchvision's download.
    model = models.densenet201(pretrained=False)
    if pretrained is None:
        return model
    settings = pretrained_settings['densenet201'][pretrained]
    return load_pretrained(model, num_classes, settings)
Beispiel #12
0
def densenet201(num_classes=1000, pretrained='imagenet'):
    r"""Densenet-201 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
    """
    net = models.densenet201(pretrained=False)
    if pretrained is not None:
        # Look up weight URL/normalization config for the requested source.
        cfg = pretrained_settings['densenet201'][pretrained]
        net = load_pretrained(net, num_classes, cfg)
    return net
Beispiel #13
0
def get_backbone(name, pretrained=True):
    """ Loading backbone, defining names for skip-connections and encoder output.

    Args:
        name: backbone identifier (resnet*, vgg16/vgg19, densenet*).
        pretrained: load ImageNet weights.

    Returns:
        (backbone, feature_names, backbone_output) — the module, the
        child-module names used as skip connections, and the name of
        the encoder output layer.

    Raises:
        NotImplementedError: for an unsupported ``name``.
    """

    # TODO: More backbones

    resnets = ('resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152')
    densenets = ('densenet121', 'densenet161', 'densenet169', 'densenet201')

    if name in resnets:
        # BUGFIX: 'resnet18' previously instantiated models.resnet50.
        backbone = getattr(models, name)(pretrained=pretrained)
    elif name == 'vgg16':
        backbone = models.vgg16_bn(pretrained=pretrained).features
    elif name == 'vgg19':
        backbone = models.vgg19_bn(pretrained=pretrained).features
    # elif name == 'inception_v3':
    #     backbone = models.inception_v3(pretrained=pretrained, aux_logits=False)
    elif name in densenets:
        # BUGFIX: densenet branches previously hard-coded pretrained=True,
        # ignoring the `pretrained` argument.
        backbone = getattr(models, name)(pretrained=pretrained).features
    else:
        # BUGFIX: was `raise NotImplemented(...)` — NotImplemented is a
        # sentinel value, not an exception class, so raising it fails.
        raise NotImplementedError(
            '{} backbone model is not implemented so far.'.format(name))

    if name.startswith('resnet'):
        feature_names = [None, 'relu', 'layer1', 'layer2', 'layer3']
        backbone_output = 'layer4'
    elif name == 'vgg16':
        # TODO: consider using a 'bridge' for VGG models, there is just a MaxPool between last skip and backbone output
        feature_names = ['5', '12', '22', '32', '42']
        backbone_output = '43'
    elif name == 'vgg19':
        feature_names = ['5', '12', '25', '38', '51']
        backbone_output = '52'
    # elif name == 'inception_v3':
    #     feature_names = [None, 'Mixed_5d', 'Mixed_6e']
    #     backbone_output = 'Mixed_7c'
    elif name.startswith('densenet'):
        feature_names = [
            None, 'relu0', 'denseblock1', 'denseblock2', 'denseblock3'
        ]
        backbone_output = 'denseblock4'
    else:
        raise NotImplementedError(
            '{} backbone model is not implemented so far.'.format(name))

    return backbone, feature_names, backbone_output
Beispiel #14
0
 def __init__(self,
              layers=None,
              pretrained=True,
              memory_efficient=False,
              first_avg_pool=False):
     """DenseNet-201 backbone wrapper.

     Passes the per-stage stride and channel tables expected by the
     parent feature-extractor class.
     """
     densenet = densenet201(pretrained=pretrained,
                            memory_efficient=memory_efficient)
     # Output strides and channel widths of the five feature stages.
     strides = [2, 4, 8, 16, 32]
     channels = [64, 128, 256, 896, 1920]
     super().__init__(densenet, strides, channels, layers, first_avg_pool)
def densenet201():
    """Export pretrained DenseNet-201 to ONNX at several batch sizes."""
    net = models.densenet201(pretrained=True)
    net.eval()
    for batch in (1, 2, 4, 8, 16):
        filename = 'densenet201i' + str(batch) + '.onnx'
        print(filename)
        # Dummy input fixes the exported graph's input shape.
        dummy = torch.randn(batch, 3, 224, 224)
        torch.onnx.export(net, dummy, filename,
                          keep_initializers_as_inputs=True)
Beispiel #16
0
def densenet_201_two(num_classes=5, attr_filters=(40, 128, 512)):
    """Wrap a pretrained DenseNet-201 in a DenseNet_Two two-head model."""
    backbone = models.densenet201(pretrained=True)
    wrapped = DenseNet_Two(backbone,
                           pool_size=1,
                           num_classes=num_classes,
                           attr_filters=attr_filters)
    return wrapped
Beispiel #17
0
def densenet201_(num_classes=NUM_CLASSES, pretrained=True):
    """
    lr=0.1, b=52
    """
    net = densenet201(pretrained=pretrained)
    net.classifier = nn.Linear(1920, num_classes)

    # Per-group lr multipliers: full rate on the head, 0.1x on features.
    param_groups = [(net.classifier, 1), (net.features, 0.1)]

    return net, param_groups
Beispiel #18
0
def get_model():
    """Load the food classifier (DenseNet-201, 101 classes) for inference.

    Loads 'food_classifier.pt' onto the CPU with strict=False so extra
    or missing checkpoint keys are tolerated, and returns the model in
    eval mode.
    """
    checkpoint_path = 'food_classifier.pt'
    model = models.densenet201(pretrained=True)
    # BUGFIX: removed the duplicated chained assignment
    # `model.classifier = model.classifier = ...`.
    model.classifier = nn.Sequential(
        nn.Linear(1920, 1024), nn.LeakyReLU(), nn.Linear(1024, 101))
    model.load_state_dict(torch.load(checkpoint_path, map_location='cpu'),
                          strict=False)
    model.eval()
    return model
Beispiel #19
0
def _fix_densenet_keys(state_dict):
    """Rename legacy DenseNet checkpoint keys to the current layout.

    Old torchvision checkpoints use e.g. ``...norm.1.weight`` where the
    current modules expect ``...norm1.weight``. Mutates and returns the
    given state dict.
    """
    import re
    pattern = re.compile(
        r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
    for key in list(state_dict.keys()):
        res = pattern.match(key)
        if res:
            state_dict[res.group(1) + res.group(2)] = state_dict.pop(key)
    return state_dict


def initialize_model(model_name, num_classes, feature_extract, use_pretrained=True):
    """Build a model from a local checkpoint and resize its head.

    Args:
        model_name: "vgg16", "densenet121" or "densenet201"; weights are
            loaded from the module-level ``MODELS`` path table.
        num_classes: size of the replacement output layer.
        feature_extract: forwarded to ``set_parameter_requires_grad``.
        use_pretrained: NOTE(review): currently unused — all branches
            load from local files. TODO confirm intent.

    Returns:
        (model, input_size); exits the process for an unknown name
        (preserving the original contract).
    """
    # Model specific variables
    model_ft = None
    input_size = 0

    if model_name == "vgg16": #VGG w/BN
        model_ft = models.vgg16_bn()
        model_ft.load_state_dict(torch.load(MODELS[model_name]))
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier[6].in_features
        model_ft.classifier[6] = nn.Linear(num_ftrs,num_classes)
        input_size = 224
    elif model_name in ("densenet121", "densenet201"):
        # The two DenseNet branches were identical except for the factory;
        # the key fixup lives in _fix_densenet_keys.
        state_dict = _fix_densenet_keys(torch.load(MODELS[model_name]))
        model_ft = getattr(models, model_name)()
        model_ft.load_state_dict(state_dict)

        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier.in_features
        model_ft.classifier = nn.Linear(num_ftrs, num_classes)
        input_size = 224
    else:
        print("Invalid model name, exiting...")
        sys.exit()
    return model_ft, input_size
Beispiel #20
0
def nn_classifier(model_arch='resnet152',
                  hidden_units=512,
                  hidden_activation=nn.ReLU(),
                  dropout=0,
                  output_activation=nn.LogSoftmax(dim=1),
                  lr=0.001,
                  train_on_gpu=True):
    """Build a frozen pretrained backbone with a new 102-way classifier.

    BUGFIXES: ``lr`` previously had no default while following defaulted
    parameters (a SyntaxError); the body referenced the undefined names
    ``model_name`` and ``learn_rate``; an unsupported arch only printed a
    message and then crashed with a NameError — it now raises.

    Args:
        model_arch: 'densenet121', 'densenet161', 'densenet201' or
            'resnet152' (head width looked up in ``model_architectures``).
        hidden_units: hidden-layer width of the new classifier.
        hidden_activation / output_activation: modules used in the head.
        dropout: dropout probability in the head.
        lr: Adam learning rate for the head parameters.
        train_on_gpu: move the model to CUDA.

    Returns:
        (model, criterion, optimizer, lrsheduler)
    """
    supported = ('densenet121', 'densenet161', 'densenet201', 'resnet152')
    if model_arch not in supported:
        raise ValueError(
            "This classifier only accepts densenet121, densenet161 and resnet152 as the pretrained models"
        )

    model = getattr(models, model_arch)(pretrained=True)

    # Freeze the backbone; only the new head will train.
    for param in model.parameters():
        param.requires_grad = False

    classifier = nn.Sequential(
        OrderedDict([
            #('adaptive_pool',nn.AdaptiveAvgPool2d((1,1))),
            #('adaptive_maxpool',nn.AdaptiveMaxPool2d((1,1))),
            #('flatten',nn.Flatten()),
            ('batch_norm',
             nn.BatchNorm1d(model_architectures[model_arch],
                            eps=1e-05,
                            momentum=0.1,
                            affine=True)),
            ('Dropout', nn.Dropout(dropout)),
            ('fc1', nn.Linear(model_architectures[model_arch], hidden_units)),
            ('activation', hidden_activation),
            ('fc2', nn.Linear(hidden_units, 102)),
            ('output', output_activation)
        ]))
    # ResNet exposes the head as .fc; DenseNets as .classifier.
    if model_arch == 'resnet152':
        model.fc = classifier
    else:
        model.classifier = classifier

    if train_on_gpu:
        model.cuda()

    criterion = nn.NLLLoss()
    head = model.fc if model_arch == 'resnet152' else model.classifier
    optimizer = optim.Adam(head.parameters(), lr=lr)
    lrsheduler = lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.1)

    return model, criterion, optimizer, lrsheduler
Beispiel #21
0
 def __init__(self, train_fe=False, feature_extraction_cnn='vgg', normalization=True, last_layer='', use_cuda=True):
     """Truncated CNN feature extractor (VGG16 / ResNet-101 / DenseNet-201).

     Keeps the chosen backbone only up to ``last_layer`` (or a
     per-backbone default cut point), optionally freezes it and moves
     it to the GPU.

     Args:
         train_fe: if False, freeze all extractor parameters.
         feature_extraction_cnn: 'vgg', 'resnet101', 'resnet101_v2'
             or 'densenet201'.
         normalization: stored flag; used elsewhere in the class.
         last_layer: name of the last layer to keep ('' = default).
         use_cuda: move the extractor to CUDA.
     """
     super(FeatureExtraction, self).__init__()
     self.normalization = normalization
     if feature_extraction_cnn == 'vgg':
         self.model = models.vgg16(pretrained=True)
         # keep feature extraction network up to indicated layer
         vgg_feature_layers=['conv1_1','relu1_1','conv1_2','relu1_2','pool1','conv2_1',
                      'relu2_1','conv2_2','relu2_2','pool2','conv3_1','relu3_1',
                      'conv3_2','relu3_2','conv3_3','relu3_3','pool3','conv4_1',
                      'relu4_1','conv4_2','relu4_2','conv4_3','relu4_3','pool4',
                      'conv5_1','relu5_1','conv5_2','relu5_2','conv5_3','relu5_3','pool5']
         if last_layer=='':
             last_layer = 'pool4'
         last_layer_idx = vgg_feature_layers.index(last_layer)
         self.model = nn.Sequential(*list(self.model.features.children())[:last_layer_idx+1])
     if feature_extraction_cnn == 'resnet101':
         self.model = models.resnet101(pretrained=True)
         # Named list mirrors the order of ResNet's top-level children
         # so last_layer can be resolved to a slice index.
         resnet_feature_layers = ['conv1',
                                  'bn1',
                                  'relu',
                                  'maxpool',
                                  'layer1',
                                  'layer2',
                                  'layer3',
                                  'layer4']
         if last_layer=='':
             last_layer = 'layer3'
         last_layer_idx = resnet_feature_layers.index(last_layer)
         resnet_module_list = [self.model.conv1,
                               self.model.bn1,
                               self.model.relu,
                               self.model.maxpool,
                               self.model.layer1,
                               self.model.layer2,
                               self.model.layer3,
                               self.model.layer4]

         self.model = nn.Sequential(*resnet_module_list[:last_layer_idx+1])
     if feature_extraction_cnn == 'resnet101_v2':
         self.model = models.resnet101(pretrained=True)
         # keep feature extraction network up to pool4 (last layer - 7)
         self.model = nn.Sequential(*list(self.model.children())[:-3])
     if feature_extraction_cnn == 'densenet201':
         self.model = models.densenet201(pretrained=True)
         # keep feature extraction network up to denseblock3
         # self.model = nn.Sequential(*list(self.model.features.children())[:-3])
         # keep feature extraction network up to transitionlayer2
         self.model = nn.Sequential(*list(self.model.features.children())[:-4])
     if not train_fe:
         # freeze parameters
         for param in self.model.parameters():
             param.requires_grad = False
     # move to GPU
     if use_cuda:
         self.model = self.model.cuda()
def get_model(tl_model):
    """Build the multi-output transfer-learning backbone.

    Depends on the module-level globals ``train_scratch`` and
    ``image_size``. Supported values: "Resnet34", "Resnet50", "DenseNet".
    The classification head is emptied and the model is wrapped in
    ``multi_output_model``.

    Raises:
        ValueError: for an unsupported ``tl_model`` (previously this
            fell through and crashed with a NameError).
    """
    # load the pretrained model, Resnet34 was used in the paper
    if tl_model == "Resnet34":
        if (train_scratch):
            model_ft = models.resnet34(pretrained=False)
        else:
            model_ft = models.resnet34(pretrained=False)
            # Download torchvision pretrained model from: https://download.pytorch.org/models/resnet34-333f7ec4.pth
            model_ft.load_state_dict(torch.load('resnet34-333f7ec4.pth'))
        if image_size == 1024:
            model_ft.avgpool = nn.AvgPool2d(kernel_size=28,
                                            stride=1,
                                            padding=0)
    elif tl_model == "Resnet50":
        if (train_scratch):
            model_ft = models.resnet50(pretrained=False)
        else:
            model_ft = models.resnet50(pretrained=True)
        if image_size == 1024:
            model_ft.avgpool = nn.AvgPool2d(kernel_size=28,
                                            stride=1,
                                            padding=0)
    elif tl_model == "DenseNet":
        if (train_scratch):
            model_ft = models.densenet201(pretrained=False)
        else:
            model_ft = models.densenet201(pretrained=True)
        if image_size == 1024:
            model_ft.avgpool = nn.AvgPool2d(kernel_size=28,
                                            stride=1,
                                            padding=0)
    else:
        raise ValueError('Unsupported tl_model: {}'.format(tl_model))

    # DenseNet exposes its head as .classifier; the ResNets as .fc.
    if tl_model == "DenseNet":
        num_ftrs = model_ft.classifier.in_features
        model_ft.classifier = nn.Sequential()
        model_ft = multi_output_model(model_ft, num_ftrs)
    else:
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Sequential()
        model_ft = multi_output_model(model_ft, num_ftrs)

    return model_ft
def densenet201(num_classes, pretrained=True, freeze=False):
    """DenseNet-201 with a ``num_classes`` head, wrapped in Add_Sigmoid.

    Returns:
        (model, 'Densenet201')
    """
    net = models.densenet201( pretrained=pretrained)
    if freeze:
        net = freeze_all_layers(net)

    # Size the replacement head to the backbone's feature width.
    head_in = net.classifier.in_features
    net.classifier = nn.Linear(head_in, num_classes)

    return Add_Sigmoid(net), 'Densenet201'
 def load(self,
          path='./f_trainner/fruitor_densenet201_model_linear_class.pt'):
     """Restore the fruit classifier from a saved checkpoint.

     The checkpoint dict is expected to hold 'class_to_index',
     'classifier_input_size' and 'state_dict'. Rebuilds the DenseNet-201
     head to the saved size before loading the weights, then moves the
     model to ``self.device``.
     """
     data = torch.load(path)
     self.class_to_index = data['class_to_index']
     self.densenet = models.densenet201(pretrained=True)
     # Head must match the checkpoint's shape before load_state_dict.
     self.densenet.classifier = nn.Linear(data['classifier_input_size'],
                                          len(data['class_to_index']))
     self.densenet = self.densenet.to(self.device)
     self.densenet.load_state_dict(data['state_dict'])
     print('Model loaded => {}'.format(path))
     print('Model Ready to predict')
Beispiel #25
0
 def _get_convnet(self, backbone, pretrained):
     """Build the convolutional trunk for the requested backbone.

     Returns:
         (convnet, shape, add_bn) — the feature module, a
         (channels, spatial) tuple describing its output, and whether a
         BatchNorm should be appended by the caller.

     Raises:
         ValueError: for an unrecognized ``backbone`` name.
     """
     if backbone == 'resnet34':
         convnet = nn.Sequential(
             *list(models.resnet34(pretrained=pretrained).children())[:-1])
         shape = (512, 4 * 5)
         add_bn = True
     elif backbone == 'resnet50':
         convnet = nn.Sequential(
             *list(models.resnet50(pretrained=pretrained).children())[:-1])
         shape = (2048, 4 * 5)
         add_bn = True
     elif backbone == 'resnet101':
         convnet = nn.Sequential(
             *list(models.resnet101(pretrained=pretrained).children())[:-1])
         shape = (2048, 4 * 5)
         add_bn = True
     elif backbone == 'resnet152':
         convnet = nn.Sequential(
             *list(models.resnet152(pretrained=pretrained).children())[:-1])
         shape = (2048, 4 * 5)
         add_bn = True
     elif backbone == 'densenet121':
         # DenseNets expose .features; add the final ReLU + pooling that
         # torchvision applies inside forward().
         convnet = nn.Sequential(
             models.densenet121(pretrained=pretrained).features,
             nn.ReLU(inplace=True), nn.AvgPool2d(7, stride=1))
         shape = (1024, 4 * 5)
         add_bn = False
     elif backbone == 'densenet201':
         convnet = nn.Sequential(
             models.densenet201(pretrained=pretrained).features,
             nn.ReLU(inplace=True), nn.AvgPool2d(7, stride=1))
         shape = (1920, 4 * 5)
         add_bn = False
     elif backbone == 'se_resnext_50':
         # pretrainedmodels-style factories take a settings key, not a bool.
         pretrain = 'imagenet' if pretrained else None
         convnet = nn.Sequential(*list(
             se_resnext50_32x4d(num_classes=1000,
                                pretrained=pretrain).children())[:-1])
         shape = (2048, 4 * 5)
         add_bn = True
     elif backbone == 'se_resnext_101':
         pretrain = 'imagenet' if pretrained else None
         convnet = nn.Sequential(*list(
             se_resnext101_32x4d(num_classes=1000,
                                 pretrained=pretrain).children())[:-1])
         shape = (2048, 4 * 5)
         add_bn = True
     elif backbone == 'sphere_net':
         convnet = PlainNet()
         shape = (512, 16 * 16)
         add_bn = False
     else:
         raise ValueError("Backbone [%s] not recognized." % backbone)
     return convnet, shape, add_bn
Beispiel #26
0
    def __init__(self, num_channel=21, classCount=2, isTrained=True):
        """DenseNet-201 adapted to ``num_channel``-channel inputs.

        A 3x3 conv + BN first maps the input channels to the 3 channels
        the pretrained backbone expects.

        Args:
            num_channel: number of input channels.
            classCount: number of output classes.
            isTrained: load ImageNet weights for the backbone.
        """
        super(DenseNet201, self).__init__()

        self.first_conv = nn.Sequential(
            nn.Conv2d(num_channel, 3, kernel_size=3, padding=1),
            nn.BatchNorm2d(3))
        densenet = models.densenet201(pretrained=isTrained)
        self.features = densenet.features
        # Size the new head to the backbone's feature width.
        kernelCount = densenet.classifier.in_features
        self.classifier = nn.Sequential(nn.Linear(kernelCount, classCount))
Beispiel #27
0
    def __init__(self, pretrained=True):
        """DenseNet-201 (full 1000-way output) plus a small MLP head.

        NOTE(review): fc1 takes 1000 + 1 inputs — presumably the CNN
        logits concatenated with one extra scalar feature in forward();
        confirm against the forward implementation.
        """
        super(MyModel_dnet201, self).__init__()
        self.cnn = models.densenet201(pretrained=pretrained)

        self.bn1 = nn.BatchNorm1d(1000)
        self.dr1 = nn.Dropout(p=0.25, inplace=False)
        self.fc1 = nn.Linear(1000 + 1, 512)
        self.r1 = nn.ReLU(inplace=True)
        self.bn2 = nn.BatchNorm1d(512)
        self.dr2 = nn.Dropout(p=0.5, inplace=False)
        self.fc2 = nn.Linear(512, 3)
Beispiel #28
0
    def __init__(self,
                 sample_rate: int,
                 window_size: int,
                 hop_size: int,
                 mel_bins: int,
                 fmin: int,
                 fmax: int,
                 classes_num: int,
                 apply_aug: bool,
                 top_db=None):
        """Audio tagger: log-mel front end + DenseNet-201 features.

        Args:
            sample_rate: audio sample rate for the mel filterbank.
            window_size: STFT window / FFT size.
            hop_size: STFT hop length.
            mel_bins: number of mel bands.
            fmin, fmax: mel filterbank frequency range.
            classes_num: number of output classes for the attention block.
            apply_aug: whether SpecAugment is applied (flag stored here).
            top_db: dynamic-range clipping for the log-mel extractor.
        """
        super().__init__()

        window = 'hann'
        center = True
        pad_mode = 'reflect'
        ref = 1.0
        amin = 1e-10
        self.interpolate_ratio = 32  # Downsampled ratio
        self.apply_aug = apply_aug

        # Spectrogram extractor
        self.spectrogram_extractor = Spectrogram(n_fft=window_size,
                                                 hop_length=hop_size,
                                                 win_length=window_size,
                                                 window=window,
                                                 center=center,
                                                 pad_mode=pad_mode,
                                                 freeze_parameters=True)

        # Logmel feature extractor
        self.logmel_extractor = LogmelFilterBank(sr=sample_rate,
                                                 n_fft=window_size,
                                                 n_mels=mel_bins,
                                                 fmin=fmin,
                                                 fmax=fmax,
                                                 ref=ref,
                                                 amin=amin,
                                                 top_db=top_db,
                                                 freeze_parameters=True)

        # Spec augmenter
        self.spec_augmenter = SpecAugmentation(time_drop_width=64,
                                               time_stripes_num=2,
                                               freq_drop_width=8,
                                               freq_stripes_num=2)

        self.bn0 = nn.BatchNorm2d(mel_bins)

        # 1920 matches the DenseNet-201 feature width feeding fc1.
        self.fc1 = nn.Linear(1920, 1024, bias=True)
        self.att_block = AttBlock(1024, classes_num, activation='sigmoid')

        self.init_weight()

        self.densenet_features = models.densenet201(pretrained=True).features
 def __init__(self, outnum=14, gpsize=4):
     """DenseNet-201 features + extra transition conv + max-pool head.

     Args:
         outnum: number of output classes.
         gpsize: kernel size of the final global max-pool.
     """
     super(MyDensNet201, self).__init__()
     original_model = models.densenet201(pretrained=True)
     self.features = original_model.features
     # Extra block reduces the 1920 feature channels to 1024 and halves
     # the spatial resolution before global pooling.
     self.features.add_module(
         'transit',
         nn.Sequential(nn.Conv2d(1920, 1024, 3, padding=1),
                       nn.BatchNorm2d(1024), nn.ReLU(inplace=True),
                       nn.MaxPool2d(2, padding=1)))
     self.features.add_module('gpool', nn.MaxPool2d(gpsize))
     self.classifier = nn.Linear(1024, outnum)
Beispiel #30
0
def pre_trained_model(checkpoint_name='densenet201'):
    """Pick a pretrained torchvision backbone from a checkpoint name.

    Args:
        checkpoint_name: any string containing 'densenet201' or
            'densenet121'.

    Returns:
        The matching pretrained model.

    Raises:
        ValueError: when the name matches neither architecture
            (previously this fell through and crashed with an
            UnboundLocalError on the return).
    """
    #Load pre-trained model
    if checkpoint_name.find('densenet201') != -1:
        x = models.densenet201(pretrained=True)
    elif checkpoint_name.find('densenet121') != -1:
        x = models.densenet121(pretrained=True)
    else:
        raise ValueError(
            'Unsupported checkpoint name: {}'.format(checkpoint_name))

    return x
Beispiel #31
0
def loadClassifierModel(checkpointpath):
    """Restore a DenseNet-201 classifier from a saved checkpoint dict.

    The checkpoint is expected to hold 'classifier' (the head module),
    'state_dict' and 'class_to_idx'.

    Args:
        checkpointpath: path to a torch checkpoint file.

    Returns:
        The restored model with ``class_to_idx`` attached.
    """
    from Utilities.Network import Network
    checkpoint = torch.load(checkpointpath, map_location='cpu')

    model_onload = models.densenet201(pretrained=True)

    # Attach the saved head before loading weights so shapes match.
    model_onload.classifier = checkpoint['classifier']
    # BUGFIX: the state dict was previously *assigned over* the
    # load_state_dict method instead of being loaded into the model.
    model_onload.load_state_dict(checkpoint['state_dict'])
    model_onload.class_to_idx = checkpoint['class_to_idx']
    return model_onload
def get_model(num_classes, model_type='resnet50'):
    """Build a CUDA backbone with its head resized to ``num_classes``.

    Args:
        num_classes: size of the replacement output layer.
        model_type: resnet{50,101,152} or densenet{121,161,201}.

    Returns:
        The model on CUDA with a fresh Linear head.

    Raises:
        ValueError: for an unsupported ``model_type`` (previously this
            fell through and crashed with a NameError on the return).
    """
    if model_type == 'resnet50':
        model = resnet50(pretrained=True).cuda()
        model.fc = nn.Linear(model.fc.in_features, num_classes).cuda()
    elif model_type == 'resnet101':
        model = resnet101(pretrained=True).cuda()
        model.fc = nn.Linear(model.fc.in_features, num_classes).cuda()
    elif model_type == 'resnet152':
        model = resnet152(pretrained=True).cuda()
        model.fc = nn.Linear(model.fc.in_features, num_classes).cuda()
    elif model_type == 'densenet121':
        model = densenet121(pretrained=True).cuda()
        model.classifier = nn.Linear(model.classifier.in_features, num_classes).cuda()
    elif model_type == 'densenet161':
        model = densenet161(pretrained=True).cuda()
        model.classifier = nn.Linear(model.classifier.in_features, num_classes).cuda()
    elif model_type == 'densenet201':
        model = densenet201(pretrained=True).cuda()
        model.classifier = nn.Linear(model.classifier.in_features, num_classes).cuda()
    else:
        raise ValueError('Unsupported model_type: {}'.format(model_type))
    return model
Beispiel #33
0
def dn201(pre):
    """Return the first child module group of densenet201(pre)."""
    model = densenet201(pre)
    return children(model)[0]
def vgg16(pre):
    """Return the first child module group of vgg16_bn(pre)."""
    net = vgg16_bn(pre)
    return children(net)[0]
Beispiel #34
0
#----------------------------------------------------------------------------------------------

import argparse
import os
from six import text_type as _text_type
from mmdnn.conversion.examples.imagenet_test import TestKit
import torch
import torchvision.models as models


# Maps CLI network names to zero-argument factories for pretrained
# torchvision models; lambdas defer the (slow) weight download until
# a network is actually selected.
NETWORKS_MAP = {
    'inception_v3'      : lambda : models.inception_v3(pretrained=True),
    'vgg16'             : lambda : models.vgg16(pretrained=True),
    'vgg19'             : lambda : models.vgg19(pretrained=True),
    'resnet152'         : lambda : models.resnet152(pretrained=True),
    'densenet'          : lambda : models.densenet201(pretrained=True),
    'squeezenet'        : lambda : models.squeezenet1_1(pretrained=True)
}


def _main():
    """Parse command-line arguments selecting a network to convert.

    NOTE(review): ``args`` is parsed but not used here — the conversion
    logic presumably continues outside this excerpt; confirm before
    treating this function as complete.
    """
    parser = argparse.ArgumentParser()

    parser.add_argument('-n', '--network',
                        type=_text_type, help='Model Type', required=True,
                        choices=NETWORKS_MAP.keys())

    parser.add_argument('-i', '--image', type=_text_type, help='Test Image Path')

    args = parser.parse_args()
Beispiel #35
0
def dn201(pre):
    """Return the first child module group of densenet201(pre)."""
    backbone = densenet201(pre)
    head_and_rest = children(backbone)
    return head_and_rest[0]

@_fastai_model('Vgg-16 with batch norm added', 'Very Deep Convolutional Networks for Large-Scale Image Recognition',