Code example #1
    def setUp(self):
        self.model = models.VGG(make_layers(cfgs['A']), 10)
        self.model_name = 'vgg'
        self.file_path = './tmp/vgg.pth'
        self.mt = ModelTool(self.model, self.model_name, self.file_path)

        self.train_transform = transforms.Compose([
            transforms.Resize(32),
            transforms.ToTensor(),
        ])
        self.test_transform = transforms.Compose([
            transforms.Resize(32),
            transforms.ToTensor(),
        ])

        self.train_set = datasets.MNIST(
            './tmp/dataset/mnist',
            train=True,
            transform=self.train_transform,
            download=True,
        )
        self.test_set = datasets.MNIST('./tmp/dataset/mnist',
                                       train=False,
                                       transform=self.test_transform,
                                       download=True)

        self.train_loader = DataLoader(self.train_set, 128)
        self.test_loader = DataLoader(self.test_set, 128)
Code example #2
File: craft.py  Project: w121211/CRAFT-pytorch
    def __init__(self, pretrained=True, freeze=True):
        super(vgg16_bn, self).__init__()
        # features = models.vgg16_bn(pretrained=pretrained).features
        cfg = [64, 64, "M", 128, 128, "M", 256, 256, 256, "M",
               512, 512, 512, "M", 512, 512, 512, "M"]
        features = models.VGG(
            make_layers(cfg, batch_norm=True, in_channels=4 + 3)).features

        self.slice1 = torch.nn.Sequential()
        self.slice2 = torch.nn.Sequential()
        self.slice3 = torch.nn.Sequential()
        self.slice4 = torch.nn.Sequential()
        self.slice5 = torch.nn.Sequential()
        for x in range(12):  # conv2_2
            self.slice1.add_module(str(x), features[x])
        for x in range(12, 19):  # conv3_3
            self.slice2.add_module(str(x), features[x])
        for x in range(19, 29):  # conv4_3
            self.slice3.add_module(str(x), features[x])
        for x in range(29, 39):  # conv5_3
            self.slice4.add_module(str(x), features[x])

        # fc6, fc7 without atrous conv
        self.slice5 = torch.nn.Sequential(
            nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
            nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6),
            nn.Conv2d(1024, 1024, kernel_size=1),
        )

        if not pretrained:
            init_weights(self.slice1.modules())
            init_weights(self.slice2.modules())
            init_weights(self.slice3.modules())
            init_weights(self.slice4.modules())

        init_weights(
            self.slice5.modules())  # no pretrained model for fc6 and fc7

        if freeze:
            for param in self.slice1.parameters():  # only first conv
                param.requires_grad = False
Code example #3
def download_model(saving_path='.'):
    # inception net
    # model = models.Inception3()
    # model.load_state_dict(model_zoo.load_url(model_urls['inception_v3_google'], model_dir=saving_path, progress=True))

    # resnet
    model = models.ResNet(_Bottleneck, [3, 8, 36, 3])
    model.load_state_dict(model_zoo.load_url(model_urls['resnet152'], model_dir=saving_path, progress=True))
    # save_model(model, 'resnet152.pkl', saving_path)

    # alex net
    model = models.AlexNet()
    model.load_state_dict(model_zoo.load_url(model_urls['alexnet'], model_dir=saving_path, progress=True))
    # save_model(model, 'alexnet.pkl', saving_path)

    # vgg
    model = models.VGG(_vgg_make_layers(_vgg_cfg['E'], batch_norm=True), init_weights=False)
    model.load_state_dict(model_zoo.load_url(model_urls['vgg19_bn'], model_dir=saving_path, progress=True))
    # save_model(model, 'vgg19.pkl', saving_path)

    # squeeze net
    model = models.SqueezeNet(version=1.1)
    model.load_state_dict(model_zoo.load_url(model_urls['squeezenet1_1'], model_dir=saving_path, progress=True))
    # save_model(model, 'squeezenet1_1.pkl', saving_path)

    # dense net
    model = models.DenseNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 48, 32))
    pattern = re.compile(
        r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
    state_dict = model_zoo.load_url(model_urls['densenet201'], model_dir=saving_path, progress=True)
    for key in list(state_dict.keys()):
        res = pattern.match(key)
        if res:
            new_key = res.group(1) + res.group(2)
            state_dict[new_key] = state_dict[key]
            del state_dict[key]
    model.load_state_dict(state_dict)
    # save_model(model, 'densenet201.pkl', saving_path)

    # googlenet
    kwargs = dict()
    kwargs['transform_input'] = True
    kwargs['aux_logits'] = False
    # if kwargs['aux_logits']:
    #     warnings.warn('auxiliary heads in the pretrained googlenet model are NOT pretrained, '
    #                   'so make sure to train them')
    original_aux_logits = kwargs['aux_logits']
    kwargs['aux_logits'] = True
    kwargs['init_weights'] = False
    model = models.GoogLeNet(**kwargs)
    model.load_state_dict(model_zoo.load_url(model_urls['googlenet']))
    if not original_aux_logits:
        model.aux_logits = False
        del model.aux1, model.aux2
        # save_model(model, 'googlenet.pkl', saving_path)

    # resnext
    model = models.resnext101_32x8d(pretrained=False)
    model.load_state_dict(model_zoo.load_url(model_urls['resnext101_32x8d'], model_dir=saving_path, progress=True))
Code example #4
File: model.py  Project: kkraoj/leafnet
def selectModel(MODEL_ID):
    if MODEL_ID == 1:
        BATCH_SIZE = 128
        NUM_EPOCHS = 100
        LEARNING_RATE = 1e-1  #start from learning rate after 40 epochs
        ALPHA = 6
        model = models.resnet18(pretrained=False)
        model.fc = nn.Linear(512, NUM_CLASSES)  # nn.Linear(input_size, num_classes)
        modelName = "resnet18_augment"
    elif MODEL_ID == 2:
        BATCH_SIZE = 128
        NUM_EPOCHS = 72
        LEARNING_RATE = 1e-1  #start from learning rate after 40 epochs
        ALPHA = 6
        model = models.resnet18(pretrained=False)
        model.fc = nn.Linear(512, NUM_CLASSES)  # nn.Linear(input_size, num_classes)
        modelName = "resnet18_decay_adam"
    elif MODEL_ID == 3:
        BATCH_SIZE = 128
        NUM_EPOCHS = 50
        LEARNING_RATE = 1e-1  #start from learning rate after 40 epochs
        ALPHA = 6
        model = models.VGG('VGG16')
        model.fc = nn.Linear(512, NUM_CLASSES)
        modelName = "VGG16"
    elif MODEL_ID == 4:
        BATCH_SIZE = 64
        NUM_EPOCHS = 100
        LEARNING_RATE = 1e-0  #start from learning rate after 40 epochs
        ALPHA = 10
        model = models.resnet50()
        model.fc = nn.Linear(2048, NUM_CLASSES)
        modelName = "resnet50"
    elif MODEL_ID == 5:
        BATCH_SIZE = 8
        NUM_EPOCHS = 100
        LEARNING_RATE = 1e-1  #start from learning rate after 40 epochs
        ALPHA = 6
        model = models.densenet121()
        model.fc = nn.Linear(512, NUM_CLASSES)
        modelName = "densenet121"
    elif MODEL_ID == 6:
        BATCH_SIZE = 1024
        NUM_EPOCHS = 100
        LEARNING_RATE = 1e-1  #start from learning rate after 40 epochs
        ALPHA = 6
        model = nn.Sequential()
        model.add_module(
            "linear", torch.nn.Linear(224 * 224 * 3, NUM_CLASSES, bias=False))
        # RuntimeError: size mismatch, m1: [172032 x 224], m2: [150528 x 185] at /opt/conda/conda-bld/pytorch_1524586445097/work/aten/src/THC/generic/THCTensorMathBlas.cu:249
        # model = nn.Linear(224*224*3, NUM_CLASSES)
        #error size mismatch, m1: [86016 x 224], m2: [150528 x 185] at /opt/conda/conda-bld/pytorch_1524586445097/work/aten/src/THC/generic/THCTensorMathBlas.cu:249
        modelName = "logisticRegression"
    else:
        raise ValueError('Model ID must be an integer between 1 and 6')
    return model, modelName, BATCH_SIZE, NUM_EPOCHS, LEARNING_RATE, ALPHA
Code example #5
def vgg16(pretrained, **kwargs):
    """VGG 16-layer model (configuration "D")

    Args:
        pretrained (str): path to a saved state dict; the weights are loaded
            from this file via ``torch.load``
    """
    model = models.VGG(make_layers(cfg['D']), **kwargs)
    model.load_state_dict(torch.load(pretrained))
    return model
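
A brief, hypothetical usage sketch of the vgg16 helper above; the checkpoint path and num_classes value are illustrative assumptions, not part of the original example.

# Hypothetical usage (the checkpoint path below is illustrative only)
model = vgg16('./weights/vgg16_imagenet.pth', num_classes=1000)
model.eval()  # switch to inference mode before running predictions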
Code example #6
File: classifier.py  Project: soo1234/pynet
    def __init__(self, cfg, num_classes, batch_norm=False, init_weights=True,
                 pretrained=None, make_layers=models.vgg.make_layers,
                 optimizer_name="Adam", learning_rate=1e-3,
                 loss_name="NLLLoss", metrics=None, use_cuda=False, **kwargs):
        """ Class initilization.

        Parameters
        ----------
        cfg: list
            the feature configuration: the number of output channels for each
            convolution, or 'M' for a max-pooling layer.
        num_classes: int
            the number of classes to predict.
        batch_norm: bool, default False
            use batch normalization after each convolution.
        init_weights: bool, default True
            initialize the model weights.
        pretrained: str, default None
            update the weights of the model using this state information.
        make_layers: @func
            a function that builds the feature layers; by default, 2D max
            pooling with kernel size 2 and stride 2, and convolutions with
            kernel size 3 and padding 1.
        optimizer_name: str, default 'Adam'
            the name of the optimizer: see 'torch.optim' for a description
            of available optimizers.
        learning_rate: float, default 1e-3
            the optimizer learning rate.
        loss_name: str, default 'NLLLoss'
            the name of the loss: see 'torch.nn' for a description
            of available losses.
        metrics: list of str
            a list of extra metrics that will be computed.
        use_cuda: bool, default False
            whether to use GPU or CPU.
        kwargs: dict
            specify directly a custom 'optimizer' or 'loss'. Can also be used
            to set specific optimizer parameters.
        """
        self.model = models.VGG(
            features=make_layers(cfg, batch_norm=batch_norm),
            num_classes=num_classes,
            init_weights=init_weights)
        super().__init__(
            optimizer_name=optimizer_name,
            learning_rate=learning_rate,
            loss_name=loss_name,
            metrics=metrics,
            use_cuda=use_cuda,
            pretrained=pretrained,
            **kwargs)
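
A minimal instantiation sketch for the classifier above, assuming the class is named VGGClassifier; both the class name and the VGG11-style cfg list are illustrative, following the convention the docstring describes.

# Hypothetical instantiation (the class name VGGClassifier is an assumption)
vgg11_cfg = [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M']
clf = VGGClassifier(
    cfg=vgg11_cfg,        # output channels per convolution, 'M' = max pooling
    num_classes=10,
    batch_norm=True,      # add BatchNorm2d after every convolution
    optimizer_name="Adam",
    learning_rate=1e-3,
    loss_name="NLLLoss",
)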
Code example #7
def load_model(model_name):
    global MODEL_NAME
    # Detect if we have a GPU available
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    if model_name == 'ResNet':
        model = models.resnet152(pretrained=False)
        model.load_state_dict(torch.load(os.path.join(PRETRAINED_DEEP_MODEL_DIR, MODEL_NAME[model_name]), map_location=device))
    elif model_name == 'AlexNet':
        model = models.AlexNet()
        model.load_state_dict(torch.load(os.path.join(PRETRAINED_DEEP_MODEL_DIR, MODEL_NAME[model_name]), map_location=device))
    elif model_name == 'VGG':
        model = models.VGG(_vgg_make_layers(_vgg_cfg['E'], batch_norm=True), init_weights=False)
        model.load_state_dict(torch.load(os.path.join(PRETRAINED_DEEP_MODEL_DIR, MODEL_NAME[model_name]), map_location=device))
    elif model_name == 'DenseNet':
        model = models.DenseNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 48, 32))
        pattern = re.compile(
            r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
        state_dict = torch.load(os.path.join(PRETRAINED_DEEP_MODEL_DIR, MODEL_NAME[model_name]), map_location=device)
        for key in list(state_dict.keys()):
            res = pattern.match(key)
            if res:
                new_key = res.group(1) + res.group(2)
                state_dict[new_key] = state_dict[key]
                del state_dict[key]
        model.load_state_dict(state_dict)
    elif model_name == 'GoogleNet':
        # googlenet
        kwargs = dict()
        kwargs['transform_input'] = True
        kwargs['aux_logits'] = False
        # if kwargs['aux_logits']:
        #     warnings.warn('auxiliary heads in the pretrained googlenet model are NOT pretrained, '
        #                   'so make sure to train them')
        original_aux_logits = kwargs['aux_logits']
        kwargs['aux_logits'] = True
        kwargs['init_weights'] = False
        model = models.GoogLeNet(**kwargs)
        model.load_state_dict(torch.load(os.path.join(PRETRAINED_DEEP_MODEL_DIR, MODEL_NAME[model_name]), map_location=device))
        if not original_aux_logits:
            model.aux_logits = False
            del model.aux1, model.aux2
    elif model_name == 'ResNext101':
        model = models.resnext101_32x8d(pretrained=False)
        model.load_state_dict(torch.load(os.path.join(PRETRAINED_DEEP_MODEL_DIR, MODEL_NAME[model_name]), map_location=device))
    else:
        raise ValueError("Model name must be one of ['VGG', 'ResNet', 'DenseNet', 'AlexNet', 'GoogleNet', 'ResNext101']")

    return model
Code example #8
def create_net(num_classes, dnn='resnet20', **kwargs):
    ext = None
    if dnn in ['resnet20', 'resnet56', 'resnet110']:
        net = models.__dict__[dnn](num_classes=num_classes)
    elif dnn == 'resnet50':
        #net = models.__dict__['resnet50'](num_classes=num_classes)
        net = torchvision.models.resnet50(num_classes=num_classes)
    elif dnn == 'inceptionv4':
        net = models.inceptionv4(num_classes=num_classes)
    elif dnn == 'inceptionv3':
        net = torchvision.models.inception_v3(num_classes=num_classes)
    elif dnn == 'vgg16i':  # vgg16 for imagenet
        net = torchvision.models.vgg16(num_classes=num_classes)
    elif dnn == 'vgg19':  # vgg19 for imagenet
        net = torchvision.models.vgg19(num_classes=num_classes)
    elif dnn == 'googlenet':
        net = models.googlenet()
    elif dnn == 'mnistnet':
        net = MnistNet()
    elif dnn == 'fcn5net':
        net = models.FCN5Net()
    elif dnn == 'lenet':
        net = models.LeNet()
    elif dnn == 'lr':
        net = models.LinearRegression()
    elif dnn == 'vgg16':
        net = models.VGG(dnn.upper())
    elif dnn == 'alexnet':
        net = torchvision.models.alexnet()
    elif dnn == 'lstman4':
        net, ext = models.LSTMAN4(datapath=kwargs['datapath'])
    elif dnn == 'lstm':
        net = lstmpy.lstm(vocab_size=kwargs['vocab_size'],
                          batch_size=kwargs['batch_size'])

    else:
        errstr = 'Unsupported neural network %s' % dnn
        logger.error(errstr)
        raise ValueError(errstr)
    return net, ext
Code example #9
    def load_vgg19(self, model_path):
        """Load the pretrained VGG19 model."""
        cnn = models.VGG(models.vgg.make_layers(models.vgg.cfg['E']))
        cnn.load_state_dict(torch.load(model_path))
        return cnn.features
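
A short, hypothetical companion sketch for the loader above: when VGG19 serves only as a fixed feature extractor, its parameters are typically frozen. The method name below is an assumption for illustration.

    def build_feature_extractor(self, model_path):
        # Hypothetical companion method: freeze VGG19 when it is used only
        # as a fixed feature extractor (method name is an assumption).
        features = self.load_vgg19(model_path)
        for param in features.parameters():
            param.requires_grad = False
        return features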