Example #1
def download_model(saving_path='.'):
    # Assumes `import re`, `from torchvision import models`,
    # `from torch.utils import model_zoo`, and a `model_urls` dict merged from
    # torchvision's per-model modules (plus the private helpers _Bottleneck,
    # _vgg_make_layers, and _vgg_cfg used below).

    # inception net
    # model = models.Inception3()
    # model.load_state_dict(model_zoo.load_url(model_urls['inception_v3_google'], model_dir=saving_path, progress=True))

    # resnet
    model = models.ResNet(_Bottleneck, [3, 8, 36, 3])
    model.load_state_dict(model_zoo.load_url(model_urls['resnet152'], model_dir=saving_path, progress=True))
    # save_model(model, 'resnet152.pkl', saving_path)

    # alex net
    model = models.AlexNet()
    model.load_state_dict(model_zoo.load_url(model_urls['alexnet'], model_dir=saving_path, progress=True))
    # save_model(model, 'alexnet.pkl', saving_path)

    # vgg
    model = models.VGG(_vgg_make_layers(_vgg_cfg['E'], batch_norm=True), init_weights=False)
    model.load_state_dict(model_zoo.load_url(model_urls['vgg19_bn'], model_dir=saving_path, progress=True))
    # save_model(model, 'vgg19.pkl', saving_path)

    # squeeze net
    model = models.SqueezeNet(version=1.1)
    model.load_state_dict(model_zoo.load_url(model_urls['squeezenet1_1'], model_dir=saving_path, progress=True))
    # save_model(model, 'squeezenet1_1.pkl', saving_path)

    # dense net
    model = models.DenseNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 48, 32))
    # old densenet checkpoints store keys like 'norm.1.weight'; current
    # torchvision modules expect 'norm1.weight', so rewrite the keys first
    pattern = re.compile(
        r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
    state_dict = model_zoo.load_url(model_urls['densenet201'], model_dir=saving_path, progress=True)
    for key in list(state_dict.keys()):
        res = pattern.match(key)
        if res:
            new_key = res.group(1) + res.group(2)
            state_dict[new_key] = state_dict[key]
            del state_dict[key]
    model.load_state_dict(state_dict)
    # save_model(model, 'densenet201.pkl', saving_path)

    # googlenet: build with the aux heads enabled so the checkpoint keys match,
    # load the weights, then strip the aux heads since they are not wanted here
    model = models.GoogLeNet(transform_input=True, aux_logits=True, init_weights=False)
    model.load_state_dict(model_zoo.load_url(model_urls['googlenet'], model_dir=saving_path, progress=True))
    model.aux_logits = False
    del model.aux1, model.aux2
    # save_model(model, 'googlenet.pkl', saving_path)

    # resnext
    model = models.resnext101_32x8d(pretrained=False)
    model.load_state_dict(model_zoo.load_url(model_urls['resnext101_32x8d'], model_dir=saving_path, progress=True))
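
# Hedged addition, not part of the original example: the same
# download-and-load pattern for one architecture, using torch.hub's
# load_state_dict_from_url (the current name of model_zoo.load_url). The URL
# is the published torchvision resnet152 checkpoint.
import torch
from torch.hub import load_state_dict_from_url
from torchvision import models

def download_resnet152(saving_path='.'):
    model = models.resnet152()  # same graph as models.ResNet(Bottleneck, [3, 8, 36, 3])
    url = 'https://download.pytorch.org/models/resnet152-b121ed2d.pth'
    model.load_state_dict(load_state_dict_from_url(url, model_dir=saving_path, progress=True))
    return model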
Example #2
def resnet50():
    # define a ResNet-50 with 2 output classes
    resnet50 = models.ResNet(Bottleneck, [3, 4, 6, 3], num_classes=2)

    # change the initial conv layer to 1 input channel to match grayscale input
    resnet50.conv1 = nn.Conv2d(1,
                               64,
                               kernel_size=7,
                               stride=2,
                               padding=3,
                               bias=False)
    return resnet50
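
# Hedged addition, not part of the original example: a quick shape check for
# the grayscale variant above.
import torch
model = resnet50()
x = torch.randn(2, 1, 224, 224)  # a batch of two single-channel images
print(model(x).shape)            # torch.Size([2, 2])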
Example #3
def make_resnet_layers(inplanes, layer_blocks, layer_planes, layer_strides):
    # build a throwaway ResNet-18 just to reuse its _make_layer helper, then
    # override inplanes so the first custom stage starts at `inplanes` channels
    resnet = models.ResNet(models.resnet.BasicBlock, [2, 2, 2, 2])
    resnet.inplanes = inplanes

    layers = []
    for blocks, planes, stride in zip(layer_blocks, layer_planes,
                                      layer_strides):
        layer = resnet._make_layer(models.resnet.BasicBlock,
                                   planes=planes,
                                   blocks=blocks,
                                   stride=stride)
        layers.append(layer)

    return nn.Sequential(*layers)
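
# Hedged addition: an illustrative call; the stage depths, widths, and strides
# below are made up. The returned trunk expects inputs with `inplanes` channels.
trunk = make_resnet_layers(inplanes=64,
                           layer_blocks=[2, 2, 2],
                           layer_planes=[64, 128, 256],
                           layer_strides=[1, 2, 2])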
Example #4
    def __init__(self, num_classes=1):
        super(Custom34, self).__init__()

        # resnet = models.resnet34(pretrained=True)
        resnet = models.ResNet(BasicBlock, [3, 4, 6, 3], num_classes=1)

        # self.first = nn.Sequential(
        #     shy.layer.Conv2d(3, 64, kernel_size=7, padding=3, bn=True, activation='relu'),
        # )

        self.first = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu)

        self.encoder2 = resnet.layer1
        self.encoder3 = resnet.layer2
        self.encoder4 = resnet.layer3
        self.encoder5 = resnet.layer4

        self.center = nn.Sequential(
            shy.layer.Conv2d(512,
                             512,
                             kernel_size=3,
                             padding=1,
                             bn=True,
                             activation='relu'),
            shy.layer.Conv2d(512,
                             256,
                             kernel_size=3,
                             padding=1,
                             bn=True,
                             activation='relu'),
            nn.MaxPool2d(kernel_size=2, stride=2))

        # Decoder
        self.decoder5 = Decoder(256 + 512, 512, 64)
        self.decoder4 = Decoder(64 + 256, 256, 64)
        self.decoder3 = Decoder(64 + 128, 128, 64)
        self.decoder2 = Decoder(64 + 64, 64, 64)
        self.decoder1 = Decoder(64, 32, 64)

        # Final Classifier
        self.logit = nn.Sequential(
            nn.Dropout2d(p=0.5),
            shy.layer.Conv2d(320,
                             64,
                             kernel_size=3,
                             padding=1,
                             activation='relu'),
            shy.layer.Conv2d(64, num_classes, kernel_size=1),
            nn.MaxPool2d(kernel_size=2, stride=2))
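
# Hedged addition: forward() is not shown, but the 320 input channels of
# `logit` match five 64-channel decoder outputs concatenated
# hypercolumn-style (5 * 64 = 320). Shape-only illustration:
import torch
maps = [torch.randn(1, 64, 32, 32) for _ in range(5)]
print(torch.cat(maps, dim=1).shape)  # torch.Size([1, 320, 32, 32])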
Example #5
    def __init__(self, pretrained=False, pretrained_model=None):
        super(ResNet3_18, self).__init__()

        # self.model = models.ResNet(Bottleneck, [3, 8, 36, 3],num_classes=1000)
        # self.model.load_state_dict(model_zoo.load_url('https://s3.amazonaws.com/pytorch/models/resnet152-b121ed2d.pth'))

        self.model = models.ResNet(BasicBlock, [2, 2, 2, 2], num_classes=1000)
        if pretrained:
            self.model.load_state_dict(torch.load(pretrained_model))
        # self.model.load_state_dict(torch.load('1.pth'))
        for param in self.model.parameters():
            param.requires_grad = False

        self.model.fc = nn.Linear(512 * BasicBlock.expansion, 1000)  # expansion is 1 for BasicBlock, so in_features is 512 (512 * 4 only matches Bottleneck)
        self.fc = nn.Linear(1000, 5)
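
    # Hedged addition: the snippet ends before any forward(); a minimal method
    # consistent with the attributes above (an assumption, not the original):
    def forward(self, x):
        return self.fc(self.model(x))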
Example #6
    def __init__(self, embed_size):
        """Load the pretrained ResNet-152 and replace top fc layer."""
        super(EncoderCNN, self).__init__()
        # resnet = models.resnet152(pretrained=True, model_dir='cache')  # hyli modified
        resnet = models.ResNet(Bottleneck, [3, 8, 36, 3])
        pretrained = True
        if pretrained:
            resnet.load_state_dict(model_zoo.load_url(model_urls['resnet152'], model_dir='cache'))

        modules = list(resnet.children())[:-1]      # delete the last fc layer.
        self.resnet = nn.Sequential(*modules)
        self.linear = nn.Linear(resnet.fc.in_features, embed_size)
        self.bn = nn.BatchNorm1d(embed_size, momentum=0.01)

        for _, param in self.resnet.named_parameters():
            param.requires_grad = False
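
    # Hedged addition: forward() is not shown; a method consistent with the
    # modules above (an assumption) flattens the pooled features first:
    def forward(self, images):
        features = self.resnet(images)
        features = features.reshape(features.size(0), -1)
        return self.bn(self.linear(features))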
Example #7
    def __init__(self, pretrained=True):
        super(ResNet2, self).__init__()
        self.pretrained = pretrained
        self.conv1 = conv3x3(3, 32, stride=2)
        self.bn1 = nn.BatchNorm2d(32)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(32, 3)
        self.bn2 = nn.BatchNorm2d(3)

        # He initialization for the stem convs; unit-weight, zero-bias for the BNs
        for m in (self.conv1, self.conv2):
            n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            m.weight.data.normal_(0, math.sqrt(2. / n))
        for m in (self.bn1, self.bn2):
            m.weight.data.fill_(1)
            m.bias.data.zero_()

        self.model = models.ResNet(Bottleneck, [3, 8, 36, 3], num_classes=1000)
        # self.model.load_state_dict(model_zoo.load_url('https://s3.amazonaws.com/pytorch/models/resnet152-b121ed2d.pth'))

        # self.model = models.ResNet(BasicBlock, [2, 2, 2, 2], num_classes=1000)
        # self.model.load_state_dict(torch.load('1.pth'))

        for param in self.model.parameters():
            param.requires_grad = False

        self.fc = nn.Linear(1000, 5)
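
    # Hedged addition: forward() is not shown; the shapes suggest a learnable
    # 3->32->3 preprocessing stem feeding the frozen ResNet (an assumption):
    def forward(self, x):
        x = self.relu(self.bn1(self.conv1(x)))
        x = self.relu(self.bn2(self.conv2(x)))
        return self.fc(self.model(x))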
Example #8
    def __init__(self, block, layers, num_classes, zero_init_residual=False,
                 groups=1, width_per_group=64,
                 replace_stride_with_dilation=None, norm_layer=None,
                 pretrained=None, optimizer_name="Adam", learning_rate=1e-3,
                 loss_name="NLLLoss", metrics=None, use_cuda=False, **kwargs):
        """ Class initilization.

        Parameters
        ----------
        block: nn Module
            one block architecture.
        layers: 4-tuple
            controls the number of blocks in each layer.
        num_classes: int
            number of classification classes.
        zero_init_residual: bool, default False
            zero-initialize the last BN in each residual branch, so that the
            residual branch starts with zeros, and each residual block behaves
            like an identity.
        groups: int, default 1
            controls the connections between inputs and outputs during
            convolution.
        width_per_group: int, default 64
            controls the number of input and output channels during convolution.
        replace_stride_with_dilation: tuple, default None
            each element in the tuple indicates if we should replace
            the 2x2 stride with a dilated convolution instead.
        norm_layer: nn Module, default None
            use the specified normalization module, by default use batch
            normalization.
        pretrained: str, default None
            update the weights of the model using this state information.
        optimizer_name: str, default 'Adam'
            the name of the optimizer: see 'torch.optim' for a description
            of the available optimizers.
        learning_rate: float, default 1e-3
            the optimizer learning rate.
        loss_name: str, default 'NLLLoss'
            the name of the loss: see 'torch.nn' for a description
            of the available losses.
        metrics: list of str
            a list of extra metrics that will be computed.
        use_cuda: bool, default False
            whether to use GPU or CPU.
        kwargs: dict
            specify directly a custom 'optimizer' or 'loss'. Can also be used
            to set specific optimizer parameters.
        """
        self.model = models.ResNet(
            block=block,
            layers=layers,
            num_classes=num_classes,
            zero_init_residual=zero_init_residual,
            groups=groups,
            width_per_group=width_per_group,
            replace_stride_with_dilation=replace_stride_with_dilation,
            norm_layer=norm_layer)
        super().__init__(
            optimizer_name=optimizer_name,
            learning_rate=learning_rate,
            loss_name=loss_name,
            metrics=metrics,
            use_cuda=use_cuda,
            pretrained=pretrained,
            **kwargs)
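
# Hedged addition: what replace_stride_with_dilation controls, shown on plain
# torchvision. Each True entry replaces the stride-2 downsampling of layer2,
# layer3, layer4 (respectively) with dilated convolutions:
from torchvision import models
dilated = models.ResNet(models.resnet.Bottleneck, [3, 4, 6, 3],
                        num_classes=10,
                        replace_stride_with_dilation=[False, True, True])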
Example #9
def main():
    global args, best_prec1, seed
    args = parser.parse_args()


    # os.environ['CUDA_VISIBLE_DEVICES'] = ', '.join(str(x) for x in args.gpu)

    model = models.ResNet(depth=args.depth, pretrained=args.pretrained,
                          cut_at_pooling=False, num_features=0, norm=False,
                          dropout=0, num_classes=2)


    # # create model
    # if args.pretrained:  # from system models
    #     print("=> using pre-trained model '{}'".format(args.arch))
    #     model = models.__dict__[args.arch](pretrained=True)   # from pytorch system

    use_cuda = not args.no_cuda and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")

    if use_cuda:
        model = model.cuda()  # important: moves all model parameters and buffers onto the GPU

    print('Loading data from ' + args.root_path + args.trainFile)


    # Build the training augmentation for the chosen scheme. The order
    # seriously matters: spatial/color transforms first, then ToTensor,
    # then Normalize.
    normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
    if args.da == 0:
        train_transforms = [transforms.RandomHorizontalFlip()]
    elif args.da == 1:
        train_transforms = [transforms.RandomHorizontalFlip(),
                            transforms.ColorJitter()]
    elif args.da == 2:
        train_transforms = [transforms.RandomHorizontalFlip(),
                            transforms.ColorJitter(),
                            transforms.RandomRotation(random.random() * 10)]
    elif args.da == 3:
        train_transforms = [transforms.RandomHorizontalFlip(),
                            transforms.RandomRotation(random.random() * 10)]
    elif args.da == 4:
        train_transforms = [transforms.RandomHorizontalFlip(),
                            transforms.ColorJitter(),
                            transforms.RandomRotation(random.random() * 20)]
    elif args.da == 5:
        train_transforms = [transforms.RandomCrop(114),
                            transforms.RandomHorizontalFlip(),
                            transforms.RandomRotation(random.random() * 10)]
    else:
        train_transforms = []  # unknown scheme: fall back to no augmentation

    train_loader = torch.utils.data.DataLoader(
        ImageList(root=args.root_path, fileList=args.root_path + args.trainFile,
                  transform=transforms.Compose(
                      train_transforms + [transforms.ToTensor(), normalize])),
        batch_size=args.batch_size, shuffle=True,
        num_workers=args.workers, pin_memory=True)

    print('Loading data from ' + args.root_path + args.testFile)



    val_transforms = [transforms.CenterCrop(114)] if args.da == 5 else []
    val_loader = torch.utils.data.DataLoader(
        ImageList(root=args.root_path, fileList=args.root_path + args.testFile,
                  transform=transforms.Compose(
                      val_transforms + [transforms.ToTensor(), normalize])),
        batch_size=args.test_batch_size, shuffle=False,  # shuffle must stay off for face verification on LFW
        num_workers=args.workers, pin_memory=True)

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().to(device)  # keep the criterion on the same device as the model
    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)


    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True  # This flag allows you to enable the inbuilt cudnn auto-tuner to find the best algorithm to use for your hardware.


    if args.evaluate:
        validate(val_loader, model, criterion)
        return



    results_file = (args.root_path + 'results_ResNet' + str(args.depth) +
                    '_DA' + str(args.da) + '_LR_' + str(args.lr) + '.txt')
    with open(results_file, "a") as fp:
        fp.write(str(seed) + '\n')

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)  # the learning rate is divided by 10 every 10 epochs

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion)
        with open(results_file, "a") as fp:
            fp.write('{0:.3f} \n'.format(prec1))
            if epoch == args.epochs - 1:
                fp.write('\n \n \n')

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
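
        # Hedged addition: the snippet is cut off here. The canonical PyTorch
        # ImageNet example this script follows would now save the checkpoint
        # that the resume branch above reads back (save_checkpoint assumed):
        save_checkpoint({
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'best_prec1': best_prec1,
            'optimizer': optimizer.state_dict(),
        }, is_best)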
Example #10
def resnet_wide_18_2(pretrained=False, **kwargs):
    return models.ResNet(Bottleneck, [2, 2, 2, 2], width_per_group=64 * 2, **kwargs)
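
# Hedged addition, for comparison: torchvision builds its own wide variants the
# same way, e.g. wide_resnet50_2 is ResNet(Bottleneck, [3, 4, 6, 3],
# width_per_group=64 * 2). Illustrative call:
model = resnet_wide_18_2(num_classes=10)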
Example #11
    def load_resnet18(self, model_path):
        """Not usable yet: the loaded ResNet has no `features` (Sequential) attribute."""
        cnn = models.ResNet(models.resnet.BasicBlock, [2, 2, 2, 2])
        cnn.load_state_dict(torch.load(model_path))
        return cnn
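
    # Hedged addition: since torchvision's ResNet has no `features` attribute,
    # a Sequential feature extractor can be assembled from its children
    # (illustrative helper, not in the original):
    def load_resnet18_features(self, model_path):
        cnn = models.ResNet(models.resnet.BasicBlock, [2, 2, 2, 2])
        cnn.load_state_dict(torch.load(model_path))
        return nn.Sequential(*list(cnn.children())[:-1])  # drop the final fc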
Example #12
# ===============================================================================================================================


def weights_init2(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        m.weight.data.normal_(0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
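
# Hedged addition: weights_init2 is a DCGAN-style initializer, applied
# recursively via Module.apply. Illustrative use:
net = vmodels.ResNet(BasicBlock, [1, 1, 1, 1])
net.apply(weights_init2)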


# ===============================================================================================================================

resnet_inits = {
    10: lambda flag: vmodels.ResNet(BasicBlock, [1, 1, 1, 1]),
    14: lambda flag: vmodels.ResNet(BasicBlock, [1, 1, 2, 2]),
    18: lambda flag: vmodels.resnet18(pretrained=flag),
    34: lambda flag: vmodels.resnet34(pretrained=flag),
    50: lambda flag: vmodels.resnet50(pretrained=flag),
    101: lambda flag: vmodels.resnet101(pretrained=flag),
    152: lambda flag: vmodels.resnet152(pretrained=flag),
}

possible_resnets = resnet_inits.keys()
resnet_outsize = {
    10: 512,
    14: 512,
    18: 512,
    34: 512,
    50: 2048,
    101: 2048,
    152: 2048,
}
Example #13
pytest.param(
    "models.alexnet",
    "AlexNet",
    {},
    [],
    {},
    models.AlexNet(),
    id="AlexNetConf",
),
pytest.param(
    "models.resnet",
    "ResNet",
    {"layers": [2, 2, 2, 2]},
    [],
    {"block": Bottleneck},
    models.ResNet(block=Bottleneck, layers=[2, 2, 2, 2]),
    id="ResNetConf",
),
pytest.param(
    "models.densenet",
    "DenseNet",
    {},
    [],
    {},
    models.DenseNet(),
    id="DenseNetConf",
),
pytest.param(
    "models.squeezenet",
    "SqueezeNet",
    {},
    prefix = "set_copyOfExper_"
    runningAvgSize = 10
    num_classes = 10
    layers = [2, 2, 2, 2]
    block = modResnet.BasicBlock


    types = ('predefModel', 'CIFAR10', 'disabled')
    try:
        stats = []
        rootFolder = prefix + sf.Output.getTimeStr() + ''.join(x + "_" for x in types)
        smoothingMetadata = dc.DisabledSmoothing_Metadata()

        for r in range(loop):

            obj = models.ResNet(block, layers, num_classes=num_classes)

            data = dc.DefaultDataCIFAR10(dataMetadata)
            model = dc.DefaultModelPredef(obj=obj, modelMetadata=modelMetadata, name=modelName)
            smoothing = dc.DisabledSmoothing(smoothingMetadata)

            optimizer = optim.SGD(model.getNNModelModule().parameters(), lr=optimizerDataDict['learning_rate'], 
                weight_decay=optimizerDataDict['weight_decay'], momentum=optimizerDataDict['momentum'])
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.1)
            loss_fn = nn.CrossEntropyLoss()     

            stat = dc.run(metadataObj=metadata, data=data, model=model, smoothing=smoothing, optimizer=optimizer, lossFunc=loss_fn,
                modelMetadata=modelMetadata, dataMetadata=dataMetadata, smoothingMetadata=smoothingMetadata, rootFolder=rootFolder,
                schedulers=[([30, 60, 90, 120, 150, 180], scheduler)])

            stat.saveSelf(name="stat")
Example #15
    def __init__(self, in_shape, num_classes=1, resnet=152):
        super(LKM_02, self).__init__()

        self.num_classes = num_classes

        if resnet == 50:
            #resnet = models.resnet50(pretrained=True)
            resnet = models.ResNet(models.resnet.Bottleneck, [3, 4, 6, 3])
            resnet.load_state_dict(
                torch.load('/home/lhc/.torch/models/resnet50-19c8e357.pth'))
        elif resnet == 101:
            resnet = models.ResNet(models.resnet.Bottleneck, [3, 4, 23, 3])
            resnet.load_state_dict(
                torch.load('/home/lhc/.torch/models/resnet101-5d3b4d8f.pth'))
        elif resnet == 152:
            resnet = models.ResNet(models.resnet.Bottleneck, [3, 8, 36, 3])
            resnet.load_state_dict(
                torch.load('/home/lhc/.torch/models/resnet152-b121ed2d.pth'))

        self.conv1 = resnet.conv1
        self.bn0 = resnet.bn1
        self.relu = resnet.relu
        self.maxpool = resnet.maxpool

        self.layer1 = resnet.layer1
        self.layer2 = resnet.layer2
        self.layer3 = resnet.layer3
        self.layer4 = resnet.layer4

        self.gcn1 = GCN(2048, self.num_classes)
        self.gcn2 = GCN(1024, self.num_classes)
        self.gcn3 = GCN(512, self.num_classes)
        self.gcn4 = GCN(256, self.num_classes)
        self.gcn5 = GCN(64, self.num_classes)

        self.refine1 = Refine(self.num_classes)
        self.refine2 = Refine(self.num_classes)
        self.refine3 = Refine(self.num_classes)
        self.refine4 = Refine(self.num_classes)
        self.refine5 = Refine(self.num_classes)
        self.refine6 = Refine(self.num_classes)
        self.refine7 = Refine(self.num_classes)
        self.refine8 = Refine(self.num_classes)
        self.refine9 = Refine(self.num_classes)
        self.refine10 = Refine(self.num_classes)

        self.dconv5 = nn.ConvTranspose2d(num_classes,
                                         num_classes,
                                         kernel_size=2,
                                         stride=2)
        self.dconv4 = nn.ConvTranspose2d(num_classes,
                                         num_classes,
                                         kernel_size=2,
                                         stride=2)
        self.dconv3 = nn.ConvTranspose2d(num_classes,
                                         num_classes,
                                         kernel_size=2,
                                         stride=2)
        self.dconv2 = nn.ConvTranspose2d(num_classes,
                                         num_classes,
                                         kernel_size=2,
                                         stride=2)
        self.dconv1 = nn.ConvTranspose2d(num_classes,
                                         num_classes,
                                         kernel_size=2,
                                         stride=2)
Example #16
def _get_untrained_model(model_name, num_classes):
    """
    Primarily, this method exists to return an untrained / vanilla version of a specified (pretrained) model.
    This is on a best-attempt basis only and may be out of sync with the actual model definitions. The code is manually maintained.

    :param model_name: Lower-case model names are pretrained by convention.
    :param num_classes: Number of classes to initialize the vanilla model with.

    :return: default model for the model_name with custom number of classes
    """

    if model_name.startswith('bninception'):
        return classification.BNInception(num_classes=num_classes)
    elif model_name.startswith('densenet'):
        return torch_models.DenseNet(num_classes=num_classes)
    elif model_name.startswith('dpn'):
        return classification.DPN(num_classes=num_classes)
    elif model_name.startswith('inceptionresnetv2'):
        return classification.InceptionResNetV2(num_classes=num_classes)
    elif model_name.startswith('inception_v3'):
        return torch_models.Inception3(num_classes=num_classes)
    elif model_name.startswith('inceptionv4'):
        return classification.InceptionV4(num_classes=num_classes)
    elif model_name.startswith('nasnetalarge'):
        return classification.NASNetALarge(num_classes=num_classes)
    elif model_name.startswith('nasnetamobile'):
        return classification.NASNetAMobile(num_classes=num_classes)
    elif model_name.startswith('pnasnet5large'):
        return classification.PNASNet5Large(num_classes=num_classes)
    elif model_name.startswith('polynet'):
        return classification.PolyNet(num_classes=num_classes)
    elif model_name.startswith('pyresnet'):
        return classification.PyResNet(num_classes=num_classes)
    elif model_name.startswith('resnet'):
        # ResNet has no default block/layers; fall back to the ResNet-18
        # configuration so this call does not raise a TypeError.
        return torch_models.ResNet(torch_models.resnet.BasicBlock,
                                   [2, 2, 2, 2], num_classes=num_classes)
    elif model_name.startswith('resnext101_32x4d'):
        return classification.ResNeXt101_32x4d(num_classes=num_classes)
    elif model_name.startswith('resnext101_64x4d'):
        return classification.ResNeXt101_64x4d(num_classes=num_classes)
    elif model_name.startswith('se_inception'):
        return classification.SEInception3(num_classes=num_classes)
    elif model_name.startswith('se_resnext50_32x4d'):
        return classification.se_resnext50_32x4d(num_classes=num_classes,
                                                 pretrained=None)
    elif model_name.startswith('se_resnext101_32x4d'):
        return classification.se_resnext101_32x4d(num_classes=num_classes,
                                                  pretrained=None)
    elif model_name.startswith('senet154'):
        return classification.senet154(num_classes=num_classes,
                                       pretrained=None)
    elif model_name.startswith('se_resnet50'):
        return classification.se_resnet50(num_classes=num_classes,
                                          pretrained=None)
    elif model_name.startswith('se_resnet101'):
        return classification.se_resnet101(num_classes=num_classes,
                                           pretrained=None)
    elif model_name.startswith('se_resnet152'):
        return classification.se_resnet152(num_classes=num_classes,
                                           pretrained=None)
    elif model_name.startswith('squeezenet1_0'):
        return torch_models.squeezenet1_0(num_classes=num_classes,
                                          pretrained=False)
    elif model_name.startswith('squeezenet1_1'):
        return torch_models.squeezenet1_1(num_classes=num_classes,
                                          pretrained=False)
    elif model_name.startswith('xception'):
        return classification.Xception(num_classes=num_classes)
    else:
        raise ValueError(
            'No vanilla model found for model name: {}'.format(model_name))
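
# Hedged addition: illustrative use of the lookup above.
vanilla = _get_untrained_model('densenet121', num_classes=10)
assert isinstance(vanilla, torch_models.DenseNet)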
Example #17
class TemporaryModule(T.nn.Module):
    '''
    hacks around the restriction from cleverhans that requires a 2D logits tensor
    '''
    def __init__(self, model):
        T.nn.Module.__init__(self)
        self.model = model

    def forward(self, x):
        y = self.model(x)
        if y.dim() == 3:
            return y.squeeze(1)
        else:
            return y


#model = cuda(DFSGlimpseSingleObjectClassifier())
model = cuda(tvmodels.ResNet(tvmodels.resnet.BasicBlock, [2, 2, 2, 2], 10))
model.load_state_dict(T.load('model.pt'))

s = tf.Session()
x_op = tf.placeholder(tf.float32, shape=(None, 3, 200, 200))

tf_model_fn = convert_pytorch_model_to_tf(cuda(TemporaryModule(model)))
cleverhans_model = CallableModelWrapper(tf_model_fn, output_layer='logits')

fgsm_op = FastGradientMethod(cleverhans_model, sess=s)
fgsm_params = {'eps': 0.01, 'clip_min': 0, 'clip_max': 1}
adv_x_op = fgsm_op.generate(x_op, **fgsm_params)
adv_preds_op = tf_model_fn(adv_x_op)
preds_op = tf_model_fn(x_op)

total = 0
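
# Hedged addition: the snippet breaks off at `total = 0`; a typical
# continuation (an assumption; `loader` is hypothetical) compares clean and
# adversarial accuracy:
correct = adv_correct = 0
for xs, ys in loader:
    preds = s.run(preds_op, feed_dict={x_op: xs.numpy()})
    adv_preds = s.run(adv_preds_op, feed_dict={x_op: xs.numpy()})
    correct += (preds.argmax(axis=1) == ys.numpy()).sum()
    adv_correct += (adv_preds.argmax(axis=1) == ys.numpy()).sum()
    total += len(xs)
print('clean acc: %.3f  adv acc: %.3f' % (correct / total, adv_correct / total))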