Exemple #1
0
    def __init__(self, sigma=0.5, temperature=0.5, gradclip=1, npts=10, option='incremental', size=128,
                 path_to_check='checkpoint_fansoft/fan_109.pth', args=None):
        """Build the FAN detector, geometric bottleneck, generator and losses.

        sigma / temperature: GeoDistill bottleneck parameters.
        gradclip: gradient-clipping value stored for the training loop.
        npts: number of landmark points predicted by FAN.
        option: 'scratch' (random init), 'incremental' (load weights then
            apply convertLayer) or anything else (plain load).
        size: input image size; the bottleneck operates at size / 4.
        path_to_check: checkpoint file for the pretrained FAN.
        args: optional namespace with resume_folder / resume_epoch.
        """
        self.npoints = npts
        self.gradclip = gradclip

        # - define FAN
        self.FAN = FAN(1, n_points=self.npoints)

        if option != 'scratch':
            # Keep only checkpoint entries whose key AND shape match the
            # freshly built network, then load strictly.
            net_dict = self.FAN.state_dict()
            pretrained_dict = torch.load(path_to_check, map_location='cuda')
            pretrained_dict = {k: v for k, v in pretrained_dict.items()
                               if k in net_dict and v.shape == net_dict[k].shape}
            net_dict.update(pretrained_dict)
            self.FAN.load_state_dict(net_dict, strict=True)

            if option == 'incremental':
                print('Option is incremental')
                self.FAN.apply(convertLayer)

        # - define Bottleneck
        self.BOT = GeoDistill(sigma=sigma, temperature=temperature, out_res=int(size / 4))

        # - define GEN
        self.GEN = Generator(conv_dim=32, c_dim=self.npoints)

        # Load a previously trained model if requested.
        # Bug fix: args defaults to None, so guard before dereferencing it —
        # the original raised AttributeError when called with the default.
        if args is not None and args.resume_folder:
            path_fan = '{}/model_{}.fan.pth'.format(args.resume_folder, args.resume_epoch)
            path_gen = '{}/model_{}.gen.pth'.format(args.resume_folder, args.resume_epoch)
            self._resume(path_fan, path_gen)

        # - multiple GPUs
        if torch.cuda.device_count() > 1:
            self.FAN = torch.nn.DataParallel(self.FAN)
            self.BOT = torch.nn.DataParallel(self.BOT)
            self.GEN = torch.nn.DataParallel(self.GEN)

        self.FAN.to('cuda').train()
        self.BOT.to('cuda').train()
        self.GEN.to('cuda').train()

        # - VGG for perceptual loss (frozen, eval mode)
        self.loss_network = LossNetwork(torch.nn.DataParallel(vgg16(pretrained=True)))\
            if torch.cuda.device_count() > 1 else LossNetwork(vgg16(pretrained=True))
        self.loss_network.eval()
        self.loss_network.to('cuda')
        # Per-iteration loss bookkeeping.
        self.loss = dict.fromkeys(['all_loss', 'rec', 'perceptual'])
        self.A = None

        # - define losses for reconstruction
        self.SelfLoss = torch.nn.MSELoss().to('cuda')
        self.PerceptualLoss = torch.nn.MSELoss().to('cuda')

        self.heatmap = HeatMap(32, 0.5).cuda()
Exemple #2
0
    def test_speedup_vgg16(self):
        """Speedup on a pruned VGG16 keeps train mode and shrinks layers."""
        prune_model_l1(vgg16())

        pruned = vgg16()
        pruned.train()
        speedup = ModelSpeedup(pruned, torch.randn(2, 3, 32, 32), MASK_FILE)
        speedup.speedup_model()

        reference = vgg16()
        assert pruned.training
        expected_out = int(reference.features[2].out_channels * SPARSITY)
        expected_in = int(reference.classifier[0].in_features * SPARSITY)
        assert pruned.features[2].out_channels == expected_out
        assert pruned.classifier[0].in_features == expected_in
    def __init__(self,
                 losses=None,
                 gradclip=0,
                 gantype='wgan-gp',
                 edge=False):
        """GAN training wrapper: builds D/G, moves them to CUDA, sets losses.

        losses: weight per loss term ('adv', 'rec', 'self', 'triple', 'tv',
            'percep'); a term with weight <= 0 is disabled. None selects the
            defaults below.
        gradclip: gradient clipping value (0 disables clipping).
        gantype: adversarial loss formulation, e.g. 'wgan-gp'.
        edge: when True the generator is conditioned on 3 channels instead
            of the 68 landmark channels.
        """
        # Bug fix: the original used a mutable dict as the default argument,
        # which is shared across calls; use a None sentinel instead.
        if losses is None:
            losses = {'adv': 1, 'rec': 100, 'self': 100,
                      'triple': 100, 'tv': 1e-5, 'percep': 0}

        self.npoints = 68
        self.gradclip = gradclip
        self.l = losses

        # - define models
        self.DIS = Discriminator(ndim=0)
        self.GEN = Generator(conv_dim=64,
                             c_dim=self.npoints if not edge else 3)
        init_weights(self.DIS)
        init_weights(self.GEN)
        if torch.cuda.device_count() > 1:
            self.GEN = torch.nn.DataParallel(self.GEN)
            self.DIS = torch.nn.DataParallel(self.DIS)

        self.DIS.to('cuda').train()
        self.GEN.to('cuda').train()

        if self.l['percep'] > 0:
            # - VGG for perceptual loss (frozen, eval mode)
            self.loss_network = LossNetwork(
                torch.nn.DataParallel(vgg16(pretrained=True))
            ) if torch.cuda.device_count() > 1 else LossNetwork(
                vgg16(pretrained=True))
            self.loss_network.eval()
            self.loss_network.to('cuda')

        # Each loss object is only instantiated when its weight is positive.
        self.TripleLoss = (lambda x, y: torch.mean(torch.abs(x - y))
                           ) if self.l['triple'] > 0 else None
        self.SelfLoss = torch.nn.L1Loss().to(
            'cuda') if self.l['self'] > 0 else None
        self.RecLoss = torch.nn.L1Loss().to(
            'cuda') if self.l['rec'] > 0 else None
        self.PercepLoss = torch.nn.MSELoss().to(
            'cuda') if self.l['percep'] > 0 else None
        self.TVLoss = TVLoss if self.l['tv'] > 0 else None
        self.gantype = gantype
        # Running loss bookkeeping for generator and discriminator.
        self.loss = dict(G=dict.fromkeys(
            ['adv', 'self', 'triple', 'percep', 'rec', 'tv', 'all']),
                         D=dict.fromkeys(['real', 'fake', 'gp', 'all']))
 def __init__(self, num):
     """Keep the first *num* VGG16 feature layers as a frozen extractor."""
     super(VGG16, self).__init__()
     backbone = vgg16(pretrained=True)
     feature_slice = nn.Sequential(*list(backbone.features)[:num]).eval()
     # Fixed feature extractor: no gradients ever flow into it.
     for weight in feature_slice.parameters():
         weight.requires_grad = False
     self.module = feature_slice
Exemple #5
0
 def __init__(self, nlayers):
     """Profile a pretrained VGG16 on a fixed random input.

     nlayers: number of layers handed to the profiler's layer dictionary.
     """
     # Fixed seed so self.input is reproducible across runs.
     torch.manual_seed(0)
     self.input = x = torch.randn(1, 3, 224, 224)
     # CPU + eval mode: this object only profiles, no training/GPU needed.
     self.model = vgg.vgg16(pretrained=True).to('cpu').eval()
     self.profiler = ddp.TorchProfiler(self.model)
     self.layerdict = self.profiler.create_layers(nlayers)
     # NOTE(review): the wrapped forward apparently returns
     # (output, activations) — project-specific; confirm against TorchProfiler.
     self.output, self.actives = self.profiler.model.forward(x)
Exemple #6
0
    def build_model(self):
        """Create the SRGAN nets, criteria, optimizers and LR schedulers."""
        self.netG = Generator(n_residual_blocks=self.num_residuals,
                              upsample_factor=self.upscale_factor,
                              base_filter=64,
                              num_channel=1).to(self.device)
        self.netD = Discriminator(base_filter=64,
                                  num_channel=1).to(self.device)
        self.feature_extractor = vgg16(pretrained=True)
        self.netG.weight_init(mean=0.0, std=0.2)
        self.netD.weight_init(mean=0.0, std=0.2)
        self.criterionG = nn.MSELoss()
        self.criterionD = nn.BCELoss()
        torch.manual_seed(self.seed)

        if self.GPU_IN_USE:
            torch.cuda.manual_seed(self.seed)
            self.feature_extractor.cuda()
            cudnn.benchmark = True
            self.criterionG.cuda()
            self.criterionD.cuda()

        self.optimizerG = optim.Adam(self.netG.parameters(),
                                     lr=self.lr,
                                     betas=(0.9, 0.999))
        # D learns much slower than G (lr / 100) with plain momentum SGD.
        self.optimizerD = optim.SGD(self.netD.parameters(),
                                    lr=self.lr / 100,
                                    momentum=0.9,
                                    nesterov=True)
        # Bug fix: the original assigned both schedulers to self.scheduler,
        # silently discarding the generator's LR schedule. Keep distinct
        # attributes; self.scheduler remains the last-assigned (D) scheduler
        # for backward compatibility with existing callers.
        self.schedulerG = optim.lr_scheduler.MultiStepLR(
            self.optimizerG, milestones=[50, 75, 100], gamma=0.5)  # lr decay
        self.schedulerD = optim.lr_scheduler.MultiStepLR(
            self.optimizerD, milestones=[50, 75, 100], gamma=0.5)  # lr decay
        self.scheduler = self.schedulerD
    def __init__(self):
        """VGG16 feature backbone split into five pooling stages."""
        # make_layers() builds the conv stack handed to the parent ctor.
        super().__init__(self.make_layers())
        # Index spans of the five conv stages within the feature sequence;
        # callers use these to pull intermediate feature maps.
        self.ranges = ((0, 5), (5, 10), (10, 17), (17, 24), (24, 31))

        # Copy ImageNet-pretrained weights; layer layout must match vgg16.
        self.load_state_dict(vgg16(pretrained=True).state_dict())

        # The classifier head is unused for feature extraction — drop it.
        del self.classifier
Exemple #8
0
    def __init__(self,
                 input_n_channels,
                 n_classes=2,
                 pretrained=True,
                 n_class_features=1024):
        """VGG16 trunk fused with a small metadata branch for classification."""
        super(IcebergPretrainedVGG16WithAngleAndStats2, self).__init__()

        backbone = vgg16(pretrained=pretrained)
        conv_layers = [layer for layer in backbone.features]
        if input_n_channels != 3:
            # Swap the stem so inputs with a non-RGB channel count fit.
            conv_layers[0] = Conv2d(input_n_channels,
                                    64,
                                    kernel_size=3,
                                    padding=1)

        # Drop the final max pooling to keep a larger spatial map.
        conv_layers = conv_layers[:-1]
        self.features = Sequential(*conv_layers)
        self.features.add_module("%i" % len(self.features), Flatten())

        # Small MLP over the 7 scalar metadata features.
        self.metadata_features = Sequential(
            Linear(7, 50), ReLU(True), Dropout(),
            Linear(50, 25), ReLU(True), Dropout())

        # Classifier over concatenated image (512*4*4) + metadata (25) features.
        self.classifier = Sequential(
            Linear(512 * 4 * 4 + 25, n_class_features), ReLU(True), Dropout(),
            Linear(n_class_features, n_class_features), ReLU(True), Dropout(),
            Linear(n_class_features, n_classes))
        self._initialize_weights(input_n_channels, pretrained)
Exemple #9
0
def main(seed: int):
    """Train VGG16 variants on CIFAR-10 with three linear-layer strategies.

    For each N, the two big classifier Linear layers are wrapped:
    N == 1 uses SequentialLinear, N == 64 uses SemisyncLinear, and
    N == 4096 keeps the plain Linear layers (baseline).
    """
    print("seed =", seed)

    for N in (1, 64, 4096):
        # Re-seed per configuration so all three runs start from
        # identical initial weights.
        torch.manual_seed(seed)
        myvgg = vgg.vgg16()

        assert isinstance(myvgg.classifier[0], Linear)
        assert isinstance(myvgg.classifier[3], Linear)
        if N == 1:  # sequential
            myvgg.classifier[0] = SequentialLinear(myvgg.classifier[0])
            myvgg.classifier[3] = SequentialLinear(myvgg.classifier[3])
        elif N == 64:  # semi-synchronous
            myvgg.classifier[0] = SemisyncLinear(myvgg.classifier[0])
            myvgg.classifier[3] = SemisyncLinear(myvgg.classifier[3])

        # Replace the head for CIFAR-10's 10 classes.
        myvgg.classifier[-1] = Linear(4096, 10)

        # Rebuild the classifier without the Dropout layers.
        myvgg.classifier = nn.Sequential(
            myvgg.classifier[0],  # Linear  (Semi)
            myvgg.classifier[1],  # ReLU
            myvgg.classifier[3],  # Linear  (Semi)
            myvgg.classifier[4],  # ReLU
            myvgg.classifier[6],  # Linear
        )

        print(myvgg)
        myvgg.to(device)
        record = conduct(myvgg, *(preprocess.cifar_10_for_vgg_loaders()))
        write_final_record(record, N)
def classify_objects(img_name):
    """Classify image bytes with a lazily cached ImageNet VGG16.

    img_name: raw encoded image bytes (despite the name, not a path).
    Returns the softmax probability vector over the ImageNet classes.
    """
    global vgg_model_cache

    # Build the model once and reuse it across calls.
    if vgg_model_cache is None:
        vgg_model_cache = vgg16(pretrained=True)
    model = vgg_model_cache

    if torch.cuda.is_available():
        model = model.cuda()
    model.eval()

    # load the image transformer (standard ImageNet eval pipeline)
    centre_crop = trn.Compose([
            trn.Resize((256,256)),
            trn.CenterCrop(224),
            trn.ToTensor(),
            trn.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    # Bug fix: the original assigned file_name = 'categories_imagenet.txt'
    # but never used it — dead code removed.

    img = Image.open(io.BytesIO(img_name))
    input_img = V(centre_crop(img).unsqueeze(0))

    if torch.cuda.is_available():
        input_img = input_img.cuda()
    # model(x) is the idiomatic call; the original used model.forward(x).
    logit = model(input_img)
    h_x = F.softmax(logit, 1).data.squeeze()
    print(h_x)
    return h_x
Exemple #11
0
    def __init__(self):
        """Frozen VGG16 conv stack (layers 0-30) for perceptual features."""
        super(VGGFeature, self).__init__()
        self.vgg = vgg16(pretrained=True)
        feature_layers = list(self.vgg.features)[:31]
        self.loss_network = nn.Sequential(*feature_layers).eval()

        # Feature extractor only — never trained.
        for weight in self.loss_network.parameters():
            weight.requires_grad = False
Exemple #12
0
 def __init__(self, device):
     """Split a pretrained VGG16 into this extractor's per-stage pieces.

     device: torch device the backbone is moved to before splitting.
     """
     model = vgg16(pretrained=True)
     model.to(device=device)
     model.eval()
     self._device = device
     # Total spatial downsampling factor of the backbone.
     self._downsamp = 32
     # NOTE(review): parent's _split_net is invoked without calling the
     # parent __init__ first — verify the base class tolerates this.
     super(VggFeatureExtractor, self)._split_net(model)
    def __init__(self, useVggWeights: bool):
        """FCN-style segmentation head on an eval-mode VGG16 backbone.

        useVggWeights: load ImageNet-pretrained VGG16 weights when True.
        """
        super(SegModel, self).__init__()
        model = toDevice(vgg.vgg16(pretrained=useVggWeights))
        self._features = model.eval().features
        self.vgg_features = self._features._modules.items()

        # Feature-map taps, keyed by module index within vgg.features.
        self.layers = {
            # '16': 'pool3',
            '23': 'pool4',
            '30': 'pool5',
        }

        # 1x1-conv scoring head over the deepest (pool5) features.
        self.scoreLayer1 = nn.Sequential(
            nn.Conv2d(512, 2048, 1),
            nn.ReLU(inplace=True),
            nn.Dropout2d(),
            nn.Conv2d(2048, 2048, 1),
            nn.ReLU(inplace=True),
            nn.Dropout2d(),
            # NOTE(review): this second consecutive ReLU is redundant
            # (the preceding ReLU+Dropout output is already non-negative)
            # — likely a leftover; confirm before removing.
            nn.ReLU(inplace=True),
            nn.Conv2d(2048, nClass, 1),
        )

        # Shallower scoring head for the pool4 skip connection.
        self.scoreLayer2 = nn.Sequential(nn.Conv2d(512, 2048, 1),
                                         nn.ReLU(inplace=True), nn.Dropout2d(),
                                         nn.Conv2d(2048, nClass, 1))

        # Learned upsamplers: 2x for the skip merge, 16x to full resolution.
        self.upLayer2 = makeUpSampler(nClass, 2)
        self.upLayer16 = makeUpSampler(nClass, 16)
Exemple #14
0
 def __init__(self, pretrained=None):
     """Detection head trunk whose backbone is selected by config.ini.

     pretrained: any non-None value requests pretrained backbone weights
     (for the resnet branch; see the NOTE in the vgg branch).
     """
     super(HeadCommon, self).__init__()
     self.config = ConfigParser()
     # config.ini lives two directories above this module.
     config_path = os.path.abspath(
         os.path.join(__file__, "../../", "config.ini"))
     assert os.path.exists(config_path), "config.ini not exists!"
     self.config.read(config_path)
     self.backbone_type = self.config['BACKBONE']['BACKBONE_TYPE']
     _pretrained = True if pretrained is not None else False
     assert self.backbone_type in ['resnet', 'vgg16']
     if self.backbone_type == 'resnet':
         resnet_layer = int(self.config['BACKBONE']['RESNET_LAYER'])
         assert resnet_layer in [18, 34, 50, 101, 152]
         if resnet_layer == 18:
             _resnet = resnet.resnet18(_pretrained)
         elif resnet_layer == 34:
             _resnet = resnet.resnet34(_pretrained)
         elif resnet_layer == 50:
             _resnet = resnet.resnet50(_pretrained)
         elif resnet_layer == 101:
             _resnet = resnet.resnet101(_pretrained)
         else:
             _resnet = resnet.resnet152(_pretrained)
         # using resnet_c5 the last bottle neck of resnet
         # Stride 1 in the last stage keeps a larger feature map.
         _resnet.layer4[0].conv2.stride = (1, 1)
         _resnet.layer4[0].downsample[0].stride = (1, 1)
         self.resnet_c5 = _resnet.layer4
         self.resnet_c5_avg = _resnet.avgpool
     elif self.backbone_type == 'vgg16':
         assert not bool(int(self.config['HEAD']['MASK_HEAD_ON'])), (
             "When mask head on, not support vgg16 backbone.")
         # NOTE(review): hard-codes pretrained=True instead of _pretrained,
         # unlike the resnet branch — confirm whether this is intentional.
         vgg = vgg16(pretrained=True)
         # Keep all fully-connected classifier layers except the final one.
         self.vgg_fc = nn.Sequential(
             *list(vgg.classifier._modules.values())[:-1])
Exemple #15
0
    def __init__(self, n_classes=21, pretrained=False):
        """LRN segmentation net: VGG16 encoder plus five refine units."""
        super(lrn_vgg16, self).__init__()
        self.n_classes = n_classes
        backbone = vgg.vgg16(pretrained=pretrained)
        self.encoder = backbone.features
        self.out_conv = nn.Conv2d(in_channels=512,
                                  out_channels=self.n_classes,
                                  kernel_size=1)

        # ----decoder refine units----
        # One refine unit per encoder stage, deepest (512ch) to shallowest
        # (64ch); registered as refine_1..refine_5 and also collected in a
        # plain list for easy iteration.
        self.refine_units = []
        for index, channels in enumerate((512, 512, 256, 128, 64), start=1):
            unit = LRNRefineUnit(self.n_classes, channels)
            setattr(self, 'refine_%d' % index, unit)
            self.refine_units.append(unit)
Exemple #16
0
def vgg(**config):
    """Build a VGG variant chosen by 'dataset', 'depth' and 'bn' keys.

    ImageNet + a known depth dispatches to the torchvision constructors;
    everything else falls through to the local VGG implementation.
    """
    dataset = config.pop('dataset', 'imagenet')
    depth = config.pop('depth', 16)
    bn = config.pop('bn', True)

    if dataset == 'imagenet':
        config.setdefault('num_classes', 1000)
        # Constructors keyed by (depth, use-batch-norm); mirrors the
        # original if-ladder, where any bn other than False picks the
        # batch-norm variant.
        builders = {
            (11, False): vgg11, (11, True): vgg11_bn,
            (13, False): vgg13, (13, True): vgg13_bn,
            (16, False): vgg16, (16, True): vgg16_bn,
            (19, False): vgg19, (19, True): vgg19_bn,
        }
        key = (depth, bn is not False)
        if key in builders:
            return builders[key](pretrained=False, **config)
    elif dataset == 'cifar10':
        config.setdefault('num_classes', 10)
    elif dataset == 'cifar100':
        config.setdefault('num_classes', 100)
    # Fall through to the local VGG implementation.
    config.setdefault('batch_norm', bn)
    return VGG(model_name[depth], **config)
    def __init__(self, backbone, use_original_imgsize):
        """Hypercorrelation Squeeze network over a frozen backbone.

        backbone: 'vgg16', 'resnet50' or 'resnet101'.
        use_original_imgsize: stored flag controlling output resizing.
        Raises Exception for any other backbone name.
        """
        super(HypercorrSqueezeNetwork, self).__init__()

        # 1. Backbone network initialization
        self.backbone_type = backbone
        self.use_original_imgsize = use_original_imgsize
        if backbone == 'vgg16':
            self.backbone = vgg.vgg16(pretrained=True)
            # Indices of the backbone layers whose activations are extracted.
            self.feat_ids = [17, 19, 21, 24, 26, 28, 30]
            self.extract_feats = extract_feat_vgg
            # Number of blocks per backbone stage.
            nbottlenecks = [2, 2, 3, 3, 3, 1]
        elif backbone == 'resnet50':
            self.backbone = resnet.resnet50(pretrained=True)
            self.feat_ids = list(range(4, 17))
            self.extract_feats = extract_feat_res
            nbottlenecks = [3, 4, 6, 3]
        elif backbone == 'resnet101':
            self.backbone = resnet.resnet101(pretrained=True)
            self.feat_ids = list(range(4, 34))
            self.extract_feats = extract_feat_res
            nbottlenecks = [3, 4, 23, 3]
        else:
            raise Exception('Unavailable backbone: %s' % backbone)

        # Per-extracted-layer block index within its stage...
        self.bottleneck_ids = reduce(
            add, list(map(lambda x: list(range(x)), nbottlenecks)))
        # ...and the 1-based stage id each extracted layer belongs to.
        self.lids = reduce(add,
                           [[i + 1] * x for i, x in enumerate(nbottlenecks)])
        # Cumulative layer counts of the deepest stages (deepest first),
        # used to split the stacked correlation maps into three groups.
        self.stack_ids = torch.tensor(
            self.lids).bincount().__reversed__().cumsum(dim=0)[:3]
        # Backbone stays in eval mode; only the HPN learner is trained.
        self.backbone.eval()
        self.hpn_learner = HPNLearner(list(reversed(nbottlenecks[-3:])))
        self.cross_entropy_loss = nn.CrossEntropyLoss()
Exemple #18
0
    def __init__(self):
        """VGG perceptual loss: MSE between frozen VGG16 feature maps."""
        super(VGGLoss, self).__init__()
        backbone = vgg16(pretrained=True)
        layers = list(backbone.features)[:35]
        self.loss_generator = nn.Sequential(*layers).eval()
        # The comparison network is fixed; no gradients flow into it.
        for weight in self.loss_generator.parameters():
            weight.requires_grad = False

        self.distance = nn.MSELoss()
def vgg_ssd():
    """Assemble a 300x300, 21-class SSD on a pretrained VGG16 base."""
    size = 300
    num_classes = 21
    backbone = vgg_for_ssd(vgg.vgg16(pretrained=True))
    extra_layers = get_extras(extras_cfg[size], 1024)
    # Prediction heads attach at base layers 21 and 33 plus the extras.
    cls_headers, reg_headers = get_headers(
        headers_cfg[size], backbone, [21, 33], extra_layers, num_classes)
    return SSD(size, num_classes, backbone, extra_layers,
               cls_headers, reg_headers)
Exemple #20
0
def CreateLossNetwork(opt):
    """Build an eval-mode perceptual LossNetwork around pretrained VGG16."""
    backbone = vgg.vgg16(pretrained=True)
    if torch.cuda.is_available():
        backbone.cuda(opt.gpu_ids[0])
    loss_network = LossNetwork(backbone)
    loss_network.eval()
    # Drop the local handle; LossNetwork holds what it needs.
    del backbone
    return loss_network
Exemple #21
0
 def __init__(self, floor):
     """Perceptual loss over the first *floor* VGG16 feature layers."""
     super(PerceptionLoss, self).__init__()
     backbone = vgg16(pretrained=True)
     feature_net = nn.Sequential(*list(backbone.features)[:floor]).eval()
     # Frozen comparison network: gradients are never needed here.
     for weight in feature_net.parameters():
         weight.requires_grad = False
     self.loss_network = feature_net
     self.mse_loss = nn.MSELoss()
Exemple #22
0
def test_calibration_integration():
    """End-to-end check that Dirichlet calibration leaves weights intact.

    Briefly trains a VGG16 wrapper on a dummy active-learning dataset,
    runs the calibrator, then asserts (a) the wrapped model's parameters
    are unchanged by calibration and (b) the calibrated model gained
    extra modules (the calibration layer).
    """
    transform_pipeline = Compose([Resize((64, 64)), ToTensor()])
    cifar10_train = DummyDataset(transform_pipeline)
    cifar10_test = DummyDataset(transform_pipeline)

    # we don't create different trainset for calibration since the goal is not
    # to calibrate
    al_dataset = ActiveLearningDataset(
        cifar10_train, pool_specifics={'transform': transform_pipeline})
    al_dataset.label_randomly(10)
    use_cuda = False
    model = vgg.vgg16(pretrained=False, num_classes=10)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(),
                          lr=0.001,
                          momentum=0.9,
                          weight_decay=0.0005)

    wrapper = ModelWrapper(model, criterion)
    calibrator = DirichletCalibrator(wrapper=wrapper,
                                     num_classes=10,
                                     lr=0.001,
                                     reg_factor=0.01)

    for step in range(2):
        wrapper.train_on_dataset(al_dataset,
                                 optimizer=optimizer,
                                 batch_size=10,
                                 epoch=1,
                                 use_cuda=use_cuda,
                                 workers=0)

        wrapper.test_on_dataset(cifar10_test,
                                batch_size=10,
                                use_cuda=use_cuda,
                                workers=0)

        # Snapshot parameters before calibration for later comparison.
        before_calib_param = list(
            map(lambda x: x.clone(), wrapper.model.parameters()))

        calibrator.calibrate(al_dataset,
                             cifar10_test,
                             batch_size=10,
                             epoch=5,
                             use_cuda=use_cuda,
                             double_fit=False,
                             workers=0)

        after_calib_param = list(map(lambda x: x.clone(), model.parameters()))

        # Calibration must not touch the underlying model's weights.
        assert all([
            np.allclose(i.detach(), j.detach())
            for i, j in zip(before_calib_param, after_calib_param)
        ])

        # The calibrated model wraps the original, so it has more modules.
        assert len(list(wrapper.model.modules())) < len(
            list(calibrator.calibrated_model.modules()))
Exemple #23
0
 def __init__(self):
     """Generator loss helper: VGG16 feature net plus L1 and TV losses."""
     super(GeneratorLoss, self).__init__()
     # NOTE(review): pretrained=False gives random VGG weights — unusual
     # for a perceptual-style loss; confirm whether weights are loaded
     # elsewhere or pretrained=True was intended.
     vgg = vgg16(pretrained=False)
     loss_network = nn.Sequential(*list(vgg.features)[:31]).eval()
     # NOTE(review): unlike sibling implementations, parameters are NOT
     # frozen here (the requires_grad loop is commented out) — verify intent.
     #         for param in loss_network.parameters():
     #             param.requires_grad = False
     self.loss_network = loss_network
     self.L1_loss = nn.L1Loss()
     self.tv_loss = TVLoss()
Exemple #24
0
 def __init__(self):
     """GAN generator loss: frozen VGG16 features, MSE and BCE terms."""
     super(GeneratorLoss, self).__init__()
     backbone = vgg16(pretrained=True)
     feature_net = nn.Sequential(*list(backbone.features)[:31]).eval()
     # Frozen feature extractor — no gradients flow into it.
     for weight in feature_net.parameters():
         weight.requires_grad = False
     self.loss_network = feature_net
     self.mse_loss = nn.MSELoss()
     self.bce_loss = nn.BCELoss()
Exemple #25
0
    def _get_performance(self, bottleneck):
        """Measure reconstruction quality of this codec at a byte budget.

        bottleneck: maximum allowed size of the latent code, in bytes.
        Returns three parallel lists over the evaluation images:
        (L1 distances, SSIM scores, perceptual distances).
        """
        from itertools import combinations
        # NOTE(review): skimage.measure.compare_ssim is deprecated/removed
        # in newer scikit-image (now skimage.metrics.structural_similarity);
        # the dependency may need pinning or the import updating.
        from skimage.measure import compare_ssim as _compare_ssim
        from torchvision.models import vgg
        from torchvision.transforms import functional as TF
        _vgg16 = vgg.vgg16(pretrained=True)
        _perceptual = PerceptualLoss(_vgg16).eval()
        # ImageNet-normalised tensor view of a PIL image, with batch dim.
        _tensor = lambda x: TF.normalize(TF.to_tensor(x),
                                         mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])[None]
        # Image as float array scaled to [0, 1].
        _numpy = lambda x: np.array(x, dtype=float) / 255.

        compare_l1 = lambda a, b: np.abs(_numpy(a) - _numpy(b)).mean()
        compare_ssim = lambda a, b: _compare_ssim(
            _numpy(a), _numpy(b), multichannel=True)

        l1 = []
        ssim = []
        perceptual = []

        data_loader = get_data_loader('data')

        def compare_perceptual(a, b):
            # Sum of mean absolute differences across all returned
            # VGG feature maps.
            a_features = _perceptual(_tensor(a))
            b_features = _perceptual(_tensor(b))
            loss = 0.
            for name in a_features:
                loss += float(
                    (a_features[name] - b_features[name]).abs().mean())

            return loss

        for img in data_loader():
            w, h = img.size

            z = self.encode(img, bottleneck)

            assert z.nbytes <= bottleneck, "Latent vector exceeds bottleneck"

            img_rec = self.decode(z, bottleneck)

            # from PIL import Image
            # size = {4096:36,16384:73,65536:147}.get(bottleneck)
            # img_low = img.resize((size,size),Image.ANTIALIAS)
            # img_rec = img_low.resize((256,256),Image.ANTIALIAS)

            assert img_rec.size == img.size, "Decoded image has wrong resolution"

            rec_l1 = compare_l1(img, img_rec)
            rec_ssim = compare_ssim(img, img_rec)
            rec_perceptual = compare_perceptual(img, img_rec)

            l1.append(rec_l1)
            ssim.append(rec_ssim)
            perceptual.append(rec_perceptual)

        return l1, ssim, perceptual
Exemple #26
0
    def __init__(self):
        """Perceptual loss on GPU: L1 between VGG16 conv features (0-15)."""
        super(PerceptualLoss, self).__init__()

        backbone = vgg16(pretrained=True).cuda()
        feature_layers = list(backbone.features)[:16]
        self.loss_network = nn.Sequential(*feature_layers).eval()
        # Fixed feature extractor — gradients never reach it.
        for weight in self.loss_network.parameters():
            weight.requires_grad = False

        self.l1_loss = nn.L1Loss()
 def __init__(self):
     """Generator loss with a frozen full VGG16 feature stack and MSE."""
     super(GLoss, self).__init__()
     backbone = vgg16(pretrained=True)
     feature_net = backbone.features
     feature_net.eval()
     # Comparison network only — freeze every parameter.
     for weight in feature_net.parameters():
         weight.requires_grad = False
     self.lossnet = feature_net
     self.mseloss = nn.MSELoss()
def get_vgg(switch_avg=True):
    """Return pretrained VGG16, optionally swapping max-pools for avg-pools.

    switch_avg: when True, every MaxPool2d in .features is replaced by an
    AvgPool2d(kernel_size=2, stride=2, padding=0, ceil_mode=False).
    """
    vgg_model = vgg16(pretrained=True)
    if switch_avg:
        # isinstance + enumerate replace the original type() == comparison
        # and range(len(...)) indexing.
        for layer_i, layer in enumerate(vgg_model.features):
            if isinstance(layer, torch.nn.MaxPool2d):
                vgg_model.features[layer_i] = torch.nn.AvgPool2d(
                    kernel_size=2, stride=2, padding=0, ceil_mode=False)
    return vgg_model
Exemple #29
0
 def __init__(self, as_gray=False):
     """Perceptual MSE loss on frozen VGG16 features (layers 0-30).

     as_gray: stored flag for callers handling grayscale inputs.
     """
     super().__init__()
     backbone = vgg16(pretrained=True)
     feature_net = nn.Sequential(*list(backbone.features)[:31]).eval()
     # Frozen comparison network.
     for weight in feature_net.parameters():
         weight.requires_grad = False
     self.loss_network = feature_net
     self.as_gray = as_gray
     self.mse_loss = nn.MSELoss()
    def __init__(self):
        """Hold an eval-mode VGG16 and its ImageNet input transform."""
        net = models.vgg16(pretrained=True)
        net.eval()
        self.model = net

        # ToTensor followed by channel-wise ImageNet mean/std normalisation.
        self.transform_batch = transforms.Compose([
            ToTensor(),
            Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
        ])
Exemple #31
0
 def test_vgg16(self):
     """Export test for the VGG 16-layer model (configuration "D")."""
     ones = torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0)
     self.exportTest(toC(vgg16()), toC(Variable(ones)))
Exemple #32
0
 def test_vgg16(self):
     """Round-trip model test for VGG16 using zoo weights, eval mode."""
     # progress=False keeps the download bar out of test logs.
     state_dict = model_zoo.load_url(model_urls['vgg16'], progress=False)
     self.run_model_test(vgg16(), train=False, batch_size=BATCH_SIZE,
                         state_dict=state_dict)