Example no. 1
def test(self, testloader, test_size, inception=None):
    with torch.no_grad():
        self.G.eval()
        self.D.eval()
        # One batch of real images for comparison against the generator.
        x_real, _ = next(iter(testloader))
        x_real = x_real.to(self.device)
        x_fake = []
        if inception:
            f_real = []
            f_fake = []
        # Generate fake samples in chunks of 100 latent vectors.
        for i in range(test_size // 100):
            z = torch.randn(100, self.z_dim, device=self.device)
            x_i = self.G(z) * 0.5 + 0.5  # rescale from [-1, 1] to [0, 1]
            x_fake.append(x_i)
            if inception:
                f_real.append(inception(x_real[i * 100:(i + 1) * 100]))
                f_fake.append(inception(x_i))
        x_fake = torch.cat(x_fake, dim=0)
        if inception:
            # Evaluate FID and nearest-neighbour accuracy on Inception features.
            f_real = torch.cat(f_real, dim=0)
            f_fake = torch.cat(f_fake, dim=0)
            fid = FID_score(f_real.cpu(), f_fake.cpu())
            nn_real, nn_fake = nn_accuracy(f_real,
                                           f_fake,
                                           device=self.device)
        else:
            # Without an Inception model, fall back to raw pixels and skip FID.
            fid = -1
            nn_real, nn_fake = nn_accuracy(x_real,
                                           x_fake,
                                           device=self.device)
    return fid, nn_real, nn_fake
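
The FID_score and nn_accuracy helpers called above are not part of the excerpt. As a rough orientation, a minimal FID_score might look like the sketch below; it assumes f_real and f_fake are 2-D CPU tensors of Inception features and follows the standard Fréchet Inception Distance formula. It is an assumption, not the implementation actually used by this example.

import numpy as np
from scipy import linalg

def FID_score(f_real, f_fake):
    # Fit a Gaussian to each feature set and compute the Frechet distance
    # ||mu_r - mu_f||^2 + Tr(C_r + C_f - 2 (C_r C_f)^{1/2}).
    f_real = f_real.numpy()
    f_fake = f_fake.numpy()
    mu_r, mu_f = f_real.mean(axis=0), f_fake.mean(axis=0)
    cov_r = np.cov(f_real, rowvar=False)
    cov_f = np.cov(f_fake, rowvar=False)
    covmean, _ = linalg.sqrtm(cov_r.dot(cov_f), disp=False)
    if np.iscomplexobj(covmean):
        covmean = covmean.real  # drop tiny imaginary parts introduced by sqrtm
    diff = mu_r - mu_f
    return float(diff.dot(diff) + np.trace(cov_r + cov_f - 2.0 * covmean))
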
Example no. 2
        embedding_layer_name = 'l2norm'
    elif args.loss == 'softmax_corr':
        embedding_layer_name = 'softmax'
    else:
        embedding_layer_name = 'embedding'

    if not args.no_progress:
        model.summary()

    batch_transform_kwargs = {
        'embedding' : embedding,
        'num_classes' : data_generator.num_classes if args.cls_weight > 0 else None
    }
    if args.loss.endswith('_corr'):
        loss = utils.inv_correlation
        metric = 'accuracy' if (args.loss == 'softmax_corr') or (args.embedding == 'onehot') else utils.nn_accuracy(embedding, dot_prod_sim = True)
    else:
        loss = utils.squared_distance
        metric = utils.nn_accuracy(embedding, dot_prod_sim = False)

    # Load pre-trained weights and train last layer for a few epochs
    if args.finetune:
        print('Loading pre-trained weights from {}'.format(args.finetune))
        model.load_weights(args.finetune, by_name=True, skip_mismatch=True)
        if args.finetune_init > 0:
            print('Pre-training new layers')
            for layer in model.layers:
                layer.trainable = (layer.name in ('embedding', 'prob'))
            embed_model.layers[-1].trainable = True
            if args.cls_weight > 0:
                par_model.compile(optimizer = keras.optimizers.SGD(lr=args.sgd_lr, momentum=0.9, nesterov=args.nesterov, clipnorm = args.clipgrad),
Example no. 3
        embedding_layer_name = 'embedding'

    if not args.no_progress:
        model.summary()

    batch_transform_kwargs = {
        'embedding': embedding,
        'num_classes': data_generator.num_classes if args.cls_weight > 0 else None
    }
    if args.loss.endswith('_corr'):
        loss = utils.inv_correlation
        metrics = [
            'accuracy' if (args.loss == 'softmax_corr') or (args.embedding == 'onehot')
            else utils.nn_accuracy(embedding, dot_prod_sim=True)
        ]
        if len(args.top_k_acc) > 0:
            for k in args.top_k_acc:
                metrics.append(
                    utils.top_k_acc(k)
                    if (args.loss == 'softmax_corr') or (args.embedding == 'onehot')
                    else utils.nn_accuracy(embedding, dot_prod_sim=True, k=k))
    else:
        loss = utils.squared_distance
        metrics = [
            'accuracy' if args.embedding == 'onehot'
            else utils.nn_accuracy(embedding, dot_prod_sim=False)
        ]
        if len(args.top_k_acc) > 0:
            for k in args.top_k_acc:
Example no. 4
    if not args.no_progress:
        model.summary()

    callbacks = []
    batch_transform_kwargs = {'embedding': embedding}

    if args.init_weights and (args.init_epochs > 0):
        print('Pre-training linear transformation')
        for layer in model.layers[:-1]:
            layer.trainable = False

        model.compile(
            optimizer=keras.optimizers.Adagrad(lr=args.init_lr),
            loss=utils.devise_ranking_loss(embedding, args.margin),
            metrics=[utils.nn_accuracy(embedding, dot_prod_sim=True)])

        model.fit_generator(data_generator.train_sequence(
            args.batch_size,
            batch_transform=transform_inputs,
            batch_transform_kwargs=batch_transform_kwargs),
                            validation_data=data_generator.test_sequence(
                                args.val_batch_size,
                                batch_transform=transform_inputs,
                                batch_transform_kwargs=batch_transform_kwargs),
                            epochs=args.init_epochs,
                            callbacks=callbacks,
                            verbose=not args.no_progress,
                            max_queue_size=100,
                            workers=8,
                            use_multiprocessing=True)
    loss = utils.inv_correlation if args.loss == 'inv_corr' else utils.squared_distance
    
    # Load pre-trained weights and train last layer for a few epochs
    if args.finetune:
        print('Loading pre-trained weights from {}'.format(args.finetune))
        model.load_weights(args.finetune, by_name=True, skip_mismatch=True)
        if args.finetune_init > 0:
            print('Pre-training new layers')
            for layer in model.layers:
                layer.trainable = (layer.name in ('embedding', 'prob'))
            embed_model.layers[-1].trainable = True
            if args.cls_weight > 0:
                par_model.compile(optimizer = keras.optimizers.SGD(lr=args.sgd_lr, momentum=0.9, nesterov=args.nesterov, clipnorm = args.clipgrad),
                                loss = { embedding_layer_name : loss, 'prob' : 'categorical_crossentropy' },
                                loss_weights = { embedding_layer_name : 1.0, 'prob' : args.cls_weight },
                                metrics = { embedding_layer_name : utils.nn_accuracy(embedding, dot_prod_sim = (args.loss == 'inv_corr')), 'prob' : 'accuracy' })
            else:
                par_model.compile(optimizer = keras.optimizers.SGD(lr=args.sgd_lr, momentum=0.9, nesterov=args.nesterov, clipnorm = args.clipgrad),
                                loss = loss,
                                metrics = [utils.nn_accuracy(embedding, dot_prod_sim = (args.loss == 'inv_corr'))])
            par_model.fit_generator(
                    data_generator.train_sequence(args.batch_size, batch_transform = transform_inputs, batch_transform_kwargs = batch_transform_kwargs),
                    validation_data = data_generator.test_sequence(args.val_batch_size, batch_transform = transform_inputs, batch_transform_kwargs = batch_transform_kwargs),
                    epochs = args.finetune_init, verbose = not args.no_progress,
                    max_queue_size = args.queue_size, workers = args.read_workers, use_multiprocessing = True)
            for layer in model.layers:
                layer.trainable = True
            print('Full model training')

    # Train model
    callbacks, num_epochs = utils.get_lr_schedule(args.lr_schedule, data_generator.num_train, args.batch_size, schedule_args = { arg_name : arg_val for arg_name, arg_val in vars(args).items() if arg_val is not None })
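
The utils.devise_ranking_loss, utils.inv_correlation and utils.squared_distance functions selected above come from the project's own utils module, which is not part of the excerpt. Below is a minimal sketch of what they might look like with the Keras backend, assuming y_true holds the target class embedding and y_pred the predicted embedding; it is an illustrative assumption, not the project's actual code.

import numpy as np
from keras import backend as K

def inv_correlation(y_true, y_pred):
    # 1 minus the dot-product similarity between target and predicted
    # embeddings (a correlation-style loss for unit-norm vectors).
    return 1.0 - K.sum(y_true * y_pred, axis=-1)

def squared_distance(y_true, y_pred):
    # Plain squared Euclidean distance in embedding space.
    return K.sum(K.square(y_pred - y_true), axis=-1)

def devise_ranking_loss(embedding, margin=0.1):
    # Hinge ranking loss in the spirit of DeVISE: the similarity to the
    # correct class embedding must beat every other class by `margin`.
    emb = K.constant(np.asarray(embedding), dtype=K.floatx())  # (num_classes, dim)
    def _loss(y_true, y_pred):
        sim_true = K.sum(y_true * y_pred, axis=-1, keepdims=True)  # (batch, 1)
        sim_all = K.dot(y_pred, K.transpose(emb))                  # (batch, num_classes)
        # The correct class contributes exactly `margin` to the sum, so subtract it.
        return K.sum(K.relu(margin - sim_true + sim_all), axis=-1) - margin
    return _loss
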
Example no. 6
        embedding_layer_name = 'l2norm'
    elif args.loss == 'softmax_corr':
        embedding_layer_name = 'softmax'
    else:
        embedding_layer_name = 'embedding'

    if not args.no_progress:
        model.summary()

    batch_transform_kwargs = {
        'embedding' : embedding,
        'num_classes' : data_generator.num_classes if args.cls_weight > 0 else None
    }
    if args.loss.endswith('_corr'):
        loss = utils.inv_correlation
        metrics = ['accuracy' if (args.loss == 'softmax_corr') or (args.embedding == 'onehot') else utils.nn_accuracy(embedding, dot_prod_sim = True)]
        if len(args.top_k_acc) > 0:
            for k in args.top_k_acc:
                metrics.append(utils.top_k_acc(k) if (args.loss == 'softmax_corr') or (args.embedding == 'onehot') else utils.nn_accuracy(embedding, dot_prod_sim = True, k = k))
    else:
        loss = utils.squared_distance
        metrics = ['accuracy' if args.embedding == 'onehot' else utils.nn_accuracy(embedding, dot_prod_sim = False)]
        if len(args.top_k_acc) > 0:
            for k in args.top_k_acc:
                metrics.append(utils.top_k_acc(k) if args.embedding == 'onehot' else utils.nn_accuracy(embedding, dot_prod_sim = False, k = k))

    cls_metrics = ['accuracy']
    if len(args.top_k_acc) > 0:
        for k in args.top_k_acc:
            cls_metrics.append(utils.top_k_acc(k))
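
Likewise, the utils.nn_accuracy and utils.top_k_acc metric factories used throughout these examples are not shown. The sketch below assumes nn_accuracy measures how often the predicted embedding is closest to the correct class embedding (the k-nearest variant used for top-k evaluation is omitted for brevity) and that top_k_acc wraps Keras' top-k categorical accuracy; both are illustrative guesses, not the repository's definitions.

import numpy as np
import keras
from keras import backend as K

def nn_accuracy(embedding, dot_prod_sim=False):
    # Fraction of samples whose predicted embedding is nearest to the
    # embedding of the correct class.
    emb = K.constant(np.asarray(embedding), dtype=K.floatx())  # (num_classes, dim)

    def _sim(y):
        if dot_prod_sim:
            return K.dot(y, K.transpose(emb))
        # Negative squared Euclidean distance as a similarity measure.
        return (2.0 * K.dot(y, K.transpose(emb))
                - K.sum(K.square(emb), axis=-1)
                - K.sum(K.square(y), axis=-1, keepdims=True))

    def _nn_accuracy(y_true, y_pred):
        true_cls = K.argmax(_sim(y_true), axis=-1)
        pred_cls = K.argmax(_sim(y_pred), axis=-1)
        return K.mean(K.cast(K.equal(true_cls, pred_cls), K.floatx()))

    return _nn_accuracy

def top_k_acc(k):
    # Thin wrapper so each k gets a distinct metric name in the training logs.
    def _top_k(y_true, y_pred):
        return keras.metrics.top_k_categorical_accuracy(y_true, y_pred, k=k)
    _top_k.__name__ = 'top_{}_acc'.format(k)
    return _top_k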