Example #1
        # Add noise to fake
        if opt.add_noise:
            fake = added_gaussian_chi(fake, add_noise_var)

        prediction_fake_g = discriminator(fake)
        label_real = real_label(batch_size).to(gpu)
        g_err = loss(prediction_fake_g, label_real)
        g_err.backward()
        d_fake_2 = prediction_fake_g.mean().item()

        # update the generator only if it is not frozen (or still within freezeEpochs)
        if not opt.freezeG or epoch <= freezeEpochs:
            g_optimizer.step()

        logger.log(d_error_total, g_err, epoch, n_batch, len(dataloader), d_real, d_fake_1, d_fake_2)

        if n_batch % 10 == 0:
            print('[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f'
                  % (epoch, opt.epochs, n_batch, len(dataloader),
                     d_error_total, g_err.item(), d_real, d_fake_1, d_fake_2))

        if n_batch % 100 == 0:
            Logger.batch = n_batch
            # generate fake with fixed noise
            test_fake = generator(fixed_noise)
            test_fake = F.pad(test_fake, (p, p, p, p), mode='replicate')

            # clone network to remove batch norm for relevance propagation
            canonical = type(discriminator)(nc, ndf, alpha, ngpu)
            canonical.load_state_dict(discriminator.state_dict())
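
The helpers added_gaussian_chi and real_label are defined elsewhere in the original project and are not shown. A minimal sketch of plausible stand-ins, assuming the labels feed a BCE-style loss and using plain Gaussian noise in place of whatever distribution added_gaussian_chi actually draws from:

import torch

def real_label(batch_size):
    # target tensor of ones for "real" predictions (the shape is an assumption)
    return torch.full((batch_size, 1), 1.0)

def added_gaussian_chi(tensor, variance):
    # add zero-mean Gaussian noise with the given variance (assumed semantics)
    return tensor + torch.randn_like(tensor) * variance ** 0.5
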
Example #2
    def train(self, 
             model_dir=constant.train_config['trained_model_dir'], 
             model_name=constant.predict_config['best_model_name']):

        iteration_step = 0
        logger = Logger(self.model_name)

        start_idx_epoch = 0
        for epoch in range(start_idx_epoch, start_idx_epoch+self.num_epochs): 
            print('Executing Epoch: {}'.format(epoch))
            
            #execute each batch
            for sample in iter(self.train_batch):
                #extract data and label
                data = sample['feature']
                label = sample['target']

                #clear gradient
                self.optimizer.zero_grad()
                
                #forward propagation
                batch_output = self.classifier_model.nn_model(data)

                #calculate loss
                loss = self.error(batch_output, label[:, 0, :])

                #calculate gradient and update weights
                loss.backward()
                self.optimizer.step()
                                                        
                # track the number of samples processed so far
                iteration_step += self.batch_size


            eval_metric = EvaluationMetric(self.target_num_classes)

            training_loss = eval_metric.calculateLoss(self.train_batch, self.batch_size, self.classifier_model.nn_model, self.error)
            test_loss = eval_metric.calculateLoss(self.valid_batch, self.batch_size, self.classifier_model.nn_model, self.error)

            precision_train, recall_train, f1_train = eval_metric.calculateEvaluationMetric(self.train_batch, self.batch_size, self.classifier_model.nn_model)
            precision_valid, recall_valid, f1_valid = eval_metric.calculateEvaluationMetric(self.valid_batch, self.batch_size, self.classifier_model.nn_model)

            print('Epoch: {}, F1-Score (Training Dataset): {}, F1-Score (Validation Dataset): {},  Training Loss: {},  Validation Loss: {}'
            .format(epoch, f1_train, f1_valid, training_loss, test_loss))

            print('Precision(Training Dataset): {}, Precision(Validation Dataset): {}, Recall(Training Dataset): {}, Recall(Validation Dataset): {}'
            .format(precision_train, precision_valid, recall_train, recall_valid))
            

            #log the metric in graph with tensorboard
            logger.log(f1_train, f1_valid, training_loss, test_loss, iteration_step)

                
            #save the model weights
            model_filepath = model_dir + os.sep + 'weight_epoch-{}_loss-{}'.format(epoch, training_loss)
            torch.save(self.classifier_model.nn_model.state_dict(), model_filepath)
        
        logger.close()
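
The Logger class used above is project-specific and not shown. A minimal sketch of a TensorBoard-backed logger matching the logger.log(...) call in this snippet; the interface is an assumption, not the original implementation:

from torch.utils.tensorboard import SummaryWriter

class Logger:
    def __init__(self, model_name):
        # one event file per model name (hypothetical layout)
        self.writer = SummaryWriter(comment='-' + model_name)

    def log(self, f1_train, f1_valid, train_loss, valid_loss, step):
        self.writer.add_scalar('F1/train', f1_train, step)
        self.writer.add_scalar('F1/valid', f1_valid, step)
        self.writer.add_scalar('Loss/train', train_loss, step)
        self.writer.add_scalar('Loss/valid', valid_loss, step)

    def close(self):
        self.writer.close()
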
Example #3
        # Add noise to fake
        if opt.add_noise:
            fake = added_gaussian_chi(fake, add_noise_var)

        prediction_fake_g = discriminator(fake)
        label_real = real_label(batch_size).to(gpu)
        g_err = loss(prediction_fake_g, label_real)
        g_err.backward()
        d_fake_2 = prediction_fake_g.mean().item()

        # update the generator only if it is not frozen (or still within freezeEpochs)
        if not opt.freezeG or epoch <= freezeEpochs:
            g_optimizer.step()

        logger.log(d_error_total, g_err, epoch, n_batch, len(dataloader))

        if n_batch % 10 == 0:
            print(
                '[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f'
                % (epoch, opt.epochs, n_batch, len(dataloader), d_error_total,
                   g_err.item(), d_real, d_fake_1, d_fake_2))

        if n_batch % 100 == 0:
            Logger.batch = n_batch
            # generate fake with fixed noise
            test_fake = generator(fixed_noise)
            test_fake = F.pad(test_fake, (p, p, p, p), mode='replicate')

            # clone network to remove batch norm for relevance propagation
            canonical = type(discriminator)(nc, ndf, alpha, ngpu)
Example #4
        fake_data = generator(noise(N)).to(gpu).detach()

        # 1. Train Discriminator
        d_error, d_pred_real, d_pred_fake = train_discriminator(
            d_optimizer, real_data, fake_data)

        # 2. Train Generator

        # Generate fake data
        fake_data = generator(noise(N)).to(gpu)

        # Train Generator
        g_error = train_generator(g_optimizer, fake_data)

        # Log Batch error
        logger.log(d_error, g_error, epoch, n_batch, num_batches)

        # Display Progress every few batches
        if n_batch % 100 == 0:

            # generate fake with fixed noise
            test_fake = generator(test_noise)

            # set ngpu to one, so relevance propagation works
            if (opt.ngpu > 1):
                discriminator.setngpu(1)

            # eval needs to be set so batch norm works with batch size of 1
            # discriminator.eval()
            test_result = discriminator(test_fake)
            test_relevance = discriminator.relprop()
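
The noise, train_discriminator and train_generator helpers come from the surrounding project and are not shown. A minimal sketch of what noise might look like, assuming each sample is a flat 100-dimensional latent vector (both the shape and the dimension are assumptions):

import torch

def noise(n, latent_dim=100):
    # n latent vectors drawn from a standard normal distribution
    return torch.randn(n, latent_dim)
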
Example #5
        generator.zero_grad()

        # Generate and predict on fake images as if they were real
        z_ = noise(n).to(gpu)
        x_f = generator(z_)
        x_fn = added_gaussian(x_f, add_noise_var)
        g_prediction_fake = discriminator(x_fn)
        g_training_loss = loss(g_prediction_fake, y_real)

        # Backpropagate and update weights
        g_training_loss.backward()
        g_optimizer.step()

        # Log batch error
        logger.log(d_training_loss, g_training_loss, epoch, n_batch,
                   num_batches)

        print(
            '[%d/%d][%d/%d] Loss_D: %f Loss_G: %f Loss_D_real: %f Loss_D_fake %f'
            % (epoch, num_epochs, n_batch, num_batches, d_training_loss,
               g_training_loss, d_loss_real, d_loss_fake))

        # Display Progress every few batches
        if n_batch % 100 == 0 or n_batch == num_batches:

            test_fake = generator(test_noise)
            if (opt.ngpu > 1):
                discriminator.setngpu(1)
            discriminator.eval()
            test_result = discriminator(test_fake)
            discriminator.train()
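
The eval()/train() toggling above keeps the running batch-norm statistics from being updated by the fixed-noise test batch and avoids problems with very small batches. A minimal, self-contained sketch of the same pattern with a generic module (the layer sizes are arbitrary):

import torch
import torch.nn as nn

net = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1), nn.BatchNorm2d(8))
sample = torch.randn(1, 3, 32, 32)

net.eval()                      # use running statistics, safe with a single sample
with torch.no_grad():
    out = net(sample)
net.train()                     # resume updating batch-norm statistics
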
Example #6
from utils.utils import Logger
from utils.utils import save_checkpoint
from utils.utils import save_linear_checkpoint

from common.train import *
from evals import test_classifier, evaluate
from datasets.datalmdb import DataLmdb

if 'sup' in P.mode:
    from training.sup import setup
else:
    from training.unsup import setup
train, fname = setup(P.mode, P)

logger = Logger(fname, ask=not resume, local_rank=P.local_rank)
logger.log(P)
logger.log(model)

#############################
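# validation loader over the LMDB dataset; scale=0.00390625 (i.e. 1/256)
# presumably rescales uint8 pixel values into [0, 1)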
dev_dl = torch.utils.data.DataLoader(DataLmdb("/kaggle/working/Fakej/valid",
                                              db_size=38566,
                                              crop_size=128,
                                              flip=False,
                                              scale=0.00390625,
                                              random=False),
                                     batch_size=128,
                                     shuffle=False)
model.eval()
evaluate(model, dev_dl)
print('========================== DONE =======================')
#############################
Example #7
    for i, data in enumerate(data_loader.get_train_loader()):

        timer.update_data()

        # optimize
        model.set_data(data)
        model.optimize()

        counter.update_step()
        timer.update_step()

        if counter.get_steps() % opt.display_freq == 0:
            printer.display(counter, timer, model)
        if counter.get_steps() % opt.log_freq == 0:
            logger.log(model.get_info(),
                       counter.get_total_steps(),
                       prefix='Loss')

    counter.update_epoch()
    timer.update_epoch()
    timer.display_epochs()

    if counter.get_epochs() % opt.val_freq == 0:
        # validate model
        best = validator.validate(
            model, data_loader.get_val_loader())  # will save when appropriate
        if best and opt.test:
            tester.validate(
                model,
                data_loader.get_test_loader())  # will save when appropriate
            # remaining arguments assumed to mirror the logger.log call earlier in the loop
            logger.log(tester.get_info(),
                       counter.get_total_steps(),
                       prefix='Test')
Example #8
def main():
    global args
    args = parser.parse_args()
    use_gpu = torch.cuda.is_available()

    # Load and process data
    time_data = time.time()
    SRC, TRG, train_iter, val_iter = preprocess(args.v, args.b)
    print('Loaded data. |TRG| = {}. Time: {:.2f}.'.format(
        len(TRG.vocab),
        time.time() - time_data))

    # Load embeddings if available
    LOAD_EMBEDDINGS = True
    if LOAD_EMBEDDINGS:
        np_de_file = 'scripts/emb-{}-de.npy'.format(len(SRC.vocab))
        np_en_file = 'scripts/emb-{}-en.npy'.format(len(TRG.vocab))
        embedding_src, embedding_trg = load_embeddings(SRC, TRG, np_de_file,
                                                       np_en_file)
        print('Loaded embedding vectors from np files')
    else:
        embedding_src = (torch.rand(len(SRC.vocab), args.emb) - 0.5) * 2
        embedding_trg = (torch.rand(len(TRG.vocab), args.emb) - 0.5) * 2
        print('Initialized embedding vectors')

    # Create model
    tokens = [TRG.vocab.stoi[x] for x in ['<s>', '</s>', '<pad>', '<unk>']]
    model = Seq2seq(embedding_src,
                    embedding_trg,
                    args.hs,
                    args.nlayers,
                    args.dp,
                    args.bi,
                    args.attn,
                    tokens_bos_eos_pad_unk=tokens,
                    reverse_input=args.reverse_input)

    # Load pretrained model
    if args.model is not None and os.path.isfile(args.model):
        model.load_state_dict(torch.load(args.model))
        print('Loaded pretrained model.')
    model = model.cuda() if use_gpu else model

    # Create weight to mask padding tokens for loss function
    weight = torch.ones(len(TRG.vocab))
    weight[TRG.vocab.stoi['<pad>']] = 0
    weight = weight.cuda() if use_gpu else weight

    # Create loss function and optimizer
    criterion = nn.CrossEntropyLoss(weight=weight)
    optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                        model.parameters()),
                                 lr=args.lr)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                           'max',
                                                           patience=30,
                                                           factor=0.25,
                                                           verbose=True,
                                                           cooldown=6)
    # scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[10,13,16,19], gamma=0.5)

    # Create directory for logs, create logger, log hyperparameters
    path = os.path.join('saves',
                        datetime.datetime.now().strftime("%m-%d-%H-%M-%S"))
    os.makedirs(path, exist_ok=True)
    logger = Logger(path)
    logger.log('COMMAND ' + ' '.join(sys.argv), stdout=False)
    logger.log(
        'ARGS: {}\nOPTIMIZER: {}\nLEARNING RATE: {}\nSCHEDULER: {}\nMODEL: {}\n'
        .format(args, optimizer, args.lr, vars(scheduler), model),
        stdout=False)

    # Train, validate, or predict
    start_time = time.time()
    if args.predict_from_input is not None:
        predict.predict_from_input(model, args.predict_from_input, SRC, TRG,
                                   logger)
    elif args.predict is not None:
        predict.predict(model, args.predict, args.predict_outfile, SRC, TRG,
                        logger)
    elif args.visualize:
        visualize.visualize(train_iter, model, SRC, TRG, logger)
    elif args.evaluate:
        valid.validate(val_iter, model, criterion, SRC, TRG, logger)
    else:
        train.train(train_iter, val_iter, model, criterion, optimizer,
                    scheduler, SRC, TRG, args.epochs, logger)
    logger.log('Finished in {}'.format(time.time() - start_time))
    return
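
The scheduler above is created in 'max' mode, so train.train presumably calls scheduler.step() with a validation score that should increase. A minimal, self-contained sketch of that pattern with dummy scores (the parameter, scores, patience and factor here are illustrative only):

import torch

param = torch.nn.Parameter(torch.zeros(1))
optimizer = torch.optim.Adam([param], lr=1e-3)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'max',
                                                       patience=2, factor=0.5)

for epoch, score in enumerate([0.10, 0.12, 0.12, 0.12, 0.12, 0.12]):
    scheduler.step(score)   # pass the validation metric being maximized
    print(epoch, optimizer.param_groups[0]['lr'])
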