Example #1
def eval(name, cpu, test_data, train, arch, log_dir, model_path,
         output_filename):
    use_cuda = not cpu and torch.cuda.is_available()
    device = 'cuda' if use_cuda else 'cpu'
    print('Using device:', device)

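    # build the model on the chosen device, then load the saved weights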
    model = get_model(arch)
    model.to(device)
    model = load_model(model, device)

    criterion = get_criterion(device, train['loss_reduction'])
    exp_logger = logger.Experiment(name)
    exp_logger.add_meters('test', metrics.make_meter_matching())

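    # build the test split and wrap it in a pairwise (siamese) loader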
    gene_test = Generator('test', test_data)
    gene_test.load_dataset()
    test_loader = siamese_loader(gene_test, train['batch_size'],
                                 gene_test.constant_n_vertices)
    acc, loss = trainer.val_triplet(
        test_loader,
        model,
        criterion,
        exp_logger,
        device,
        epoch=0,
        eval_score=metrics.accuracy_linear_assignment,
        val_test='test')
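    # save accuracy/loss to JSON under a newly created result key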
    key = create_key()
    filename_test = os.path.join(log_dir, output_filename)
    print('Saving result at: ', filename_test)
    save_to_json(key, acc, loss, filename_test)
Example #2
def eval(name, cpu, test_data, train, arch, log_dir, model_path,
         output_filename):
    use_cuda = not cpu and torch.cuda.is_available()
    device = "cuda" if use_cuda else "cpu"
    print("Using device:", device)

    model = get_model(arch)
    model.to(device)
    model = load_model(
        model, device, model_path
    )  # modified to pass model_path explicitly; may not be desirable for normal use

    criterion = get_criterion(device, train["loss_reduction"])
    exp_logger = logger.Experiment(name)
    exp_logger.add_meters("test", metrics.make_meter_matching())

    gene_test = Generator("test", test_data)
    gene_test.load_dataset()
    test_loader = siamese_loader(gene_test, train["batch_size"],
                                 gene_test.constant_n_vertices)
    acc, loss = trainer.val_triplet(
        test_loader,
        model,
        criterion,
        exp_logger,
        device,
        epoch=0,
        eval_score=metrics.accuracy_linear_assignment,
        val_test="test",
    )
    key = create_key()
    filename_test = os.path.join(log_dir, output_filename)
    print("Saving result at: ", filename_test)
    save_to_json(key, acc, loss, filename_test)
Example #3
def main():
    global args, best_score, best_epoch
    best_score, best_epoch = -1, -1
    if len(sys.argv) > 1:
        args = parse_args()
        print('----- Experiment parameters -----')
        for k, v in args.__dict__.items():
            print(k, ':', v)
    else:
        print('Please provide some parameters for the current experiment. Check out args.py for more info!')
        sys.exit()

    # init random seeds
    utils.setup_env(args)

    # init tensorboard summary if asked
    tb_writer = SummaryWriter(f'{args.data_dir}/runs/{args.name}/tensorboard') if args.tensorboard else None

    # init data loaders
    loader = get_loader(args)
    train_loader = torch.utils.data.DataLoader(
        loader(data_dir=args.data_dir, split='train',
               min_size=args.min_size_train, max_size=args.max_size_train,
               dataset_size=args.dataset_size_train),
        batch_size=args.batch_size, shuffle=True, num_workers=args.workers,
        collate_fn=lambda x: x, pin_memory=True)
    val_loader = torch.utils.data.DataLoader(
        loader(data_dir=args.data_dir, split='val',
               min_size=args.min_size_val, max_size=args.max_size_val,
               dataset_size=args.dataset_size_val),
        batch_size=1, shuffle=False, num_workers=args.workers,
        collate_fn=lambda x: x, pin_memory=True)

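    # placeholders; resuming from a checkpoint fills these in below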
    exp_logger, lr = None, None

    model = get_model(args)
    criterion = losses.get_criterion(args)

    # optionally resume from a checkpoint
    if args.resume:
        model, exp_logger, args.start_epoch, best_score, best_epoch, lr = load_checkpoint(args, model)
        args.lr = lr
    else:
        # create all output folders 
        utils.init_output_env(args)

    if exp_logger is None:
        exp_logger = init_logger(args, model)

    optimizer, scheduler = optimizers.get_optimizer(args, model)

    print('  + Number of params: {}'.format(utils.count_params(model)))

    model.to(args.device)
    criterion.to(args.device)

    if args.test:
        test_loader = torch.utils.data.DataLoader(
            loader(data_dir=args.data_dir, split='test',
                   min_size=args.min_size_val, max_size=args.max_size_val,
                   dataset_size=args.dataset_size_val),
            batch_size=args.batch_size, shuffle=False,
            num_workers=args.workers, collate_fn=lambda x: x, pin_memory=True)
        trainer.test(args, test_loader, model, criterion, args.start_epoch,
                     eval_score=metrics.get_score(args.test_type),
                     output_dir=args.out_pred_dir, has_gt=True,
                     print_freq=args.print_freq_val)
        sys.exit()

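    # main train/validate loop: track the best validation mAP across epochs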
    is_best = True
    for epoch in range(args.start_epoch, args.epochs + 1):
        print('Current epoch:', epoch)

        trainer.train(args, train_loader, model, criterion, optimizer,
                      exp_logger, epoch,
                      eval_score=metrics.get_score(args.train_type),
                      print_freq=args.print_freq_train, tb_writer=tb_writer)

        # evaluate on validation set
        mAP, val_loss = trainer.validate(args, val_loader, model, criterion,
                                         exp_logger, epoch,
                                         eval_score=metrics.get_score(args.val_type),
                                         print_freq=args.print_freq_val,
                                         tb_writer=tb_writer)

        # Update learning rate
        if scheduler is None:
            trainer.adjust_learning_rate(args, optimizer, epoch)
        else:
            prev_lr = optimizer.param_groups[0]['lr']
            if args.scheduler == 'ReduceLROnPlateau':
                scheduler.step(val_loss)
            else:
                scheduler.step()

            print(f"Updating learning rate from {prev_lr} to {optimizer.param_groups[0]['lr']}")

        # remember best acc and save checkpoint
        is_best = mAP > best_score

        best_score = max(mAP, best_score)
        if is_best:
            best_epoch = epoch

        save_checkpoint(args, {
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'best_score': best_score,
            'best_epoch': best_epoch,
            'exp_logger': exp_logger,
        }, is_best)

    if args.tensorboard:
        tb_writer.close()

    print(" ***** Processes all done. *****")
def main():

    global args, best_score, best_epoch
    best_score, best_epoch = float('inf'), -1  # lower val MAE is better, so start from +inf
    if len(sys.argv) > 1:
        args = parse_args()
        print('----- Experiment parameters -----')
        for k, v in args.__dict__.items():
            print(k, ':', v)
    else:
        print(
            'Please provide some parameters for the current experiment. Check out args.py for more info!'
        )
        sys.exit()

    # init random seeds
    utils.setup_env(args)

    # init tensorboard summary if asked
    tb_writer = (SummaryWriter(f'{args.data_dir}/runs/{args.name}/tensorboard')
                 if args.tensorboard else None)

    # init data loaders
    loader = get_loader(args)
    train_loader = torch.utils.data.DataLoader(loader(
        path_to_data=args.data_dir, mode='TRAIN'),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)
    val_loader = torch.utils.data.DataLoader(loader(path_to_data=args.data_dir,
                                                    mode='VAL'),
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    exp_logger, lr = None, None

    model = get_model(args)
    criterion = losses.get_criterion(args)
    # optionally resume from a checkpoint
    if args.resume:
        model, exp_logger, args.start_epoch, best_score, best_epoch, lr = load_checkpoint(
            args, model)
        args.lr = lr
    else:
        # create all output folders
        utils.init_output_env(args)
    if exp_logger is None:
        exp_logger = init_logger(args, model)

    optimizer, scheduler = optimizers.get_optimizer(args, model)

    print('  + Number of params: {}'.format(utils.count_params(model)))

    model.to(args.device)
    criterion.to(args.device)

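    # test-only mode: run evaluation once and exit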
    if args.test:
        test_loader = torch.utils.data.DataLoader(loader(
            path_to_data=args.data_dir, mode='TEST'),
                                                  batch_size=args.batch_size,
                                                  shuffle=False,
                                                  num_workers=args.workers,
                                                  pin_memory=True)
        trainer.test(args,
                     test_loader,
                     model,
                     criterion,
                     args.start_epoch,
                     eval_score=metrics.accuracy_regression,
                     output_dir=args.out_pred_dir,
                     has_gt=True)
        sys.exit()

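    # main train/validate loop: lower validation MAE is better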
    is_best = True
    for epoch in range(args.start_epoch, args.epochs + 1):
        print('Current epoch: ', epoch)

        trainer.train(args,
                      train_loader,
                      model,
                      criterion,
                      optimizer,
                      exp_logger,
                      epoch,
                      eval_score=metrics.accuracy_regression,
                      tb_writer=tb_writer)

        # evaluate on validation set
        val_mae, val_squared_mse, val_loss = trainer.validate(
            args,
            val_loader,
            model,
            criterion,
            exp_logger,
            epoch,
            eval_score=metrics.accuracy_regression,
            tb_writer=tb_writer)

        # update learning rate
        if scheduler is None:
            trainer.adjust_learning_rate(args, optimizer, epoch)
        else:
            prev_lr = optimizer.param_groups[0]['lr']
            if args.scheduler == 'ReduceLROnPlateau':
                scheduler.step(val_loss)
            else:
                scheduler.step()

            print(
                f"Updating learning rate from {prev_lr} to {optimizer.param_groups[0]['lr']}"
            )

        # remember best acc and save checkpoint
        is_best = val_mae < best_score
        best_score = min(val_mae, best_score)
        if is_best:
            best_epoch = epoch

        save_checkpoint(
            args, {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_score': best_score,
                'best_epoch': best_epoch,
                'exp_logger': exp_logger,
            }, is_best)

        # write plots to disk
        generate_plots(args, exp_logger, is_best=is_best)

        # generate html report
        logger.export_logs(args, epoch, best_epoch)

    if args.tensorboard:
        tb_writer.close()

    print("That's all folks!")
Example #5
def train(cpu, train_data, train, arch, log_dir):
    """ Main func.
    """
    global best_score, best_epoch
    best_score, best_epoch = -1, -1
    use_cuda = not cpu and torch.cuda.is_available()
    device = 'cuda' if use_cuda else 'cpu'
    print('Using device:', device)

    # init random seeds
    setup_env()
    print("Models saved in ", log_dir)

    init_output_env()
    exp_logger = init_logger()

    gene_train = Generator('train', train_data)
    gene_train.load_dataset()
    train_loader = siamese_loader(gene_train, train['batch_size'],
                                  gene_train.constant_n_vertices)
    gene_val = Generator('val', train_data)
    gene_val.load_dataset()
    val_loader = siamese_loader(gene_val, train['batch_size'],
                                gene_val.constant_n_vertices)

    model = get_model(arch)
    optimizer, scheduler = get_optimizer(train, model)
    criterion = get_criterion(device, train['loss_reduction'])

    model.to(device)

    is_best = True
    for epoch in range(train['epoch']):
        print('Current epoch: ', epoch)
        trainer.train_triplet(train_loader,
                              model,
                              criterion,
                              optimizer,
                              exp_logger,
                              device,
                              epoch,
                              eval_score=metrics.accuracy_max,
                              print_freq=train['print_freq'])

        acc, loss = trainer.val_triplet(
            val_loader,
            model,
            criterion,
            exp_logger,
            device,
            epoch,
            eval_score=metrics.accuracy_linear_assignment)
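        # assumes a plateau-style scheduler: stepped on the validation loss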
        scheduler.step(loss)
        # remember best acc and save checkpoint
        is_best = acc > best_score
        best_score = max(acc, best_score)
        if is_best:
            best_epoch = epoch

        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_score': best_score,
                'best_epoch': best_epoch,
                'exp_logger': exp_logger,
            }, is_best)
Example #6
def train(cpu, train_data, train, arch, log_dir):
    """Main func."""
    global best_score, best_epoch
    best_score, best_epoch = -1, -1
    use_cuda = not cpu and torch.cuda.is_available()
    device = "cuda" if use_cuda else "cpu"
    print("Using device:", device)

    # init random seeds
    setup_env()
    print("Models saved in ", log_dir)

    init_output_env()
    exp_logger = init_logger()

    gene_train = Generator("train", train_data)
    gene_train.load_dataset()
    train_loader = siamese_loader(gene_train, train["batch_size"],
                                  gene_train.constant_n_vertices)
    gene_val = Generator("val", train_data)
    gene_val.load_dataset()
    val_loader = siamese_loader(gene_val, train["batch_size"],
                                gene_val.constant_n_vertices)

    model = get_model(arch)

    optimizer, scheduler = get_optimizer(train, model)
    criterion = get_criterion(device, train["loss_reduction"])

    model.to(device)

    is_best = True
    for epoch in range(train["epoch"]):
        print("Current epoch: ", epoch)
        trainer.train_triplet(
            train_loader,
            model,
            criterion,
            optimizer,
            exp_logger,
            device,
            epoch,
            eval_score=metrics.accuracy_linear_assignment,
            print_freq=train["print_freq"],
        )

        acc, loss = trainer.val_triplet(
            val_loader,
            model,
            criterion,
            exp_logger,
            device,
            epoch,
            eval_score=metrics.accuracy_linear_assignment,
        )
        scheduler.step(loss)
        # remember best acc and save checkpoint
        is_best = acc > best_score
        best_score = max(acc, best_score)
        if is_best:
            best_epoch = epoch

        save_checkpoint(
            {
                "epoch": epoch + 1,
                "state_dict": model.state_dict(),
                "best_score": best_score,
                "best_epoch": best_epoch,
                "exp_logger": exp_logger,
            },
            is_best,
        )
Example #7
def main():
    """ Main func.
    """
    global args, best_score, best_epoch
    best_score, best_epoch = -1, -1
    args = docopt(__doc__)
    args = type_args(args)
    args = update_args(args)
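    # docopt-style CLI: option values are read via '--flag' keys on the args dict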
    use_cuda = not bool(args['--cpu']) and torch.cuda.is_available()
    device = 'cuda' if use_cuda else 'cpu'
    print('Using device:', device)

    # init random seeds
    utils.setup_env(args)

    utils.init_output_env(args)

    exp_logger = init_logger(args)

    print(args['--batch_size'])
    gene_train = Generator('train', args)
    _ = gene_train.load_dataset()
    train_loader = siamese_loader(gene_train, args['--batch_size'])
    gene_val = Generator('val', args)
    _ = gene_val.load_dataset()
    val_loader = siamese_loader(gene_val, args['--batch_size'])

    model = get_model(args)
    optimizer, scheduler = get_optimizer(args, model)
    criterion = get_criterion(device)

    model.to(device)

    is_best = True
    for epoch in range(args['--epoch']):
        print('Current epoch: ', epoch)
        trainer.train_triplet(train_loader,
                              model,
                              criterion,
                              optimizer,
                              exp_logger,
                              device,
                              epoch,
                              eval_score=metrics.accuracy_max)
        scheduler.step()
        #print(args['--num_examples_train'])

        acc = trainer.val_triplet(val_loader,
                                  model,
                                  criterion,
                                  exp_logger,
                                  device,
                                  epoch,
                                  eval_score=metrics.accuracy_linear_assignment)

        # remember best acc and save checkpoint
        is_best = acc > best_score
        best_score = max(acc, best_score)
        if is_best:
            best_epoch = epoch

        save_checkpoint(
            args, {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_score': best_score,
                'best_epoch': best_epoch,
                'exp_logger': exp_logger,
            }, is_best)