Example #1
def main():
    # Create Model, Criterion and State
    model, criterion, state = create_model(args)
    print("=> Model and criterion are ready")
    # Create Dataloader
    if not args.test_only:
        train_loader = get_train_loader(args)
    val_loader = get_test_loader(args)
    print("=> Dataloaders are ready")
    # Create Logger
    logger = Logger(args, state)
    print("=> Logger is ready")  # Create Trainer
    trainer = Trainer(args, model, criterion, logger)
    print("=> Trainer is ready")

    if args.test_only:
        test_summary = trainer.test(0, val_loader)
        print("- Test:  Acc %6.3f " % (test_summary['acc']))
    else:
        start_epoch = logger.state['epoch'] + 1
        print("=> Start training")
        # test_summary = trainer.test(0, val_loader)

        for epoch in range(start_epoch, args.n_epochs + 1):
            train_summary = trainer.train(epoch, train_loader)
            test_summary = trainer.test(epoch, val_loader)

            logger.record(epoch, train_summary, test_summary, model)

        logger.final_print()
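Example #1 resumes from logger.state['epoch'] + 1, which only works if the Logger persists its state across runs. A minimal sketch of that checkpoint pattern, assuming a hypothetical path and a plain state dict rather than the example's actual Logger internals:

import os
import torch

CKPT = "checkpoints/latest.pth"  # hypothetical path

def save_state(epoch, model, path=CKPT):
    # Persist the epoch counter together with the weights.
    os.makedirs(os.path.dirname(path), exist_ok=True)
    torch.save({"epoch": epoch, "model": model.state_dict()}, path)

def load_state(model, path=CKPT):
    # Return the last completed epoch, or 0 for a fresh run.
    if not os.path.exists(path):
        return 0
    ckpt = torch.load(path, map_location="cpu")
    model.load_state_dict(ckpt["model"])
    return ckpt["epoch"]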
Example #2
def main():
    '''
    01, 02 - 2D spatial (images)
    03, 04 - 3D geometric (3D images)
    05, 06 - 3D temporal (3D optical flow)
    07, 08 - 3D temporal (3D optical flow - no augmentation)
    09, 10 - 2D temporal (2D optical flow)

    ------ If time:
    - 2-stream concatenate lstm output
    - 2-stream svm classifier
    '''
    print_config()

    # Get network
    net = torch.nn.DataParallel(NEURAL_NET).cuda()

    # Get dataloaders
    train_loader = get_train_loader()
    test_loader = get_test_loader()

    # Set up optimizer with auto-adjusting learning rate
    parameters = [p for p in net.parameters() if p.requires_grad]
    optimizer = optim.Adam(parameters, lr=0.001)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.5)

    # Train
    for epoch in range(NUM_EPOCHS):
        train_acc = training_epoch(net, optimizer, epoch, train_loader)
        # Step the LR schedule after the epoch's optimizer updates;
        # stepping it before training skips the initial learning rate.
        scheduler.step()

        # Checkpoint results
        model_file = 'torch_models/torch_model_experiment_{:02}_epoch_{:02}'.format(
            EXPERIMENT_NUM, epoch)
        torch.save(net.state_dict(), model_file)

        # net.load_state_dict(torch.load(model_file))
        # valid_acc = test_epoch(net, test_loader, desc="Validation (epoch {:02})".format(epoch))
        # print('Epoch {:02} top-1 validation accuracy: {:.1f}%'.format(epoch, valid_acc))

    # Save results
    model_file = 'torch_models/torch_model_experiment_{:02}'.format(
        EXPERIMENT_NUM)
    torch.save(net.state_dict(), model_file)

    # Test
    # net.load_state_dict(torch.load('torch_models/torch_model_experiment_{:02}'.format(EXPERIMENT_NUM)))
    test_acc = test_epoch(net, test_loader, desc="Testing")
    print('Experiment {:02} test-set accuracy: {:.2f}%'.format(
        EXPERIMENT_NUM, test_acc))
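Note that net is wrapped in torch.nn.DataParallel, so the saved state_dict keys carry a "module." prefix; the commented-out load calls above work because they load back into the wrapped net. A minimal sketch of loading the same checkpoint into an unwrapped module, assuming NEURAL_NET is the same architecture:

import torch

state = torch.load(model_file, map_location="cpu")
# DataParallel prefixes every key with "module."; strip it so the
# weights load into the bare module.
state = {k[len("module."):] if k.startswith("module.") else k: v
         for k, v in state.items()}
NEURAL_NET.load_state_dict(state)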
Example #3
    ##################################################
    # Main Loop #
    ##################################################
    for i, (trn_idx, val_idx) in enumerate(splitter.split(train_all)):
        if i not in global_params["folds"]:
            continue
        logger.info("=" * 20)
        logger.info(f"Fold {i}")
        logger.info("=" * 20)

        trn_df = train_all.loc[trn_idx, :].reset_index(drop=True)
        val_df = train_all.loc[val_idx, :].reset_index(drop=True)

        loaders = {
            phase: datasets.get_train_loader(df_, tp, fp, train_audio, config,
                                             phase)
            for df_, phase in zip([trn_df, val_df], ["train", "valid"])
        }
        model = models.get_model(config, fold=i).to(device)
        criterion = criterions.get_criterion(config)
        optimizer = training.get_optimizer(model, config)
        scheduler = training.get_scheduler(optimizer, config)
        callbacks = clb.get_callbacks(config)

        runner = training.get_runner(config, device)

        runner.train(model=model,
                     criterion=criterion,
                     loaders=loaders,
                     optimizer=optimizer,
                     scheduler=scheduler,
                     callbacks=callbacks)
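The loaders dict keyed by "train"/"valid", the callbacks list, and the runner.train(...) signature suggest, but do not confirm, that training.get_runner wraps a Catalyst runner. Under that assumption, a direct equivalent would look roughly like the following (the num_epochs value and logdir are placeholders):

from catalyst import dl

runner = dl.SupervisedRunner()  # assumption: Catalyst backend
runner.train(model=model,
             criterion=criterion,
             optimizer=optimizer,
             scheduler=scheduler,
             loaders=loaders,
             callbacks=callbacks,
             num_epochs=50,        # placeholder epoch count
             logdir="logs/fold0",  # hypothetical log directory
             verbose=True)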
Example #4
            model = models.prepare_for_inference(
                model, expdir / f"fold{i}/checkpoints/best.pth").to(device)
            last = False

        ttas = config["tta"]
        oof_tta_predictions = []
        tta_predictions = []
        for tta in ttas:
            logger.info("#" * 20)
            logger.info(tta["name"])

            _config = config.copy()
            _config["transforms"]["valid"] = [tta]
            val_loader = datasets.get_train_loader(val_df,
                                                   tp,
                                                   fp,
                                                   train_audio,
                                                   _config,
                                                   phase="valid")

            _config["transforms"]["test"] = [tta]

            loader = datasets.get_test_loader(test_all, test_audio, _config)

            if config["inference"]["prediction_type"] == "strong":
                ##################################################
                # OOF #
                ##################################################
                logger.info("*" * 20)
                logger.info(f"OOF prediction for fold{i}")
                logger.info("*" * 20)
                recording_ids = []
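The fragment accumulates one prediction set per TTA transform in tta_predictions but cuts off before the aggregation step. A common reduction, assuming each entry is a NumPy array of per-clip probabilities with identical shape (the reduction actually used in the source is not shown):

import numpy as np

# Average probabilities across all test-time augmentations.
tta_mean = np.mean(np.stack(tta_predictions, axis=0), axis=0)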
Example #5
    splitter = training.get_split(config)

    ##################################################
    # Main Loop #
    ##################################################
    fold_predictions = []
    oof_predictions = []
    for i, (trn_idx, val_idx) in enumerate(splitter.split(train_all)):
        if i not in global_params["folds"]:
            continue
        logger.info("=" * 20)
        logger.info(f"Fold {i}")
        logger.info("=" * 20)

        val_df = train_all.iloc[val_idx, :].reset_index(drop=True)
        val_loader = datasets.get_train_loader(
            val_df, tp, fp, train_audio, config, phase="valid")

        loader = datasets.get_test_loader(test_all, test_audio, config)
        model = models.get_model(config)
        if config["inference"].get("last", False):
            model = models.prepare_for_inference(
                model, expdir / f"fold{i}/checkpoints/last.pth").to(device)
            last = True
        else:
            model = models.prepare_for_inference(
                model, expdir / f"fold{i}/checkpoints/best.pth").to(device)
            last = False

        if config["inference"].get("soft_prediction", False):
            soft_val_loader = datasets.get_train_loader(
                val_df, tp, fp, train_audio, soft_inference_config, phase="valid")
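fold_predictions collects one test-set prediction per trained fold, while oof_predictions covers disjoint validation rows; the ensembling step falls outside this excerpt. A typical treatment of the two lists, assuming arrays of shape (n_samples, n_classes):

import numpy as np

# Each fold scored the full test set, so average across folds.
test_pred = np.mean(np.stack(fold_predictions, axis=0), axis=0)
# OOF rows are disjoint across folds, so they are typically
# concatenated (and re-ordered by index) rather than averaged.
oof_pred = np.concatenate(oof_predictions, axis=0)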
Example #6
    ##################################################
    # Main Loop #
    ##################################################
    logger.info("=" * 20)
    logger.info("No Fold Training")
    logger.info("=" * 20)

    checkpoints_dir = logdir / "checkpoints"
    checkpoints_dir.mkdir(exist_ok=True, parents=True)

    train_writer = SummaryWriter(log_dir=logdir / "train_log")

    loader = datasets.get_train_loader(train_all,
                                       tp,
                                       fp,
                                       train_audio,
                                       config,
                                       phase="train")

    model = models.get_model(config).to(device)
    criterion = criterions.get_criterion(config)
    optimizer = training.get_optimizer(model, config)
    scheduler = training.get_scheduler(optimizer, config)

    # Exponential moving average of the weights:
    # new_average = 0.1 * old_average + 0.9 * current_parameter
    ema_model = AveragedModel(
        model,
        device=device,
        avg_fn=lambda averaged_model_parameter, model_parameter, num_averaged:
        0.1 * averaged_model_parameter + 0.9 * model_parameter)

    _metrics = {}
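The AveragedModel above only defines the averaging rule; its weights are updated explicitly during training, which happens outside this excerpt. A minimal sketch of the intended update loop for one epoch, assuming batches of (inputs, targets) pairs and the same loader/criterion/optimizer objects:

for inputs, targets in loader:  # assumption: (input, target) batches
    optimizer.zero_grad()
    loss = criterion(model(inputs.to(device)), targets.to(device))
    loss.backward()
    optimizer.step()
    # Fold the fresh weights into the running EMA after every step.
    ema_model.update_parameters(model)
scheduler.step()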