Example #1
def main():
    # Create Model, Criterion and State
    model, criterion, state = create_model(args)
    print("=> Model and criterion are ready")
    # Create Dataloader
    if not args.test_only:
        train_loader = get_train_loader(args)
    val_loader = get_test_loader(args)
    print("=> Dataloaders are ready")
    # Create Logger
    logger = Logger(args, state)
    print("=> Logger is ready")  # Create Trainer
    trainer = Trainer(args, model, criterion, logger)
    print("=> Trainer is ready")

    if args.test_only:
        test_summary = trainer.test(0, val_loader)
        print("- Test:  Acc %6.3f " % (test_summary['acc']))
    else:
        start_epoch = logger.state['epoch'] + 1
        print("=> Start training")
        # test_summary = trainer.test(0, val_loader)

        for epoch in range(start_epoch, args.n_epochs + 1):
            train_summary = trainer.train(epoch, train_loader)
            test_summary = trainer.test(epoch, val_loader)

            logger.record(epoch, train_summary, test_summary, model)

        logger.final_print()
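
Every example on this page consumes a project-specific get_test_loader. As a point of reference, here is a minimal sketch of what such a helper usually wraps; the name, signature, and defaults are assumptions for illustration, not code from any project listed here:

from torch.utils.data import DataLoader

def get_test_loader(dataset, batch_size=32, num_workers=4):
    # Test-time loaders are conventionally unshuffled so predictions stay
    # aligned with dataset order, and drop_last is left False so every
    # sample is evaluated exactly once.
    return DataLoader(dataset,
                      batch_size=batch_size,
                      shuffle=False,
                      num_workers=num_workers,
                      pin_memory=True)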
Example #2
File: eval.py Project: foamtsp/CVProject
def eval_ckpt():
    parser = argparse.ArgumentParser()
    parser.add_argument("config_paths", nargs="+", help="path to config.yaml")
    parser.add_argument("--weight", help="path to weight to evaluate.pth")
    parser.add_argument("--result_dir", help="path to save the result file")
    args, left_argv = parser.parse_known_args()

    cfg = Config(*args.config_paths, default="cfgs/defaults.yaml")
    cfg.argv_update(left_argv)
    img_dir = Path(args.result_dir)
    img_dir.mkdir(parents=True, exist_ok=True)

    trn_transform, val_transform = setup_transforms(cfg)

    g_kwargs = cfg.get('g_args', {})
    gen = Generator(1, cfg.C, 1, **g_kwargs).cuda()

    weight = torch.load(args.weight)
    if "generator_ema" in weight:
        weight = weight["generator_ema"]
    gen.load_state_dict(weight)
    gen.eval()  # inference mode: fixes batch-norm/dropout behavior
    test_dset, test_loader = get_test_loader(cfg, val_transform)

    for batch in test_loader:
        style_imgs = batch["style_imgs"].cuda()
        char_imgs = batch["source_imgs"].unsqueeze(1).cuda()

        with torch.no_grad():  # no gradients needed during evaluation
            out = gen.gen_from_style_char(style_imgs, char_imgs)
        fonts = batch["fonts"]
        chars = batch["chars"]

        for image, font, char in zip(refine(out), fonts, chars):
            (img_dir / font).mkdir(parents=True, exist_ok=True)
            path = img_dir / font / f"{char}.png"
            save_tensor_to_image(image, path)
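
save_tensor_to_image above is project code. If a stand-in with the same call shape is needed, torchvision's save_image covers it; this is a hypothetical replacement, not the project's implementation:

from torchvision.utils import save_image

def save_tensor_to_image(tensor, path):
    # Scales a float CHW tensor in [0, 1] to 8-bit and writes it to disk;
    # the image format is inferred from the file extension (.png here).
    save_image(tensor, str(path))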
Example #3
File: test.py Project: rosaann/landmark
def get_test_max_landmark_of_one_model(config, gi, best_model_idx, key_group):
    test_img_list = gen_test_csv()
    print('test_img_list ', len(test_img_list))

    # result_set_whole = {}
    # for img_id in test_img_list:
    #     result_set_whole[img_id] = {}  # initialize an entry per image id
    test_data_set = get_test_loader(config, test_img_list,
                                    get_transform(config, 'val'))
    model = get_model(config, gi)
    if torch.cuda.is_available():
        model = model.cuda()
    optimizer = get_optimizer(config, model.parameters())
    checkpoint = utils.checkpoint.get_model_saved(config, gi, best_model_idx)
    best_epoch, step = utils.checkpoint.load_checkpoint(
        model, optimizer, checkpoint)
    result_set = test_one_model(test_data_set, model, key_group)

    result_list_whole = []
    for img_ps, ps in result_set.items():
        max_p_key = max(ps, key=ps.get)  # landmark id with the highest score
        # result_set_whole[img_ps][max_p_key] = ps[max_p_key]
        result_list_whole.append((img_ps, max_p_key, ps[max_p_key]))

    test_pd = pd.DataFrame.from_records(
        result_list_whole, columns=['img_id', 'landmark_id', 'pers'])
    output_filename = os.path.join('./results/test/',
                                   'test_img_land_' + str(gi) + '.csv')
    test_pd.to_csv(output_filename, index=False)

    return
Example #4
def main():
    '''
    01, 02 - 2D spatial (images)
    03, 04 - 3D geometric (3D images)
    05, 06 - 3D temporal (3D optical flow)
    07, 08 - 3D temporal (3D optical flow - no augmentation)
    09, 10 - 2D temporal (2D optical flow)

    ------ If time:
    - 2-stream concatenate lstm output
    - 2-stream svm classifier
    '''
    print_config()

    # Get network
    net = torch.nn.DataParallel(NEURAL_NET).cuda()

    # Get dataloaders
    train_loader = get_train_loader()
    test_loader = get_test_loader()

    # Set up optimizer with auto-adjusting learning rate
    parameters = [p for p in net.parameters() if p.requires_grad]
    optimizer = optim.Adam(parameters, lr=0.001)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.5)

    # Train
    for epoch in range(NUM_EPOCHS):
        train_acc = training_epoch(net, optimizer, epoch, train_loader)
        # Step the LR scheduler after the epoch's optimizer updates
        # (the ordering PyTorch has required since 1.1).
        scheduler.step()

        # Checkpoint results
        model_file = 'torch_models/torch_model_experiment_{:02}_epoch_{:02}'.format(
            EXPERIMENT_NUM, epoch)
        torch.save(net.state_dict(), model_file)

        # net.load_state_dict(torch.load(model_file))
        # valid_acc = test_epoch(net, test_loader, desc="Validation (epoch {:02})".format(epoch))
        # print('Epoch {:02} top-1 validation accuracy: {:.1f}%'.format(epoch, valid_acc))

    # Save results
    model_file = 'torch_models/torch_model_experiment_{:02}'.format(
        EXPERIMENT_NUM)
    torch.save(net.state_dict(), model_file)

    # Test
    # net.load_state_dict(torch.load('torch_models/torch_model_experiment_{:02}'.format(EXPERIMENT_NUM)))
    test_acc = test_epoch(net, test_loader, desc="Testing")
    print('Experiment {:02} test-set accuracy: {:.2f}%'.format(
        EXPERIMENT_NUM, test_acc))
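
A caveat when reloading the checkpoints saved above: because net is wrapped in torch.nn.DataParallel, every key in its state_dict carries a "module." prefix, so loading into a bare (unwrapped) model needs the prefix stripped first. A minimal sketch (the helper name is hypothetical):

import torch

def load_into_unwrapped_model(model, checkpoint_path):
    # Checkpoints saved from a DataParallel-wrapped net prefix every key
    # with "module."; strip it before loading into an unwrapped model.
    state = torch.load(checkpoint_path, map_location="cpu")
    state = {k.removeprefix("module."): v for k, v in state.items()}
    model.load_state_dict(state)
    return model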
Example #5
        for tta in ttas:
            logger.info("#" * 20)
            logger.info(tta["name"])

            _config = config.copy()
            _config["transforms"]["valid"] = [tta]
            val_loader = datasets.get_train_loader(val_df,
                                                   tp,
                                                   fp,
                                                   train_audio,
                                                   _config,
                                                   phase="valid")

            _config["transforms"]["test"] = [tta]

            loader = datasets.get_test_loader(test_all, test_audio, _config)

            if config["inference"]["prediction_type"] == "strong":
                ##################################################
                # OOF #
                ##################################################
                logger.info("*" * 20)
                logger.info(f"OOF prediction for fold{i}")
                logger.info("*" * 20)
                recording_ids = []
                batch_predictions = []
                indices = []
                for batch in tqdm(val_loader, leave=True):
                    recording_ids.extend(batch["recording_id"])
                    indices.extend(batch["index"].numpy())
                    input_ = batch[global_params["input_key"]].to(device)
Example #6
                }
            },
            "loader": {
                "valid": {
                    "batch_size": 1,
                    "shuffle": False,
                    "num_workers": 20
                },
                "test": {
                    "batch_size": 1,
                    "shuffle": False,
                    "num_workers": 20
                }
            }
        }
        soft_test_loader = datasets.get_test_loader(
            test_all, test_audio, soft_inference_config)

        soft_oof_dir = expdir / "soft_oof"
        soft_oof_dir.mkdir(exist_ok=True, parents=True)

        soft_prediction_dir = expdir / "soft_prediction"
        soft_prediction_dir.mkdir(exist_ok=True, parents=True)

        soft_predictions = {}

    # validation
    splitter = training.get_split(config)

    ##################################################
    # Main Loop #
    ##################################################
Example #7
input_size = args.input_size
train_transform = get_train_transforms(input_size)
valid_transform = get_valid_transforms(input_size)
if args.dataset == "JRDR":
    dataset_dir = os.path.join("data", "JRDR")
    test_dir = os.path.join("data", "JRDR")
    train_data = JRDR(root=dataset_dir, transform=train_transform)
    valid_data = JRDR(root=dataset_dir, transform=valid_transform)
    test_data = JRDR(root=test_dir, split="test", transform=valid_transform)
else:
    raise NotImplementedError(args.dataset)

train_loader, valid_loader = get_train_valid_loader(train_data,
                                                    valid_data,
                                                    show_sample=False)
test_loader = get_test_loader(test_data)

model = DerainCNNModular(
    input_size=input_size,
    channel_mul=args.channel_mul,
    depth=args.depth,
    center_depth=args.center_depth,
    attention_type=args.attention_type,
    reduction=args.reduction,
    lr=args.lr,
    gamma=args.gamma,
)

exp_id = os.path.join(args.dataset, args.id)
logdir = args.logdir
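
get_train_valid_loader here takes two pre-built datasets over the same images (differing only in their transforms), which suggests an index-based split. A common way to implement that is with SubsetRandomSampler; the following is a hypothetical sketch under that assumption, not the project's actual helper:

import numpy as np
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler

def get_train_valid_loader(train_data, valid_data, valid_ratio=0.1,
                           batch_size=16, num_workers=4, show_sample=False):
    # Split on shared indices so each subset keeps its own transforms.
    indices = np.random.permutation(len(train_data)).tolist()
    split = int(len(indices) * valid_ratio)
    train_loader = DataLoader(train_data, batch_size=batch_size,
                              sampler=SubsetRandomSampler(indices[split:]),
                              num_workers=num_workers)
    valid_loader = DataLoader(valid_data, batch_size=batch_size,
                              sampler=SubsetRandomSampler(indices[:split]),
                              num_workers=num_workers)
    # show_sample is accepted to match the call site; visualization omitted.
    return train_loader, valid_loader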
Example #8
        checkpoints_dir = _logdir_high / "checkpoints"
        checkpoints_dir.mkdir(exist_ok=True, parents=True)

        train_writer = SummaryWriter(log_dir=_logdir_high / "train_log")
        valid_writer = SummaryWriter(log_dir=_logdir_high / "valid_log")

        config["dataset"]["train"]["params"]["frequency_range"] = "high"
        config["dataset"]["valid"]["params"]["frequency_range"] = "high"
        config["dataset"]["test"]["params"]["frequency_range"] = "high"

        loaders = {
            phase: datasets.get_train_loader(df_, tp, fp, train_audio, config,
                                             phase)
            for df_, phase in zip([trn_df, val_df], ["train", "valid"])
        }
        test_loader = datasets.get_test_loader(test_all, test_audio, config)

        soft_inference_config = {
            "loader": {
                "test": {
                    "batch_size": 1,
                    "shuffle": False,
                    "num_workers": config["loader"]["test"]["num_workers"]
                }
            },
            "dataset": {
                "test": {
                    "name": "LimitedFrequencySampleWiseSpectrogramTestDataset",
                    "params": config["dataset"]["test"]["params"]
                }
            },