Example #1
def test_neural(name, cfg, learned_params):
    """Function for meta-test of learned optimizers.
  """
    cfg = Config.merge(
        [cfg.problem, cfg.neural_optimizers[name].test_args, cfg.args])
    data_cls = _get_attr_by_name(cfg.data_cls)
    model_cls = _get_attr_by_name(cfg.model_cls)
    optim_cls = _get_optim_by_name(cfg.optim_module)
    writer = TFWriter(cfg.save_dir, name) if cfg.save_dir else None

    optimizer = C(optim_cls())
    if learned_params is not None:
        optimizer.params = learned_params

    meta_optim = None
    unroll = 1

    best_test_loss = 999999
    result_all = ResultDict()
    result_final = ResultDict()  # test loss & acc over the whole test set
    tests_pbar = tqdm(range(cfg.n_test), 'test')

    # Meta-test
    for j in tests_pbar:
        data = data_cls().sample_meta_test()
        result, params = optimizer.meta_optimize(cfg, meta_optim, data,
                                                 model_cls, writer, 'test')
        result_all.append(result)
        result_final.append(
            final_inner_test(C(model_cls(params)),
                             data['in_test'],
                             mode='test'))
        result_final_mean = result_final.mean()
        last_test_loss = result_final_mean['final_loss_mean']
        last_test_acc = result_final_mean['final_acc_mean']

        if last_test_loss < best_test_loss:
            best_test_loss = last_test_loss
            best_test_acc = last_test_acc

        result_test = dict(
            best_test_loss=best_test_loss,
            best_test_acc=best_test_acc,
            last_test_loss=last_test_loss,
            last_test_acc=last_test_acc,
        )
        log_pbar(result_test, tests_pbar)
        if cfg.save_dir:
            save_figure(name, cfg.save_dir, writer, result, j, 'test')
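    # Average per-iteration results over all n_test meta-test runs.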
    result_all_mean = result_all.mean(0)

    # TF-events for inner loop (train & test)
    if cfg.save_dir:
        # test_result_mean = ResultDict()
        for i in range(cfg.iter_test):
            mean_i = result_all_mean.getitem(i)
            walltime = result_all_mean['walltime'][i]
            log_tf_event(writer, 'meta_test_inner', mean_i, i, walltime)
            # test_result_mean.update(**mean_i, step=i, walltime=walltime)
        result_all.save(name, cfg.save_dir)
        result_all.save_as_csv(name, cfg.save_dir, 'meta_test_inner')
        result_final.save_as_csv(name,
                                 cfg.save_dir,
                                 'meta_test_final',
                                 trans_1d=True)

    return result_all
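
A minimal usage sketch for this variant (the config loader, run name, and checkpoint path below are illustrative assumptions, not APIs confirmed by the code above):

import torch

cfg = load_config('config/neural.yaml')              # hypothetical config helper
learned_params = torch.load('save/meta_trained.pt')  # assumed checkpoint of learned optimizer parameters
result_all = test_neural('neural_obsrv', cfg, learned_params)  # ResultDict of inner-loop curves over cfg.n_test runs
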
Example #2
def test_normal(name, cfg):
    """function for test of static optimizers."""

    cfg = Config.merge(
        [cfg.problem, cfg.args, cfg.neural_optimizers[name].test_args])
    data_cls = _get_attr_by_name(cfg.problem.data_cls)
    model_cls = _get_attr_by_name(cfg.problem.model_cls)
    optim_cls = _get_attr_by_name(cfg.problem.optim_cls)

    writer = TFWriter(cfg.save_dir, name) if cfg.save_dir else None
    tests_pbar = tqdm(range(cfg.n_test), 'outer_test')
    result_all = ResultDict()
    result_final = ResultDict()  # test loss & acc over the whole test set
    best_test_loss = 999999
    # params_tracker = ParamsIndexTracker(n_tracks=10)

    # Outer loop (n_test)
    for j in tests_pbar:
        data = data_cls().sample_meta_test()
        model = C(model_cls())
        optimizer = optim_cls(model.parameters(), **cfg.optim_args)

        walltime = Walltime()
        result = ResultDict()
        iter_pbar = tqdm(range(cfg.iter_test), 'inner_train')

        # Inner loop (iter_test)
        for k in iter_pbar:

            with WalltimeChecker(walltime):
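                # Only the training step is timed; the inner-test forward
                # pass below is excluded from the accumulated walltime.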
                # Inner-training loss
                train_nll, train_acc = model(*data['in_train'].load())
                optimizer.zero_grad()
                train_nll.backward()
                # before = model.params.detach('hard').flat
                optimizer.step()
            # Inner-test loss
            test_nll, test_acc = model(*data['in_test'].load())
            # update = model.params.detach('hard').flat - before
            # grad = model.params.get_flat().grad
            result_ = dict(
                train_nll=train_nll.tolist(),
                test_nll=test_nll.tolist(),
                train_acc=train_acc.tolist(),
                test_acc=test_acc.tolist(),
                walltime=walltime.time,
            )
            log_pbar(result_, iter_pbar)
            result.append(result_)

        if cfg.save_dir:
            save_figure(name, cfg.save_dir, writer, result, j, 'normal')
        result_all.append(result)
        result_final.append(
            final_inner_test(model, data['in_test'], mode='test'))

        result_final_mean = result_final.mean()
        last_test_loss = result_final_mean['final_loss_mean']
        last_test_acc = result_final_mean['final_acc_mean']
        if last_test_loss < best_test_loss:
            best_test_loss = last_test_loss
            best_test_acc = last_test_acc
        result_test = dict(
            best_test_loss=best_test_loss,
            best_test_acc=best_test_acc,
            last_test_loss=last_test_loss,
            last_test_acc=last_test_acc,
        )
        log_pbar(result_test, tests_pbar)

    # TF-events for inner loop (train & test)
    mean_j = result_all.mean(0)
    if cfg.save_dir:
        for i in range(cfg.iter_test):
            mean = mean_j.getitem(i)
            walltime = mean_j['walltime'][i]
            log_tf_event(writer, 'meta_test_inner', mean, i, walltime)
        result_all.save(name, cfg.save_dir)
        result_all.save_as_csv(name, cfg.save_dir, 'meta_test_inner')
        result_final.save_as_csv(name,
                                 cfg.save_dir,
                                 'meta_test_final',
                                 trans_1d=True)

    return result_all
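
A corresponding sketch for the static-optimizer baseline; the run name 'sgd' and the config loader are assumptions. Note that test_normal takes no learned parameters: the optimizer class and its arguments come from the merged config (cfg.problem.optim_cls, cfg.optim_args).

cfg = load_config('config/normal.yaml')  # hypothetical config helper
result_all = test_normal('sgd', cfg)     # runs cfg.n_test episodes with the static optimizer
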
Example #3
def test_neural(name,
                save_dir,
                learned_params,
                data_cls,
                model_cls,
                optim_module,
                n_test=100,
                iter_test=100,
                preproc=False,
                out_mul=1.0,
                no_mask=False,
                k_obsrv=10):
    """Function for meta-test of learned optimizers.
  """
    data_cls = _get_attr_by_name(data_cls)
    model_cls = _get_attr_by_name(model_cls)
    optim_cls = _get_optim_by_name(optim_module)
    writer = TFWriter(save_dir, name) if save_dir else None

    optimizer = C(optim_cls())
    if learned_params is not None:
        optimizer.params = learned_params

    meta_optim = None
    unroll = 1

    best_test_loss = 999999
    result_all = ResultDict()
    result_final = ResultDict()  # test loss & acc over the whole test set
    tests_pbar = tqdm(range(n_test), 'test')

    # Meta-test
    for j in tests_pbar:
        data = data_cls().sample_meta_test()
        result, params = optimizer.meta_optimize(meta_optim, data, model_cls,
                                                 iter_test, unroll, out_mul,
                                                 k_obsrv, no_mask, writer,
                                                 'test')
        result_all.append(result)
        result_final.append(
            final_inner_test(C(model_cls(params)),
                             data['in_test'],
                             mode='test'))
        result_final_mean = result_final.mean()
        last_test_loss = result_final_mean['loss_mean']
        last_test_acc = result_final_mean['acc_mean']

        if last_test_loss < best_test_loss:
            best_test_loss = last_test_loss
            best_test_acc = last_test_acc

        result_test = dict(
            best_test_loss=best_test_loss,
            best_test_acc=best_test_acc,
            last_test_loss=last_test_loss,
            last_test_acc=last_test_acc,
        )
        log_pbar(result_test, tests_pbar)
        if save_dir:
            save_figure(name, save_dir, writer, result, j, 'test')
    result_all_mean = result_all.mean(0)

    # TF-events for inner loop (train & test)
    if save_dir:
        # test_result_mean = ResultDict()
        for i in range(iter_test):
            mean_i = result_all_mean.getitem(i)
            walltime = result_all_mean['walltime'][i]
            log_tf_event(writer, 'meta_test_inner', mean_i, i, walltime)
            # test_result_mean.update(**mean_i, step=i, walltime=walltime)
        result_all.save(name, save_dir)
        result_all.save_as_csv(name, save_dir, 'meta_test_inner')
        result_final.save_as_csv(name,
                                 save_dir,
                                 'meta_test_final',
                                 trans_1d=True)

    return result_all
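
A sketch for this keyword-argument variant; the dotted class paths, optimizer module name, and checkpoint below are illustrative assumptions chosen only to match the parameter names in the signature above:

import torch

learned_params = torch.load('save/meta_trained.pt')  # assumed checkpoint of learned optimizer parameters
result_all = test_neural(
    name='neural_obsrv',                        # hypothetical run name
    save_dir='save/meta_test',
    learned_params=learned_params,
    data_cls='datasets.omniglot.OmniglotData',  # hypothetical path resolved by _get_attr_by_name
    model_cls='models.model.Model',             # hypothetical path resolved by _get_attr_by_name
    optim_module='neural_obsrv',                # hypothetical module resolved by _get_optim_by_name
    n_test=50,
    iter_test=200,
)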