Example #1
File: test_load.py Project: sony/nnabla
def test_load_and_infer_improvement(nntxt_idx, parameter_format, dataset_sample_num):
    '''This case tests improvement features by comparing against the legacy
    implementation: the legacy loader cannot load or infer successfully,
    while the refactored one succeeds.
    '''
    with generate_case_from_nntxt_str(NNTXT_IMPROVEMENT_CASES[nntxt_idx], parameter_format, dataset_sample_num) as nnp_file:
        with pytest.raises(ValueError) as excinfo:
            ref_info = ref_load(nnp_file)
            ref_result = partial(
                common_forward, forward_func=_ref_forward)(ref_info)
        # Inspect the exception after leaving the block; a print placed after
        # the raising call inside the block would never execute.
        print(excinfo.value)

        info = load.load(nnp_file)
        result = partial(common_forward, forward_func=_forward)(info)
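
The case above hinges on pytest.raises capturing the legacy loader's failure before the refactored loader is exercised. A minimal standalone sketch of that pattern, with a hypothetical failing loader standing in for ref_load:

import pytest


def legacy_load(path):
    # Hypothetical stand-in: the legacy loader rejects the improved format.
    raise ValueError("unsupported nntxt feature in {}".format(path))


def test_legacy_rejects_improved_case():
    with pytest.raises(ValueError) as excinfo:
        legacy_load("improved_case.nnp")
    # The captured exception is inspected only after leaving the block.
    assert "unsupported" in str(excinfo.value)
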
Example #2
File: test_load.py Project: sony/nnabla
def test_load_and_infer_equivalence(nntxt_idx, parameter_format, dataset_sample_num):
    '''These cases test the equivalence of the NNP load functions before and
    after refactoring. The scope of the refactoring includes the network part
    and the load function. Each case first generates an .nnp file from
    nntxt_str according to the specified parameter_format, replaces the
    dataset's uri with a temporarily generated random dataset, and then
    performs inference similar to what is done in cli/forward.py.
    '''
    with generate_case_from_nntxt_str(NNTXT_EQUIVALENCE_CASES[nntxt_idx], parameter_format, dataset_sample_num) as nnp_file:
        ref_info = ref_load(nnp_file)
        ref_result = partial(
            common_forward, forward_func=_ref_forward)(ref_info)

        info = load.load(nnp_file)
        result = partial(common_forward, forward_func=_forward)(info)

    assert_tensor_equal(result, ref_result)
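
Both loaders above are driven through the same common_forward helper, with functools.partial pre-binding the implementation-specific forward function. A minimal sketch of that binding pattern, using stand-in functions in place of the test's helpers:

from functools import partial


def common_forward(info, forward_func):
    # Shared driver: the actual forward pass is injected as an argument.
    return forward_func(info)


ref_runner = partial(common_forward, forward_func=lambda info: "legacy:" + info)
new_runner = partial(common_forward, forward_func=lambda info: "new:" + info)

# partial fixes forward_func, leaving only info to be supplied at call time.
assert ref_runner("nnp") == "legacy:nnp"
assert new_runner("nnp") == "new:nnp"
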
Example #3
File: test_load.py Project: donproc/nnabla
def test_resume_suspend_equivalence(nntxt_idx, parameter_format,
                                    dataset_sample_num, batch_size):
    '''These cases test equivalence before and after refactoring.
    '''
    verbose = True
    a_few_iter = 10
    half_iter = 5
    output_network_topology = False
    with generate_case_from_nntxt_str(NNTXT_EQUIVALENCE_CASES[nntxt_idx],
                                      parameter_format, dataset_sample_num,
                                      batch_size) as nnp_file:
        with create_temp_with_dir(
                "saved_parameter.nnp") as saved_parameter_nnp:

            class Callback:
                pass

            class ModelSaver:
                def __init__(self, info):
                    self.info = info

                def __call__(self, config):
                    if config.iter != half_iter:
                        return
                    _save_parameters(saved_parameter_nnp, config,
                                     NNTXT_EQUIVALENCE_CASES[nntxt_idx])

            new_config = TrainConfig()
            new_config.start_iteration = 0
            new_config.end_iteration = a_few_iter
            new_config.save_optimizer_variable = False
            new_config.save_evaluation_variable = False
            new_cb = Callback()
            new_cb.forward = lambda x: x.target.forward(clear_no_need_grad=True)
            new_cb.backward = lambda x, b: x.target.backward()
            new_config.cb = new_cb
            new_config.impl = "ref"

            ref_result = []
            ref_info = load.load(nnp_file, batch_size=batch_size)
            print("load.load")

            if output_network_topology:
                for n, opt in ref_info.optimizers.items():
                    print(n)
                    opt.network.execute_on_proto(Verifier())

            new_config.on_iter = ModelSaver(ref_info)
            for cost, error in partial(train, config=new_config)(ref_info):
                ref_result.append((cost, error))

            new_config.on_iter = None
            new_config.start_iteration = half_iter
            new_config.end_iteration = a_few_iter
            new_config.impl = "new"
            result = []
            nn.clear_parameters()
            info = load.load(nnp_file,
                             batch_size=batch_size,
                             exclude_parameter=True)
            print("load.load")

            # Here `info` is loaded under a different config, but the
            # optimizer state becomes the same once restored below.
            load_train_state(saved_parameter_nnp, info)

            for cost, error in partial(train, config=new_config)(info):
                result.append((cost, error))

            compare_info(ref_info, info)

            for i, ((cost_ref, error_ref),
                    (cost, error)) in enumerate(zip(ref_result, result)):
                if verbose:
                    print("{}: cost: {} <--> {}".format(i, cost_ref, cost))
                    print("{}: error: {} <--> {}".format(i, error_ref, error))
                if i > new_config.start_iteration:
                    assert_allclose(np.array([cost_ref, error_ref]),
                                    np.array([cost, error]),
                                    rtol=1e-2,
                                    atol=1e-5,
                                    err_msg="Error: {}".format(nntxt_idx))
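
The final loop only checks iterations after the resume point, comparing each (cost, error) pair with numpy's assert_allclose. A short illustration of how rtol and atol combine in that check, with made-up numbers:

import numpy as np
from numpy.testing import assert_allclose

ref = np.array([0.500, 0.120])
got = np.array([0.504, 0.121])
# Passes when |got - ref| <= atol + rtol * |ref| holds elementwise:
# 0.004 <= 1e-5 + 1e-2 * 0.500 and 0.001 <= 1e-5 + 1e-2 * 0.120.
assert_allclose(got, ref, rtol=1e-2, atol=1e-5)
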
Example #4
File: test_load.py Project: donproc/nnabla
def test_load_and_save_equivalence(nntxt_idx, parameter_format,
                                   dataset_sample_num, batch_size,
                                   include_params, variable_batch_size):
    '''These cases test equivalence before and after refactoring.
    '''
    verbose = True
    a_few_iter = 10
    half_iter = 5
    output_network_topology = True
    with generate_case_from_nntxt_str(NNTXT_EQUIVALENCE_CASES[nntxt_idx],
                                      parameter_format, dataset_sample_num,
                                      batch_size) as nnp_file:
        with create_temp_with_dir("saved.nnp") as saved_nnp_file:

            class Callback:
                pass

            class ModelSaver:
                def __init__(self, info):
                    self.info = info

                def __call__(self, config):
                    if config.iter != half_iter:
                        return

                    info = self.info
                    datasets = []
                    with ExitStack() as stack:
                        for d_name, d in info.datasets.items():
                            ds = {}
                            ds['name'] = d_name
                            ds['uri'] = d.uri
                            ds['cache_dir'] = d.cache_dir
                            di_instance = stack.enter_context(
                                d.data_iterator())
                            ds['variables'] = list(di_instance.variables)
                            ds['batch_size'] = di_instance.batch_size
                            ds['no_image_normalization'] = not d.normalize
                            ds['shuffle'] = di_instance._shuffle
                            datasets.append(ds)

                    dataset_assign = set()
                    for obj in itertools.chain(info.monitors.values(),
                                               info.executors.values(),
                                               info.optimizers.values()):
                        for pv in obj.dataset_assign.keys():
                            dataset_assign.add(pv.name)

                    contents = {
                        'global_config': {
                            'default_context': info.global_config.default_context
                        },
                        'training_config': {
                            'max_epoch': info.training_config.max_epoch,
                            'iter_per_epoch': info.training_config.iter_per_epoch,
                            'save_best': info.training_config.save_best
                        },
                        'networks': [{
                            'name': n_name,
                            'batch_size': n.batch_size,
                            'outputs': {
                                out: n.variables[out].variable_instance
                                for out in n.outputs
                            },
                            'names': {
                                inp: n.variables[inp].variable_instance
                                for inp in itertools.chain(n.inputs, n.outputs)
                            }
                        } for n_name, n in info.networks.items()],
                        'executors': [{
                            'name': e_name,
                            'network': e.network.name,
                            'data': [pv.name for pv in e.dataset_assign.keys()],
                            'generator_variables': [
                                pv.name for pv in e.generator_assign.keys()
                            ],
                            'output': [pv.name for pv in e.output_assign.keys()]
                        } for e_name, e in info.executors.items()],
                        'optimizers': [{
                            'name': o_name,
                            'solver': o.solver,
                            'network': o.network.name,
                            'data_variables': {
                                pv.name: d for pv, d in o.dataset_assign.items()
                            },
                            'generator_variables': [
                                pv.name for pv in o.generator_assign.keys()
                            ],
                            'loss_variables': [pv.name for pv in o.loss_variables],
                            'dataset': list(o.data_iterators.keys()),
                            'weight_decay': o.weight_decay,
                            'lr_decay': o.lr_decay,
                            'lr_decay_interval': o.lr_decay_interval,
                            'update_interval': o.update_interval
                        } for o_name, o in info.optimizers.items()],
                        'datasets': datasets,
                        'monitors': [{
                            'name': m_name,
                            'network': m.network.name,
                            'data_variables': {
                                pv.name: d for pv, d in m.dataset_assign.items()
                            },
                            'generator_variables': [
                                pv.name for pv in m.generator_assign.keys()
                            ],
                            'monitor_variables': [
                                pv.name for pv in m.monitor_variables
                            ],
                            'dataset': list(m.data_iterators.keys())
                        } for m_name, m in info.monitors.items()],
                    }

                    save.save(saved_nnp_file, contents, include_params,
                              variable_batch_size)

            new_config = TrainConfig()
            new_config.start_iteration = 0
            new_config.end_iteration = a_few_iter
            new_config.save_optimizer_variable = False
            new_config.save_evaluation_variable = False
            new_cb = Callback()
            new_cb.forward = lambda x: x.target.forward(clear_no_need_grad=True)
            new_cb.backward = lambda x, b: x.target.backward(clear_buffer=True)
            new_config.cb = new_cb
            new_config.impl = "ref"

            ref_result = []
            ref_info = load.load(nnp_file, batch_size=batch_size)

            if output_network_topology:
                for n, opt in ref_info.optimizers.items():
                    print(n)
                    opt.network.execute_on_proto(Verifier())

            new_config.on_iter = ModelSaver(ref_info)
            for cost, error in partial(train, config=new_config)(ref_info):
                ref_result.append((cost, error))

            new_config.on_iter = None
            new_config.start_iteration = half_iter
            new_config.end_iteration = a_few_iter
            new_config.impl = "new"
            result = []
            nn.clear_parameters()
            info = load.load(saved_nnp_file, batch_size=batch_size)

            if output_network_topology:
                for n, opt in info.optimizers.items():
                    print(n)
                    opt.network.execute_on_proto(Verifier())

            for cost, error in partial(train, config=new_config)(info):
                result.append((cost, error))

            compare_info(ref_info, info)

            for i, ((cost_ref, error_ref),
                    (cost, error)) in enumerate(zip(ref_result, result)):
                if verbose:
                    print("{}: cost: {} <--> {}".format(i, cost_ref, cost))
                    print("{}: error: {} <--> {}".format(i, error_ref, error))
                if i > new_config.start_iteration:
                    assert_allclose(np.array([cost_ref, error_ref]),
                                    np.array([cost, error]),
                                    rtol=1e-2,
                                    atol=1e-5,
                                    err_msg="Error: {}".format(nntxt_idx))
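
ModelSaver above enters one data iterator per dataset through contextlib.ExitStack, so every iterator opened in the loop is closed together when the with block exits. A minimal sketch of that pattern, with a hypothetical context manager standing in for d.data_iterator():

from contextlib import ExitStack, contextmanager


@contextmanager
def open_iterator(name):
    # Hypothetical stand-in for d.data_iterator().
    print("open", name)
    try:
        yield "iterator:" + name
    finally:
        print("close", name)


with ExitStack() as stack:
    iterators = [stack.enter_context(open_iterator(n))
                 for n in ["train", "valid"]]
    # Both iterators are live here; on exit they are closed in reverse order.
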
Example #5
File: test_load.py Project: donproc/nnabla
def test_load_and_train_equivalence(nntxt_idx, parameter_format,
                                    dataset_sample_num, batch_size):
    '''These cases test equivalence before and after refactoring.
    The operation is similar to what is done in cli/train.py.
    '''
    # for debugging
    save_v = False
    output_network_topology = False
    verbose = False
    m_iter = 10

    class Callback:
        pass

    legacy_config = TrainConfig()
    legacy_config.on_iter = None
    legacy_config.save_optimizer_variable = False
    legacy_config.save_evaluation_variable = False
    legacy_config.start_iteration = 0
    legacy_config.end_iteration = 10
    legacy_config.enable_save_variable = save_v
    legacy_cb = Callback()
    legacy_cb.forward = lambda o: o.network.forward(o.forward_sequence)
    legacy_cb.backward = lambda o, b: o.network.backward(
        o.backward_sequence, b)
    legacy_config.cb = legacy_cb
    legacy_config.impl = "legacy"

    new_config = TrainConfig()
    new_config.on_iter = None
    new_config.save_optimizer_variable = False
    new_config.save_evaluation_variable = False
    new_config.start_iteration = 0
    new_config.end_iteration = 10
    new_config.enable_save_variable = save_v
    new_cb = Callback()
    new_cb.forward = lambda x: x.target.forward(clear_no_need_grad=True)
    new_cb.backward = lambda x, b: x.target.backward(clear_buffer=True)
    new_config.cb = new_cb
    new_config.impl = "new"

    with generate_case_from_nntxt_str(NNTXT_EQUIVALENCE_CASES[nntxt_idx],
                                      parameter_format, dataset_sample_num,
                                      batch_size) as nnp_file:
        ref_result = []
        result = []
        nn.clear_parameters()
        info = ref_load(nnp_file, batch_size=batch_size)
        for cost, error in partial(train, config=legacy_config)(info):
            ref_result.append((cost, error))

        nn.clear_parameters()
        info = load.load(nnp_file, batch_size=batch_size)

        if output_network_topology:
            for n, opt in info.optimizers.items():
                print(n)
                opt.network.execute_on_proto(Verifier())

        for cost, error in partial(train, config=new_config)(info):
            result.append((cost, error))

        for i, ((cost_ref, error_ref),
                (cost, error)) in enumerate(zip(ref_result, result)):
            if verbose:
                print("{}: cost: {} <--> {}".format(i, cost_ref, cost))
                print("{}: error: {} <--> {}".format(i, error_ref, error))
            assert_allclose(np.array([cost_ref, error_ref]),
                            np.array([cost, error]),
                            rtol=1e-2,
                            atol=1e-3,
                            err_msg="Error: {}".format(nntxt_idx))
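
The legacy and new configs above differ only in the callbacks they attach to a bare Callback object; the train driver just calls cb.forward / cb.backward without knowing which implementation it drives. A stripped-down sketch of that duck-typed dispatch, with fake target and optimizer classes:

class Callback:
    pass


class FakeTarget:
    def forward(self, clear_no_need_grad=False):
        print("forward, clear_no_need_grad =", clear_no_need_grad)

    def backward(self, clear_buffer=False):
        print("backward, clear_buffer =", clear_buffer)


class FakeOptimizer:
    target = FakeTarget()


cb = Callback()
cb.forward = lambda x: x.target.forward(clear_no_need_grad=True)
cb.backward = lambda x, b: x.target.backward(clear_buffer=True)

opt = FakeOptimizer()
cb.forward(opt)         # forward, clear_no_need_grad = True
cb.backward(opt, True)  # backward, clear_buffer = True
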
Example #6
File: test_load.py Project: donproc/nnabla
def test_expander(nntxt_idx):
    with generate_case_from_nntxt_str(NNTXT_EQUIVALENCE_CASES[nntxt_idx],
                                      '.protobuf', 128) as nnp_file:
        g = nn.graph_def.load(nnp_file)
        n = g.default_graph.expand_loop_control()
        n.execute_on_proto(Verifier())