Example #1
0
def gen(output, recipe, bwd=True, use_gpu=False):
    """Generate an ONNX test case for an E2E model built from *recipe*.

    Args:
        output: Output directory name forwarded to the ch2o test-arg parser.
        recipe: Tuple of ``((idim, odim, args), (xs, ilens, ys))`` — model
            construction arguments and the inputs to run it on.
        bwd: Generate the backprop variant of the test case.
        use_gpu: Run the model on GPU when producing expected outputs.
    """
    # `ch2o` itself is referenced below (ch2o.generate_testcase); import it
    # explicitly instead of relying on a module-level import elsewhere.
    import ch2o
    from ch2o import test_args
    test_args.get_test_args([output, '--allow-unused-params'])

    (idim, odim, args), (xs, ilens, ys) = recipe
    ch2o.generate_testcase(lambda: E2E(idim, odim, args),
                           [xs, ilens, ys], backprop=bwd, use_gpu=use_gpu)
Example #2
0
def dump_test_inputs_outputs(inputs, outputs, test_data_dir):
    """Serialize (value_info, value) pairs as ONNX test-data protobufs.

    For each pair, writes ``<typ>_<i>.pb`` (single tensor) or
    ``<typ>_<i>_<j>.pb`` (each element of a list value) under
    *test_data_dir*, and updates *value_info* in place to describe the
    dumped tensor (or sequence-of-tensor) type.

    Args:
        inputs: Iterable of ``(onnx ValueInfoProto, array-or-list)`` pairs.
        outputs: Same structure as *inputs*, dumped with the 'output' prefix.
        test_data_dir: Directory to create (if needed) and write into.

    Raises:
        RuntimeError: A value is None (unused parameter) and the global test
            args do not allow unused params.
    """
    if not os.path.exists(test_data_dir):
        os.makedirs(test_data_dir)

    for typ, values in [('input', inputs), ('output', outputs)]:
        for i, (value_info, value) in enumerate(values):
            name = value_info.name
            if isinstance(value, list):
                # An empty sequence would leave `tensor` undefined for the
                # elem_type assignment after the loop.
                assert value, 'empty sequence for %s' % name
                # Zero-pad the element index so files sort lexicographically.
                digits = len(str(len(value)))
                for j, v in enumerate(value):
                    filename = os.path.join(
                        test_data_dir,
                        '%s_%d_%s.pb' % (typ, i, str(j).zfill(digits)))
                    tensor = tensor_from_array(v, name)
                    with open(filename, 'wb') as f:
                        f.write(tensor.SerializeToString())

                # Rewrite the value_info as a sequence-of-tensor type whose
                # element dtype comes from the last dumped tensor.
                value_info.type.CopyFrom(onnx.TypeProto())
                sequence_type = value_info.type.sequence_type
                tensor_type = sequence_type.elem_type.tensor_type
                tensor_type.elem_type = tensor.data_type
            else:
                # Handle unused (None) values before doing any path work.
                if value is None:
                    if get_test_args().allow_unused_params:
                        continue
                    raise RuntimeError('Unused parameter: %s' % name)
                filename = os.path.join(test_data_dir, '%s_%d.pb' % (typ, i))
                tensor = tensor_from_array(value, name)
                with open(filename, 'wb') as f:
                    f.write(tensor.SerializeToString())

                # Replace the value_info with one matching the dumped tensor.
                vi = onnx.helper.make_tensor_value_info(
                    name, tensor.data_type, tensor.dims)
                value_info.CopyFrom(vi)
Example #3
0
def generate_testcase(model,
                      orig_xs,
                      subname=None,
                      output_dir=None,
                      backprop=False,
                      use_gpu=False):
    """Compile *model* to ONNX, run it once in Chainer, and dump a test case.

    Writes ``model.onnx`` plus a ``test_data_set_0`` directory of input and
    expected-output protobufs under *output_dir* (or a directory derived
    from the global test args when *output_dir* is None).

    Args:
        model: A Chainer model instance, or a class / factory function that
            returns one when called with no arguments.
        orig_xs: Inputs to feed the model; deep-copied before use.
        subname: Optional suffix distinguishing multiple test cases from the
            same test; only valid when *output_dir* is None.
        output_dir: Explicit output directory, bypassing the test-arg logic.
        backprop: Also run backward and dump parameter gradients as extra
            expected outputs.
        use_gpu: Move the model and inputs to GPU for the Chainer run.
    """
    # Work on a copy so the model run cannot mutate the caller's inputs;
    # orig_xs is re-used verbatim when dumping the test inputs below.
    xs = copy.deepcopy(orig_xs)
    if output_dir is None:
        args = get_test_args()
        output_dir = args.output

        if backprop:
            output_dir = output_dir + '_backprop'

        # Only on the very first testcase of this run (no subnames seen yet):
        if not _seen_subnames:
            # Remove all related directories to renamed tests.
            for d in [output_dir] + glob.glob(output_dir + '_*'):
                if os.path.isdir(d):
                    shutil.rmtree(d)
        # Guard against two testcases with the same (backprop, subname) pair.
        assert (backprop, subname) not in _seen_subnames
        _seen_subnames.add((backprop, subname))
        if subname is not None:
            output_dir = output_dir + '_' + subname
    else:
        assert subname is None
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    def get_model():
        # `model` may be a class or a factory function; instantiate a fresh
        # object per call so compilation and execution each get their own.
        if isinstance(model, type) or isinstance(model, types.FunctionType):
            return model()
        return model

    # Build the ONNX model from a pristine, uninitialized model instance.
    onnxmod = compile_model(get_model(), xs)
    all_input_tensors = onnxmod.graph.input
    output_tensors = onnxmod.graph.output

    model = get_model()
    if use_gpu:
        model.to_gpu()
        # Move inputs (including one level of nested list/tuple) to the GPU.
        xs_gpu = []
        for x in xs:
            if isinstance(x, (list, tuple)):
                x = [model.xp.array(a) for a in x]
            else:
                x = model.xp.array(x)
            xs_gpu.append(x)
        xs = xs_gpu
    chainer.config.train = backprop
    model.cleargrads()
    ys = model(*xs)
    chainer_out = validate_chainer_output(ys)

    if backprop:
        # Seed the output gradient with ones and backprop to the parameters.
        ys.grad = model.xp.ones(ys.shape, ys.dtype)
        ys.backward()

    # Initialize the ONNX parameters from this single executed model run.
    edit_onnx_protobuf(onnxmod, model)

    # Graph inputs that are also initializers are parameters, not real
    # inputs; exclude them from the dumped test inputs.
    initializer_names = set()
    for initializer in onnxmod.graph.initializer:
        initializer_names.add(initializer.name)
    input_tensors = []
    for input_tensor in all_input_tensors:
        if input_tensor.name not in initializer_names:
            input_tensors.append(input_tensor)

    if len(output_tensors) < len(chainer_out):
        # NOTE(review): looks like the single-graph-output case where Chainer
        # returned multiple values — collapsed into one array; confirm.
        assert len(output_tensors) == 1
        chainer_out = [np.array(chainer_out)]
    assert len(output_tensors) == len(chainer_out)

    outputs = list(zip(output_tensors, chainer_out))
    if backprop:
        # Dump each parameter's gradient as an extra expected output, named
        # 'grad_out@<param name>' (sorted for deterministic ordering).
        for name, param in sorted(model.namedparams()):
            bp_name = onnx.helper.make_tensor_value_info(
                'grad_out@' + name, onnx.TensorProto.FLOAT, ())
            outputs.append((bp_name, param.grad))

    # Dump the original (CPU-side) inputs, not the possibly-GPU copies.
    xs = list(map(lambda x: _validate_inout(x), orig_xs))

    dump_test_inputs_outputs(list(zip(input_tensors, xs)), outputs,
                             os.path.join(output_dir, 'test_data_set_0'))

    with open(os.path.join(output_dir, 'model.onnx'), 'wb') as fp:
        fp.write(onnxmod.SerializeToString())