def __init__(self, model):
    """Collect ONNX names for the parameters of *model*.

    Builds the bookkeeping containers used during export and, for every
    parameter of the model, registers its cleansed ONNX name via
    ``self.set_name``. Parameters whose owning link is itself a named
    link are additionally recorded in ``self.param_to_link`` so the
    exporter can map a parameter back to its link.

    Args:
        model (~chainer.Chain): the model whose parameters are named.
    """
    self.name_list = {}
    self.parameters = []
    self.constants = []
    self.param_to_link = {}

    # Lookup table from link path to link object.
    links_by_name = dict(model.namedlinks())

    for param_name, param in model.namedparams():
        # The owning link's path is the parameter path minus its last
        # '/'-separated component (the parameter's own name).
        link_name = param_name[:param_name.rindex('/')]
        if link_name in links_by_name:
            self.param_to_link[id(param)] = (
                onnx_helper.cleanse_param_name(link_name),
                links_by_name[link_name])
        self.set_name(param, onnx_helper.cleanse_param_name(param_name))
def export_testcase(model, args, out_dir, output_grad=False, **kwargs):
    """Export model and I/O tensors of the model in protobuf format.

    Similar to the `export` function, this function first performs a
    forward computation to a given input for obtaining an output. Then,
    this function saves the pair of input and output in Protobuf format,
    which is a defacto-standard format in ONNX.

    This function also saves the model with the name "model.onnx".

    Args:
        model (~chainer.Chain): The model object.
        args (list): The arguments which are given to the model
            directly. Unlike `export` function, only `list` type is
            accepted.
        out_dir (str): The directory name used for saving the input and
            output.
        output_grad (bool): If True, this function will output model's
            gradient with names 'gradient_%d.pb'.
        **kwargs (dict): keyword arguments for ``onnx_chainer.export``.
    """
    os.makedirs(out_dir, exist_ok=True)
    # Clear stale gradients so the gradients dumped below come only from
    # the backward pass performed in this call.
    model.cleargrads()
    onnx_model, inputs, outputs = export(
        model, args, filename=os.path.join(out_dir, 'model.onnx'),
        return_named_inout=True, **kwargs)

    test_data_dir = os.path.join(out_dir, 'test_data_set_0')
    os.makedirs(test_data_dir, exist_ok=True)
    for i, (name, var) in enumerate(inputs.items()):
        pb_name = os.path.join(test_data_dir, 'input_{}.pb'.format(i))
        array = chainer.cuda.to_cpu(var.array)
        write_tensor_pb(pb_name, name, array)

    for i, (name, var) in enumerate(outputs.items()):
        pb_name = os.path.join(test_data_dir, 'output_{}.pb'.format(i))
        array = chainer.cuda.to_cpu(var.array)
        write_tensor_pb(pb_name, name, array)

    if output_grad:
        # Perform backward computation. When the model has multiple
        # outputs, funnel them through a single identity node so one
        # backward() call propagates gradients from all of them.
        # BUGFIX: the previous code rebound the `outputs` dict to the
        # tuple returned by identity() and then called .values() on it,
        # which raised AttributeError for models with >1 output.
        output_vars = list(outputs.values())
        if len(output_vars) > 1:
            output_vars = list(chainer.functions.identity(*output_vars))
        for out in output_vars:
            out.grad = model.xp.ones_like(out.array)
        output_vars[0].backward()

        for i, (name, param) in enumerate(model.namedparams()):
            pb_name = os.path.join(
                test_data_dir, 'gradient_{}.pb'.format(i))
            grad = chainer.cuda.to_cpu(param.grad)
            onnx_name = cleanse_param_name(name)
            if grad is None:
                # Best effort: a parameter unused by the forward pass has
                # no gradient; warn instead of failing the whole export.
                warnings.warn(
                    'Parameter `{}` does not have gradient value'.format(
                        name))
            else:
                write_tensor_pb(pb_name, onnx_name, grad)
def add_param(self, array, name, use_original_name=False):
    """Register *array* as an ONNX initializer.

    Unless ``use_original_name`` is set, the registered name is derived
    from the current function name plus the cleansed parameter name,
    with a leading ``/`` inserted when *name* starts with neither ``/``
    nor ``_``.

    Returns:
        str: registered name.
    """
    if use_original_name:
        registered = name
    else:
        if not name.startswith(('/', '_')):
            name = '/' + name
        registered = '{}_{}'.format(
            onnx_helper.get_func_name(),
            onnx_helper.cleanse_param_name(name))
    self.set_name(array, registered)
    self.parameters.append(array)
    return registered