Example #1
def clip_exporter_v11(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    # Clip-11 passes "min"/"max" as optional inputs; an empty name omits one.
    min_value, max_value, const_tensors = None, None, []
    dtype = context.ws.FetchTensor(op_def.output[0]).dtype
    for arg in op_def.arg:
        if arg.name == 'low':
            min_value = arg.f
        elif arg.name == 'high':
            max_value = arg.f
    if min_value is not None:
        const_tensors.append(helper.from_array(
            numpy.array(min_value, dtype),
            context.unique_name(op_def.input[0] + '/clip/min_value'),
        ))
        node.input.extend([const_tensors[-1].name])
    else:
        node.input.extend([''])
    if max_value is not None:
        const_tensors.append(helper.from_array(
            numpy.array(max_value, dtype),
            context.unique_name(op_def.input[0] + '/clip/max_value'),
        ))
        node.input.extend([const_tensors[-1].name])
    else:
        node.input.extend([''])
    return node, const_tensors
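For reference, a minimal sketch of the Clip-11 node this exporter emits, written against the official onnx package (the helper/export_util/numpy names used throughout these examples come from Dragon's module scope; the tensor names below are illustrative):

import numpy as np
from onnx import helper as onnx_helper, numpy_helper

min_t = numpy_helper.from_array(np.array(0.0, 'float32'), 'x/clip/min_value')
# Input order is [data, min, max]; '' leaves the "max" bound unset.
node = onnx_helper.make_node('Clip', inputs=['x', min_t.name, ''], outputs=['y'])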
Example #2
def pad_exporter_v11(op_def, context):
    node, pads, value = pad_exporter(**locals())
    pads = helper.from_array(
        numpy.array(pads, 'int64'),
        context.unique_name(op_def.input[0] + '/pad/pads'),
    )
    value = helper.from_array(
        numpy.array(value, 'float64'),
        context.unique_name(op_def.input[0] + '/pad/value'),
    )
    node.input.extend([pads.name, value.name])
    return node, [pads, value]
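Pad-11 packs the paddings into a single int64 tensor laid out as [x1_begin, ..., xn_begin, x1_end, ..., xn_end]. A small numpy sketch of that layout (values illustrative):

import numpy as np

x = np.zeros((2, 3), 'float32')
pads = [0, 1, 0, 2]  # dim-0: (0, 0) and dim-1: (1, 2)
pairs = [(pads[i], pads[i + x.ndim]) for i in range(x.ndim)]
y = np.pad(x, pairs, constant_values=9.0)  # y.shape == (2, 6)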
Example #3
def slice_exporter_v10(op_def, context):
    node, starts, ends = slice_exporter(**locals())
    axes = helper.from_array(
        numpy.arange(len(starts), dtype='int64'),
        context.unique_name(op_def.input[0] + '/slice/axes'),
    )
    starts = helper.from_array(
        numpy.array(starts, 'int64'),
        context.unique_name(op_def.input[0] + '/slice/starts'),
    )
    ends = helper.from_array(
        numpy.array(ends, 'int64'),
        context.unique_name(op_def.input[0] + '/slice/ends'),
    )
    node.input.extend([starts.name, ends.name, axes.name])
    return node, [starts, ends, axes]
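Slice-10 likewise takes starts/ends/axes as inputs rather than attributes; the exported node computes ordinary basic slicing, e.g. (illustrative):

import numpy as np

x = np.arange(12).reshape(3, 4)
starts, ends = [0, 1], [2, 3]
y = x[starts[0]:ends[0], starts[1]:ends[1]]  # Slice with axes = [0, 1]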
Example #4
def expand_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    shape = list(context.blob_shapes[op_def.output[0]])
    shape = helper.from_array(
        numpy.array(shape, 'int64'),
        context.unique_name(op_def.input[0] + '/expand/shape'),
    )
    node.input.extend([shape.name])
    return node, [shape]
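The exporter reads the target shape from the inferred output shape; ONNX Expand itself is numpy-style broadcasting:

import numpy as np

x = np.ones((1, 3), 'float32')
y = np.broadcast_to(x, (4, 3))  # Expand with shape = [4, 3]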
Example #5
def fill_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    node.op_type = 'Constant'
    shape = list(context.blob_shapes[op_def.output[0]])
    value = helper.from_array(
        numpy.array(shape, 'int64'),
        context.unique_name(op_def.output[0] + '/constant/value'))
    helper.add_attribute(node, 'value', value)
    node.ClearField('input')
    return node, [value]
Example #6
def top_k_exporter_v11(op_def, context):
    node, (k, axis, largest, sorted) = top_k_exporter(**locals())
    helper.add_attribute(node, 'axis', axis)
    helper.add_attribute(node, 'largest', largest)
    helper.add_attribute(node, 'sorted', sorted)
    k = helper.from_array(
        numpy.array([k], 'int64'),
        context.unique_name(op_def.input[0] + '/top_k/k'),
    )
    node.input.extend([k.name])
    return node, [k]
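A numpy sketch of what the exported TopK node computes with largest=1 and sorted=1 (illustrative values):

import numpy as np

x = np.array([1.0, 4.0, 2.0, 3.0])
k = 2
indices = np.argsort(-x)[:k]  # [1, 3]
values = x[indices]           # [4.0, 3.0]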
Example #7
def top_k_exporter_v10(op_def, context):
    node, (k, axis, largest, sorted) = top_k_exporter(**locals())
    if largest == 0:
        raise ValueError('TopK-10 does not support smallest mode.')
    helper.add_attribute(node, 'axis', axis)
    k = helper.from_array(
        numpy.array([k], 'int64'),
        context.unique_name(op_def.input[0] + '/top_k/k'),
    )
    node.input.extend([k.name])
    return node, [k]
Example #8
def trilu_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    k = 0
    for arg in op_def.arg:
        if arg.name == 'upper':
            helper.add_attribute(node, 'upper', arg.i)
        elif arg.name == 'k':
            k = arg.i
    k = helper.from_array(
        numpy.array(k, 'int64'),
        context.unique_name(op_def.input[0] + '/trilu/k'),
    )
    node.input.extend([k.name])
    return node, [k]
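Trilu with upper=0 or upper=1 matches numpy's tril/triu, with k shifting the retained diagonal:

import numpy as np

x = np.ones((3, 3))
lower = np.tril(x, k=0)  # upper = 0
upper = np.triu(x, k=1)  # upper = 1, main diagonal excluded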
Example #9
def tile_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    repeats = []
    for arg in op_def.arg:
        if arg.name == 'repeats':
            repeats = [e for e in arg.ints]
        elif arg.name == 'repeats_desc':
            repeats = helper.fetch_argument(op_def, arg, context.ws)
    repeats = helper.from_array(
        numpy.array(repeats, 'int64'),
        context.unique_name(op_def.input[0] + '/tile/repeats'),
    )
    node.input.extend([repeats.name])
    return node, [repeats]
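The repeats tensor has one entry per axis, exactly as in numpy:

import numpy as np

x = np.array([[1, 2]])
y = np.tile(x, (2, 3))  # repeats = [2, 3] -> y.shape == (2, 6)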
Example #10
def one_hot_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    helper.add_attribute(node, 'axis', -1)
    depth, on_value, off_value = 1, 1., 0.
    dtype = context.ws.FetchTensor(node.output[0]).dtype
    for arg in op_def.arg:
        if arg.name == 'depth':
            depth = arg.i
        elif arg.name == 'on_value':
            on_value = arg.f
        elif arg.name == 'off_value':
            off_value = arg.f
    depth = helper.from_array(
        numpy.array(depth, 'int64'),
        context.unique_name(op_def.input[0] + '/one_hot/depth'),
    )
    values = helper.from_array(
        numpy.array([off_value, on_value], dtype),
        context.unique_name(op_def.input[0] + '/one_hot/values'),
    )
    const_tensors = [depth, values]
    node.input.extend([depth.name, values.name])
    return node, const_tensors
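Note the ordering of the values tensor: ONNX OneHot expects [off_value, on_value]. A numpy sketch of the axis=-1 case (illustrative):

import numpy as np

indices, depth = np.array([0, 2]), 3
off_value, on_value = 0.0, 1.0
y = np.full((len(indices), depth), off_value)
y[np.arange(len(indices)), indices] = on_value  # [[1,0,0], [0,0,1]]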
Example #11
def resize_v10(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    input_shape = context.blob_shapes[op_def.input[0]]
    output_shape = context.blob_shapes[op_def.output[0]]
    for arg in op_def.arg:
        if arg.name == 'mode':
            helper.add_attribute(node, 'mode', arg.s.lower())
    scales = helper.from_array(
        numpy.array([
            float(output_shape[i]) / input_shape[i]
            for i in range(len(input_shape))
        ], 'float32'),
        context.unique_name(op_def.input[0] + '/resize/scales'),
    )
    node.input.extend([scales.name])
    return node, [scales]
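The scales input holds one output/input ratio per axis, so a spatial 2x upsample of an NCHW tensor looks like:

input_shape, output_shape = (1, 3, 32, 32), (1, 3, 64, 64)
scales = [float(o) / i for o, i in zip(output_shape, input_shape)]
# scales == [1.0, 1.0, 2.0, 2.0]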
Example #12
def cumulative_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    axis = 0
    for arg in op_def.arg:
        if arg.name == 'axis':
            axis = arg.i
        elif arg.name == 'exclusive':
            helper.add_attribute(node, 'exclusive', arg.i)
        elif arg.name == 'reverse':
            helper.add_attribute(node, 'reverse', arg.i)
    axis = helper.from_array(
        numpy.array(axis, 'int64'),
        context.unique_name(op_def.input[0] + '/cumulative/axis'),
    )
    node.input.extend([axis.name])
    return node, [axis]
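The exclusive and reverse attributes alter the numpy-style cumulative sum, e.g.:

import numpy as np

x = np.array([1, 2, 3, 4])
y = np.cumsum(x)                  # exclusive=0, reverse=0 -> [1, 3, 6, 10]
y_rev = np.cumsum(x[::-1])[::-1]  # reverse=1 -> [10, 9, 7, 4]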
Example #13
def reshape_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    shape = dims = list(context.blob_shapes[op_def.output[0]])
    for arg in op_def.arg:
        if arg.name == 'dims':
            dims = [int(e) for e in arg.ints]
        elif arg.name == 'dims_desc':
            dims = helper.fetch_argument(op_def, arg, context.ws)
    for axis, dim in enumerate(dims):
        # Keep 0 (copy input dim) and -1 (infer) for ONNX to resolve at run time.
        shape[axis] = dim if dim <= 0 else shape[axis]
    shape = helper.from_array(
        numpy.array(shape, 'int64'),
        context.unique_name(op_def.input[0] + '/reshape/shape'),
    )
    node.input.extend([shape.name])
    return node, [shape]
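Preserving 0 and -1 keeps the exported graph shape-agnostic: ONNX Reshape copies the input dimension for 0 and infers a single -1, much like numpy's reshape:

import numpy as np

x = np.zeros((2, 3, 4))
y = x.reshape(x.shape[0], -1)  # shape tensor [0, -1] -> (2, 12)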
Example #14
def roi_align(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    # Make a dummy "batch_indices".
    batch_indices = helper.from_array(
        numpy.zeros((context.blob_shapes[node.input[1]][0], ), 'int64'),
        context.unique_name(op_def.input[0] + '/roi_align/batch_indices'),
    )
    node.input.extend([batch_indices.name])
    for arg in op_def.arg:
        if arg.name == 'pooled_h':
            helper.add_attribute(node, 'output_height', arg.i)
        elif arg.name == 'pooled_w':
            helper.add_attribute(node, 'output_width', arg.i)
        elif arg.name == 'spatial_scale':
            helper.add_attribute(node, 'spatial_scale', arg.f)
        elif arg.name == 'sampling_ratio':
            helper.add_attribute(node, 'sampling_ratio', arg.i)
    return node, [batch_indices]
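ONNX RoiAlign takes three inputs (X, rois, batch_indices); the all-zero batch_indices above assumes every RoI comes from the first image in the batch. A sketch of the resulting node using the official onnx helpers (names and attribute values illustrative):

import numpy as np
from onnx import helper as onnx_helper, numpy_helper

batch_indices = numpy_helper.from_array(
    np.zeros((8,), 'int64'), 'x/roi_align/batch_indices')
node = onnx_helper.make_node(
    'RoiAlign', ['x', 'rois', batch_indices.name], ['y'],
    output_height=7, output_width=7, spatial_scale=0.25, sampling_ratio=2)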
Example #15
def resize_v11(op_def, context):
    node, const_tensors = resize_v10(**locals())
    coord_mode = 'half_pixel'
    for arg in op_def.arg:
        if arg.name == 'mode':
            if arg.s.lower() == b'nearest':
                helper.add_attribute(node, 'nearest_mode', 'floor')
        if arg.name == 'align_corners':
            if arg.i > 0:
                coord_mode = 'align_corners'
    helper.add_attribute(node, 'coordinate_transformation_mode', coord_mode)
    rank = len(context.blob_shapes[op_def.input[0]])
    roi = helper.from_array(
        numpy.array(([0] * rank + [1] * rank), 'float32'),
        context.unique_name(op_def.input[0] + '/resize/roi'),
    )
    node.input[:] = [node.input[0], roi.name, node.input[1]]
    return node, const_tensors + [roi]
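Resize-11 expects the input order [X, roi, scales], which is why the exporter splices a whole-image roi between the data and the scales built by resize_v10; the roi packs per-axis starts, then ends, in normalized coordinates:

rank = 4
roi = [0.0] * rank + [1.0] * rank  # [starts..., ends...] covering the full image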
Example #16
    def graph_def_to_onnx_graph(
        cls,
        graph_def,
        input_names=None,
        output_names=None,
        input_shapes=None,
        constants=None,
        value_info=None,
        opset_version=None,
        workspace=None,
        verbose=True,
    ):
        input_names = [] if input_names is None else input_names
        output_names = [] if output_names is None else output_names
        constants = {} if constants is None else constants
        value_info = {} if value_info is None else value_info

        if not nest.is_sequence(input_names):
            raise ValueError('<input_names> should be a sequence.')
        if not nest.is_sequence(output_names):
            raise ValueError('<output_names> should be a sequence.')
        if not isinstance(constants, dict):
            raise ValueError('<constants> should be a dict with name -> value.')
        if not isinstance(value_info, dict):
            raise ValueError('<value_info> should be a dict with name -> (dtype, shape).')

        # Determine the opset version to select exporters.
        opset_version = cls._check_opset_version(opset_version)

        # Create aliases for blobs.
        blob_aliases = {}
        for i, alias in enumerate(output_names):
            blob_aliases[graph_def.output[i]] = alias
            workspace.RegisterAlias(graph_def.output[i], alias)
            if graph_def.output[i] in value_info:
                value_info[alias] = value_info[graph_def.output[i]]
        for i, alias in enumerate(input_names):
            blob_aliases[graph_def.input[i]] = alias
            workspace.RegisterAlias(graph_def.input[i], alias)
            if graph_def.input[i] in value_info:
                value_info[alias] = value_info[graph_def.input[i]]

        # Maybe rewrite the input shapes for deployment.
        # A common case is to fill ``-1`` for dimensions that inference
        # runtimes such as TensorRT treat as dynamic.
        if input_shapes is not None:
            if isinstance(input_shapes, dict):
                for k, v in input_shapes.items():
                    value_info[k] = (value_info[k][0], v)
            else:
                for k, v in zip(graph_def.input[:], input_shapes):
                    value_info[k] = (value_info[k][0], v)

        # Prepare to make the graph.
        onnx_graph = onnx.GraphProto(name=graph_def.name
                                     if len(graph_def.name) > 0
                                     else 'onnx-model')
        blob_shapes, blob_names = {}, {}
        blob_versions = collections.defaultdict(
            int, **dict((blob_aliases.get(k, k), 1)
                        for k in helper.collect_inputs(graph_def)))
        initializers, seen_initializers = [], set()

        # Build translator context.
        context = export_util.TranslatorContext(
            workspace=workspace,
            blob_names=blob_names,
            blob_shapes=blob_shapes,
            blob_versions=blob_versions,
            opset_version=opset_version,
        )

        # Add nodes.
        for op in graph_def.op:
            # Get the shape of inputs and outputs.
            for name in itertools.chain(op.input, op.output):
                impl = workspace.GetTensor(name)
                if impl is not None:
                    blob_shapes[name] = impl.dims
                else:
                    blob_shapes[name] = value_info[name][1]

            # Translate definition.
            nodes, const_tensors = cls._make_node(op, context)

            # Rewrite inputs and outputs with aliases and SSA names.
            for node in nodes:
                node.input[:] = [blob_aliases.get(e, e) for e in node.input]
                node.output[:] = [blob_aliases.get(e, e) for e in node.output]
                cls._rewrite_for_ssa(node, context)

            # Convert constant outputs if necessary.
            if None in nodes:
                const_tensors = [helper.from_tensor(name, workspace)
                                 for name in op.output]
            else:
                onnx_graph.node.extend(nodes)

            # Merge constant tensors.
            if const_tensors is not None:
                value_info = {**value_info,
                              **dict((e.name, (e.data_type, e.dims))
                                     for e in const_tensors)}
                for tensor in const_tensors:
                    if tensor.name not in seen_initializers:
                        initializers.append(tensor)
                        seen_initializers.add(tensor.name)

        # Add constants.
        if constants is not None:
            for k, v in constants.items():
                initializers.append(helper.from_array(v, name=k))

        # Add inputs.
        for name in helper.collect_inputs(onnx_graph):
            try:
                onnx_graph.input.extend([
                    helper.make_tensor_value_info(
                        name=name,
                        elem_type=value_info[name][0],
                        shape=value_info[name][1])])
            except KeyError:
                impl = workspace.GetTensor(name)
                if impl is not None:
                    initializer = helper.from_tensor(name, workspace)
                    onnx_graph.input.extend([
                        helper.make_tensor_value_info(
                            name=name,
                            elem_type=initializer.data_type,
                            shape=initializer.dims)])
                    if name not in seen_initializers:
                        initializers.append(initializer)
                        seen_initializers.add(initializer.name)
                else:
                    raise ValueError(
                        'Info of tensor `{}` is missing, '
                        'specify it in <value_info>.'.format(name))

        # Add initializers.
        onnx_graph.initializer.extend(initializers)

        # Add outputs.
        onnx_graph.output.extend(
            helper.make_tensor_value_info(
                name=blob_names.get(name_v2, name_v2),
                elem_type=value_info[name_v2][0],
                shape=value_info[name_v2][1])
            for name_v2 in [blob_aliases.get(name, name)
                            for name in set(graph_def.output)])

        if verbose:
            print(helper.printable_graph(onnx_graph))

        return onnx_graph
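As a closing sketch, the same onnx helper calls used above (make_tensor_value_info, printable_graph) can assemble and print a minimal graph; this is plain onnx API, independent of Dragon:

import onnx
from onnx import helper as onnx_helper, TensorProto

x = onnx_helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 3])
y = onnx_helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 3])
relu = onnx_helper.make_node('Relu', ['x'], ['y'])
graph = onnx_helper.make_graph([relu], 'onnx-model', inputs=[x], outputs=[y])
print(onnx_helper.printable_graph(graph))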