Example #1
def top_k_exporter_v1(op_def, context):
    node, (k, axis, largest, _) = top_k_exporter(**locals())
    if largest == 0:
        raise ValueError('TopK-1 does not support smallest mode.')
    helper.add_attribute(node, 'axis', axis)
    helper.add_attribute(node, 'k', k)
    return node, None
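Both TopK wrappers, here and in Example #22, unpack a 4-tuple from a shared top_k_exporter base that is not included in these examples. A minimal sketch of that base, inferred purely from how its result is consumed (the defaults and argument names are assumptions):

def top_k_exporter(op_def, context):
    node, _ = export_util.translate(**locals())
    # Assumed defaults; only the tuple layout is implied by the call sites.
    k, axis, largest, is_sorted = 1, -1, 1, 1
    for arg in op_def.arg:
        if arg.name == 'k':
            k = arg.i
        elif arg.name == 'axis':
            axis = arg.i
        elif arg.name == 'largest':
            largest = arg.i
        elif arg.name == 'sorted':
            is_sorted = arg.i
    return node, (k, axis, largest, is_sorted)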
Example #2
def multinomial_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    helper.add_attribute(node, 'dtype', helper.tensor_type('int64'))
    for arg in op_def.arg:
        if arg.name == 'sample_size':
            helper.add_attribute(node, 'sample_size', arg.i)
    return node, const_tensors
Example #3
def depth_space_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    for arg in op_def.arg:
        _assert_data_format(arg)
        if arg.name == 'block_size':
            helper.add_attribute(node, 'blocksize', arg.i)
    return node, const_tensors
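The _assert_data_format helper used here and in Example #4 is not shown. Presumably it rejects layouts that ONNX cannot express; a hedged sketch, assuming ops carry a 'data_format' argument and the graph is exported in NCHW layout:

def _assert_data_format(arg):
    # Sketch only; assumes NHWC is the one unsupported layout.
    if arg.name == 'data_format' and arg.s == b'NHWC':
        raise ValueError('ONNX does not support NHWC data format.')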
Example #4
def pool(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    rank = len(context.blob_shapes[op_def.input[0]]) - 2
    # Copy the node before pooling attributes are added, in case
    # global pooling is requested below.
    global_pool, node_copy = 0, copy.deepcopy(node)
    for arg in op_def.arg:
        _assert_data_format(arg)
        if arg.name == 'kernel_shape':
            helper.add_attribute(node, 'kernel_shape',
                                 _normalize_tuple(arg.ints, rank))
        elif arg.name == 'strides':
            helper.add_attribute(node, 'strides',
                                 _normalize_tuple(arg.ints, rank))
        elif arg.name == 'pads':
            helper.add_attribute(node, 'pads', _normalize_pads(arg.ints, rank))
        elif arg.name == 'padding' and arg.s != b'VALID':
            helper.add_attribute(node, 'auto_pad', arg.s)
        elif arg.name == 'mode':
            if arg.s == b'MAX':
                node.op_type = 'MaxPool'
            elif arg.s == b'AVG':
                node.op_type = 'AveragePool'
        elif arg.name == 'ceil_mode':
            helper.add_attribute(node, 'ceil_mode', arg.i)
        elif arg.name == 'global_pool':
            global_pool = arg.i
    if global_pool > 0:
        # Fall back to the pristine copy: the Global* variants take
        # no kernel/stride/pad attributes.
        node_copy.op_type = 'Global' + node.op_type
        node = node_copy
    return node, const_tensors
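_normalize_tuple and _normalize_pads are also assumed by this example. Judging from the call sites, the first broadcasts or truncates an argument list to the spatial rank, and the second expands it into ONNX's begin-then-end pads layout of length 2 * rank. Sketches under those assumptions, not the library's actual code:

def _normalize_tuple(value, rank):
    # Truncate, or repeat the last element, so the result has <rank> items.
    value = list(value)
    if len(value) > rank:
        return value[:rank]
    return value + [value[-1]] * (rank - len(value))

def _normalize_pads(value, rank):
    # ONNX pads have length 2 * rank: all begins first, then all ends.
    value = list(value)
    if len(value) == rank * 2:
        return value
    return _normalize_tuple(value, rank) * 2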
Example #5
def affine_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    node.op_type = 'ATen'  # Currently not supported in ai.onnx
    helper.add_attribute(node, 'op_type', 'Affine')
    for arg in op_def.arg:
        if arg.name == 'axes':
            helper.add_attribute(node, 'axes', arg.ints)
    return node, const_tensors
Example #6
def clip_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    for arg in op_def.arg:
        if arg.name == 'low':
            helper.add_attribute(node, 'min', arg.f)
        elif arg.name == 'high':
            helper.add_attribute(node, 'max', arg.f)
    return node, const_tensors
Example #7
def relu_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    for arg in op_def.arg:
        if arg.name == 'alpha' and arg.f > 0:
            # A positive slope turns this into LeakyRelu.
            node.op_type = 'LeakyRelu'
            helper.add_attribute(node, 'alpha', arg.f)
    return node, const_tensors
Example #8
def softmax_exporter_v13(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    ndim = len(context.blob_shapes[op_def.input[0]])
    for arg in op_def.arg:
        if arg.name == 'axis':
            # Normalize a negative axis; Softmax-13 accepts any position.
            axis = arg.i + (ndim if arg.i < 0 else 0)
            helper.add_attribute(node, 'axis', axis)
    return node, const_tensors
Example #9
def unsqueeze_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    axes = None
    for arg in op_def.arg:
        if arg.name == 'axes':
            axes = arg.ints
    if axes is not None:
        helper.add_attribute(node, 'axes', axes)
    return node, const_tensors
Example #10
def transpose_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    for arg in op_def.arg:
        if arg.name == 'perm':
            helper.add_attribute(node, 'perm', arg.ints)
        elif arg.name == 'perm_desc':
            values = helper.fetch_argument(op_def, arg, context.ws)
            helper.add_attribute(node, 'perm', values)
    return node, None
Example #11
def cast_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    node.op_type = 'Cast'
    if len(node.input) == 0:
        raise ValueError('ONNX does not support in-place cast.')
    for arg in op_def.arg:
        if arg.name == 'dtype':
            helper.add_attribute(node, 'to', helper.tensor_type(arg.s))
    return node, const_tensors
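helper.tensor_type (also used in Example #2) maps a string or bytes dtype to the TensorProto enum expected by attributes such as Cast's 'to'. A plausible minimal version; the exact mapping table is an assumption:

from onnx import TensorProto

_TENSOR_TYPES = {
    'bool': TensorProto.BOOL,
    'float16': TensorProto.FLOAT16,
    'float32': TensorProto.FLOAT,
    'float64': TensorProto.DOUBLE,
    'int32': TensorProto.INT32,
    'int64': TensorProto.INT64,
}

def tensor_type(dtype):
    # Accept both bytes (from serialized args) and str.
    dtype = dtype.decode() if isinstance(dtype, bytes) else dtype
    return _TENSOR_TYPES[dtype]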
Example #12
def split_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    axis = 0
    for arg in op_def.arg:
        if arg.name == 'axis':
            axis = arg.i
    size_splits = [context.blob_shapes[e][axis] for e in op_def.output]
    helper.add_attribute(node, 'split', size_splits)
    return node, const_tensors
Example #13
def lp_normalize_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    node.op_type = 'LpNormalization'
    for arg in op_def.arg:
        if arg.name == 'axis':
            helper.add_attribute(node, 'axis', arg.i)
        elif arg.name == 'p':
            helper.add_attribute(node, 'p', arg.i)
    return node, const_tensors
Example #14
def reduce_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    axes = list(range(len(context.blob_shapes[op_def.input[0]])))
    for arg in op_def.arg:
        if arg.name == 'axes':
            axes = arg.ints
        elif arg.name == 'keepdims':
            helper.add_attribute(node, 'keepdims', arg.i)
    helper.add_attribute(node, 'axes', axes)
    return node, const_tensors
Example #15
def fill_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    node.op_type = 'Constant'
    shape = list(context.blob_shapes[op_def.output[0]])
    # Embed the filled data, not the shape, as the constant payload.
    # Assumes the op carries <value>/<dtype> args, as elsewhere in this file.
    fill_value, dtype = 0.0, 'float32'
    for arg in op_def.arg:
        if arg.name == 'value':
            fill_value = arg.f
        elif arg.name == 'dtype':
            dtype = arg.s.decode()
    value = helper.from_array(
        numpy.full(shape, fill_value, dtype),
        context.unique_name(op_def.output[0] + '/constant/value'))
    helper.add_attribute(node, 'value', value)
    node.ClearField('input')
    return node, [value]
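Several examples (#15, #22, #27, #29, #30) build constant tensors with helper.from_array. It is presumably a thin wrapper over onnx.numpy_helper.from_array that names the resulting TensorProto so it can be attached to the graph as an initializer:

from onnx import numpy_helper

def from_array(array, name):
    # Sketch; the real helper may do extra bookkeeping.
    return numpy_helper.from_array(array, name)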
Example #16
def index_select_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    node.op_type = 'Gather'
    for arg in op_def.arg:
        if arg.name == 'axis':
            helper.add_attribute(node, 'axis', arg.i)
        elif arg.name == 'num_axes':
            if arg.i > 1:
                raise ValueError('Reshape to avoid selecting multiple axes.')
    return node, const_tensors
Example #17
def dropout_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    drop_ratio = 0.5  # Default probability of zeroing an element.
    for arg in op_def.arg:
        if arg.name == 'prob':
            drop_ratio = arg.f
        elif arg.name == 'prob_desc':
            drop_ratio = helper.fetch_argument(op_def, arg, context.ws)
    helper.add_attribute(node, 'ratio', float(drop_ratio))
    return node, const_tensors
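helper.fetch_argument (also used in Examples #10 and #26) resolves a '*_desc' argument whose value only exists in the workspace at export time. A sketch under that assumption; fetch_tensor is a hypothetical workspace method:

def fetch_argument(op_def, arg, ws):
    # Assumption: a '*_desc' argument stores the name of a tensor
    # holding the actual value.
    return ws.fetch_tensor(arg.s.decode())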
Example #18
def softmax_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    ndim = len(context.blob_shapes[op_def.input[0]])
    for arg in op_def.arg:
        if arg.name == 'axis':
            axis = arg.i + (ndim if arg.i < 0 else 0)
            if axis != (ndim - 1):
                raise ValueError('Softmax axis must be the last dimension when opset < 13.')
            helper.add_attribute(node, 'axis', arg.i)
    return node, const_tensors
Example #19
def selu_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    alpha, gamma = 1.67326, 1.0507
    for arg in op_def.arg:
        if arg.name == 'alpha':
            alpha = arg.f
        elif arg.name == 'gamma':
            gamma = arg.f
    helper.add_attribute(node, 'alpha', alpha)
    helper.add_attribute(node, 'gamma', gamma)
    return node, const_tensors
Example #20
def flatten_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    for arg in op_def.arg:
        if arg.name == 'axis':
            helper.add_attribute(node, 'axis', arg.i)
        elif arg.name == 'num_axes':
            if arg.i != -1:
                raise ValueError(
                    'Expected <num_axes> to be -1, '
                    'got {}.'.format(arg.i))
    return node, None
Example #21
def hardsigmoid_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    alpha, beta = 0.2, 0.5
    for arg in op_def.arg:
        if arg.name == 'alpha':
            alpha = arg.f
        elif arg.name == 'beta':
            beta = arg.f
    helper.add_attribute(node, 'alpha', alpha)
    helper.add_attribute(node, 'beta', beta)
    return node, const_tensors
Example #22
def top_k_exporter_v10(op_def, context):
    node, (k, axis, largest, _) = top_k_exporter(**locals())
    if largest == 0:
        raise ValueError('TopK-10 does not support smallest mode.')
    helper.add_attribute(node, 'axis', axis)
    k = helper.from_array(
        numpy.array([k], 'int64'),
        context.unique_name(op_def.input[0] + '/top_k/k'),
    )
    node.input.extend([k.name])
    return node, [k]
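Note the contrast with Example #1: TopK-1 carries k as a node attribute, while TopK-10 takes it as a second input, so this exporter returns the k initializer for the caller to add to the graph.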
Example #23
def arg_reduce_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    # ONNX arg reductions return indices only; drop the values output.
    indices = node.output[0]
    node.ClearField('output')
    node.output.extend([indices])
    for arg in op_def.arg:
        if arg.name == 'axis':
            helper.add_attribute(node, 'axis', arg.i)
        elif arg.name == 'keepdims':
            helper.add_attribute(node, 'keepdims', arg.i)
    return node, None
Example #24
def roi_pool(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    node.op_type = 'MaxRoiPool'
    pooled_shape = [None, None]
    for arg in op_def.arg:
        if arg.name == 'pooled_h':
            pooled_shape[0] = arg.i
        elif arg.name == 'pooled_w':
            pooled_shape[1] = arg.i
        elif arg.name == 'spatial_scale':
            helper.add_attribute(node, 'spatial_scale', arg.f)
    helper.add_attribute(node, 'pooled_shape', pooled_shape)
    return node, const_tensors
Example #25
def resize_v7(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    node.op_type = 'Upsample'
    input_shape = context.blob_shapes[op_def.input[0]]
    output_shape = context.blob_shapes[op_def.output[0]]
    for arg in op_def.arg:
        if arg.name == 'mode':
            helper.add_attribute(node, 'mode', arg.s.lower())
    helper.add_attribute(node, 'scales', [
        float(output_shape[i]) / input_shape[i]
        for i in range(len(input_shape))
    ])
    return node, const_tensors
Example #26
def pad_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    pads, value = [], 0
    for arg in op_def.arg:
        if arg.name == 'pads':
            pads = [int(e) for e in arg.ints]
        elif arg.name == 'pads_desc':
            pads = helper.fetch_argument(op_def, arg, context.ws)
        elif arg.name == 'mode':
            helper.add_attribute(node, 'mode', arg.s.lower())
        elif arg.name == 'value':
            value = arg.f
    # Also return <pads> and <value>, presumably for version-specific
    # wrappers to encode as attributes or extra inputs.
    return node, pads, value
Example #27
def trilu_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    k = 0
    for arg in op_def.arg:
        if arg.name == 'upper':
            helper.add_attribute(node, 'upper', arg.i)
        elif arg.name == 'k':
            k = arg.i
    k = helper.from_array(
        numpy.array(k, 'int64'),
        context.unique_name(op_def.input[0] + '/trilu/k'),
    )
    node.input.extend([k.name])
    return node, [k]
Example #28
def gather_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    axis, end_axis = None, None
    for arg in op_def.arg:
        if arg.name == 'axis':
            axis = arg.i
            helper.add_attribute(node, 'axis', arg.i)
        elif arg.name == 'end_axis':
            end_axis = arg.i
            if end_axis < 0:
                input_shape = context.blob_shapes[op_def.input[0]]
                end_axis += len(input_shape)
    if end_axis is not None and axis != end_axis:
        raise ValueError('Reshape to avoid multiple axes.')
    return node, const_tensors
Example #29
def cumulative_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    axis = 0
    for arg in op_def.arg:
        if arg.name == 'axis':
            axis = arg.i
        elif arg.name == 'exclusive':
            helper.add_attribute(node, 'exclusive', arg.i)
        elif arg.name == 'reverse':
            helper.add_attribute(node, 'reverse', arg.i)
    axis = helper.from_array(
        numpy.array(axis, 'int64'),
        context.unique_name(op_def.input[0] + '/cumulative/axis'),
    )
    node.input.extend([axis.name])
    return node, [axis]
Example #30
def resize_v10(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    input_shape = context.blob_shapes[op_def.input[0]]
    output_shape = context.blob_shapes[op_def.output[0]]
    for arg in op_def.arg:
        if arg.name == 'mode':
            helper.add_attribute(node, 'mode', arg.s.lower())
    scales = helper.from_array(
        numpy.array([
            float(output_shape[i]) / input_shape[i]
            for i in range(len(input_shape))
        ], 'float32'),
        context.unique_name(op_def.input[0] + '/resize/scales'),
    )
    node.input.extend([scales.name])
    return node, [scales]
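The mechanism that ties these versioned exporters to a target opset is not shown in the examples. One plausible scheme, purely illustrative and not export_util's actual API: key a table by op type and minimum opset, then pick the newest entry that does not exceed the requested version.

_EXPORTERS = {
    ('Resize', 7): resize_v7,
    ('Resize', 10): resize_v10,
    ('TopK', 1): top_k_exporter_v1,
    ('TopK', 10): top_k_exporter_v10,
}

def find_exporter(op_type, opset_version):
    versions = [v for t, v in _EXPORTERS if t == op_type and v <= opset_version]
    return _EXPORTERS[(op_type, max(versions))] if versions else None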