Example #1
def div_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    const_tensors = []  # Global scalars
    for name in op_def.input:
        if name.startswith('/share/scalar/'):
            const_tensors.append(helper.from_tensor(name, context.ws))
    return node, const_tensors
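Examples #1, #10, and #16 below share one pattern: export_util.translate builds the ONNX node from the Dragon OpDef, and any input living under the shared scalar namespace '/share/scalar/' is re-collected as a constant initializer. As a rough sketch of what helper.from_tensor presumably does (the fetch accessor below is an assumption, not a confirmed Dragon API):

import numpy
from onnx import numpy_helper

def from_tensor(name, ws):
    # Hypothetical sketch: pull the named tensor out of the workspace
    # and wrap it as an ONNX TensorProto initializer.
    array = numpy.asarray(ws.fetch_tensor(name))
    return numpy_helper.from_array(array, name=name)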
Example #2
File: vision.py  Project: ORG-MARS/dragon
def depth_space_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    for arg in op_def.arg:
        _assert_data_format(arg)
        if arg.name == 'block_size':
            helper.add_attribute(node, 'blocksize', arg.i)
    return node, const_tensors
Example #3
def clip_exporter_v11(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    min_value, max_value, const_tensors = None, None, []
    dtype = context.ws.FetchTensor(op_def.output[0]).dtype
    for arg in op_def.arg:
        if arg.name == 'low':
            min_value = arg.f
        elif arg.name == 'high':
            max_value = arg.f
    if min_value is not None:
        const_tensors.append(helper.from_array(
            numpy.array(min_value, dtype),
            context.unique_name(op_def.input[0] + '/clip/min_value'),
        ))
        node.input.extend([const_tensors[-1].name])
    else:
        node.input.extend([''])
    if max_value is not None:
        const_tensors.append(helper.from_array(
            numpy.array(max_value, dtype),
            context.unique_name(op_def.input[0] + '/clip/max_value'),
        ))
        node.input.extend([const_tensors[-1].name])
    else:
        node.input.extend([''])
    return node, const_tensors
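The v11 variant exists because Clip-11 moved the bounds from attributes (compare Example #11) to optional inputs; ONNX marks an omitted optional input with an empty string, which is exactly what the node.input.extend(['']) branches produce. A standalone illustration using the stock onnx.helper (not Dragon code):

from onnx import helper as onnx_helper

# A Clip-11 node with an upper bound only: the empty string fills the
# omitted optional 'min' slot.
node = onnx_helper.make_node(
    'Clip', inputs=['x', '', 'x/clip/max_value'], outputs=['y'])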
Example #4
def multinomial_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    helper.add_attribute(node, 'dtype', helper.tensor_type('int64'))
    for arg in op_def.arg:
        if arg.name == 'sample_size':
            helper.add_attribute(node, 'sample_size', arg.i)
    return node, const_tensors
Example #5
def conv_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    node.op_type = 'ConvTranspose' if 'Transpose' in op_def.type else 'Conv'
    if 'Depthwise' in op_def.type:
        input_shape = context.blob_shapes[op_def.input[0]]
        helper.add_attribute(node, 'group', input_shape[1])
    rank = len(context.blob_shapes[op_def.input[0]]) - 2
    for arg in op_def.arg:
        _assert_data_format(arg)
        if arg.name == 'kernel_shape':
            helper.add_attribute(node, 'kernel_shape',
                                 _normalize_tuple(arg.ints, rank))
        elif arg.name == 'dilations':
            helper.add_attribute(node, 'dilations',
                                 _normalize_tuple(arg.ints, rank))
        elif arg.name == 'strides':
            helper.add_attribute(node, 'strides',
                                 _normalize_tuple(arg.ints, rank))
        elif arg.name == 'pads':
            helper.add_attribute(node, 'pads', _normalize_pads(arg.ints, rank))
        elif arg.name == 'padding' and arg.s != b'VALID':
            helper.add_attribute(node, 'auto_pad', arg.s)
        elif arg.name == 'group':
            helper.add_attribute(node, 'group', arg.i)
        elif arg.name == 'output_shape':
            helper.add_attribute(node, 'output_shape', arg.ints)
        elif arg.name == 'output_padding':
            helper.add_attribute(node, 'output_padding', arg.ints)
    return node, const_tensors
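_assert_data_format, _normalize_tuple, and _normalize_pads are module-level helpers not shown on this page. Hedged sketches of the latter two, under the assumption that they broadcast spatial arguments to the tensor rank and expand pads into ONNX's begin-plus-end layout:

def _normalize_tuple(value, rank):
    # Assumed behavior: truncate or right-pad the list so it covers
    # every spatial axis, repeating the last entry.
    value = list(value)
    if len(value) >= rank:
        return value[:rank]
    return value + [value[-1]] * (rank - len(value))

def _normalize_pads(value, rank):
    # Assumed behavior: ONNX wants [x1_begin, x2_begin, ..., x1_end, ...],
    # i.e. 2 * rank entries; duplicate symmetric pads when needed.
    value = list(value)
    if len(value) == rank * 2:
        return value
    return _normalize_tuple(value, rank) * 2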
Example #6
def pool(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    rank = len(context.blob_shapes[op_def.input[0]]) - 2
    global_pool, node_copy = 0, copy.deepcopy(node)
    for arg in op_def.arg:
        _assert_data_format(arg)
        if arg.name == 'kernel_shape':
            helper.add_attribute(node, 'kernel_shape',
                                 _normalize_tuple(arg.ints, rank))
        elif arg.name == 'strides':
            helper.add_attribute(node, 'strides',
                                 _normalize_tuple(arg.ints, rank))
        elif arg.name == 'pads':
            helper.add_attribute(node, 'pads', _normalize_pads(arg.ints, rank))
        elif arg.name == 'padding' and arg.s != b'VALID':
            helper.add_attribute(node, 'auto_pad', arg.s)
        elif arg.name == 'mode':
            if arg.s == b'MAX':
                node.op_type = 'MaxPool'
            elif arg.s == b'AVG':
                node.op_type = 'AveragePool'
        elif arg.name == 'ceil_mode':
            helper.add_attribute(node, 'ceil_mode', arg.i)
        elif arg.name == 'global_pool':
            global_pool = arg.i
    if global_pool > 0:
        # Switch to the copy taken before the attribute loop:
        # Global* pooling accepts no kernel/stride/pad attributes.
        node_copy.op_type = 'Global' + node.op_type
        node = node_copy
    return node, const_tensors
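The deepcopy at the top is the whole trick: node_copy is snapshotted before the kernel/stride/pad attributes are attached, so promoting it to a Global* op yields the attribute-free node that ONNX global pooling requires. A standalone demonstration with stock onnx.helper:

import copy
from onnx import helper as onnx_helper

node = onnx_helper.make_node('MaxPool', ['x'], ['y'])
node_copy = copy.deepcopy(node)  # Snapshot before any attributes.
node.attribute.extend([onnx_helper.make_attribute('kernel_shape', [2, 2])])
node_copy.op_type = 'GlobalMaxPool'
assert len(node_copy.attribute) == 0  # The copy stayed attribute-free.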
Example #7
def slice_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    in_shape = context.blob_shapes[op_def.input[0]]
    starts, sizes, ends = [], [], []
    for arg in op_def.arg:
        if arg.name == 'starts':
            starts = [int(e) for e in arg.ints]
        elif arg.name == 'starts_desc':
            starts = helper.fetch_argument(op_def, arg, context.ws)
        elif arg.name == 'starts_descs':
            starts = helper.fetch_arguments(op_def, arg, context.ws)
        elif arg.name == 'sizes':
            sizes = [int(e) for e in arg.ints]
        elif arg.name == 'sizes_desc':
            sizes = helper.fetch_argument(op_def, arg, context.ws)
        elif arg.name == 'sizes_descs':
            sizes = helper.fetch_arguments(op_def, arg, context.ws)
    for i, size in enumerate(sizes):
        if size == -1:
            ends.append(in_shape[i])
        elif size == 0:
            ends.append(starts[i] + 1)
        else:
            ends.append(starts[i] + size)
    return node, starts, ends
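Unlike the other exporters, this one returns (node, starts, ends) rather than (node, const_tensors): Dragon's <sizes> argument is converted into ONNX ends (-1 runs to the end of the axis, 0 selects a single element, any other value is an explicit length), and an opset-specific wrapper presumably encodes the result. A hypothetical opset-1 style wrapper, from before Slice moved starts/ends into inputs:

def slice_exporter_v1(op_def, context):
    # Hypothetical wrapper, not shown on this page: before opset 10,
    # Slice carried starts/ends as node attributes.
    node, starts, ends = slice_exporter(op_def, context)
    helper.add_attribute(node, 'starts', starts)
    helper.add_attribute(node, 'ends', ends)
    return node, []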
Example #8
def affine_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    node.op_type = 'ATen'  # Currently not supported in ai.onnx
    helper.add_attribute(node, 'op_type', 'Affine')
    for arg in op_def.arg:
        if arg.name == 'axes':
            helper.add_attribute(node, 'axes', arg.ints)
    return node, const_tensors
Example #9
def softmax_exporter_v13(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    ndim = len(context.blob_shapes[op_def.input[0]])
    for arg in op_def.arg:
        if arg.name == 'axis':
            # Normalize a negative axis; opset 13 accepts either form.
            axis = arg.i + (ndim if arg.i < 0 else 0)
            helper.add_attribute(node, 'axis', axis)
    return node, const_tensors
Example #10
def minimum_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    node.op_type = 'Min'  # Eltwise, Broadcast
    const_tensors = []  # Global scalars
    for name in op_def.input:
        if name.startswith('/share/scalar/'):
            const_tensors.append(helper.from_tensor(name, context.ws))
    return node, const_tensors
Example #11
def clip_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    for arg in op_def.arg:
        if arg.name == 'low':
            helper.add_attribute(node, 'min', arg.f)
        elif arg.name == 'high':
            helper.add_attribute(node, 'max', arg.f)
    return node, const_tensors
Example #12
def relu_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    for arg in op_def.arg:
        if arg.name == 'alpha' and arg.f > 0:
            node.op_type = 'LeakyRelu'
            helper.add_attribute(node, 'alpha', arg.f)
    return node, const_tensors
Example #13
def unsqueeze_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    axes = None
    for arg in op_def.arg:
        if arg.name == 'axes':
            axes = arg.ints
    if axes is not None:
        helper.add_attribute(node, 'axes', axes)
    return node, const_tensors
Example #14
def split_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    axis = 0
    for arg in op_def.arg:
        if arg.name == 'axis':
            axis = arg.i
    size_splits = [context.blob_shapes[e][axis] for e in op_def.output]
    helper.add_attribute(node, 'split', size_splits)
    return node, const_tensors
Example #15
def cast_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    node.op_type = 'Cast'
    if len(node.input) == 0:
        raise ValueError('ONNX does not support in-place cast.')
    for arg in op_def.arg:
        if arg.name == 'dtype':
            helper.add_attribute(node, 'to', helper.tensor_type(arg.s))
    return node, const_tensors
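helper.tensor_type maps a dtype name (delivered as bytes in the protobuf arg) onto the ONNX element-type enum. A hedged sketch of such a mapping; the real Dragon helper certainly covers more dtypes:

from onnx import TensorProto

def tensor_type(dtype):
    # Sketch only: decode protobuf bytes, then look up the enum value.
    if isinstance(dtype, bytes):
        dtype = dtype.decode()
    return {'bool': TensorProto.BOOL, 'int8': TensorProto.INT8,
            'int64': TensorProto.INT64, 'float16': TensorProto.FLOAT16,
            'float32': TensorProto.FLOAT, 'float64': TensorProto.DOUBLE,
            }[dtype]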
Example #16
def add_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    dtype = str(helper.fetch_tensor(op_def.output[0], context.ws).dtype)
    node.op_type = 'Or' if dtype == 'bool' else 'Add'
    const_tensors = []  # Global scalars
    for name in op_def.input:
        if name.startswith('/share/scalar/'):
            const_tensors.append(helper.from_tensor(name, context.ws))
    return node, const_tensors
Example #17
def expand_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    shape = list(context.blob_shapes[op_def.output[0]])
    shape = helper.from_array(
        numpy.array(shape, 'int64'),
        context.unique_name(op_def.input[0] + '/expand/shape'),
    )
    node.input.extend([shape.name])
    return node, [shape]
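ONNX Expand takes the target shape as a second int64 input rather than an attribute, so the exporter bakes the known output shape into a constant. helper.from_array, used here and in Examples #3 and #24, is presumably a thin wrapper over onnx.numpy_helper; a minimal sketch:

import numpy
from onnx import numpy_helper

def from_array(array, name):
    # Assumed behavior: convert to numpy and attach the unique name.
    return numpy_helper.from_array(numpy.asarray(array), name=name)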
Example #18
def lp_normalize_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    node.op_type = 'LpNormalization'
    for arg in op_def.arg:
        if arg.name == 'axis':
            helper.add_attribute(node, 'axis', arg.i)
        elif arg.name == 'p':
            helper.add_attribute(node, 'p', arg.i)
    return node, const_tensors
Example #19
def transpose_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    for arg in op_def.arg:
        if arg.name == 'perm':
            helper.add_attribute(node, 'perm', arg.ints)
        elif arg.name == 'perm_desc':
            values = helper.fetch_argument(op_def, arg, context.ws)
            helper.add_attribute(node, 'perm', values)
    return node, None
Example #20
def reduce_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    axes = list(range(len(context.blob_shapes[op_def.input[0]])))
    for arg in op_def.arg:
        if arg.name == 'axes':
            axes = arg.ints
        elif arg.name == 'keepdims':
            helper.add_attribute(node, 'keepdims', arg.i)
    helper.add_attribute(node, 'axes', axes)
    return node, const_tensors
Example #21
def index_select_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    node.op_type = 'Gather'
    for arg in op_def.arg:
        if arg.name == 'axis':
            helper.add_attribute(node, 'axis', arg.i)
        elif arg.name == 'num_axes':
            if arg.i > 1:
                raise ValueError('Reshape to avoid selecting multiple axes.')
    return node, const_tensors
Example #22
def softmax_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    ndim = len(context.blob_shapes[op_def.input[0]])
    for arg in op_def.arg:
        if arg.name == 'axis':
            axis = arg.i + (ndim if arg.i < 0 else 0)
            if axis != (ndim - 1):
                raise ValueError('Axis must be the last one when opset < 13.')
            helper.add_attribute(node, 'axis', arg.i)
    return node, const_tensors
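The guard exists because Softmax before opset 13 coerces its input to 2-D at <axis> and normalizes over the whole flattened trailing block, which only matches Dragon's per-axis softmax when the axis is the last one. A quick numpy check of the mismatch for an inner axis:

import numpy

x = numpy.random.rand(2, 3, 4)
# Opset < 13 semantics at axis=1: flatten the trailing dims, then softmax.
flat = numpy.exp(x.reshape(2, -1))
onnx_style = (flat / flat.sum(-1, keepdims=True)).reshape(x.shape)
# Per-axis softmax over axis 1, the semantics Dragon exports.
e = numpy.exp(x)
per_axis = e / e.sum(1, keepdims=True)
assert not numpy.allclose(onnx_style, per_axis)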
Example #23
def dropout_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    drop_ratio = 0.5  # Probability of zeroing each element.
    for arg in op_def.arg:
        if arg.name == 'prob':
            drop_ratio = arg.f
        elif arg.name == 'prob_desc':
            drop_ratio = helper.fetch_argument(op_def, arg, context.ws)
    helper.add_attribute(node, 'ratio', float(drop_ratio))
    return node, const_tensors
Example #24
def fill_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    node.op_type = 'Constant'
    shape = list(context.blob_shapes[op_def.output[0]])
    value = helper.from_array(
        numpy.array(shape, 'int64'),
        context.unique_name(op_def.output[0] + '/constant/value'))
    helper.add_attribute(node, 'value', value)
    node.ClearField('input')
    return node, [value]
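A Constant node carries its payload in the value attribute and takes no inputs, hence ClearField('input'); note that the payload here is the output shape encoded as int64. A standalone illustration of assembling such a node with stock onnx.helper:

import numpy
from onnx import helper as onnx_helper, numpy_helper

payload = numpy_helper.from_array(
    numpy.array([1, 3, 8, 8], 'int64'), name='y/constant/value')
# Constant takes no inputs; keyword arguments become attributes.
node = onnx_helper.make_node('Constant', inputs=[], outputs=['y'],
                             value=payload)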
Example #25
def selu_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    alpha, gamma = 1.67326, 1.0507
    for arg in op_def.arg:
        if arg.name == 'alpha':
            alpha = arg.f
        elif arg.name == 'gamma':
            gamma = arg.f
    helper.add_attribute(node, 'alpha', alpha)
    helper.add_attribute(node, 'gamma', gamma)
    return node, const_tensors
Example #26
def matmul_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    node.op_type = 'MatMul'
    for arg in op_def.arg:
        if arg.name == 'transA':
            if arg.i > 0:
                raise ValueError('MatMul requires a non-transposed matrix A.')
        elif arg.name == 'transB':
            if arg.i > 0:
                raise ValueError('MatMul requires a non-transposed matrix B.')
    return node, const_tensors
Example #27
def hardsigmoid_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    alpha, beta = 0.2, 0.5
    for arg in op_def.arg:
        if arg.name == 'alpha':
            alpha = arg.f
        elif arg.name == 'beta':
            beta = arg.f
    helper.add_attribute(node, 'alpha', alpha)
    helper.add_attribute(node, 'beta', beta)
    return node, const_tensors
Example #28
def flatten_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    for arg in op_def.arg:
        if arg.name == 'axis':
            helper.add_attribute(node, 'axis', arg.i)
        elif arg.name == 'num_axes':
            if arg.i != -1:
                raise ValueError(
                    'Expected <num_axes> to be -1, '
                    'got {}.'.format(arg.i))
    return node, None
Example #29
def arg_reduce_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    # ONNX requires indices only, remove the values.
    indices = node.output[0]
    node.ClearField('output')
    node.output.extend([indices])
    for arg in op_def.arg:
        if arg.name == 'axis':
            helper.add_attribute(node, 'axis', arg.i)
        elif arg.name == 'keepdims':
            helper.add_attribute(node, 'keepdims', arg.i)
    return node, None
Example #30
def batch_norm_exporter(op_def, context):
    node, const_tensors = export_util.translate(**locals())
    node.op_type = 'BatchNormalization'
    for arg in op_def.arg:
        if arg.name == 'epsilon':
            helper.add_attribute(node, 'epsilon', arg.f)
        elif arg.name == 'momentum':
            helper.add_attribute(node, 'momentum', arg.f)
        elif arg.name == 'momentum_desc':
            momentum = helper.fetch_argument(op_def, arg, context.ws)
            helper.add_attribute(node, 'momentum', float(momentum))
    return node, const_tensors
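All thirty exporters follow one calling convention, (op_def, context) -> (node, const_tensors), which suggests they are registered against Dragon op types and looked up during graph export. A minimal sketch of that registry pattern; the real export_util.register may differ in detail:

_EXPORTERS = {}

def register(op_type):
    # Sketch of a decorator-based registry, one exporter per op type.
    def decorator(func):
        _EXPORTERS[op_type] = func
        return func
    return decorator

@register('BatchNorm')
def batch_norm_exporter(op_def, context):
    ...  # Body as in Example #30.

def export_node(op_def, context):
    # Look up the exporter by the Dragon op type and run it.
    return _EXPORTERS[op_def.type](op_def, context)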