def generic_convert_binary(converter, nnef_op, onnx_graph, target_name, broadcast_workaround=False):
    # type: (Converter, NNEFOperation, ONNXGraph, str, bool)->None

    if broadcast_workaround:
        x, y = converter.converted_tensors(nnef_op.inputs)
        z = converter.converted_tensor(nnef_op.output)

        # Caffe2 onnx backend cannot broadcast in Pow
        if x.shape != z.shape:
            x = converter.add_expand(onnx_graph, x, z.shape)
        if y.shape != z.shape:
            y = converter.add_expand(onnx_graph, y, z.shape)

        ONNXOperation(graph=onnx_graph,
                      name=target_name,
                      inputs=(x, y),
                      outputs=z)
    else:
        x, y = converter.converted_tensors(nnef_op.inputs)
        z = converter.converted_tensor(nnef_op.output)

        ONNXOperation(graph=onnx_graph,
                      name=target_name,
                      inputs=(converter.add_unsqueeze(onnx_graph, x, list(range(x.rank, y.rank)))
                              if 0 < x.rank < y.rank else x,
                              converter.add_unsqueeze(onnx_graph, y, list(range(y.rank, x.rank)))
                              if 0 < y.rank < x.rank else y),
                      outputs=z)

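# A note on the rank alignment above (an illustrative sketch, not part of the converter):
# NNEF aligns broadcast operands on their leading dimensions, while ONNX aligns on
# trailing ones, so singleton axes are appended on the right. E.g. with x.shape == [4]
# and y.shape == [4, 5], x is unsqueezed at axes list(range(1, 2)) == [1] to shape
# [4, 1], which then broadcasts correctly against [4, 5].
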
def convert_default(converter, nnef_op, onnx_graph):
    # type: (Converter, NNEFOperation, ONNXGraph)->None

    print("Warning: Converter of {} is not implemented, doing default conversion.".format(nnef_op.name))

    def flatten(x):
        return utils.flatten(x) if isinstance(x, (list, tuple)) else x

    ONNXOperation(graph=onnx_graph,
                  name=nnef_op.name,
                  inputs=converter.converted_tensors(nnef_op.inputs),
                  attribs={k: flatten(v) for k, v in six.iteritems(nnef_op.attribs)},
                  outputs=converter.converted_tensors(nnef_op.outputs))

def convert_multilinear_upsample(converter, nnef_op, onnx_graph):
    # type: (Converter, NNEFOperation, ONNXGraph)->None

    input, output = converter.converted_tensors((nnef_op.input, nnef_op.output))

    if nnef_op.attribs['method'] != 'symmetric':
        if converter.enable_imprecise_image_resize:
            print("Warning: method={} is unsupported in multilinear_upsample, "
                  "using symmetric, because enable_imprecise_image_resize was True"
                  .format(nnef_op.attribs["method"]))
        else:
            assert False, "Error: method={} is unsupported in multilinear_upsample. " \
                          "Use enable_imprecise_image_resize=True to suppress this error." \
                          .format(nnef_op.attribs["method"])

    if nnef_op.attribs['border'] != 'replicate':
        if converter.enable_imprecise_image_resize:
            print("Warning: border={} is unsupported in multilinear_upsample, "
                  "using replicate, because enable_imprecise_image_resize was True"
                  .format(nnef_op.attribs["border"]))
        else:
            assert False, "Error: border={} is unsupported in multilinear_upsample. " \
                          "Use enable_imprecise_image_resize=True to suppress this error." \
                          .format(nnef_op.attribs["border"])

    scales = [float(f) for f in [1, 1] + nnef_op.attribs['factor']]

    ONNXOperation(graph=onnx_graph,
                  name='Upsample',
                  inputs=(input, converter.constant_1d_tensor(graph=onnx_graph, list_=scales, dtype='FLOAT')),
                  attribs=dict(mode='linear'),
                  outputs=output)

def generic_convert_conv_deconv(converter, nnef_op, onnx_graph, is_deconv):
    # type: (Converter, NNEFOperation, ONNXGraph, bool)->None

    input, filter, bias = converter.converted_tensors(nnef_op.inputs)
    output = converter.converted_tensor(nnef_op.output)

    partial_convert_conv_deconv(converter, nnef_op, onnx_graph, is_deconv, input, filter, bias, output)

def generic_convert_pool(converter, nnef_op, onnx_graph, target_name):
    # type: (Converter, NNEFOperation, ONNXGraph, str)->None

    input = converter.converted_tensor(nnef_op.input)
    outputs = converter.converted_tensors(nnef_op.outputs)

    partial_convert_pool(converter, nnef_op, onnx_graph, target_name, input, outputs)

def convert_unstack(converter, nnef_op, onnx_graph):
    # type: (Converter, NNEFOperation, ONNXGraph)->None

    input = converter.converted_tensor(nnef_op.input)
    outputs = converter.converted_tensors(nnef_op.outputs)

    axis = nnef_op.attribs['axis']
    num_parts = input.shape[axis]

    def unsqueeze(shape, axis):
        shape = list(shape)
        shape.insert(axis, 1)
        return shape

    intermediates = [ONNXTensor(graph=onnx_graph, shape=unsqueeze(output.shape, axis), dtype=output.dtype)
                     for output in outputs]

    ONNXOperation(graph=onnx_graph,
                  name='Split',
                  inputs=input,
                  attribs=dict(axis=axis, split=[1] * num_parts),
                  outputs=intermediates)

    # The Split outputs keep the singleton axis, so squeeze it away to get the unstacked tensors
    for intermediate, output in zip(intermediates, outputs):
        converter.add_squeeze(onnx_graph, intermediate, axes=[axis], squeezed_tensor=output)

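# A worked example of the decomposition above (a sketch, assuming input.shape == [2, 3, 4]
# and axis == 1): Split with split == [1, 1, 1] yields three tensors of shape [2, 1, 4],
# and squeezing axis 1 produces the three [2, 4] outputs that unstack expects.
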
def generic_convert_binary1_or_binary2(converter, nnef_op, onnx_graph, target_name1, target_name2):
    # type: (Converter, NNEFOperation, ONNXGraph, str, str)->None

    x, y = converter.converted_tensors(nnef_op.inputs)
    z = converter.converted_tensor(nnef_op.output)

    binary1 = ONNXOperation(
        graph=onnx_graph,
        name=target_name1,
        inputs=(converter.add_unsqueeze(onnx_graph, x, list(range(x.rank, y.rank))) if 0 < x.rank < y.rank else x,
                converter.add_unsqueeze(onnx_graph, y, list(range(y.rank, x.rank))) if 0 < y.rank < x.rank else y),
        outputs=ONNXTensor(graph=onnx_graph, shape=list(z.shape), dtype=z.dtype))

    binary2 = ONNXOperation(
        graph=onnx_graph,
        name=target_name2,
        inputs=(converter.add_unsqueeze(onnx_graph, x, list(range(x.rank, y.rank))) if 0 < x.rank < y.rank else x,
                converter.add_unsqueeze(onnx_graph, y, list(range(y.rank, x.rank))) if 0 < y.rank < x.rank else y),
        outputs=ONNXTensor(graph=onnx_graph, shape=list(z.shape), dtype=z.dtype))

    ONNXOperation(graph=onnx_graph, name='Or', inputs=(binary1.output, binary2.output), outputs=z)

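# This helper covers comparisons that older ONNX opsets lack as single operators;
# for example, x <= y can be emitted as Or(Less(x, y), Equal(x, y)), since
# a <= b holds exactly when a < b or a == b.
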
def convert_clamp(converter, nnef_op, onnx_graph):
    # type: (Converter, NNEFOperation, ONNXGraph)->None

    x, a, b = converter.converted_tensors(nnef_op.inputs)
    y = converter.converted_tensor(nnef_op.output)

    if a.is_constant and a.shape == [] and b.is_constant and b.shape == []:
        ONNXOperation(graph=onnx_graph,
                      name='Clip',
                      inputs=x,
                      attribs=dict(min=a.data[0], max=b.data[0]),
                      outputs=y)
    else:
        max_rank = max(t.rank for t in [x, a, b])

        def broadcast(t):
            return converter.add_unsqueeze(onnx_graph, t, list(range(t.rank, max_rank))) \
                if 0 < t.rank < max_rank else t

        min = ONNXOperation(
            graph=onnx_graph,
            name='Min',
            inputs=(broadcast(x), broadcast(b)),
            outputs=ONNXTensor(graph=onnx_graph, shape=list(y.shape), dtype=y.dtype))

        ONNXOperation(
            graph=onnx_graph,
            name='Max',
            inputs=(min.output, broadcast(a)),
            outputs=y)

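# The non-constant branch relies on the identity clamp(x, a, b) == max(min(x, b), a)
# for a <= b; e.g. clamp(5, 0, 3): min(5, 3) == 3, max(3, 0) == 3, and
# clamp(-2, 0, 3): min(-2, 3) == -2, max(-2, 0) == 0.
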
def generic_convert_reduce(converter, nnef_op, onnx_graph, target_name, target_name_if_normalized="",
                           one_axis=False):
    # type: (Converter, NNEFOperation, ONNXGraph, str, str, bool)->None

    input, output = converter.converted_tensors((nnef_op.input, nnef_op.output))

    if not nnef_op.attribs['axes']:
        ONNXOperation(graph=onnx_graph, name='Identity', inputs=input, outputs=output)
        return

    if one_axis:
        assert len(nnef_op.attribs['axes']) == 1, "{} supports only one axis in ONNX".format(target_name)

    onnx_op = ONNXOperation(graph=onnx_graph,
                            name=(target_name_if_normalized
                                  if target_name_if_normalized and nnef_op.attribs['normalize']
                                  else target_name),
                            inputs=input,
                            attribs=dict(keepdims=1),
                            outputs=output)

    if one_axis:
        onnx_op.attribs['axis'] = nnef_op.attribs['axes'][0]
    else:
        onnx_op.attribs['axes'] = list(nnef_op.attribs['axes'])

def convert_box(converter, nnef_op, onnx_graph):
    # type: (Converter, NNEFOperation, ONNXGraph)->None

    input, output = converter.converted_tensors((nnef_op.input, nnef_op.output))

    if nnef_op.attribs['size'] == [1] * input.rank:
        onnx_op = ONNXOperation(graph=onnx_graph,
                                name='Pad',
                                inputs=input,
                                attribs=dict(mode=converter.onnx_pad_mode(nnef_op.attribs['border']),
                                             pads=converter.onnx_pads(nnef_op.attribs['padding'])),
                                outputs=output)
        if onnx_op.attribs['mode'] == 'constant':
            onnx_op.attribs['value'] = 0.0
        return

    if nnef_op.attribs['normalize']:
        partial_convert_pool(converter, nnef_op, onnx_graph, target_name='AveragePool', input=input,
                             outputs=(output,))
    else:
        temporary = ONNXTensor(graph=onnx_graph, shape=list(output.shape), dtype=output.dtype)
        partial_convert_pool(converter, nnef_op, onnx_graph,
                             target_name='AveragePool',
                             input=input,
                             outputs=(temporary,),
                             force_constant=True)
        ONNXOperation(graph=onnx_graph,
                      name='Mul',
                      inputs=(temporary,
                              converter.constant_0d_tensor(onnx_graph,
                                                           float(np.product(nnef_op.attribs['size'])),
                                                           'FLOAT')),
                      outputs=output)

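# The unnormalized branch relies on sum-pooling being average-pooling scaled by the
# window volume; e.g. with size == [1, 1, 3, 3] the AveragePool result is multiplied
# by 9.0 to recover the box (sum) filter.
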
def convert_batch_normalization(converter, nnef_op, onnx_graph):
    # type: (Converter, NNEFOperation, ONNXGraph)->None

    input, mean, variance, offset, scale = converter.converted_tensors(nnef_op.inputs)
    output = converter.converted_tensor(nnef_op.output)

    if input.rank < 3:  # Caffe2 BatchNorm works only for rank >= 3
        axes = list(range(input.rank, 3))
        input = converter.add_unsqueeze(onnx_graph=onnx_graph, onnx_tensor=input, axes=axes)
        final_output = output
        output = ONNXTensor(graph=onnx_graph, shape=list(input.shape), dtype=input.dtype)
        converter.add_squeeze(onnx_graph=onnx_graph, onnx_tensor=output, axes=axes, squeezed_tensor=final_output)

    ONNXOperation(graph=onnx_graph,
                  name='BatchNormalization',
                  inputs=(input,
                          converter.add_squeeze(onnx_graph, scale, [0]),
                          converter.add_squeeze(onnx_graph, offset, [0]),
                          converter.add_squeeze(onnx_graph, mean, [0]),
                          converter.add_squeeze(onnx_graph, variance, [0])),
                  attribs=dict(epsilon=nnef_op.attribs['epsilon']),
                  outputs=output)

def convert_prelu(converter, nnef_op, onnx_graph):
    # type: (Converter, NNEFOperation, ONNXGraph)->None

    input, slope = converter.converted_tensors(nnef_op.inputs)
    output = converter.converted_tensor(nnef_op.output)

    ONNXOperation(graph=onnx_graph,
                  name='PRelu',
                  inputs=(input, converter.add_squeeze(onnx_graph, slope, [0])),
                  outputs=output)

def convert_copy_n(converter, nnef_op, onnx_graph):
    # type: (Converter, NNEFOperation, ONNXGraph)->None

    input = converter.converted_tensor(nnef_op.input)
    outputs = converter.converted_tensors(nnef_op.outputs)

    for output in outputs:
        ONNXOperation(graph=onnx_graph, name='Identity', inputs=input, outputs=output)

def convert_concat(converter, nnef_op, onnx_graph):
    # type: (Converter, NNEFOperation, ONNXGraph)->None

    inputs = converter.converted_tensors(nnef_op.inputs)
    output = converter.converted_tensor(nnef_op.output)

    ONNXOperation(graph=onnx_graph,
                  name='Concat',
                  inputs=inputs,
                  attribs=dict(axis=nnef_op.attribs['axis']),
                  outputs=output)

def convert_tile(converter, nnef_op, onnx_graph):
    # type: (Converter, NNEFOperation, ONNXGraph)->None

    input, output = converter.converted_tensors((nnef_op.input, nnef_op.output))

    ONNXOperation(graph=onnx_graph,
                  name='Tile',
                  inputs=(input,
                          converter.constant_1d_tensor(graph=onnx_graph,
                                                       list_=nnef_op.attribs['repeats'],
                                                       dtype='INT64')),
                  outputs=output)

def convert_linear(converter, nnef_op, onnx_graph):
    # type: (Converter, NNEFOperation, ONNXGraph)->None

    A, B, C = converter.converted_tensors(nnef_op.inputs)
    D = converter.converted_tensor(nnef_op.output)

    assert A.rank <= 2 and B.rank <= 2 and C.rank <= 2, "Batch matmul is unsupported in ONNX"

    ONNXOperation(graph=onnx_graph,
                  name='Gemm',
                  inputs=(A, B, C),
                  outputs=D,
                  attribs=dict(transA=0, transB=1))

def convert_nearest_upsample(converter, nnef_op, onnx_graph):
    # type: (Converter, NNEFOperation, ONNXGraph)->None

    input, output = converter.converted_tensors((nnef_op.input, nnef_op.output))

    scales = [float(f) for f in [1, 1] + nnef_op.attribs['factor']]

    ONNXOperation(graph=onnx_graph,
                  name='Upsample',
                  inputs=(input, converter.constant_1d_tensor(onnx_graph, scales, 'FLOAT')),
                  attribs=dict(mode='nearest'),
                  outputs=output)

def convert_matmul(converter, nnef_op, onnx_graph):
    # type: (Converter, NNEFOperation, ONNXGraph)->None

    A, B = converter.converted_tensors(nnef_op.inputs)
    C = converter.converted_tensor(nnef_op.output)

    assert A.rank <= 2 and B.rank <= 2, "Batch matmul is unsupported in ONNX"

    ONNXOperation(graph=onnx_graph,
                  name='Gemm',
                  inputs=(A, B, converter.constant_0d_tensor(graph=onnx_graph, value=0.0, dtype=C.dtype)),
                  outputs=C,
                  attribs=dict(transA=1 if nnef_op.attribs['transposeA'] else 0,
                               transB=1 if nnef_op.attribs['transposeB'] else 0))

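# ONNX Gemm computes alpha * op(A) * op(B) + beta * C with alpha and beta defaulting
# to 1.0, so a scalar 0.0 constant is supplied as C to express a plain matrix product.
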
def convert_stack(converter, nnef_op, onnx_graph):
    # type: (Converter, NNEFOperation, ONNXGraph)->None

    inputs = converter.converted_tensors(nnef_op.inputs)
    output = converter.converted_tensor(nnef_op.output)

    inputs = [converter.add_unsqueeze(onnx_graph, input, [nnef_op.attribs['axis']]) for input in inputs]

    ONNXOperation(graph=onnx_graph,
                  name='Concat',
                  inputs=inputs,
                  attribs=dict(axis=nnef_op.attribs['axis']),
                  outputs=output)

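# A shape sketch for the above (assuming two [2, 3] inputs and axis == 1): each input
# is unsqueezed to [2, 1, 3], and Concat along axis 1 yields the stacked [2, 2, 3] result.
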
def convert_round(converter, nnef_op, onnx_graph):
    # type: (Converter, NNEFOperation, ONNXGraph)->None

    input, output = converter.converted_tensors((nnef_op.input, nnef_op.output))

    add = ONNXOperation(graph=onnx_graph,
                        name='Add',
                        inputs=(input,
                                converter.constant_0d_tensor(graph=onnx_graph, value=0.5, dtype=input.dtype)),
                        outputs=ONNXTensor(graph=onnx_graph, shape=list(output.shape), dtype=output.dtype))

    ONNXOperation(graph=onnx_graph,
                  name='Floor',
                  inputs=add.output,
                  outputs=output)

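# floor(x + 0.5) implements round-half-up; e.g. floor(2.5 + 0.5) == 3.0 and
# floor(-2.5 + 0.5) == -2.0, so ties round toward positive infinity.
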
def convert_pad(converter, nnef_op, onnx_graph):
    # type: (Converter, NNEFOperation, ONNXGraph)->None

    input, output = converter.converted_tensors((nnef_op.input, nnef_op.output))

    onnx_op = ONNXOperation(graph=onnx_graph,
                            name='Pad',
                            inputs=input,
                            attribs=dict(mode=converter.onnx_pad_mode(nnef_op.attribs['border']),
                                         pads=converter.onnx_pads(nnef_op.attribs['padding'])),
                            outputs=output)

    if onnx_op.attribs['mode'] == 'constant':
        onnx_op.attribs['value'] = 0.0

def convert_select(converter, nnef_op, onnx_graph):
    # type: (Converter, NNEFOperation, ONNXGraph)->None

    cond, true_value, false_value = converter.converted_tensors(nnef_op.inputs)
    output = converter.converted_tensor(nnef_op.output)

    max_rank = max(t.rank for t in [cond, true_value, false_value])

    def broadcast(t):
        return converter.add_unsqueeze(onnx_graph, t, list(range(t.rank, max_rank))) \
            if 0 < t.rank < max_rank else t

    ONNXOperation(graph=onnx_graph,
                  name='Where',
                  inputs=(broadcast(cond), broadcast(true_value), broadcast(false_value)),
                  outputs=output)

def convert_split(converter, nnef_op, onnx_graph):
    # type: (Converter, NNEFOperation, ONNXGraph)->None

    input = converter.converted_tensor(nnef_op.input)
    outputs = converter.converted_tensors(nnef_op.outputs)

    axis = nnef_op.attribs['axis']
    ratios = nnef_op.attribs['ratios']
    assert input.shape[axis] % sum(ratios) == 0
    unit = input.shape[axis] // sum(ratios)

    ONNXOperation(graph=onnx_graph,
                  name='Split',
                  inputs=input,
                  attribs=dict(axis=axis, split=[ratio * unit for ratio in ratios]),
                  outputs=outputs)

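# E.g. for input.shape[axis] == 12 and ratios == [1, 2, 1]: unit == 12 // 4 == 3,
# so the ONNX Split sizes become [3, 6, 3].
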
def convert_lrn(converter, nnef_op, onnx_graph):
    # type: (Converter, NNEFOperation, ONNXGraph)->None

    input, output = converter.converted_tensors((nnef_op.input, nnef_op.output))

    nnef_size = nnef_op.attribs['size']
    assert len(nnef_size) >= 2 and all(dim == 1 or i == 1 for i, dim in enumerate(nnef_size)), \
        'Only channel LRN is supported'

    ONNXOperation(graph=onnx_graph,
                  name='LRN',
                  inputs=input,
                  outputs=output,
                  attribs=dict(size=nnef_size[1],
                               alpha=nnef_op.attribs['alpha'],
                               beta=nnef_op.attribs['beta'],
                               bias=nnef_op.attribs['bias']))

def convert_desample(converter, nnef_op, onnx_graph):
    # type: (Converter, NNEFOperation, ONNXGraph)->None

    input, indices = converter.converted_tensors(nnef_op.inputs)
    output = converter.converted_tensor(nnef_op.output)

    strides = list(nnef_op.attribs['stride'])
    if not strides:
        strides = [1] * input.rank
    dilations = nnef_op.attribs['dilation']
    if not dilations:
        dilations = [1] * input.rank

    assert nnef_op.attribs['border'] in ['constant', 'ignore']

    pads = nnef_op.attribs['padding']
    if not pads:  # auto pad
        calc_output_size = [i * s for i, s in zip(input.shape, strides)]
        pads = infer.same_padding(upscaled_input=calc_output_size,
                                  filter=nnef_op.attribs['size'],
                                  stride=strides,
                                  dilation=dilations)

    assert pads[:2] == [(0, 0), (0, 0)], "Padding in batch and channel dimensions is not supported in ONNX"
    pads = pads[2:]
    pads = converter.onnx_pads(pads)

    assert nnef_op.attribs['size'][:2] == strides[:2] == dilations[:2] == [1, 1], \
        'Pooling in batch and channel dimensions is not supported in ONNX'
    strides = strides[2:]
    dilations = dilations[2:]

    assert all(d == 1 for d in dilations), 'Dilation is not supported for pooling in ONNX'

    ONNXOperation(graph=onnx_graph,
                  name='MaxUnpool',
                  inputs=(input, indices),
                  attribs=dict(kernel_shape=nnef_op.attribs['size'][2:], pads=pads, strides=strides),
                  outputs=output)

def convert_transpose(converter, nnef_op, onnx_graph):
    # type: (Converter, NNEFOperation, ONNXGraph)->None

    input, output = converter.converted_tensors((nnef_op.input, nnef_op.output))

    if not nnef_op.attribs['axes']:
        ONNXOperation(graph=onnx_graph, name='Identity', inputs=input, outputs=output)
        return

    perm = list(nnef_op.attribs['axes'])
    if len(perm) < input.rank:
        perm += list(range(len(perm), input.rank))

    ONNXOperation(graph=onnx_graph,
                  name='Transpose',
                  inputs=input,
                  attribs=dict(perm=perm),
                  outputs=output)

def convert_ne(converter, nnef_op, onnx_graph):
    # type: (Converter, NNEFOperation, ONNXGraph)->None

    x, y = converter.converted_tensors(nnef_op.inputs)
    z = converter.converted_tensor(nnef_op.output)

    equal = ONNXOperation(
        graph=onnx_graph,
        name='Equal',
        inputs=(converter.add_unsqueeze(onnx_graph, x, list(range(x.rank, y.rank))) if 0 < x.rank < y.rank else x,
                converter.add_unsqueeze(onnx_graph, y, list(range(y.rank, x.rank))) if 0 < y.rank < x.rank else y),
        outputs=ONNXTensor(graph=onnx_graph, shape=list(z.shape), dtype=z.dtype))

    ONNXOperation(graph=onnx_graph, name='Not', inputs=equal.output, outputs=z)

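# ONNX has no NotEqual operator, so x != y is expressed as Not(Equal(x, y)).
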
def convert_reshape(converter, nnef_op, onnx_graph):
    # type: (Converter, NNEFOperation, ONNXGraph)->None

    input, output = converter.converted_tensors((nnef_op.input, nnef_op.output))

    shape = nnef_op.attribs['shape']
    axis_start = nnef_op.attribs['axis_start']
    axis_count = nnef_op.attribs['axis_count']
    if axis_count == -1:
        axis_count = input.rank - axis_start

    # NNEF reshapes only the [axis_start, axis_start + axis_count) range; splice it into the full ONNX shape
    onnx_shape = input.shape[:axis_start] + shape + input.shape[axis_start + axis_count:]

    ONNXOperation(graph=onnx_graph,
                  name='Reshape',
                  inputs=(input, converter.constant_1d_tensor(graph=onnx_graph, list_=onnx_shape, dtype='INT64')),
                  outputs=output)

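# A shape sketch (assuming input.shape == [2, 3, 4, 5], shape == [12], axis_start == 1,
# axis_count == 2): the spliced ONNX shape is [2] + [12] + [5] == [2, 12, 5].
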
def convert_slice(converter, nnef_op, onnx_graph):
    # type: (Converter, NNEFOperation, ONNXGraph)->None

    input, output = converter.converted_tensors((nnef_op.input, nnef_op.output))

    if not nnef_op.attribs['axes']:
        ONNXOperation(graph=onnx_graph, name='Identity', inputs=input, outputs=output)
        return

    axes = list(nnef_op.attribs['axes'])
    starts = list(nnef_op.attribs['begin'])
    # An end value of 0 denotes the end of the dimension; map it to INT32_MAX for ONNX
    ends = list(utils.INT32_MAX if e == 0 else e for e in nnef_op.attribs['end'])

    ONNXOperation(graph=onnx_graph,
                  name='Slice',
                  inputs=input,
                  attribs=dict(axes=axes, starts=starts, ends=ends),
                  outputs=output)