def generic_convert_binary(converter, nnef_op, onnx_graph, target_name, broadcast_workaround=False):
    # type: (Converter, NNEFOperation, ONNXGraph, str, bool)->None
    """Convert an NNEF binary op to the ONNX op `target_name`.

    With broadcast_workaround=True both operands are explicitly expanded to
    the output shape (the Caffe2 ONNX backend cannot broadcast in Pow);
    otherwise a lower-rank operand is right-padded with singleton dims so
    both operands end up with equal rank.
    """
    lhs, rhs = converter.converted_tensors(nnef_op.inputs)
    result = converter.converted_tensor(nnef_op.output)

    if broadcast_workaround:
        # Caffe2 onnx backend cannot broadcast in Pow
        if lhs.shape != result.shape:
            lhs = converter.add_expand(onnx_graph, lhs, result.shape)
        if rhs.shape != result.shape:
            rhs = converter.add_expand(onnx_graph, rhs, result.shape)
        ONNXOperation(graph=onnx_graph, name=target_name, inputs=(lhs, rhs), outputs=result)
        return

    def padded_to_rank_of(tensor, other):
        # Append trailing singleton dims; 0-rank tensors are left untouched.
        if 0 < tensor.rank < other.rank:
            return converter.add_unsqueeze(onnx_graph, tensor, list(range(tensor.rank, other.rank)))
        return tensor

    ONNXOperation(graph=onnx_graph,
                  name=target_name,
                  inputs=(padded_to_rank_of(lhs, rhs), padded_to_rank_of(rhs, lhs)),
                  outputs=result)
def generic_convert_unary(converter, nnef_op, onnx_graph, target_name, copy_attribs=None):
    # type: (Converter, NNEFOperation, ONNXGraph, str, typing.List[str])->None
    """Convert a single-input NNEF op to the ONNX op `target_name`,
    deep-copying any attributes listed in `copy_attribs` onto the new op."""
    onnx_op = ONNXOperation(graph=onnx_graph,
                            name=target_name,
                            inputs=converter.converted_tensor(nnef_op.input),
                            outputs=converter.converted_tensor(nnef_op.output))
    for attrib_name in (copy_attribs or []):
        onnx_op.attribs[attrib_name] = copy.deepcopy(nnef_op.attribs[attrib_name])
def generic_convert_pool(converter, nnef_op, onnx_graph, target_name):
    # type: (Converter, NNEFOperation, ONNXGraph, str)->None
    """Convert an NNEF pooling op by delegating to partial_convert_pool."""
    pool_input = converter.converted_tensor(nnef_op.input)
    pool_outputs = converter.converted_tensors(nnef_op.outputs)
    partial_convert_pool(converter, nnef_op, onnx_graph, target_name, pool_input, pool_outputs)
def convert_batch_normalization(converter, nnef_op, onnx_graph):
    # type: (Converter, NNEFOperation, ONNXGraph)->None
    """Convert NNEF batch_normalization to ONNX BatchNormalization.

    The mean/variance/offset/scale parameter tensors have a leading axis
    that ONNX does not expect (presumably shape [1, C] — confirm against
    the NNEF spec), so axis 0 is squeezed away from each of them.
    """
    input, mean, variance, offset, scale = converter.converted_tensors(nnef_op.inputs)
    output = converter.converted_tensor(nnef_op.output)
    if input.rank < 3:  # Caffe2 BatchNorm works only for rank >= 3
        # Pad the input to rank 3 with trailing singleton dims, run BatchNorm
        # into an intermediate tensor, and register a Squeeze that brings the
        # intermediate back to the caller-visible output.
        axes = list(range(input.rank, 3))
        input = converter.add_unsqueeze(onnx_graph=onnx_graph, onnx_tensor=input, axes=axes)
        final_output = output
        # `output` is rebound to the rank-3 intermediate; the Squeeze below
        # connects it to the original output tensor.
        output = ONNXTensor(graph=onnx_graph, shape=list(input.shape), dtype=input.dtype)
        converter.add_squeeze(onnx_graph=onnx_graph, onnx_tensor=output, axes=axes,
                              squeezed_tensor=final_output)
    ONNXOperation(graph=onnx_graph,
                  name='BatchNormalization',
                  inputs=(input,
                          converter.add_squeeze(onnx_graph, scale, [0]),
                          converter.add_squeeze(onnx_graph, offset, [0]),
                          converter.add_squeeze(onnx_graph, mean, [0]),
                          converter.add_squeeze(onnx_graph, variance, [0])),
                  attribs=dict(epsilon=nnef_op.attribs['epsilon']),
                  outputs=output)
def generic_convert_binary1_or_binary2(converter, nnef_op, onnx_graph, target_name1, target_name2):
    # type: (Converter, NNEFOperation, ONNXGraph, str, str)->None
    """Lower a binary NNEF op to Or(target_name1(x, y), target_name2(x, y))."""
    lhs, rhs = converter.converted_tensors(nnef_op.inputs)
    result = converter.converted_tensor(nnef_op.output)

    def rank_padded(tensor, other):
        # Right-pad the lower-rank operand with singleton dims (0-rank untouched).
        # Called per emitted op so each gets its own Unsqueeze, as the original did.
        if 0 < tensor.rank < other.rank:
            return converter.add_unsqueeze(onnx_graph, tensor, list(range(tensor.rank, other.rank)))
        return tensor

    partial_outputs = []
    for op_name in (target_name1, target_name2):
        partial = ONNXOperation(
            graph=onnx_graph,
            name=op_name,
            inputs=(rank_padded(lhs, rhs), rank_padded(rhs, lhs)),
            outputs=ONNXTensor(graph=onnx_graph, shape=list(result.shape), dtype=result.dtype))
        partial_outputs.append(partial.output)

    ONNXOperation(graph=onnx_graph,
                  name="Or",
                  inputs=(partial_outputs[0], partial_outputs[1]),
                  outputs=result)
def convert_clamp(converter, nnef_op, onnx_graph):
    # type: (Converter, NNEFOperation, ONNXGraph)->None
    """Convert NNEF clamp. Scalar constant bounds map to a single Clip node;
    tensor bounds are lowered to Max(Min(x, upper), lower)."""
    x, lower, upper = converter.converted_tensors(nnef_op.inputs)
    y = converter.converted_tensor(nnef_op.output)

    scalar_bounds = (lower.is_constant and lower.shape == []
                     and upper.is_constant and upper.shape == [])
    if scalar_bounds:
        ONNXOperation(graph=onnx_graph,
                      name='Clip',
                      inputs=x,
                      attribs=dict(min=lower.data[0], max=upper.data[0]),
                      outputs=y)
        return

    max_rank = max(t.rank for t in [x, lower, upper])

    def broadcast(t):
        # Right-pad with singleton dims up to the common rank (0-rank untouched).
        if 0 < t.rank < max_rank:
            return converter.add_unsqueeze(onnx_graph, t, list(range(t.rank, max_rank)))
        return t

    # Renamed from `min` in the original to avoid shadowing the builtin.
    clipped_above = ONNXOperation(
        graph=onnx_graph,
        name='Min',
        inputs=(broadcast(x), broadcast(upper)),
        outputs=ONNXTensor(graph=onnx_graph, shape=list(y.shape), dtype=y.dtype),
    )
    ONNXOperation(
        graph=onnx_graph,
        name='Max',
        inputs=(clipped_above.output, broadcast(lower)),
        outputs=y,
    )
def generic_convert_conv_deconv(converter, nnef_op, onnx_graph, is_deconv):
    # type: (Converter, NNEFOperation, ONNXGraph, bool)->None
    """Convert NNEF conv/deconv by delegating to partial_convert_conv_deconv."""
    data, kernel, bias = converter.converted_tensors(nnef_op.inputs)
    result = converter.converted_tensor(nnef_op.output)
    partial_convert_conv_deconv(converter, nnef_op, onnx_graph, is_deconv,
                                data, kernel, bias, result)
def convert_unstack(converter, nnef_op, onnx_graph):
    # type: (Converter, NNEFOperation, ONNXGraph)->None
    """Convert NNEF unstack to ONNX Split followed by a Squeeze per part.

    Split keeps the split axis as a singleton dimension on every part, so
    each intermediate must be squeezed on that axis to reach the rank of
    the NNEF output tensors.
    """
    input = converter.converted_tensor(nnef_op.input)
    outputs = converter.converted_tensors(nnef_op.outputs)
    axis = nnef_op.attribs['axis']
    num_parts = input.shape[axis]

    def unsqueeze(shape, axis):
        # Shape of a Split part: the output shape with a 1 re-inserted at `axis`.
        shape = list(shape)
        shape.insert(axis, 1)
        return shape

    intermediates = [
        ONNXTensor(graph=onnx_graph, shape=unsqueeze(output.shape, axis), dtype=output.dtype)
        for output in outputs
    ]
    ONNXOperation(graph=onnx_graph,
                  name='Split',
                  inputs=input,
                  attribs=dict(axis=axis, split=[1] * num_parts),
                  outputs=intermediates)
    for intermediate, output in zip(intermediates, outputs):
        # Fix: the intermediates already carry the singleton split axis, so they
        # must be SQUEEZED down to the output shape. The original called
        # add_unsqueeze (inserting a second singleton) and passed a bare int
        # where every other call site passes a list of axes.
        converter.add_squeeze(onnx_graph, intermediate, [axis], squeezed_tensor=output)
def convert_prelu(converter, nnef_op, onnx_graph):
    # type: (Converter, NNEFOperation, ONNXGraph)->None
    """Convert NNEF prelu to ONNX PRelu, squeezing the slope's leading axis."""
    data, slope = converter.converted_tensors(nnef_op.inputs)
    result = converter.converted_tensor(nnef_op.output)
    squeezed_slope = converter.add_squeeze(onnx_graph, slope, [0])
    ONNXOperation(graph=onnx_graph,
                  name='PRelu',
                  inputs=(data, squeezed_slope),
                  outputs=result)
def convert_copy_n(converter, nnef_op, onnx_graph):
    # type: (Converter, NNEFOperation, ONNXGraph)->None
    """Convert NNEF copy_n: one ONNX Identity per output, all fed by the same input."""
    source = converter.converted_tensor(nnef_op.input)
    for destination in converter.converted_tensors(nnef_op.outputs):
        ONNXOperation(graph=onnx_graph, name='Identity', inputs=source, outputs=destination)
def convert_concat(converter, nnef_op, onnx_graph):
    # type: (Converter, NNEFOperation, ONNXGraph)->None
    """Convert NNEF concat to ONNX Concat; the axis attribute carries over as-is."""
    parts = converter.converted_tensors(nnef_op.inputs)
    result = converter.converted_tensor(nnef_op.output)
    concat_axis = nnef_op.attribs['axis']
    ONNXOperation(graph=onnx_graph,
                  name='Concat',
                  inputs=parts,
                  attribs=dict(axis=concat_axis),
                  outputs=result)
def convert_linear(converter, nnef_op, onnx_graph):
    # type: (Converter, NNEFOperation, ONNXGraph)->None
    """Convert NNEF linear to ONNX Gemm (D = A * B^T + C, hence transB=1)."""
    a, b, c = converter.converted_tensors(nnef_op.inputs)
    d = converter.converted_tensor(nnef_op.output)
    assert a.rank <= 2 and b.rank <= 2 and c.rank <= 2, "Batch matmul is unsupported in ONNX"
    ONNXOperation(graph=onnx_graph,
                  name='Gemm',
                  inputs=(a, b, c),
                  outputs=d,
                  attribs=dict(transA=0, transB=1))
def convert_matmul(converter, nnef_op, onnx_graph):
    # type: (Converter, NNEFOperation, ONNXGraph)->None
    """Convert NNEF matmul to ONNX Gemm with a constant zero bias."""
    a, b = converter.converted_tensors(nnef_op.inputs)
    c = converter.converted_tensor(nnef_op.output)
    assert a.rank <= 2 and b.rank <= 2, "Batch matmul is unsupported in ONNX"
    zero_bias = converter.constant_0d_tensor(graph=onnx_graph, value=0.0, dtype=c.dtype)
    trans_a = 1 if nnef_op.attribs['transposeA'] else 0
    trans_b = 1 if nnef_op.attribs['transposeB'] else 0
    ONNXOperation(graph=onnx_graph,
                  name='Gemm',
                  inputs=(a, b, zero_bias),
                  outputs=c,
                  attribs=dict(transA=trans_a, transB=trans_b))
def convert_stack(converter, nnef_op, onnx_graph):
    # type: (Converter, NNEFOperation, ONNXGraph)->None
    """Convert NNEF stack: unsqueeze each input on the stack axis, then Concat."""
    parts = converter.converted_tensors(nnef_op.inputs)
    result = converter.converted_tensor(nnef_op.output)
    stack_axis = nnef_op.attribs['axis']
    expanded = [converter.add_unsqueeze(onnx_graph, part, [stack_axis]) for part in parts]
    ONNXOperation(graph=onnx_graph,
                  name='Concat',
                  inputs=expanded,
                  attribs=dict(axis=stack_axis),
                  outputs=result)
def convert_select(converter, nnef_op, onnx_graph):
    # type: (Converter, NNEFOperation, ONNXGraph)->None
    """Convert NNEF select to ONNX Where, right-padding lower-rank operands
    with singleton dims so all three inputs share the same rank."""
    condition, when_true, when_false = converter.converted_tensors(nnef_op.inputs)
    result = converter.converted_tensor(nnef_op.output)
    target_rank = max(t.rank for t in (condition, when_true, when_false))

    def to_common_rank(t):
        # 0-rank tensors are deliberately left untouched.
        if 0 < t.rank < target_rank:
            return converter.add_unsqueeze(onnx_graph, t, list(range(t.rank, target_rank)))
        return t

    ONNXOperation(
        graph=onnx_graph,
        name='Where',
        inputs=(to_common_rank(condition), to_common_rank(when_true), to_common_rank(when_false)),
        outputs=result)
def convert_split(converter, nnef_op, onnx_graph):
    # type: (Converter, NNEFOperation, ONNXGraph)->None
    """Convert NNEF split to ONNX Split, turning ratios into absolute sizes."""
    input = converter.converted_tensor(nnef_op.input)
    outputs = converter.converted_tensors(nnef_op.outputs)
    split_axis = nnef_op.attribs['axis']
    ratios = nnef_op.attribs['ratios']
    ratio_total = sum(ratios)
    assert input.shape[split_axis] % ratio_total == 0
    unit = input.shape[split_axis] // ratio_total
    part_sizes = [unit * ratio for ratio in ratios]
    ONNXOperation(graph=onnx_graph,
                  name='Split',
                  inputs=input,
                  attribs=dict(axis=split_axis, split=part_sizes),
                  outputs=outputs)
def convert_desample(converter, nnef_op, onnx_graph):
    # type: (Converter, NNEFOperation, ONNXGraph)->None
    """Convert NNEF desample (max unpooling) to ONNX MaxUnpool.

    NNEF size/padding/stride/dilation attribs cover every dimension
    (batch and channel included); ONNX pools only over spatial dims, so
    the leading two entries must be trivial and are stripped.
    """
    input, indices = converter.converted_tensors(nnef_op.inputs)
    output = converter.converted_tensor(nnef_op.output)

    strides = list(nnef_op.attribs['stride'])
    if not strides:
        strides = [1] * input.rank
    dilations = nnef_op.attribs['dilation']
    if not dilations:
        dilations = [1] * input.rank

    assert nnef_op.attribs['border'] in ['constant', 'ignore']

    pads = nnef_op.attribs['padding']
    if not pads:
        # Auto padding: infer SAME padding from the upscaled input size.
        # Fix: the original wrote `for i, s in (input.shape[2:], strides)` —
        # without zip() this unpacks the two lists themselves rather than
        # pairing their elements (a TypeError/ValueError in general). The
        # full-rank shape must be paired element-wise with the full-rank
        # strides, since the pads[:2] assertion and pads[2:] slice below
        # expect a padding entry for every dimension.
        calc_output_size = [i * s for i, s in zip(input.shape, strides)]
        pads = infer.same_padding(upscaled_input=calc_output_size,
                                  filter=nnef_op.attribs['size'],
                                  stride=strides,
                                  dilation=dilations)
    assert pads[:2] == [(0, 0), (0, 0)], \
        "Padding in batch and channel dimensions is not supported in ONNX"
    pads = pads[2:]
    pads = converter.onnx_pads(pads)

    assert nnef_op.attribs['size'][:2] == strides[:2] == dilations[:2] == [1, 1], \
        'Pooling in batch and channel dimensions is not supported in ONNX'
    strides = strides[2:]
    dilations = dilations[2:]

    assert all(d == 1 for d in dilations), 'Dilation is not supported for pooling in ONNX'

    ONNXOperation(graph=onnx_graph,
                  name='MaxUnpool',
                  inputs=(input, indices),
                  attribs=dict(kernel_shape=nnef_op.attribs['size'][2:],
                               pads=pads,
                               strides=strides),
                  outputs=output)
def convert_ne(converter, nnef_op, onnx_graph):
    # type: (Converter, NNEFOperation, ONNXGraph)->None
    """Convert NNEF ne as Not(Equal(x, y)) — ONNX has no NotEqual node."""
    lhs, rhs = converter.converted_tensors(nnef_op.inputs)
    result = converter.converted_tensor(nnef_op.output)

    def rank_padded(tensor, other):
        # Right-pad the lower-rank operand with singleton dims (0-rank untouched).
        if 0 < tensor.rank < other.rank:
            return converter.add_unsqueeze(onnx_graph, tensor, list(range(tensor.rank, other.rank)))
        return tensor

    equal_op = ONNXOperation(
        graph=onnx_graph,
        name='Equal',
        inputs=(rank_padded(lhs, rhs), rank_padded(rhs, lhs)),
        outputs=ONNXTensor(graph=onnx_graph, shape=list(result.shape), dtype=result.dtype))
    ONNXOperation(graph=onnx_graph, name="Not", inputs=equal_op.output, outputs=result)