def convert_prelu(converter, nnef_op, onnx_graph):
    # type: (Converter, NNEFOperation, ONNXGraph)->None
    """Convert an NNEF prelu operation into an ONNX PRelu operation."""
    nnef_input, nnef_slope = converter.converted_tensors(nnef_op.inputs)
    result = converter.converted_tensor(nnef_op.output)

    # Strip the leading axis from the slope tensor before handing it to ONNX.
    squeezed_slope = converter.add_squeeze(onnx_graph, nnef_slope, [0])

    ONNXOperation(graph=onnx_graph,
                  name='PRelu',
                  inputs=(nnef_input, squeezed_slope),
                  outputs=result)
def convert_batch_normalization(converter, nnef_op, onnx_graph):
    # type: (Converter, NNEFOperation, ONNXGraph)->None
    """Convert an NNEF batch_normalization op into an ONNX BatchNormalization op."""
    x, mean_t, variance_t, offset_t, scale_t = converter.converted_tensors(nnef_op.inputs)
    result = converter.converted_tensor(nnef_op.output)

    if x.rank < 3:  # Caffe2 BatchNorm works only for rank >= 3
        # Pad the input up to rank 3 with trailing singleton axes, run the op on
        # the padded tensor, then squeeze the extra axes back off the output.
        pad_axes = list(range(x.rank, 3))
        x = converter.add_unsqueeze(onnx_graph=onnx_graph, onnx_tensor=x, axes=pad_axes)
        final_result = result
        result = ONNXTensor(graph=onnx_graph, shape=list(x.shape), dtype=x.dtype)
        converter.add_squeeze(onnx_graph=onnx_graph,
                              onnx_tensor=result,
                              axes=pad_axes,
                              squeezed_tensor=final_result)

    def strip_leading_axis(tensor):
        # The NNEF stat tensors carry a leading axis that ONNX does not expect.
        return converter.add_squeeze(onnx_graph, tensor, [0])

    ONNXOperation(graph=onnx_graph,
                  name='BatchNormalization',
                  inputs=(x,
                          strip_leading_axis(scale_t),
                          strip_leading_axis(offset_t),
                          strip_leading_axis(mean_t),
                          strip_leading_axis(variance_t)),
                  attribs=dict(epsilon=nnef_op.attribs['epsilon']),
                  outputs=result)
def partial_convert_conv_deconv(converter, nnef_op, onnx_graph, is_deconv, input, filter, bias, output):
    # type: (Converter, NNEFOperation, ONNXGraph, bool, ONNXTensor, ONNXTensor, ONNXTensor, ONNXTensor)->None
    """Shared conversion body for NNEF conv/deconv-like ops to ONNX Conv/ConvTranspose.

    Fills in default strides/dilations, resolves padding (explicit or
    same-padding when the NNEF 'padding' attrib is empty), computes
    'output_padding' for the deconv case, and emits the ONNX operation.

    :param is_deconv: emit 'ConvTranspose' when True, 'Conv' otherwise.
    :param input, filter, bias, output: already-converted ONNX tensors.
    """
    strides = list(nnef_op.attribs['stride'])
    if not strides:
        strides = [1] * (input.rank - 2)
    dilations = nnef_op.attribs['dilation']
    if not dilations:
        dilations = [1] * (input.rank - 2)
    groups = nnef_op.attribs.get('groups', 1)  # default needed because box does not have groups
    if groups == 0:
        # groups == 0 in NNEF means one group per input channel.
        groups = input.shape[1]

    assert nnef_op.attribs['border'] == 'constant'

    if is_deconv:
        pads = nnef_op.attribs['padding']
        if not pads:  # auto pad
            # BUGFIX: the original comprehension iterated the bare tuple
            # '(input.shape[2:], strides)' — zip() was missing, so it never
            # produced the per-dimension size*stride upscaled extents.
            calc_output_size = [i * s for i, s in zip(input.shape[2:], strides)]
            pads = infer.same_padding(upscaled_input=calc_output_size,
                                      filter=filter.shape[2:],
                                      stride=strides,
                                      dilation=dilations)
        else:
            calc_output_size = infer.conv(input=input.shape,
                                          filter=filter.shape[2:],
                                          padding=pads,
                                          stride=strides,
                                          dilation=dilations,
                                          groups=groups,
                                          output_channels=output.shape[1],
                                          format=infer.Format.NCHW,
                                          deconv=True)[2:]
        output_size = output.shape[2:]
        # ConvTranspose may need extra per-dimension padding to hit the exact
        # declared output size.
        output_padding = [o - c for o, c in zip(output_size, calc_output_size)]
    else:
        pads = nnef_op.attribs['padding']
        if not pads:
            pads = infer.same_padding(upscaled_input=input.shape[2:],
                                      filter=filter.shape[2:],
                                      stride=strides,
                                      dilation=dilations)
        output_padding = [0] * len(pads)

    pads = converter.onnx_pads(pads)

    if bias.is_constant and bias.data == [0.0]:
        # An all-zero constant bias is equivalent to no bias at all.
        inputs = (input, filter)
    else:
        if bias.rank == 2:
            assert bias.shape[0] == 1
            bias = converter.add_squeeze(onnx_graph=onnx_graph, onnx_tensor=bias, axes=[0])
        inputs = (input, filter, bias)

    op = ONNXOperation(
        graph=onnx_graph,
        name='ConvTranspose' if is_deconv else 'Conv',
        inputs=inputs,
        attribs=dict(
            kernel_shape=filter.shape[2:],  # Not mandatory, but Caffe2 fails without this
            strides=strides,
            dilations=dilations,
            pads=pads,
            group=groups),
        outputs=output)

    if is_deconv:
        op.attribs['output_padding'] = output_padding