def BatchNormalization(self, func):
    """Convert an NNabla BatchNormalization function to an ONNX node.

    NNabla orders the side inputs as (x, beta, gamma, mean, variance)
    while ONNX expects (x, scale, bias, mean, variance), so the inputs
    are reordered before building the node.  Only inference mode
    (batch_stat=False) is supported.

    Returns a single-element list containing the ONNX node.
    """
    bnp = func.batch_normalization_param
    reorder = [0, 2, 1, 3, 4]
    if len(func.input) != len(reorder):
        raise ValueError(
            "The number of BatchNormalization input must be {}".format(
                len(reorder)))
    bn_inputs = [func.input[idx] for idx in reorder]
    if bnp.batch_stat:
        # Batch normalization for training is currently not supported
        raise ValueError("BatchNormalization with batch_stat=True is "
                         "currently not supported for ONNX conversion")
    # Substitute the ONNX defaults when the NNabla parameters are unset (0.0).
    eps = bnp.eps if bnp.eps != 0.0 else 1e-5
    momentum = bnp.decay_rate if bnp.decay_rate != 0.0 else 0.9
    node = onnx.helper.make_node(
        'BatchNormalization',
        bn_inputs,
        func.output,
        is_test=True,
        epsilon=eps,
        momentum=momentum
        # spatial=1 is omitted; differs from the ONNX spec default.
    )
    # ONNX requires scale/bias/mean/variance to be 1-D, so collapse each
    # parameter shape to its single non-unit dimension (0 if all dims are 1).
    for param_name in func.input[1:]:
        flat_dim = sum(d for d in self._var_dict[param_name].dim if d > 1)
        new_shape = nnabla_pb2.Shape()
        new_shape.dim.extend([flat_dim])
        self._var_dict[param_name] = new_shape
    return [node]
def DepthwiseConvolution(self, func):
    """Convert NNabla DepthwiseConvolution to an ONNX grouped Conv node.

    Depthwise convolution is expressed in ONNX as a regular Conv with
    ``group`` set so that each group covers ``multiplier`` output
    channels; the stored weight shape is rewritten accordingly.

    Returns a single-element list containing the ONNX node.
    """
    cp = func.depthwise_convolution_param
    x_shape = list(self._var_dict[func.input[0]].dim)
    w_dims = list(self._var_dict[func.input[1]].dim)
    y_shape = list(self._var_dict[func.output[0]].dim)
    # Sanity-check the channel relationship implied by the multiplier.
    assert x_shape[cp.base_axis] * \
        cp.multiplier == y_shape[cp.base_axis]
    assert w_dims[0] == x_shape[cp.base_axis] * cp.multiplier
    group = int(y_shape[cp.base_axis] / cp.multiplier)
    # Rewrite the weight shape to ONNX Conv layout:
    # (out_channels, in_channels_per_group, kH, kW).
    w_dims = [int(w_dims[0] / cp.multiplier),
              int(w_dims[0] / group),
              w_dims[1],
              w_dims[2]]
    new_w_shape = nnabla_pb2.Shape()
    new_w_shape.dim.extend(w_dims)
    self._var_dict[func.input[1]] = new_w_shape
    ratio = y_shape[cp.base_axis] / x_shape[cp.base_axis]
    assert ratio == cp.multiplier, "Invalid input/output shape!"
    node = onnx.helper.make_node(
        'Conv',
        func.input,
        func.output,
        kernel_shape=w_dims[2:],
        dilations=cp.dilation.dim,
        strides=cp.stride.dim,
        # NNabla keeps one pad per spatial axis; ONNX wants begin+end pads.
        pads=cp.pad.dim[:] * 2,
        group=group
    )
    return [node]
def Deconvolution(self, func):
    """Convert NNabla Deconvolution to an ONNX ConvTranspose node.

    Only the 2-D, group=1, dilation=[1, 1] case is handled.  When a bias
    input is present it is reshaped to (1, C, 1, 1) and applied through a
    broadcasting Add node after the ConvTranspose.

    Returns the list of generated ONNX nodes (one or two).
    """
    conv_out = fork_name(func.output[0])
    dp = func.deconvolution_param
    x_dims = self._var_dict[func.input[0]].dim
    if len(x_dims) != 4:
        raise ValueError("Currently, the input shape != 4 dims is not supported "
                         "by most of ConvTranspose function implementation.")
    w_dims = self._var_dict[func.input[1]].dim
    if len(w_dims) != 4:
        raise ValueError("Currently, the weight shape != 4 dims is not supported "
                         "by most of ConvTranspose function implementation.")
    kernel_shape = w_dims[2:]
    strides = dp.stride.dim
    pads = dp.pad.dim
    # ONNX requires (x1_b, x2_b, x1_e, x2_e) style
    pads = [pads[0], pads[1], pads[0], pads[1]]
    if dp.dilation.dim != [1, 1]:
        raise ValueError("Currently, dilation != [1, 1] is not supported "
                         "by most of ConvTranspose function implementation.")
    if dp.group != 1:
        raise ValueError("Currently, group != 1 is not supported "
                         "by most of ConvTranspose function implementation.")
    has_bias = len(func.input) > 2
    if not has_bias:
        return [onnx.helper.make_node(
            "ConvTranspose",
            func.input,
            func.output,
            pads=pads,
            strides=strides,
            kernel_shape=kernel_shape,
            name=func.name
        )]
    # Reshape the bias so that Add can broadcast it across H and W.
    bias_dims = self._var_dict[func.input[2]].dim
    bias_shape = nnabla_pb2.Shape()
    bias_shape.dim.extend([1, bias_dims[0], 1, 1])
    self._var_dict[func.input[2]] = bias_shape
    transpose_node = onnx.helper.make_node(
        "ConvTranspose",
        [func.input[0], func.input[1]],
        [conv_out],
        pads=pads,
        strides=strides,
        kernel_shape=kernel_shape,
        name=func.name
    )
    bias_node = onnx.helper.make_node(
        "Add",
        [conv_out, func.input[2]],
        func.output,
        broadcast=1,
        name=func.name + "_add_bias"
    )
    return [transpose_node, bias_node]
def replace_negative_size_with_batch_size(shape, batch_size):
    """Replace all dimensions with negative values to batch size.

    NNabla encodes the batch dimension as a negative size; this returns a
    new Shape protobuf with every negative entry substituted by
    ``batch_size``, leaving other dimensions untouched.
    """
    dims = [batch_size if d < 0 else d for d in shape.dim]
    out_shape = nnabla_pb2.Shape()
    out_shape.dim.extend(dims)
    return out_shape
def BatchNormalization(self, func):
    """Convert an NNabla BatchNormalization function to an ONNX node.

    Inputs are reordered from NNabla's (x, beta, gamma, mean, variance)
    to ONNX's (x, scale, bias, mean, variance).  When batch_stat is set
    (training-style normalization over the current batch), the function
    is mapped to InstanceNormalization using only (x, scale, bias);
    otherwise a regular BatchNormalization node is emitted.

    Returns a single-element list containing the ONNX node.
    """
    bnp = func.batch_normalization_param
    reorder = [0, 2, 1, 3, 4]
    if len(func.input) != len(reorder):
        raise ValueError(
            "The number of BatchNormalization input must be {}".format(len(reorder)))
    # ONNX requires the per-channel parameters to be 1-D, so collapse each
    # parameter shape to its single non-unit dimension (0 if all dims are 1).
    for param_name in func.input[1:]:
        flat_dim = sum(d for d in self._var_dict[param_name].dim if d > 1)
        new_shape = nnabla_pb2.Shape()
        new_shape.dim.extend([flat_dim])
        self._var_dict[param_name] = new_shape
    if bnp.batch_stat:
        # Per-batch statistics: approximate with InstanceNormalization,
        # which only consumes (x, scale, bias).
        node = onnx.helper.make_node(
            'InstanceNormalization',
            [func.input[idx] for idx in reorder[:3]],
            [func.output[0]],
            epsilon=1e-5
        )
    else:
        # Substitute ONNX defaults when the NNabla parameters are unset (0.0).
        eps = bnp.eps if bnp.eps != 0.0 else 1e-5
        momentum = bnp.decay_rate if bnp.decay_rate != 0.0 else 0.9
        node = onnx.helper.make_node(
            'BatchNormalization',
            [func.input[idx] for idx in reorder],
            [func.output[0]],
            is_test=True,
            epsilon=eps,
            momentum=momentum
            # spatial=1 is omitted: onnx.helper reports "Don't know map
            # unexpected argument spatial." — differs from the ONNX spec.
        )
    return [node]