def _convert_conv(converter: ONNXConverter, onnx_op: INodeProto):
    x = converter.get_variable(onnx_op.input[0])
    x.order.unify(OrderNCHW)

    w = converter.get_variable(onnx_op.input[1])
    w.order.unify(Order([Axis.N, Axis.C, Axis.KH, Axis.KW]))

    attrs = attribute_dict(onnx_op)
    ksize = list(attrs["kernel_shape"].ints)
    # "dilations" is optional in ONNX; default to 1 for each spatial axis.
    dilations = list(attrs["dilations"].ints) if "dilations" in attrs else [1, 1]
    stride = list(attrs["strides"].ints)

    # ONNX lays out "pads" as [x1_begin, x2_begin, ..., x1_end, x2_end, ...].
    # Only symmetric padding (begin == end for each axis) is supported.
    pad = list(attrs["pads"].ints)
    n = len(pad) // 2
    if any(pad[i] != pad[i + n] for i in range(n)):
        raise NotImplementedError(
            "[ONNXConverter] asymmetric padding is not supported.")
    pad = pad[:n]

    y, = Convolution2D(None, ksize=ksize, stride=stride, padding=pad,
                       dilation_rate=dilations)(x, w)
    y.change_order(OrderNCHW)

    if len(onnx_op.input) == 3:
        # with bias
        b = converter.get_variable(onnx_op.input[2])
        b.order.unify(OrderC)
        y = y + b

    converter.set_variable(onnx_op.output[0], y)

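# --- Illustrative sketch (not part of the original converter) ---------------
# Reference output-size formula for ONNX Conv, per Operators.md. The helper
# name `_expected_conv_output_size` is ours and is not used by the converter.
def _expected_conv_output_size(in_size: int, k: int, stride: int,
                               pad_begin: int, pad_end: int,
                               dilation: int = 1) -> int:
    effective_k = (k - 1) * dilation + 1  # dilation spreads the kernel taps
    return (in_size + pad_begin + pad_end - effective_k) // stride + 1

# Example: a 3x3 kernel with stride 1 and padding 1 preserves size 32:
# _expected_conv_output_size(32, 3, 1, 1, 1) == 32
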
def _convert_max_pool(converter: ONNXConverter, onnx_op: INodeProto):
    x = converter.get_variable(onnx_op.input[0])
    x.order.unify(OrderNCHW)

    attrs = attribute_dict(onnx_op)
    ksize = list(attrs["kernel_shape"].ints)

    # "dilations" is optional; when absent, treat every dilation as 1.
    dilations = list(attrs["dilations"].ints) if "dilations" in attrs else [1]
    if any(d != 1 for d in dilations):
        raise NotImplementedError(
            "[ONNXConverter] MaxPool is supported only when dilations are 1.")

    stride = list(attrs["strides"].ints)
    pad = list(attrs["pads"].ints)
    if len(pad) == 2:
        # NOTE:
        # In PyTorch, pads is generated as a tuple of 2 integers, but the ONNX
        # spec says that pads contains 2*N integers where N is the number of
        # padded dimensions. It is probably a PyTorch bug.
        pass
    else:
        # ONNX lays out "pads" as [x1_begin, x2_begin, ..., x1_end, x2_end, ...].
        # Only symmetric padding (begin == end for each axis) is supported.
        n = len(pad) // 2
        if any(pad[i] != pad[i + n] for i in range(n)):
            raise NotImplementedError(
                "[ONNXConverter] asymmetric padding is not supported.")
        pad = pad[:n]

    # https://github.com/onnx/onnx/blob/master/docs/Operators.md
    # output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1)
    #
    # In PyTorch, nn.MaxPool2d(2) with input size 11 produces output size 5,
    # where kernel_shape=2, pads=0, strides=2 is set as ONNX attributes.
    # This corresponds to cover_all=False.
    y, = MaxPooling2D(None, ksize=ksize, stride=stride, padding=pad,
                      cover_all=False)(x)
    converter.set_variable(onnx_op.output[0], y)

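# --- Illustrative sketch (not part of the original converter) ---------------
# Quick check of the floor-based pooling formula quoted above; the helper
# name is ours. With the PyTorch example (input 11, kernel 2, stride 2, no
# padding) it yields 5, matching nn.MaxPool2d(2).
def _expected_pool_output_size(in_size: int, k: int, stride: int,
                               pad_begin: int = 0, pad_end: int = 0) -> int:
    return (in_size + pad_begin + pad_end - k) // stride + 1

# _expected_pool_output_size(11, 2, 2) == (11 - 2) // 2 + 1 == 5
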
def _convert_reshape(converter: ONNXConverter, onnx_op: INodeProto):
    x = converter.get_variable(onnx_op.input[0])

    if converter.opset_version >= 5:
        # The output shape is specified by onnx_op.input[1], which must be a
        # ConstantVariable.
        # TODO: test for different operator set versions
        shape_var = converter.get_variable(onnx_op.input[1])
        assert isinstance(shape_var, ConstantVariable), \
            "Shape specifier of Reshape operator must be constant."
        out_shape = [int(d) for d in shape_var.data]
    else:
        # Reshape-1: the shape is given as an attribute. A value of 0 copies
        # the corresponding input dimension.
        attrs = attribute_dict(onnx_op)
        out_shape = [r if s == 0 else s
                     for r, s in zip(x.shape, attrs["shape"].ints)]

    # A single -1 is inferred so that the total element count is preserved.
    if -1 in out_shape:
        i = out_shape.index(-1)
        out_shape.remove(-1)
        out_shape.insert(i, x.size // mul(out_shape))

    out_order = Order([None] * len(out_shape))
    y, = Reshape(None, in_order=x.order, out_order=out_order,
                 out_shape=out_shape)(x)
    converter.set_variable(onnx_op.output[0], y)

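# --- Illustrative sketch (not part of the original converter) ---------------
# Pure-Python version of the 0 / -1 resolution above, reusing this module's
# `mul` helper; the function name is ours. For an input of shape (2, 3, 4),
# the spec [0, -1] resolves to [2, 12].
def _resolve_reshape_spec(in_shape, spec):
    out = [r if s == 0 else s for r, s in zip(in_shape, spec)]
    if -1 in out:
        i = out.index(-1)
        out.remove(-1)
        out.insert(i, mul(in_shape) // mul(out))
    return out
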
def _convert_average_pool(converter: ONNXConverter, onnx_op: INodeProto):
    x = converter.get_variable(onnx_op.input[0])
    x.order.unify(OrderNCHW)

    attrs = attribute_dict(onnx_op)
    ksize = list(attrs["kernel_shape"].ints)
    stride = list(attrs["strides"].ints)

    pad = list(attrs["pads"].ints)
    if len(pad) == 2:
        # NOTE:
        # In PyTorch, pads is generated as a tuple of 2 integers, but the ONNX
        # spec says that pads contains 2*N integers where N is the number of
        # padded dimensions. It is probably a PyTorch bug.
        pass
    else:
        # Only symmetric padding (begin == end for each axis) is supported;
        # see _convert_conv for the assumed "pads" layout.
        n = len(pad) // 2
        if any(pad[i] != pad[i + n] for i in range(n)):
            raise NotImplementedError(
                "[ONNXConverter] asymmetric padding is not supported.")
        pad = pad[:n]

    y, = AveragePooling2D(None, ksize=ksize, stride=stride, padding=pad,
                          cover_all=False)(x)
    converter.set_variable(onnx_op.output[0], y)

def _convert_gemm(converter: ONNXConverter, onnx_op: INodeProto):
    A = converter.get_variable(onnx_op.input[0])
    B = converter.get_variable(onnx_op.input[1])
    C = converter.get_variable(onnx_op.input[2])

    attrs = attribute_dict(onnx_op)
    alpha = attrs["alpha"].f if "alpha" in attrs else 1.0
    beta = attrs["beta"].f if "beta" in attrs else 1.0
    # Read the .i field explicitly: the AttributeProto itself is truthy even
    # when broadcast=0 is stored in it.
    broadcast = attrs["broadcast"].i if "broadcast" in attrs else 0
    transA = attrs["transA"].i if "transA" in attrs else 0
    transB = attrs["transB"].i if "transB" in attrs else 0

    # Contract the inner axes of (optionally transposed) A and B, so that
    # A' has shape (M, K) and B' has shape (K, N).
    y, = Tensordot(None, axes=(A.order.axes[0 if transA else 1],
                               B.order.axes[1 if transB else 0]))(A, B)

    if broadcast:
        check_broadcast_constraints(y, C)
    else:
        y.order.unify(C.order)

    y = alpha * y + beta * C
    converter.set_variable(onnx_op.output[0], y)

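# --- Illustrative sketch (not part of the original converter) ---------------
# Numpy reference for Gemm semantics, Y = alpha * A' @ B' + beta * C, useful
# for sanity-checking the Tensordot axis selection above. The helper name is
# ours.
def _gemm_reference(A, B, C, alpha=1.0, beta=1.0, transA=0, transB=0):
    A = A.T if transA else A
    B = B.T if transB else B
    return alpha * (A @ B) + beta * C
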
def _convert_slice(converter: ONNXConverter, onnx_op: INodeProto):
    x = converter.get_variable(onnx_op.input[0])
    attrs = attribute_dict(onnx_op)

    starts = list(attrs["starts"].ints)
    ends = list(attrs["ends"].ints)

    # "axes" is optional; when omitted, starts/ends apply to the leading axes
    # in order. Axes that are not listed are kept whole via slice(None).
    if "axes" in attrs:
        axes_ints = list(attrs["axes"].ints)
    else:
        axes_ints = list(range(len(starts)))

    slices = [slice(None)] * x.ndim
    for i, s, e in zip(axes_ints, starts, ends):
        slices[i] = slice(s, e)

    indices = AxisKeyDict(x.order.axes, slices)
    y, = Slice(None, indices)(x)
    converter.set_variable(onnx_op.output[0], y)

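# --- Illustrative sketch (not part of the original converter) ---------------
# Numpy reference for ONNX Slice-1 semantics; the helper name is ours.
# E.g. a (4, 5) array with starts=[1], ends=[3], axes=[0] yields shape (2, 5).
def _slice_reference(data, starts, ends, axes=None):
    axes = list(range(len(starts))) if axes is None else list(axes)
    index = [slice(None)] * data.ndim
    for a, s, e in zip(axes, starts, ends):
        index[a] = slice(s, e)
    return data[tuple(index)]
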
def _convert_mean(converter: ONNXConverter, onnx_op: INodeProto):
    # NOTE: the final division by the input count makes this the ONNX "Mean"
    # operator, not "Sum"; the function is named accordingly.
    xs = [converter.get_variable(proto) for proto in onnx_op.input]

    while len(xs) > 1:
        check_broadcast_constraints(xs[0], xs[1])
        xs.append(xs.pop(0) + xs.pop(0))

    converter.set_variable(onnx_op.output[0], xs[0] / len(onnx_op.input))

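# --- Illustrative check (not from the original source) ----------------------
# The pairwise loop reduces [x0, x1, x2, ...] to (((x0 + x1) + x2) + ...),
# so the result equals an elementwise numpy mean over the stacked inputs:
#     np.mean(np.stack([x0, x1, x2]), axis=0)
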
def _convert_pow(converter: ONNXConverter, onnx_op: INodeProto):
    x0 = converter.get_variable(onnx_op.input[0])
    x1 = converter.get_variable(onnx_op.input[1])
    check_broadcast_constraints(x0, x1)
    y = x0 ** x1
    converter.set_variable(onnx_op.output[0], y)

def _convert_leaky_relu(converter: ONNXConverter, onnx_op: INodeProto):
    x0 = converter.get_variable(onnx_op.input[0])
    attrs = attribute_dict(onnx_op)
    alpha = attrs["alpha"].f
    y, = LeakyRelu(None, slope=alpha)(x0)
    converter.set_variable(onnx_op.output[0], y)

def _convert_softmax(converter: ONNXConverter, onnx_op: INodeProto):
    x = converter.get_variable(onnx_op.input[0])
    attrs = attribute_dict(onnx_op)
    # "axis" is optional and defaults to 1 in the ONNX spec.
    axis = attrs["axis"].i if "axis" in attrs else 1
    y, = Softmax(None, axis=x.order.axes[axis])(x)
    converter.set_variable(onnx_op.output[0], y)

def _convert_transpose(converter: ONNXConverter, onnx_op: INodeProto):
    x = converter.get_variable(onnx_op.input[0])
    attrs = attribute_dict(onnx_op)
    y, = Transpose(None)(x)
    # When "perm" is omitted, the ONNX default is to reverse the axes.
    perm = list(attrs["perm"].ints if "perm" in attrs
                else reversed(range(x.ndim)))
    y.change_order(Order([x.order.axes[i] for i in perm]))
    converter.set_variable(onnx_op.output[0], y)

def _convert_unsqueeze(converter: ONNXConverter, onnx_op: INodeProto):
    x = converter.get_variable(onnx_op.input[0])

    # NOTE: only insertion of a single new axis at position 0 is handled here;
    # other values of the "axes" attribute are rejected explicitly instead of
    # silently producing a wrong shape.
    attrs = attribute_dict(onnx_op)
    axes = list(attrs["axes"].ints) if "axes" in attrs else [0]
    if axes != [0]:
        raise NotImplementedError(
            "[ONNXConverter] Operator \"Unsqueeze\" is supported only when a "
            "single new axis is inserted at position 0.")

    if isinstance(x, ConstantVariable):
        data = np.expand_dims(x.data, 0)
        y = ConstantVariable(data, Order([None] * len(data.shape)))
    else:
        y = x.expand_dims(Axis())

    converter.set_variable(onnx_op.output[0], y)

def _convert_selu(converter: ONNXConverter, onnx_op: INodeProto):
    x = converter.get_variable(onnx_op.input[0])
    attrs = attribute_dict(onnx_op)
    # Truncated forms of the ONNX default coefficients.
    alpha = attrs["alpha"].f if "alpha" in attrs else 1.6732
    gamma = attrs["gamma"].f if "gamma" in attrs else 1.0507
    # selu(x) = gamma * x                     (x >  0)
    #         = gamma * alpha * (e^x - 1)     (x <= 0)
    y, = Select(None)(x > 0, gamma * x,
                      gamma * (alpha * Exp(None)(x)[0] - alpha))
    converter.set_variable(onnx_op.output[0], y)

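# --- Illustrative sketch (not part of the original converter) ---------------
# Numpy reference for the SELU formula implemented above; the helper name is
# ours.
def _selu_reference(x, alpha=1.6732, gamma=1.0507):
    return np.where(x > 0, gamma * x, gamma * alpha * (np.exp(x) - 1.0))
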
def _convert_clip(converter: ONNXConverter, onnx_op: INodeProto):
    # NOTE: this clamps x into [min, max] using the "min"/"max" attributes,
    # i.e. it implements the ONNX "Clip" operator; the function is named
    # accordingly.
    x = converter.get_variable(onnx_op.input[0])
    attrs = attribute_dict(onnx_op)
    max_x = ConstantVariable(np.ones([1] * x.ndim), x.order) * attrs["max"].f
    min_x = ConstantVariable(np.ones([1] * x.ndim), x.order) * attrs["min"].f
    y, = Select(None)(x > max_x, max_x, x)
    y, = Select(None)(y > min_x, y, min_x)
    converter.set_variable(onnx_op.output[0], y)

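# --- Illustrative sketch (not part of the original converter) ---------------
# Numpy reference for the two Select passes above (clamp to max, then to
# min); the helper name is ours. Equivalent to np.clip(x, min_value,
# max_value) when min_value <= max_value.
def _clip_reference(x, min_value, max_value):
    return np.maximum(np.minimum(x, max_value), min_value)
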
def _convert_depth_to_space(converter: ONNXConverter, onnx_op: INodeProto):
    x = converter.get_variable(onnx_op.input[0])
    x.order.unify(OrderNCHW)
    attrs = attribute_dict(onnx_op)
    blocksize = attrs["blocksize"].i
    y, = Depth2Space(None, blocksize)(x)
    converter.set_variable(onnx_op.output[0], y)

def _convert_constant(converter: ONNXConverter, onnx_op: INodeProto):
    attrs = attribute_dict(onnx_op)
    value = attrs["value"].t

    np_type = DataTypeMappingDict[value.data_type]
    if np_type.type is None:
        raise TypeError(
            f"[ONNXConverter] type \"{np_type.name}\" is not supported")

    # NOTE: assumes the tensor payload is serialized in raw_data; typed fields
    # such as float_data are not handled here. Scalars are widened to shape [1].
    data = np.frombuffer(value.raw_data, np_type.type) \
        .reshape([1] if len(value.dims) == 0 else value.dims)
    y = ConstantVariable(data, Order([None] * data.ndim))
    converter.set_variable(onnx_op.output[0], y)

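# --- Illustrative note (not from the original source) -----------------------
# raw_data is a little-endian byte string; e.g. a single float32 of 1.0:
#     np.frombuffer(b"\x00\x00\x80?", np.float32)  ->  array([1.], dtype=float32)
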
def _convert_flatten(converter: ONNXConverter, onnx_op: INodeProto):
    x = converter.get_variable(onnx_op.input[0])
    attrs = attribute_dict(onnx_op)
    axis = attrs["axis"].i if "axis" in attrs else 1
    new_shape = [mul(x.shape[:axis]), mul(x.shape[axis:])]
    new_order = Order([None, None])
    y = x.reshape(shape=new_shape, order=new_order)
    converter.set_variable(onnx_op.output[0], y)

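# --- Illustrative note (not from the original source) -----------------------
# Flatten collapses the dims before `axis` and from `axis` onward into a 2-D
# matrix: shape (2, 3, 4, 5) with axis=2 becomes (2*3, 4*5) = (6, 20), and
# axis=0 yields (1, 120) since the empty product is 1.
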
def _convert_elu(converter: ONNXConverter, onnx_op: INodeProto):
    x0 = converter.get_variable(onnx_op.input[0])
    attrs = attribute_dict(onnx_op)
    alpha = attrs["alpha"].f
    if alpha != 1:
        raise NotImplementedError(
            "[ONNXConverter] Operator \"Elu\" is supported only when "
            "parameter \"alpha\" is 1.")
    y, = Elu(None)(x0)
    converter.set_variable(onnx_op.output[0], y)

def _convert_min(converter: ONNXConverter, onnx_op: INodeProto):
    xs = [converter.get_variable(v) for v in onnx_op.input]

    while len(xs) > 1:
        x0 = xs.pop(0)
        x1 = xs.pop(0)
        check_broadcast_constraints(x0, x1)
        y, = Select(None)(x0 > x1, x1, x0)
        xs.append(y)

    converter.set_variable(onnx_op.output[0], xs[0])

def _convert_argmin(converter: ONNXConverter, onnx_op: INodeProto):
    x = converter.get_variable(onnx_op.input[0])
    attrs = attribute_dict(onnx_op)
    # "axis" defaults to 0 and "keepdims" to 1 in the ONNX spec.
    axis = attrs["axis"].i if "axis" in attrs else 0
    keepdims = (attrs["keepdims"].i if "keepdims" in attrs else 1) == 1

    x, = ArgMin(None, axis=x.order.axes[axis])(x)
    if not keepdims:
        x = x.squeeze(axis=x.order.axes[axis])

    converter.set_variable(onnx_op.output[0], x)

def _convert_reduce_prod(converter: ONNXConverter, onnx_op: INodeProto):
    x = converter.get_variable(onnx_op.input[0])
    attrs = attribute_dict(onnx_op)
    axes = attrs["axes"].ints
    keepdims = (attrs["keepdims"].i if "keepdims" in attrs else 1) == 1

    # Reduced axes are kept with size 1, so later axis indices stay valid.
    for a in axes:
        x, = Prod(None, axis=x.order.axes[a])(x)

    if not keepdims:
        x = x.squeeze(axis=[x.order.axes[i] for i in axes])

    converter.set_variable(onnx_op.output[0], x)

def _convert_global_max_pool(converter: ONNXConverter, onnx_op: INodeProto):
    x = converter.get_variable(onnx_op.input[0])
    if x.ndim == 4:
        x.order.unify(OrderNCHW)

    # Fold all spatial axes into one, then take the max over it.
    reduction_size = mul(x.shape[2:])
    reduction_axis = Axis()
    x = x.reshape([x.shape[0], x.shape[1], reduction_size],
                  Order([x.order.axes[0], x.order.axes[1], reduction_axis]))
    y, = Max(None, axis=reduction_axis)(x)
    converter.set_variable(onnx_op.output[0], y)

def _convert_hardmax(converter: ONNXConverter, onnx_op: INodeProto):
    # NOTE: this computes x >= max(x), i.e. the ONNX "Hardmax" operator, not
    # Softmax; the function is named accordingly.
    x = converter.get_variable(onnx_op.input[0])
    attrs = attribute_dict(onnx_op)
    axis = attrs["axis"].i if "axis" in attrs else 1

    # Flatten to a 2-D matrix around `axis`, as the spec prescribes.
    new_shape = [mul(x.shape[:axis]), mul(x.shape[axis:])]
    new_order = Order([None, None])
    x = x.reshape(shape=new_shape, order=new_order)

    max_x, = Max(None, axis=x.order.axes[1])(x)
    y = x >= max_x
    converter.set_variable(onnx_op.output[0], y)

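# --- Illustrative sketch (not part of the original converter) ---------------
# Numpy reference mirroring the flatten-then-compare logic above; the helper
# name is ours. Note that, like the converter, ties yield 1 in every maximal
# position, whereas the ONNX spec marks only the first occurrence.
def _hardmax_reference(x, axis=1):
    flat = x.reshape(int(np.prod(x.shape[:axis])), -1)
    return (flat >= flat.max(axis=1, keepdims=True)).astype(x.dtype)
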
def _convert_squeeze(converter: ONNXConverter, onnx_op: INodeProto):
    x = converter.get_variable(onnx_op.input[0])
    attrs = attribute_dict(onnx_op)
    axes = [x.order.axes[i] for i in attrs["axes"].ints]

    if isinstance(x, ConstantVariable):
        # Fold the squeeze into the constant data directly. Squeeze all axes
        # at once so that earlier removals do not shift later indices.
        data = np.squeeze(x.data, axis=tuple(attrs["axes"].ints))
        y = ConstantVariable(data, Order([None] * data.ndim))
    else:
        y = x.squeeze(axes)

    converter.set_variable(onnx_op.output[0], y)

def _convert_concat(converter: ONNXConverter, onnx_op: INodeProto):
    xs = [converter.get_variable(v) for v in onnx_op.input]
    for x in xs[1:]:
        xs[0].order.unify(x.order)
    attrs = attribute_dict(onnx_op)

    if all(isinstance(x, ConstantVariable) for x in xs):
        # All inputs are constant: generate the concatenated data at
        # conversion time.
        concat_data = np.concatenate([x.data for x in xs],
                                     axis=attrs["axis"].i)
        y = ConstantVariable(concat_data, xs[0].order)
    else:
        axis = xs[0].order.axes[attrs["axis"].i]
        y, = Concat(None, axis=axis)(*xs)

    converter.set_variable(onnx_op.output[0], y)

def _convert_split(converter: ONNXConverter, onnx_op: INodeProto):
    x = converter.get_variable(onnx_op.input[0])
    attrs = attribute_dict(onnx_op)
    axis = x.order.axes[attrs["axis"].i]

    if "split" not in attrs:
        raise NotImplementedError(
            "[ONNXConverter] Operator \"Split\" without \"split\" parameter "
            "is not supported yet.")
    split = attrs["split"].ints

    # SplitAxis expects cut positions, not section sizes; drop the last
    # cumulative sum since it is just the total length.
    sections = np.cumsum(split).tolist()[:-1]
    ys = SplitAxis(None, axis=axis, sections=sections)(x)

    for i, y in enumerate(ys):
        converter.set_variable(onnx_op.output[i], y)

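# --- Illustrative note (not from the original source) -----------------------
# Per-output sizes vs. cut positions: split = [2, 3, 5] gives
# sections = [2, 5], matching numpy, where np.split(np.arange(10), [2, 5])
# returns pieces of sizes 2, 3 and 5.
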