Example #1
def _convert_conv(converter: ONNXConverter, onnx_op: INodeProto):
    x = converter.get_variable(onnx_op.input[0])
    x.order.unify(OrderNCHW)

    w = converter.get_variable(onnx_op.input[1])
    w.order.unify(Order([Axis.N, Axis.C, Axis.KH, Axis.KW]))

    attrs = attribute_dict(onnx_op)
    ksize = list(attrs["kernel_shape"].ints)
    dilations = list(attrs["dilations"].ints)
    stride = list(attrs["strides"].ints)

    pad = list(attrs["pads"].ints)
    if any(pad[2 * i] != pad[2 * i + 1] for i in range(len(pad) // 2)):
        raise NotImplementedError(
            "[ONNXConverter] odd-size padding is not supported.")
    pad = [pad[0], pad[2]]

    y, = Convolution2D(None,
                       ksize=ksize,
                       stride=stride,
                       padding=pad,
                       dilation_rate=dilations)(x, w)
    y.change_order(OrderNCHW)

    if len(onnx_op.input) == 3:
        # with bias
        b = converter.get_variable(onnx_op.input[2])
        b.order.unify(OrderC)
        y = y + b

    converter.set_variable(onnx_op.output[0], y)
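The padding check above appears to assume the begin/end pads are interleaved per spatial axis, i.e. pads = [h_begin, h_end, w_begin, w_end]; under that assumption it rejects asymmetric ("odd-size") padding and keeps one value per axis. A minimal standalone sketch of the same check, using plain lists rather than converter types:

def symmetric_padding(pads):
    # reject any axis whose begin pad differs from its end pad
    if any(pads[2 * i] != pads[2 * i + 1] for i in range(len(pads) // 2)):
        raise NotImplementedError("asymmetric padding is not supported")
    return [pads[0], pads[2]]  # one value per spatial axis: [pad_h, pad_w]

assert symmetric_padding([1, 1, 2, 2]) == [1, 2]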
Example #2
File: nn.py Project: hubertsgithub/webdnn
def _convert_max_pool(converter: ONNXConverter, onnx_op: INodeProto):
    x = converter.get_variable(onnx_op.input[0])
    x.order.unify(OrderNCHW)

    attrs = attribute_dict(onnx_op)
    ksize = list(attrs["kernel_shape"].ints)
    if "dilations" in attrs.keys():
        dilations = list(attrs["dilations"].ints)
    else:
        #print("\ndilations attribute not found for \n{}\n Setting to [1].".format(onnx_op))
        dilations = [1]
    if any(d != 1 for d in dilations):
        raise NotImplementedError(
            "[ONNXConverter] MaxPool is supported only when dilations are 1.")

    stride = list(attrs["strides"].ints)

    pad = list(attrs["pads"].ints)
    if len(pad) == 2:
        # NOTE:
        # In PyTorch, pads is generated as a tuple of 2 integers, but the ONNX
        # spec says pads contains 2*N integers, where N is the number of padded
        # dimensions. This is probably a PyTorch bug.
        pass

    else:
        if any(pad[2 * i] != pad[2 * i + 1] for i in range(len(pad) // 2)):
            raise NotImplementedError(
                "[ONNXConverter] odd-size padding is not supported.")
        pad = [pad[0], pad[2]]

    y, = MaxPooling2D(None, ksize=ksize, stride=stride, padding=pad)(x)
    converter.set_variable(onnx_op.output[0], y)
Example #3
File: nn.py Project: steerapi/webdnn
def _convert_batch_normalization(converter: ONNXConverter,
                                 onnx_op: INodeProto):
    x = converter.get_variable(onnx_op.input[0])
    x.order.axes[0].unify(Axis.N)
    x.order.axes[1].unify(Axis.C)

    gamma = converter.get_variable(onnx_op.input[1])
    gamma.order.unify(OrderC)

    beta = converter.get_variable(onnx_op.input[2])
    beta.order.unify(OrderC)

    attrs = attribute_dict(onnx_op)
    eps = attrs["epsilon"].f

    if len(onnx_op.input) == 5:
        mean = converter.get_variable(onnx_op.input[3])
        mean.order.unify(OrderC)

        variance = converter.get_variable(onnx_op.input[4])
        variance.order.unify(OrderC)

    elif len(onnx_op.input) == 3:
        mean = 0 if onnx_op.running_mean is None else ConstantVariable(
            onnx_op.running_mean, OrderC)
        variance = 1 if onnx_op.running_var is None else ConstantVariable(
            onnx_op.running_var, OrderC)

    else:
        raise ValueError(
            "Number of inputs to BatchNormalizationFunction must be 3 or 5.")

    y = (x - mean) / ((variance + eps)**0.5) * gamma + beta
    converter.set_variable(onnx_op.output[0], y)
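The closing expression is the usual batch-normalization formula, y = gamma * (x - mean) / sqrt(variance + eps) + beta. A quick NumPy check of the same arithmetic on toy per-channel statistics (illustrative names only, not converter API):

import numpy as np

x = np.array([[1.0, 2.0], [3.0, 4.0]])          # (N, C)
mean, variance = x.mean(axis=0), x.var(axis=0)  # per-channel statistics
gamma, beta, eps = 1.0, 0.0, 1e-5

y = (x - mean) / ((variance + eps) ** 0.5) * gamma + beta
assert np.allclose(y.mean(axis=0), 0.0)          # zero mean per channel
assert np.allclose(y.std(axis=0), 1.0, atol=1e-4)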
Example #4
def _convert_gemm(converter: ONNXConverter, onnx_op: INodeProto):
    A = converter.get_variable(onnx_op.input[0])
    B = converter.get_variable(onnx_op.input[1])
    C = converter.get_variable(onnx_op.input[2])

    attrs = attribute_dict(onnx_op)
    alpha = attrs["alpha"].f
    beta = attrs["beta"].f
    broadcast = attrs.get("broadcast", 0)

    axis_a = A.order.axes[0 if ("transA" in attrs and attrs["transA"].i) else 1]
    axis_b = B.order.axes[1 if ("transB" in attrs and attrs["transB"].i) else 0]
    y, = Tensordot(None, axes=(axis_a, axis_b))(A, B)

    if broadcast:
        check_broadcast_constraints(y, C)
    else:
        y.order.unify(C.order)

    y = alpha * y + beta * C

    converter.set_variable(onnx_op.output[0], y)
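The axes passed to Tensordot mirror Gemm's transpose flags: with transA set, A is contracted over axis 0 (A is stored transposed); with transB set, B is contracted over axis 1. A NumPy sketch of the transA=1, transB=0 case, for reference:

import numpy as np

A = np.random.rand(3, 4)  # transA=1: contract over axis 0, so y uses A.T
B = np.random.rand(3, 5)  # transB=0: contract over axis 0
C = np.random.rand(4, 5)
alpha, beta = 2.0, 0.5

y = alpha * np.tensordot(A, B, axes=(0, 0)) + beta * C
assert np.allclose(y, alpha * (A.T @ B) + beta * C)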
Example #5
def _convert_slice(converter: ONNXConverter, onnx_op: INodeProto):
    x = converter.get_variable(onnx_op.input[0])
    attrs = attribute_dict(onnx_op)

    # Attribute layout (Slice-1):
    #   'starts' --> starts.ints
    #   'ends'   --> ends.ints
    #   'axes'   --> axes.ints (optional; defaults to 0..len(starts)-1)
    # https://mil-tokyo.github.io/webdnn/docs/_modules/webdnn/graph/operators/slice.html
    starts = list(attrs["starts"].ints)
    ends = list(attrs["ends"].ints)
    axes = list(attrs["axes"].ints) if "axes" in attrs else list(range(len(starts)))

    # Build one slice per axis of x; axes not listed in "axes" are taken whole.
    slices = {a: slice(s, e) for a, s, e in zip(axes, starts, ends)}
    indices = AxisKeyDict(x.order.axes,
                          [slices.get(i, slice(None)) for i in range(x.ndim)])

    y, = Slice(None, indices)(x)

    converter.set_variable(onnx_op.output[0], y)
Example #6
def _convert_max_pool(converter: ONNXConverter, onnx_op: INodeProto):
    x = converter.get_variable(onnx_op.input[0])
    x.order.unify(OrderNCHW)

    attrs = attribute_dict(onnx_op)
    ksize = list(attrs["kernel_shape"].ints)
    stride = list(attrs["strides"].ints)

    pad = list(attrs["pads"].ints)
    if len(pad) == 2:
        # NOTE:
        # In PyTorch, pads is generated as a tuple of 2 integers, but the ONNX
        # spec says pads contains 2*N integers, where N is the number of padded
        # dimensions. This is probably a PyTorch bug.
        pass

    else:
        if any(pad[2 * i] != pad[2 * i + 1] for i in range(len(pad) // 2)):
            raise NotImplementedError(
                "[ONNXConverter] odd-size padding is not supported.")
        pad = [pad[0], pad[2]]

    # https://github.com/onnx/onnx/blob/master/docs/Operators.md
    # output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1)
    # In PyTorch, nn.MaxPool2d(2) with input size 11 produces output size 5,
    # where kernel_shape=2, pads=0, strides=2 are set as the ONNX attributes.
    # It corresponds to cover_all=False.
    y, = MaxPooling2D(None,
                      ksize=ksize,
                      stride=stride,
                      padding=pad,
                      cover_all=False)(x)
    converter.set_variable(onnx_op.output[0], y)
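The output-shape formula in the comment can be checked directly; with input 11, kernel 2, no padding, and stride 2 it reproduces PyTorch's output size of 5:

import math

def pool_output_size(in_size, kernel, pad_total, stride):
    # floor((input + total_pad - kernel) / stride + 1), i.e. cover_all=False
    return math.floor((in_size + pad_total - kernel) / stride + 1)

assert pool_output_size(11, kernel=2, pad_total=0, stride=2) == 5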
Example #7
def _convert_average_pool(converter: ONNXConverter, onnx_op: INodeProto):
    x = converter.get_variable(onnx_op.input[0])
    x.order.unify(OrderNCHW)

    attrs = attribute_dict(onnx_op)
    ksize = list(attrs["kernel_shape"].ints)
    stride = list(attrs["strides"].ints)

    pad = list(attrs["pads"].ints)
    if len(pad) == 2:
        # NOTE:
        # In PyTorch, pads is generated as a tuple of 2 integers, but the ONNX
        # spec says pads contains 2*N integers, where N is the number of padded
        # dimensions. This is probably a PyTorch bug.
        pass

    else:
        if any(pad[2 * i] != pad[2 * i + 1] for i in range(len(pad) // 2)):
            raise NotImplementedError(
                "[ONNXConverter] odd-size padding is not supported.")
        pad = [pad[0], pad[2]]

    y, = AveragePooling2D(None,
                          ksize=ksize,
                          stride=stride,
                          padding=pad,
                          cover_all=False)(x)
    converter.set_variable(onnx_op.output[0], y)
Example #8
def _convert_reshape(converter: ONNXConverter, onnx_op: INodeProto):
    x = converter.get_variable(onnx_op.input[0])
    if converter.opset_version >= 5:
        # The output shape is specified by onnx_op.input[1],
        # which has to be a ConstantVariable.
        # TODO: test for different operator set versions
        shape_var = converter.get_variable(onnx_op.input[1])
        assert isinstance(
            shape_var, ConstantVariable
        ), "Shape specifier of Reshape operator has to be constant."
        out_shape = [int(d) for d in shape_var.data]
    else:
        # Reshape-1
        attrs = attribute_dict(onnx_op)
        out_shape = [
            r if s == 0 else s for r, s in zip(x.shape, attrs["shape"].ints)
        ]

    if -1 in out_shape:
        i = out_shape.index(-1)
        out_shape.remove(-1)
        out_shape.insert(i, x.size // mul(out_shape))

    out_order = Order([None] * len(out_shape))

    y, = Reshape(None,
                 in_order=x.order,
                 out_order=out_order,
                 out_shape=out_shape)(x)
    converter.set_variable(onnx_op.output[0], y)
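The -1 handling resolves the single wildcard dimension from the total element count; `mul` here is assumed to be a product over a sequence. A standalone sketch of the same resolution:

from functools import reduce

def resolve_wildcard(size, out_shape):
    # replace a single -1 entry with the factor that makes the sizes agree
    out_shape = list(out_shape)
    if -1 in out_shape:
        i = out_shape.index(-1)
        out_shape.remove(-1)
        rest = reduce(lambda a, b: a * b, out_shape, 1)  # stand-in for mul()
        out_shape.insert(i, size // rest)
    return out_shape

assert resolve_wildcard(24, [2, -1, 3]) == [2, 4, 3]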
Example #9
File: tensor.py Project: you74674/webdnn
def _convert_squeeze(converter: ONNXConverter, onnx_op: INodeProto):
    x = converter.get_variable(onnx_op.input[0])

    attrs = attribute_dict(onnx_op)
    axes = [x.order.axes[i] for i in attrs["axes"].ints]

    y = x.squeeze(axes)
    converter.set_variable(onnx_op.output[0], y)
Example #10
File: math.py Project: zhangaz1/webdnn
def _convert_softmax(converter: ONNXConverter, onnx_op: INodeProto):
    x = converter.get_variable(onnx_op.input[0])

    attrs = attribute_dict(onnx_op)
    axis = attrs["axis"].i

    y, = Softmax(None, axis=x.order.axes[axis])(x)
    converter.set_variable(onnx_op.output[0], y)
Example #11
File: math.py Project: zhangaz1/webdnn
def _convert_leaky_relu(converter: ONNXConverter, onnx_op: INodeProto):
    x0 = converter.get_variable(onnx_op.input[0])

    attrs = attribute_dict(onnx_op)
    alpha = attrs["alpha"].f

    y, = LeakyRelu(None, slope=alpha)(x0)
    converter.set_variable(onnx_op.output[0], y)
Example #12
def _convert_transpose(converter: ONNXConverter, onnx_op: INodeProto):
    x = converter.get_variable(onnx_op.input[0])
    attrs = attribute_dict(onnx_op)

    y, = Transpose(None)(x)
    perm = list(attrs["perm"].ints if "perm" in attrs else reversed(range(x.ndim)))
    y.change_order(Order([x.order.axes[i] for i in perm]))

    converter.set_variable(onnx_op.output[0], y)
Example #13
File: math.py Project: zhangaz1/webdnn
def _convert_min(converter: ONNXConverter, onnx_op: INodeProto):
    x = converter.get_variable(onnx_op.input[0])
    attrs = attribute_dict(onnx_op)
    # Clamp x into [min, max] (ONNX Clip semantics): build broadcastable
    # constants holding the bounds, then apply two Selects.
    max_x = ConstantVariable(np.ones([1] * x.ndim), x.order) * attrs["max"].f
    min_x = ConstantVariable(np.ones([1] * x.ndim), x.order) * attrs["min"].f

    y, = Select(None)(x > max_x, max_x, x)  # clamp above: min(x, max)
    y, = Select(None)(y > min_x, y, min_x)  # clamp below: max(y, min)

    converter.set_variable(onnx_op.output[0], y)
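The two Selects together are an elementwise clamp; in NumPy terms (assuming min <= max):

import numpy as np

x = np.array([-2.0, 0.5, 3.0])
lo, hi = -1.0, 1.0

y = np.where(x > hi, hi, x)  # first Select: clamp above
y = np.where(y > lo, y, lo)  # second Select: clamp below
assert np.allclose(y, np.clip(x, lo, hi))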
Example #14
def _convert_depth_to_space(converter: ONNXConverter, onnx_op: INodeProto):
    x = converter.get_variable(onnx_op.input[0])
    x.order.unify(OrderNCHW)

    attrs = attribute_dict(onnx_op)
    blocksize = attrs["blocksize"].i

    y, = Depth2Space(None, blocksize)(x)

    converter.set_variable(onnx_op.output[0], y)
Example #15
File: math.py Project: zhangaz1/webdnn
def _convert_selu(converter: ONNXConverter, onnx_op: INodeProto):
    x = converter.get_variable(onnx_op.input[0])
    attrs = attribute_dict(onnx_op)
    alpha = attrs["alpha"].f if "alpha" in attrs else 1.6732
    gamma = attrs["gamma"].f if "gamma" in attrs else 1.0507

    y, = Select(None)(x > 0, gamma * x,
                      gamma * (alpha * Exp(None)(x)[0] - alpha))

    converter.set_variable(onnx_op.output[0], y)
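The Select expression is the SELU definition, gamma * x for x > 0 and gamma * alpha * (exp(x) - 1) otherwise, with the standard constants defaulted above. A NumPy check:

import numpy as np

alpha, gamma = 1.6732, 1.0507  # the defaults used above

def selu(x):
    return np.where(x > 0, gamma * x, gamma * (alpha * np.exp(x) - alpha))

assert np.isclose(selu(np.array([2.0]))[0], 2 * gamma)
assert abs(selu(np.array([-50.0]))[0] + gamma * alpha) < 1e-6  # saturates at -gamma*alpha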
Example #16
File: tensor.py Project: you74674/webdnn
def _convert_concat(converter: ONNXConverter, onnx_op: INodeProto):
    xs = [converter.get_variable(v) for v in onnx_op.input]
    for x in xs[1:]:
        xs[0].order.unify(x.order)

    attrs = attribute_dict(onnx_op)
    axis = xs[0].order.axes[attrs["axis"].i]

    y, = Concat(None, axis=axis)(*xs)
    converter.set_variable(onnx_op.output[0], y)
Example #17
def _convert_constant(converter: ONNXConverter, onnx_op: INodeProto):
    attrs = attribute_dict(onnx_op)
    value = attrs["value"].t

    np_type = DataTypeMappingDict[value.data_type]
    if np_type.type is None:
        raise TypeError(f"[ONNXConverter] type \"{np_type.name}\" is not supported")
    data = np.frombuffer(value.raw_data, np_type.type).reshape([1] if len(value.dims) == 0 else value.dims)

    y = ConstantVariable(data, Order([None] * data.ndim))
    converter.set_variable(onnx_op.output[0], y)
Example #18
File: math.py Project: zhangaz1/webdnn
def _convert_elu(converter: ONNXConverter, onnx_op: INodeProto):
    x0 = converter.get_variable(onnx_op.input[0])

    attrs = attribute_dict(onnx_op)
    alpha = attrs["alpha"].f
    if alpha != 1:
        raise NotImplementedError(
            "[ONNXConverter] Operator \"Elu\" is supported only when parameter \"alpha\" is 1."
        )

    y, = Elu(None)(x0)
    converter.set_variable(onnx_op.output[0], y)
Example #19
def _convert_argmin(converter: ONNXConverter, onnx_op: INodeProto):
    x = converter.get_variable(onnx_op.input[0])

    attrs = attribute_dict(onnx_op)
    axis = attrs["axis"].i
    keepdims = (attrs["keepdims"].i if "keepdims" in attrs else 1) == 1
    x, = ArgMin(None, axis=x.order.axes[axis])(x)

    if not keepdims:
        x = x.squeeze(axis=x.order.axes[axis])

    converter.set_variable(onnx_op.output[0], x)
Example #20
def _convert_flatten(converter: ONNXConverter, onnx_op: INodeProto):
    x = converter.get_variable(onnx_op.input[0])

    attrs = attribute_dict(onnx_op)
    axis = attrs["axis"].i if "axis" in attrs else 1

    new_shape = [mul(x.shape[:axis]), mul(x.shape[axis:])]
    new_order = Order([None, None])

    y = x.reshape(shape=new_shape, order=new_order)

    converter.set_variable(onnx_op.output[0], y)
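Flatten folds every dimension before `axis` into the first output dimension and everything from `axis` onward into the second. Checking the shape arithmetic with NumPy:

import numpy as np

x = np.zeros((2, 3, 4, 5))
axis = 1
new_shape = [int(np.prod(x.shape[:axis])), int(np.prod(x.shape[axis:]))]
assert new_shape == [2, 60]
assert x.reshape(new_shape).shape == (2, 60)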
Example #21
def _convert_reduce_prod(converter: ONNXConverter, onnx_op: INodeProto):
    x = converter.get_variable(onnx_op.input[0])

    attrs = attribute_dict(onnx_op)
    axes = attrs["axes"].ints
    keepdims = (attrs["keepdims"].i if "keepdims" in attrs else 1) == 1
    for a in axes:
        x, = Prod(None, axis=x.order.axes[a])(x)

    if not keepdims:
        x = x.squeeze(axis=[x.order.axes[i] for i in axes])

    converter.set_variable(onnx_op.output[0], x)
Example #22
def _convert_squeeze(converter: ONNXConverter, onnx_op: INodeProto):
    x = converter.get_variable(onnx_op.input[0])

    attrs = attribute_dict(onnx_op)
    axes = [x.order.axes[i] for i in attrs["axes"].ints]

    if isinstance(x, ConstantVariable):
        data = x.data
        # Squeeze from the highest axis first so earlier removals don't
        # shift the indices of the axes still to be squeezed.
        for axis in sorted(attrs["axes"].ints, reverse=True):
            data = data.squeeze(axis)
        y = ConstantVariable(data, Order([None] * len(data.shape)))
    else:
        y = x.squeeze(axes)
    converter.set_variable(onnx_op.output[0], y)
Example #23
def _convert_reshape(converter: ONNXConverter, onnx_op: INodeProto):
    x = converter.get_variable(onnx_op.input[0])
    attrs = attribute_dict(onnx_op)
    out_shape = [r if s == 0 else s for r, s in zip(x.shape, attrs["shape"].ints)]

    if -1 in out_shape:
        i = out_shape.index(-1)
        out_shape.remove(-1)
        out_shape.insert(i, x.size // mul(out_shape))

    out_order = Order([None] * len(out_shape))

    y, = Reshape(None, in_order=x.order, out_order=out_order, out_shape=out_shape)(x)
    converter.set_variable(onnx_op.output[0], y)
Example #24
File: math.py Project: zhangaz1/webdnn
def _convert_softmax(converter: ONNXConverter, onnx_op: INodeProto):
    x = converter.get_variable(onnx_op.input[0])

    attrs = attribute_dict(onnx_op)
    axis = attrs["axis"].i if "axis" in attrs else 1
    new_shape = [mul(x.shape[:axis]), mul(x.shape[axis:])]
    new_order = Order([None, None])

    x = x.reshape(shape=new_shape, order=new_order)

    # NOTE: this produces a hardmax-style mask (1 where an element equals the
    # row maximum; ties yield multiple 1s), not a smooth softmax.
    max_x, = Max(None, axis=x.order.axes[1])(x)
    y = x >= max_x

    converter.set_variable(onnx_op.output[0], y)
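Comparing against the per-row maximum yields a one-hot mask, with ties producing multiple ones; a small NumPy illustration:

import numpy as np

x = np.array([[0.1, 0.9, 0.3],
              [2.0, 1.0, 2.0]])   # second row has a tie
y = x >= x.max(axis=1, keepdims=True)
assert (y == [[False, True, False], [True, False, True]]).all()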
Example #25
def _convert_concat(converter: ONNXConverter, onnx_op: INodeProto):
    xs = [converter.get_variable(v) for v in onnx_op.input]
    for x in xs[1:]:
        xs[0].order.unify(x.order)

    attrs = attribute_dict(onnx_op)
    if all(isinstance(x, ConstantVariable) for x in xs):
        # generate actual data as constant
        concat_data = np.concatenate([x.data for x in xs],
                                     axis=attrs["axis"].i)
        y = ConstantVariable(concat_data, xs[0].order)
    else:
        axis = xs[0].order.axes[attrs["axis"].i]

        y, = Concat(None, axis=axis)(*xs)
    converter.set_variable(onnx_op.output[0], y)
Example #26
def _convert_split(converter: ONNXConverter, onnx_op: INodeProto):
    x = converter.get_variable(onnx_op.input[0])

    attrs = attribute_dict(onnx_op)

    axis = x.order.axes[attrs["axis"].i]

    if "split" not in attrs:
        raise NotImplementedError(
            "[ONNXConverter] Operator \"Split\" without \"split\" parameter is not supported yet."
        )
    split = attrs["split"].ints
    sections = np.cumsum(split).tolist()[:-1]

    ys = SplitAxis(None, axis=axis, sections=sections)(x)
    for i, y in enumerate(ys):
        converter.set_variable(onnx_op.output[i], y)
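ONNX's "split" attribute lists the size of each output, while SplitAxis takes cumulative boundary positions; np.cumsum(...)[:-1] converts one to the other:

import numpy as np

split = [2, 3, 5]                          # ONNX "split": size of each output
sections = np.cumsum(split).tolist()[:-1]  # boundaries for SplitAxis: [2, 5]
parts = np.split(np.arange(10), sections)
assert [len(p) for p in parts] == split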
Example #27
def _convert_concat(converter: ONNXConverter, onnx_op: INodeProto):
    xs = [converter.get_variable(v) for v in onnx_op.input]

    for x in xs[1:]:
        xs[0].order.unify(x.order)

    attrs = attribute_dict(onnx_op)
    axis = xs[0].order.axes[attrs["axis"].i]

    if isinstance(xs[0], ConstantVariable):
        data = []
        for x in xs:
            data.append(x.data)
        data = np.concatenate(data, attrs["axis"].i)
        y = ConstantVariable(data, Order([None] * len(data.shape)))
    else:
        y, = Concat(None, axis=axis)(*xs)
    converter.set_variable(onnx_op.output[0], y)
Example #28
def _convert_slice(converter: ONNXConverter, onnx_op: INodeProto):
    x = converter.get_variable(onnx_op.input[0])
    if not isinstance(x, ConstantVariable):
        raise NotImplementedError(
            "[ONNXConverter] Operator \"Slice\" for non-constant variable is not supported yet."
        )

    numpy_slice = [slice(None) for i in range(x.ndim)]
    attrs = attribute_dict(onnx_op)
    starts = attrs["starts"].ints
    ends = attrs["ends"].ints
    # "axes" is optional; per the ONNX spec it defaults to 0..len(starts)-1
    axes = attrs["axes"].ints if "axes" in attrs else range(len(starts))
    for a, s, e in zip(axes, starts, ends):
        numpy_slice[a] = slice(s, e)
    data_sliced = x.data[tuple(numpy_slice)].copy()
    y = ConstantVariable(data_sliced, x.order)

    converter.set_variable(onnx_op.output[0], y)
Example #29
File: math.py Project: zhangaz1/webdnn
def _convert_div(converter: ONNXConverter, onnx_op: INodeProto):
    x0 = converter.get_variable(onnx_op.input[0])
    x1 = converter.get_variable(onnx_op.input[1])

    attrs = attribute_dict(onnx_op)

    if "broadcast" in attrs:
        broadcast = attrs["broadcast"].i
        if broadcast:
            check_broadcast_constraints(
                x0, x1, axis=attrs["axis"].i if "axis" in attrs else None)

        else:
            x0.order.unify(x1.order)
    else:
        x0.order.unify(x1.order)

    y = x0 / x1
    converter.set_variable(onnx_op.output[0], y)
Example #30
def _convert_squeeze(converter: ONNXConverter, onnx_op: INodeProto):
    x = converter.get_variable(onnx_op.input[0])

    attrs = attribute_dict(onnx_op)

    # NOTE: despite the converter's name, this inserts new size-1 axes at the
    # given positions (ONNX Unsqueeze semantics).
    if isinstance(x, ConstantVariable):
        # generate actual data as constant
        new_axes = list(x.order.axes)
        new_data = x.data.copy()
        for i in attrs["axes"].ints:
            new_axes.insert(i, Axis())
            new_data = np.expand_dims(new_data, axis=i)
        y = ConstantVariable(new_data, Order(new_axes))
        y = ConstantVariable(new_data, Order(new_axes))
    else:
        y = x
        for i in attrs["axes"].ints:
            y = y.expand_dims(Axis(), i)

    converter.set_variable(onnx_op.output[0], y)
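Inserting the axes one at a time matches ONNX Unsqueeze when the axes arrive in increasing order, since each later index already accounts for the axes inserted before it. The NumPy equivalent of the constant branch:

import numpy as np

data = np.zeros((3, 4))
for i in [0, 3]:                     # ONNX "axes", in increasing order
    data = np.expand_dims(data, axis=i)
assert data.shape == (1, 3, 4, 1)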