Code example #1
def test_fold_mul_deep():
    """
    before)

    c0 -+
        +{Mul}-h1-+
    c1 -+         +-{Mul}-h2-+
               c2-+          +-{Add}-h4
                          h3-+
    after)

    c0*c1*c2 -+
              +-{Add}-h4
          h3 -+
    """
    c0 = ConstantVariable(np.random.rand(2, 3, 4, 5), OrderNCHW)
    c1 = ConstantVariable(np.random.rand(2, 3, 4, 5), OrderNCHW)

    h1 = c0 * c1
    c2 = ConstantVariable(np.random.rand(2, 3, 4, 5), OrderNCHW)

    h2 = h1 * c2
    h3 = Variable([2, 3, 4, 5], OrderNCHW)

    h4 = h2 + h3

    graph = Graph([h3], [h4])

    ConstantFolding().optimize(graph)

    h2_new = h4.output_from.inputs["x0"]

    assert h2_new is not h2
    assert isinstance(h2_new, ConstantVariable)
    assert np.abs(np.mean(h2_new.data - (c0.data * c1.data * c2.data))) < 1e-5
Code example #2
File: embedding_test.py Project: zhangaz1/webdnn
def template(x_shape=[2, 3],
             feature_size=5,
             vocabulary_size=6,
             x_order=OrderNT,
             w_order=OrderCN,
             y_order=OrderNTC,
             description: str = ""):
    x = Variable(x_shape, order=x_order)

    vx = np.random.randint(low=0,
                           high=vocabulary_size,
                           size=(x.shape_dict[Axis.N],
                                 x.shape_dict[Axis.T]))  # OrderNT
    vw = np.random.rand(vocabulary_size, feature_size)  # OrderCN
    vy = vw[vx]  # OrderNTC

    w = ConstantVariable(vw, order=OrderCN)
    y, = Embedding(None)(x, w)

    x = x.change_order(x_order)
    w = w.change_order(w_order)
    y = y.change_order(y_order)

    generate_kernel_test_case(
        description=f"Embedding {description}",
        backend=["webgpu", "webassembly"],
        graph=Graph([x], [y]),
        inputs={x: vx.transpose([OrderNT.axes_dict[a] for a in x.order.axes])},
        expected={
            y: vy.transpose([OrderNTC.axes_dict[a] for a in y.order.axes])
        })
Code example #3
def test_fold_add():
    """
    before)

    c0 -+
        +{Add}-h1-+
    c1 -+         +-{Add}-h3
              h2-+

    after)

    c0+c1 -+
           +-{Add}-h3
       h2 -+
    """
    c0 = ConstantVariable(np.random.rand(2, 3, 4, 5), OrderNCHW)
    c1 = ConstantVariable(np.random.rand(2, 3, 4, 5), OrderNCHW)

    h1 = c0 + c1
    h2 = Variable([2, 3, 4, 5], OrderNCHW)

    h3 = h1 + h2

    graph = Graph([h2], [h3])

    ConstantFolding().optimize(graph)

    h1_new = h3.output_from.inputs["x0"]

    assert h1_new is not h1
    assert isinstance(h1_new, ConstantVariable)
    assert np.abs(np.mean(h1_new.data - (c0.data + c1.data))) < 1e-5
Code example #4
File: split_variable.py Project: fossabot/hash2face
def _split_axis(v: Variable, axis: Axis, graph):
    """
    split variable by specified axis
    """
    s1 = v.shape_dict[axis] // 2
    s2 = v.shape_dict[axis] - s1

    if isinstance(v, ConstantVariable):
        v_datum = np.split(v.data, [s1], v.order.axes_dict[axis])
        v1 = ConstantVariable(v_datum[0], v.order)
        v2 = ConstantVariable(v_datum[1], v.order)

    else:
        v1 = Variable([s1 if a == axis else v.shape_dict[a] for a in v.order.axes], v.order)
        v2 = Variable([s2 if a == axis else v.shape_dict[a] for a in v.order.axes], v.order)

    ops = list(v.input_to)
    if v.output_from is not None:
        ops += [v.output_from]

    for op in ops:
        if all(isinstance(v, ConstantVariable) for v in op.inputs.values()):
            op.fold_constance(graph)

        elif isinstance(op, Tensordot):
            # NOTE:
            # "_split_tensordot" must be called before "_split_tensorwise".
            #
            # Consider the following case:
            #
            #   A.order = [Axis.X, Axis.Y]
            #   B.order = [Axis.Y, Axis.Z]
            #   C, = Tensordot(None, [Axis.Y, Axis.Z])(A, B)  # -> C.order = [Axis.X, Axis.Y]
            #
            # In this case, the tensordot operator has "Tensorwise[X]" and "Tensorwise[Y]" attributes, because the "Tensordot"
            # operation is tensorwise with respect to each output axis. However, "Axis.Y" is also one of the reduced axes of "A".
            # Therefore, "_split_tensorwise" would incorrectly split "A".
            #
            _split_tensordot(graph, op, v, [v1, v2], axis)

        elif Tensorwise.check_splittable(op, axis):
            _split_tensorwise(graph, op, v, [v1, v2], axis)

        elif isinstance(op, SplitAxis):
            _split_splitaxis(graph, op, v, [v1, v2], axis)

        elif isinstance(op, Concat):
            _split_concat(graph, op, v, [v1, v2], axis)

        elif isinstance(op, Im2Col):
            _split_im2col(graph, op, v, [v1, v2], axis)

        elif isinstance(op, PartialIm2Col):
            _split_partial_im2col(graph, op, v, [v1, v2], axis)

        elif isinstance(op, Reshape):
            _split_reshape(graph, op, v, [v1, v2], axis)

        else:
            raise NotImplementedError(f"Variable is too large to handle in WebGL backend: {v}")
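A quick numpy sketch (independent of WebDNN; shapes and names are illustrative) of the hazard the NOTE above describes: the output's Y axis comes from B, while A's own Y axis is reduced, so a split along the output's Y must split B, not A.

import numpy as np

# A has axes (X, Y); B has axes (Y, Z). Reduce A's Y against B's Z.
A = np.random.rand(2, 3)               # (X=2, Y=3)
B = np.random.rand(4, 3)               # (Y=4, Z=3)
C = np.tensordot(A, B, axes=(1, 1))    # C axes: (X=2, Y=4); Y comes from B

# Splitting C along its Y axis corresponds to splitting B, not A:
B1, B2 = B[:2], B[2:]
C1 = np.tensordot(A, B1, axes=(1, 1))
C2 = np.tensordot(A, B2, axes=(1, 1))
assert np.allclose(np.concatenate([C1, C2], axis=1), C)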
Code example #5
def _convert_batch_normalization(converter: KerasConverter,
                                 k_op: keras.layers.BatchNormalization):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])

    axis = x.order.axes[k_op.axis]

    variance_data, mean_data = K.batch_get_value(
        [k_op.moving_variance, k_op.moving_mean])

    if k_op.scale:
        gamma_data, = K.batch_get_value([k_op.gamma])
    else:
        gamma_data = np.ones_like(variance_data)

    if k_op.center:
        beta_data, = K.batch_get_value([k_op.beta])
    else:
        beta_data = np.zeros_like(mean_data)

    gamma_div_std_data = gamma_data / np.sqrt(variance_data + k_op.epsilon)
    beta_scaled_data = beta_data - mean_data * gamma_div_std_data

    gamma_div_std = ConstantVariable(gamma_div_std_data, Order([axis]))
    beta_scaled = ConstantVariable(beta_scaled_data, Order([axis]))

    y, = AxiswiseScale(None, axis=axis)(x, gamma_div_std)
    y, = AxiswiseBias(None, axis=axis)(y, beta_scaled)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
Code example #6
def test_general():
    for condition_custom in [
        {},
        {"x1_order": OrderNCHW, "x2_order": OrderHWCN}
    ]:
        condition = dict(condition_default)
        condition.update(condition_custom)

        vx1 = np.random.rand(2, 3, 4, 5)
        vx2 = np.random.rand(2, 3, 4, 5)
        vy = vx1 ** vx2

        x1 = Variable(vx1.shape, order=OrderNHWC)
        x2 = Variable(vx2.shape, order=OrderNHWC)
        y = x1 ** x2

        x1.change_order(condition["x1_order"])
        x2.change_order(condition["x2_order"])
        y.change_order(condition["y_order"])

        generate_kernel_test_case(
            description=f"ElementwisePow: " + (", ".join([f"{k}={v}" for k, v in condition_custom.items()])),
            backend=condition["backend"],
            graph=Graph([x1, x2], [y]),
            inputs={
                x1: ConstantVariable(vx1, OrderNHWC).change_order(x1.order).data,
                x2: ConstantVariable(vx2, OrderNHWC).change_order(x2.order).data
            },
            expected={y: ConstantVariable(vy, OrderNHWC).change_order(y.order).data},
            raise_skip=False
        )

    raise SkipTest
Code example #7
File: scalar_pow_test.py Project: xczhanjun/webdnn
def test_general():
    for condition_custom in [
        {},
        {"x_order": OrderNCHW},
    ]:
        condition = dict(condition_default)
        condition.update(condition_custom)

        vx = np.random.rand(2, 3, 4, 5) - 0.5
        vy = vx.copy() ** condition["value"]

        x = Variable(vx.shape, order=OrderNHWC)
        y = x ** condition["value"]  # type: Variable

        x.change_order(condition["x_order"])
        y.change_order(condition["y_order"])

        generate_kernel_test_case(
            description=f"ScalarPow: " + (", ".join([f"{k}={v}" for k, v in condition_custom.items()])),
            backend=condition["backend"],
            graph=Graph([x], [y]),
            inputs={x: ConstantVariable(vx, OrderNHWC).change_order(x.order).data},
            expected={y: ConstantVariable(vy, OrderNHWC).change_order(y.order).data},
            raise_skip=False
        )

    raise SkipTest
Code example #8
File: normalization.py Project: VislaLabs/webdnn-1
def _convert_batch_normalization_function(
        converter: ChainerConverter,
        c_op: "chainer.functions.normalization.batch_normalization.BatchNormalizationFunction"):
    x = converter.get_variable(c_op.inputs[0])
    x.order.axes[0].unify(Axis.N)
    x.order.axes[1].unify(Axis.C)

    gamma = converter.get_variable(c_op.inputs[1])
    gamma.order.unify(OrderC)

    beta = converter.get_variable(c_op.inputs[2])
    beta.order.unify(OrderC)

    if len(c_op.inputs) == 5:
        mean = converter.get_variable(c_op.inputs[3])
        mean.order.unify(OrderC)

        variance = converter.get_variable(c_op.inputs[4])
        variance.order.unify(OrderC)

    elif len(c_op.inputs) == 3:
        mean = 0 if c_op.running_mean is None else ConstantVariable(
            c_op.running_mean, OrderC)
        variance = 1 if c_op.running_var is None else ConstantVariable(
            c_op.running_var, OrderC)

    else:
        raise ValueError(
            "Number of inputs to BatchNormalizationFunction must be 3 or 5.")

    y = (x - mean) / ((variance + c_op.eps)**0.5) * gamma + beta
    converter.set_variable(c_op.outputs[0](), y)
Code example #9
File: keras.py Project: zilongzhong/webdnn
    def convert_layer_batchnormalization(self, layer_config: Dict[str, object], inputs: List[Variable]) -> List[
        Variable]:
        """
        Example:
 {'class_name': 'BatchNormalization',
  'config': {'axis': 3,
   'beta_constraint': None,
   'beta_initializer': {'class_name': 'Zeros', 'config': {}},
   'beta_regularizer': None,
   'center': True,
   'epsilon': 0.001,
   'gamma_constraint': None,
   'gamma_initializer': {'class_name': 'Ones', 'config': {}},
   'gamma_regularizer': None,
   'momentum': 0.99,
   'moving_mean_initializer': {'class_name': 'Zeros', 'config': {}},
   'moving_variance_initializer': {'class_name': 'Ones', 'config': {}},
   'name': 'bn2a_branch2a',
   'scale': True,
   'trainable': True},
  'inbound_nodes': [[['res2a_branch2a', 0, 0, {}]]],
  'name': 'bn2a_branch2a'},

        :param layer_config: layer configuration dictionary
        :param inputs: list of input variables
        :return: list of output variables
        """
        assert len(inputs) == 1
        input = inputs[0]
        name: str = layer_config["name"]

        axis = input.order.axes[layer_config["axis"]]

        mean = self.weights[f"{name}/{name}/moving_mean:0"].value
        variance = self.weights[f"{name}/{name}/moving_variance:0"].value

        if layer_config["scale"]:
            gamma = self.weights[f"{name}/{name}/gamma:0"].value
        else:
            gamma = np.ones_like(variance)

        if layer_config["center"]:
            beta = self.weights[f"{name}/{name}/beta:0"].value
        else:
            beta = np.zeros_like(mean)

        # (x - mean) / sqrt(var + eps) * gamma + beta
        # gamma_div_std = gamma / sqrt(var + eps)
        # beta_scaled = beta - mean * gamma_div_std
        # y = x * gamma_div_std + beta_scaled

        gamma_div_std = gamma / np.sqrt(variance + layer_config["epsilon"])
        beta_scaled = beta - mean * gamma_div_std

        scale_opr = AxiswiseScale(name + "_scale", axis=axis)
        bias_opr = AxiswiseBias(name + "_bias", axis=axis)
        scale_out, = scale_opr(input, ConstantVariable(gamma_div_std, OrderC))
        y, = bias_opr(scale_out, ConstantVariable(beta_scaled, OrderC))

        return [y]
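The comment block above records the algebra behind the conversion: the four batch-normalization parameters fold into one scale and one bias. A standalone numpy check of that identity (not WebDNN API):

import numpy as np

x = np.random.rand(2, 5).astype(np.float32)
mean, variance = np.random.rand(5), np.random.rand(5)
gamma, beta, eps = np.random.rand(5), np.random.rand(5), 1e-3

gamma_div_std = gamma / np.sqrt(variance + eps)
beta_scaled = beta - mean * gamma_div_std

y_direct = (x - mean) / np.sqrt(variance + eps) * gamma + beta
y_folded = x * gamma_div_std + beta_scaled
assert np.allclose(y_direct, y_folded, atol=1e-5)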
Code example #10
File: softplus_test.py Project: xczhanjun/webdnn
def test_general():
    for condition_custom in [
        {},
        {"x_order": OrderNCHW},
    ]:
        condition = dict(condition_default)
        condition.update(condition_custom)
        beta = condition["beta"]

        vx = np.random.rand(2, 3, 4, 5) - 0.5
        vy = np.log(np.exp(vx * beta) + 1.0) / beta

        x = Variable(vx.shape, order=OrderNHWC)
        y, = Softplus(None, beta=beta)(x)

        x.change_order(condition["x_order"])
        y.change_order(condition["y_order"])

        generate_kernel_test_case(
            description=f"Softplus: " + (", ".join([f"{k}={v}" for k, v in condition_custom.items()])),
            backend=condition["backend"],
            graph=Graph([x], [y]),
            inputs={x: ConstantVariable(vx, OrderNHWC).change_order(x.order).data},
            expected={y: ConstantVariable(vy, OrderNHWC).change_order(y.order).data},
            raise_skip=False
        )

    raise SkipTest
Code example #11
def convert_odd_padding_to_concat(x: Variable,
                                  padding: Sequence[Tuple[int, int]],
                                  value: float = 0.0):
    # WebDNN currently does not support asymmetric padding (begin != end), so emulate it with Concat.
    for i, ((pad_begin, pad_end),
            axis) in enumerate(zip(padding, (Axis.H, Axis.W))):
        if pad_begin != pad_end:
            xs = []
            if pad_begin > 0:
                data = np.full([
                    pad_begin if a == axis else x.shape_dict[a]
                    for a in x.order.axes
                ],
                               value,
                               dtype=np.float32)
                xs.append(ConstantVariable(data, x.order))

            xs.append(x)

            if pad_end > 0:
                data = np.full([
                    pad_end if a == axis else x.shape_dict[a]
                    for a in x.order.axes
                ],
                               value,
                               dtype=np.float32)
                xs.append(ConstantVariable(data, x.order))

            if len(xs) > 1:
                x, = Concat(None, axis=axis)(*xs)

            padding = tuple(
                (0, 0) if j == i else padding[j] for j in range(len(padding)))

    return x, tuple(p[0] for p in padding)
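The conversion above relies on the identity that padding one side of an axis equals concatenating a constant-filled block on that side. A small numpy sketch of the identity (illustrative only):

import numpy as np

x = np.random.rand(2, 3)
pad_begin, pad_end, value = 1, 2, 0.0

begin_block = np.full((pad_begin, x.shape[1]), value)
end_block = np.full((pad_end, x.shape[1]), value)
via_concat = np.concatenate([begin_block, x, end_block], axis=0)

via_pad = np.pad(x, ((pad_begin, pad_end), (0, 0)), constant_values=value)
assert np.allclose(via_concat, via_pad)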
Code example #12
File: im2col_test.py Project: zilongzhong/webdnn
def test_wide_stride_CNHW():
    v_im, v_col = generate_data_212()

    col_dummy = ConstantVariable(v_col, order=OrderNHWC)
    col_dummy.change_order(OrderCNHW)

    im = Variable(v_im.shape, order=OrderNHWC)

    col_wasm, = WasmIm2Col(None, ksize=2, padding=1, stride=2)(im)
    col_wasm.change_order(OrderCNHW)

    col_webgpu, = WebGPUIm2Col(None, ksize=2, padding=1, stride=2)(im)
    col_webgpu.change_order(OrderCNHW)

    generate_kernel_test_case(description=f"Im2Col output=CNHW stride=2",
                              backend=["webassembly"],
                              graph=Graph([im], [col_wasm]),
                              inputs={im: v_im},
                              expected={col_wasm: col_dummy.data},
                              raise_skip=False)

    generate_kernel_test_case(description=f"Im2Col output=CNHW stride=2",
                              backend=["webgpu"],
                              graph=Graph([im], [col_webgpu]),
                              inputs={im: v_im},
                              expected={col_webgpu: col_dummy.data})
Code example #13
def test_conv_bias():
    for order_x, order_w in itertools.product(orders4, orders4):
        conv = Convolution2D(None, ksize=3, stride=1, padding=1)
        bias = AxiswiseBias(None, axis=Axis.C)

        x = Variable([8, 7, 6, 5], OrderNHWC)
        x.change_order(order_x)

        w_shape = [4, 3, 3, 5]
        w = ConstantVariable(arange_shaped(w_shape), OrderNHWC)
        w.change_order(order_w)
        w_data = w.data.copy()

        h, = conv(x, w)

        b_shape = [h.shape_dict[Axis.C]]
        b = ConstantVariable(arange_shaped(b_shape), OrderC)
        b_data = b.data.copy()

        y, = bias(h, b)

        graph = Graph([x], [y])

        graph, _ = ConcatAffine().optimize(graph)

        w_data_expected = w_data
        b_data_expected = b_data

        ops = listup_operators(graph)
        assert len(ops) == 2 and isinstance(
            ops[0], Convolution2D) and isinstance(ops[1], AxiswiseBias)
        assert np.all(np.equal(ops[0].inputs["w"].data, w_data_expected))
        assert np.all(np.equal(ops[1].inputs["b"].data, b_data_expected))
Code example #14
File: keras.py Project: zilongzhong/webdnn
    def convert_layer_dense(self, layer_config: Dict[str, object], inputs: List[Variable]) -> List[Variable]:
        assert len(inputs) == 1
        input = inputs[0]
        name: str = layer_config["name"]
        weight_array = self.weights[f"{name}/{name}/kernel:0"].value
        weight_var = ConstantVariable(weight_array, OrderCN)  # shape: (in, out)
        linear_opr = Linear(name)
        y, = linear_opr(input, weight_var)

        if layer_config["use_bias"]:
            bias_array = self.weights[f"{name}/{name}/bias:0"].value
            bias_var = ConstantVariable(bias_array, OrderC)
            bias_opr = AxiswiseBias(name + "_bias", Axis.C)
            y, = bias_opr(y, bias_var)

        act_opr: Operator = None
        activation_type: str = layer_config["activation"]
        if activation_type == "relu":
            act_opr = Relu(name + "_activation")
        elif activation_type == "softmax":
            warn("omitting softmax activation")
        else:
            raise NotImplementedError(f"Unknown activation {activation_type}")

        if act_opr is not None:
            y, = act_opr(y)

        return [y]
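In plain numpy terms, the conversion above composes Dense(x) = relu(x @ W + b) from Linear, AxiswiseBias and Relu. A sketch, assuming the kernel is stored with shape (in, out) as the OrderCN comment indicates:

import numpy as np

x = np.random.rand(2, 3)   # batch of 2, 3 input features
W = np.random.rand(3, 4)   # kernel, shape (in, out)
b = np.random.rand(4)

y = np.maximum(x @ W + b, 0.0)   # Linear -> bias -> relu
assert y.shape == (2, 4)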
Code example #15
def test_conv_scale():
    for order_x, order_w in itertools.product(orders4, orders4):
        conv = Convolution2D(None, ksize=3, stride=1, padding=1)
        scale = AxiswiseScale(None, axis=Axis.C)

        x = Variable([8, 7, 6, 5], OrderNHWC)
        x.change_order(order_x)

        w_shape = [4, 3, 3, 5]
        w = ConstantVariable(arange_shaped(w_shape), OrderNHWC)
        w.change_order(order_w)
        w_data = w.data.copy()

        h, = conv(x, w)

        s_shape = [h.shape_dict[Axis.C]]
        s = ConstantVariable(arange_shaped(s_shape), OrderC)
        s_data = s.data.copy()

        y, = scale(h, s)

        graph = Graph([x], [y])

        graph, _ = ConcatAffine().optimize(graph)

        # noinspection PyTypeChecker
        expander = (None, ) * order_w.axes_dict[Axis.N] + (
            Ellipsis, ) + (None, ) * (3 - order_w.axes_dict[Axis.N])
        w_data_expected = w_data * s_data[expander]

        ops = listup_operators(graph)
        assert len(ops) == 1 and isinstance(ops[0], Convolution2D)
        assert conv.outputs["y"] == y
        assert np.all(np.equal(w.data, w_data_expected))
Code example #16
def test_nobias():
    link = chainer.links.Convolution2D(4,
                                       10,
                                       ksize=3,
                                       stride=1,
                                       pad=1,
                                       nobias=True)

    vx = chainer.Variable(np.random.rand(2, 4, 6, 8).astype(np.float32))
    vy = link(vx)

    graph = ChainerConverter().convert_from_inout_vars([vx], [vy])

    x = graph.inputs[0]
    y = graph.outputs[0]

    generate_kernel_test_case(
        description=f"[chainer] L.Convolution2D(nobias=True)",
        graph=graph,
        inputs={
            x: ConstantVariable(vx.data, OrderNCHW).change_order(x.order).data
        },
        expected={
            y: ConstantVariable(vy.data, OrderNCHW).change_order(y.order).data
        })
Code example #17
File: hard_sigmoid_test.py Project: zhangaz1/webdnn
def test_general():
    for condition_custom in [{}, {"x_order": OrderNCHW}]:
        condition = dict(condition_default)
        condition.update(condition_custom)

        vx = np.random.rand(2, 3, 4, 5) - 0.5
        vy = np.clip(vx * 0.2 + 0.5, 0.0, 1.0)

        x = Variable(vx.shape, order=OrderNHWC)
        y, = HardSigmoid(None)(x)

        x.change_order(condition["x_order"])
        y.change_order(condition["y_order"])

        generate_kernel_test_case(
            description=f"HardSigmoid: " +
            (", ".join([f"{k}={v}" for k, v in condition_custom.items()])),
            backend=condition["backend"],
            graph=Graph([x], [y]),
            inputs={
                x: ConstantVariable(vx, OrderNHWC).change_order(x.order).data
            },
            expected={
                y: ConstantVariable(vy, OrderNHWC).change_order(y.order).data
            },
            raise_skip=False)

    raise SkipTest
Code example #18
File: nn.py Project: steerapi/webdnn
def _convert_batch_normalization(converter: ONNXConverter,
                                 onnx_op: INodeProto):
    x = converter.get_variable(onnx_op.input[0])
    x.order.axes[0].unify(Axis.N)
    x.order.axes[1].unify(Axis.C)

    gamma = converter.get_variable(onnx_op.input[1])
    gamma.order.unify(OrderC)

    beta = converter.get_variable(onnx_op.input[2])
    beta.order.unify(OrderC)

    attrs = attribute_dict(onnx_op)
    eps = attrs["epsilon"].f

    if len(onnx_op.input) == 5:
        mean = converter.get_variable(onnx_op.input[3])
        mean.order.unify(OrderC)

        variance = converter.get_variable(onnx_op.input[4])
        variance.order.unify(OrderC)

    elif len(onnx_op.input) == 3:
        # An ONNX node carries no running statistics, so assume the
        # defaults (mean = 0, variance = 1).
        mean = 0
        variance = 1

    else:
        raise ValueError(
            "Number of inputs to BatchNormalization must be 3 or 5.")

    y = (x - mean) / ((variance + eps)**0.5) * gamma + beta
    converter.set_variable(onnx_op.output[0], y)
Code example #19
def test_change_order_with_compression():
    d1 = np.arange(3 * 4).reshape((3, 1, 1, 4))
    v = ConstantVariable(d1, OrderNHWC)
    v.change_order(OrderCN)
    d2 = np.rollaxis(d1, 0, 4)

    assert v.order == OrderCN
    assert np.all(v.data.flatten() == d2.flatten())
Code example #20
def test_change_order_with_expansion():
    d1 = np.arange(3 * 4).reshape((3, 4))
    v = ConstantVariable(d1, OrderNC)
    v.change_order(OrderCHWN)
    d2 = np.rollaxis(d1, 0, 2)

    assert v.order == OrderCHWN
    assert np.all(v.data.flatten() == d2.flatten())
Code example #21
    def fold_constance(self, graph: Graph):
        x = self.inputs["x"]  # type: ConstantVariable
        y = self.outputs["y"]

        new_y = ConstantVariable(np.tile(x.data, self.multiplier), x.order)
        new_y.change_order(y.order)
        OptimizeRule.replace_variable(graph, y, new_y)
        self.remove_all()
Code example #22
File: reshape.py Project: VislaLabs/webdnn-1
    def fold_constance(self, graph: Graph):
        x = self.inputs["x"]  # type: ConstantVariable
        y = self.outputs["y"]
        self.remove_all()

        y_new = ConstantVariable(x.data, x.order).change_order(self.in_order)
        y_new = ConstantVariable(y_new.data.reshape(self.out_shape), self.out_order).change_order(y.order)
        OptimizeRule.replace_variable(graph, y, y_new)
Code example #23
    def fold_constance(self, graph: Graph):
        x0 = self.inputs["x0"]  # type: ConstantVariable
        y = self.outputs["y"]

        new_y = ConstantVariable(1 / np.sqrt(x0.data), x0.order)
        new_y.change_order(y.order)
        OptimizeRule.replace_variable(graph, y, new_y)
        self.remove_all()
Code example #24
File: scalar_pow.py Project: zhangaz1/webdnn
    def fold_constance(self, graph: Graph):
        x0 = self.inputs["x0"]  # type: ConstantVariable
        y = self.outputs["y"]
        self.remove_all()

        y_new = ConstantVariable(x0.data, x0.order).change_order(y.order)
        y_new.data = y_new.data**self.value
        OptimizeRule.replace_variable(graph, y, y_new)
Code example #25
def pad_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])

    paddings = converter.get_variable(tf_op.inputs[1])
    if not isinstance(paddings, ConstantVariable):
        raise NotImplementedError(
            '[TensorFlowConverter] Pad with dynamic padding size is not supported'
        )

    paddings = paddings.data.astype(int).tolist()

    if x.order.check_same_axes(OrderNHWC) and all([
            paddings[x.order.axes_dict[Axis.N]][0] ==
            paddings[x.order.axes_dict[Axis.N]][1] == 0,
            paddings[x.order.axes_dict[Axis.H]][0]
            == paddings[x.order.axes_dict[Axis.H]][1],
            paddings[x.order.axes_dict[Axis.W]][0]
            == paddings[x.order.axes_dict[Axis.W]][1],
            paddings[x.order.axes_dict[Axis.C]][0] ==
            paddings[x.order.axes_dict[Axis.C]][1] == 0
    ]):
        # Padding for only spatial axes: use ZeroPadding2D
        y, = ZeroPadding2D(None,
                           padding=(paddings[x.order.axes_dict[Axis.H]][0],
                                    paddings[x.order.axes_dict[Axis.W]][0]))(x)

    else:
        # General case: Use Concat
        for axis, (pad_begin, pad_end) in zip(x.order.axes, paddings):
            xs = []

            if pad_begin > 0:
                xs.append(
                    ConstantVariable(
                        np.zeros([
                            pad_begin if a is axis else x.shape_dict[a]
                            for a in x.order.axes
                        ]), x.order))

            xs.append(x)

            if pad_end > 0:
                xs.append(
                    ConstantVariable(
                        np.zeros([
                            pad_end if a is axis else x.shape_dict[a]
                            for a in x.order.axes
                        ]), x.order))

            if len(xs) > 1:
                x, = Concat(None, axis=axis)(*xs)

        y = x

    converter.set_variable(tf_op.outputs[0], y)
Code example #26
def test_change_order():
    d1 = np.arange(2 * 3 * 4 * 5).reshape((2, 3, 4, 5))

    v = ConstantVariable(d1, OrderNHWC)
    v.change_order(OrderHWNC)

    d2 = np.rollaxis(d1, 0, 3)

    assert v.order == OrderHWNC
    assert np.all(v.data == d2)
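The change_order tests above all follow one rule: the data is transposed so that its axes appear in the target order (size-1 axes can be dropped or inserted without changing the flattened layout). A hypothetical numpy helper illustrating the rule for the case where both orders share all axes, as in test_change_order:

import numpy as np

def change_order(data, src_axes, dst_axes):
    # Transpose so the shared axes appear in the destination order.
    perm = [src_axes.index(a) for a in dst_axes if a in src_axes]
    return np.transpose(data, perm)

d1 = np.arange(2 * 3 * 4 * 5).reshape((2, 3, 4, 5))  # NHWC
d2 = change_order(d1, list("NHWC"), list("HWNC"))
assert np.all(d2 == np.rollaxis(d1, 0, 3))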
Code example #27
File: math.py Project: zhangaz1/webdnn
def _convert_min(converter: ONNXConverter, onnx_op: INodeProto):
    x = converter.get_variable(onnx_op.input[0])
    attrs = attribute_dict(onnx_op)
    max_x = ConstantVariable(np.ones([1] * x.ndim), x.order) * attrs["max"].f
    min_x = ConstantVariable(np.ones([1] * x.ndim), x.order) * attrs["min"].f

    y, = Select(None)(x > max_x, max_x, x)
    y, = Select(None)(y > min_x, y, min_x)

    converter.set_variable(onnx_op.output[0], y)
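Despite its name, _convert_min above implements clipping, assuming Select(cond, a, b) has np.where semantics as its usage suggests. A standalone numpy sketch of the equivalence:

import numpy as np

x = np.random.rand(2, 3) * 4 - 2
min_v, max_v = -1.0, 1.0

y = np.where(x > max_v, max_v, x)  # clip from above
y = np.where(y > min_v, y, min_v)  # clip from below
assert np.allclose(y, np.clip(x, min_v, max_v))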
Code example #28
File: reinterpret_axis.py Project: zhangaz1/webdnn
    def fold_constance(self, graph: "graph.Graph"):
        x = self.inputs["x"]  # type: ConstantVariable
        y = self.outputs["y"]
        self.remove_all()

        y_new = ConstantVariable(
            x.data,
            Order([
                self.out_order.axes[self.in_order.axes.index(a)]
                for a in x.order.axes
            ]))
        OptimizeRule.replace_variable(graph, y, y_new.change_order(y.order))
Code example #29
    def fold_constance(self, graph: Graph):
        x = self.inputs["x"]  # type: ConstantVariable
        y = self.outputs["y"]

        remained_axes_in_x_order = [a for a in x.order.axes if a in y.order.axes]
        new_axes = [a for a in y.order.axes if a not in x.order.axes]
        slices = [self.indices[a] for a in x.order.axes] + [None] * len(new_axes)

        new_y = ConstantVariable(x.data[tuple(slices)], Order(remained_axes_in_x_order + new_axes))
        new_y.change_order(y.order)
        OptimizeRule.replace_variable(graph, y, new_y)
        self.remove_all()
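The slices list above indexes each existing axis with its slice and appends None for every axis that exists only in the output; in numpy, None inserts a new size-1 axis. A minimal illustration:

import numpy as np

x = np.arange(24).reshape(4, 6)
slices = [slice(1, 3), slice(0, 6, 2)] + [None]  # one new axis
y = x[tuple(slices)]
assert y.shape == (2, 3, 1)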
Code example #30
File: sum.py Project: fossabot/hash2face
    def fold_constance(self, graph: Graph):
        x = self.inputs["x"]  # type: ConstantVariable
        y = self.outputs["y"]

        new_axes = list(x.order.axes)
        new_axes.remove(self.axis)
        new_y = ConstantVariable(
            np.sum(x.data, axis=x.order.axes_dict[self.axis]), Order(new_axes))

        new_y.change_order(y.order)

        OptimizeRule.replace_variable(graph, y, new_y)
        self.remove_all()