Example #1
def test_scalar_sub2():
    x = Variable([5, 5], OrderNC)
    h = fn(x)
    y = h - 1

    graph = Graph([x], [y])

    flag_changed = True
    while flag_changed:
        graph, flag_changed = RemoveNoEffectOperator().optimize(graph)

    ops = listup_operators(graph)
    assert len(ops) == 2
Example #2
def test_scalar_pow2():
    x = Variable([5, 5], OrderNC)
    h = fn(x)
    y = h**2

    graph = Graph([x], [y])

    flag_changed = True
    while flag_changed:
        graph, flag_changed = RemoveUnnecessaryOperator().optimize(graph)

    ops = listup_operators(graph)
    assert len(ops) == 2
Example #3
def test_scalar_affine2():
    x = Variable([5, 5], OrderNC)
    h = fn(x)
    y, = ScalarAffine(None, scale=2, bias=0)(h)

    graph = Graph([x], [y])

    flag_changed = True
    while flag_changed:
        graph, flag_changed = RemoveNoEffectOperator().optimize(graph)

    ops = listup_operators(graph)
    assert len(ops) == 2
Example #4
    def convert(self, inputs: List["chainer.Variable"],
                outputs: List["chainer.Variable"]) -> Graph:
        """convert(inputs, outputs)

        Convert a Chainer computational graph into WebDNN IR.

        Args:
            inputs(list of chainer.Variable): input chainer variables
            outputs(list of chainer.Variable): output chainer variables

        .. admonition:: Example

            .. code::

                model = chainer.links.model.vision.resnet.ResNet50Layers()

                # Forward propagation with dummy input to build computational graph
                x = chainer.Variable(np.empty((1, 3, 224, 224), dtype=np.float32))
                y = model(x, layers=["fc6"])["fc6"]

                graph = ChainerConverter().convert_from_inout_vars([x], [y])

        Returns:
            (:class:`~webdnn.Graph`): WebDNN Graph
        """
        chainer_graph = chainer.computational_graph.build_computational_graph(
            outputs)

        # In Chainer v2, variables are represented as Variable and VariableNode objects, and
        # graph information such as edge connections is contained in the variable node.
        # Therefore all Chainer variables must be normalized into variable nodes.
        c_vars = list(
            map(
                _to_variable_node,
                filter(lambda v: isinstance(v, VariableNode),
                       chainer_graph.nodes)))  # type: List[VariableNode]
        inputs = [_to_variable_node(v) for v in inputs]
        outputs = [_to_variable_node(v) for v in outputs]

        for c_var in c_vars:
            if c_var.creator is None:
                # If :code:`creator is None` and it is not an input variable, it is a parameter.
                self._convert_var(c_var, constant=c_var not in inputs)

        for c_opr in _listup_functions(inputs, outputs):
            self._convert_operator(c_opr)

        graph = Graph([self.get_variable(c_var) for c_var in inputs],
                      [self.get_variable(c_var) for c_var in outputs])

        return graph
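
The helper `_to_variable_node` used above is not shown in these examples. A minimal sketch, assuming Chainer v2's API in which a `chainer.Variable` exposes its underlying node via the `node` attribute, might look like this:

import chainer
from chainer.variable import VariableNode

def _to_variable_node(c_var):
    # A chainer.Variable wraps a VariableNode; graph edges live on the node,
    # so normalize everything to VariableNode before traversal.
    if isinstance(c_var, chainer.Variable):
        return c_var.node
    # Already a VariableNode (e.g. taken from the computational graph).
    return c_var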
Example #5
def test_major_axis():
    vx = np.random.rand(10, 6, 4, 8)
    vb = np.random.rand(10)
    vy = vx + vb[:, None, None, None]

    x = Variable(vx.shape, order=OrderCNHW)
    b = ConstantVariable(vb, order=OrderC)
    y, = AxiswiseBias(None, axis=Axis.C)(x, b)

    generate_kernel_test_case(description=f"AxiswiseBias for major axis",
                              backend=["webgpu", "fallback"],
                              graph=Graph([x], [y]),
                              inputs={x: vx},
                              expected={y: vy})
Example #6
def test_scalar_affine():
    vx = np.random.rand(2, 3)
    vy = vx * 4 + 5

    x = Variable(vx.shape, order=OrderNC)
    y, = ScalarAffine(None, scale=4, bias=5)(x)

    generate_kernel_test_case(
        description=f"ScalarAffine",
        backend="webgpu",
        graph=Graph([x], [y]),
        inputs={x: vx},
        expected={y: vy}
    )
Example #7
def test_NHWC():
    vx = np.random.rand(10, 6, 4, 8)
    vs = np.random.rand(8)
    vy = vx * vs[None, None, None, :]

    x = Variable(vx.shape, order=OrderNHWC)
    s = ConstantVariable(vs, order=OrderC)
    y, = AxiswiseScale(None, axis=Axis.C)(x, s)

    generate_kernel_test_case(description=f"AxiswiseScale for input OrderNHWC",
                              backend=["webgpu", "webassembly", "fallback"],
                              graph=Graph([x], [y]),
                              inputs={x: vx},
                              expected={y: vy})
Example #8
def test_NHWC():
    v_im, v_col = generate_data_311()

    im = Variable(v_im.shape, order=OrderNHWC)

    col_wasm, = WasmIm2Col(None, ksize=3, padding=1, stride=1)(im)
    col_wasm.change_order(OrderNHWC)

    col_webgpu, = WebGPUIm2Col(None, ksize=3, padding=1, stride=1)(im)
    col_webgpu.change_order(OrderNHWC)

    generate_kernel_test_case(description=f"Im2Col output=NHWC",
                              backend=["webassembly"],
                              graph=Graph([im], [col_wasm]),
                              inputs={im: v_im},
                              expected={col_wasm: v_col},
                              raise_skip=False)

    generate_kernel_test_case(description=f"Im2Col output=NHWC",
                              backend=["webgpu"],
                              graph=Graph([im], [col_webgpu]),
                              inputs={im: v_im},
                              expected={col_webgpu: v_col})
Example #9
    def optimize(self, graph: Graph) -> Tuple[Graph, bool]:
        flag_changed = False
        for op in traverse.listup_operators(graph):
            if not isinstance(op, Sgemm):
                continue

            op: Sgemm

            if not op.parameters.get("eigen", False):
                op.parameters["eigen"] = True
                flag_changed = True
                graph.licenses["eigen"] = EIGEN_LICENSE

        return graph, flag_changed
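
As with the RemoveNoEffectOperator passes in Examples #1-#3, an optimize rule returns the graph together with a changed flag, so it is normally driven to a fixed point. A hedged usage sketch follows; the class name `UseEigenSgemm` is hypothetical, since the snippet above does not show the enclosing class:

# Apply the rule until it reports no further change, as in Examples #1-#3.
flag_changed = True
while flag_changed:
    graph, flag_changed = UseEigenSgemm().optimize(graph)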
Example #10
def test_dilated_NHWC():
    v_im, v_col = generate_data_3112()

    im = Variable(v_im.shape, order=OrderNHWC)

    col, = Im2Col(None, ksize=3, padding=1, stride=1, dilation_rate=2)(im)
    col.change_order(OrderNHWC)

    generate_kernel_test_case(
        description=f"Im2Col output=NHWC dilation_rate=2",
        backend=["webgpu", "webgl", "webassembly"],
        graph=Graph([im], [col]),
        inputs={im: v_im},
        expected={col: v_col})
Example #11
def test_general():
    vx = np.array([[2, 4, 3]])
    vw = np.arange(15).reshape(5, 3)
    vy = vw[vx]

    x = Variable(vx.shape, order=OrderNT)
    w = ConstantVariable(vw, order=OrderCN)
    y, = Embedding(None)(x, w)

    generate_kernel_test_case(description=f"Embedding",
                              backend=["webgpu"],
                              graph=Graph([x], [y]),
                              inputs={x: vx},
                              expected={y: vy})
Example #12
def template(x_order=OrderNHWC, y_order=OrderNCHW, description: str = ""):
    vx = np.random.rand(2, 3, 4, 5)
    vy = np.transpose(vx, [x_order.axes_dict[a] for a in y_order.axes])

    x = Variable(vx.shape, order=x_order)
    y = x.transpose(y_order)

    generate_kernel_test_case(
        description=f"Transpose {description}",
        backend=["webgpu", "webgl", "webassembly"],
        graph=Graph([x], [y]),
        inputs={x: vx},
        expected={y: vy},
    )
Example #13
def generate_graph_model2(caption_net, hidden_num):
    # inputs
    var_input_img = Variable([1, 1, hidden_num], OrderNTC)
    var_input_word = Variable([1, 1], OrderNT)
    var_switch_img = Variable([1, 1, hidden_num], OrderNTC)
    var_switch_word = Variable([1, 1, hidden_num], OrderNTC)
    var_last_h = Variable([1, hidden_num], OrderNC)
    var_last_c = Variable([1, hidden_num], OrderNC)

    # prepare for lstm
    var_emb_word, = Embedding(None)(var_input_word,
                                    ConstantVariable(
                                        caption_net.word_vec.W.data,
                                        OrderCN))  # OrderNTC
    var_lstm_input = (var_emb_word * var_switch_word) + \
        (var_input_img * var_switch_img)

    # lstm
    lstm_opr = LSTM(None,
                    use_bias=True,
                    return_sequences=False,
                    activation="tanh",
                    recurrent_activation="sigmoid",
                    use_initial_h=True,
                    use_initial_c=True)
    w_input = _convert_lstm_to_webdnn_order(caption_net.lstm.upward.W.data.T)
    w_hidden = _convert_lstm_to_webdnn_order(caption_net.lstm.lateral.W.data.T)
    b = _convert_lstm_to_webdnn_order(
        caption_net.lstm.upward.b.data[None, :])[0]
    var_lstm_h, var_lstm_c = lstm_opr(
        x=var_lstm_input,
        w_input=ConstantVariable(w_input, OrderCN),
        w_hidden=ConstantVariable(w_hidden, OrderCN),
        b=ConstantVariable(b, OrderC),
        initial_h=var_last_h,
        initial_c=var_last_c)

    # word probability
    var_word_score, = Linear(None)(var_lstm_h,
                                   ConstantVariable(
                                       caption_net.out_word.W.data.T, OrderCN))
    var_word_score_biased, = AxiswiseBias(None, axis=Axis.C)(
        var_word_score, ConstantVariable(caption_net.out_word.b.data, OrderC))
    var_word_prob, = Softmax(None, axis=Axis.C)(var_word_score_biased)

    return Graph([
        var_input_img, var_input_word, var_switch_img, var_switch_word,
        var_last_h, var_last_c
    ], [var_word_prob, var_lstm_h, var_lstm_c])
Example #14
def test_NC_CN():
    vx = np.random.rand(3, 4)
    vw = np.random.rand(4, 5)
    vy = np.dot(vx, vw)

    x = Variable(vx.shape, order=OrderNC)
    w = ConstantVariable(vw, order=OrderCN)
    y, = Linear(None)(x, w)

    generate_kernel_test_case(description=f"Linear: NC*CN",
                              backend=["fallback", "webassembly", "webgpu"],
                              graph=Graph([x], [y]),
                              inputs={x: vx},
                              expected={y: vy},
                              raise_skip=False)
Example #15
def test_NHWC_HWCN():
    vx = np.random.rand(2, 3, 4, 5)
    vw = np.random.rand(3, 4, 5, 2)
    vy = np.tensordot(vx, vw, ((1, 2, 3), (0, 1, 2)))

    x = Variable(vx.shape, order=OrderNHWC)
    w = ConstantVariable(vw, order=OrderHWCN)
    y, = Linear(None)(x, w)

    generate_kernel_test_case(description=f"Linear: NHWC*HWCN",
                              backend=["fallback", "webassembly", "webgpu"],
                              graph=Graph([x], [y]),
                              inputs={x: vx},
                              expected={y: vy},
                              raise_skip=False)
Example #16
    def convert(self, input_shapes: List[List[int]]) -> Graph:
        input_layers = self.model_config["config"]["input_layers"]  # [['input_1', 0, 0]]
        self.preprocess_zeropadding2d()
        # Variables are keyed by tuples of the form (layer_name, 0, 0)
        var_dict = {}

        graph_inputs = []
        for input_layer, input_shape in zip(input_layers, input_shapes):
            order = None
            if len(input_shape) == 1:
                order = OrderC
            elif len(input_shape) == 2:
                order = OrderNC
            elif len(input_shape) == 4:
                # Assuming data_format == "channels_last":
                order = OrderNHWC
            else:
                raise NotImplementedError("Input shape must have 1, 2, or 4 dimensions")
            v = Variable(input_shape, order)

            graph_inputs.append(v)
            var_dict[tuple(input_layer)] = v  # key: ('input_1', 0, 0)

        for layer in self.model_config["config"]["layers"]:
            layer_class_name = layer["class_name"]
            if layer_class_name == "InputLayer":
                # Dummy layer representing an input
                continue
            # Collect the input variables into a list
            input_variables = []
            assert len(layer["inbound_nodes"]) == 1  # [[var1, var2, ...]]
            for inbound_node in layer["inbound_nodes"][0]:
                key = (inbound_node[0], inbound_node[1], inbound_node[2])
                assert inbound_node[3] == {}
                input_variables.append(var_dict[key])

            output_variables = self.convert_layer(layer_class_name, layer["config"], input_variables)
            assert len(output_variables) == 1  # multi-output representation is not supported
            key = (layer["name"], 0, 0)
            assert key not in var_dict
            var_dict[key] = output_variables[0]

        output_layers = self.model_config["config"]["output_layers"]
        graph_outputs = []
        for output_layer in output_layers:
            graph_outputs.append(var_dict[tuple(output_layer)])

        return Graph(graph_inputs, graph_outputs)
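
For orientation, here is a minimal, illustrative sketch of the `model_config` fragment this loop consumes, assuming the usual layout of Keras functional-model JSON (the layer names are made up):

model_config = {
    "config": {
        "input_layers": [["input_1", 0, 0]],
        "layers": [
            {"class_name": "InputLayer", "name": "input_1",
             "config": {}, "inbound_nodes": []},
            # inbound_nodes: [[[layer_name, node_index, tensor_index, kwargs]]]
            {"class_name": "Dense", "name": "dense_1",
             "config": {"units": 10},
             "inbound_nodes": [[["input_1", 0, 0, {}]]]},
        ],
        "output_layers": [["dense_1", 0, 0]],
    }
}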
Example #17
    def convert(self, chainer_computational_graph: chainer.computational_graph.
                ComputationalGraph, input_c_vars: List[chainer.Variable],
                output_c_vars: List[chainer.Variable]) -> Graph:
        # In Chainer v2, variables are represented as Variable and VariableNode objects, and
        # graph information such as edge connections is contained in the variable node.
        # Therefore all Chainer variables must be normalized into variable nodes.
        input_c_vars = [_to_variable_node(v) for v in input_c_vars]
        output_c_vars = [_to_variable_node(v) for v in output_c_vars]

        # Append InputVariable attribute to input variables
        input_n_vars = []
        for c_var in input_c_vars:
            n_var = self._convert_var(c_var)
            n_var.attributes.add(Input(n_var))
            input_n_vars.append(n_var)

        self._convert_weight_vars(chainer_computational_graph)

        pending_c_oprs = [
            c_opr for c_opr in chainer_computational_graph.nodes
            if isinstance(c_opr, chainer.Function)
        ]

        while len(pending_c_oprs) > 0:
            for c_opr in pending_c_oprs:
                if all(((self.has_variable(_to_variable_node(c_var)))
                        for c_var in c_opr.inputs)):
                    # All input variables of this `c_opr` are already converted, so the operator itself can be converted.
                    self.convert_operator(c_opr)
                    pending_c_oprs.remove(c_opr)
                    break  # for c_opr in pending_functions
            else:
                console.debug(pending_c_oprs)
                raise ValueError("Inputs to functions cannot be resolved.")

        # Append OutputVariable attribute to output variables
        output_n_vars = []
        for c_var in output_c_vars:
            if not self.has_variable(c_var):
                raise ValueError("Output variable is not generated by graph.")
            n_var = self.get_variable(c_var)
            n_var.attributes.add(Output)
            output_n_vars.append(n_var)

        # Convert variable order into typical one in Chainer
        self._transpose_vars()

        return Graph(input_n_vars, output_n_vars)
Example #18
    def convert(self,
                model: "keras.models.Model",
                input_orders: List[Order] = None) -> Graph:
        """convert(model, input_orders=None)

        Convert a Keras model into a WebDNN IR Graph.

        Args:
            model (`keras.models.Model`): keras model
            input_orders (list of :class:`~webdnn.graph.order.Order`): Order of input tensors. If `None` is passed, the default order
                (`OrderNC` for 2D, `OrderNTC` for 3D, `OrderNHWC` for 4D) is used. If `input_orders=None`, default orders
                are assigned to all input tensors. If `input_orders[0]=None`, only the first input tensor is converted with
                the default order.

        .. admonition:: Example

            .. code::

                model = keras.models.load_model("pre_trained_model.h5")
                graph = KerasConverter(batch_size=1).convert(model)

        Returns:
            (:class:`~webdnn.graph.graph.Graph`): WebDNN IR Graph
        """
        if not model.built:
            model.build(None)

        self._convert_tensors(model.inputs, input_orders)

        for depth in sorted(list(model.nodes_by_depth.keys()), reverse=True):
            for node in model.nodes_by_depth[depth]:
                self._convert_operator(node.outbound_layer)

                # Check that all output tensors from current layer are converted into WebDNN Variable
                for tensor in node.output_tensors:
                    if not self.has_variable(tensor):
                        raise AssertionError(
                            f"[KerasConverter] {node.outbound_layer} outputs {tensor}, but it was not converted into WebDNN Variable by "
                            f"{self._handler_map[self.__class__.__name__][self.serialize_operator_type(node.outbound_layer)]}"
                        )

        return Graph([
            self.get_variable(t)
            for t in _to_list(self.get_input_tensor(model))
        ], [
            self.get_variable(t)
            for t in _to_list(self.get_output_tensor(model))
        ])
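
Building on the docstring's example, a usage sketch that overrides the default order of the first input tensor; the model file name is illustrative, and `OrderNCHW` is assumed to be importable from `webdnn.graph.order`:

import keras
from webdnn.graph.order import OrderNCHW

model = keras.models.load_model("pre_trained_model.h5")
# One Order per input tensor; a None entry falls back to the default order.
graph = KerasConverter(batch_size=1).convert(model, input_orders=[OrderNCHW])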
Example #19
    def convert(self, chainer_computational_graph: chainer.computational_graph.ComputationalGraph,
                input_vars: List[chainer.Variable], output_vars: List[chainer.Variable]) -> Graph:
        # Strategy:
        # Put already-generated variables (chainer.Variable) into a set: the input variables and weights.
        # Convert any unprocessed chainer.Function whose inputs are all generated, then add its outputs to the set.
        # Finish when no unprocessed chainer.Function remains.
        self._cvar_to_nvar = {}
        self._cvar_ids = []
        self._known_nvars = []
        self._convert_weight_vars(chainer_computational_graph)
        self._convert_input_vars(input_vars)

        pending_functions = [cfunc for cfunc in chainer_computational_graph.nodes if
                             isinstance(cfunc, chainer.Function)]
        while len(pending_functions) > 0:
            for cfunc in pending_functions:
                if all(((id(cvar) in self._cvar_ids) for cvar in cfunc.inputs)):
                    # All inputs to this layer are ready
                    opr_block = self._construct_operator_block(cfunc)
                    out_nvars = opr_block([self._cvar_to_nvar[id(cvar)] for cvar in cfunc.inputs])
                    assert len(out_nvars) == len(cfunc.outputs), str(cfunc)
                    self._known_nvars.extend(opr_block.hidden_consts)
                    self._known_nvars.extend(opr_block.hidden_vars)
                    # Associate the output variables
                    for out_nvar, out_cvar_wref in zip(out_nvars, cfunc.outputs):
                        out_cvar = out_cvar_wref()
                        assert tuple(out_nvar.shape) == out_cvar.shape, str(cfunc)
                        self._cvar_to_nvar[id(out_cvar)] = out_nvar
                        self._cvar_ids.append(id(out_cvar))
                        self._known_nvars.append(out_nvar)
                    pending_functions.remove(cfunc)
                    break  # for cfunc in pending_functions
            else:
                print(pending_functions)
                raise ValueError("inputs to functions cannot be resolved.")

        # Attach the Output attribute to the output variables
        for cvar in output_vars:
            if id(cvar) not in self._cvar_ids:
                raise ValueError("Output variable is not generated by graph.")
            nvar = self._cvar_to_nvar[id(cvar)]
            nvar.attributes.add(Output)

        # Convert to the data order typical for this framework
        self._transpose_vars(self._known_nvars)

        return Graph([self._cvar_to_nvar[id(cvar)] for cvar in input_vars],
                     [self._cvar_to_nvar[id(cvar)] for cvar in output_vars])
Example #20
def test_CNHW():
    v_im, v_col = generate_data_311()

    col_dummy = ConstantVariable(v_col, order=OrderNHWC)
    col_dummy.change_order(OrderCNHW)

    im = Variable(v_im.shape, order=OrderNHWC)

    col, = Im2Col(None, ksize=3, padding=1, stride=1, dilation_rate=1)(im)
    col.change_order(OrderCNHW)

    generate_kernel_test_case(description=f"Im2Col output=CNHW",
                              backend=["webgpu", "webgl", "webassembly"],
                              graph=Graph([im], [col]),
                              inputs={im: v_im},
                              expected={col: col_dummy.data})
Example #21
def test_single_softmax():
    linear = Linear('linear')
    softmax = Softmax('softmax', axis=Axis.C)

    x = Variable([4, 5], OrderNC)
    w = Variable([4, 5], OrderNC)
    h, = linear(x, w)
    y, = softmax(h)

    graph = Graph([x], [y])

    graph, _ = RemoveLastSoftmax().optimize(graph)

    ops = listup_operators(graph)
    assert len(ops) == 1 and isinstance(ops[0], Linear)
    assert len(graph.outputs) == 1 and ops[0].outputs["y"] == graph.outputs[0]
Example #22
def test_major_axis():
    vx = np.random.rand(10, 6, 4, 8)
    vs = np.random.rand(10)
    vy = vx * vs[:, None, None, None]

    x = Variable(vx.shape, order=OrderCNHW)
    s = Variable(vs.shape, order=OrderC)
    y, = AxiswiseScale(None, axis=Axis.C)(x, s)

    generate_kernel_test_case(
        description=f"AxiswiseScale for major axis",
        backend=["webgpu", "webassembly", "fallback"],
        graph=Graph([x, s], [y]),
        inputs={x: vx, s: vs},
        expected={y: vy}
    )
Example #23
def test_broadcast():
    vx1 = np.random.rand(3)
    vx2 = np.random.rand(2, 3, 4, 5)
    vy = vx1[None, :, None, None] ** vx2

    x1 = Variable(vx1.shape, order=OrderC)
    x2 = Variable(vx2.shape, order=OrderNCHW)
    y = x1 ** x2
    y.change_order(OrderNCHW)

    generate_kernel_test_case(
        description=f"ElementwisePow broadcast",
        graph=Graph([x1, x2], [y]),
        inputs={x1: vx1, x2: vx2},
        expected={y: vy},
    )
Example #24
def test_broadcast():
    vx1 = np.random.rand(3)
    vx2 = np.random.rand(2, 3, 4, 5) - 0.5
    vy = np.float32(vx1[None, :, None, None] > vx2)

    x1 = Variable(vx1.shape, order=OrderC)
    x2 = Variable(vx2.shape, order=OrderNCHW)
    y = x1 > x2
    y.change_order(OrderNCHW)

    generate_kernel_test_case(
        description=f"Greater broadcast",
        graph=Graph([x1, x2], [y]),
        inputs={x1: vx1, x2: vx2},
        expected={y: vy},
    )
Example #25
def template(x_order=OrderNHWC, y_order=OrderNHWC, description: str = ""):
    vx = np.random.rand(2, 3, 4, 5) - 0.5
    vy = vx / (np.abs(vx) + 1.0)

    x = Variable(vx.shape, order=OrderNHWC)
    y, = Softsign(None)(x)

    x.change_order(x_order)
    y.change_order(y_order)

    generate_kernel_test_case(
        description=f"Softsign {description}",
        graph=Graph([x], [y]),
        inputs={x: np.transpose(vx, [OrderNHWC.axes_dict[a] for a in x.order.axes])},
        expected={y: np.transpose(vy, [OrderNHWC.axes_dict[a] for a in y.order.axes])},
    )
Example #26
def test_HWNC():
    vx = np.random.rand(6, 4, 10, 8)
    vb = np.random.rand(8)
    vy = vx + vb[None, None, None, :]

    x = Variable(vx.shape, order=OrderHWNC)
    b = ConstantVariable(vb, order=OrderC)
    y, = AxiswiseBias(None, axis=Axis.C)(x, b)

    generate_kernel_test_case(
        description=f"AxiswiseBias for input OrderHWNC",
        backend=["webgpu", "webassembly", "fallback"],
        graph=Graph([x], [y]),
        inputs={x: vx},
        expected={y: vy}
    )
Example #27
def template(x_order=OrderNHWC, y_order=OrderNHWC, threshold=0.5, description: str = ""):
    vx = np.random.rand(2, 3, 4, 5) - 0.5
    vy = vx * (vx > threshold)

    x = Variable(vx.shape, order=OrderNHWC)
    y, = ThresholdRelu(None, threshold=threshold)(x)

    x.change_order(x_order)
    y.change_order(y_order)

    generate_kernel_test_case(
        description=f"ThresholdRelu {description}",
        graph=Graph([x], [y]),
        inputs={x: np.transpose(vx, [OrderNHWC.axes_dict[a] for a in x.order.axes])},
        expected={y: np.transpose(vy, [OrderNHWC.axes_dict[a] for a in y.order.axes])},
    )
Example #28
def template(a_shape=(2, 3, 4, 5), b_shape=(3, 4, 5, 6), axes=((1, 2, 3), (0, 1, 2)), backend=None, description: str = ""):
    va = np.random.rand(*a_shape).astype(np.float32)
    vb = np.random.rand(*b_shape).astype(np.float32)
    vc = np.tensordot(va, vb, axes)

    a = Variable(a_shape, Order([None] * len(a_shape)))
    b = Variable(b_shape, Order([None] * len(b_shape)))
    c, = Tensordot(None, axes=[[v.order.axes[aaa] for aaa in aa] for v, aa in zip([a, b], axes)])(a, b)

    generate_kernel_test_case(
        description=f"Tensordot {description}",
        backend=backend,
        graph=Graph([a, b], [c]),
        inputs={a: va, b: vb},
        expected={c: vc}
    )
Example #29
def template(x_order=OrderNHWC, y_order=OrderNHW, axis=Axis.C, description: str = ""):
    vx = np.arange(120).reshape(2, 3, 4, 5)
    vy = np.max(vx, axis=OrderNHWC.axes_dict[axis])

    x = Variable(vx.shape, order=OrderNHWC)
    y, = Max(None, axis=axis)(x)

    x.change_order(x_order)
    y.change_order(y_order)

    generate_kernel_test_case(
        description=f"Max {description}",
        graph=Graph([x], [y]),
        backend=["webgpu", "webgl", "webassembly"],
        inputs={x: np.transpose(vx, [OrderNHWC.axes_dict[a] for a in x.order.axes])},
        expected={y: np.transpose(vy, [OrderNHW.axes_dict[a] for a in y.order.axes])},
    )
Example #30
def test_t_is_10_nonzero_c_sequence_output():
    np.random.seed(2)
    N = 1
    T = 10
    C1 = 128
    C2 = 64
    vx = np.random.normal(size=(N, T, C1)).astype(np.float32)
    vw_input = np.random.normal(size=(C1, C2 * 4)).astype(np.float32)
    vw_hidden = np.random.normal(size=(C2, C2 * 4)).astype(np.float32)
    vb = np.random.normal(size=(C2 * 4,)).astype(np.float32)
    vc_in = np.random.normal(size=(N, C2)).astype(np.float32)
    vc_out = vc_in.copy()
    vh_in = np.random.normal(size=(N, C2)).astype(np.float32)
    vh = vh_in

    vw_input_c = _convert_to_chainer_order(vw_input)
    vw_hidden_c = _convert_to_chainer_order(vw_hidden)
    vb_c = _convert_to_chainer_order(vb[None, :])
    vh_sequence = []

    for i in range(T):
        vc_out, vh = lstm(vc_out, linear(vx[:, i, :], vw_input_c.T) + linear(vh, vw_hidden_c.T) + vb_c)
        vh_sequence.append(vh.data)

    vh = np.array(vh_sequence).transpose((1, 0, 2))  # TNC -> NTC
    vc_out = vc_out.data

    x = Variable(vx.shape, order=OrderNTC)
    c_in = ConstantVariable(vc_in, order=OrderNC)
    vh_in = ConstantVariable(vh_in, order=OrderNC)
    w_input = ConstantVariable(vw_input, order=OrderCN)
    w_hidden = ConstantVariable(vw_hidden, order=OrderCN)
    b = ConstantVariable(vb, order=OrderC)
    y, c_out = LSTM(None, return_sequences=True, use_bias=True, use_initial_c=True, use_initial_h=True,
                    activation="tanh", recurrent_activation="sigmoid")(x, w_input, w_hidden, b, initial_c=c_in,
                                                                       initial_h=vh_in)

    generate_kernel_test_case(
        description=f"LSTM t=10 initial_c,initial_h=nonzero sequence_out",
        backend=["webassembly", "webgpu"],
        graph=Graph([x], [y, c_out]),
        inputs={x: vx},
        expected={y: vh, c_out: vc_out},
        EPS=1e-3,
        ABS_EPS=1e-7
    )