Code example #1
def simple_if(condition_val):
    condition = ng.constant(condition_val, dtype=np.bool)
    # then_body
    X_t = ng.parameter([2], np.float32, "X")
    Y_t = ng.parameter([2], np.float32, "Y")

    then_mul = ng.multiply(X_t, Y_t)
    then_body_res_1 = ng.result(then_mul)
    then_body = GraphBody([X_t, Y_t], [then_body_res_1])
    then_body_inputs = [
        TensorIteratorInvariantInputDesc(1, 0),
        TensorIteratorInvariantInputDesc(2, 1)
    ]
    then_body_outputs = [TensorIteratorBodyOutputDesc(0, 0)]

    # else_body
    X_e = ng.parameter([2], np.float32, "X")
    Y_e = ng.parameter([2], np.float32, "Y")
    add_e = ng.add(X_e, Y_e)
    else_body_res_1 = ng.result(add_e)
    else_body = GraphBody([X_e, Y_e], [else_body_res_1])
    else_body_inputs = [
        TensorIteratorInvariantInputDesc(1, 0),
        TensorIteratorInvariantInputDesc(2, 1)
    ]
    else_body_outputs = [TensorIteratorBodyOutputDesc(0, 0)]

    X = ng.constant([3, 4], dtype=np.float32)
    Y = ng.constant([2, 1], dtype=np.float32)
    if_node = ng.if_op(condition, [X, Y], (then_body, else_body),
                       (then_body_inputs, else_body_inputs),
                       (then_body_outputs, else_body_outputs))
    relu = ng.relu(if_node)
    return relu
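For orientation, here is a minimal NumPy sketch of what this graph is expected to compute, assuming `if_op` evaluates the selected body on the invariant inputs (then: element-wise multiply, else: element-wise add) before the final ReLU. This is a reference restatement, not part of the original snippet:

import numpy as np

def simple_if_reference(condition_val):
    # mirrors the constants X = [3, 4] and Y = [2, 1] used above
    X = np.array([3, 4], dtype=np.float32)
    Y = np.array([2, 1], dtype=np.float32)
    out = X * Y if condition_val else X + Y  # then_body vs. else_body
    return np.maximum(out, 0)                # relu

assert np.allclose(simple_if_reference(True), [6, 4])
assert np.allclose(simple_if_reference(False), [5, 5])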
Code example #2
File: test_ops.py Project: ravirajpinnamaraju/ngraph
def test_add_with_mul():

    element_type = Type.f32
    shape = Shape([2, 2])
    A = Parameter(element_type, shape)
    B = Parameter(element_type, shape)
    C = Parameter(element_type, shape)
    parameter_list = [A, B, C]
    function = Function([ng.multiply(ng.add(A, B), C)], parameter_list, 'test')
    backend = Backend.create(test.BACKEND_NAME)

    a = backend.create_tensor(element_type, shape)
    b = backend.create_tensor(element_type, shape)
    c = backend.create_tensor(element_type, shape)
    result = backend.create_tensor(element_type, shape)

    a.write(util.numpy_to_c(np.array([1, 2, 3, 4], dtype=np.float32)), 16)
    b.write(util.numpy_to_c(np.array([5, 6, 7, 8], dtype=np.float32)), 16)
    c.write(util.numpy_to_c(np.array([9, 10, 11, 12], dtype=np.float32)), 16)

    result_arr = np.array([0, 0, 0, 0], dtype=np.float32)
    result.write(util.numpy_to_c(result_arr), 16)
    handle = backend.compile(function)
    handle.call([result], [a, b, c])
    result.read(util.numpy_to_c(result_arr), 16)

    a_arr = np.array([1, 2, 3, 4], dtype=np.float32)
    b_arr = np.array([5, 6, 7, 8], dtype=np.float32)
    c_arr = np.array([9, 10, 11, 12], dtype=np.float32)
    result_arr_ref = (a_arr + b_arr) * c_arr

    assert np.allclose(result_arr, result_arr_ref)
Code example #3
    def construct_batchnorm_fprop_pattern(self):
        """
        Generate graph op that represents a pattern for batchnorm fprop operation.
        self.gamma * ((in_obj - xmean) * ng.reciprocal(ng.sqrt(xvar + self.eps))) + self.beta
        Returns:
               Single pattern that matches batchnorm fprop op
        """
        self.batchnorm_fprop_input_tensor_label = "in_obj"
        self.batchnorm_fprop_gamma_label = "gamma"
        self.batchnorm_fprop_beta_label = "beta"
        self.batchnorm_fprop_variance_label = "variance"
        self.batchnorm_fprop_epsilon_label = "epsilon"
        self.batchnorm_fprop_mean_label = "mean"

        # bind each label to a predicate for the op it should match, so the matched ops can be looked up in the dict
        in_obj = PatternLabelOp(self.batchnorm_fprop_input_tensor_label,
                                (lambda op: isinstance(op, ContiguousOp)))
        flatten_tensor = PatternSkipOp(in_obj,
                                       (lambda op: isinstance(op, Flatten)))
        gamma = PatternLabelOp(self.batchnorm_fprop_gamma_label,
                               (lambda op: isinstance(op, BroadcastOp)))
        beta = PatternLabelOp(self.batchnorm_fprop_beta_label,
                              (lambda op: isinstance(op, BroadcastOp)))
        variance = PatternLabelOp(self.batchnorm_fprop_variance_label,
                                  (lambda op: isinstance(op, Divide)))
        epsilon = PatternLabelOp(self.batchnorm_fprop_epsilon_label,
                                 (lambda op: isinstance(op, BroadcastOp)))
        mean = PatternLabelOp(self.batchnorm_fprop_mean_label,
                              (lambda op: isinstance(op, Divide)))

        # construct the fprop batchnorm pattern matching the computation graph
        # ng.sqrt(xvar + self.eps)
        SqrtofVarianceAndEps = ng.sqrt(ng.add(variance, epsilon))
        # ng.reciprocal(ng.sqrt(xvar + self.eps))
        reciprocal_op = ng.reciprocal(SqrtofVarianceAndEps)
        reciprocal_op_w_broadcast = ng.PatternSkipOp(reciprocal_op,
                                                     lambda op: isinstance(op, BroadcastOp))

        mean_bcast = ng.PatternSkipOp(mean, lambda op: isinstance(op, BroadcastOp))
        # (in_obj - xmean) * ng.reciprocal(ng.sqrt(xvar + self.eps))
        mul_op_1 = ng.multiply(ng.subtract(flatten_tensor, mean_bcast), reciprocal_op_w_broadcast)
        # "self.gamma * ((in_obj - xmean) * ng.reciprocal(ng.sqrt(xvar + self.eps)))
        MultiplyGamma = ng.multiply(mul_op_1, gamma)
        # self.gamma * ((in_obj - xmean) * ng.reciprocal(ng.sqrt(xvar + self.eps))) + self.beta
        AddBeta = ng.Unflatten(ng.Add(MultiplyGamma, beta))
        return AddBeta
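The pattern above mirrors the formula quoted in the docstring. As a plain NumPy restatement of that formula (a sketch for reference only, with `eps` as a free parameter rather than the class attribute):

import numpy as np

def batchnorm_fprop_reference(x, gamma, beta, mean, var, eps):
    # gamma * ((x - mean) * 1/sqrt(var + eps)) + beta
    return gamma * ((x - mean) * np.reciprocal(np.sqrt(var + eps))) + beta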
Code example #4
File: test_constant.py Project: ugiwgh/ngraph
def test_constant_multiply(transformer_factory):
    """TODO."""
    a = ng.constant(4)
    b = ng.constant(2)
    c = ng.multiply(a, b)
    with executor(c) as ex:
        result = ex()
    assert result == 8
Code example #5
File: match_lstm.py Project: Asteur/NervanaNlpApch
    def __call__(self, in_obj, keep=None, **kwargs):

        if self.mask is None:
            in_axes = in_obj.axes.sample_axes()
            self.mask = ng.persistent_tensor(axes=in_axes).named('mask')
        self.mask = ng.less_equal(ng.uniform(self.mask, low=0.0, high=1.0),
                                  keep)
        return ng.multiply(self.mask, in_obj) * (1. / keep)
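This layer implements inverted dropout: each element of `in_obj` is kept with probability `keep`, and the surviving activations are rescaled by 1/keep so their expected value is unchanged. A NumPy sketch of the same masking step (names are illustrative, not from the original code):

import numpy as np

def inverted_dropout_reference(x, keep, rng=None):
    rng = np.random.default_rng() if rng is None else rng
    # keep each element with probability `keep`, then rescale by 1/keep
    mask = (rng.uniform(0.0, 1.0, size=x.shape) <= keep).astype(x.dtype)
    return mask * x * (1.0 / keep)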
Code example #6
def create_diff_if_with_two_outputs(condition_val):
    condition = ng.constant(condition_val, dtype=np.bool)

    # then_body
    X_t = ng.parameter([2], np.float32, "X")
    Y_t = ng.parameter([2], np.float32, "Y")
    mmul_t = ng.matmul(X_t, Y_t, False, False)
    mul_t = ng.multiply(Y_t, X_t)
    then_body_res_1 = ng.result(mmul_t)
    then_body_res_2 = ng.result(mul_t)
    then_body = GraphBody([X_t, Y_t], [then_body_res_1, then_body_res_2])
    then_body_inputs = [
        TensorIteratorInvariantInputDesc(1, 0),
        TensorIteratorInvariantInputDesc(2, 1)
    ]
    then_body_outputs = [
        TensorIteratorBodyOutputDesc(0, 0),
        TensorIteratorBodyOutputDesc(1, 1)
    ]

    # else_body
    X_e = ng.parameter([2], np.float32, "X")
    Z_e = ng.parameter([], np.float32, "Z")
    mul_e = ng.multiply(X_e, Z_e)
    else_body_res_1 = ng.result(Z_e)
    else_body_res_2 = ng.result(mul_e)
    else_body = GraphBody([X_e, Z_e], [else_body_res_1, else_body_res_2])
    else_body_inputs = [
        TensorIteratorInvariantInputDesc(1, 0),
        TensorIteratorInvariantInputDesc(3, 1)
    ]
    else_body_outputs = [
        TensorIteratorBodyOutputDesc(0, 0),
        TensorIteratorBodyOutputDesc(1, 1)
    ]

    X = ng.constant([3, 4], dtype=np.float32)
    Y = ng.constant([2, 1], dtype=np.float32)
    Z = ng.constant(4.0, dtype=np.float32)
    if_node = ng.if_op(condition, [X, Y, Z], (then_body, else_body),
                       (then_body_inputs, else_body_inputs),
                       (then_body_outputs, else_body_outputs))
    return if_node
Code example #7
def ngraph_embedding(ids, vocab_embeddings, vocab_size, embedding_dim,
                     padding_idx, sparse):
    """
    Decompose an embedding lookup into ngraph ops.
    """
    import ngraph as ng
    from ngraph import opset8 as opset
    from openvino.inference_engine import IECore

    if vocab_embeddings is None:
        # default to all-zero embedding weights when none are provided
        vocab_embeddings = np.zeros(
            (vocab_size, embedding_dim)).astype("float32")

    node_ids = ng.parameter(shape=ids.shape, name='ids', dtype=ids.dtype)
    node_w = ng.parameter(shape=vocab_embeddings.shape,
                          name='w',
                          dtype=vocab_embeddings.dtype)

    if padding_idx == -1:
        padding_idx += vocab_size

    if padding_idx is not None:
        # mask W: zero out the embedding row at padding_idx
        masked_embeddings = np.ones(vocab_embeddings.shape, dtype='int64')
        masked_embeddings[padding_idx, :] = 0  # mask

        node_mask = ng.constant(masked_embeddings,
                                name='mask',
                                dtype=vocab_embeddings.dtype)
        node_masked_w = ng.multiply(node_w, node_mask)

    node_axis = ng.constant([0], name='const0', dtype=np.int64)
    node_gather = opset.gather(data=node_masked_w if padding_idx is not None else node_w,
                               indices=node_ids,
                               axis=node_axis,
                               batch_dims=0)

    graph = ng.result(node_gather, name='y')

    parameters = [node_ids, node_w]
    inputs_dict = {'ids': ids, "w": vocab_embeddings}

    # wrap the gather result in an ngraph Function
    function = ng.Function(graph, parameters, "embedding")

    ie_network = ng.function_to_cnn(function)
    ie = IECore()
    executable_network = ie.load_network(ie_network, 'CPU')
    output = executable_network.infer(inputs_dict)

    return output
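Stripped of the OpenVINO plumbing, the graph computes a masked embedding lookup: the row at `padding_idx` is zeroed and the remaining rows are gathered by index along axis 0. A NumPy sketch of that computation, for reference only:

import numpy as np

def embedding_reference(ids, weights, padding_idx=None):
    # zero out the padding row, then gather rows by index along axis 0
    w = np.array(weights, dtype=np.float32, copy=True)
    if padding_idx is not None:
        w[padding_idx, :] = 0.0
    return w[ids]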
Code example #8
def test_elementwise_fp16_out(transformer_factory):
    axes = ng.make_axes([ng.make_axis(length=2), ng.make_axis(length=2)])

    a = ng.constant(np.array([[1.0, 2.0], [4.0, 12.0]], dtype='float32'), axes)
    b = ng.constant(np.array([[1.0, 2.0], [6.0, 12.0]], dtype='float32'), axes)

    c = ng.multiply(a, b, dtype=np.dtype(np.float16))

    with executor(c) as ex:
        result = ex()
        ng.testing.assert_allclose(result, [[1.0, 4.0], [24.0, 144.0]])
Code example #9
File: test_execution.py Project: QiJune/ngraph
def test_constant_tensor_multiply(transformer_factory):
    Y = ng.make_axis(length=2)
    N = ng.make_axis(length=2)

    a = ng.constant(np.array([[1.0, 1.0], [1.0, 1.0]], dtype='float32'), [Y, N])
    b = ng.constant(np.array([[1.0, 1.0], [1.0, 1.0]], dtype='float32'), [Y, N])

    c = ng.multiply(a, b)

    with executor(c) as ex:
        result = ex()
        ng.testing.assert_allclose(result, [[1.0, 1.0], [1.0, 1.0]])
Code example #10
File: ops_binary.py Project: ugiwgh/ngraph
    def ElementTimes(self, cntk_op, inputs):
        """
        Returns input[0] x input[1] (element wise).

        Arguments:
            cntk_op: CNTK operation to be imported.
            inputs: List of inputs to this node.

        Returns:
            An ngraph Op.
        """
        cast_0, cast_1 = self._cast_for_binary_op(inputs)
        return ng.multiply(cast_0, cast_1).named(cntk_op.uid)
Code example #11
def create_simple_if_with_two_outputs(condition_val):
    condition = ng.constant(condition_val, dtype=np.bool)

    # then_body
    X_t = ng.parameter([], np.float32, "X")
    Y_t = ng.parameter([], np.float32, "Y")
    Z_t = ng.parameter([], np.float32, "Z")

    add_t = ng.add(X_t, Y_t)
    mul_t = ng.multiply(Y_t, Z_t)
    then_body_res_1 = ng.result(add_t)
    then_body_res_2 = ng.result(mul_t)
    then_body = GraphBody([X_t, Y_t, Z_t], [then_body_res_1, then_body_res_2])
    then_body_inputs = [
        TensorIteratorInvariantInputDesc(1, 0),
        TensorIteratorInvariantInputDesc(2, 1),
        TensorIteratorInvariantInputDesc(3, 2)
    ]
    then_body_outputs = [
        TensorIteratorBodyOutputDesc(0, 0),
        TensorIteratorBodyOutputDesc(1, 1)
    ]

    # else_body
    X_e = ng.parameter([], np.float32, "X")
    Z_e = ng.parameter([], np.float32, "Z")
    W_e = ng.parameter([], np.float32, "W")

    add_e = ng.add(X_e, W_e)
    pow_e = ng.power(W_e, Z_e)
    else_body_res_1 = ng.result(add_e)
    else_body_res_2 = ng.result(pow_e)
    else_body = GraphBody([X_e, Z_e, W_e], [else_body_res_1, else_body_res_2])
    else_body_inputs = [
        TensorIteratorInvariantInputDesc(1, 0),
        TensorIteratorInvariantInputDesc(3, 1),
        TensorIteratorInvariantInputDesc(4, 2)
    ]
    else_body_outputs = [
        TensorIteratorBodyOutputDesc(0, 0),
        TensorIteratorBodyOutputDesc(1, 1)
    ]

    X = ng.constant(15.0, dtype=np.float32)
    Y = ng.constant(-5.0, dtype=np.float32)
    Z = ng.constant(4.0, dtype=np.float32)
    W = ng.constant(2.0, dtype=np.float32)
    if_node = ng.if_op(condition, [X, Y, Z, W], (then_body, else_body),
                       (then_body_inputs, else_body_inputs),
                       (then_body_outputs, else_body_outputs))
    return if_node
Code example #12
def test_constant_multiply(transformer_factory):
    # TODO: better error message when missing axes length in cases where it
    # is needed
    Y = ng.make_axis(length=1)

    # TODO: don't require axes
    a = ng.constant(np.array([4.0], dtype='float32'), [Y])
    b = ng.constant(np.array([2.0], dtype='float32'), [Y])

    c = ng.multiply(a, b)

    with executor(c) as ex:
        result = ex()
    ng.testing.assert_allclose(result, [8])
Code example #13
File: test_execution.py Project: tensor-tang/ngraph
def test_elementwise_fp16_out(transformer_factory):
    Y = ng.make_axis(name='Y')
    N = ng.make_axis(name='N')

    Y.length = 2
    N.length = 2

    a = ng.constant(np.array([[1.0, 2.0], [4.0, 12.0]], dtype='float32'), [Y, N])
    b = ng.constant(np.array([[1.0, 2.0], [6.0, 12.0]], dtype='float32'), [Y, N])

    c = ng.multiply(a, b, dtype=np.dtype(np.float16))

    result = executor(c)()
    np.testing.assert_allclose(result, [[1.0, 4.0], [24.0, 144.0]])
Code example #14
File: test_execution.py Project: tensor-tang/ngraph
def test_constant_tensor_multiply(transformer_factory):
    Y = ng.make_axis(name='Y')
    N = ng.make_axis(name='N')

    Y.length = 2
    N.length = 2

    a = ng.constant(np.array([[1.0, 1.0], [1.0, 1.0]], dtype='float32'), [Y, N])
    b = ng.constant(np.array([[1.0, 1.0], [1.0, 1.0]], dtype='float32'), [Y, N])

    c = ng.multiply(a, b)

    result = executor(c)()
    np.testing.assert_allclose(result, [[1.0, 1.0], [1.0, 1.0]])
Code example #15
File: test_constant.py Project: ugiwgh/ngraph
def test_cputensor_multiply_constant(transformer_factory):
    """TODO."""
    M = ng.make_axis(length=1)
    N = ng.make_axis(length=3)

    np_a = np.array([[1, 2, 3]], dtype=np.float32)
    np_c = np.multiply(np_a, 2)

    a = ng.constant(np_a, [M, N])
    b = ng.constant(2)
    c = ng.multiply(a, b)

    with executor(c) as ex:
        result = ex()
    print(result)
    assert np.array_equal(result, np_c)
Code example #16
File: test_constant.py Project: ugiwgh/ngraph
def test_cputensor_fusion(transformer_factory):
    """TODO."""
    M = ng.make_axis(length=1)
    N = ng.make_axis(length=3)

    np_a = np.array([[1, 2, 3]], dtype=np.float32)
    np_b = np.array([[3, 2, 1]], dtype=np.float32)
    np_d = np.multiply(np_b, np.add(np_a, 2))

    a = ng.constant(np_a, [M, N])
    b = ng.constant(np_b, [M, N])
    c = ng.constant(2)
    d = ng.multiply(b, ng.add(a, c))

    with executor(d) as ex:
        result = ex()
    print(result)
    assert np.array_equal(result, np_d)
Code example #17
File: test_ops.py Project: ravirajpinnamaraju/ngraph
def binary_op(op_str, a, b):

    if op_str == '+':
        return a + b
    elif op_str == 'Add':
        return ng.add(a, b)
    elif op_str == '-':
        return a - b
    elif op_str == 'Sub':
        return ng.subtract(a, b)
    elif op_str == '*':
        return a * b
    elif op_str == 'Mul':
        return ng.multiply(a, b)
    elif op_str == '/':
        return a / b
    elif op_str == 'Div':
        return ng.divide(a, b)
    elif op_str == 'Dot':
        return Dot(a, b)
    elif op_str == 'Equal':
        return ng.equal(a, b)
    elif op_str == 'Greater':
        return ng.greater(a, b)
    elif op_str == 'GreaterEq':
        return ng.greater_equal(a, b)
    elif op_str == 'Less':
        return ng.less(a, b)
    elif op_str == 'LessEq':
        return ng.less_equal(a, b)
    elif op_str == 'Maximum':
        return ng.maximum(a, b)
    elif op_str == 'Minimum':
        return ng.minimum(a, b)
    elif op_str == 'NotEqual':
        return ng.not_equal(a, b)
    elif op_str == 'Power':
        return ng.power(a, b)
Code example #18
def binary_op(op_str, a, b):

    if op_str == "+":
        return a + b
    elif op_str == "Add":
        return ng.add(a, b)
    elif op_str == "-":
        return a - b
    elif op_str == "Sub":
        return ng.subtract(a, b)
    elif op_str == "*":
        return a * b
    elif op_str == "Mul":
        return ng.multiply(a, b)
    elif op_str == "/":
        return a / b
    elif op_str == "Div":
        return ng.divide(a, b)
    elif op_str == "Equal":
        return ng.equal(a, b)
    elif op_str == "Greater":
        return ng.greater(a, b)
    elif op_str == "GreaterEq":
        return ng.greater_equal(a, b)
    elif op_str == "Less":
        return ng.less(a, b)
    elif op_str == "LessEq":
        return ng.less_equal(a, b)
    elif op_str == "Maximum":
        return ng.maximum(a, b)
    elif op_str == "Minimum":
        return ng.minimum(a, b)
    elif op_str == "NotEqual":
        return ng.not_equal(a, b)
    elif op_str == "Power":
        return ng.power(a, b)
Code example #19
def test_add_with_mul():

    element_type = Type.f32
    shape = Shape([4])
    A = Parameter(element_type, shape)
    B = Parameter(element_type, shape)
    C = Parameter(element_type, shape)
    parameter_list = [A, B, C]
    function = Function([ng.multiply(ng.add(A, B), C)], parameter_list, "test")

    runtime = get_runtime()
    computation = runtime.computation(function, A, B, C)
    result = computation(
        np.array([1, 2, 3, 4], dtype=np.float32),
        np.array([5, 6, 7, 8], dtype=np.float32),
        np.array([9, 10, 11, 12], dtype=np.float32),
    )[0]

    a_arr = np.array([1, 2, 3, 4], dtype=np.float32)
    b_arr = np.array([5, 6, 7, 8], dtype=np.float32)
    c_arr = np.array([9, 10, 11, 12], dtype=np.float32)
    result_arr_ref = (a_arr + b_arr) * c_arr

    assert np.allclose(result, result_arr_ref)
Code example #20
File: ops_bridge.py Project: rsumner31/ngraph
def Mul(onnx_node, ng_inputs):  # type: (NodeWrapper, List[TensorOp]) -> Op
    left, right = cast_axes_for_binary_broadcast(onnx_node, ng_inputs)
    return ng.multiply(left, right)
Code example #21
def Mul(onnx_node, ng_inputs):  # type: (NodeWrapper, List[NgraphNode]) -> NgraphNode
    """Perform element-wise binary multiplication."""
    left, right = broadcast_for_binary_operation(onnx_node, ng_inputs)
    return ng.multiply(left, right)
Code example #22
File: train.py Project: Asteur/NervanaNlpApch
# Masks for question and para after embedding/LookupTable layer
mask_para_embed = ng.dot(const_LSTM_embed, reorder_para_mask)
mask_question_embed = ng.dot(
    const_LSTM_embed, ng.cast_axes(reorder_ques_mask, [dummy_axis, REC, N]))

# Pass question and para through embedding layer and dropout layers
embed_output_para_1 = embed_layer(inputs['para'])
embed_output_para = dropout_1(embed_output_para_1, keep=dropout_val)
question_inps = ng.cast_axes(inputs['question'], [N, REC])
embed_output_ques_1 = embed_layer(question_inps)
embed_output_ques = dropout_2(embed_output_ques_1, keep=dropout_val)

# Mask output of embedding layer to the length of each sentence
embed_output_para = ng.multiply(
    embed_output_para,
    ng.cast_axes(mask_para_embed,
                 [embed_output_para.axes[0], embed_output_para.axes[1], N]))

embed_output_ques = ng.multiply(
    embed_output_ques,
    ng.cast_axes(mask_question_embed,
                 [embed_output_ques.axes[0], embed_output_ques.axes[1], N]))

# Encoding Layer
H_pr_be = rlayer_1(embed_output_ques)
H_hy_be = rlayer_1(embed_output_para)

# Mask the output of encoding layers to the length of each sentence
H_hy = ng.multiply(H_hy_be, mask_para)
H_pr = ng.multiply(H_pr_be, mask_question)
Code example #23
File: train.py Project: cdj0311/nlp-architect
mask_para_embed = ng.dot(const_LSTM_embed, reorder_para_mask)
mask_question_embed = ng.dot(
    const_LSTM_embed, ng.cast_axes(
        reorder_ques_mask, [
            dummy_axis, REC, N]))

# Pass question and para through embedding layer and dropout layers
embed_output_para_1 = embed_layer(inputs['para'])
embed_output_para = dropout_1(embed_output_para_1, keep=dropout_val)
question_inps = ng.cast_axes(inputs['question'], [N, REC])
embed_output_ques_1 = embed_layer(question_inps)
embed_output_ques = dropout_2(embed_output_ques_1, keep=dropout_val)


# Mask output of embedding layer to the length of each sentence
embed_output_para = ng.multiply(embed_output_para, ng.cast_axes(
    mask_para_embed, [embed_output_para.axes[0], embed_output_para.axes[1], N]))

embed_output_ques = ng.multiply(
    embed_output_ques, ng.cast_axes(
        mask_question_embed, [
            embed_output_ques.axes[0], embed_output_ques.axes[1], N]))


# Encoding Layer
H_pr_be = rlayer_1(embed_output_ques)
H_hy_be = rlayer_1(embed_output_para)


# Mask the output of encoding layers to the length of each sentence
H_hy = ng.multiply(H_hy_be, mask_para)
H_pr = ng.multiply(H_pr_be, mask_question)
Code example #24
File: match_lstm.py Project: Asteur/NervanaNlpApch
    def __call__(self,
                 H_pr,
                 h_ip,
                 states,
                 output=None,
                 reset_cells=True,
                 input_data=None):
        """
        Arguments:
        ----------
        H_pr : Encoding for question
        h_ip: Sliced input of paragraph encoding for a particular time step
        states: State of the LSTM cell
        output: previous hidden state
        input_data: the ArrayIterator object for training data
                    (contains information about the length of each sentence)
        """
        # get recurrent axis for question
        rec_axis_pr = H_pr.axes.recurrent_axis()
        const_one = ng.constant(const=1, axes=[self.dummy_axis])
        # if first word in a paragraph is encountered, assign the previous LSTM
        # hidden state as zeros
        if output is None:
            h_r_old = ng.constant(axes=[self.F, self.N], const=0)
        else:
            h_r_old = ng.cast_axes(output, [self.F, self.N])

        # Compute attention vector
        sum_1 = ng.dot(self.W_q, H_pr)
        sum_1 = ng.cast_axes(sum_1,
                             [self.hidden_rows, self.hidden_cols_ques, self.N])
        int_sum1 = ng.dot(self.W_p, h_ip)
        int_sum2 = ng.dot(self.W_r, h_r_old)
        int_sum = int_sum1 + int_sum2 + self.b_p
        int_sum = ng.ExpandDims(int_sum, self.dummy_axis, 1)

        # masking for the attention vector
        req_mask = ng.axes_with_order(
            ng.cast_axes(ng.dot(self.e_q2, input_data['question_len']),
                         [self.hidden_rows, self.N, self.hidden_cols_ques]),
            [self.hidden_rows, self.hidden_cols_ques, self.N])

        req_mask_2 = ng.axes_with_order(
            ng.cast_axes(ng.dot(const_one, input_data['question_len']),
                         [self.N, self.hidden_cols_ques]),
            [self.hidden_cols_ques, self.N])

        G_i_int = sum_1 + ng.multiply(
            req_mask,
            ng.axes_with_order(
                ng.dot(int_sum, self.e_q),
                [self.hidden_rows, self.hidden_cols_ques, self.N]))

        G_i = ng.tanh(G_i_int)
        # Attention Vector
        at_sum1 = ng.dot(self.w_lr, G_i)
        at = ng.softmax(at_sum1 + ng.log(req_mask_2))
        at_repeated = ng.cast_axes(
            ng.dot(self.e_q2, ng.ExpandDims(at, self.dummy_axis, 0)),
            [self.F, rec_axis_pr, self.N])

        # Stack the 2 vectors as per the equation in the paper
        z1 = h_ip
        z2 = ng.sum(ng.multiply(H_pr, at_repeated), rec_axis_pr)
        # represents the inp to lstm_cell
        # ng.concat_along_axis([z1,z2],self.F)
        inputs_lstm = ng.dot(self.ZX, z1) + ng.dot(self.ZY, z2)

        # LSTM cell computations (from the LSTM branch in ngraph)
        if self.out_axes is None:
            self.out_axes = self.feature_axes + inputs_lstm.axes.batch_axis()
        if states is None:
            states = self.initialize_states(inputs_lstm.axes.batch_axis(),
                                            reset_cells=reset_cells)
        assert self.out_axes == states['h'].axes

        for gate in self._gate_names:
            transform = self.gate_transform[gate]
            gate_input = self.i2h[gate](inputs_lstm) + self.h2h[gate](
                states['h'])
            self.gate_output[gate] = ng.cast_role(transform(gate_input),
                                                  self.out_axes)

        states['c'] = (states['c'] * self.gate_output['f'] +
                       self.gate_output['i'] * self.gate_output['g'])
        states['h'] = self.gate_output['o'] * self.activation(states['c'])
        states['h'] = ng.cast_role(states['h'], self.out_axes)
        # return unrolled output and state of LSTM cell
        return ng.cast_axes(states['h'], axes=[self.F, self.N]), states
Code example #25
File: match_lstm.py Project: Asteur/NervanaNlpApch
    def __call__(self,
                 H_concat,
                 states=None,
                 output=None,
                 reset_cells=True,
                 input_data=None):
        """
        Arguments:
        ----------
        H_concat: Concatenated forward and reverse unrolled outputs of the
                 `MatchLSTMCell_withAttention` cell
        states: previous LSTM state
        output: hidden state from previous timestep
        reset_cells: argument to reset a cell
        input_data: the ArrayIterator object for training data
                    (contains information of length of each sentence)

        """

        rec_axis_pr = H_concat.axes.recurrent_axis()
        const_one = ng.constant(const=1, axes=[self.dummy_axis])

        b_k_lists = []
        # rec_axis_hy=H_hy.axes.recurrent_axis()
        for i in range(0, 2):
            if output is None:
                h_k_old = ng.constant(axes=[self.F, self.N], const=0)
            else:
                h_k_old = ng.cast_axes(output, [self.F, self.N])

            sum_1 = ng.dot(
                self.V_answer,
                ng.cast_axes(H_concat,
                             [self.lstm_feature_new, rec_axis_pr, self.N]))
            sum_1 = ng.cast_axes(
                sum_1, [self.hidden_rows, self.hidden_cols_para, self.N])

            int_sum2 = ng.dot(self.W_a, h_k_old)
            int_sum = int_sum2  # +self.b_a
            int_sum = ng.ExpandDims(int_sum, self.dummy_axis, 1)

            # Following notations from the paper
            # Compute Attention Vector
            F_i_int = sum_1 + ng.axes_with_order(
                ng.dot(int_sum, self.e_q),
                [self.hidden_rows, self.hidden_cols_para, self.N])

            F_i = ng.tanh(F_i_int)  # Attention Vector

            b_k_sum1 = ng.dot(self.v_lr, F_i)
            # This masking with -inf for length of para>max_para ensures that
            # when we do softmax over these values we get a 0
            mask_loss_new = ng.log(ng.dot(const_one, input_data['para_len']))
            mask_loss_new = ng.axes_with_order(
                ng.cast_axes(mask_loss_new, [self.N, self.hidden_cols_para]),
                [self.hidden_cols_para, self.N])

            # Add mask to the required logits
            b_k = ng.softmax(b_k_sum1 + mask_loss_new)
            b_k_req = ng.softmax(b_k_sum1 + mask_loss_new)
            b_k_repeated = ng.cast_axes(
                ng.dot(self.e_q2, ng.ExpandDims(b_k, self.dummy_axis, 0)),
                [H_concat.axes[0], rec_axis_pr, self.N])

            inputs_lstm = ng.sum(ng.multiply(H_concat, b_k_repeated),
                                 rec_axis_pr)

            # LSTM Cell calculations
            if self.out_axes is None:
                self.out_axes = self.feature_axes + inputs_lstm.axes.batch_axis(
                )
            if states is None:
                states = self.initialize_states(inputs_lstm.axes.batch_axis(),
                                                reset_cells=reset_cells)
            assert self.out_axes == states['h'].axes

            for gate in self._gate_names:
                transform = self.gate_transform[gate]
                gate_input = self.i2h[gate](inputs_lstm) + self.h2h[gate](
                    states['h'])
                self.gate_output[gate] = ng.cast_role(transform(gate_input),
                                                      self.out_axes)

            states['c'] = (states['c'] * self.gate_output['f'] +
                           self.gate_output['i'] * self.gate_output['g'])
            states['h'] = self.gate_output['o'] * self.activation(states['c'])
            states['h'] = ng.cast_role(states['h'], self.out_axes)

            output = states['h']

            # append required outputs
            b_k_lists.append(b_k_req)

        return b_k_lists
Code example #26
def test_tensor_iterator():
    from ngraph.utils.tensor_iterator_types import (
        GraphBody,
        TensorIteratorSliceInputDesc,
        TensorIteratorMergedInputDesc,
        TensorIteratorInvariantInputDesc,
        TensorIteratorBodyOutputDesc,
        TensorIteratorConcatOutputDesc,
    )

    #  Body parameters
    body_timestep = ng.parameter([], np.int32, "timestep")
    body_data_in = ng.parameter([1, 2, 2], np.float32, "body_in")
    body_prev_cma = ng.parameter([2, 2], np.float32, "body_prev_cma")
    body_const_one = ng.parameter([], np.int32, "body_const_one")

    # CMA = cumulative moving average
    prev_cum_sum = ng.multiply(ng.convert(body_timestep, "f32"), body_prev_cma)
    curr_cum_sum = ng.add(prev_cum_sum, ng.squeeze(body_data_in, [0]))
    elem_cnt = ng.add(body_const_one, body_timestep)
    curr_cma = ng.divide(curr_cum_sum, ng.convert(elem_cnt, "f32"))
    cma_hist = ng.unsqueeze(curr_cma, [0])

    # TI inputs
    data = ng.parameter([16, 2, 2], np.float32, "data")
    # Iterations count
    zero = ng.constant(0, dtype=np.int32)
    one = ng.constant(1, dtype=np.int32)
    initial_cma = ng.constant(np.zeros([2, 2], dtype=np.float32),
                              dtype=np.float32)
    iter_cnt = ng.range(zero, np.int32(16), np.int32(1))
    ti_inputs = [iter_cnt, data, initial_cma, one]

    graph_body = GraphBody(
        [body_timestep, body_data_in, body_prev_cma, body_const_one],
        [curr_cma, cma_hist])
    ti_slice_input_desc = [
        # timestep
        # input_idx, body_param_idx, start, stride, part_size, end, axis
        TensorIteratorSliceInputDesc(0, 0, 0, 1, 1, -1, 0),
        # data
        TensorIteratorSliceInputDesc(1, 1, 0, 1, 1, -1, 0),
    ]
    ti_merged_input_desc = [
        # body prev/curr_cma
        TensorIteratorMergedInputDesc(2, 2, 0),
    ]
    ti_invariant_input_desc = [
        # body const one
        TensorIteratorInvariantInputDesc(3, 3),
    ]

    # TI outputs
    ti_body_output_desc = [
        # final average
        TensorIteratorBodyOutputDesc(0, 0, -1),
    ]
    ti_concat_output_desc = [
        # history of cma
        TensorIteratorConcatOutputDesc(1, 1, 0, 1, 1, -1, 0),
    ]

    node = ng.tensor_iterator(
        ti_inputs,
        graph_body,
        ti_slice_input_desc,
        ti_merged_input_desc,
        ti_invariant_input_desc,
        ti_body_output_desc,
        ti_concat_output_desc,
    )

    assert node.get_type_name() == "TensorIterator"
    assert node.get_output_size() == 2
    # final average
    assert list(node.get_output_shape(0)) == [2, 2]
    # cma history
    assert list(node.get_output_shape(1)) == [16, 2, 2]
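The iterator body implements a cumulative moving average over the leading axis: at step t it rescales the previous average by t, adds the new slice, and divides by t + 1. A NumPy sketch of the expected result, assuming the same [16, 2, 2] input layout (reference only, not the original test):

import numpy as np

def cma_reference(data):
    # data: [T, 2, 2]; returns (final average, per-step history of averages)
    counts = np.arange(1, data.shape[0] + 1, dtype=data.dtype).reshape(-1, 1, 1)
    history = np.cumsum(data, axis=0) / counts
    return history[-1], history

data = np.random.rand(16, 2, 2).astype(np.float32)
final_avg, cma_hist = cma_reference(data)
assert final_avg.shape == (2, 2) and cma_hist.shape == (16, 2, 2)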
Code example #27
def Mul(onnx_node, ng_inputs):  # type: (NodeWrapper, List[TensorOp]) -> Op
    verify_axes_binary_broadcast_compatible(onnx_node, ng_inputs)
    return ng.multiply(ng_inputs[0], ng_inputs[1])