Example #1
def test_argmax_pushdown():
    x = matrix()
    for sm in [softmax_graph, softmax_legacy]:
        # test that the max_and_argmax is pushed down if the max is not used
        out = max_and_argmax(sm(exp(tanh(sigmoid(x)))), axis=-1)[1]
        fgraph = FunctionGraph([x], [out])
        optdb.query(OPT_FAST_RUN).optimize(fgraph)

        # print 'AFTER'
        # for node in fgraph.toposort():
        # print node.op
        assert len(fgraph.toposort()) == 1
        assert isinstance(fgraph.toposort()[0].op, Argmax)
        assert check_stack_trace(fgraph, ops_to_check=Argmax)
        x = matrix()
        # test that the max_and_argmax is not pushed down if the max is used
        out = max_and_argmax(sm(exp(tanh(sigmoid(x)))), axis=-1)[0]
        fgraph = FunctionGraph([x], [out])

        assert hasattr(fgraph.outputs[0].tag, "trace")

        optdb.query(OPT_FAST_RUN).optimize(fgraph)

        # print 'AFTER'
        # for node in fgraph.toposort():
        # print node.op
        assert len(fgraph.toposort()) == 3
        assert isinstance(fgraph.toposort()[0].op, Elemwise)
        assert isinstance(fgraph.toposort()[1].op, Softmax)
        assert isinstance(fgraph.toposort()[2].op, CAReduce)
        assert isinstance(fgraph.toposort()[2].op.scalar_op,
                          aesara.scalar.ScalarMaximum)
Example #2
def test_NoOutputFromInplace():
    x = matrix()
    y = matrix()
    a = dot(x, y)
    b = tanh(a)
    c = tanh(dot(2 * x, y))

    # Ensure that the elemwise op that produces the output is inplace when
    # using a mode that does not include the optimization
    fct_no_opt = function([x, y], [b, c], mode="FAST_RUN")
    op = fct_no_opt.maker.fgraph.outputs[0].owner.op
    assert op.destroy_map and 0 in op.destroy_map
    op = fct_no_opt.maker.fgraph.outputs[1].owner.op
    assert op.destroy_map and 0 in op.destroy_map

    # Ensure that the elemwise op that produces the output is not inplace when
    # using a mode that includes the optimization
    opt = AddFeatureOptimizer(NoOutputFromInplace([1]))
    mode_opt = Mode(linker="py", optimizer="fast_run").register((opt, 49.9))

    fct_opt = function([x, y], [b, c], mode=mode_opt)
    op = fct_opt.maker.fgraph.outputs[0].owner.op
    assert op.destroy_map and 0 in op.destroy_map
    op = fct_opt.maker.fgraph.outputs[1].owner.op
    assert not op.destroy_map or 0 not in op.destroy_map
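
Both of the in-place checks above boil down to inspecting an op's destroy_map. A minimal helper in the same spirit (a sketch that uses only the attributes already shown in the example; it is not part of the original test):

def inplace_nodes(fct):
    # Collect the ops of a compiled Aesara function that declare an
    # in-place (destructive) operation via a non-empty destroy_map.
    return [
        node.op
        for node in fct.maker.fgraph.toposort()
        if getattr(node.op, "destroy_map", None)
    ]

With the unoptimized function above, the returned list would be expected to include the Elemwise that produces the first output.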
Example #3
def test_reallocation():
    x = scalar("x")
    y = scalar("y")
    z = tanh(3 * x + y) + cosh(x + 5 * y)
    # The functionality is currently implemented only for the non-lazy, non-C VM.
    for linker in [
            VMLinker(allow_gc=False, lazy=False, use_cloop=False),
            VMLinker(allow_gc=True, lazy=False, use_cloop=False),
    ]:
        m = get_mode(Mode(linker=linker))
        m = m.excluding("fusion", "inplace")

        f = function([x, y], z, name="test_reduce_memory", mode=m)
        output = f(1, 2)
        assert output
        storage_map = f.fn.storage_map

        def check_storage(storage_map):
            for i in storage_map:
                if not isinstance(i, TensorConstant):
                    keys_copy = list(storage_map.keys())[:]
                    keys_copy.remove(i)
                    for o in keys_copy:
                        if storage_map[i][0] and storage_map[i][0] is storage_map[o][0]:
                            return [True, storage_map[o][0]]
            return [False, None]

        assert check_storage(storage_map)[0]
        assert len({id(v) for v in storage_map.values()}) < len(storage_map)
Example #4
    def test_copy_share_memory(self):
        x = fscalar("x")
        # SharedVariables for the test; one of them has an update
        y = shared(value=1)
        z = shared(value=2)
        out = tanh((x + y + 2) / (x + z - 0.2) ** 2)

        # Test for different linkers
        for mode in ["FAST_RUN", "FAST_COMPILE"]:
            ori = function([x], [out], mode=mode, updates={z: z + 1})
            cpy = ori.copy(share_memory=True)

            # Test if memories shared
            storage_map_ori = ori.fn.storage_map
            storage_map_cpy = cpy.fn.storage_map
            fgraph_cpy = cpy.maker.fgraph

            # Assert that intermediate and constant storages are shared,
            # and that output storages are not shared.
            i_o_variables = fgraph_cpy.inputs + fgraph_cpy.outputs
            ori_storages = storage_map_ori.values()
            l = [
                val
                for key, val in storage_map_cpy.items()
                if key not in i_o_variables or isinstance(key, Constant)
            ]
            for storage in l:
                assert any([storage is s for s in ori_storages])

            # Assert storages of SharedVariable without updates are shared
            for (input, _1, _2), here, there in zip(
                ori.indices, ori.input_storage, cpy.input_storage
            ):
                assert here.data is there.data
Example #5
    def test_composite_elemwise_float16(self):
        w = bvector()
        x = vector(dtype="float16")
        y = fvector()

        cz = tanh(x + aet.cast(y, "float16"))
        o = (
            cz
            - cz ** 2
            + aet.cast(x, "int16")
            + aet.cast(x, "float32")
            + aet.cast(w, "float16")
            - aet.constant(np.float16(1.0))
        )

        aesara.function([w, x, y], o, mode=mode_with_gpu)

        v = vector(dtype="uint8")
        w = vector(dtype="float16")
        x = vector(dtype="float16")
        y = vector(dtype="float16")
        z = vector(dtype="float16")

        o = aet.switch(v, mul(w, x, y), z)
        aesara.function([v, w, x, y, z], o, mode=mode_with_gpu)
Example #6
        def step(x_t, h_tm1, c_tm1):
            i_t = sigmoid(
                dot(x_t, self.W_i) + dot(h_tm1, self.R_i) + self.b_wi +
                self.b_ri)
            f_t = sigmoid(
                dot(x_t, self.W_f) + dot(h_tm1, self.R_f) + self.b_wf +
                self.b_rf)
            o_t = sigmoid(
                dot(x_t, self.W_o) + dot(h_tm1, self.R_o) + self.b_ro +
                self.b_wo)

            c_hat_t = tanh(
                dot(x_t, self.W_c) + dot(h_tm1, self.R_c) + self.b_wc +
                self.b_rc)
            c_t = f_t * c_tm1 + i_t * c_hat_t
            h_t = o_t * tanh(c_t)

            return h_t, c_t
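
A step function like this is typically driven over the time dimension with aesara.scan. A minimal wiring sketch, assuming step refers to the function above and that x_seq, h0, c0 and n_hidden are illustrative placeholders (they are not part of the original snippet):

import aesara
import aesara.tensor as aet

x_seq = aet.matrix("x_seq")     # (time, n_in) input sequence
n_hidden = 64                   # assumed hidden size
h0 = aet.zeros((n_hidden,))     # initial hidden state
c0 = aet.zeros((n_hidden,))     # initial cell state

# scan feeds x_seq[t] as x_t and the previous step's outputs as h_tm1, c_tm1
[h_seq, c_seq], _ = aesara.scan(
    fn=step,
    sequences=x_seq,
    outputs_info=[h0, c0],
)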
Example #7
def test_subgraph_grad():
    # Tests that the grad method with no known_grads
    # matches what happens if you use successive subgraph_grads

    x = fvector("x")
    t = fvector("t")
    w1 = aesara.shared(np.random.randn(3, 4))
    w2 = aesara.shared(np.random.randn(4, 2))
    a1 = tanh(dot(x, w1))
    a2 = tanh(dot(a1, w2))
    cost2 = sqr(a2 - t).sum()
    cost2 += sqr(w2.sum())
    cost1 = sqr(w1.sum())

    params = [[w2], [w1]]
    costs = [cost2, cost1]
    grad_ends = [[a1], [x]]

    inputs = [t, x]
    rng = np.random.RandomState([2012, 11, 15])
    values = [rng.randn(2), rng.randn(3)]
    values = [np.cast[ipt.dtype](value) for ipt, value in zip(inputs, values)]

    wrt = [w2, w1]
    cost = cost2 + cost1
    true_grads = grad(cost, wrt)
    true_grads = aesara.function(inputs, true_grads)
    true_grads = true_grads(*values)
    next_grad = None
    param_grads = []
    for i in range(2):
        param_grad, next_grad = subgraph_grad(wrt=params[i],
                                              end=grad_ends[i],
                                              start=next_grad,
                                              cost=costs[i])
        next_grad = OrderedDict(zip(grad_ends[i], next_grad))
        param_grads.extend(param_grad)

    pgrads = aesara.function(inputs, param_grads)
    pgrads = pgrads(*values)

    for true_grad, pgrad in zip(true_grads, pgrads):
        assert np.sum(np.abs(true_grad - pgrad)) < 0.00001
Example #8
def ElemwiseOpTime(N, script=False, loops=1000):
    x = vector("x")
    np.random.seed(1235)
    v = np.random.random(N).astype(config.floatX)
    f = aesara.function([x], 2 * x + x * x)
    f1 = aesara.function([x], tanh(x))
    if not script:
        if config.openmp:
            print("With openmp:")
        print("Fast op ", end=" ")
    cheapTime = evalTime(f, v, script=script, loops=loops)
    if not script:
        print("Slow op ", end=" ")
    costlyTime = evalTime(f1, v, script=script, loops=loops)
    return (cheapTime, costlyTime)
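
The evalTime helper used above comes from Aesara's speed-test scripts and is not shown here; a rough stand-in with the same call signature might look like the following (an assumption about its behaviour, not the original implementation):

import time

def evalTime(f, v, script=False, loops=1000):
    # Call the compiled function `loops` times on the same input and
    # return the total elapsed wall-clock time in seconds.
    t0 = time.time()
    for _ in range(loops):
        f(v)
    elapsed = time.time() - t0
    if not script:
        print(f"{elapsed:.4f}s for {loops} calls")
    return elapsed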
Example #9
        def step(inp, s_prev):
            i_t = sigmoid(
                dot(inp, self.W_i) + dot(s_prev, self.R_i) + self.b_wi +
                self.b_ru)
            r_t = sigmoid(
                dot(inp, self.W_r) + dot(s_prev, self.R_r) + self.b_wr +
                self.b_rr)

            h_hat_t = tanh(
                dot(inp, self.W_h) +
                (r_t * (dot(s_prev, self.R_h) + self.b_rh)) + self.b_wh)

            s_curr = ((1.0 - i_t) * h_hat_t) + (i_t * s_prev)

            return s_curr
Example #10
        def rnn_step1(
            # sequences
            x,
            ri,
            zi,
            # outputs_info
            h,
        ):
            pre_r = ri + h.dot(U)
            pre_z = zi + h.dot(V)
            r = nnet.sigmoid(pre_r)
            z = nnet.sigmoid(pre_z)

            after_r = r * h
            pre_h = x + after_r.dot(W)
            new_h = tanh(pre_h)

            res_h = z * new_h + (1 - z) * h
            return res_h
Example #11
def test_no_output_from_inplace():
    x = matrix()
    y = matrix()
    a = dot(x, y)
    b = tanh(a)

    # Ensure that the elemwise op that produces the output is inplace when
    # using a mode that does not include the optimization
    fct_no_opt = aesara.function([x, y], b, mode="FAST_RUN")
    op = fct_no_opt.maker.fgraph.outputs[0].owner.op
    assert hasattr(op, "destroy_map") and 0 in op.destroy_map

    # Ensure that the elemwise op that produces the output is not inplace when
    # using a mode that includes the optimization
    opt = AddFeatureOptimizer(NoOutputFromInplace())
    mode_opt = Mode(linker="cvm", optimizer="fast_run").register((opt, 49.9))

    fct_opt = aesara.function([x, y], b, mode=mode_opt)
    op = fct_opt.maker.fgraph.outputs[0].owner.op
    assert not hasattr(op, "destroy_map") or 0 not in op.destroy_map
Example #12
def step(x_t, h_tm1):
    h = tanh(dot(h_tm1, W_hh) + dot(x_t, W_xh) + b_h)
    return h