Example #1
    def test_copy_share_memory(self):
        x = tt.fscalar("x")
        # Shared variables for the test; one of them has an update.
        y = aesara.shared(value=1)
        z = aesara.shared(value=2)
        out = tt.tanh((x + y + 2) / (x + z - 0.2) ** 2)

        # Test for different linkers
        for mode in ["FAST_RUN", "FAST_COMPILE"]:
            ori = aesara.function([x], [out], mode=mode, updates={z: z + 1})
            cpy = ori.copy(share_memory=True)

            # Check whether memory is shared
            storage_map_ori = ori.fn.storage_map
            storage_map_cpy = cpy.fn.storage_map
            fgraph_cpy = cpy.maker.fgraph

            # Assert that intermediate and constant storages are shared,
            # and that input/output storages are not shared.
            i_o_variables = fgraph_cpy.inputs + fgraph_cpy.outputs
            ori_storages = storage_map_ori.values()
            shared_storages = [
                val
                for key, val in storage_map_cpy.items()
                if key not in i_o_variables or isinstance(key, tt.Constant)
            ]
            for storage in shared_storages:
                assert any(storage is s for s in ori_storages)

            # Assert that storages of SharedVariables without updates are shared
            for (input, _1, _2), here, there in zip(
                ori.indices, ori.input_storage, cpy.input_storage
            ):
                assert here.data is there.data
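For reference, a minimal sketch of the copy API this test exercises (toy graph; assumes a working Aesara install):

import aesara
import aesara.tensor as tt

x = tt.dscalar("x")
f = aesara.function([x], tt.tanh(x))
g = f.copy(share_memory=True)  # the copy reuses f's intermediate storage
assert f(0.5) == g(0.5)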
Example #2
def test_reallocation():
    x = tensor.scalar("x")
    y = tensor.scalar("y")
    z = tensor.tanh(3 * x + y) + tensor.cosh(x + 5 * y)
    # This functionality is currently implemented only for the non-lazy, non-C VM.
    for linker in [
            vm.VM_Linker(allow_gc=False, lazy=False, use_cloop=False),
            vm.VM_Linker(allow_gc=True, lazy=False, use_cloop=False),
    ]:
        m = aesara.compile.get_mode(aesara.Mode(linker=linker))
        m = m.excluding("fusion", "inplace")

        f = aesara.function([x, y], z, name="test_reduce_memory", mode=m)
        output = f(1, 2)
        assert output
        storage_map = f.fn.storage_map

        def check_storage(storage_map):
            from aesara.tensor.var import TensorConstant

            for i in storage_map:
                if not isinstance(i, TensorConstant):
                    keys_copy = list(storage_map)
                    keys_copy.remove(i)
                    for o in keys_copy:
                        if (storage_map[i][0] is not None
                                and storage_map[i][0] is storage_map[o][0]):
                            return [True, storage_map[o][0]]
            return [False, None]

        assert check_storage(storage_map)[0]
        assert len({id(v) for v in storage_map.values()}) < len(storage_map)
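The reuse that the final assertion checks can be inspected directly; a small sketch (same storage_map as above) counting how often each storage cell appears:

from collections import Counter

reuse = Counter(id(cell) for cell in storage_map.values())
n_shared = sum(1 for count in reuse.values() if count > 1)
print(n_shared, "storage cells are reused by more than one variable")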
Example #3
        def step(x_t, h_tm1, c_tm1):
            # Input gate
            i_t = tt.nnet.sigmoid(
                tt.dot(x_t, self.W_i) + tt.dot(h_tm1, self.R_i) + self.b_wi +
                self.b_ri)
            # Forget gate
            f_t = tt.nnet.sigmoid(
                tt.dot(x_t, self.W_f) + tt.dot(h_tm1, self.R_f) + self.b_wf +
                self.b_rf)
            # Output gate
            o_t = tt.nnet.sigmoid(
                tt.dot(x_t, self.W_o) + tt.dot(h_tm1, self.R_o) + self.b_wo +
                self.b_ro)

            # Candidate cell state
            c_hat_t = tt.tanh(
                tt.dot(x_t, self.W_c) + tt.dot(h_tm1, self.R_c) + self.b_wc +
                self.b_rc)
            # New cell state and hidden state
            c_t = f_t * c_tm1 + i_t * c_hat_t
            h_t = o_t * tt.tanh(c_t)

            return h_t, c_t
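In the enclosing class this step function is presumably driven by aesara.scan; a hedged sketch of that wiring (X, h0 and c0 are hypothetical, and step closes over the self.* weights shown above):

import aesara

# X: (timesteps, ...) input sequence; h0/c0: initial states (all assumed)
[h_seq, c_seq], updates = aesara.scan(
    fn=step,
    sequences=X,
    outputs_info=[h0, c0],
)
h_last = h_seq[-1]  # hidden state after the final timestep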
Example #4
def ElemwiseOpTime(N, script=False, loops=1000):
    x = tt.vector("x")
    np.random.seed(1235)
    v = np.random.random(N).astype(aesara.config.floatX)
    f = aesara.function([x], 2 * x + x * x)
    f1 = aesara.function([x], tt.tanh(x))
    if not script:
        if aesara.config.openmp:
            print("With openmp:")
        print("Fast op ", end=" ")
    cheapTime = evalTime(f, v, script=script, loops=loops)
    if not script:
        print("Slow op ", end=" ")
    costlyTime = evalTime(f1, v, script=script, loops=loops)
    return (cheapTime, costlyTime)
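evalTime is not part of this snippet; a plausible stand-in (hypothetical, built on timeit) that matches the call sites above:

import timeit

def evalTime(f, v, script=False, loops=1000):
    # Time `loops` calls of the compiled function on input `v`.
    t = timeit.timeit(lambda: f(v), number=loops)
    if not script:
        print(f"{t:.4f}s for {loops} loops")
    return t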
Example #5
        def step(inp, s_prev):
            # Update gate
            i_t = tt.nnet.sigmoid(
                tt.dot(inp, self.W_i) + tt.dot(s_prev, self.R_i) + self.b_wi +
                self.b_ru)
            # Reset gate
            r_t = tt.nnet.sigmoid(
                tt.dot(inp, self.W_r) + tt.dot(s_prev, self.R_r) + self.b_wr +
                self.b_rr)

            # Candidate state, with the reset gate applied to the recurrent term
            h_hat_t = tt.tanh(
                tt.dot(inp, self.W_h) +
                (r_t * (tt.dot(s_prev, self.R_h) + self.b_rh)) + self.b_wh)

            # Interpolate between the candidate and the previous state
            s_curr = ((1.0 - i_t) * h_hat_t) + (i_t * s_prev)

            return s_curr
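As with the LSTM step above, aesara.scan would drive this function, except that only one state is carried (sketch; inp_seq and s0 are hypothetical):

import aesara

s_seq, updates = aesara.scan(
    fn=step,
    sequences=inp_seq,  # assumed input sequence
    outputs_info=[s0],  # assumed initial state
)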
Example #6
        def rnn_step1(
            # sequences
            x,
            ri,
            zi,
            # outputs_info
            h,
        ):
            # Reset and update gate pre-activations
            pre_r = ri + h.dot(U)
            pre_z = zi + h.dot(V)
            r = tt.nnet.sigmoid(pre_r)
            z = tt.nnet.sigmoid(pre_z)

            # Candidate hidden state from the reset-gated previous state
            after_r = r * h
            pre_h = x + after_r.dot(W)
            new_h = tt.tanh(pre_h)

            # Interpolate between the candidate and the previous hidden state
            res_h = z * new_h + (1 - z) * h
            return res_h
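U, V and W are free names taken from the enclosing scope; hypothetical definitions consistent with the dot products above (hidden size n is assumed):

import numpy as np
import aesara

n = 16  # assumed hidden size
rng = np.random.default_rng(0)
U = aesara.shared(rng.standard_normal((n, n)).astype(aesara.config.floatX), name="U")
V = aesara.shared(rng.standard_normal((n, n)).astype(aesara.config.floatX), name="V")
W = aesara.shared(rng.standard_normal((n, n)).astype(aesara.config.floatX), name="W")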
Example #7
    def test_composite_elemwise_float16(self):
        w = aesara.tensor.bvector()
        x = aesara.tensor.vector(dtype="float16")
        y = aesara.tensor.fvector()

        cz = tensor.tanh(x + tensor.cast(y, "float16"))
        o = (cz - cz**2 + tensor.cast(x, "int16") + tensor.cast(x, "float32") +
             tensor.cast(w, "float16") - tensor.constant(np.float16(1.0)))

        aesara.function([w, x, y], o, mode=mode_with_gpu)

        v = aesara.tensor.vector(dtype="uint8")
        w = aesara.tensor.vector(dtype="float16")
        x = aesara.tensor.vector(dtype="float16")
        y = aesara.tensor.vector(dtype="float16")
        z = aesara.tensor.vector(dtype="float16")

        o = tensor.switch(v, tensor.mul(w, x, y), z)
        aesara.function([v, w, x, y, z], o, mode=mode_with_gpu)
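mode_with_gpu comes from the GPU test harness and is not defined in this snippet; the same kind of float16 graph can be compiled with the default (CPU) mode, a minimal sketch:

import numpy as np
import aesara
from aesara import tensor

x = tensor.vector(dtype="float16")
y = tensor.fvector()
f = aesara.function([x, y], tensor.tanh(x + tensor.cast(y, "float16")))
print(f(np.zeros(3, dtype="float16"), np.ones(3, dtype="float32")))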
Example #8
def test_no_output_from_inplace():
    x = tt.matrix()
    y = tt.matrix()
    a = tt.dot(x, y)
    b = tt.tanh(a)

    # Ensure that the elemwise op that produces the output is inplace when
    # using a mode that does not include the optimization
    fct_no_opt = aesara.function([x, y], b, mode="FAST_RUN")
    op = fct_no_opt.maker.fgraph.outputs[0].owner.op
    assert hasattr(op, "destroy_map") and 0 in op.destroy_map

    # Ensure that the elemwise op that produces the output is not inplace when
    # using a mode that includes the optimization
    opt = AddFeatureOptimizer(NoOutputFromInplace())
    mode_opt = Mode(linker="cvm", optimizer="fast_run").register((opt, 49.9))

    fct_opt = aesara.function([x, y], b, mode=mode_opt)
    op = fct_opt.maker.fgraph.outputs[0].owner.op
    assert not hasattr(op, "destroy_map") or 0 not in op.destroy_map
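The destroy_map checked above records which inputs an op overwrites; it can be printed directly (same variables as in the test):

op = fct_no_opt.maker.fgraph.outputs[0].owner.op
# e.g. {0: [0]} means output 0 reuses the storage of input 0
print(getattr(op, "destroy_map", {}))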
Example #9
def deriv(*args):
    (x,) = args
    # d/dx tanh(x) = 1 - tanh(x)^2
    return 1.0 - aet.tanh(x) ** 2
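This hand-written derivative matches what Aesara's autodiff gives; a quick numeric check (assuming aet is aesara.tensor):

import numpy as np
import aesara
import aesara.tensor as aet

x = aet.dscalar("x")
dtanh = aesara.function([x], aesara.grad(aet.tanh(x), x))
assert np.isclose(dtanh(0.5), 1.0 - np.tanh(0.5) ** 2)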
Example #10
def step(x_t, h_tm1):
    # Vanilla RNN update: combine previous hidden state and current input
    h = tt.tanh(tt.dot(h_tm1, W_hh) + tt.dot(x_t, W_xh) + b_h)
    return h
Example #11
def tanh_func(x, x1, x2, w, x0):
    # Smooth step from x1 (as x -> -inf) to x2 (as x -> +inf),
    # centred at x0 with transition width w
    return (x1 + x2) / 2.0 - (x1 - x2) / 2.0 * at.tanh((x - x0) / w)
Example #12
def warp_func(x, a, b, c):
    # Warp x with a tanh bump of amplitude a, steepness b, centred at c
    return x + (a * at.tanh(b * (x - c)))


def dm_taper(sx):
    """
    Tapering function for isopycnal slopes.
    iso_slopec and iso_dslope are free names from the enclosing scope.
    """
    return 0.5 * (1.0 + aet.tanh((-abs(sx) + iso_slopec) / iso_dslope))