Example #1
    def runTest(self):
        # The debug ops below are only implemented by the python backend.
        if cgt.get_config()["backend"] != "python":
            cgt.utils.warn("Skipping test -- only works for backend=python")
            return
        x = cgt.scalar()
        # Record debug operations (a runtime assertion and a callback) for x.
        with cgt.debug_context() as dbg:
            cgt.assert_(cgt.equal(x, 1), "yoyoyo")
            cgt.dbg_call(myfunc, x)
            print "dbg", dbg.nodes
            # cgt.assert_(cgt.equal(x, 2))

        # Compile the identity function with the recorded debug context attached.
        f = cgt.make_function([x], [x], dbg=dbg)
        f(1)  # satisfies the assertion x == 1
        with self.assertRaises(AssertionError):
            f(2)  # violates the assertion, so evaluation raises
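The test calls cgt.dbg_call with a module-level myfunc callback that is not shown in this excerpt. A minimal placeholder, assuming any callable that accepts the runtime value of x will do (the name comes from the test above, but the body here is hypothetical):

def myfunc(x):
    # hypothetical stand-in for the dbg_call callback: just report the value
    # flowing through the graph when f is evaluated
    print "myfunc saw", x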
Example #2
def make_funcs(opt, ntm, total_time, loss_timesteps):
    # Symbolic inputs: time-major input sequence x (total_time, batch, k)
    # and target sequence y (total_time, batch, p).
    x_tbk = cgt.tensor3("x", fixed_shape=(total_time, opt.b, opt.k))
    y_tbp = cgt.tensor3("y", fixed_shape=(total_time, opt.b, opt.p))
    loss_timesteps = set(loss_timesteps)

    initial_states = make_ntm_initial_states(opt)
    # Train both the NTM weights and the learned initial state arrays.
    params = ntm.get_parameters() + get_parameters(initial_states)
    # params = ntm.get_parameters()

    lossCE = 0  # accumulated cross-entropy
    loss01 = 0  # accumulated count of correctly rounded outputs

    # Unroll the NTM over time, threading the state arrays through each step.
    state_arrs = initial_states
    for t in xrange(total_time):
        tmp = ntm([x_tbk[t]] + state_arrs)
        raw_pred = tmp[0]      # pre-sigmoid prediction at timestep t
        state_arrs = tmp[1:4]  # updated state passed to the next timestep

        if t in loss_timesteps:
            p_pred = cgt.sigmoid(raw_pred)
            ce = bernoulli_crossentropy(
                y_tbp[t],
                p_pred).sum()  # cross-entropy of bernoulli distribution
            lossCE = lossCE + ce
            loss01 = loss01 + cgt.cast(cgt.equal(y_tbp[t], round01(p_pred)),
                                       cgt.floatX).sum()

    # Normalize per output element and convert the cross-entropy from nats to bits.
    lossCE = lossCE / (len(loss_timesteps) * opt.p * opt.b) / np.log(2)
    loss01 = loss01 / (len(loss_timesteps) * opt.p * opt.b)
    gradloss = cgt.grad(lossCE, params)

    flatgrad = flatcat(gradloss)

    # Compile one function returning only the CE loss, and one returning both
    # losses plus the flattened gradient.
    f_loss = cgt.function([x_tbk, y_tbp], lossCE)
    f_loss_and_grad = cgt.function([x_tbk, y_tbp], [lossCE, loss01, flatgrad])

    print "number of nodes in computation graph:", core.count_nodes(
        [lossCE, loss01, flatgrad])

    return f_loss, f_loss_and_grad, params
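A minimal usage sketch for the returned functions. It assumes opt, ntm, total_time and loss_timesteps are built as elsewhere in this demo, and that x_batch and y_batch are hypothetical numpy arrays shaped (total_time, opt.b, opt.k) and (total_time, opt.b, opt.p); the optimizer update from the original demo is omitted:

import numpy as np

f_loss, f_loss_and_grad, params = make_funcs(opt, ntm, total_time, loss_timesteps)
ce_bits, frac_match, flat_grad = f_loss_and_grad(x_batch, y_batch)
print "cross-entropy per output (bits):", ce_bits
print "fraction of correctly rounded outputs:", frac_match
print "gradient norm:", np.linalg.norm(flat_grad)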
Example #3
File: nn.py Project: x724/cgt
def zero_one_loss(x, y):
    # x: (batch, n_classes) scores; y: integer class labels (flat or column).
    # Returns 1 where the argmax prediction equals the label, 0 elsewhere.
    assert x.ndim == 2 and y.ndim in (1, 2) and core.dtype_kind(y.dtype) == 'i'
    return cgt.equal(x.argmax(axis=1, keepdims=False), y.flatten())
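A short usage sketch for zero_one_loss. It assumes the function is importable as cgt.nn.zero_one_loss (per the file name above) and that cgt.vector accepts a dtype keyword for integer labels; the variable names are illustrative:

import cgt
from cgt import nn

scores = cgt.matrix("scores")              # (batch, n_classes) class scores
labels = cgt.vector("labels", dtype='i8')  # integer class labels
correct = nn.zero_one_loss(scores, labels) # 1 where argmax(scores) == label
f_correct = cgt.function([scores, labels], correct)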
Example #4
File: nn.py Project: EdsterG/cgt
def zero_one_loss(x, y):
    assert x.ndim == 2 and y.ndim in (1, 2) and core.dtype_kind(y.dtype) == 'i'
    return cgt.equal(x.argmax(axis=1, keepdims=False), y.flatten())