Example #1
def _callback(pi):
    Y_hat = TeaSpoon.exe(pred, [pi, X])
    out = {}
    out['ACC']  = np.mean(Y_hat == y)    # fraction of correct predictions
    out['Loss'] = TeaSpoon.exe(loss, [pi, X, y])
    opt = {'freq': 10}                   # report every 10 iterations
    return out, opt
Example #2
def callback(pi):
    Y_hat = TS.exe(pred_T, [pi, X])
    out = {}
    out['ACC']  = np.mean(Y_hat == y)    # classification accuracy
    out['Loss'] = TS.exe(loss_T, [pi, X, y])
    out['pi']   = 3.14                   # any user-defined scalar can be reported
    opt = {'freq': 1}                    # report on every iteration
    return out, opt
Example #3
def init(X, y, r=0):
    '''
    Initialize model parameters for the dataset (X, y): trainable
    weights w and biases b, plus a constant scalar r.
    '''
    N, F = X.shape              # samples, features
    C = len(np.unique(y))       # classes

    w = np.random.rand(F, C)
    b = np.ones(C)

    para = {}
    para['w'] = TeaSpoon.parameter(w, const=False)   # optimized
    para['b'] = TeaSpoon.parameter(b, const=False)   # optimized
    para['r'] = TeaSpoon.parameter(r, const=True)    # held fixed

    return para
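
A minimal usage sketch on hypothetical toy data, assuming parameter objects expose their wrapped value via .value as in Example #9:

X = np.random.rand(100, 5)          # 100 samples, 5 features
y = np.random.randint(0, 3, 100)    # 3 classes
para = init(X, y)
print(para['w'].value.shape)        # (5, 3)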
Example #4
    def test_default(self):

        p1, res = TeaSpoon.optimize(
            fun=loss,
            p0=p0,
            jac=grad,
            callback='default',
            args=(X, y),
            method='BFGS',
            options={'maxiter': 20, 'disp': 0},
        )

        loss_0  = TeaSpoon.exe(loss, [p0, X, y])
        loss_1  = TeaSpoon.exe(loss, [p1, X, y])

        assert loss_0 > loss_1
Example #5
    def test_none(self):
        p1, res = TeaSpoon.optimize(
            fun=loss,
            p0=p0,
            jac=grad,
            args=(X, y),
            method='BFGS',
            options={'maxiter': 20, 'disp': 0},
        )
Example #6
    def test_default(self):

        p1, res = TeaSpoon.optimize(
            fun=loss,
            p0=p0,
            jac=grad,
            callback='default',
            args=(X, y),
            method='BFGS',
            options={'maxiter': 20, 'disp': 0},
        )
Example #7
    def test_user_counter(self):

        global glob_counter
        glob_counter = 0

        def _callback(pi):
            Y_hat = TeaSpoon.exe(pred, [pi, X])
            out = {}
            out['ACC']  = np.mean(Y_hat == y)
            out['Loss'] = TeaSpoon.exe(loss, [pi, X, y])
            opt = {'freq': 10}
            return out, opt

        p1, res = TeaSpoon.optimize(
            fun=loss,
            p0=p0,
            jac=grad,
            callback=_callback,
            args=(X, y),
            method='BFGS',
            options={'maxiter': 20, 'disp': 0},
        )
Example #8
import unittest
import TeaSpoon
import numpy as np
from examples import log_reg

X, y = log_reg.rand_dset()
p0   = log_reg.init(X, y)
pred = TeaSpoon.compile(log_reg.pred, [p0, X], jac=False)
loss, grad = TeaSpoon.compile(log_reg.loss, [p0, X, y], jac=True)


class testcase(unittest.TestCase):

    def test_default(self):

        p1, res = TeaSpoon.optimize(
            fun=loss,
            p0=p0,
            jac=grad,
            callback='default',
            args=(X, y),
            method='BFGS',
            options={'maxiter': 20, 'disp': 0},
        )

        loss_0  = TeaSpoon.exe(loss, [p0, X, y])
        loss_1  = TeaSpoon.exe(loss, [p1, X, y])

        assert loss_0 > loss_1
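
A conventional entry point for running the file directly, assuming the unittest.TestCase base made explicit above:

if __name__ == '__main__':
    unittest.main()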

Example #9

import numpy as np
import TeaSpoon as TS
import theano.tensor as TT   # assumed backend: the symbolic ops below match theano.tensor

######################################################
# generate random dataset:
N = 600 # samples
F = 5   # features
C = 4   # labels
y = np.random.randint(0, C, N)
X = np.random.rand(N, F) * np.array([y+1]).T   # scale features by class label
######################################################

# initial model
w = np.random.uniform(-1, 1, (F, C))
b = np.zeros(C)
p0 = {}
p0['w'] = TS.parameter(w, const=False)
p0['b'] = TS.parameter(b, const=False)


# define loss function
def loss(pi, X, y):
    '''
    compute loss (average negative conditional log likelihood):
    L(w, b) = -1/N * sum_i log softmax(X_i . w + b)[y_i]
    '''
    w = pi['w'].value
    b = pi['b'].value
    P = TT.nnet.softmax(TT.dot(X, w) + b)        # class probabilities, N x C
    idx = TT.arange(y.shape[0]).astype('int64')  # row indices 0..N-1
    idy = y.astype('int64')                      # true labels as column indices
    return -TT.mean(TT.log(P[idx, idy]))
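
For completeness, a minimal sketch of how this loss could be compiled and minimized, reusing only calls shown above (TS.compile and TS.optimize as in Example #8, TS.exe as in Example #2; the loss_T/grad_T names follow Example #2):

loss_T, grad_T = TS.compile(loss, [p0, X, y], jac=True)
p1, res = TS.optimize(
    fun=loss_T,
    p0=p0,
    jac=grad_T,
    callback='default',
    args=(X, y),
    method='BFGS',
    options={'maxiter': 20, 'disp': 0},
)
# the optimized loss should be lower than the initial one (cf. Example #4)
print(TS.exe(loss_T, [p0, X, y]), TS.exe(loss_T, [p1, X, y]))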