def test_none(self):
    """Smoke test: ``TeaSpoon.optimize`` runs when no callback is supplied."""
    opts = {'maxiter': 20, 'disp': 0}
    p1, res = TeaSpoon.optimize(
        fun=loss,
        p0=p0,
        jac=grad,
        args=(X, y),
        method='BFGS',
        options=opts,
    )
def test_default_smoke(self):
    """Smoke test: ``TeaSpoon.optimize`` accepts the built-in ``'default'`` callback.

    NOTE(review): this method was originally named ``test_default``, the same
    name as a later method in this class; the later definition shadowed this
    one, so this test was never collected or run. Renamed so both tests run.
    """
    p1, res = TeaSpoon.optimize(
        fun=loss,
        p0=p0,
        jac=grad,
        callback='default',  # use the library-provided progress callback
        args=(X, y),
        method='BFGS',
        options={'maxiter': 20, 'disp': 0},
    )
def test_default(self):
    """With the built-in ``'default'`` callback, optimization must lower the loss."""
    p1, res = TeaSpoon.optimize(
        fun=loss,
        p0=p0,
        jac=grad,
        callback='default',
        args=(X, y),
        method='BFGS',
        options={'maxiter': 20, 'disp': 0},
    )
    # Evaluate the objective at the starting point and at the optimum found.
    initial_loss = TeaSpoon.exe(loss, [p0, X, y])
    final_loss = TeaSpoon.exe(loss, [p1, X, y])
    assert initial_loss > final_loss
def test_user_counter(self):
    """A user-supplied callback reporting metrics every 10 iterations.

    The callback returns ``(out, opt)``: a dict of values to report and an
    options dict with the reporting frequency.
    """
    global glob_counter
    # NOTE(review): glob_counter is initialized but never incremented by the
    # callback below — confirm whether it was meant to count invocations.
    glob_counter = 0

    def _callback(pi):
        Y_hat = TeaSpoon.exe(pred, [pi, X])
        out = {}
        # np.mean(Y_hat == y) is the fraction of correct predictions, i.e.
        # accuracy, not a mean squared error — it was mislabeled 'MSE';
        # renamed 'ACC' for correctness and consistency with the callback
        # example elsewhere in this project.
        out['ACC'] = np.mean(Y_hat == y)
        out['Loss'] = TeaSpoon.exe(loss, [pi, X, y])
        opt = {'freq': 10}  # report every 10 iterations
        return out, opt

    p1, res = TeaSpoon.optimize(
        fun=loss,
        p0=p0,
        jac=grad,
        callback=_callback,
        args=(X, y),
        method='BFGS',
        options={'maxiter': 20, 'disp': 0},
    )
# Evaluate the loss at the initial parameters.
TS.exe(loss_T, [p0, X, y])


# Define your own callback function (optional): it returns a dict of values
# to report plus an options dict with the reporting frequency.
def callback(pi):
    Y_hat = TS.exe(pred_T, [pi, X])
    out = {}
    out['ACC'] = np.mean(Y_hat == y)
    out['Loss'] = TS.exe(loss_T, [pi, X, y])
    out['pi'] = 3.14
    opt = {'freq': 1}
    return out, opt


# Use here the features and options from scipy.optimize
# to learn the model parameters.
p1, res = TS.optimize(
    fun=loss_T,
    p0=p0,
    jac=grad_T,
    callback=callback,
    args=(X, y),
    method='BFGS',
    options={'maxiter': 20, 'disp': 0},
)

# Compute predictions and the loss at the learned parameters.
Y_hat = TS.exe(pred_T, [p1, X])
loss = TS.exe(loss_T, [p1, X, y])