def test_TauEstimator_alone():
    for i in range(2, TESTING_ITERATIONS):
        # tests all regularizations
        for reg in (lp.Tikhonov(), lp.Lasso()):
            # tests all loss functions
            for loss in LOSS_FUNCTIONS:
                # initiates random inputs
                lamb = randint(0, 20)
                c1 = np.random.uniform(0.1, 5)
                c2 = np.random.uniform(0.1, 5)
                # clippings are randomly chosen between a random number and
                # None, with a predominance for the number
                clipping_1 = random.choice([c1, c1, c1, None])
                clipping_2 = random.choice([c2, c2, c2, None])
                # creates a tau estimator with each of the loss functions
                tau_estimator = lp.TauEstimator(
                    loss_function=loss,
                    regularization=reg,
                    lamb=lamb,
                    clipping_1=clipping_1,
                    clipping_2=clipping_2)
                # random (a, y) pair where a has i rows and a random number
                # of columns between i and i + 100
                tau_estimator.estimate(
                    a=np.random.rand(i, i + randint(0, 100)),
                    y=np.random.rand(i).reshape(-1))

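# A minimal, deterministic sketch of the workflow the randomized test above
# exercises. The shapes and parameter values here are illustrative choices,
# not part of the original suite; only calls already used in this file are
# assumed.
def demo_tau_estimator_basic():
    # Tikhonov-regularized tau estimator with explicit clippings
    tau = lp.TauEstimator(
        loss_function=lp.Optimal,
        regularization=lp.Tikhonov(),
        lamb=2,
        clipping_1=1.5,
        clipping_2=3.0)
    a = np.random.rand(5, 8)
    y = np.random.rand(5)
    # estimate() returns the solution as its first element, as the
    # comparison tests below rely on
    xhat = tau.estimate(a=a, y=y)[0]
    print(xhat)
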
def test_score_function_is_odd():
    for loss in LOSS_FUNCTIONS:
        my_tau = lp.TauEstimator(loss_function=loss)
        for _ in range(2, TESTING_ITERATIONS):
            # generates a random vector of size 100 with negative and
            # positive values
            y = np.random.randn(100)
            score = my_tau.score_function(y)
            # an odd score function preserves the sign of its argument
            for j in range(len(score)):
                assert np.sign(score[j]) == np.sign(y[j])

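# The property under test is that the score function psi is odd,
# i.e. psi(-r) == -psi(r), so positive and negative residuals are treated
# symmetrically. A standalone sketch of the same check on a hand-written
# Huber psi; this helper is an illustrative assumption, not linvpy's code.
def demo_odd_score_check():
    def huber_psi(r, clipping=1.345):
        # identity inside the clipping region, saturated outside
        return np.clip(r, -clipping, clipping)

    r = np.random.randn(100)
    # oddness: flipping the sign of the residuals flips the sign of the score
    np.testing.assert_allclose(huber_psi(-r), -huber_psi(r))
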
def test_tau_scale():
    for i in range(2, TESTING_ITERATIONS):
        # generates random clippings between 0.1 and 5
        clipping_1 = np.random.uniform(0.1, 5)
        clipping_2 = np.random.uniform(0.1, 5)
        # generates a random vector of size between 1 and 100
        x = np.random.rand(randint(1, 100))
        my_tau = lp.TauEstimator(
            loss_function=lp.Optimal,
            clipping_1=clipping_1,
            clipping_2=clipping_2)
        linvpy_t = my_tau.tau_scale(x)
        util_t = util_l.tauscale(
            x,
            lossfunction='optimal',
            b=0.5,
            clipping=(clipping_1, clipping_2))
        # verifies that both implementations agree
        np.testing.assert_allclose(linvpy_t, util_t)

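# For reference, the tau-scale combines two loss functions: with s(x) the
# M-scale computed from rho_1, the tau-scale satisfies (up to the library's
# normalization conventions, which this sketch does not claim to reproduce)
# tau^2(x) = s(x)^2 * mean(rho_2(x_i / s(x))). A naive sketch with a
# hypothetical normalized bisquare rho_2, for intuition only.
def demo_tau_scale_formula(x, m_scale_value, clipping_2=3.0):
    def bisquare_rho(t, c):
        # Tukey's bisquare rho, normalized to saturate at 1
        out = 1 - (1 - (t / c) ** 2) ** 3
        return np.where(np.abs(t) <= c, out, 1.0)

    return m_scale_value ** 2 * np.mean(
        bisquare_rho(x / m_scale_value, clipping_2))
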
def test_scorefunction():
    for i in range(2, TESTING_ITERATIONS):
        # two random clippings between 0.1 and 5
        CLIPPINGS = (np.random.uniform(0.1, 5), np.random.uniform(0.1, 5))
        # creates an instance of the tau estimator with the two random
        # clippings
        tau = lp.TauEstimator(
            clipping_1=CLIPPINGS[0],
            clipping_2=CLIPPINGS[1],
            loss_function=lp.Optimal)
        # y = random vector of size between 1 and 100
        y = np.random.rand(randint(1, 100))
        # toolbox's score function
        score_util = util.scorefunction(np.asarray(y), 'tau', CLIPPINGS)
        # linvpy's score function
        score_lp = tau.score_function(y)
        # raises an error if the two score functions disagree
        np.testing.assert_allclose(score_lp, score_util)

def test_mscale():
    for i in range(2, TESTING_ITERATIONS):
        # generates a random clipping between 0.1 and 5
        CLIPPING = np.random.uniform(0.1, 5)
        # creates an instance of TauEstimator
        tau = lp.TauEstimator(
            clipping_1=CLIPPING,
            clipping_2=CLIPPING,
            loss_function=lp.Optimal)
        # generates a random vector of size between 1 and 100
        y = np.random.rand(randint(1, 100))
        # computes the m-scale with linvpy and with the toolbox
        linvpy_scale = tau.m_scale(y)
        toolbox_scale = util.mscaleestimator(
            u=y, tolerance=1e-5, b=0.5, clipping=CLIPPING, kind='optimal')
        # verifies that both results are the same
        assert toolbox_scale == linvpy_scale

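# The M-scale s(y) under test is the solution of the implicit equation
# mean(rho(y_i / s)) = b, with b = 0.5 as passed to the toolbox above. A
# naive standalone fixed-point sketch using a hypothetical normalized
# bisquare rho, for intuition only; linvpy's m_scale is the authoritative
# implementation.
def demo_m_scale_fixed_point(y, clipping=1.5, b=0.5, tolerance=1e-5):
    def bisquare_rho(t, c):
        # Tukey's bisquare rho, normalized to saturate at 1
        out = 1 - (1 - (t / c) ** 2) ** 3
        return np.where(np.abs(t) <= c, out, 1.0)

    s = max(np.median(np.abs(y)), 1e-12)  # rough, strictly positive start
    for _ in range(100):
        # rescale s so that mean(rho(y / s)) moves toward b
        s_next = s * np.sqrt(np.mean(bisquare_rho(y / s, clipping)) / b)
        if abs(s_next - s) < tolerance:
            break
        s = s_next
    return s
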
def test_TauEstimator_VS_Marta():
    for i in range(2, TESTING_ITERATIONS):
        # generates random clippings between 0.1 and 5
        clipping_1 = np.random.uniform(0.1, 5)
        clipping_2 = np.random.uniform(0.1, 5)
        # uses a single initial solution so both implementations start from
        # the same point
        n_initial_x = 1
        # generates a random matrix of size i x (i + randint(0, 10))
        A = np.random.rand(i, i + randint(0, 10))
        # generates a random vector of size i
        y = np.random.rand(i)
        my_tau = lp.TauEstimator(
            loss_function=lp.Optimal,
            clipping_1=clipping_1,
            clipping_2=clipping_2)
        linvpy_output = my_tau.estimate(a=A, y=y)
        marta_t = lp_l.basictau(
            a=A,
            y=np.matrix(y),
            loss_function='optimal',
            b=0.5,
            clipping=(clipping_1, clipping_2),
            ninitialx=n_initial_x)
        # asserts both xhat are the same
        np.testing.assert_allclose(linvpy_output[0].reshape(-1, 1), marta_t[0])

        return vfunc(array)

    # Define your psi function as the derivative of the rho function: you can
    # copy-paste this and just change what's inside unit_psi
    def psi(self, array):
        # psi function of your loss function on ONE single element
        def unit_psi(element):
            # simply return a constant, for example
            return 1

        # vectorize the function
        vfunc = np.vectorize(unit_psi)
        return vfunc(array)


custom_tau = lp.TauEstimator(loss_function=CustomLoss)
print(custom_tau.estimate(a, y))


# Define your own regularization
class CustomRegularization(lp.Regularization):
    # define your regularization function here
    def regularize(self, a, y, lamb=0):
        return np.ones(a.shape[1])


# Create your custom tau estimator with custom loss and regularization
# functions. Pay attention to pass the loss function as a REFERENCE (without
# the "()" after the name) and the regularization as an OBJECT (i.e. with
# the "()").
custom_tau = lp.TauEstimator(
    loss_function=CustomLoss, regularization=CustomRegularization())

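# A short usage sketch for the custom estimator built above. The random data
# here is illustrative only; estimate() is called exactly as elsewhere in
# this file.
a_demo = np.random.rand(6, 4)
y_demo = np.random.rand(6)
print(custom_tau.estimate(a=a_demo, y=y_demo))
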
def cover_fast_tau():
    my_tau = lp.TauEstimator()
    A = np.matrix([[2, 2], [3, 4], [7, 6]])
    y = np.array([1, 4, 3])
    my_tau.fast_estimate(A, y)

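# For contrast with the coverage test above, a small sketch that runs both
# the full and the fast estimator on the same system. Nothing is assumed
# about the return values beyond what estimate() and fast_estimate() already
# expose elsewhere in this file.
def demo_estimate_vs_fast_estimate():
    my_tau = lp.TauEstimator()
    A = np.matrix([[2, 2], [3, 4], [7, 6]])
    y = np.array([1, 4, 3])
    print('estimate:', my_tau.estimate(a=A, y=y))
    print('fast_estimate:', my_tau.fast_estimate(A, y))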