def test_max_lambda_with_weights(self):
    '''Check that the fortran and python computations of max_lambda agree.

    The comparison is run on both a dense and a sparse design matrix,
    with observation weights passed to the fit.
    '''
    dense = np.random.random(size=(50, 10))
    sparse = csc_matrix(dense)
    coef = np.random.random(size=(10,))
    y = np.dot(dense, coef)
    obs_weights = np.random.uniform(size=(50,))
    for alpha in (.01, .5, 1):
        for design in (dense, sparse):
            enet = ElasticNet(alpha=alpha)
            enet.fit(design, y, weights=obs_weights)
            lams = enet.out_lambdas
            # The fortran code truncates the lambda path at lambda_max;
            # extrapolate it back from the first two reported lambdas.
            from_fortran = lams[1] * (lams[1] / lams[2])
            from_python = enet._max_lambda(design, y, weights=obs_weights)
            self.assertAlmostEqual(from_fortran, from_python, 4)
def test_max_lambda_with_weights(self):
    '''Verify fortran- and python-side max_lambda values match.

    Exercises dense and sparse matrices while sample weights are in
    play during fitting.
    '''
    X_dense = np.random.random(size=(50, 10))
    true_w = np.random.random(size=(10,))
    y = np.dot(X_dense, true_w)
    sw = np.random.uniform(size=(50,))
    designs = (X_dense, csc_matrix(X_dense))
    alphas = (.01, .5, 1)
    for alpha in alphas:
        for X in designs:
            model = ElasticNet(alpha=alpha)
            model.fit(X, y, weights=sw)
            ol = model.out_lambdas
            # Recover lambda_max from the geometric spacing of the
            # first two lambdas on the fortran-produced path.
            fortran_value = ol[1] * ol[1] / ol[2]
            python_value = model._max_lambda(X, y, weights=sw)
            self.assertAlmostEqual(fortran_value, python_value, 4)
def test_max_lambda(self):
    '''Check that the fortran and python computations of max_lambda agree.

    The comparison is run on both a dense and a sparse design matrix.
    The fortran implementation of max_lambda for alpha=0 is unknown, so
    that case is not tested here.
    '''
    dense = np.random.random(size=(50, 10))
    sparse = csc_matrix(dense)
    coef = np.random.random(size=(10,))
    y = np.dot(dense, coef)
    for alpha in (.01, .5, 1):
        for design in (dense, sparse):
            enet = ElasticNet(alpha=alpha)
            enet.fit(design, y)
            lams = enet.out_lambdas
            # Extrapolate lambda_max back from the first two lambdas
            # on the fortran-produced path.
            from_fortran = lams[1] * (lams[1] / lams[2])
            from_python = enet._max_lambda(design, y)
            self.assertAlmostEqual(from_fortran, from_python, 4)
def test_max_lambda(self):
    '''Verify fortran- and python-side max_lambda values match.

    Exercises dense and sparse matrices.  The alpha=0 case is skipped
    because the fortran code's max_lambda rule for it is unknown.
    '''
    X_dense = np.random.random(size=(50, 10))
    true_w = np.random.random(size=(10,))
    y = np.dot(X_dense, true_w)
    designs = (X_dense, csc_matrix(X_dense))
    alphas = (.01, .5, 1)
    for alpha in alphas:
        for X in designs:
            model = ElasticNet(alpha=alpha)
            model.fit(X, y)
            ol = model.out_lambdas
            # Recover lambda_max from the geometric spacing of the
            # first two lambdas reported by the fortran routine.
            fortran_value = ol[1] * ol[1] / ol[2]
            python_value = model._max_lambda(X, y)
            self.assertAlmostEqual(fortran_value, python_value, 4)
def test_edge_cases(self):
    '''Exercise edge cases in model specification.'''
    X = np.random.random(size=(50, 10))
    coef = np.random.random(size=(10,))
    y = np.dot(X, coef)

    # Edge case: a single lambda so large that every estimated
    # coefficient is zeroed out.  This used to break predict().
    enet = ElasticNet(alpha=1)
    enet.fit(X, y, lambdas=[10**5])
    _ = enet.predict(X)

    # Edge case: multiple lambdas all large enough to zero out every
    # estimated coefficient.  This also used to break predict().
    enet = ElasticNet(alpha=1)
    enet.fit(X, y, lambdas=[10**5, 2 * 10**5])
    _ = enet.predict(X)

    # Edge case: some predictors have zero variance.  This used to
    # break the lambda max calculation.
    X = np.random.random(size=(50, 10))
    X[:, 2] = 0
    X[:, 8] = 1
    y = np.dot(X, coef)
    enet = ElasticNet(alpha=.1)
    enet.fit(X, y)
    lams = enet.out_lambdas
    from_fortran = lams[1] * (lams[1] / lams[2])
    from_python = enet._max_lambda(X, y)
    self.assertAlmostEqual(from_fortran, from_python, 4)

    # Edge case: all predictors have zero variance, which is a
    # specification error and must raise.
    with self.assertRaises(ValueError):
        X = np.ones(shape=(50, 10))
        enet = ElasticNet(alpha=.1)
        enet.fit(X, y)
def test_edge_cases(self):
    '''Regression tests for edge cases in model specification.'''
    X = np.random.random(size=(50, 10))
    w = np.random.random(size=(10,))
    y = np.dot(X, w)

    # A single huge lambda zeroes every estimated coefficient;
    # predict() used to choke on this.
    model = ElasticNet(alpha=1)
    model.fit(X, y, lambdas=[10**5])
    _ = model.predict(X)

    # Several huge lambdas, all zeroing the estimated coefficients;
    # predict() used to choke on this as well.
    model = ElasticNet(alpha=1)
    model.fit(X, y, lambdas=[10**5, 2 * 10**5])
    _ = model.predict(X)

    # Zero-variance predictors used to break the lambda max
    # calculation; pin two columns to constants and re-check the
    # fortran/python agreement.
    X = np.random.random(size=(50, 10))
    X[:, 2] = 0
    X[:, 8] = 1
    y = np.dot(X, w)
    model = ElasticNet(alpha=.1)
    model.fit(X, y)
    ol = model.out_lambdas
    fortran_value = ol[1] * ol[1] / ol[2]
    python_value = model._max_lambda(X, y)
    self.assertAlmostEqual(fortran_value, python_value, 4)

    # When every predictor has zero variance the specification is
    # invalid and fitting must raise ValueError.
    with self.assertRaises(ValueError):
        X = np.ones(shape=(50, 10))
        model = ElasticNet(alpha=.1)
        model.fit(X, y)