def test_confidenceintervals(self):
    # NOTE: stata rounds residuals (at least) to sig digits so approx_equal
    conf1 = self.res1.conf_int()
    conf2 = self.res2.conf_int()
    for i in range(len(conf1)):
        assert_approx_equal(conf1[i][0], conf2[i][0],
                            self.decimal_confidenceintervals)
        assert_approx_equal(conf1[i][1], conf2[i][1],
                            self.decimal_confidenceintervals)
def test_masked_background():
    data = 0.1 * np.ones((6, 6))
    data[1, 1] = 1.
    data[4, 1] = 1.
    data[1, 4] = 1.
    data[4, 4] = 1.

    # np.bool was removed in NumPy 1.24; use the builtin bool as the dtype.
    mask = np.zeros((6, 6), dtype=bool)

    # Background array without mask
    sky = sep.Background(data, bw=3, bh=3, fw=1, fh=1)
    bkg1 = sky.back()

    # Background array with an all-False mask
    sky = sep.Background(data, mask=mask, bw=3, bh=3, fw=1, fh=1)
    bkg2 = sky.back()

    # An all-False mask should give the same result as no mask
    assert_allclose(bkg1, bkg2)

    # Masking the high pixels should give a flat background
    mask[1, 1] = True
    mask[4, 1] = True
    mask[1, 4] = True
    mask[4, 4] = True
    sky = sep.Background(data, mask=mask, bw=3, bh=3, fw=1, fh=1)
    assert_approx_equal(sky.globalback, 0.1)
    assert_allclose(sky.back(), 0.1 * np.ones((6, 6)))
def test_HC0_errors(self):
    # They are split up because the copied results do not have any
    # DECIMAL_4 places for the last place.
    assert_almost_equal(self.res1.HC0_se[:-1], self.res2.HC0_se[:-1],
                        DECIMAL_4)
    assert_approx_equal(np.round(self.res1.HC0_se[-1]),
                        self.res2.HC0_se[-1])
def test_tica_score_1():
    X = random.randn(100, 5)
    for n in range(1, 5):
        tica = tICA(n_components=n, shrinkage=0)
        tica.fit([X])
        assert_approx_equal(tica.score([X]), tica.eigenvalues_.sum())
        assert_approx_equal(tica.score([X]), tica.score_)
def test_dec2dec():
    """Test dec2dec against the astropy conversion."""
    # Test against the astropy calculations
    for dec in ['+14:21:45.003', '-99 04 22', '-00 01 23.456', '00 01']:
        ans = at.dec2dec(dec)
        desired = Angle(dec, unit=u.degree).degree
        assert_approx_equal(ans, desired,
                            err_msg="{0} != {1}".format(ans, desired))
def test_symmetry(self):
    # Test that a basic V-cycle yields a symmetric linear operator. Common
    # reasons for failure are problems with using the same rho for the
    # pre/post-smoothers and using the same block_D_inv for the
    # pre/post-smoothers.
    n = 500
    A = poisson((n,), format='csr')
    smoothers = [('gauss_seidel', {'sweep': 'symmetric'}),
                 ('schwarz', {'sweep': 'symmetric'}),
                 ('block_gauss_seidel', {'sweep': 'symmetric'}),
                 'jacobi', 'block_jacobi']
    # scipy.hstack and scipy.rand are deprecated aliases; use the numpy
    # equivalents directly.
    Bs = [np.ones((n, 1)),
          np.hstack((np.ones((n, 1)),
                     np.arange(1, n + 1, dtype='float').reshape(-1, 1)))]

    for smoother in smoothers:
        for B in Bs:
            ml = rootnode_solver(A, B, max_coarse=10,
                                 presmoother=smoother,
                                 postsmoother=smoother)
            P = ml.aspreconditioner()
            x = np.random.rand(n,)
            y = np.random.rand(n,)
            # A symmetric operator satisfies <P x, y> == <x, P y>
            assert_approx_equal(np.dot(P * x, y), np.dot(x, P * y))
def test_cuboid(self):
    self.geo.add_model(propname='throat.perimeter',
                       model=mods.cuboid,
                       regen_mode='normal')
    a = np.array([0.4])
    b = np.unique(self.geo['throat.perimeter'])
    assert_approx_equal(a, b)
def test_normalization(self):
    # set up
    x = arange(12).reshape((3, 4))
    result = analysis.discrete_trunc_t_logpdf(x, 4, range(12))
    # make sure normalized
    assert_approx_equal(sum(exp(result)), 1)
def test_create_Background_dict_1(self):
    # name of file in tests directory
    sample_background_1 = "sample_background_data.csv"
    # joins the absolute path of the tests directory with
    # "sample_background_data.csv"
    sample_background_1 = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), sample_background_1)
    twit = Twords()
    twit.background_path = sample_background_1
    twit.create_Background_dict()

    # read in sample background data manually to compare
    def read_data(data):
        with open(data, 'r') as f:
            data = [row for row in csv.reader(f.read().splitlines())]
        return data

    background_data = read_data(sample_background_1)
    background_data = background_data[1:]
    # `unicode` exists only on Python 2; on Python 3 use str
    background_dict = {str(line[0]): (float(line[2]), int(line[1]))
                       for line in background_data}
    for key in background_dict.keys():
        # compare frequency rate - a float
        assert_approx_equal(background_dict[key][0],
                            twit.background_dict[key][0], 10)
        # compare occurrences - an integer
        assert background_dict[key][1] == twit.background_dict[key][1]
def test_circle(self):
    self.geo.add_model(propname='pore.volume',
                       model=mods.circle,
                       regen_mode='normal')
    a = np.array([3.14159265 / 4 * 1.05**2])
    b = np.unique(self.geo['pore.volume'])
    assert_approx_equal(a, b)
def test_square(self):
    self.geo.add_model(propname='pore.volume',
                       model=mods.square,
                       regen_mode='normal')
    a = np.array([1.0 * 1.05**2])
    b = np.unique(self.geo['pore.volume'])
    assert_approx_equal(a, b)
def test_del_operate_on_gaussian_returns_s_orbital_2(self):
    primitive_gaussian = PrimitiveBasis(0.15432897, 3.42525091,
                                        (0, 0, 0.7316), (0, 0, 0))
    array = PrimitiveBasisFactory.del_operator(primitive_gaussian)
    testing.assert_approx_equal(array[1].contraction, -23.46468759, 7)
    # assertEquals is a deprecated alias of assertEqual
    self.assertEqual(array[1].exponent, 3.42525091)
    self.assertEqual(array[1].integral_exponents, (2, 0, 0))
    self.assertEqual(array[1].coordinates, (0, 0, 0.7316))
def check_We_model(D, rho_gas, Q_gas, mu_gas, sigma_gas, rho_oil, Q_oil,
                   mu_oil, sigma_oil, rho, de_gas, de_oil):
    """
    Check the solution for a modified We-number model
    """
    # Get the mass fluxes of gas and oil
    md_gas = Q_gas * rho_gas
    md_oil = Q_oil * rho_oil

    # Compute the sizes from the model
    d50_gas, d50_oil = sintef.modified_We_model(D, rho_gas, md_gas, mu_gas,
                                                sigma_gas, rho_oil, md_oil,
                                                mu_oil, sigma_oil, rho)

    # Check the model result; an exact-equality assertion before the
    # approximate check would defeat its purpose, so only the approximate
    # comparison is used here.
    if d50_gas:
        assert_approx_equal(d50_gas, de_gas, significant=6)
    else:
        assert d50_gas is None
    if d50_oil:
        assert_approx_equal(d50_oil, de_oil, significant=6)
    else:
        assert d50_oil is None
def __init__(self, distr):
    self.total = distr.old_total
    self.context = distr.context
    probs = distr.values()
    assert_approx_equal(sum(probs), 1.)
    self.entropy = -1. * sum([p * np.log(p) for p in probs])
def case(chan1, chan2, expected, significant=4):
    # We again take a generous tolerance so that we don't kill off
    # SCS solvers.
    assert_approx_equal(
        dnorm(chan1, chan2), expected, significant=significant
    )
def test_normalize(self):
    # Test the normalize option of Lomb-Scargle.

    # Input parameters
    ampl = 2.
    w = 1.
    phi = 0.5 * np.pi
    nin = 100
    nout = 1000
    p = 0.7  # Fraction of points to select

    # Randomly select a fraction of an array with timesteps
    np.random.seed(2353425)
    r = np.random.rand(nin)
    t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p]

    # Plot a sine wave for the selected times
    x = ampl * np.sin(w*t + phi)

    # Define the array of frequencies for which to compute the periodogram
    f = np.linspace(0.01, 10., nout)

    # Calculate Lomb-Scargle periodogram
    pgram = lombscargle(t, x, f)
    pgram2 = lombscargle(t, x, f, normalize=True)

    # check if normalization works as expected
    assert_allclose(pgram * 2 / np.dot(x, x), pgram2)
    assert_approx_equal(np.max(pgram2), 1.0, significant=2)
def get_profile(data, z_col, z_start, p_col, P, z_min, z_max, nr, nc):
    """
    Run the ambient.extract_profile function and test that the data are
    correctly parsed per the inputs given above.
    """
    # Apply the profile extraction function
    prof_data = ambient.extract_profile(data, z_col=z_col, z_start=z_start,
                                        p_col=p_col, P_atm=P)

    # Check that the returned profile extends to the free surface
    assert prof_data[0, z_col] == 0.0

    # Check that the profile is clipped at the expected depths
    assert_approx_equal(prof_data[1, z_col], z_min, significant=6)
    assert_approx_equal(prof_data[-1, z_col], z_max, significant=6)

    # Check that the returned profile is the right shape and data type
    assert prof_data.shape[0] == nr
    if nc is not None:
        assert prof_data.shape[1] == nc
    assert isinstance(prof_data, np.ndarray)

    # Check that the returned profile is in ascending order
    for i in range(1, prof_data.shape[0]):
        assert prof_data[i, z_col] > prof_data[i-1, z_col]

    # Send back the extracted profile
    return prof_data
def test_modelparams_obj():
    """
    Test the behavior of the `ModelParams` object

    Test the instantiation and attribute data for the `ModelParams` object
    of the `stratified_plume_model` module.
    """
    # Get the ambient CTD data
    profile = get_profile()

    # Initialize the ModelParams object
    p = stratified_plume_model.ModelParams(profile)

    # Check if the attributes are set correctly
    assert_approx_equal(p.rho_r, 1031.035855535142, significant=6)
    assert p.alpha_1 == 0.055
    assert p.alpha_2 == 0.110
    assert p.alpha_3 == 0.110
    assert p.lambda_2 == 1.00
    assert p.epsilon == 0.015
    assert p.qdis_ic == 0.1
    assert p.c1 == 0.
    assert p.c2 == 1.
    assert p.fe == 0.1
    assert p.gamma_i == 1.10
    assert p.gamma_o == 1.10
    assert p.Fr_0 == 1.6
    assert p.Fro_0 == 0.1
    assert p.nwidths == 1
    assert p.naverage == 1
    assert p.g == 9.81
    assert p.Ru == 8.314510
def test_cylinder(self):
    self.geo.add_model(propname='throat.perimeter',
                       model=mods.cylinder,
                       regen_mode='normal')
    a = np.array([0.31415927])
    b = np.unique(self.geo['throat.perimeter'])
    assert_approx_equal(a, b)
def test_ra2dec():
    """Test ra2dec against the astropy conversion."""
    # Test against the astropy calculations
    for ra in ['14:21:45.003', '-12 04 22', '-00 01 12.003']:
        ans = at.ra2dec(ra)
        desired = Angle(ra, unit=u.hourangle).hour * 15
        # The message must be passed as err_msg; the third positional
        # argument of assert_approx_equal is `significant`.
        assert_approx_equal(ans, desired,
                            err_msg="{0} != {1}".format(ans, desired))
def test_rectangle(self):
    self.geo.add_model(propname='throat.perimeter',
                       model=mods.rectangle,
                       regen_mode='normal')
    a = np.array([1.0])
    b = np.unique(self.geo['throat.perimeter'])
    assert_approx_equal(a, b)
def test_EllipsoidRadius():
    # Name the instance `particle` rather than `np`, which would shadow
    # the conventional numpy alias.
    particle = Nanoparticle()
    particle.setEllipsoidRadius(30.0, 30.0)
    assert_approx_equal(30.0, particle.getEllipsoidZRadius())
    assert_approx_equal(30.0, particle.getEllipsoidXYRadius())
    with raises(ValueError):
        particle.setEllipsoidRadius(0.0, 0.0)
def test_circle(self):
    self.geo.add_model(propname='pore.area',
                       model=mods.circle,
                       regen_mode='normal')
    a = np.array([1.0])
    b = np.unique(self.geo['pore.area'])
    assert_approx_equal(a, b)
def test_amplitude(self):
    """Test if the height of the peak in a normalized Lomb-Scargle
    periodogram corresponds to the amplitude of the generated input
    signal.
    """
    # Input parameters
    ampl = 2.
    w = 1.
    phi = 0.5 * np.pi
    nin = 100
    nout = 1000
    p = 0.7  # Fraction of points to select

    # Randomly select a fraction of an array with timesteps
    np.random.seed(2353425)
    r = np.random.rand(nin)
    t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p]

    # Plot a sine wave for the selected times
    x = ampl * np.sin(w*t + phi)

    # Define the array of frequencies for which to compute the periodogram
    f = np.linspace(0.01, 10., nout)

    # Calculate Lomb-Scargle periodogram
    pgram = lombscargle(t, x, f)

    # Normalize: for a pure sinusoid the unnormalized peak is about
    # N * ampl**2 / 4, so sqrt(4 * pgram / N) recovers the amplitude
    pgram = np.sqrt(4 * pgram / t.shape[0])

    # Check if the difference between the periodogram peak and the input
    # amplitude is within the requested accuracy
    assert_approx_equal(np.max(pgram), ampl, significant=2)
def test_create_evoked_physio_signal(self):
    import pyhrf.paradigm
    phy_params = phy.PHY_PARAMS_FRISTON00
    tr = 1.
    duration = 20.
    ne = np.array([[10., 5.]])
    nb_conds, nb_vox = ne.shape
    # one single stimulation at the beginning
    paradigm = pyhrf.paradigm.Paradigm({'c': [np.array([0.])]},
                                       [duration],
                                       {'c': [np.array([1.])]})
    s, f, hbr, cbv = phy.create_evoked_physio_signals(phy_params, paradigm,
                                                      ne, tr)
    # shape of a signal: (nb_vox, nb_scans)
    if 0:  # debug plotting, disabled
        import matplotlib.pyplot as plt
        t = np.arange(f[0].size) * tr
        plt.plot(t, f[0])
        plt.title('inflow')
        plt.show()
    self.assertEqual(s.shape,
                     (paradigm.get_rastered(tr)['c'][0].size, nb_vox))
    # check signal causality:
    self.assertEqual(f[0, 0], 1.)
    npt.assert_approx_equal(f[-1, 0], 1., significant=2)
    # non-regression test:
    self.assertEqual(np.argmax(f[:, 0]) * tr, 2)
def testcompare(m1, m2):
    from numpy.testing import assert_almost_equal, assert_approx_equal
    decimal = 12

    # inverse
    assert_almost_equal(m1.minv, m2.minv, decimal=decimal)

    # matrix half and inverse half
    # fix sign in test; should this be standardized?
    s1 = np.sign(m1.mhalf.sum(1))[:, None]
    s2 = np.sign(m2.mhalf.sum(1))[:, None]
    scorr = s1 / s2
    assert_almost_equal(m1.mhalf, m2.mhalf * scorr, decimal=decimal)
    assert_almost_equal(m1.minvhalf, m2.minvhalf, decimal=decimal)

    # eigenvalues, eigenvectors
    evals1, evecs1 = m1.meigh
    evals2, evecs2 = m2.meigh
    assert_almost_equal(evals1, evals2, decimal=decimal)

    # normalization can be different: evecs in columns
    s1 = np.sign(evecs1.sum(0))
    s2 = np.sign(evecs2.sum(0))
    scorr = s1 / s2
    assert_almost_equal(evecs1, evecs2 * scorr, decimal=decimal)

    # determinant
    assert_approx_equal(m1.mdet, m2.mdet, significant=13)
    assert_approx_equal(m1.mlogdet, m2.mlogdet, significant=13)
def test_calculate_log_ratios_calculates_correct_values():
    tpms = _get_test_tpms()
    t.calculate_log_ratios(tpms)
    for index, row in tpms.iterrows():
        val = np.log10(CALC_TPMS_VALS[index] /
                       float(REAL_TPMS_VALS[index]))
        npt.assert_approx_equal(row[t.LOG10_RATIO], val)
def test_laminar():
    for Re in [1, 10, 100, 1000]:
        assert_approx_equal(
            friction_factor(Re, 1), 64. / Re, significant=2
        )
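# Hedged illustration (not part of the test suite above): in laminar pipe
# flow the Darcy friction factor reduces to the closed form f = 64/Re,
# which is exactly the reference value the test above compares against.
# A minimal sketch of that relation:
def darcy_friction_factor_laminar(Re):
    """Darcy friction factor for laminar pipe flow (Re below ~2300)."""
    return 64.0 / Re

# Example: Re = 1000 gives f = 0.064.
assert abs(darcy_friction_factor_laminar(1000) - 0.064) < 1e-12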
def test_ntc_resistance(self):
    # Reference values taken from the Murata NCP15WB473D03RC datasheet
    assert_approx_equal(ntc_resistance("47k", "4050K", "25°C"), 47000)
    assert_approx_equal(ntc_resistance("47k", "4050K", "0°C"), 162942.79)
    assert_approx_equal(ntc_resistance("47k", "4050K", "-18°C"), 463773.791)
    assert_approx_equal(ntc_resistance("47k", "4050K", "5°C"), 124819.66)
    assert_approx_equal(ntc_resistance("47k", "4050K", "60°C"), 11280.407)
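# Hedged sketch (assuming ntc_resistance implements the standard two-point
# B-parameter thermistor model, which the reference values above are
# numerically consistent with): R(T) = R25 * exp(B * (1/T - 1/T25)) with
# the temperatures in kelvin. This helper is illustrative only.
import math

def ntc_resistance_beta(r25_ohms, b_kelvin, t_celsius):
    """B-parameter NTC thermistor model referenced to 25 degC."""
    t = t_celsius + 273.15
    t25 = 25.0 + 273.15
    return r25_ohms * math.exp(b_kelvin * (1.0 / t - 1.0 / t25))

# R(0 degC) for a 47k / B=4050 part evaluates to roughly 163 kOhm, in line
# with the 162942.79 Ohm reference value used in the test above.
print(ntc_resistance_beta(47e3, 4050.0, 0.0))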
def test_model_obj():
    """
    Test the object behavior for the `Model` object

    Test the instantiation and attribute data for the `Model` object of
    the `single_bubble_model` module.

    Notes
    -----
    This test function only tests instantiation from a netCDF file of
    ambient CTD data and does not test any of the object methods.
    Instantiation from simulation data and testing of the object methods
    is done in the remaining test functions.

    See Also
    --------
    test_simulation

    """
    # Get the ambient profile data
    profile = get_profile()

    # Initialize a Model object
    sbm = single_bubble_model.Model(profile)

    # Check the model attributes
    assert_approx_equal(sbm.p.rho_r, 1031.035855535142, significant=6)
    (T, S, P) = profile.get_values(1000., ['temperature', 'salinity',
                                           'pressure'])
    (Tp, Sp, Pp) = sbm.profile.get_values(1000., ['temperature',
                                                  'salinity', 'pressure'])
    assert Tp == T
    assert Sp == S
    assert Pp == P
def test_model_perf(self):
    np_testing.assert_approx_equal(
        self.exercise.classifier.history.history['val_accuracy'][0],
        self.classifier.history.history['val_accuracy'][0],
        significant=1)
def test_from_txt():
    """
    Test the ambient data methods on simple text files.

    This unit test reads in the text files ./data/C.dat and ./data/T.dat
    using `numpy.loadtxt` and then uses this data to test the data
    manipulation and storage methods in ambient.py.
    """
    cdat_file = os.path.join(DATA_DIR, 'C.dat')
    tdat_file = os.path.join(DATA_DIR, 'T.dat')

    # Load in the raw data using np.loadtxt
    C_raw = np.loadtxt(cdat_file, comments='%')
    T_raw = np.loadtxt(tdat_file, comments='%')

    # Clean the profile to remove depth reversals
    C_data = get_profile(C_raw, 1, 25, None, 0., 1.0256410e+01,
                         8.0000000e+02, 34, 2)
    T_data = get_profile(T_raw, 1, 25, None, 0., 1.0831721e+01,
                         7.9922631e+02, 34, 2)

    # Convert the data to standard units
    C_data, C_units = get_units(C_data, ['psu', 'm'], 34, 2, ['psu', 'm'])
    T_data, T_units = get_units(T_data, ['deg C', 'm'], 34, 2, ['K', 'm'])

    # Create an empty netCDF4-classic dataset to store the CTD information
    nc_file = os.path.join(OUTPUT_DIR, 'test_DS.nc')
    summary = 'Py.Test test file'
    source = 'Profiles from the SINTEF DeepSpill Report'
    sea_name = 'Norwegian Sea'
    p_lat = 64.99066
    p_lon = 4.84725
    p_time = date2num(datetime(2000, 6, 27, 12, 0, 0),
                      units='seconds since 1970-01-01 00:00:00 0:00',
                      calendar='julian')
    nc = check_nc_db(nc_file, summary, source, sea_name, p_lat, p_lon,
                     p_time)

    # Fill the netCDF4-classic dataset with the data in the salinity
    # profile
    symbols = ['salinity', 'z']
    comments = ['measured', 'measured']
    long_names = ['Practical salinity', 'depth below the water surface']
    std_names = ['salinity', 'depth']
    nc = get_filled_nc_db(nc, C_data, symbols, C_units, comments, 1,
                          long_names, std_names)

    # Because the temperature data will be interpolated to the vertical
    # coordinates in the salinity profile, insert the data and test that
    # the insertion worked correctly by hand
    symbols = ['temperature', 'z']
    comments = ['measured', 'measured']
    long_names = ['Absolute temperature', 'depth below the water surface']
    std_names = ['temperature', 'depth']
    nc = ambient.fill_nc_db(nc, T_data, symbols, T_units, comments, 1)
    assert_array_almost_equal(nc.variables['z'][:], C_data[:, 1],
                              decimal=6)
    z = nc.variables['z'][:]
    T = nc.variables['temperature'][:]
    f = interp1d(z, T)
    for i in range(T_data.shape[0]):
        assert_approx_equal(T_data[i, 0], f(T_data[i, 1]), significant=5)
    assert nc.variables['temperature'].comment == comments[0]

    # Calculate and insert the pressure data
    z = nc.variables['z'][:]
    T = nc.variables['temperature'][:]
    S = nc.variables['salinity'][:]
    P = ambient.compute_pressure(z, T, S, 0)
    P_data = np.vstack((z, P)).transpose()
    nc = ambient.fill_nc_db(nc, P_data, ['z', 'pressure'], ['m', 'Pa'],
                            ['measured', 'computed'], 0)

    # Test the Profile object
    ds = get_profile_obj(nc, [], [])

    # Close down the pipes to the netCDF dataset files
    ds.nc.close()

    return ds
def test_all(self):

    d = macrodata.load_pandas().data
    #import datasetswsm.greene as g
    #d = g.load('5-1')

    #growth rates
    gs_l_realinv = 400 * np.diff(np.log(d['realinv'].values))
    gs_l_realgdp = 400 * np.diff(np.log(d['realgdp'].values))

    #simple diff, not growthrate, I want heteroscedasticity later for testing
    endogd = np.diff(d['realinv'])
    exogd = add_constant(np.c_[np.diff(d['realgdp'].values),
                               d['realint'][:-1].values])

    endogg = gs_l_realinv
    exogg = add_constant(np.c_[gs_l_realgdp, d['realint'][:-1].values])

    res_ols = OLS(endogg, exogg).fit()
    #print res_ols.params

    mod_g1 = GLSAR(endogg, exogg, rho=-0.108136)
    res_g1 = mod_g1.fit()
    #print res_g1.params

    mod_g2 = GLSAR(endogg, exogg, rho=-0.108136)   #-0.1335859) from R
    res_g2 = mod_g2.iterative_fit(maxiter=5)
    #print res_g2.params

    rho = -0.108136

    # coefficient, std. error, t-ratio, p-value, 95% CONFIDENCE INTERVAL
    partable = np.array([
        [-9.50990,  0.990456, -9.602, 3.65e-018, -11.4631, -7.55670],    # ***
        [ 4.37040,  0.208146, 21.00,  2.93e-052,   3.95993,  4.78086],   # ***
        [-0.579253, 0.268009, -2.161, 0.0319,     -1.10777, -0.0507346]  # **
        ])

    #Statistics based on the rho-differenced data:
    result_gretl_g1 = dict(
        endog_mean=("Mean dependent var",   3.113973),
        endog_std=("S.D. dependent var",   18.67447),
        ssr=("Sum squared resid",       22530.90),
        mse_resid_sqrt=("S.E. of regression", 10.66735),
        rsquared=("R-squared",              0.676973),
        rsquared_adj=("Adjusted R-squared", 0.673710),
        fvalue=("F(2, 198)",              221.0475),
        f_pvalue=("P-value(F)",             3.56e-51),
        resid_acf1=("rho",                 -0.003481),
        dw=("Durbin-Watson",                1.993858))

    #fstatistic, p-value, df1, df2
    reset_2_3 = [5.219019, 0.00619, 2, 197, "f"]
    reset_2 = [7.268492, 0.00762, 1, 198, "f"]
    reset_3 = [5.248951, 0.023, 1, 198, "f"]
    #LM-statistic, p-value, df
    arch_4 = [7.30776, 0.120491, 4, "chi2"]

    #multicollinearity
    vif = [1.002, 1.002]
    cond_1norm = 6862.0664
    determinant = 1.0296049e+009
    reciprocal_condition_number = 0.013819244

    #Chi-square(2): test-statistic, pvalue, df
    normality = [20.2792, 3.94837e-005, 2]

    #tests
    res = res_g1  #with rho from Gretl

    #basic
    assert_almost_equal(res.params, partable[:, 0], 4)
    assert_almost_equal(res.bse, partable[:, 1], 6)
    assert_almost_equal(res.tvalues, partable[:, 2], 2)

    assert_almost_equal(res.ssr, result_gretl_g1['ssr'][1], decimal=2)
    #assert_almost_equal(res.llf, result_gretl_g1['llf'][1], decimal=7) #not in gretl
    #assert_almost_equal(res.rsquared, result_gretl_g1['rsquared'][1], decimal=7) #FAIL
    #assert_almost_equal(res.rsquared_adj, result_gretl_g1['rsquared_adj'][1], decimal=7) #FAIL
    assert_almost_equal(np.sqrt(res.mse_resid),
                        result_gretl_g1['mse_resid_sqrt'][1], decimal=5)
    assert_almost_equal(res.fvalue, result_gretl_g1['fvalue'][1], decimal=4)
    assert_approx_equal(res.f_pvalue, result_gretl_g1['f_pvalue'][1],
                        significant=2)
    #assert_almost_equal(res.durbin_watson, result_gretl_g1['dw'][1], decimal=7) #TODO

    #arch
    #sm_arch = smsdia.acorr_lm(res.wresid**2, maxlag=4, autolag=None)
    sm_arch = smsdia.het_arch(res.wresid, maxlag=4)
    assert_almost_equal(sm_arch[0], arch_4[0], decimal=4)
    assert_almost_equal(sm_arch[1], arch_4[1], decimal=6)

    #tests
    res = res_g2  #with estimated rho

    #estimated lag coefficient
    assert_almost_equal(res.model.rho, rho, decimal=3)

    #basic
    assert_almost_equal(res.params, partable[:, 0], 4)
    assert_almost_equal(res.bse, partable[:, 1], 3)
    assert_almost_equal(res.tvalues, partable[:, 2], 2)

    assert_almost_equal(res.ssr, result_gretl_g1['ssr'][1], decimal=2)
    #assert_almost_equal(res.llf, result_gretl_g1['llf'][1], decimal=7) #not in gretl
    #assert_almost_equal(res.rsquared, result_gretl_g1['rsquared'][1], decimal=7) #FAIL
    #assert_almost_equal(res.rsquared_adj, result_gretl_g1['rsquared_adj'][1], decimal=7) #FAIL
    assert_almost_equal(np.sqrt(res.mse_resid),
                        result_gretl_g1['mse_resid_sqrt'][1], decimal=5)
    assert_almost_equal(res.fvalue, result_gretl_g1['fvalue'][1], decimal=0)
    assert_almost_equal(res.f_pvalue, result_gretl_g1['f_pvalue'][1],
                        decimal=6)
    #assert_almost_equal(res.durbin_watson, result_gretl_g1['dw'][1], decimal=7) #TODO

    c = oi.reset_ramsey(res, degree=2)
    compare_ftest(c, reset_2, decimal=(2, 4))
    c = oi.reset_ramsey(res, degree=3)
    compare_ftest(c, reset_2_3, decimal=(2, 4))

    #arch
    #sm_arch = smsdia.acorr_lm(res.wresid**2, maxlag=4, autolag=None)
    sm_arch = smsdia.het_arch(res.wresid, maxlag=4)
    assert_almost_equal(sm_arch[0], arch_4[0], decimal=1)
    assert_almost_equal(sm_arch[1], arch_4[1], decimal=2)

    '''
    Performing iterative calculation of rho...

                     ITER       RHO        ESS
                       1     -0.10734   22530.9
                       2     -0.10814   22530.9

    Model 4: Cochrane-Orcutt, using observations 1959:3-2009:3 (T = 201)
    Dependent variable: ds_l_realinv
    rho = -0.108136

                     coefficient   std. error   t-ratio    p-value
      -------------------------------------------------------------
      const           -9.50990      0.990456    -9.602    3.65e-018 ***
      ds_l_realgdp     4.37040      0.208146    21.00     2.93e-052 ***
      realint_1       -0.579253     0.268009    -2.161    0.0319    **

    Statistics based on the rho-differenced data:

    Mean dependent var   3.113973   S.D. dependent var   18.67447
    Sum squared resid    22530.90   S.E. of regression   10.66735
    R-squared            0.676973   Adjusted R-squared   0.673710
    F(2, 198)            221.0475   P-value(F)           3.56e-51
    rho                 -0.003481   Durbin-Watson        1.993858
    '''

    '''
    RESET test for specification (squares and cubes)
    Test statistic: F = 5.219019,
    with p-value = P(F(2,197) > 5.21902) = 0.00619

    RESET test for specification (squares only)
    Test statistic: F = 7.268492,
    with p-value = P(F(1,198) > 7.26849) = 0.00762

    RESET test for specification (cubes only)
    Test statistic: F = 5.248951,
    with p-value = P(F(1,198) > 5.24895) = 0.023:
    '''

    '''
    Test for ARCH of order 4

                 coefficient   std. error   t-ratio   p-value
      --------------------------------------------------------
      alpha(0)   97.0386       20.3234       4.775    3.56e-06 ***
      alpha(1)    0.176114      0.0714698    2.464    0.0146   **
      alpha(2)   -0.0488339     0.0724981   -0.6736   0.5014
      alpha(3)   -0.0705413     0.0737058   -0.9571   0.3397
      alpha(4)    0.0384531     0.0725763    0.5298   0.5968

      Null hypothesis: no ARCH effect is present
      Test statistic: LM = 7.30776
      with p-value = P(Chi-square(4) > 7.30776) = 0.120491:
    '''

    '''
    Variance Inflation Factors

    Minimum possible value = 1.0
    Values > 10.0 may indicate a collinearity problem

       ds_l_realgdp    1.002
          realint_1    1.002

    VIF(j) = 1/(1 - R(j)^2), where R(j) is the multiple correlation
    coefficient between variable j and the other independent variables

    Properties of matrix X'X:

     1-norm = 6862.0664
     Determinant = 1.0296049e+009
     Reciprocal condition number = 0.013819244
    '''

    '''
    Test for ARCH of order 4 -
      Null hypothesis: no ARCH effect is present
      Test statistic: LM = 7.30776
      with p-value = P(Chi-square(4) > 7.30776) = 0.120491

    Test of common factor restriction -
      Null hypothesis: restriction is acceptable
      Test statistic: F(2, 195) = 0.426391
      with p-value = P(F(2, 195) > 0.426391) = 0.653468

    Test for normality of residual -
      Null hypothesis: error is normally distributed
      Test statistic: Chi-square(2) = 20.2792
      with p-value = 3.94837e-005:
    '''

    #no idea what this is
    '''
    Augmented regression for common factor test
    OLS, using observations 1959:3-2009:3 (T = 201)
    Dependent variable: ds_l_realinv

                       coefficient   std. error   t-ratio    p-value
      ---------------------------------------------------------------
      const            -10.9481      1.35807      -8.062    7.44e-014 ***
      ds_l_realgdp       4.28893     0.229459     18.69     2.40e-045 ***
      realint_1         -0.662644    0.334872     -1.979    0.0492    **
      ds_l_realinv_1    -0.108892    0.0715042    -1.523    0.1294
      ds_l_realgdp_1     0.660443    0.390372      1.692    0.0923    *
      realint_2          0.0769695   0.341527      0.2254   0.8219

      Sum of squared residuals = 22432.8

    Test of common factor restriction

      Test statistic: F(2, 195) = 0.426391, with p-value = 0.653468
    '''

    ################ with OLS, HAC errors

    #Model 5: OLS, using observations 1959:2-2009:3 (T = 202)
    #Dependent variable: ds_l_realinv
    #HAC standard errors, bandwidth 4 (Bartlett kernel)

    #coefficient   std. error   t-ratio   p-value 95% CONFIDENCE INTERVAL
    #for confidence interval t(199, 0.025) = 1.972

    partable = np.array([
        [-9.48167,  1.17709,  -8.055, 7.17e-014, -11.8029, -7.16049],    # ***
        [ 4.37422,  0.328787, 13.30,  2.62e-029,   3.72587,  5.02258],   # ***
        [-0.613997, 0.293619, -2.091, 0.0378,     -1.19300, -0.0349939]  # **
        ])

    result_gretl_g1 = dict(
        endog_mean=("Mean dependent var",   3.257395),
        endog_std=("S.D. dependent var",   18.73915),
        ssr=("Sum squared resid",       22799.68),
        mse_resid_sqrt=("S.E. of regression", 10.70380),
        rsquared=("R-squared",              0.676978),
        rsquared_adj=("Adjusted R-squared", 0.673731),
        fvalue=("F(2, 199)",               90.79971),
        f_pvalue=("P-value(F)",             9.53e-29),
        llf=("Log-likelihood",           -763.9752),
        aic=("Akaike criterion",         1533.950),
        bic=("Schwarz criterion",        1543.875),
        hqic=("Hannan-Quinn",            1537.966),
        resid_acf1=("rho",                 -0.107341),
        dw=("Durbin-Watson",                2.213805))

    linear_logs = [1.68351, 0.430953, 2, "chi2"]
    #for logs: dropping 70 nan or incomplete observations, T=133
    #(res_ols.model.exog <= 0).any(1).sum() = 69  ?not 70
    linear_squares = [7.52477, 0.0232283, 2, "chi2"]

    #Autocorrelation, Breusch-Godfrey test for autocorrelation up to order 4
    lm_acorr4 = [1.17928, 0.321197, 4, 195, "F"]
    lm2_acorr4 = [4.771043, 0.312, 4, "chi2"]
    acorr_ljungbox4 = [5.23587, 0.264, 4, "chi2"]

    #break
    cusum_Harvey_Collier = [0.494432, 0.621549, 198, "t"]  #stats.t.sf(0.494432, 198)*2
    #see cusum results in files
    break_qlr = [3.01985, 0.1, 3, 196, "maxF"]  #TODO check this, max at 2001:4
    break_chow = [13.1897, 0.00424384, 3, "chi2"]  # break at 1984:1

    arch_4 = [3.43473, 0.487871, 4, "chi2"]

    normality = [23.962, 0.00001, 2, "chi2"]
    het_white = [33.503723, 0.000003, 5, "chi2"]
    het_breusch_pagan = [1.302014, 0.521520, 2, "chi2"]  #TODO: not available
    het_breusch_pagan_konker = [0.709924, 0.701200, 2, "chi2"]

    reset_2_3 = [5.219019, 0.00619, 2, 197, "f"]
    reset_2 = [7.268492, 0.00762, 1, 198, "f"]
    reset_3 = [5.248951, 0.023, 1, 198, "f"]  #not available

    cond_1norm = 5984.0525
    determinant = 7.1087467e+008
    reciprocal_condition_number = 0.013826504
    vif = [1.001, 1.001]

    names = 'date residual leverage influence DFFITS'.split()
    cur_dir = os.path.abspath(os.path.dirname(__file__))
    fpath = os.path.join(cur_dir,
                         'results/leverage_influence_ols_nostars.txt')
    lev = np.genfromtxt(fpath, skip_header=3, skip_footer=1,
                        converters={0: lambda s: s})
    #either numpy 1.6 or python 3.2 changed behavior
    if np.isnan(lev[-1]['f1']):
        lev = np.genfromtxt(fpath, skip_header=3, skip_footer=2,
                            converters={0: lambda s: s})
    lev.dtype.names = names

    res = res_ols  #for easier copying

    cov_hac = sw.cov_hac_simple(res, nlags=4, use_correction=False)
    bse_hac = sw.se_cov(cov_hac)

    assert_almost_equal(res.params, partable[:, 0], 5)
    assert_almost_equal(bse_hac, partable[:, 1], 5)
    #TODO

    assert_almost_equal(res.ssr, result_gretl_g1['ssr'][1], decimal=2)
    assert_almost_equal(res.llf, result_gretl_g1['llf'][1], decimal=4)  #not in gretl
    assert_almost_equal(res.rsquared, result_gretl_g1['rsquared'][1], decimal=6)  #FAIL
    assert_almost_equal(res.rsquared_adj, result_gretl_g1['rsquared_adj'][1], decimal=6)  #FAIL
    assert_almost_equal(np.sqrt(res.mse_resid),
                        result_gretl_g1['mse_resid_sqrt'][1], decimal=5)
    #f-value is based on cov_hac I guess
    #res2 = res.get_robustcov_results(cov_type='HC1')
    # TODO: fvalue differs from Gretl, trying any of the HCx
    #assert_almost_equal(res2.fvalue, result_gretl_g1['fvalue'][1], decimal=0) #FAIL
    #assert_approx_equal(res.f_pvalue, result_gretl_g1['f_pvalue'][1], significant=1) #FAIL
    #assert_almost_equal(res.durbin_watson, result_gretl_g1['dw'][1], decimal=7) #TODO

    c = oi.reset_ramsey(res, degree=2)
    compare_ftest(c, reset_2, decimal=(6, 5))
    c = oi.reset_ramsey(res, degree=3)
    compare_ftest(c, reset_2_3, decimal=(6, 5))

    linear_sq = smsdia.linear_lm(res.resid, res.model.exog)
    assert_almost_equal(linear_sq[0], linear_squares[0], decimal=6)
    assert_almost_equal(linear_sq[1], linear_squares[1], decimal=7)

    hbpk = smsdia.het_breuschpagan(res.resid, res.model.exog)
    assert_almost_equal(hbpk[0], het_breusch_pagan_konker[0], decimal=6)
    assert_almost_equal(hbpk[1], het_breusch_pagan_konker[1], decimal=6)

    hw = smsdia.het_white(res.resid, res.model.exog)
    assert_almost_equal(hw[:2], het_white[:2], 6)

    #arch
    #sm_arch = smsdia.acorr_lm(res.resid**2, maxlag=4, autolag=None)
    sm_arch = smsdia.het_arch(res.resid, maxlag=4)
    assert_almost_equal(sm_arch[0], arch_4[0], decimal=5)
    assert_almost_equal(sm_arch[1], arch_4[1], decimal=6)

    vif2 = [oi.variance_inflation_factor(res.model.exog, k)
            for k in [1, 2]]

    infl = oi.OLSInfluence(res_ols)
    #print np.max(np.abs(lev['DFFITS'] - infl.dffits[0]))
    #print np.max(np.abs(lev['leverage'] - infl.hat_matrix_diag))
    #print np.max(np.abs(lev['influence'] - infl.influence))  #just added this based on Gretl

    #just rough test, low decimal in Gretl output,
    assert_almost_equal(lev['residual'], res.resid, decimal=3)
    assert_almost_equal(lev['DFFITS'], infl.dffits[0], decimal=3)
    assert_almost_equal(lev['leverage'], infl.hat_matrix_diag, decimal=3)
    assert_almost_equal(lev['influence'], infl.influence, decimal=4)
def test_parse_angle(dim, expected_out):
    actual_out = vizier.core._parse_angle(dim)
    actual_unit, actual_value = actual_out
    expected_unit, expected_value = expected_out
    assert actual_unit == expected_unit
    npt.assert_approx_equal(actual_value, expected_value, significant=2)
def testCTerPhi(self):
    assert_approx_equal(174.160, calcPhi(UBI_CTER), 2)
def testCalcPhi(self):
    assert_approx_equal(87.723, calcPhi(UBI_GLY10), 2)
def test_HC0_errors(self):
    # They are split up because the copied results do not have any
    # DECIMAL_4 places for the last place.
    assert_almost_equal(self.res1.HC0_se[:-1], self.res2.HC0_se[:-1],
                        DECIMAL_4)
    assert_approx_equal(np.round(self.res1.HC0_se[-1]),
                        self.res2.HC0_se[-1])
def test_rectangle(self):
    self.geo.add_model(propname='throat.volume',
                       model=mods.rectangle)
    a = np.array([0.1])
    b = np.unique(self.geo['throat.volume'])
    assert_approx_equal(a, b)
def test_cube(self):
    self.geo.add_model(propname='throat.volume',
                       model=mods.cuboid)
    a = np.array([0.01])
    b = np.unique(self.geo['throat.volume'])
    assert_approx_equal(a, b)
def test_split():
    v1 = 3
    v2 = 5
    x1, x2 = split(v_left=v1, v_right=v2)
    nt.assert_allclose(np.array([x1, x2]), np.array([5.0, 3.0]) / 8.0)
    nt.assert_approx_equal(x1 + x2, 1.0)
def check_from_roms():
    """
    Test the ambient data methods on data read from ROMS.

    This unit test reads in a ROMS netCDF output file, extracts the
    profile information, and creates a new netCDF dataset and Profile
    class object for use by the TAMOC modeling suite.

    TODO (S. Socolofsky 7/15/2013): After fixing the octant.roms module
    to have monotonically increasing depth, try to reinstate this test by
    changing the function name from check_from_roms() to test_from_roms().
    I was also having problems with being allowed to use the THREDDS
    netCDF file with py.test. I could run the test under ipython, but not
    under py.test.
    """
    # Get a path to a ROMS dataset on a THREDDS server
    nc_roms = 'http://barataria.tamu.edu:8080/thredds/dodsC/' + \
              'ROMS_Daily/08122012/ocean_his_08122012_24.nc'

    # Prepare the remaining inputs to the get_nc_db_from_roms() function
    # call
    nc_file = os.path.join(OUTPUT_DIR, 'test_roms.nc')
    t_idx = 0
    j_idx = 400
    i_idx = 420
    chem_names = ['dye_01', 'dye_02']

    (nc, nc_roms) = ambient.get_nc_db_from_roms(nc_roms, nc_file, t_idx,
                                                j_idx, i_idx, chem_names)

    # Check that the data are inserted correctly from ROMS into the new
    # netCDF dataset
    assert nc.summary == 'ROMS Simulation Data'
    assert nc.sea_name == 'ROMS'
    assert nc.variables['z'][:].shape[0] == 51
    assert nc.variables['z'][0] == nc.variables['z'].valid_min
    assert nc.variables['z'][-1] == nc.variables['z'].valid_max
    assert_approx_equal(nc.variables['temperature'][0],
                        303.24728393554688, significant=6)
    assert_approx_equal(nc.variables['salinity'][0], 36.157352447509766,
                        significant=6)
    assert_approx_equal(nc.variables['pressure'][0], 101325.0,
                        significant=6)
    assert_approx_equal(nc.variables['dye_01'][0], 3.4363944759034656e-22,
                        significant=6)
    assert_approx_equal(nc.variables['dye_02'][0], 8.8296093939330156e-21,
                        significant=6)
    assert_approx_equal(nc.variables['temperature'][-1],
                        290.7149658203125, significant=6)
    assert_approx_equal(nc.variables['salinity'][-1], 35.829414367675781,
                        significant=6)
    assert_approx_equal(nc.variables['pressure'][-1], 3217586.2927573984,
                        significant=6)
    assert_approx_equal(nc.variables['dye_01'][-1], 8.7777050221856635e-22,
                        significant=6)
    assert_approx_equal(nc.variables['dye_02'][-1], 4.0334050451121613e-20,
                        significant=6)

    # Create a Profile object from this netCDF dataset and test the
    # Profile methods
    roms = get_profile_obj(nc, chem_names, ['kg/m^3', 'kg/m^3'])

    # Close the pipe to the netCDF dataset
    roms.nc.close()
    nc_roms.close()
def test_results_bootstrapped(self):
    results = cbook.boxplot_stats(self.data, bootstrap=10000)
    res = results[0]
    for key, value in self.known_bootstrapped_ci.items():
        assert_approx_equal(res[key], value)
def testCalcPsi(self):
    assert_approx_equal(14.386, calcPsi(UBI_GLY10), 2)
def test_HC3_errors(self):
    assert_almost_equal(self.res1.HC3_se[:-1], self.res2.HC3_se[:-1],
                        DECIMAL_4)
    assert_approx_equal(self.res1.HC3_se[-1], self.res2.HC3_se[-1])
def testNTerPsi(self):
    assert_approx_equal(153.553, calcPsi(UBI_NTER), 2)
def test_aic(self):
    assert_approx_equal(self.res1.aic + 2, self.res2.aic, 3)
def nominal_case_returns_expected_values(self):
    preflare_irradiance = determine_preflare_irradiance(
        self.light_curve.copy(),
        estimated_time_of_peak_start=self.flare_peak_time)
    assert_approx_equal(preflare_irradiance, 5.85e-5, significant=3)
def test_bic(self):
    assert_approx_equal(self.res1.bic, self.res2.bic, 2)
def assert_approx_equal(self, *args, **kwargs):
    """
    Raise an AssertionError if two items are not equal up to significant
    digits.
    """
    return assert_approx_equal(*args, **kwargs)
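# Hedged illustration of the numpy.testing.assert_approx_equal semantics
# wrapped above: both values are first rescaled by a shared power of ten,
# and the assertion fails once they differ by 10**-(significant - 1) or
# more after rescaling. This snippet is illustrative only.
from numpy.testing import assert_approx_equal as _npt_approx_equal

_npt_approx_equal(1.2345678, 1.2345679, significant=6)  # passes
try:
    _npt_approx_equal(1.23, 1.24, significant=3)
except AssertionError:
    pass  # differs in the third significant digit, as expected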
def get_profile_obj(nc, chem_names, chem_units):
    """
    Check that an ambient.Profile object is created correctly and that
    the methods operate as expected.
    """
    if isinstance(chem_names, str):
        chem_names = [chem_names]
    if isinstance(chem_units, str):
        chem_units = [chem_units]

    # Create the profile object
    prf = ambient.Profile(nc, chem_names=chem_names)

    # Check the chemical names and units are correct
    for i in range(len(chem_names)):
        assert prf.chem_names[i] == chem_names[i]
    assert prf.nchems == len(chem_names)

    # Check the error criteria on the interpolator
    assert prf.err == 0.01

    # Check the get_units method
    name_list = ['temperature', 'salinity', 'pressure'] + chem_names
    unit_list = ['K', 'psu', 'Pa'] + chem_units
    for i in range(len(name_list)):
        assert prf.get_units(name_list[i])[0] == unit_list[i]
    units = prf.get_units(name_list)
    for i in range(len(name_list)):
        assert units[i] == unit_list[i]

    # Check the interpolator function ...
    # Pick a point in the middle of the raw dataset and read off the
    # depth and the values of all the variables
    nz = prf.nc.variables['z'].shape[0] // 2
    z = prf.z[nz]
    y = prf.y[nz, :]

    # Get an interpolated set of values at this same elevation
    yp = prf.f(z)

    # Check if the results are within the level of error expected by err
    for i in range(len(name_list)):
        assert np.abs((yp[i] - y[i]) / yp[i]) <= prf.err

    # Next, check that the variables returned by the get_values function
    # are the variables we expect
    Tp, Sp, Pp = prf.get_values(z, ['temperature', 'salinity', 'pressure'])
    T = prf.nc.variables['temperature'][nz]
    S = prf.nc.variables['salinity'][nz]
    P = prf.nc.variables['pressure'][nz]
    assert np.abs((Tp - T) / T) <= prf.err
    assert np.abs((Sp - S) / S) <= prf.err
    assert np.abs((Pp - P) / P) <= prf.err
    if prf.nchems > 0:
        c = np.zeros(prf.nchems)
        cp = np.zeros(prf.nchems)
        for i in range(prf.nchems):
            c[i] = prf.nc.variables[chem_names[i]][nz]
            cp[i] = prf.get_values(z, chem_names[i])
            assert np.abs((cp[i] - c[i]) / c[i]) <= prf.err

    # Test the append() method by inserting the temperature data as a new
    # profile, this time in degrees celsius using the variable name temp
    n0 = prf.nchems
    z = prf.nc.variables['z'][:]
    T = prf.nc.variables['temperature'][:]
    T_degC = T - 273.15
    assert_array_almost_equal(T_degC + 273.15, T, decimal=6)
    data = np.vstack((z, T_degC)).transpose()
    symbols = ['z', 'temp']
    units = ['m', 'deg C']
    comments = ['measured', 'identical to temperature, but in deg C']
    prf.append(data, symbols, units, comments, 0)

    # Check that the data were inserted correctly
    Tnc = prf.nc.variables['temp'][:]
    assert_array_almost_equal(Tnc, T_degC, decimal=6)
    assert prf.nc.variables['temp'].units == 'deg C'

    # Check that get_values works correctly with vector inputs for depth
    depths = np.linspace(prf.nc.variables['z'].valid_min,
                         prf.nc.variables['z'].valid_max, 100)
    Temps = prf.get_values(depths, ['temperature', 'temp'])
    for i in range(len(depths)):
        assert_approx_equal(Temps[i, 0], Temps[i, 1] + 273.15,
                            significant=6)

    # Make sure the units are returned correctly
    assert prf.get_units('temp')[0] == 'deg C'
    assert prf.nc.variables['temp'].units == 'deg C'

    # Check that temp is now listed as a chemical
    assert prf.nchems == n0 + 1
    assert prf.chem_names[-1] == 'temp'

    # Test the API for calculating the buoyancy frequency (note that we
    # do not check the result, just that the function call does not raise
    # an error)
    N = prf.buoyancy_frequency(depths)
    N = prf.buoyancy_frequency(depths[50], h=0.1)

    # Send back the Profile object
    return prf
def test_johnson_nyquist_noise_voltage(self):
    v = johnson_nyquist_noise_voltage("20 MΩ", "Δ10000 Hz", "20 °C")
    assert_equal(
        auto_format(johnson_nyquist_noise_voltage, "20 MΩ", "Δ10000 Hz",
                    "20 °C"), "56.9 µV")
    assert_approx_equal(v, 56.9025e-6, significant=5)
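# Hedged sketch (assuming the library implements the standard
# Johnson-Nyquist relation v_rms = sqrt(4 * kB * T * R * delta_f)): with
# R = 20 MOhm, delta_f = 10 kHz, and T = 293.15 K this evaluates to about
# 56.90 uV, matching the reference value in the test above. The helper
# name below is illustrative only.
import math

BOLTZMANN = 1.380649e-23  # Boltzmann constant [J/K]

def thermal_noise_voltage(resistance_ohms, bandwidth_hz, temp_celsius):
    """RMS Johnson-Nyquist noise voltage across a resistor."""
    t_kelvin = temp_celsius + 273.15
    return math.sqrt(4.0 * BOLTZMANN * t_kelvin * resistance_ohms
                     * bandwidth_hz)

print(thermal_noise_voltage(20e6, 10e3, 20.0))  # ~5.690e-05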
def check_net_numpy(net_ds, num_ds, currents):
    """
    Check that an ambient.Profile object is created correctly and that
    the methods operate as expected.
    """
    chem_names = net_ds.f_names
    chem_units = net_ds.f_units

    # Check the chemical names and units are correct
    for i in range(3):
        assert num_ds.f_names[i] == chem_names[i]
        assert num_ds.f_units[i] == chem_units[i]
    assert num_ds.nchems == 2

    # Check the error criteria on the interpolator
    assert num_ds.err == 0.01

    # Check the get_units method
    name_list = ['temperature', 'salinity', 'pressure'] + chem_names[0:3]
    unit_list = ['K', 'psu', 'Pa'] + chem_units[0:3]
    for i in range(3):
        assert num_ds.get_units(name_list[i])[0] == unit_list[i]
    units = num_ds.get_units(name_list)
    for i in range(3):
        assert units[i] == unit_list[i]

    # Check the interpolator function ...
    z = np.linspace(num_ds.z_min, num_ds.z_max, 100)

    # Next, check that the variables returned by the get_values function
    # are the variables we expect
    for depth in z:
        assert num_ds.get_values(depth, 'temperature') == \
            net_ds.get_values(depth, 'temperature')
        assert num_ds.get_values(depth, 'salinity') == \
            net_ds.get_values(depth, 'salinity')
        assert num_ds.get_values(depth, 'pressure') == \
            net_ds.get_values(depth, 'pressure')

    # Test the append() method by inserting the temperature data as a new
    # profile, this time in degrees celsius using the variable name temp
    n0 = num_ds.nchems
    z = num_ds.data[:, 0]
    T = num_ds.data[:, 1]
    T_degC = T - 273.15
    data = np.vstack((z, T_degC)).transpose()
    symbols = ['z', 'temp']
    units = ['m', 'deg C']
    comments = ['measured', 'identical to temperature, but in deg C']
    num_ds.append(data, symbols, units, comments, 0)

    # Check that the data were inserted correctly
    Tnc = num_ds.data[:, num_ds.chem_names.index('temp') + 7]
    assert_array_almost_equal(Tnc, T_degC, decimal=6)
    assert num_ds.get_units('temp')[0] == 'deg C'

    # Check that get_values works correctly with vector inputs for depth
    Temps = num_ds.get_values(z, ['temperature', 'temp'])
    for i in range(len(z)):
        assert_approx_equal(Temps[i, 0], Temps[i, 1] + 273.15,
                            significant=6)

    # Make sure the units are returned correctly
    assert num_ds.get_units('temp')[0] == 'deg C'

    # Check that temp is now listed as a chemical
    assert num_ds.nchems == n0 + 1
    assert num_ds.chem_names[-1] == 'temp'

    # Test the API for calculating the buoyancy frequency (note that we
    # do not check the result, just that the function call does not raise
    # an error); compare the numpy-based profile against the netCDF-based
    # one rather than against itself.
    N_num = num_ds.buoyancy_frequency(z)
    N_net = net_ds.buoyancy_frequency(z)
    assert_array_almost_equal(N_num, N_net, decimal=6)
def test_valid_telemetry():
    telemetry = csim_parser.parse_packet()
    assert isinstance(telemetry, dict)
    assert len(telemetry) == 27
    assert telemetry['FlightModel'] == 1
    assert telemetry['CommandAcceptCount'] == 2691
    assert telemetry['SpacecraftMode'] == 4
    assert telemetry['PointingMode'] == 1
    assert telemetry['Eclipse'] == 0
    assert telemetry['EnableX123'] == 1
    assert telemetry['EnableSps'] == 1
    assert_approx_equal(telemetry['SpsX'], -0.36, significant=2)
    assert_approx_equal(telemetry['SpsY'], 0.16, significant=2)
    assert telemetry['Xp'] == 153.0
    assert_approx_equal(telemetry['CdhBoardTemperature'], 12.25,
                        significant=4)
    assert_approx_equal(telemetry['CommBoardTemperature'], 8.56,
                        significant=3)
    assert_approx_equal(telemetry['MotherboardTemperature'], 8.62,
                        significant=3)
    assert_approx_equal(telemetry['EpsBoardTemperature'], 31.56,
                        significant=4)
    assert_approx_equal(telemetry['SolarPanelMinusYTemperature'], 52.23,
                        significant=4)
    assert_approx_equal(telemetry['SolarPanelPlusXTemperature'], 53.80,
                        significant=4)
    assert_approx_equal(telemetry['SolarPanelPlusYTemperature'], 46.82,
                        significant=4)
    assert_approx_equal(telemetry['BatteryTemperature'], 12.34,
                        significant=4)
    assert_approx_equal(telemetry['BatteryVoltage'], 7.97, significant=3)
    assert_approx_equal(telemetry['BatteryChargeCurrent'], 347.4,
                        significant=4)
    assert_approx_equal(telemetry['BatteryDischargeCurrent'], 9.54,
                        significant=3)
    assert_approx_equal(telemetry['SolarPanelMinusYCurrent'], 134,
                        significant=3)
    assert_approx_equal(telemetry['SolarPanelPlusXCurrent'], 536,
                        significant=3)
    assert_approx_equal(telemetry['SolarPanelPlusYCurrent'], 136,
                        significant=3)
    assert_approx_equal(telemetry['SolarPanelMinusYVoltage'], 16.9,
                        significant=3)
    assert_approx_equal(telemetry['SolarPanelPlusXVoltage'], 9.77,
                        significant=3)
    assert_approx_equal(telemetry['SolarPanelPlusYVoltage'], 16.5,
                        significant=3)
def test_inductive_reactance(self):
    assert_approx_equal(inductive_reactance("100 µH", "3.2 MHz"), 2010.619)
    assert_approx_equal(inductive_reactance(100e-6, 3.2e6), 2010.619)
    self.assertEqual(auto_format(inductive_reactance, "100 µH", "3.2 MHz"),
                     "2.01 kΩ")
def test_johnson_nyquist_noise_current(self):
    v = johnson_nyquist_noise_current("20 MΩ", "Δ10000 Hz", "20 °C")
    assert_approx_equal(v, 2.84512e-12, significant=5)
    assert_equal(
        auto_format(johnson_nyquist_noise_current, "20 MΩ", "Δ10000 Hz",
                    "20 °C"), "2.85 pA")
def test_exponential_atmosphere_scalar(self):
    h = 712345  # [m] altitude
    h /= 1000   # convert to [km]
    rho = atmos.exponential_density_model(h)
    npt.assert_approx_equal(rho, 3.144284600e-14)
def test_f_vs_efunda():
    assert_approx_equal(
        friction_factor(reynolds_number(U, D, nu), D),
        0.0263,
        significant=2
    )
def test_location_stats_scalars(location, attr):
    expected = {
        "useros":  {True: True,      False: False},
        "cov":     {True: 0.5887644, False: 0.5280314},
        "geomean": {True: 8.0779865, False: 8.8140731},
        "geostd":  {True: 1.8116975, False: 1.7094616},
        "logmean": {True: 2.0891426, False: 2.1763497},
        "logstd":  {True: 0.5942642, False: 0.5361785},
        "mean":    {True: 9.5888515, False: 10.120571},
        "median":  {True: 7.5000000, False: 8.7100000},
        "pctl10":  {True: 4.0460279, False: 5.0000000},
        "pctl25":  {True: 5.6150000, False: 5.8050000},
        "pctl75":  {True: 11.725000, False: 11.725000},
        "pctl90":  {True: 19.178000, False: 19.178000},
        "skew":    {True: 0.8692107, False: 0.8537566},
        "std":     {True: 5.6455746, False: 5.3439797},
    }
    nptest.assert_approx_equal(
        getattr(location, attr),
        expected[attr][location.useros],
        significant=5
    )
def test_profile_deeper():
    """
    Test the methods to compute buoyancy_frequency and to extend a CTD
    profile to greater depths. We just test the data from ctd_bm54.cnv
    since these methods are independent of the source of data.
    """
    # Make sure the netCDF file for ctd_BM54.cnv has already been created
    # by running the test file that creates it.
    test_from_ctd()

    # Get a Profile object from this dataset
    nc_file = os.path.join(OUTPUT_DIR, 'test_BM54.nc')
    ctd = ambient.Profile(nc_file, chem_names=['oxygen'])

    # Compute the buoyancy frequency at 1500 m and verify that the result
    # is correct
    N = ctd.buoyancy_frequency(1529.789, h=0.01)
    assert_approx_equal(N, 0.00061463758327116565, significant=6)

    # Record a few values to check after running the extension method
    T0, S0, P0, o20 = ctd.get_values(1000., ['temperature', 'salinity',
                                             'pressure', 'oxygen'])
    z0 = ctd.data[:, 0]

    # Extend the profile to 2500 m
    nc_file = os.path.join(OUTPUT_DIR, 'test_BM54_deeper.nc')
    ctd.extend_profile_deeper(2500., nc_file)

    # Check if the original data are preserved
    T1, S1, P1, o21 = ctd.get_values(1000., ['temperature', 'salinity',
                                             'pressure', 'oxygen'])
    z1 = ctd.data[:, 0]

    # Make sure the results are still right
    assert_approx_equal(T1, T0, significant=6)
    assert_approx_equal(S1, S0, significant=6)
    assert_approx_equal(P1, P0, significant=6)
    assert_approx_equal(o21, o20, significant=6)
    assert z1.shape[0] > z0.shape[0]
    assert z1[-1] == 2500.

    # Note that the buoyancy frequency shifts very slightly because
    # density is not linearly proportional to salinity. Nonetheless, the
    # results are close to what we want, so this method of extending the
    # profile works adequately.
    N = ctd.buoyancy_frequency(1500.)
    assert_approx_equal(N, 0.0006377576016247663, significant=6)
    N = ctd.buoyancy_frequency(2500.)
    assert_approx_equal(N, 0.0006146292892002274, significant=6)

    ctd.close_nc()
def test_psm_Model():
    """
    Test the `Model` class

    Test all of the functionality in the `Model` class. This class uses
    fluid property values computed by the `dbm` module. The main thing
    that needs to be tested is that the connections to the `BaseModel`
    class are implemented correctly.
    """
    # Get the TAMOC objects for a typical spill
    profile, oil, mass_flux, z0, Tj = get_blowout_model()

    # Create a psm.Model object
    spill = psm.Model(profile, oil, mass_flux, z0, Tj)

    # Simulate breakup from a blowout ------------------------------------
    d0 = 0.15
    spill.simulate(d0, model_gas='wang_etal', model_oil='sintef')

    # Create the particle size distributions
    nbins_gas = 10
    nbins_oil = 15
    de_gas_model, vf_gas_model, de_oil_model, vf_oil_model = \
        spill.get_distributions(nbins_gas, nbins_oil)

    de_max_gas = 0.031667073026852774
    de_max_oil = 0.019433783423489368
    d50_gas = 0.004757196250447496
    d50_oil = 0.004183783481056991
    de_gas = np.array([
        0.00239291, 0.00274617, 0.00315159, 0.00361687, 0.00415083,
        0.00476362, 0.00546688, 0.00627396, 0.0072002, 0.00826317])
    vf_gas = np.array([
        0.01545088, 0.0432876, 0.09350044, 0.15570546, 0.19990978,
        0.19788106, 0.15101303, 0.08885147, 0.04030462, 0.01409565])
    de_oil = np.array([
        0.0004472, 0.00056405, 0.00071143, 0.00089732, 0.00113177,
        0.00142749, 0.00180047, 0.00227091, 0.00286426, 0.00361265,
        0.00455658, 0.00574714, 0.00724879, 0.00914279, 0.01153166])
    vf_oil = np.array([
        0.00522565, 0.00788413, 0.01185467, 0.01773296, 0.02631885,
        0.03859967, 0.05559785, 0.07791868, 0.10476347, 0.13228731,
        0.15193437, 0.15128424, 0.12160947, 0.0710618, 0.02592687])
    assert_approx_equal(spill.get_de_max(0), de_max_gas)
    assert_approx_equal(spill.get_de_max(1), de_max_oil)
    assert_approx_equal(spill.get_d50(0), d50_gas)
    assert_approx_equal(spill.get_d50(1), d50_oil)
    assert_array_almost_equal(spill.de_gas, de_gas, decimal=6)
    assert_array_almost_equal(spill.vf_gas, vf_gas, decimal=6)
    assert_array_almost_equal(spill.de_oil, de_oil, decimal=6)
    assert_array_almost_equal(spill.vf_oil, vf_oil, decimal=6)

    # Switch oil model to li_etal ----------------------------------------
    spill.simulate(d0, model_gas='wang_etal', model_oil='li_etal')

    # Create the particle size distributions
    nbins_gas = 10
    nbins_oil = 15
    de_gas_model, vf_gas_model, de_oil_model, vf_oil_model = \
        spill.get_distributions(nbins_gas, nbins_oil)

    d50_oil = 0.0022201887727817814
    de_oil = np.array([
        0.00023732, 0.00029932, 0.00037753, 0.00047618, 0.00060059,
        0.00075752, 0.00095545, 0.00120509, 0.00151996, 0.00191711,
        0.00241802, 0.00304981, 0.00384668, 0.00485176, 0.00611945])
    vf_oil = np.array([
        0.00522565, 0.00788413, 0.01185467, 0.01773296, 0.02631885,
        0.03859967, 0.05559785, 0.07791868, 0.10476347, 0.13228731,
        0.15193437, 0.15128424, 0.12160947, 0.0710618, 0.02592687])
    assert_approx_equal(spill.get_de_max(0), de_max_gas)
    assert_approx_equal(spill.get_de_max(1), de_max_oil)
    assert_approx_equal(spill.get_d50(0), d50_gas)
    assert_approx_equal(spill.get_d50(1), d50_oil)
    assert_array_almost_equal(spill.de_gas, de_gas, decimal=6)
    assert_array_almost_equal(spill.vf_gas, vf_gas, decimal=6)
    assert_array_almost_equal(spill.de_oil, de_oil, decimal=6)
    assert_array_almost_equal(spill.vf_oil, vf_oil, decimal=6)

    # Switch gas model to li_etal ----------------------------------------
    spill.simulate(d0, model_gas='li_etal')

    # Create the particle size distributions
    nbins_gas = 10
    nbins_oil = 15
    de_gas_model, vf_gas_model, de_oil_model, vf_oil_model = \
        spill.get_distributions(nbins_gas, nbins_oil)

    de_max_gas = 0.031667073026852774
    d50_gas = 0.00042029308524755096
    d50_oil = 0.004183783481056991
    de_gas = np.array([
        0.00011944, 0.00015369, 0.00019776, 0.00025447, 0.00032744,
        0.00042133, 0.00054215, 0.00069762, 0.00089766, 0.00115507])
    vf_gas = np.array([
        0.02515921, 0.06286577, 0.12110766, 0.17987423, 0.20597111,
        0.18183759, 0.12376591, 0.0649469, 0.02627579, 0.00819583])
    de_oil = np.array([
        0.0004472, 0.00056405, 0.00071143, 0.00089732, 0.00113177,
        0.00142749, 0.00180047, 0.00227091, 0.00286426, 0.00361265,
        0.00455658, 0.00574714, 0.00724879, 0.00914279, 0.01153166])
    vf_oil = np.array([
        0.00522565, 0.00788413, 0.01185467, 0.01773296, 0.02631885,
        0.03859967, 0.05559785, 0.07791868, 0.10476347, 0.13228731,
        0.15193437, 0.15128424, 0.12160947, 0.0710618, 0.02592687])
    assert_approx_equal(spill.get_de_max(0), de_max_gas)
    assert_approx_equal(spill.get_de_max(1), de_max_oil)
    assert_approx_equal(spill.get_d50(0), d50_gas)
    assert_approx_equal(spill.get_d50(1), d50_oil)
    assert_array_almost_equal(spill.de_gas, de_gas, decimal=6)
    assert_array_almost_equal(spill.vf_gas, vf_gas, decimal=6)
    assert_array_almost_equal(spill.de_oil, de_oil, decimal=6)
    assert_array_almost_equal(spill.vf_oil, vf_oil, decimal=6)

    # Try a case with no gas ----------------------------------------------
    spill.update_z0(1000.)
    spill.simulate(d0, model_gas='wang_etal', model_oil='sintef')

    # Create the particle size distributions
    nbins_gas = 10
    nbins_oil = 15
    de_gas_model, vf_gas_model, de_oil_model, vf_oil_model = \
        spill.get_distributions(nbins_gas, nbins_oil)

    de_max_oil = 0.017327034580027646
    d50_gas = 0.0
    d50_oil = 0.007683693892124441
    de_gas = np.array([
        np.nan, np.nan, np.nan, np.nan, np.nan,
        np.nan, np.nan, np.nan, np.nan, np.nan])
    vf_gas = np.array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])
    de_oil = np.array([
        0.00082131, 0.00103591, 0.00130657, 0.00164796, 0.00207855,
        0.00262164, 0.00330664, 0.00417061, 0.00526033, 0.00663478,
        0.00836835, 0.01055487, 0.0133127, 0.01679111, 0.02117837])
    vf_oil = np.array([
        0.00522565, 0.00788413, 0.01185467, 0.01773296, 0.02631885,
        0.03859967, 0.05559785, 0.07791868, 0.10476347, 0.13228731,
        0.15193437, 0.15128424, 0.12160947, 0.0710618, 0.02592687])
    assert_approx_equal(spill.get_de_max(1), de_max_oil)
    assert_approx_equal(spill.get_d50(0), d50_gas)
    assert_approx_equal(spill.get_d50(1), d50_oil)
    assert_array_almost_equal(spill.de_gas, de_gas, decimal=6)
    assert_array_almost_equal(spill.vf_gas, vf_gas, decimal=6)
    assert_array_almost_equal(spill.de_oil, de_oil, decimal=6)
    assert_array_almost_equal(spill.vf_oil, vf_oil, decimal=6)

    return spill