def calc_Vsh_l(A, lm1_sqrt, sanity_checks=False):
    D = A.shape[2]
    Dm1 = A.shape[1]
    q = A.shape[0]

    if q * Dm1 - D <= 0:
        return None

    L = sp.zeros((D, q, Dm1), dtype=A.dtype, order='C')
    for s in xrange(q):
        L[:, s, :] = lm1_sqrt.dot(A[s]).conj().T

    L = L.reshape((D, q * Dm1))
    V = ns.nullspace_qr(L)

    if sanity_checks:
        if not sp.allclose(L.dot(V), 0):
            log.warning("Sanity Fail in calc_Vsh_l!: LV != 0")
        if not sp.allclose(V.conj().T.dot(V), sp.eye(V.shape[1])):
            log.warning("Sanity Fail in calc_Vsh_l!: V H(V) != eye")

    V = V.reshape((q, Dm1, q * Dm1 - D))

    Vsh = sp.transpose(V.conj(), axes=(0, 2, 1))
    Vsh = sp.asarray(Vsh, order='C')

    if sanity_checks:
        M = eps_l_noop(lm1_sqrt, A, V)
        if not sp.allclose(M, 0):
            log.warning("Sanity Fail in calc_Vsh_l!: Bad Vsh")

    return Vsh

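# Hedged usage sketch (not from the original source): exercises calc_Vsh_l on
# a random tensor and checks the reported shape.  Assumes scipy is available
# as sp and that calc_Vsh_l's helpers (ns.nullspace_qr, eps_l_noop, log) are
# in scope, as in the module above.
def _sketch_calc_Vsh_l():
    q, Dm1, D = 2, 3, 4  # chosen so that q * Dm1 - D > 0
    A = sp.random.randn(q, Dm1, D) + 1.j * sp.random.randn(q, Dm1, D)
    lm1_sqrt = sp.eye(Dm1, dtype=A.dtype)  # trivial left environment
    Vsh = calc_Vsh_l(A, lm1_sqrt, sanity_checks=True)
    assert Vsh is not None and Vsh.shape == (q, q * Dm1 - D, Dm1)
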
def test1_read_excel_curve_data(self):
    dirs = determine_this_file_path()
    excel_file = 'synthetic_data_Flood_2012.xls'
    excel_file = os.path.join(dirs, excel_file)
    temp = create_vuln_xml.read_excel_curve_data(excel_file)
    depths, fab, contents = temp
    self.assertTrue(allclose(depths, array([0., 1.0])))

    actually_fab = {u'FCM1_INSURED': array([0., 0.1]),
                    u'FCM2_INSURED': array([0., 0.12]),
                    u'FCM1_UNINSURED': array([0., 0.5]),
                    u'FCM2_UNINSURED': array([0., 0.52])}
    act_cont = {u'FCM1_INSURED_SAVE': array([0., 0.2]),
                u'FCM1_INSURED_NOACTION': array([0., 0.3]),
                u'FCM1_INSURED_EXPOSE': array([0., 0.4]),
                u'FCM1_UNINSURED_SAVE': array([0., 0.6]),
                u'FCM1_UNINSURED_NOACTION': array([0., 0.7]),
                u'FCM1_UNINSURED_EXPOSE': array([0., 0.8]),
                u'FCM2_INSURED_SAVE': array([0., 0.22]),
                u'FCM2_INSURED_NOACTION': array([0., 0.32]),
                u'FCM2_INSURED_EXPOSE': array([0., 0.42]),
                u'FCM2_UNINSURED_SAVE': array([0., 0.62]),
                u'FCM2_UNINSURED_NOACTION': array([0., 0.72]),
                u'FCM2_UNINSURED_EXPOSE': array([0., 0.82])}

    for key in actually_fab:
        self.assertTrue(allclose(actually_fab[key], fab[key]))
    for key in act_cont:
        self.assertTrue(allclose(act_cont[key], contents[key]))

def test_outside_polygon(self):
    U = [[0, 0], [1, 0], [1, 1], [0, 1]]  # Unit square

    # Evaluates to False as the point (0.5, 0.5) is inside the unit square.
    assert not is_outside_polygon([0.5, 0.5], U)

    # Evaluates to True as the point (1.5, 0.5) is outside the unit square.
    assert is_outside_polygon([1.5, 0.5], U)

    indices = outside_polygon([[0.5, 0.5], [1, -0.5], [0.3, 0.2]], U)
    assert allclose(indices, [1])

    # One more test of vector formulation returning indices.
    polygon = [[0, 0], [1, 0], [0.5, -1], [2, -1], [2, 1], [0, 1]]
    points = [[0.5, 0.5], [1, -0.5], [1.5, 0], [0.5, 1.5], [0.5, -0.5]]
    res = outside_polygon(points, polygon)
    assert allclose(res, [3, 4])

    polygon = [[0, 0], [1, 0], [0.5, -1], [2, -1], [2, 1], [0, 1]]
    points = [[0.5, 1.4], [0.5, 0.5], [1, -0.5], [1.5, 0],
              [0.5, 1.5], [0.5, -0.5]]
    res = outside_polygon(points, polygon)
    assert allclose(res, [0, 4, 5])

def test_gets_axes_right(self):
    Map = tools.set_up_map(self.Data, (0.0, 5.2), (45, 23), (0.4, 0.5))
    Map.calc_axes()
    self.Data.calc_freq()
    self.assertTrue(sp.allclose(Map.freq, self.Data.freq))
    self.assertTrue(sp.allclose(Map.long,
                                tools.calc_bins(0.0, 45, 0.4, "middle")))
    self.assertTrue(sp.allclose(Map.lat,
                                tools.calc_bins(5.2, 23, 0.5, "middle")))

def test_rigged_pointing(self):
    Data = self.blocks[0]
    Data.calc_freq()
    map = self.map
    # Set all data = (f + cal_ind)*time_ind.
    Data.data[:, :, :, :] = (sp.arange(-4.5, 5)
                             [:, sp.newaxis, sp.newaxis, sp.newaxis]
                             * (Data.freq / 100e6))
    Data.data[...] -= sp.mean(Data.data, 0)
    Data.data[...] += (sp.arange(6, 8).reshape((1, 1, 2, 1))
                       * (Data.freq / 100e6)
                       * sp.arange(-4.5, 5).reshape((10, 1, 1, 1)))
    map[:, :, :] = 0.0
    # Set 10 pixels to match data (except for the cal_ind part).
    map[:, range(10), range(10)] = (sp.arange(-4.5, 5)[None, :]
                                    * map.get_axis('freq')[:, None] / 100e6)
    # We should be completely insensitive to the map mean.  The following
    # should have no effect.
    map[...] += 0.352 * map.get_axis('freq')[:, None, None] / 800.0e7

    # Rig the pointing to point to those 10 pixels.
    def rigged_pointing():
        Data.ra = map.get_axis('ra')[range(10)]
        Data.dec = map.get_axis('dec')[range(10)]
    Data.calc_pointing = rigged_pointing
    smd.sub_map(Data, map)
    # Now data should be just f*time_ind*(cal_ind + 6), within 2.0 MHz/2.
    Data.data /= sp.arange(-4.5, 5)[:, sp.newaxis, sp.newaxis, sp.newaxis]
    Data.data /= Data.freq / 100e6
    # Relative tolerance of 1/700, which is the frequency bin width.
    self.assertTrue(sp.allclose(Data.data[:, :, 0, :], 6.0, rtol=1.0 / 700))
    self.assertTrue(sp.allclose(Data.data[:, :, 1, :], 7.0, rtol=1.0 / 700))

def test_correlate(self):
    Data = self.blocks[0]
    Data.calc_freq()
    map = self.map
    gain = 3.45
    const = 2.14
    # Set all data = gain*cos(time_ind).
    Data.data[:, :, :, :] = gain * sp.cos(
        sp.arange(1, 11)[:, sp.newaxis, sp.newaxis, sp.newaxis])
    # Explicitly set the time mean to something known.
    Data.data -= ma.mean(Data.data, 0)
    Data.data += gain * const * Data.freq / 800.0e6
    # Now the map.
    map[:, :, :] = 0.0
    # Set 10 pixels to match the cos part of the data.
    map[:, range(10), range(10)] = sp.cos(sp.arange(1, 11)[None, :])
    map[:, range(10), range(10)] -= ma.mean(
        map[:, range(10), range(10)], 1)[:, None]
    # Give the map a mean to test things out.  Should really have no effect.
    map[...] += 0.352 * map.get_axis('freq')[:, None, None] / 800.0e6

    # Rig the pointing to point to those 10 pixels.
    def rigged_pointing():
        Data.ra = map.get_axis('ra')[range(10)]
        Data.dec = map.get_axis('dec')[range(10)]
    Data.calc_pointing = rigged_pointing
    solved_gains = smd.sub_map(Data, map, correlate=True)
    # Now data should just be gain*const*f, within machine precision.
    Data.data /= gain * Data.freq / 800.0e6
    self.assertTrue(sp.allclose(Data.data[:, :, :, :], const))
    self.assertTrue(sp.allclose(solved_gains, gain))

def calc_Vsh(A, r_s, sanity_checks=False):
    D = A.shape[2]
    Dm1 = A.shape[1]
    q = A.shape[0]

    if q * D - Dm1 <= 0:
        return None

    R = sp.zeros((D, q, Dm1), dtype=A.dtype, order='C')
    for s in xrange(q):
        R[:, s, :] = r_s.dot(A[s].conj().T)

    R = R.reshape((q * D, Dm1))
    Vconj = ns.nullspace_qr(R.conj().T).T

    if sanity_checks:
        if not sp.allclose(mm.mmul(Vconj.conj(), R), 0):
            log.warning("Sanity Fail in calc_Vsh!: VR != 0")
        if not sp.allclose(mm.mmul(Vconj, Vconj.conj().T),
                           sp.eye(Vconj.shape[0])):
            log.warning("Sanity Fail in calc_Vsh!: V H(V) != eye")

    Vconj = Vconj.reshape((q * D - Dm1, D, q))

    Vsh = Vconj.T
    Vsh = sp.asarray(Vsh, order='C')

    if sanity_checks:
        Vs = sp.transpose(Vsh, axes=(0, 2, 1)).conj()
        M = eps_r_noop(r_s, Vs, A)
        if not sp.allclose(M, 0):
            log.warning("Sanity Fail in calc_Vsh!: Bad Vsh")

    return Vsh

def test_build_replacement_ratios(self):
    # usage_values_per_struct = ['RES1', 'RES3', 'COM8', 'GOV1',
    #                            'GOV1', 'GOV1']
    usage_values_per_struct = [111, 231, 231]
    rcp_actual = {'structural': [0.2344, 0.1918, 0.1918],
                  'nonstructural drift sensitive': [0.5, 0.3288, 0.3288],
                  'nonstructural acceleration sensitive': [0.2656, 0.4795,
                                                           0.4795]}
    buildings_usage_classification = 'FCB'
    rcp = build_replacement_ratios(usage_values_per_struct,
                                   buildings_usage_classification)
    components = ['structural', 'nonstructural drift sensitive',
                  'nonstructural acceleration sensitive']
    for comp in components:
        self.assert_(allclose(rcp[comp], rcp_actual[comp], 0.001))

    usage_values_per_struct = ['RES1', 'COM4', 'COM4']
    rcp_actual = {'structural': [0.2344, 0.1918, 0.1918],
                  'nonstructural drift sensitive': [0.5, 0.3288, 0.3288],
                  'nonstructural acceleration sensitive': [0.2656, 0.4795,
                                                           0.4795]}
    buildings_usage_classification = 'HAZUS'
    rcp = build_replacement_ratios(usage_values_per_struct,
                                   buildings_usage_classification)
    components = ['structural', 'nonstructural drift sensitive',
                  'nonstructural acceleration sensitive']
    for comp in components:
        self.assert_(allclose(rcp[comp], rcp_actual[comp], 0.001))

def test_wind_v3_template(self):
    # Test running an end-to-end cyclone test based
    # on a wind config template.

    # The output file.
    f = tempfile.NamedTemporaryFile(
        suffix='.npz',
        prefix='HAZIMPt_wind_scenarios_test_const',
        delete=False)

    wind_dir = os.path.join(misc.EXAMPLE_DIR, 'wind')
    exp_filename = os.path.join(wind_dir, 'syn_small_exposure_tcrm.csv')
    wind_filename = os.path.join(wind_dir, 'gust01.txt')
    a_config = [{TEMPLATE: WINDV3},
                {LOADCSVEXPOSURE: {'file_name': exp_filename,
                                   'exposure_latitude': 'LATITUDE',
                                   'exposure_longitude': 'LONGITUDE'}},
                {LOADWINDTCRM: [wind_filename]},
                {CALCSTRUCTLOSS: {REP_VAL_NAME: 'REPLACEMENT_VALUE'}},
                {SAVE: f.name}]

    context = hazimp.start(config_list=a_config)
    self.assertTrue(allclose(
        context.exposure_att['structural_loss'],
        context.exposure_att['calced-loss']))

    # Only the head node writes a file.
    if parallel.STATE.rank == 0:
        exp_dict = numpy.load(f.name)
        self.assertTrue(allclose(exp_dict['structural_loss'],
                                 exp_dict['calced-loss']))
        os.remove(f.name)

def test_lowrank_ard(self):
    theta = SP.array(SP.random.randn(1 + self.n_train)**2)
    theta_hat = SP.exp(2 * theta)

    _K = (theta_hat[0] * SP.dot(self.Xtrain, self.Xtrain.T)
          + SP.diag(theta_hat[1:]))
    _Kcross = theta_hat[0] * SP.dot(self.Xtrain, self.Xtest.T)
    _Kgrad_theta = 2 * theta_hat[0] * SP.dot(self.Xtrain, self.Xtrain.T)

    cov = lowrank.LowRankArdCF(n_dimensions=self.n_dimensions,
                               n_hyperparameters=self.n_train + 1)
    cov.X = self.Xtrain
    cov.Xcross = self.Xtest
    K = cov.K(theta)
    Kcross = cov.Kcross(theta)

    assert SP.allclose(K, _K), 'ouch, covariance matrix is wrong'
    assert SP.allclose(Kcross, _Kcross), \
        'ouch, cross covariance matrix is wrong'
    assert SP.allclose(_Kgrad_theta, cov.Kgrad_theta(theta, 0)), \
        'ouch, gradient with respect to theta[0] is wrong'

    # Gradient with respect to the parameters of the diagonal matrix.
    for i in range(self.n_train):
        Kgrad_theta = cov.Kgrad_theta(theta, i + 1)
        _Kgrad_theta = SP.zeros(Kgrad_theta.shape)
        _Kgrad_theta[i, i] = 2 * theta_hat[i + 1]
        assert SP.allclose(Kgrad_theta, _Kgrad_theta), \
            'ouch, gradient with respect to theta[%d] is wrong' % (i + 1)

    # Gradient with respect to the latent factors.
    for i in range(self.n_dimensions):
        for j in range(self.n_train):
            Xgrad = SP.zeros(self.Xtrain.shape)
            Xgrad[j, i] = 1
            _Kgrad_x = theta_hat[0] * (SP.dot(Xgrad, self.Xtrain.T)
                                       + SP.dot(self.Xtrain, Xgrad.T))
            Kgrad_x = cov.Kgrad_x(theta, i, j)
            assert SP.allclose(Kgrad_x, _Kgrad_x), \
                'ouch, gradient with respect to x is wrong for entry [%d,%d]' % (i, j)

def test_calc_activities_Characteristic(self):
    ## As far as I can tell you can regard this function as generating
    ## events for testing.
    def make_bins(min_mag, max_magnitude, num_bins,
                  recurrence_model_dist='bounded_gutenberg_richter'):
        if recurrence_model_dist == 'characteristic':
            m2 = 0.5
            m_c = max_magnitude - m2
            delta_mag = (m_c - min_mag) / num_bins
            bins = r_[min_mag + delta_mag / 2:
                      m_c - delta_mag / 2:num_bins * 1j]
            characteristic_bin = array([m_c + (m2 / 2)])
            bins = append(bins, characteristic_bin)
        else:
            delta_mag = (max_magnitude - min_mag) / num_bins
            bins = r_[min_mag + delta_mag / 2:
                      max_magnitude - delta_mag / 2:num_bins * 1j]
        # Approximate the number of earthquakes in discrete (0.1 unit) bins.
        return bins

    max_magnitude = 7.0
    min_magnitude = 4.0
    slip_rate_mm = 2.0
    area_kms = float(30 * 10)
    b = 1.
    prob_number_of_mag_sample_bins = 10
    bin_centroids = make_bins(min_magnitude, max_magnitude,
                              prob_number_of_mag_sample_bins,
                              'characteristic')
    # event_bins = r_[0:10]
    # event_bins = sorted(event_bins)
    A_minCharacteristic = calc_A_min_from_slip_rate_Characteristic(
        b, min_magnitude, max_magnitude, slip_rate_mm, area_kms)
    pdfs = calc_activities_Characteristic(bin_centroids, b,
                                          min_magnitude, max_magnitude)
    # event_activity_source = array(
    #     [(A_minCharacteristic*pdfs[z]/(sum(where(
    #         event_bins == z, 1, 0)))) for z in event_bins])
    event_activity_source = array(A_minCharacteristic * pdfs)
    self.assert_(allclose(sum(event_activity_source), A_minCharacteristic))
    self.assert_(allclose(event_activity_source,
                          [1.09104980e-02, 6.13542392e-03, 3.45020242e-03,
                           1.94019140e-03, 1.09104980e-03, 6.13542392e-04,
                           3.45020242e-04, 1.94019140e-04, 1.09104980e-04,
                           6.13542392e-05, 4.60091887e-04]))

def test_linear(self):
    theta = SP.array([SP.random.randn()**2])
    theta_hat = SP.exp(2 * theta)

    _K = SP.dot(self.Xtrain, self.Xtrain.T)
    _Kcross = SP.dot(self.Xtrain, self.Xtest.T)

    cov = linear.LinearCF(n_dimensions=self.n_dimensions)
    cov.X = self.Xtrain
    cov.Xcross = self.Xtest

    K = cov.K(theta)
    Kcross = cov.Kcross(theta)
    Kgrad_x = cov.Kgrad_x(theta, 0)
    Kgrad_theta = cov.Kgrad_theta(theta, 0)

    assert SP.allclose(K, theta_hat * _K), 'ouch, covariance matrix is wrong'
    assert SP.allclose(Kgrad_theta, 2 * theta_hat * _K), \
        'ouch, gradient with respect to theta is wrong'
    assert SP.allclose(Kcross, theta_hat * _Kcross), \
        'ouch, cross covariance is wrong'

    # Gradient with respect to the latent factors, for each entry.
    for i in range(self.n_dimensions):
        for j in range(self.n_train):
            Xgrad = SP.zeros(self.Xtrain.shape)
            Xgrad[j, i] = 1
            _Kgrad_x = theta_hat * (SP.dot(Xgrad, self.Xtrain.T)
                                    + SP.dot(self.Xtrain, Xgrad.T))
            Kgrad_x = cov.Kgrad_x(theta, i, j)
            assert SP.allclose(Kgrad_x, _Kgrad_x), \
                'ouch, gradient with respect to x is wrong for entry [%d,%d]' % (i, j)

def test_lowrank_iso(self):
    theta = SP.array(SP.random.randn(2)**2)
    theta_hat = SP.exp(2 * theta)

    _K = (theta_hat[0] * SP.dot(self.Xtrain, self.Xtrain.T)
          + theta_hat[1] * SP.eye(self.n_train))
    _Kcross = theta_hat[0] * SP.dot(self.Xtrain, self.Xtest.T)
    _Kgrad_theta = []
    _Kgrad_theta.append(2 * theta_hat[0] * SP.dot(self.Xtrain, self.Xtrain.T))
    _Kgrad_theta.append(2 * theta_hat[1] * SP.eye(self.n_train))

    cov = lowrank.LowRankCF(self.n_dimensions)
    cov.X = self.Xtrain
    cov.Xcross = self.Xtest
    K = cov.K(theta)
    Kcross = cov.Kcross(theta)

    assert SP.allclose(K, _K), 'ouch, covariance matrix is wrong'
    assert SP.allclose(Kcross, _Kcross), \
        'ouch, cross covariance matrix is wrong'
    assert SP.allclose(_Kgrad_theta[0], cov.Kgrad_theta(theta, 0))
    assert SP.allclose(_Kgrad_theta[1], cov.Kgrad_theta(theta, 1))

    # Gradient with respect to the latent factors.
    for i in range(self.n_dimensions):
        for j in range(self.n_train):
            Xgrad = SP.zeros(self.Xtrain.shape)
            Xgrad[j, i] = 1
            _Kgrad_x = theta_hat[0] * (SP.dot(Xgrad, self.Xtrain.T)
                                       + SP.dot(self.Xtrain, Xgrad.T))
            Kgrad_x = cov.Kgrad_x(theta, i, j)
            assert SP.allclose(Kgrad_x, _Kgrad_x), \
                'ouch, gradient with respect to x is wrong for entry [%d,%d]' % (i, j)

def test_correlated_scatter(self):
    n = 50
    r = (sp.arange(n, dtype=float) + 10.0 * n) / 10.0 * n
    data = sp.sin(sp.arange(n)) * r
    amp = 25.0
    theory = data / amp
    # Generate a correlated matrix.
    C = random.rand(n, n)  # [0, 1)
    # Raise to a high power to make values near 1 rare.
    C = (C**10) * 0.2
    C = (C + C.T) / 2.0
    C += sp.identity(n)
    C *= r[:, None] / 2.0
    C *= r[None, :] / 2.0
    # Generate random numbers in the diagonal frame.
    h, R = linalg.eigh(C)
    self.assertTrue(sp.alltrue(h > 0))
    rand_vals = random.normal(size=n) * sp.sqrt(h)
    # Rotate back.
    data += sp.dot(R.T, rand_vals)
    out = utils.ampfit(data, C, theory)
    a, s = out['amp'], out['error']
    self.assertTrue(sp.allclose(a, amp, atol=5.0 * s, rtol=0))
    # Expect the next line to fail 1/100 trials.
    self.assertFalse(sp.allclose(a, amp, atol=0.01 * s, rtol=0))

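# Hedged sketch (not from the original source): a minimal generalized
# least-squares amplitude fit of the kind utils.ampfit appears to perform
# here, assuming the standard single-parameter GLS estimator with noise
# covariance C.
def _sketch_gls_ampfit(data, C, theory):
    Ci = linalg.inv(C)                              # C^-1
    var = 1.0 / sp.dot(theory, sp.dot(Ci, theory))  # 1 / (t' C^-1 t)
    amp = var * sp.dot(theory, sp.dot(Ci, data))    # (t' C^-1 d) / (t' C^-1 t)
    return {'amp': amp, 'error': sp.sqrt(var)}
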
def unfinished_test_quick_convert_csv_to_arrays_lats_longs_file(self):
    (handle, file_name) = tempfile.mkstemp('.csv', 'test_csv_interface_')
    os.close(handle)

    LONGITUDE = [11.5, 11.6, 11.7, 11.8]
    LATITUDE = [-3.1, -3.2, -3.3, -3.4]
    WALLS = ['Brick veneer', 'Double Brick', 'Fibro', 'Double Brick']
    attribute_dic = {'LONGITUDE': LONGITUDE,
                     'LATITUDE': LATITUDE,
                     'WALLS': WALLS}
    title_index_dic = {'LONGITUDE': 0,
                       'LATITUDE': 1,
                       'WALLS': 2}
    util.dict2csv(file_name, title_index_dic, attribute_dic)
    print "file_name", file_name
    lon = csvi.quick_convert_csv_to_arrays(file_name, LONGITUDE=float)
    assert lon.keys()[0] == 'LONGITUDE'
    assert len(lon.keys()) == 1
    assert scipy.allclose(LONGITUDE, lon['LONGITUDE'])

    all_conversions = {'LONGITUDE': float,
                       'LATITUDE': float,
                       'WALLS': str}
    all = csvi.quick_convert_csv_to_arrays(self.dummy_f, **all_conversions)
    assert len(all.keys()) == 3
    assert scipy.allclose(LATITUDE, all['LATITUDE'])
    assert scipy.allclose(LONGITUDE, all['LONGITUDE'])
    assert scipy.all(self.WALLS == all['WALLS'])
    os.remove(file_name)

def restore_LCF(self, use_QR=True, update_r=True, diag_r=True):
    """Use a gauge-transformation to restore left canonical form.

    See restore_RCF.
    """
    if use_QR:
        tm.restore_LCF_l_seq(self.A, self.l,
                             sanity_checks=self.sanity_checks,
                             sc_data="restore_LCF_l")
    else:
        G = sp.eye(self.D[0], dtype=self.typ)  # This is actually just the number 1.
        for n in xrange(1, self.N + 1):
            self.l[n], G, Gi = tm.restore_LCF_l(self.A[n], self.l[n - 1], G,
                                                zero_tol=self.zero_tol,
                                                sanity_checks=self.sanity_checks)

    if self.sanity_checks:
        lN = tm.eps_l_noop(self.l[self.N - 1], self.A[self.N], self.A[self.N])
        if not sp.allclose(lN, 1, atol=1E-12, rtol=1E-12):
            log.warning("Sanity Fail in restore_LCF!: l_N is bad / norm failure")

    if diag_r:
        tm.restore_LCF_r_seq(self.A, self.r,
                             sanity_checks=self.sanity_checks,
                             sc_data="restore_LCF_r")

        if self.sanity_checks:
            if not sp.allclose(self.r[0].A, 1, atol=1E-12, rtol=1E-12):
                log.warning("Sanity Fail in restore_LCF!: r_0 is bad / norm failure")
                log.warning("r_0 = %s", self.r[0].squeeze().real)

            for n in xrange(1, self.N + 1):
                l = tm.eps_l_noop(self.l[n - 1], self.A[n], self.A[n])
                if not sp.allclose(l, self.l[n], atol=1E-11, rtol=1E-11):
                    log.warning("Sanity Fail in restore_LCF!: l_%u is bad (off by %g)",
                                n, la.norm(l - self.l[n]))
    elif update_r:
        self.calc_r()

def check_RCF(self):
    """Tests for right canonical form.

    Uses the criteria listed in sub-section 3.1, theorem 1 of
    arXiv:quant-ph/0608197v2.

    This is a consistency check mainly intended for debugging purposes.

    FIXME: The tolerances appear to be too tight!

    Returns
    -------
    (rnsOK, ls_trOK, ls_pos, ls_diag, normOK) : tuple of bool
        rnsOK: Right orthonormalization is fulfilled (self.r[n] = eye).
        ls_trOK: All self.l[n] have trace 1.
        ls_pos: All self.l[n] are positive-definite.
        ls_diag: All self.l[n] are diagonal.
        normOK: The state is normalized.
    """
    rnsOK = True
    ls_trOK = True
    ls_herm = True
    ls_pos = True
    ls_diag = True

    for n in xrange(1, self.N + 1):
        rnsOK = rnsOK and sp.allclose(self.r[n], sp.eye(self.r[n].shape[0]),
                                      atol=self.eps * 2, rtol=0)
        ls_herm = ls_herm and sp.allclose(self.l[n] - m.H(self.l[n]), 0,
                                          atol=self.eps * 2)
        ls_trOK = ls_trOK and sp.allclose(sp.trace(self.l[n]), 1,
                                          atol=self.eps * 1000, rtol=0)
        ls_pos = ls_pos and all(la.eigvalsh(self.l[n]) > 0)
        ls_diag = ls_diag and sp.allclose(self.l[n],
                                          sp.diag(self.l[n].diagonal()))

    normOK = sp.allclose(self.l[self.N], 1., atol=self.eps * 1000, rtol=0)

    return (rnsOK, ls_trOK, ls_pos, ls_diag, normOK)

def test_fold_point(self):
    self.assertTrue(
        scipy.allclose(fold_point([0., -0.5, 0.5], lattice=self.rec_latt),
                       self.rec_latt.get_cartesian_coords([0., 0.5, 0.5])))
    self.assertTrue(
        scipy.allclose(fold_point([0.1, -0.6, 0.2], lattice=self.rec_latt),
                       self.rec_latt.get_cartesian_coords([0.1, 0.4, 0.2])))

def test_apply_threshold_distance_partial(self):
    """
    Test the apply_threshold_distance function for an
    atten_threshold_distance scenario where apply_threshold_distance
    sets some SA figures to zero.
    """
    # Use the ones array for the initial SA figures.
    bedrock_SA = self.SA_ones.copy()
    soil_SA = self.SA_ones.copy()

    # Set a normal threshold distance
    #                event 0        event 1
    # distances [[ 337.69538742  27105.63126916]]
    atten_threshold_distance = 400

    # Set up SA arrays that match the expected outcome, noting the
    # distances in the comment above.
    site_inds = [0]
    event_inds = [1]
    bedrock_SA_expected = bedrock_SA.copy()
    bedrock_SA_expected[..., site_inds, event_inds, :] = 0
    soil_SA_expected = soil_SA.copy()
    soil_SA_expected[..., site_inds, event_inds, :] = 0

    # Run the threshold distance function.
    apply_threshold_distance(bedrock_SA,
                             soil_SA,
                             self.sites,
                             atten_threshold_distance,
                             self.use_amplification,
                             self.event_set)

    assert allclose(bedrock_SA, bedrock_SA_expected)
    assert allclose(soil_SA, soil_SA_expected)

def test_calc_annloss_deagg_grid(self):
    # See documentation/annualised_loss_calc.xls
    # for the calculations of the expected values.
    lat = scipy.array([-25, -24])
    lon = scipy.array([130, 132])
    total_building_loss = scipy.array([[2000.0, 5.0],
                                       [10.0, 2001.0]])
    total_building_value = scipy.array([2020.0, 2030.0])
    event_activity = scipy.array([0.01, 0.001])
    bins = (1, 2)

    percent_ann_loss, lat_lon, _, _ = ca.calc_annloss_deagg_grid(
        lat, lon, total_building_loss, total_building_value,
        event_activity, bins=bins)

    expected_loss = scipy.array([[0.049233, 0.491135]])
    expected_lat_lon = scipy.array([[-24.5, 130.5], [-24.5, 131.5]])
    #print "percent_ann_loss", percent_ann_loss
    #print "expected_loss", expected_loss
    self.failUnless(scipy.allclose(percent_ann_loss, expected_loss))
    self.failUnless(scipy.allclose(lat_lon, expected_lat_lon))

def test_small_checkable(self):
    # See documentation/annualised_loss_calc.xls
    # for the calculations of the expected values.
    saved_ecloss = [[2000.0, 5.0], [10.0, 2001.0]]
    saved_ecbval2 = [2020.0, 2030.0]
    nu = [0.01, 0.001]
    expected_ann_loss = scipy.array([19.95996, 0.4928386])
    expected_cum_ann_loss = scipy.array([[1000, 90.909090],
                                         [19.95996, 0.0],
                                         [0.4928386, 0.0]])

    # Call the function.
    (ann_loss, cum_ann_loss) = ca.calc_annloss(saved_ecloss,
                                               saved_ecbval2, nu)

    #print('expected_ann_loss=%s' % str(expected_ann_loss))
    #print('ann_loss=%s' % str(ann_loss))
    #print('cum_ann_loss=%s' % str(cum_ann_loss))

    # Test the return values.
    self.failUnless(scipy.allclose(ann_loss, expected_ann_loss))
    self.failUnless(scipy.allclose(cum_ann_loss, expected_cum_ann_loss))

def test_few_masked(self):
    Blocks = self.make_blocks()
    for Data in Blocks:
        Data.data[:, :, :, 6] = ma.masked
        Data.data[:, :, :, 13] = ma.masked
    model_name = 'freq_modes_over_f_' + str(3)
    parameters = mn.measure_noise_parameters(Blocks, [model_name])
    for pol, pol_params in parameters.iteritems():
        for ii in range(3):
            mode_noise = pol_params[model_name]['over_f_mode_' + str(ii)]
            #self.assertTrue(sp.allclose(mode_noise['thermal'], self.dt,
            #                            rtol=0.5))
            self.assertTrue(sp.allclose(mode_noise['mode'][6], 0,
                                        atol=1.e-10))
            self.assertTrue(sp.allclose(mode_noise['mode'][13], 0,
                                        atol=1.e-10))
        expected = sp.ones(self.nf, dtype=float) * self.dt
        expected[6] = T_infinity**2
        expected[13] = T_infinity**2
        thermal = pol_params[model_name]['thermal']
        # Weak test, since the above modes may have favoured a few channels.
        #print thermal, expected
        self.assertTrue(sp.allclose(thermal, expected, rtol=0.8))
        measured_general_ind = pol_params[model_name]['all_channel_index']
        measured_corner = pol_params[model_name]['all_channel_corner_f']
        self.assertTrue(measured_corner < 4. / self.dt / self.nt / self.nb)

def test_fit_over_f_plus_const(self):
    dt = 0.13
    n_time = 10000
    amp = 0.67  # K**2/Hz
    index = -1.3
    f_0 = 1.0
    thermal = 2.7  # K**2/Hz
    BW = 1. / dt / 2
    window = sig.get_window('hanning', n_time)
    n_spec = 10
    p = 0
    for ii in range(n_spec):
        time_stream = noise_power.generate_overf_noise(amp, index, f_0,
                                                       dt, n_time)
        time_stream += rand.normal(size=n_time) * sp.sqrt(thermal * BW * 2)
        time_stream -= sp.mean(time_stream)
        time_stream *= window
        p += noise_power.calculate_power(time_stream)
    p /= n_spec
    p = noise_power.make_power_physical_units(p, dt)
    w = noise_power.calculate_power(window)
    w_norm = sp.mean(w).real
    #w /= w_norm
    p = noise_power.prune_power(p).real
    #p /= w_norm
    f = noise_power.ps_freq_axis(dt, n_time)
    p = p[1:]
    f = f[1:]
    amp_m, index_m, f0_m, thermal_m = mn.fit_overf_const(p, w, f)
    self.assertTrue(sp.allclose(amp_m, amp, atol=0.2))
    self.assertTrue(sp.allclose(index_m, index, atol=0.1))
    self.assertTrue(sp.allclose(thermal_m, thermal, atol=0.1))

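# Hedged sketch (not from the original source): judging by the parameters,
# the spectral model fit above is a power law plus a flat thermal floor,
#     P(f) = amp * (f / f_0)**index + thermal
# which can be evaluated directly for comparison against the fitted values.
def _sketch_overf_const_model(f, amp=0.67, index=-1.3, f_0=1.0, thermal=2.7):
    return amp * (f / f_0)**index + thermal
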
def testing(self):
    attributes = {"mo": array(["money", "soup"]),
                  "SITE_CLASS": array(["E", "C"])}
    latitude = [10, 20]
    longitude = [1, 2]
    sites = Sites(latitude, longitude, **attributes)
    site_class2Vs30 = {"C": 30, "E": 40}
    sites.set_Vs30(site_class2Vs30)

    actual = array(latitude)
    self.assert_(allclose(sites.latitude, actual, 0.001))
    actual = array(longitude)
    self.assert_(allclose(sites.longitude, actual, 0.001))
    actual = array(["money", "soup"])
    for (att, act) in map(None, sites.attributes["mo"], actual):
        self.assert_(att == act)
    actual = array([40, 30])
    self.assert_(allclose(sites.attributes["Vs30"], actual, 0.001))

    site_class2Vs30 = {"C": 30}
    try:
        sites.set_Vs30(site_class2Vs30)
    except KeyError:
        pass
    else:
        self.failUnless(False, "KeyError not raised")

def test_gets_thermal_with_correlated(self):
    """Checks that the part of the freq_modes code that compensates the
    thermal for mode subtraction works."""
    self.data *= sp.sqrt(self.bw * 2)  # Makes thermal unity.
    # Need to add something correlated so the modes aren't just the
    # channels.
    correlated_overf = noise_power.generate_overf_noise(1, -2, 0.5,
                                                        self.dt,
                                                        self.data.shape[0])
    correlated_overf += (rand.normal(size=(self.data.shape[0],))
                         * sp.sqrt((self.bw * 2) * 0.3))
    self.data += correlated_overf[:, None, None, None] / sp.sqrt(self.nf)
    Blocks = self.make_blocks()
    # Mask a channel out completely.
    for Data in Blocks:
        Data.data[:, :, :, 3] = ma.masked
    model = 'freq_modes_over_f_4'  # Take out 20% of the thermal power.
    parameters = mn.measure_noise_parameters(Blocks, [model])
    right_ans = sp.ones(self.nf)
    right_ans[3] = T_infinity**2
    for p in parameters.itervalues():
        pars = p[model]
        thermal = pars['thermal']
        self.assertTrue(sp.allclose(thermal, right_ans, rtol=0.3))
        mean_thermal = sp.mean(thermal[right_ans == 1])
        self.assertTrue(sp.allclose(mean_thermal, 1, rtol=0.05))
        self.assertTrue(sp.allclose(pars['over_f_mode_0']['thermal'], 0.3,
                                    atol=0.1))

def test_gaussian_mixture_generator_replicatability():
    "Test the GaussianMixtureModel generator"
    import tempfile
    fname = tempfile.mktemp()

    N = 1000
    n = 500
    D = 10
    K = 3

    gmm = GaussianMixtureModel.generate(fname, K, D)
    gmm.set_seed(100)
    gmm.save()
    X = gmm.sample(N)

    del gmm
    gmm = GaussianMixtureModel.from_file(fname)
    Y = gmm.sample(N)
    assert(sc.allclose(X, Y))

    del gmm
    gmm = GaussianMixtureModel.from_file(fname)
    Y = gmm.sample(N, n)
    assert(sc.allclose(X[:n], Y))

def _LML_covar(self, hyperparams, debugging=False):
    """
    log marginal likelihood
    """
    try:
        KV = self.get_covariances(hyperparams, debugging=debugging)
    except LA.LinAlgError:
        LG.error('linalg exception in _LML_covar')
        return 1E6
    except ValueError:
        LG.error('value error in _LML_covar')
        return 1E6

    lml_quad = 0.5 * (KV['Ytilde'] * KV['UYU']).sum()
    lml_det = 0.5 * SP.log(KV['S']).sum()
    lml_const = 0.5 * self.n * self.t * (SP.log(2 * SP.pi))

    if debugging:
        # Do the calculation without Kronecker tricks and compare.
        _lml_quad = 0.5 * (KV['alpha'] * KV['Yvec']).sum()
        _lml_det = SP.log(SP.diag(KV['L'])).sum()
        assert SP.allclose(_lml_quad, lml_quad), \
            'ouch, quadratic form is wrong in _LML_covar'
        assert SP.allclose(_lml_det, lml_det), \
            'ouch, ldet is wrong in _LML_covar'

    lml = lml_quad + lml_det + lml_const
    return lml

def intersect(self, e, coords=True, actual=True):
    """Returns data about the intersection of self and edge e.

    If coords, return the intersection as a point, or False if the
    intersection does not exist.  If not coords, return True/False.
    actual is a boolean for whether the intersection must lie on both
    edges or not.
    """
    AA = sp.array(e.a - self.a)
    proj = sp.dot(self.dir[0], AA) * unit(self.dir[0])
    point = sp.array(self.a + proj)
    # One issue with this method is that, in the case of parallel edges,
    # it returns a rather arbitrary point on self.
    if actual:
        # Here the parallel case is handled, because the erroneous
        # intersections won't be on both lines.
        if self.containsPoint(point) and e.containsPoint(point):
            if coords:
                return point
            return True
        return False
    else:
        # Here we have to check that the point is at least colinear
        # with both.
        EAP = sp.array(point - e.a)
        EBP = sp.array(point - e.b)
        SAP = sp.array(point - self.a)
        SBP = sp.array(point - self.b)
        s1 = EAP[0] / EBP[0]
        s2 = SAP[0] / SBP[0]
        if (sp.allclose(EAP / EBP, s1, 1e-8, 0)
                and sp.allclose(SAP / SBP, s2, 1e-8, 0)):
            if coords:
                return point
            return True
        return False

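# Hedged sketch (not from the original source): standalone illustration of
# the ratio-based colinearity test used in the non-'actual' branch above,
# with the same sp.allclose(..., rtol=1e-8, atol=0) tolerances.
def _sketch_colinearity_check():
    a = sp.array([0.0, 0.0])
    b = sp.array([2.0, 2.0])
    point = sp.array([3.0, 3.0])  # on the infinite line through a and b
    AP = point - a
    BP = point - b
    s = AP[0] / BP[0]  # common componentwise ratio if colinear
    assert sp.allclose(AP / BP, s, 1e-8, 0)
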
def test_eps_r_noop_multi(self):
    r0 = tc.eps_r_noop(tc.eps_r_noop(self.r2, self.A2, self.B2),
                       self.A1, self.B1)
    r0_ = tc.eps_r_noop_multi(self.r2, [self.A1, self.A2],
                              [self.B1, self.B2])
    self.assertTrue(sp.allclose(r0, r0_))

    r0__ = tc.eps_r_noop_multi(self.r2, [self.AA12], [self.BB12])
    self.assertTrue(sp.allclose(r0, r0__))

    r0C = tc.eps_r_op_2s_C12(self.r2, self.C_A12, self.B1, self.B2)
    r0C_ = tc.eps_r_noop_multi(self.r2, [self.C_A12], [self.B1, self.B2])
    self.assertTrue(sp.allclose(r0C, r0C_))

    r0C2 = tc.eps_r_op_2s_C12_AA34(self.r2, self.C_A12, self.BB12)
    r0C2_ = tc.eps_r_noop_multi(self.r2, [self.C_A12], [self.BB12])
    self.assertTrue(sp.allclose(r0C2, r0C2_))

    r0CA2 = tc.eps_r_op_2s_C12(tc.eps_r_noop(self.r2, self.A2, self.B2),
                               self.C01, self.A0, self.B1)
    r0CA2_ = tc.eps_r_noop_multi(self.r2, [self.C01, self.A2],
                                 [self.A0, self.BB12])
    self.assertTrue(sp.allclose(r0CA2, r0CA2_))

def test_slice_interpolate_linear(self):
    # Construct a 3D array that is a linear function.
    v = self.vect
    a = sp.arange(5)
    a.shape = (5, 1, 1)
    b = sp.arange(2)
    b.shape = (1, 2, 1)
    c = sp.arange(3)
    c.shape = (1, 1, 3)
    v[:, :, :] = a + b + c
    v.set_axis_info('freq', 2, 1)
    v.set_axis_info('a', 1, 1)
    v.set_axis_info('b', 1, 1)

    #### First test the weights. ####
    # Test input sanitization.
    self.assertRaises(ValueError, v.slice_interpolate_weights, [0, 1], 2.5)
    # Test bounds.
    self.assertRaises(ValueError, v.slice_interpolate_weights, [1, 2],
                      [2.5, 1.5])
    # Test linear interpolations in 1D.
    points, weights = v.slice_interpolate_weights(0, 2.5, 'linear')
    self.assertTrue(sp.allclose(weights, 0.5))
    self.assertTrue(2 in points)
    self.assertTrue(3 in points)
    # Test linear interpolations in multi D.
    points, weights = v.slice_interpolate_weights([0, 1, 2],
                                                  [0.5, 0.5, 1.5],
                                                  'linear')
    self.assertTrue(sp.allclose(weights, 1.0 / 8))
    self.assertTrue(points.shape == (8, 3))
    points, weights = v.slice_interpolate_weights([0, 1, 2], [3, 1, 2],
                                                  'linear')
    self.assertTrue(sp.allclose(weights % 1, 0))

    #### Test linear interpolation on the linear function. ####
    # Test on the grid points.
    self.assertEqual(v.slice_interpolate([0, 1, 2], [3.0, 1.0, 1.0]),
                     3.0 + 1.0 + 1.0)
    # Test 1D interpolation.
    out = a + c + 0.347
    out.shape = (5, 3)
    self.assertTrue(sp.allclose(out, v.slice_interpolate(1, 0.347,
                                                         'linear')))
    # Test in 2D.
    out = b + 3.14159 + 1.4112
    out.shape = (2,)
    self.assertTrue(sp.allclose(out, v.slice_interpolate([0, 2],
                                                         [3.14159, 1.4112],
                                                         'linear')))

def test_build_capacityII(self):
    """
    Test that the capacity is the same as Matlab's.
    """
    surface_displacement = array([
        0, 3.77315416609723, 10.76027112052043, 19.35492219305678,
        30.15026521648398, 35.54123657393061, 34.27393320582447,
        33.24309574336885, 31.61082700735715, 30.28033350490522,
        28.99562491669272, 27.72120498849989, 26.45537611101864,
        25.14598796310046, 23.61941589424981, 22.22764650672691,
        20.81732741591404, 19.34361135133465, 17.85074552102223,
        16.37154426563715,
    ])
    surface_displacement.shape = 1, 1, -1
    Ay, Dy, Au, Du = (0.40000000000000, 1.67808144348440,
                      0.80000000000000, 6.71232577393759)
    aa, bb, cc, kappa = (-1.08731273138362, 0.59591863308111,
                         0.80000000000000, 1.000000000000000e-003)
    capacity_parameters = Dy, Ay, Du, Au, aa, bb, cc
    capacity_parameters = array(capacity_parameters)[:, newaxis,
                                                     newaxis, newaxis]
    capacity = calculate_capacity(surface_displacement,
                                  capacity_parameters)
    # Expected output (from Matlab).
    capacity_m = [
        0, 0.68522523139676, 0.80000000000000, 0.80000000000000,
        0.80000000000000, 0.80000000000000, 0.80000000000000,
        0.80000000000000, 0.80000000000000, 0.80000000000000,
        0.80000000000000, 0.80000000000000, 0.80000000000000,
        0.80000000000000, 0.80000000000000, 0.80000000000000,
        0.80000000000000, 0.80000000000000, 0.80000000000000,
        0.80000000000,
    ]
    assert allclose(capacity[0, 0], capacity_m)

def test_solve_eig_bad_ind(self):
    # Set all the information in one pixel to nil.
    self.noise_inv[17, 3, 1, ...] = 0
    self.noise_inv[..., 17, 3, 1] = 0
    self.dirty_map = al.partial_dot(self.noise_inv, self.clean_map)
    self.eig()
    new_clean_map, noise_diag = clean_map.solve_from_eig(
        self.noise_evalsinv, self.noise_evects, self.dirty_map,
        True, feedback=0)
    self.clean_map[17, 3, 1] = 0
    self.assertTrue(sp.allclose(new_clean_map, self.clean_map))

def test_to_from_file(self):
    """Test that vects and mats can be written to and read from file with
    all their properties preserved."""
    # For vectors.
    file_io.save('temp.npy', self.vect)
    new_vect = vector.vect_array(file_io.load('temp.npy'))
    self.assertTrue(sp.allclose(self.vect, new_vect))
    self.assertEqual(self.vect.axes, new_vect.axes)

    # For matrices.
    file_io.save('temp.npy', self.mat)
    new_mat = matrix.mat_array(file_io.load('temp.npy'))
    self.assertTrue(sp.allclose(self.mat, new_mat))
    self.assertEqual(self.mat.axes, new_mat.axes)

    # Messing with stuff should raise exceptions.
    new_mat = file_io.load('temp.npy')
    new_mat.info['cols'] = (0, 3)
    self.assertRaises(ValueError, matrix.mat_array, new_mat)

    # Clean up.
    os.remove('temp.npy')
    os.remove('temp.npy.meta')

def test_partial_dot_mat_vect(self):
    self.mat.shape = (4, 6, 5)
    self.mat.rows = (0, 1)
    self.mat.cols = (2,)
    self.mat.axes = ('x', 'y', 'freq')
    new_vect = dot_products.partial_dot(self.mat, self.vect)
    self.assertEqual(new_vect.shape, (4, 6, 2, 3))
    self.assertEqual(new_vect.axes, ('x', 'y', 'a', 'b'))
    numerical_result = sp.dot(sp.reshape(self.mat, (4 * 6, 5)),
                              sp.reshape(self.vect, (5, 2 * 3)))
    self.assertTrue(sp.allclose(numerical_result.flatten(),
                                new_vect.flatten()))

def test_circle_write_read(self):
    map_copy = copy.deepcopy(self.test_map)
    fits_map.write(map_copy, 'temp_testmap.fits', feedback=0)
    read_map = fits_map.read('temp_testmap.fits', feedback=0)
    os.remove('temp_testmap.fits')
    self.assertTrue(sp.allclose(self.test_map.data, read_map.data))
    for field_name in fits_map.fields:
        self.assertTrue(read_map.field.has_key(field_name))
        self.assertAlmostEqual(self.test_map.field[field_name],
                               read_map.field[field_name])
    # Finally, check the history.
    hist = read_map.history
    self.assertTrue(hist.has_key('000: Created from scratch.'))

def test_DLN_no_variability(self):
    # dimensions (2,1,3,4) = 24 elements
    dim = (2, 1, 3, 4)
    count_up = arange(1, 24, 1)
    log_mean = resize(count_up * 10, dim)
    log_sigma = resize(count_up, dim)
    var_method = None
    dist = Distribution_Log_Normal(var_method)
    sample_values = dist.sample_for_eqrm(log_mean, log_sigma)
    actual = exp(log_mean)
    self.assert_(allclose(sample_values, actual))
    self.assert_(actual.shape == dim)

def test_spawning(self):
    spawn_bins = 2
    dln = GroundMotionDistributionLogNormal(var_method=SPAWN,
                                            atten_spawn_bins=spawn_bins,
                                            n_recurrence_models=1)
    log_mean = ones((1, 1, 3, 4))
    log_mean *= 10
    log_sigma = ones((1, 1, 3, 4))
    sample_values = dln.ground_motion_sample(log_mean, log_sigma)
    act_SA_0 = ones((1, 1, 1, 1, 3, 4)) * (10 - 2.5)
    act_SA_1 = ones((1, 1, 1, 1, 3, 4)) * (10 + 2.5)
    act_SA = exp(concatenate((act_SA_0, act_SA_1)))
    self.assert_(allclose(act_SA, sample_values))

def test_load_raster(self):
    # Write a file to test.
    f = tempfile.NamedTemporaryFile(suffix='.txt',
                                    prefix='HAZIMPtest_jobs',
                                    delete=False,
                                    mode='w+t')
    f.write('exposure_latitude, exposure_longitude, ID, haz_actual\n')
    f.write('8.1, 0.1, 1, 4\n')
    f.write('7.9, 1.5, 2, -9999\n')
    f.write('8.9, 2.9, 3, 6\n')
    f.write('8.9, 3.1, 4, -9999\n')
    f.write('9.9, 2.9, 5, -9999\n')
    f.close()

    inst = JOBS[LOADCSVEXPOSURE]
    con_in = context.Context()
    con_in.exposure_lat = None
    con_in.exposure_long = None
    con_in.exposure_att = {}
    test_kwargs = {'file_name': f.name}
    inst(con_in, **test_kwargs)
    os.remove(f.name)

    # Write a hazard file.
    f = tempfile.NamedTemporaryFile(suffix='.aai',
                                    prefix='HAZIMPtest_jobs',
                                    delete=False,
                                    mode='w+t')
    f.write('ncols 3 \r\n')
    f.write('nrows 2 \r\n')
    f.write('xllcorner +0. \r\n')
    f.write('yllcorner +8. \r\n')
    f.write('cellsize 1 \r\n')
    f.write('NODATA_value -9999 \r\n')
    f.write('1 2 -9999 \r\n')
    f.write('4 5 6 ')
    f.close()

    haz_v = 'haz_v'
    inst = JOBS[LOADRASTER]
    test_kwargs = {'file_list': [f.name], 'attribute_label': haz_v}
    inst(con_in, **test_kwargs)
    the_nans = isnan(con_in.exposure_att[haz_v])
    con_in.exposure_att.loc[the_nans, (haz_v,)] = -9999
    msg = "con_in.exposure_att[haz_v] " + str(con_in.exposure_att[haz_v])
    msg += "\n not = con_in.exposure_att['haz_actual'] " + \
           str(con_in.exposure_att['haz_actual'])
    self.assertTrue(allclose(con_in.exposure_att[haz_v],
                             con_in.exposure_att['haz_actual']), msg)
    os.remove(f.name)

def test_separate_points_by_polygon(self):
    U = [[0, 0], [1, 0], [1, 1], [0, 1]]  # Unit square

    indices, count = separate_points_by_polygon(
        [[0.5, 0.5], [1, -0.5], [0.3, 0.2]], U)
    assert allclose(indices, [0, 2, 1])
    assert count == 2

    # One more test of vector formulation returning indices.
    polygon = [[0, 0], [1, 0], [0.5, -1], [2, -1], [2, 1], [0, 1]]
    points = [[0.5, 0.5], [1, -0.5], [1.5, 0], [0.5, 1.5], [0.5, -0.5]]
    res, count = separate_points_by_polygon(points, polygon)
    assert allclose(res, [0, 1, 2, 4, 3])
    assert count == 3

    polygon = [[0, 0], [1, 0], [0.5, -1], [2, -1], [2, 1], [0, 1]]
    points = [[0.5, 1.4], [0.5, 0.5], [1, -0.5], [1.5, 0],
              [0.5, 1.5], [0.5, -0.5]]
    res, count = separate_points_by_polygon(points, polygon)
    assert allclose(res, [1, 2, 3, 5, 4, 0])
    assert count == 3

def __init__(self, ground_motion_model_names, periods, model_weights):
    self.periods = periods
    # Should do this just once, when the para values are first verified.
    # The -ve value means 'the logic tree is not collapsed'.
    self.model_weights = asarray(model_weights)
    if not allclose(1, self.model_weights.sum()):
        print 'model_weights,', -self.model_weights
        raise ValueError('abs(self.model_weights) did not sum to 1!')
    self.GM_models = []
    for GM_model_name in ground_motion_model_names:
        self.GM_models.append(
            Ground_motion_calculator(GM_model_name, periods))

def test_reassign_phase_regenerate_models(self):
    physmods = OpenPNM.Physics.models
    net = OpenPNM.Network.Cubic(shape=[5, 5, 5])
    geo1 = OpenPNM.Geometry.GenericGeometry(network=net, pores=net.Ps,
                                            throats=net.Ts)
    geo1['pore.diameter'] = 1
    geo1['throat.diameter'] = 1
    geo1['throat.length'] = 1
    phase1 = OpenPNM.Phases.GenericPhase(network=net)
    phase1['pore.viscosity'] = 1
    phase2 = OpenPNM.Phases.GenericPhase(network=net)
    phase2['pore.viscosity'] = 10
    phys = OpenPNM.Physics.GenericPhysics(network=net, phase=phase1,
                                          geometry=geo1)
    phys.models.add(propname='throat.hydraulic_conductance',
                    model=physmods.hydraulic_conductance.hagen_poiseuille)
    assert sp.allclose(phys['throat.hydraulic_conductance'], 0.02454369)
    phys.parent_phase = phase2
    assert sp.allclose(phys['throat.hydraulic_conductance'], 0.02454369)
    phys.models.regenerate()
    assert sp.allclose(phys['throat.hydraulic_conductance'], 0.00245437)

def test_forecast_fatality(self):
    # This test is bad, since I'm writing it based on the code.
    MMI = array([1, 2, 4., 10., 11.])
    population = ones(MMI.shape)
    beta = 1.0
    theta = 10.0 * e**-2.0
    fatality = forecast_fatality(MMI, population, beta, theta)
    expected = array([0, 0, 0, norm.cdf(2.0), norm.cdf(2.0)])
    #print "expected", expected
    #print "fatality", fatality
    self.failUnless(allclose(expected, fatality))

    MMI = array([5.])
    pop_scaler = 5.0
    population = ones(MMI.shape) * pop_scaler
    beta = 1.0
    theta = 5.0 * e**-2.0
    fatality = forecast_fatality(MMI, population, beta, theta)
    expected = array([pop_scaler * 1.0 / beta * norm.cdf(2.0)])
    #print "expected", expected
    #print "fatality", fatality
    self.failUnless(allclose(expected, fatality))

def test_data(self):
    nf = self.Data.dims[-1]
    # Rebin by 3's.
    data = sp.arange(10, dtype=float) * 3.0
    data = data[..., None] * sp.arange(4)
    data = data[..., None] * sp.arange(2)
    data = data[..., None] * sp.arange(nf)
    new_data = sp.arange(3, dtype=float) * 9.0 + 3.0
    new_data = new_data[..., None] * sp.arange(4)
    new_data = new_data[..., None] * sp.arange(2)
    new_data = new_data[..., None] * sp.arange(nf)
    self.Data.data[...] = data
    rebin_time.rebin(self.Data, 3)
    self.assertTrue(sp.allclose(self.Data.data, new_data))

def test_washburn_throat_values(self):
    self.water['throat.surface_tension'] = 0.072
    self.water['throat.contact_angle'] = 120
    f = OpenPNM.Physics.models.capillary_pressure.washburn
    self.phys.models.add(propname='throat.capillary_pressure',
                         model=f,
                         surface_tension='throat.surface_tension',
                         contact_angle='throat.contact_angle',
                         throat_diameter='throat.diameter')
    a = 0.14399999999999993
    assert sp.allclose(self.water['throat.capillary_pressure'][0], a)
    self.phys.models.remove('throat.capillary_pressure')
    del self.water['throat.surface_tension']
    del self.water['throat.contact_angle']

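# Hedged sketch (not from the original source): the expected value above is
# consistent with the Washburn relation Pc = -4*sigma*cos(theta)/d for a
# cylindrical throat, assuming the fixture sets throat.diameter to 1.
def _sketch_washburn_expected():
    sigma = 0.072              # N/m, as set in the test
    theta = sp.radians(120.0)  # contact angle, as set in the test
    d = 1.0                    # assumed throat diameter from the fixture
    Pc = -4.0 * sigma * sp.cos(theta) / d
    assert sp.allclose(Pc, 0.14399999999999993)
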
def __add__(self, other):
    assert self.ells == other.ells
    if scipy.allclose(self.sedges, other.sedges, rtol=1e-04, atol=1e-05,
                      equal_nan=False):
        self.counts += other.counts
        self.weight_tot += other.weight_tot
    else:
        raise ValueError('s-edges are not compatible.')
    return self

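# Hedged sketch (not from the original source): the __add__ above guards an
# in-place accumulation with scipy.allclose over the bin edges, so two
# measurements are only combined when their binning grids agree numerically.
def _sketch_edge_guarded_accumulate(counts_a, edges_a, counts_b, edges_b):
    if not scipy.allclose(edges_a, edges_b, rtol=1e-04, atol=1e-05):
        raise ValueError('s-edges are not compatible.')
    return counts_a + counts_b
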
def restore_LCF_r(A, r, Gi, sanity_checks=False):
    if Gi is None:
        x = r
    else:
        x = Gi.dot(r.dot(Gi.conj().T))

    M = eps_r_noop(x, A, A)
    # la.eigh wraps LAPACK routines, which return eigenvalues in
    # ascending order.
    ev, EV = la.eigh(M)

    if sanity_checks:
        assert np.all(ev == np.sort(ev)), "unexpected eigenvalue ordering"

    rm1 = mm.simple_diag_matrix(ev, dtype=A.dtype)
    Gm1 = EV.conj().T

    if Gi is None:
        Gi = EV  # For the left-uniform case.
        r = rm1  # For the sanity check.

    for s in xrange(A.shape[0]):
        A[s] = Gm1.dot(A[s].dot(Gi))

    if sanity_checks:
        rm1_ = eps_r_noop(r, A, A)
        if not sp.allclose(rm1_, rm1, atol=1E-12, rtol=1E-12):
            log.warning("Sanity Fail in restore_LCF_r!: r is bad!")
            log.warning(la.norm(rm1_ - rm1))

    Gm1_i = EV

    if sanity_checks:
        eye = sp.eye(A.shape[1])
        if not sp.allclose(sp.dot(Gm1, Gm1_i), eye, atol=1E-12, rtol=1E-12):
            log.warning("Sanity Fail in restore_LCF_r!: Bad GT! (off by %g)",
                        la.norm(sp.dot(Gm1, Gm1_i) - eye))

    return rm1, Gm1, Gm1_i

def test_MaximizeLikelihood(self):
    """Tests maximization of the likelihood.

    Make sure it gives the same value for several starting points."""
    random.seed(1)
    scipy.random.seed(1)

    tl = phydmslib.treelikelihood.TreeLikelihood(self.tree, self.alignment,
                                                 self.model)
    logliks = []
    paramsarrays = []
    for itest in range(3):
        modelparams = self.getModelParams(itest)
        tl.updateParams(modelparams)
        startloglik = tl.loglik
        result = tl.maximizeLikelihood()
        self.assertTrue(
            tl.loglik > startloglik, "no loglik increase: "
            "start = {0}, end = {1}".format(startloglik, tl.loglik))
        for (otherloglik, otherparams) in zip(logliks, paramsarrays):
            self.assertTrue(
                scipy.allclose(tl.loglik, otherloglik, atol=1e-3,
                               rtol=1e-3),
                "Large difference in loglik: {0} vs {1}".format(
                    otherloglik, tl.loglik))
            self.assertTrue(
                scipy.allclose(tl.paramsarray, otherparams, atol=1e-2,
                               rtol=1e-1),
                "Large difference in paramsarray: {0} vs {1}, {2}".format(
                    otherparams, tl.paramsarray, self.model))
        logliks.append(tl.loglik)
        paramsarrays.append(tl.paramsarray)

def test_generate_motion_csv(self):
    # Parameters
    output_dir = self.dir
    site_tag = 'ernabella'
    #is_bedrock = False
    soil_amp = False

    # 1. Create and save analysis objects.
    self.save_analysis_objects(output_dir, site_tag)

    # 2. Run through generate_motion_csv.
    output_filenames = generate_motion_csv(output_dir, site_tag, soil_amp)

    expected_ground_motion = asarray([[0, 1, 2],
                                      [3, 4, 5],
                                      [6, 7, 8],
                                      [9, 10, 11],
                                      [12, 13, 14]])
    expected_atten_periods = asarray([0, 1.0, 2.0])

    # 3. Read in the generated files.
    for gmm_i, filename in enumerate(output_filenames):
        file_h = open(filename, 'r')
        text = file_h.read().splitlines()
        # Ditch the comment lines.
        text.pop(0)
        text.pop(0)
        text.pop(0)
        text.pop(0)
        text.pop(0)
        # Convert a space-separated text line into a numeric float array.
        periods_f = array([float(ix) for ix in text[0].split(' ')])
        self.assert_(allclose(periods_f, array(expected_atten_periods)))
        text.pop(0)
        motion_f = array([float(ix) for ix in text[0].split(' ')])
        self.assert_(allclose(motion_f, expected_ground_motion[gmm_i]))
        file_h.close()

def _LML_covar(self, hyperparams, debugging=False):
    """
    log marginal likelihood
    """
    self._update_inputs(hyperparams)

    try:
        KV = self.get_covariances(hyperparams, debugging=debugging)
    except LA.LinAlgError:
        pdb.set_trace()
        LG.error('linalg exception in _LML_covar')
        return 1E6

    Si = 1. / KV['Stilde_rc']
    lml_quad = 0.5 * (ravel(KV['UYtildeU_rc'])**2 * Si).sum()
    lml_det = 0.5 * (SP.log(KV['S_s']).sum() * self.n
                     + SP.log(KV['S_o']).sum() * self.t)
    lml_det += 0.5 * SP.log(KV['Stilde_rc']).sum()
    lml_const = 0.5 * self.nt * (SP.log(2 * SP.pi))

    if debugging:
        # Do the calculation without Kronecker tricks and compare.
        _lml_quad = 0.5 * (KV['alpha'] * KV['Yvec']).sum()
        _lml_det = SP.log(SP.diag(KV['L'])).sum()
        assert SP.allclose(_lml_quad, lml_quad, atol=1E-2, rtol=1E-2), \
            'ouch, quadratic form is wrong: %.2e' % SP.absolute(_lml_quad - lml_quad)
        assert SP.allclose(_lml_det, lml_det, atol=1E-2, rtol=1E-2), \
            'ouch, ldet is wrong in _LML_covar: %.2e' % SP.absolute(_lml_det - lml_det)

    lml = lml_quad + lml_det + lml_const
    return lml

def test_load(self):
    """Test initial load of Bridges object."""
    b = bridges.Bridges.from_csv(self.file_name)

    actual = scipy.array(self.lat)
    b_lat_str = self.pp.pformat(b.latitude)
    actual_str = self.pp.pformat(actual)
    msg = ('b.latitude != actual\n'
           '(%s !=\n%s)' % (b_lat_str, actual_str))
    self.assert_(scipy.allclose(b.latitude, actual, 0.001), msg)

    actual = scipy.array(self.lon)
    b_lon_str = self.pp.pformat(b.longitude)
    actual_str = self.pp.pformat(actual)
    msg = ('b.longitude != actual\n'
           '(%s !=\n%s)' % (b_lon_str, actual_str))
    self.assert_(scipy.allclose(b.longitude, actual, 0.001), msg)

    actual = scipy.array(['E', 'F', 'G', 'D', 'E', 'F', 'G', 'C'])
    for (att, act) in map(None, b.attributes['SITE_CLASS'], actual):
        msg = ('Expected attribute == actual (got %s == %s)'
               % (str(att), str(act)))
        self.failUnlessEqual(att, act, msg)

    actual = scipy.array([0, 32, 20, 4, 0, 0, 12, 0])
    for (att, act) in map(None, b.attributes['SKEW'], actual):
        msg = ('Expected attribute == actual (got %s == %s)'
               % (str(att), str(act)))
        self.failUnlessEqual(att, act, msg)

    actual = scipy.array([2, 3, 4, 5, 6, 7, 8, 9])
    for (att, act) in map(None, b.attributes['BID'], actual):
        msg = ('Expected attribute == actual (got %s == %s)'
               % (str(att), str(act)))
        self.failUnlessEqual(att, act, msg)

def test_purcell_throat_values(self):
    self.water['throat.surface_tension'] = 0.072
    self.water['throat.contact_angle'] = 120
    f = OpenPNM.Physics.models.capillary_pressure.purcell
    self.phys.models.add(propname='throat.capillary_pressure',
                         model=f,
                         r_toroid=0.1,
                         surface_tension='throat.surface_tension',
                         contact_angle='throat.contact_angle',
                         throat_diameter='throat.diameter')
    a = 0.26206427646507374
    assert sp.allclose(self.water['throat.capillary_pressure'][0], a)
    self.phys.models.remove('throat.capillary_pressure')
    del self.water['throat.surface_tension']
    del self.water['throat.contact_angle']

def test_Strasser_et_al_2010_interface_rup_width(self):
    Mw = array([4.5, 6.5, 13., 13.])
    area = array([100., 100., 100., 100.])
    dip = array([0., 0., 90., 90.])
    fault_width = array([15000., 15000., 15000., 2.])
    scaling_dic = {'scaling_rule': 'Strasser_et_al_2010_interface'}
    width = scaling_calc_rup_width(Mw, scaling_dic, dip,
                                   rup_area=area,
                                   max_rup_width=fault_width)
    correct = [4.983104559705295, 25.089961794654,
               4797.334486366892, 2.]
    assert allclose(correct, width)

def test_Strasser_et_al_2010_intraslab_rup_width(self):
    Mw = array([4.5, 6.5, 13., 13.])
    area = array([100., 100., 100., 100.])
    dip = array([0., 0., 90., 90.])
    fault_width = array([15000., 15000., 15000., 2.])
    scaling_dic = {'scaling_rule': 'Strasser_et_al_2010_intraslab'}
    width = scaling_calc_rup_width(Mw, scaling_dic, dip,
                                   rup_area=area,
                                   max_rup_width=fault_width)
    correct = [3.499451670283573, 18.03017740859569,
               3715.352290971724, 2.]
    assert allclose(correct, width)

def check_YNGKP_M0_attributes(self):
    """Make sure `YNGKP_M0` has the expected attribute values."""
    self.assertEqual(self.nsites, self.YNGKP_M0.nsites)

    # Make sure Pxy has rows summing to zero.
    self.assertFalse(scipy.isnan(self.YNGKP_M0.Pxy).any())
    self.assertFalse(scipy.isinf(self.YNGKP_M0.Pxy).any())
    diag = scipy.eye(N_CODON, dtype='bool')
    self.assertTrue(scipy.allclose(0, scipy.sum(self.YNGKP_M0.Pxy[0],
                                                axis=1)))
    self.assertTrue(scipy.allclose(0, self.YNGKP_M0.Pxy[0].sum()))
    self.assertTrue((self.YNGKP_M0.Pxy[0][diag] <= 0).all())
    self.assertTrue((self.YNGKP_M0.Pxy[0][~diag] >= 0).all())

    assert self.YNGKP_M0.Pxy.shape == (1, N_CODON, N_CODON)
    for param in self.YNGKP_M0.dPxy:
        assert self.YNGKP_M0.dPxy[param].shape == (1, N_CODON, N_CODON)
        assert self.YNGKP_M0.B[param].shape == (1, N_CODON, N_CODON)
    assert self.YNGKP_M0.Ainv.shape == (1, N_CODON, N_CODON)
    assert self.YNGKP_M0.A.shape == (1, N_CODON, N_CODON)
    assert self.YNGKP_M0.M(0.2).shape == (self.nsites, N_CODON, N_CODON)
    for (pname, value) in self.params.items():
        assert self.YNGKP_M0.dM(0.2, pname, self.YNGKP_M0.M(0.2)).shape \
            == (self.nsites, N_CODON, N_CODON)

def test_transpose_partial_dot(self):
    self.mat.shape = (5, 4, 6)
    self.mat.cols = (1, 2)
    self.mat.rows = (0,)
    self.mat.axes = ('freq', 'x', 'y')
    matT = self.mat.mat_transpose()
    new_vect = algebra.partial_dot(matT, self.vect)
    self.assertEqual(new_vect.shape, (4, 6, 2, 3))
    self.assertEqual(new_vect.axes, ('x', 'y', 'a', 'b'))
    # Reform the original matrix to get the same numerical result.
    mat = sp.reshape(self.mat, (5, 4 * 6))
    mat = sp.rollaxis(mat, 1, 0)
    numerical_result = sp.dot(mat, sp.reshape(self.vect, (5, 2 * 3)))
    self.assertTrue(sp.allclose(numerical_result.flatten(),
                                new_vect.flatten()))

def test_modified_Wells_and_Coppersmith_94_rup_width2(self):
    Mw = array([4.5, 6.5, 13., 13.])
    area = array([100., 100., 100., 100.])
    dip = array([0., 0., 90., 90.])
    fault_width = array([15000., 15000., 15000., 2.])
    scaling_dic = {'scaling_rule': 'modified_Wells_and_Coppersmith_94'}
    width = scaling_calc_rup_width(Mw, scaling_dic, dip,
                                   rup_area=area,
                                   max_rup_width=fault_width)
    correct = [10., 10., 5., 2.]
    assert allclose(correct, width)

def compare_attributes(atts1, atts2):
    self.assertEqual(len(atts1.keys()), len(atts2.keys()),
                     "{}".format(nameRun))
    self.assertListEqual(sorted(atts1.keys()), sorted(atts2.keys()),
                         "{}".format(nameRun))
    for item in atts1:
        nequal = True
        if isinstance(atts1[item], numpy.ndarray):
            nequal = sp.logical_not(sp.array_equal(atts1[item], atts2[item]))
        else:
            nequal = atts1[item] != atts2[item]
        if nequal:
            print("WARNING: {}: not exactly equal, using allclose for {}"
                  .format(nameRun, item))
            print(atts1[item], atts2[item])
            allclose = sp.allclose(atts1[item], atts2[item])
            self.assertTrue(allclose, "{}".format(nameRun))
    return