def test_evaluate_sphere( self ):
    '''Evaluating a sphere's sightlines should give the same result for
    the 'add' and 'highest value' methods when only one structure exists.
    '''

    # Evaluation method 1: sum contributions along each sightline.
    ip1 = idealized.IdealizedProjection()
    ip1.generate_sightlines( 10, seed=1234 )
    ip1.add_sphere(
        c = (0., 0.),
        r = ip1.sidelength / 2.,
        value = 1.,
        evaluate_method = 'add',
    )
    v1s = ip1.evaluate_sightlines( method='add' )

    # Evaluation method 2: take the highest value along each sightline.
    ip2 = idealized.IdealizedProjection()
    ip2.generate_sightlines( 10, seed=1234 )
    ip2.add_sphere(
        c = (0., 0.),
        r = ip2.sidelength / 2.,
        value = 1.,
        evaluate_method = 'highest value',
    )
    v2s = ip2.evaluate_sightlines( method='highest value' )

    # With a single structure the two methods must agree.
    npt.assert_allclose( v1s, v2s )
def setUp( self ):
    '''Create a fresh projection with a fixed set of sightlines for
    each test, seeded for reproducibility.
    '''

    self.ip = idealized.IdealizedProjection()

    # Generate sightlines
    n = 1000
    self.ip.generate_sightlines( n, seed=1234 )
def test_evaluate_sightlines( self ):
    '''Sightlines through a uniform background should all evaluate to
    the background value.
    '''

    # Setup
    ip = idealized.IdealizedProjection()
    ip.generate_sightlines( 1000, seed=1234 )
    value = 1.
    ip.add_background( value )

    # Evaluate
    vs = ip.evaluate_sightlines()

    # Test: every sightline sees exactly the background value.
    npt.assert_allclose( vs, np.full( ( ip.n, ), value, ) )
def test_store_and_load( self ):
    '''A PairSampler saved to disk and loaded back should round-trip
    its sidelength, bin edges, and all generated pair-sampling coords.
    '''

    filepath = './tests/data/pairsamples.h5'

    # Create an idealized projection to base it on.
    ip = idealized.IdealizedProjection()
    ip.add_ellipse(
        c = (0., 0.),
        a = 5.,
    )

    # Set-up bins
    edges = np.linspace( 0., ip.sidelength/2., 5 )
    v_edges = np.array([ -0.5, 0.5, 1.5 ])

    # Generate some sightlines through the data for the primary data coords.
    n_data_coords = 5000
    ip.generate_sightlines( n_data_coords )
    is_valid = ip.evaluate_sightlines() > 0
    data_coords = np.array( ip.sls )[is_valid,:]

    # Generate the sampling coords
    ps1 = sample.PairSampler( ip.sidelength, edges, v_edges )
    dd_coords1, dd_coords2 = ps1.generate_pair_sampling_coords(
        data_coords,
        label = 'DD',
    )
    dr_coords1, dr_coords2 = ps1.generate_pair_sampling_coords(
        label = 'DR',
    )

    # Save for later
    ps1.save( filepath )

    # Open
    ps2 = sample.PairSampler.load( filepath )

    # Check for equality
    npt.assert_allclose( ps1.sidelength, ps2.sidelength )
    npt.assert_allclose( ps1.edges, ps2.edges )
    npt.assert_allclose( ps1.v_edges, ps2.v_edges )
    for label, item in ps1.data['coords'].items():
        for key, coords in item.items():
            npt.assert_allclose( coords, ps2.data['coords'][label][key] )
def test_generate_sightlines( self ):
    '''Generated sightlines should have the requested count and fall
    inside the projection's domain (here, within (-10, 10) per axis).
    '''

    ip = idealized.IdealizedProjection()

    # Generate sightlines
    n = 1000
    ip.generate_sightlines( n, seed=1234 )

    # Test shape and bounds
    assert ip.sl_xs.shape == ( n, )
    assert ip.sl_ys.shape == ( n, )
    assert ip.sl_xs.min() > -10.
    assert ip.sl_ys.min() > -10.
    assert ip.sl_xs.max() < 10.
    assert ip.sl_ys.max() < 10.
def test_calculate_idealized_projection( self ):
    '''Generating the idealized projection should merge structures with
    equal values into a single polygon.
    '''

    # Setup
    ip = idealized.IdealizedProjection()
    ip.add_ellipse( (0., 0.), 2. )

    # Actual calculation
    ip.generate_idealized_projection()

    # Check the structures
    # The background and the ellipse have the same value, so they should
    # be merged
    assert len( ip.ip ) == 1.
    assert ip.ip[0].almost_equals( ip.structs[0] )

    # Check the values
    npt.assert_allclose( np.array( ip.ip_values ), np.array([ 1., ]) )
def test_calculate_idealized_projection_multipolygons_only( self ):
    '''The idealized projection should also handle the case where every
    structure is a multipolygon (overlapping clump distributions).
    '''

    # Setup: two overlapping clump fields with identical values.
    ip = idealized.IdealizedProjection()
    ip.add_clumps(
        r_clump = 0.2,
        c = (0., 0.),
        r_area = 5.,
        fcov = 0.5,
    )
    ip.add_clumps(
        r_clump = 0.2,
        c = (-2., 0.),
        r_area = 5.,
        fcov = 0.5,
    )

    # Actual calculation
    ip.generate_idealized_projection()

    # Check the values: equal-valued structures merge into one entry.
    npt.assert_allclose( np.array( ip.ip_values ), np.array([ 1., ]) )
def test_evaluate_sightlines_two_structs_add( self ):
    '''With the 'add' method, sightlines through both the background and
    an ellipse should sum the two values; the fraction of such sightlines
    should scale with the ellipse's covering area.
    '''

    # Setup
    ip = idealized.IdealizedProjection()
    ip.generate_sightlines( 1000, seed=1234 )
    value = 1.
    ip.add_background( value )
    ip.add_ellipse( c=(0.,0.), a=3., value=2.*value )

    # Evaluate
    vs = ip.evaluate_sightlines( method='add' )
    # Sightlines through both structures see value + 2*value.
    is_thrice_value = np.isclose( vs, 3.*value )

    # Check
    # The number of points with that value should scale as the area of
    # the ellipse
    npt.assert_allclose(
        is_thrice_value.sum() / float( ip.n ),
        ip.structs[1].area / ip.structs[0].area,
        rtol = 0.05,
    )
def test_default( self ):
    '''Hard to do piecewise tests for this, so test the results:
    pair counts estimated from sampled sightlines should match the
    traditional two-point pair counts within tolerance.
    '''

    # Create an idealized projection to base it on.
    ip = idealized.IdealizedProjection()
    ip.add_ellipse(
        c = (0., 0.),
        a = 5.,
    )

    # Set-up bins
    edges = np.linspace( 0., ip.sidelength/2., 5 )
    v_edges = np.array([ -0.5, 0.5, 1.5 ])

    # Generate some sightlines through the data for the primary data coords.
    n_data_coords = 5000
    ip.generate_sightlines( n_data_coords )
    is_valid = ip.evaluate_sightlines() > 0
    data_coords = np.array( ip.sls )[is_valid,:]

    # Generate the sampling coords
    pair_sampler = sample.PairSampler( ip.sidelength, edges, v_edges )
    dd_coords1, dd_coords2 = pair_sampler.generate_pair_sampling_coords(
        data_coords,
    )
    dr_coords1, dr_coords2 = pair_sampler.generate_pair_sampling_coords()

    # Get sightline evaluations for each set of sampling coords.
    vs = []
    for coords in [ dd_coords1, dd_coords2, dr_coords1, dr_coords2 ]:
        vs_bins = []
        for coords_bin in coords:
            ip.set_sightlines( coords_bin )
            vs_bins.append( ip.evaluate_sightlines() )
        vs.append( vs_bins )
    dd_vs1, dd_vs2, dr_vs1, dr_vs2 = np.array( vs )

    # Calculate pair counts from the sampled sightline evaluations.
    actual = {}
    actual['n_dd'] = pair_sampler.estimate_pair_counts( dd_vs2 )
    actual['n_dr'] = pair_sampler.estimate_pair_counts( dr_vs2 )

    # Compare to traditional counts
    ip.generate_sightlines( 1000 )
    is_valid = ip.evaluate_sightlines() > 0
    coords = np.array( ip.sls )[is_valid,:]
    for count in [ 'n_dd', 'n_dr' ]:
        count_standard, edges = stats.two_point_autocf(
            coords,
            mins = [ -ip.sidelength/2., -ip.sidelength/2. ],
            maxes = [ ip.sidelength/2., ip.sidelength/2. ],
            bins = pair_sampler.edges,
            estimator = count,
        )
        normalized_standard = count_standard / count_standard.sum()
        npt.assert_allclose(
            actual[count][:,1],
            normalized_standard,
            # atol = ( 1. / np.sqrt( count_standard ) ).max(),
            atol = 0.2,
        )