Code example #1
File: test_config.py  Project: rmjarvis/TreeCorr
def test_parse_bool():
    """Test parse_bool functionality
    """
    # Booleans have a number of possible specifications
    assert treecorr.config.parse_bool('True') == True
    assert treecorr.config.parse_bool(True) == True
    assert treecorr.config.parse_bool(1) == True
    assert treecorr.config.parse_bool('yes') == True
    assert treecorr.config.parse_bool('T') == True
    assert treecorr.config.parse_bool('y') == True
    assert treecorr.config.parse_bool('1') == True
    assert treecorr.config.parse_bool('10') == True

    assert treecorr.config.parse_bool('False') == False
    assert treecorr.config.parse_bool(False) == False
    assert treecorr.config.parse_bool(0) == False
    assert treecorr.config.parse_bool('no') == False
    assert treecorr.config.parse_bool('F') == False
    assert treecorr.config.parse_bool('n') == False
    assert treecorr.config.parse_bool('0') == False

    with assert_raises(ValueError):
        treecorr.config.parse_bool('G')
    with assert_raises(ValueError):
        treecorr.config.parse_bool(13.8)
    with assert_raises(ValueError):
        treecorr.config.parse_bool('13.8')
    with assert_raises(ValueError):
        treecorr.config.parse_bool('Hello')
Code example #2
File: test_config.py  Project: joezuntz/TreeCorr
def test_parse_bool():
    """Test parse_bool functionality
    """
    # Booleans have a number of possible specifications
    assert treecorr.config.parse_bool('True') == True
    assert treecorr.config.parse_bool(True) == True
    assert treecorr.config.parse_bool(1) == True
    assert treecorr.config.parse_bool('yes') == True
    assert treecorr.config.parse_bool('T') == True
    assert treecorr.config.parse_bool('y') == True
    assert treecorr.config.parse_bool('1') == True
    assert treecorr.config.parse_bool('10') == True

    assert treecorr.config.parse_bool('False') == False
    assert treecorr.config.parse_bool(False) == False
    assert treecorr.config.parse_bool(0) == False
    assert treecorr.config.parse_bool('no') == False
    assert treecorr.config.parse_bool('F') == False
    assert treecorr.config.parse_bool('n') == False
    assert treecorr.config.parse_bool('0') == False

    with assert_raises(ValueError):
        treecorr.config.parse_bool('G')
    with assert_raises(ValueError):
        treecorr.config.parse_bool(13.8)
    with assert_raises(ValueError):
        treecorr.config.parse_bool('13.8')
    with assert_raises(ValueError):
        treecorr.config.parse_bool('Hello')
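
Both copies of test_parse_bool above pin down the same behaviour: true-like strings ('True', 'yes', 'T', 'y'), nonzero integers, and nonzero integer strings map to True; their false-like counterparts map to False; and anything else (floats, unrecognized strings) raises ValueError. As a reference point, here is a minimal stand-in that satisfies the same assertions. It is only an illustrative sketch, not TreeCorr's actual parse_bool implementation.

def parse_bool_sketch(value):
    """Illustrative stand-in for treecorr.config.parse_bool."""
    if isinstance(value, bool):
        return value
    if isinstance(value, int):
        return value != 0
    if isinstance(value, str):
        s = value.strip().lower()
        if s in ('true', 'yes', 't', 'y'):
            return True
        if s in ('false', 'no', 'f', 'n'):
            return False
        try:
            return int(s) != 0      # handles '1', '0', '10', ...
        except ValueError:
            pass
    raise ValueError("Unable to parse %r as a bool" % (value,))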
Code example #3
File: test_config.py  Project: rmjarvis/TreeCorr
def test_parse_unit():
    """Test parse_unit functionality
    """
    assert np.isclose(treecorr.config.parse_unit('radian'), 1.)
    assert np.isclose(treecorr.config.parse_unit('deg'), np.pi / 180.)
    assert np.isclose(treecorr.config.parse_unit('degree'), np.pi / 180.)
    assert np.isclose(treecorr.config.parse_unit('degrees'), np.pi / 180.)
    assert np.isclose(treecorr.config.parse_unit('arcmin'), np.pi / 180. / 60)
    assert np.isclose(treecorr.config.parse_unit('arcminutes'), np.pi / 180. / 60)
    assert np.isclose(treecorr.config.parse_unit('arcsec'), np.pi / 180. / 60 / 60)
    assert np.isclose(treecorr.config.parse_unit('arcseconds'), np.pi / 180. / 60 / 60)

    with assert_raises(ValueError):
        treecorr.config.parse_unit('gradians')
    with assert_raises(ValueError):
        treecorr.config.parse_unit('miles')
    with assert_raises(ValueError):
        treecorr.config.parse_unit('Mpc')
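
test_parse_unit checks that parse_unit returns the size of each angular unit in radians: 1 for radians, pi/180 for degrees, pi/(180*60) for arcminutes, and pi/(180*3600) for arcseconds, with longer spellings ('degree', 'degrees', 'arcminutes', ...) accepted as well. A minimal illustrative lookup consistent with these assertions (not the real implementation) might be:

import numpy as np

# Hypothetical stand-in: map a unit name to its size in radians.
_angle_units = {
    'radian': 1.,
    'deg': np.pi / 180.,
    'arcmin': np.pi / 180. / 60.,
    'arcsec': np.pi / 180. / 3600.,
}

def parse_unit_sketch(value):
    """Illustrative only: accept 'deg', 'degree', 'degrees', etc. via prefix match."""
    for name, rad in _angle_units.items():
        if value.startswith(name):
            return rad
    raise ValueError("Invalid unit %r" % (value,))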
Code example #4
File: test_config.py  Project: joezuntz/TreeCorr
def test_parse_variables():
    """Test parse_variables functionality
    """
    config = treecorr.read_config('configs/nn.yaml')

    # parse_variables is used by corr2 executable to add or change items in config
    # with extra command line arguments
    assert 'file_name2' not in config
    treecorr.config.parse_variable(config, 'file_name2 = data/nn_data.dat')
    assert config['file_name2'] == 'data/nn_data.dat'

    treecorr.config.parse_variable(config, 'file_name2=data/nn_data2.dat')
    assert config['file_name2'] == 'data/nn_data2.dat'

    # It's also used by params parsing, so it removes trailing comments
    treecorr.config.parse_variable(
        config, 'file_name2=data/nn_data3.dat # The second file')
    assert config['file_name2'] == 'data/nn_data3.dat'

    # Extra whitespace is ignored
    treecorr.config.parse_variable(
        config, 'file_name2 = \t\tdata/nn_data4.dat       ')
    assert config['file_name2'] == 'data/nn_data4.dat'

    # Invalid if no = sign.
    with assert_raises(ValueError):
        treecorr.config.parse_variable(config, 'file_name2:data/nn_data2.dat')

    # Can specify lists with [], () or {}
    treecorr.config.parse_variable(config, 'file_name2 = [f1, f2, f3]')
    assert config['file_name2'] == ['f1', 'f2', 'f3']
    treecorr.config.parse_variable(config,
                                   'file_name2 = (   g1\t, g2\t, g3\t)')
    assert config['file_name2'] == ['g1', 'g2', 'g3']
    treecorr.config.parse_variable(config, 'file_name2 = {h1,h2,h3}')
    assert config['file_name2'] == ['h1', 'h2', 'h3']

    # In config file, can also separate by whitespace
    treecorr.config.parse_variable(config, 'file_name2 = f1   g2  h3')
    assert config['file_name2'] == ['f1', 'g2', 'h3']

    # If it starts with [, it needs a trailing ] or it's an error.
    with assert_raises(ValueError):
        treecorr.config.parse_variable(config, 'file_name2 = [h1, h2, h3')
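
test_parse_variables documents the whole parsing contract of parse_variable: the string must contain '=', anything after a '#' is dropped, surrounding whitespace is stripped, a value wrapped in [], () or {} becomes a list of comma-separated items, a bare value containing whitespace becomes a whitespace-separated list, and an opening bracket without a closing one is an error. A rough illustrative sketch of that logic (not the actual TreeCorr code):

def parse_variable_sketch(config, v):
    """Illustrative only: parse 'key = value' into the config dict."""
    if '=' not in v:
        raise ValueError("Improper variable specification: %s" % v)
    key, value = v.split('=', 1)
    key = key.strip()
    value = value.split('#')[0].strip()          # drop any trailing comment
    if value and value[0] in '[({':
        if value[-1] not in '])}':
            raise ValueError("Invalid list specification: %s" % v)
        config[key] = [item.strip() for item in value[1:-1].split(',')]
    elif len(value.split()) > 1:
        config[key] = value.split()              # whitespace-separated list
    else:
        config[key] = value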
Code example #5
File: test_config.py  Project: joezuntz/TreeCorr
def test_get():
    """Test getting a parameter from a config dict
    """
    config1 = treecorr.read_config('configs/kg.yaml')
    assert treecorr.config.get(config1, 'x_col', int) == 1
    assert treecorr.config.get(config1, 'x_col', str) == '1'
    assert treecorr.config.get(config1, 'x_col') == '1'
    assert treecorr.config.get(config1, 'x_col', int, 2) == 1
    assert treecorr.config.get(config1, 'ra_col', int) == None
    assert treecorr.config.get(config1, 'ra_col', int, 2) == 2

    config1['flip_g1'] = True
    assert treecorr.config.get(config1, 'flip_g1', bool) == True
    assert treecorr.config.get(config1, 'flip_g1', bool, False) == True
    assert treecorr.config.get(config1, 'flip_g2', bool, False) == False
    assert treecorr.config.get(config1, 'flip_g2', bool) == None

    assert treecorr.config.get_from_list(config1, 'k_col', 0, int) == 3
    assert treecorr.config.get_from_list(config1, 'k_col', 0, str) == '3'
    assert treecorr.config.get_from_list(config1, 'k_col', 0) == '3'
    assert treecorr.config.get_from_list(config1, 'k_col', 0, int, 2) == 3
    assert treecorr.config.get_from_list(config1, 'k_col', 1, int) == 0
    assert treecorr.config.get_from_list(config1, 'k_col', 1, int, 2) == 0
    assert treecorr.config.get_from_list(config1, 'ra_col', 1, int, 2) == 2
    assert treecorr.config.get_from_list(config1, 'ra_col', 1, int) == None

    config1['flip_g1'] = [True, False]
    assert treecorr.config.get_from_list(config1, 'flip_g1', 0, bool) == True
    assert treecorr.config.get_from_list(config1, 'flip_g1', 1, bool) == False
    assert treecorr.config.get_from_list(config1, 'flip_g1', 0, bool,
                                         False) == True
    assert treecorr.config.get_from_list(config1, 'flip_g2', 1, bool) == None
    assert treecorr.config.get_from_list(config1, 'flip_g2', 1, bool,
                                         False) == False
    assert treecorr.config.get_from_list(config1, 'flip_g2', 2, bool,
                                         False) == False

    with assert_raises(IndexError):
        treecorr.config.get_from_list(config1, 'k_col', 2, int)
    with assert_raises(IndexError):
        treecorr.config.get_from_list(config1, 'flip_g1', 2, bool)
    with assert_raises(IndexError):
        treecorr.config.get_from_list(config1, 'flip_g1', 2, bool, False)
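
test_get summarizes the lookup helpers: get(config, key, value_type, default) returns config[key] converted to value_type (str if not given), the default when the key is absent, and None when both the key and the default are absent; get_from_list does the same but picks element num from a list-valued entry and raises IndexError when num is out of range. A rough sketch of those semantics follows. It is illustrative only; note that plain bool() on a string like 'false' would return True, so a real implementation needs parse_bool-style logic for string-valued booleans.

def get_sketch(config, key, value_type=str, default=None):
    """Illustrative only: config[key] converted to value_type, or default if absent."""
    if key not in config:
        return default
    return value_type(config[key])

def get_from_list_sketch(config, key, num, value_type=str, default=None):
    """Illustrative only: element num of a list-valued entry, converted as above."""
    if key not in config:
        return default
    value = config[key]
    if isinstance(value, list):
        value = value[num]       # IndexError if num is out of range, as tested above
    return value_type(value)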
Code example #6
File: test_config.py  Project: rmjarvis/TreeCorr
def test_parse_variables():
    """Test parse_variables functionality
    """
    config = treecorr.read_config('configs/nn.yaml')

    # parse_variables is used by corr2 executable to add or change items in config
    # with extra command line arguments
    assert 'file_name2' not in config
    treecorr.config.parse_variable(config, 'file_name2 = data/nn_data.dat')
    assert config['file_name2'] == 'data/nn_data.dat'

    treecorr.config.parse_variable(config, 'file_name2=data/nn_data2.dat')
    assert config['file_name2'] == 'data/nn_data2.dat'

    # It's also used by params parsing, so it removes trailing comments
    treecorr.config.parse_variable(config, 'file_name2=data/nn_data3.dat # The second file')
    assert config['file_name2'] == 'data/nn_data3.dat'

    # Extra whitespace is ignored
    treecorr.config.parse_variable(config, 'file_name2 = \t\tdata/nn_data4.dat       ')
    assert config['file_name2'] == 'data/nn_data4.dat'

    # Invalid if no = sign.
    with assert_raises(ValueError):
        treecorr.config.parse_variable(config, 'file_name2:data/nn_data2.dat')

    # Can specify lists with [], () or {}
    treecorr.config.parse_variable(config, 'file_name2 = [f1, f2, f3]')
    assert config['file_name2'] == ['f1', 'f2', 'f3']
    treecorr.config.parse_variable(config, 'file_name2 = (   g1\t, g2\t, g3\t)')
    assert config['file_name2'] == ['g1', 'g2', 'g3']
    treecorr.config.parse_variable(config, 'file_name2 = {h1,h2,h3}')
    assert config['file_name2'] == ['h1', 'h2', 'h3']

    # In config file, can also separate by whitespace
    treecorr.config.parse_variable(config, 'file_name2 = f1   g2  h3')
    assert config['file_name2'] == ['f1', 'g2', 'h3']

    # If it starts with [, it needs a trailing ] or it's an error.
    with assert_raises(ValueError):
        treecorr.config.parse_variable(config, 'file_name2 = [h1, h2, h3')
Code example #7
File: test_config.py  Project: joezuntz/TreeCorr
def test_parse_unit():
    """Test parse_unit functionality
    """
    assert np.isclose(treecorr.config.parse_unit('radian'), 1.)
    assert np.isclose(treecorr.config.parse_unit('deg'), np.pi / 180.)
    assert np.isclose(treecorr.config.parse_unit('degree'), np.pi / 180.)
    assert np.isclose(treecorr.config.parse_unit('degrees'), np.pi / 180.)
    assert np.isclose(treecorr.config.parse_unit('arcmin'), np.pi / 180. / 60)
    assert np.isclose(treecorr.config.parse_unit('arcminutes'),
                      np.pi / 180. / 60)
    assert np.isclose(treecorr.config.parse_unit('arcsec'),
                      np.pi / 180. / 60 / 60)
    assert np.isclose(treecorr.config.parse_unit('arcseconds'),
                      np.pi / 180. / 60 / 60)

    with assert_raises(ValueError):
        treecorr.config.parse_unit('gradians')
    with assert_raises(ValueError):
        treecorr.config.parse_unit('miles')
    with assert_raises(ValueError):
        treecorr.config.parse_unit('Mpc')
Code example #8
File: test_config.py  Project: rmjarvis/TreeCorr
def test_get():
    """Test getting a parameter from a config dict
    """
    config1 = treecorr.read_config('configs/kg.yaml')
    assert treecorr.config.get(config1, 'x_col', int) == 1
    assert treecorr.config.get(config1, 'x_col', str) == '1'
    assert treecorr.config.get(config1, 'x_col') == '1'
    assert treecorr.config.get(config1, 'x_col', int, 2) == 1
    assert treecorr.config.get(config1, 'ra_col', int) == None
    assert treecorr.config.get(config1, 'ra_col', int, 2) == 2

    config1['flip_g1'] = True
    assert treecorr.config.get(config1, 'flip_g1', bool) == True
    assert treecorr.config.get(config1, 'flip_g1', bool, False) == True
    assert treecorr.config.get(config1, 'flip_g2', bool, False) == False
    assert treecorr.config.get(config1, 'flip_g2', bool) == None

    assert treecorr.config.get_from_list(config1, 'k_col', 0, int) == 3
    assert treecorr.config.get_from_list(config1, 'k_col', 0, str) == '3'
    assert treecorr.config.get_from_list(config1, 'k_col', 0) == '3'
    assert treecorr.config.get_from_list(config1, 'k_col', 0, int, 2) == 3
    assert treecorr.config.get_from_list(config1, 'k_col', 1, int) == 0
    assert treecorr.config.get_from_list(config1, 'k_col', 1, int, 2) == 0
    assert treecorr.config.get_from_list(config1, 'ra_col', 1, int, 2) == 2
    assert treecorr.config.get_from_list(config1, 'ra_col', 1, int) == None

    config1['flip_g1'] = [True, False]
    assert treecorr.config.get_from_list(config1, 'flip_g1', 0, bool) == True
    assert treecorr.config.get_from_list(config1, 'flip_g1', 1, bool) == False
    assert treecorr.config.get_from_list(config1, 'flip_g1', 0, bool, False) == True
    assert treecorr.config.get_from_list(config1, 'flip_g2', 1, bool) == None
    assert treecorr.config.get_from_list(config1, 'flip_g2', 1, bool, False) == False
    assert treecorr.config.get_from_list(config1, 'flip_g2', 2, bool, False) == False

    with assert_raises(IndexError):
        treecorr.config.get_from_list(config1, 'k_col', 2, int)
    with assert_raises(IndexError):
        treecorr.config.get_from_list(config1, 'flip_g1', 2, bool)
    with assert_raises(IndexError):
        treecorr.config.get_from_list(config1, 'flip_g1', 2, bool, False)
Code example #9
File: test_config.py  Project: joezuntz/TreeCorr
def test_merge():
    """Test merging two config dicts.
    """
    # First a simple case with no conflicts
    config1 = treecorr.read_config('Aardvark.yaml')
    kwargs = {'cat_precision': 10}
    valid_params = treecorr.Catalog._valid_params
    config2 = treecorr.config.merge_config(config1, kwargs, valid_params)

    assert config2['cat_precision'] == 10
    assert config2['ra_col'] == 'RA'
    assert config2['verbose'] == 2

    # config is allowed to have invalid parameters
    assert 'gg_file_name' in config1
    assert 'gg_file_name' not in config2

    # If either is None, then return subset of other that is valid
    config2 = treecorr.config.merge_config(config1.copy(), None, valid_params)
    for key in config2:
        assert key in valid_params
        if key in config1:
            assert config2[key] == config1[key] or config2[key] in config1[key]
        else:
            assert config2[key] == valid_params[key][2]
    assert 'gg_file_name' not in config2

    config2 = treecorr.config.merge_config(None, kwargs.copy(), valid_params)
    for key in config2:
        assert key in valid_params
        if key in kwargs:
            assert config2[key] == kwargs[key] or config2[key] in kwargs[key]
        else:
            assert config2[key] == valid_params[key][2]

    # If conflicts, kwargs takes precedence
    kwargs['ra_col'] = 'alpha2000'
    config2 = treecorr.config.merge_config(config1, kwargs,
                                           treecorr.Catalog._valid_params)
    assert config2['ra_col'] == 'alpha2000'

    # If kwargs has invalid parameters, exception is raised
    kwargs = {'cat_prec': 10}
    with assert_raises(TypeError):
        treecorr.config.merge_config(config1, kwargs,
                                     treecorr.Catalog._valid_params)
Code example #10
File: test_config.py  Project: rmjarvis/TreeCorr
def test_merge():
    """Test merging two config dicts.
    """
    # First a simple case with no conflicts
    config1 = treecorr.read_config('Aardvark.yaml')
    kwargs = { 'cat_precision' : 10 }
    valid_params = treecorr.Catalog._valid_params
    config2 = treecorr.config.merge_config(config1, kwargs, valid_params)

    assert config2['cat_precision'] == 10
    assert config2['ra_col'] == 'RA'
    assert config2['verbose'] == 2

    # config is allowed to have invalid parameters
    assert 'gg_file_name' in config1
    assert 'gg_file_name' not in config2

    # If either is None, then return subset of other that is valid
    config2 = treecorr.config.merge_config(config1.copy(), None, valid_params)
    for key in config2:
        assert key in valid_params
        if key in config1:
            assert config2[key] == config1[key] or config2[key] in config1[key]
        else:
            assert config2[key] == valid_params[key][2]
    assert 'gg_file_name' not in config2

    config2 = treecorr.config.merge_config(None, kwargs.copy(), valid_params)
    for key in config2:
        assert key in valid_params
        if key in kwargs:
            assert config2[key] == kwargs[key] or config2[key] in kwargs[key]
        else:
            assert config2[key] == valid_params[key][2]

    # If conflicts, kwargs takes precedence
    kwargs['ra_col'] = 'alpha2000'
    config2 = treecorr.config.merge_config(config1, kwargs, treecorr.Catalog._valid_params)
    assert config2['ra_col'] == 'alpha2000'

    # If kwargs has invalid parameters, exception is raised
    kwargs = { 'cat_prec' : 10 }
    with assert_raises(TypeError):
        treecorr.config.merge_config(config1, kwargs, treecorr.Catalog._valid_params)
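
Both copies of test_merge pin down the precedence rules of merge_config: the result contains only keys that appear in valid_params; kwargs wins over config, which wins over the parameter default stored as the third element of each valid_params entry; unknown keys in config are silently dropped, while unknown keys in kwargs raise TypeError. A rough illustrative sketch of that precedence (not TreeCorr's actual code):

def merge_config_sketch(config, kwargs, valid_params):
    """Illustrative only: kwargs > config > default, restricted to valid_params."""
    config = config or {}
    kwargs = kwargs or {}
    invalid = [k for k in kwargs if k not in valid_params]
    if invalid:
        raise TypeError("Invalid parameters: %s" % invalid)
    merged = {}
    for key, spec in valid_params.items():
        default = spec[2]                        # default value in the spec tuple
        if key in kwargs:
            merged[key] = kwargs[key]
        elif key in config:
            merged[key] = config[key]
        elif default is not None:
            merged[key] = default
    return merged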
Code example #11
def test_get_near():

    nobj = 100000
    rng = np.random.RandomState(8675309)
    x = rng.random_sample(nobj)   # All from 0..1
    y = rng.random_sample(nobj)
    z = rng.random_sample(nobj)
    w = rng.random_sample(nobj)
    use = rng.randint(30, size=nobj).astype(float)
    w[use == 0] = 0

    x0 = 0.5
    y0 = 0.8
    z0 = 0.3
    sep = 0.03

    # Put a small cluster inside our search radius
    x[100:130] = rng.normal(x0+0.03, 0.001, 30)
    y[100:130] = rng.normal(y0-0.02, 0.001, 30)
    z[100:130] = rng.normal(z0+0.01, 0.001, 30)

    # Put another small cluster right on the edge of our search radius
    x[500:550] = rng.normal(x0+sep, 0.001, 50)
    y[500:550] = rng.normal(y0, 0.001, 50)
    z[500:550] = rng.normal(z0, 0.001, 50)

    # Start with flat coords

    cat = treecorr.Catalog(x=x, y=y, w=w, g1=w, g2=w, k=w, keep_zero_weight=True)
    field = cat.getNField()
    field.nTopLevelNodes

    i1 = np.where(((x-x0)**2 + (y-y0)**2 < sep**2))[0]
    t1 = min(timeit.repeat(lambda: np.where(((x-x0)**2 + (y-y0)**2 < sep**2))[0], number=100))
    i2 = field.get_near(x=x0, y=y0, sep=sep)
    t2 = min(timeit.repeat(lambda: field.get_near(x=x0, y=y0, sep=sep), number=100))
    i3 = field.get_near(x0, y0, sep)
    t3 = min(timeit.repeat(lambda: field.get_near(x0, y0, sep), number=100))
    print('i1 = ',i1[:20],'  time = ',t1)
    print('i2 = ',i2[:20],'  time = ',t2)
    print('i3 = ',i3[:20],'  time = ',t3)
    np.testing.assert_array_equal(i2, i1)
    np.testing.assert_array_equal(i3, i1)
    #assert t2 < t1    # These don't always pass.  The tree version is usually faster,
    #assert t3 < t1    # but not always.  So don't require it in the unit test.

    # Invalid ways to specify x,y,sep
    assert_raises(TypeError, field.get_near)
    assert_raises(TypeError, field.get_near, x0)
    assert_raises(TypeError, field.get_near, x0, y0)
    assert_raises(TypeError, field.get_near, x0, y0, sep, sep)
    assert_raises(TypeError, field.get_near, x=x0, y=y0)
    assert_raises(TypeError, field.get_near, x=x0, sep=sep)
    assert_raises(TypeError, field.get_near, y=y0, sep=sep)
    assert_raises(TypeError, field.get_near, x=x0, y=y0, z=x0, sep=sep)
    assert_raises(TypeError, field.get_near, ra=x0, dec=y0, sep=sep)
    assert_raises(TypeError, field.get_near, coord.CelestialCoord.from_xyz(x0,y0,x0), sep=sep)

    # Check G and K
    kfield = cat.getKField(min_size=0.01, max_size=sep, min_top=5)
    gfield = cat.getGField(min_size=0.05, max_size=sep, max_top=2)
    i4 = kfield.get_near(x0, y0, sep=sep)
    i5 = gfield.get_near(x0, y0, sep=sep)
    np.testing.assert_array_equal(i4, i1)
    np.testing.assert_array_equal(i5, i1)

    # 3D coords

    r = np.sqrt(x*x+y*y+z*z)
    dec = np.arcsin(z/r) * coord.radians / coord.degrees
    ra = np.arctan2(y,x) * coord.radians / coord.degrees

    cat = treecorr.Catalog(ra=ra, dec=dec, r=r, ra_units='deg', dec_units='deg',
                           w=w, g1=w, g2=w, k=w, keep_zero_weight=True)
    field = cat.getNField()
    field.nTopLevelNodes

    i1 = np.where(((x-x0)**2 + (y-y0)**2 + (z-z0)**2 < sep**2))[0]
    t1 = min(timeit.repeat(lambda: np.where(((x-x0)**2 + (y-y0)**2 + (z-z0)**2 < sep**2))[0],
                           number=100))
    i2 = field.get_near(x=x0, y=y0, z=z0, sep=sep)
    t2 = min(timeit.repeat(lambda: field.get_near(x=x0, y=y0, z=z0, sep=sep), number=100))
    c = coord.CelestialCoord.from_xyz(x0,y0,z0)
    r0 = np.sqrt(x0**2+y0**2+z0**2)
    i3 = field.get_near(ra=c.ra, dec=c.dec, r=r0, sep=sep)
    t3 = min(timeit.repeat(lambda: field.get_near(ra=c.ra, dec=c.dec, r=r0, sep=sep), number=100))
    print('i1 = ',i1[:20],'  time = ',t1)
    print('i2 = ',i2[:20],'  time = ',t2)
    print('i3 = ',i3[:20],'  time = ',t3)
    np.testing.assert_array_equal(i2, i1)
    np.testing.assert_array_equal(i3, i1)
    #assert t2 < t1
    #assert t3 < t1

    # Invalid ways to specify x,y,z,sep
    ra0 = c.ra / coord.degrees
    dec0 = c.dec / coord.degrees
    assert_raises(TypeError, field.get_near)
    assert_raises(TypeError, field.get_near, x0)
    assert_raises(TypeError, field.get_near, x0, y0)
    assert_raises(TypeError, field.get_near, x0, y0, z0)
    assert_raises(TypeError, field.get_near, x=x0)
    assert_raises(TypeError, field.get_near, x=x0, y=y0)
    assert_raises(TypeError, field.get_near, x=x0, y=y0, z=z0)
    assert_raises(TypeError, field.get_near, ra=ra0)
    assert_raises(TypeError, field.get_near, ra=ra0, dec=dec0)
    assert_raises(TypeError, field.get_near, ra=ra0, dec=dec0, r=r0)
    assert_raises(TypeError, field.get_near, ra0, dec0, sep, ra_units='deg')
    assert_raises(TypeError, field.get_near, ra0, dec0, sep, dec_units='deg')
    assert_raises(TypeError, field.get_near, ra0, dec0, sep, sep_units='rad')
    assert_raises(TypeError, field.get_near, ra0, dec0, sep, ra_units='deg', dec_units='deg')
    assert_raises(TypeError, field.get_near, ra0, dec0, sep, ra_units='deg', sep_units='rad')
    assert_raises(TypeError, field.get_near, ra0, dec0, sep, dec_units='deg', sep_units='rad')
    assert_raises(TypeError, field.get_near, ra0, dec0, sep, sep,
                  ra_units='deg', dec_units='deg', sep_units='rad')
    assert_raises(TypeError, field.get_near, ra=ra0)
    assert_raises(TypeError, field.get_near, dec=dec0)
    assert_raises(TypeError, field.get_near, ra=ra0, dec=dec0)
    assert_raises(TypeError, field.get_near, ra=ra0, dec=dec0, sep=sep)
    assert_raises(TypeError, field.get_near, ra=ra0, dec=dec0, sep=sep, ra_units='deg')
    assert_raises(TypeError, field.get_near, ra=ra0, dec=dec0, sep=sep, dec_units='deg')
    assert_raises(TypeError, field.get_near, ra=ra0, dec=dec0, sep=sep,
                  ra_units='deg', dec_units='deg')
    assert_raises(TypeError, field.get_near, ra, dec=dec, sep=sep,
                  ra_units='deg', dec_units='deg', sep_units='rad')
    assert_raises(TypeError, field.get_near, ra, dec=dec, sep=sep,
                  ra_units='deg', dec_units='deg', sep_units='rad')
    assert_raises(TypeError, field.get_near, c)
    assert_raises(TypeError, field.get_near, c, r=r0)
    assert_raises(TypeError, field.get_near, c, r=r0, sep=sep, sep_units='rad')
    assert_raises(TypeError, field.get_near, c, r0)
    assert_raises(TypeError, field.get_near, c, r0, sep=sep, sep_units='rad')
    assert_raises(TypeError, field.get_near, c, r0, sep, sep_units='rad')
    assert_raises(TypeError, field.get_near, c, r0, sep, 'deg')
    assert_raises(TypeError, field.get_near, c, r0, sep, sep_unit='deg')
    assert_raises(TypeError, field.get_near, c, r0, sep, sep_units='deg',
                  ra_units='deg', dec_units='deg')
    assert_raises(TypeError, field.get_near, c.ra)
    assert_raises(TypeError, field.get_near, c.ra, c.dec)
    assert_raises(TypeError, field.get_near, c.ra, c.dec, r=r0)
    assert_raises(TypeError, field.get_near, c.ra, c.dec, r0)
    assert_raises(TypeError, field.get_near, c.ra, c.dec, r0, sep=sep, extra=4)
    assert_raises(TypeError, field.get_near, c.ra, c.dec, r0, sep, extra=4)
    assert_raises(TypeError, field.get_near, c.ra, c.dec, r0, sep, sep)

    # Check G and K
    kfield = cat.getKField(min_size=0.01, max_size=sep, min_top=5)
    gfield = cat.getGField(min_size=0.05, max_size=sep, max_top=2)
    i4 = kfield.get_near(c, r0, sep)
    i5 = gfield.get_near(c.ra, c.dec, r0, sep=sep)
    np.testing.assert_array_equal(i4, i1)
    np.testing.assert_array_equal(i5, i1)

    # Spherical
    cat = treecorr.Catalog(ra=ra, dec=dec, ra_units='deg', dec_units='deg',
                           w=w, g1=w, g2=w, k=w, keep_zero_weight=True)
    field = cat.getNField()
    field.nTopLevelNodes

    x /= r
    y /= r
    z /= r
    c = coord.CelestialCoord.from_xyz(x0,y0,z0)
    x0,y0,z0 = c.get_xyz()
    r0 = 2 * np.sin(sep / 2)  # length of chord subtending sep radians.
    i1 = np.where(((x-x0)**2 + (y-y0)**2 + (z-z0)**2 < r0**2))[0]
    t1 = min(timeit.repeat(lambda: np.where(((x-x0)**2 + (y-y0)**2 + (z-z0)**2 < r0**2))[0],
                           number=100))
    i2 = field.get_near(c, sep=sep, sep_units='rad')
    t2 = min(timeit.repeat(lambda: field.get_near(c, sep=sep, sep_units='rad'), number=100))
    i3 = field.get_near(ra=c.ra.rad, dec=c.dec.rad, ra_units='radians', dec_units='radians',
                        sep=sep * coord.radians)
    t3 = min(timeit.repeat(lambda: field.get_near(ra=c.ra.rad, dec=c.dec.rad, ra_units='radians',
                                                  dec_units='radians', sep=sep * coord.radians),
                           number=100))
    print('i1 = ',i1[:20],'  time = ',t1)
    print('i2 = ',i2[:20],'  time = ',t2)
    print('i3 = ',i3[:20],'  time = ',t3)
    np.testing.assert_array_equal(i2, i1)
    np.testing.assert_array_equal(i3, i1)
    #assert t2 < t1
    #assert t3 < t1

    # Invalid ways to specify ra,dec,sep
    assert_raises(TypeError, field.get_near)
    assert_raises(TypeError, field.get_near, ra0)
    assert_raises(TypeError, field.get_near, ra0, dec0)
    assert_raises(TypeError, field.get_near, ra0, dec0, sep)
    assert_raises(TypeError, field.get_near, ra0, dec0, sep, ra_units='deg')
    assert_raises(TypeError, field.get_near, ra0, dec0, sep, dec_units='deg')
    assert_raises(TypeError, field.get_near, ra0, dec0, sep, sep_units='rad')
    assert_raises(TypeError, field.get_near, ra0, dec0, sep, ra_units='deg', dec_units='deg')
    assert_raises(TypeError, field.get_near, ra0, dec0, sep, ra_units='deg', sep_units='rad')
    assert_raises(TypeError, field.get_near, ra0, dec0, sep, dec_units='deg', sep_units='rad')
    assert_raises(TypeError, field.get_near, ra0, dec0, sep, sep,
                  ra_units='deg', dec_units='deg', sep_units='rad')
    assert_raises(TypeError, field.get_near, ra=ra0)
    assert_raises(TypeError, field.get_near, dec=dec0)
    assert_raises(TypeError, field.get_near, ra=ra0, dec=dec0)
    assert_raises(TypeError, field.get_near, ra=ra0, dec=dec0, sep=sep)
    assert_raises(TypeError, field.get_near, ra=ra0, dec=dec0, sep=sep, ra_units='deg')
    assert_raises(TypeError, field.get_near, ra=ra0, dec=dec0, sep=sep, dec_units='deg')
    assert_raises(TypeError, field.get_near, ra=ra0, dec=dec0, sep=sep,
                  ra_units='deg', dec_units='deg')
    assert_raises(TypeError, field.get_near, ra0, dec=dec0, sep=sep,
                  ra_units='deg', dec_units='deg', sep_units='rad')
    assert_raises(TypeError, field.get_near, ra0, dec=dec0, sep=sep,
                  ra_units='deg', dec_units='deg', sep_units='rad')
    assert_raises(TypeError, field.get_near, c)
    assert_raises(TypeError, field.get_near, c, sep)
    assert_raises(TypeError, field.get_near, c, sep, 'deg')
    assert_raises(TypeError, field.get_near, c, sep, sep_unit='deg')
    assert_raises(TypeError, field.get_near, c, sep, sep_units='deg',
                  ra_units='deg', dec_units='deg')

    # Check G and K with other allowed argument patterns.
    kfield = cat.getKField(min_size=0.01, max_size=sep, min_top=5)
    gfield = cat.getGField(min_size=0.05, max_size=sep, max_top=2)
    i4 = gfield.get_near(c, sep*coord.radians/coord.degrees, sep_units='deg')
    i5 = kfield.get_near(c.ra, c.dec, sep*coord.radians)
    np.testing.assert_array_equal(i4, i1)
    np.testing.assert_array_equal(i5, i1)
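
A note on the spherical part of this test: for points on the unit sphere, an angular separation sep corresponds to a 3-d chord of length 2*sin(sep/2), which is why the brute-force check above compares squared (x, y, z) distances against r0**2 with r0 = 2*np.sin(sep/2). A quick standalone check of that identity:

import numpy as np

sep = 0.03
p1 = np.array([1., 0., 0.])
p2 = np.array([np.cos(sep), np.sin(sep), 0.])      # unit vector sep radians away
chord = np.sqrt(np.sum((p1 - p2)**2))
np.testing.assert_allclose(chord, 2. * np.sin(sep / 2.))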
Code example #12
def test_direct():
    # If the catalogs are small enough, we can do a direct calculation to see if it comes out right.
    # This should exactly match the treecorr result if brute=True.

    ngal = 200
    s = 10.
    rng = np.random.RandomState(8675309)
    x1 = rng.normal(0,s, (ngal,) )
    y1 = rng.normal(0,s, (ngal,) )
    w1 = rng.random_sample(ngal)
    k1 = rng.normal(5,1, (ngal,) )

    x2 = rng.normal(0,s, (ngal,) )
    y2 = rng.normal(0,s, (ngal,) )
    w2 = rng.random_sample(ngal)
    g12 = rng.normal(0,0.2, (ngal,) )
    g22 = rng.normal(0,0.2, (ngal,) )

    cat1 = treecorr.Catalog(x=x1, y=y1, w=w1, k=k1)
    cat2 = treecorr.Catalog(x=x2, y=y2, w=w2, g1=g12, g2=g22)

    min_sep = 1.
    max_sep = 50.
    nbins = 50
    bin_size = np.log(max_sep/min_sep) / nbins
    kg = treecorr.KGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, brute=True)
    kg.process(cat1, cat2)

    true_npairs = np.zeros(nbins, dtype=int)
    true_weight = np.zeros(nbins, dtype=float)
    true_xi = np.zeros(nbins, dtype=complex)
    for i in range(ngal):
        # It's hard to do all the pairs at once with numpy operations (although maybe possible).
        # But we can at least do all the pairs for each entry in cat1 at once with arrays.
        rsq = (x1[i]-x2)**2 + (y1[i]-y2)**2
        r = np.sqrt(rsq)
        logr = np.log(r)
        expmialpha = ((x1[i]-x2) - 1j*(y1[i]-y2)) / r

        ww = w1[i] * w2
        xi = -ww * k1[i] * (g12 + 1j*g22) * expmialpha**2

        index = np.floor(np.log(r/min_sep) / bin_size).astype(int)
        mask = (index >= 0) & (index < nbins)
        np.add.at(true_npairs, index[mask], 1)
        np.add.at(true_weight, index[mask], ww[mask])
        np.add.at(true_xi, index[mask], xi[mask])

    true_xi /= true_weight

    print('true_npairs = ',true_npairs)
    print('diff = ',kg.npairs - true_npairs)
    np.testing.assert_array_equal(kg.npairs, true_npairs)

    print('true_weight = ',true_weight)
    print('diff = ',kg.weight - true_weight)
    np.testing.assert_allclose(kg.weight, true_weight, rtol=1.e-5, atol=1.e-8)

    print('true_xi = ',true_xi)
    print('kg.xi = ',kg.xi)
    print('kg.xi_im = ',kg.xi_im)
    np.testing.assert_allclose(kg.xi, true_xi.real, rtol=1.e-4, atol=1.e-8)
    np.testing.assert_allclose(kg.xi_im, true_xi.imag, rtol=1.e-4, atol=1.e-8)

    try:
        import fitsio
    except ImportError:
        print('Skipping FITS tests, since fitsio is not installed')
        return

    # Check that running via the corr2 script works correctly.
    config = treecorr.config.read_config('configs/kg_direct.yaml')
    cat1.write(config['file_name'])
    cat2.write(config['file_name2'])
    treecorr.corr2(config)
    data = fitsio.read(config['kg_file_name'])
    np.testing.assert_allclose(data['r_nom'], kg.rnom)
    np.testing.assert_allclose(data['npairs'], kg.npairs)
    np.testing.assert_allclose(data['weight'], kg.weight)
    np.testing.assert_allclose(data['kgamT'], kg.xi, rtol=1.e-3)
    np.testing.assert_allclose(data['kgamX'], kg.xi_im, rtol=1.e-3)

    # Invalid with only one file_name
    del config['file_name2']
    with assert_raises(TypeError):
        treecorr.corr2(config)

    # Repeat with bin_slop = 0, since the code is different for bin_slop=0 and brute=True.
    # And don't do any top-level recursion so we actually test not going to the leaves.
    kg = treecorr.KGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, bin_slop=0,
                                max_top=0)
    kg.process(cat1, cat2)
    np.testing.assert_array_equal(kg.npairs, true_npairs)
    np.testing.assert_allclose(kg.weight, true_weight, rtol=1.e-5, atol=1.e-8)
    np.testing.assert_allclose(kg.xi, true_xi.real, rtol=1.e-3, atol=1.e-3)
    np.testing.assert_allclose(kg.xi_im, true_xi.imag, rtol=1.e-3, atol=1.e-3)

    # Check a few basic operations with a KGCorrelation object.
    do_pickle(kg)

    kg2 = kg.copy()
    kg2 += kg
    np.testing.assert_allclose(kg2.npairs, 2*kg.npairs)
    np.testing.assert_allclose(kg2.weight, 2*kg.weight)
    np.testing.assert_allclose(kg2.meanr, 2*kg.meanr)
    np.testing.assert_allclose(kg2.meanlogr, 2*kg.meanlogr)
    np.testing.assert_allclose(kg2.xi, 2*kg.xi)
    np.testing.assert_allclose(kg2.xi_im, 2*kg.xi_im)

    kg2.clear()
    kg2 += kg
    np.testing.assert_allclose(kg2.npairs, kg.npairs)
    np.testing.assert_allclose(kg2.weight, kg.weight)
    np.testing.assert_allclose(kg2.meanr, kg.meanr)
    np.testing.assert_allclose(kg2.meanlogr, kg.meanlogr)
    np.testing.assert_allclose(kg2.xi, kg.xi)
    np.testing.assert_allclose(kg2.xi_im, kg.xi_im)

    ascii_name = 'output/kg_ascii.txt'
    kg.write(ascii_name, precision=16)
    kg3 = treecorr.KGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins)
    kg3.read(ascii_name)
    np.testing.assert_allclose(kg3.npairs, kg.npairs)
    np.testing.assert_allclose(kg3.weight, kg.weight)
    np.testing.assert_allclose(kg3.meanr, kg.meanr)
    np.testing.assert_allclose(kg3.meanlogr, kg.meanlogr)
    np.testing.assert_allclose(kg3.xi, kg.xi)
    np.testing.assert_allclose(kg3.xi_im, kg.xi_im)

    fits_name = 'output/kg_fits.fits'
    kg.write(fits_name)
    kg4 = treecorr.KGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins)
    kg4.read(fits_name)
    np.testing.assert_allclose(kg4.npairs, kg.npairs)
    np.testing.assert_allclose(kg4.weight, kg.weight)
    np.testing.assert_allclose(kg4.meanr, kg.meanr)
    np.testing.assert_allclose(kg4.meanlogr, kg.meanlogr)
    np.testing.assert_allclose(kg4.xi, kg.xi)
    np.testing.assert_allclose(kg4.xi_im, kg.xi_im)

    with assert_raises(TypeError):
        kg2 += config
    kg4 = treecorr.KGCorrelation(min_sep=min_sep/2, max_sep=max_sep, nbins=nbins)
    with assert_raises(ValueError):
        kg2 += kg4
    kg5 = treecorr.KGCorrelation(min_sep=min_sep, max_sep=max_sep*2, nbins=nbins)
    with assert_raises(ValueError):
        kg2 += kg5
    kg6 = treecorr.KGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins*2)
    with assert_raises(ValueError):
        kg2 += kg6
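
The per-pair quantity accumulated in the loop above is the kappa value from the first catalog times the shear of the second, rotated into the frame of the separation vector: exp(-i*alpha) = ((x1-x2) - i*(y1-y2))/r, and the projected shear is -(g1 + i*g2) * exp(-2i*alpha), whose real part ends up in kg.xi (the kgamT column of the FITS output) and whose imaginary part ends up in kg.xi_im (kgamX). A standalone illustration of that projection for a single pair, following the same convention as the loop:

import numpy as np

# One pair: a point with kappa k1 and a point with shear (g1, g2).
x1, y1, k1 = 0.0, 0.0, 1.0
x2, y2, g1, g2 = 3.0, 4.0, 0.05, -0.02
r = np.sqrt((x1 - x2)**2 + (y1 - y2)**2)
expmialpha = ((x1 - x2) - 1j * (y1 - y2)) / r    # exp(-i*alpha) of the separation
xi_pair = -k1 * (g1 + 1j * g2) * expmialpha**2   # real -> tangential, imag -> cross
print(xi_pair.real, xi_pair.imag)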
Code example #13
def test_3pt():
    # Test a direct calculation of the 3pt function with the Periodic metric.

    from test_nnn import is_ccw

    ngal = 50
    Lx = 250.
    Ly = 180.
    rng = np.random.RandomState(8675309)
    x = (rng.random_sample(ngal)-0.5) * Lx
    y = (rng.random_sample(ngal)-0.5) * Ly
    cat = treecorr.Catalog(x=x, y=y)

    min_sep = 1.
    max_sep = 40.  # This only really makes sense if max_sep < L/4 for all L.
    nbins = 50
    min_u = 0.13
    max_u = 0.89
    nubins = 10
    min_v = 0.13
    max_v = 0.59
    nvbins = 10

    ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
                                  min_u=min_u, max_u=max_u, nubins=nubins,
                                  min_v=min_v, max_v=max_v, nvbins=nvbins,
                                  bin_slop=0, xperiod=Lx, yperiod=Ly, brute=True)
    ddd.process(cat, metric='Periodic', num_threads=1)
    #print('ddd.ntri = ',ddd.ntri)

    log_min_sep = np.log(min_sep)
    log_max_sep = np.log(max_sep)
    true_ntri = np.zeros( (nbins, nubins, 2*nvbins) )
    bin_size = (log_max_sep - log_min_sep) / nbins
    ubin_size = (max_u-min_u) / nubins
    vbin_size = (max_v-min_v) / nvbins
    for i in range(ngal):
        for j in range(i+1,ngal):
            for k in range(j+1,ngal):
                xi = x[i]
                xj = x[j]
                xk = x[k]
                yi = y[i]
                yj = y[j]
                yk = y[k]
                #print(i,j,k,xi,yi,xj,yj,xk,yk)
                xi,xj = wrap(xi, xj, Lx, xk)
                #print('  ',xi,xj,xk)
                xi,xk = wrap(xi, xk, Lx, xj)
                #print('  ',xi,xj,xk)
                xj,xk = wrap(xj, xk, Lx, xi)
                #print('  ',xi,xj,xk)
                yi,yj = wrap(yi, yj, Ly, yk)
                #print('  ',yi,yj,yk)
                yi,yk = wrap(yi, yk, Ly, yj)
                #print('  ',yi,yj,yk)
                yj,yk = wrap(yj, yk, Ly, yi)
                #print('  ',yi,yj,yk)
                #print('->',xi,yi,xj,yj,xk,yk)
                dij = np.sqrt((xi-xj)**2 + (yi-yj)**2)
                dik = np.sqrt((xi-xk)**2 + (yi-yk)**2)
                djk = np.sqrt((xj-xk)**2 + (yj-yk)**2)
                if dij == 0.: continue
                if dik == 0.: continue
                if djk == 0.: continue
                ccw = True
                if dij < dik:
                    if dik < djk:
                        d3 = dij; d2 = dik; d1 = djk;
                        ccw = is_ccw(xi,yi,xj,yj,xk,yk)
                    elif dij < djk:
                        d3 = dij; d2 = djk; d1 = dik;
                        ccw = is_ccw(xj,yj,xi,yi,xk,yk)
                    else:
                        d3 = djk; d2 = dij; d1 = dik;
                        ccw = is_ccw(xj,yj,xk,yk,xi,yi)
                else:
                    if dij < djk:
                        d3 = dik; d2 = dij; d1 = djk;
                        ccw = is_ccw(xi,yi,xk,yk,xj,yj)
                    elif dik < djk:
                        d3 = dik; d2 = djk; d1 = dij;
                        ccw = is_ccw(xk,yk,xi,yi,xj,yj)
                    else:
                        d3 = djk; d2 = dik; d1 = dij;
                        ccw = is_ccw(xk,yk,xj,yj,xi,yi)

                #print('d1,d2,d3 = ',d1,d2,d3)
                r = d2
                u = d3/d2
                v = (d1-d2)/d3
                if r < min_sep or r >= max_sep: continue
                if u < min_u or u >= max_u: continue
                if v < min_v or v >= max_v: continue
                if not ccw:
                    v = -v
                #print('r,u,v = ',r,u,v)
                kr = int(np.floor( (np.log(r)-log_min_sep) / bin_size ))
                ku = int(np.floor( (u-min_u) / ubin_size ))
                if v > 0:
                    kv = int(np.floor( (v-min_v) / vbin_size )) + nvbins
                else:
                    kv = int(np.floor( (v-(-max_v)) / vbin_size ))
                #print('kr,ku,kv = ',kr,ku,kv)
                assert 0 <= kr < nbins
                assert 0 <= ku < nubins
                assert 0 <= kv < 2*nvbins
                true_ntri[kr,ku,kv] += 1
                #print('good.', true_ntri[kr,ku,kv])


    #print('true_ntri => ',true_ntri)
    #print('diff = ',ddd.ntri - true_ntri)
    mask = np.where(true_ntri > 0)
    #print('ddd.ntri[mask] = ',ddd.ntri[mask])
    #print('true_ntri[mask] = ',true_ntri[mask])
    #print('diff[mask] = ',(ddd.ntri - true_ntri)[mask])
    mask2 = np.where(ddd.ntri > 0)
    #print('ddd.ntri[mask2] = ',ddd.ntri[mask2])
    #print('true_ntri[mask2] = ',true_ntri[mask2])
    #print('diff[mask2] = ',(ddd.ntri - true_ntri)[mask2])
    np.testing.assert_array_equal(ddd.ntri, true_ntri)

    # If we don't give a period, then it's an error.
    ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
                                  min_u=min_u, max_u=max_u, nubins=nubins,
                                  min_v=min_v, max_v=max_v, nvbins=nvbins)
    with assert_raises(ValueError):
        ddd.process(cat, metric='Periodic')

    # Or if only give one kind of period
    ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
                                  min_u=min_u, max_u=max_u, nubins=nubins,
                                  min_v=min_v, max_v=max_v, nvbins=nvbins,
                                  xperiod=3)
    with assert_raises(ValueError):
        ddd.process(cat, metric='Periodic')
    ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
                                  min_u=min_u, max_u=max_u, nubins=nubins,
                                  min_v=min_v, max_v=max_v, nvbins=nvbins,
                                  yperiod=3)
    with assert_raises(ValueError):
        ddd.process(cat, metric='Periodic')

    # If give period, but then don't use Periodic metric, that's also an error.
    ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
                                  min_u=min_u, max_u=max_u, nubins=nubins,
                                  min_v=min_v, max_v=max_v, nvbins=nvbins,
                                  period=3)
    with assert_raises(ValueError):
        ddd.process(cat)
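
wrap() is a helper defined elsewhere (not shown in this example). From the way it is called above, it applies the minimum-image convention: it shifts the first two coordinates by plus or minus the period so that each lies within half a period of the third, before the triangle side lengths are measured. One plausible sketch consistent with that usage, offered only as an assumption; the actual helper may differ in detail:

def wrap(x1, x2, xp, x3):
    # Hypothetical sketch: move x1 and x2 by +-xp so each is within xp/2 of x3.
    if x1 - x3 > xp / 2.: x1 -= xp
    if x1 - x3 < -xp / 2.: x1 += xp
    if x2 - x3 > xp / 2.: x2 -= xp
    if x2 - x3 < -xp / 2.: x2 += xp
    return x1, x2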
Code example #14
def test_direct():
    # If the catalogs are small enough, we can do a direct calculation to see if it comes out right.
    # This should exactly match the treecorr result if brute=True.

    ngal = 200
    s = 10.
    rng = np.random.RandomState(8675309)
    x1 = rng.normal(0, s, (ngal, ))
    y1 = rng.normal(0, s, (ngal, ))
    w1 = rng.random_sample(ngal)

    x2 = rng.normal(0, s, (ngal, ))
    y2 = rng.normal(0, s, (ngal, ))
    w2 = rng.random_sample(ngal)
    k2 = rng.normal(0, 3, (ngal, ))

    cat1 = treecorr.Catalog(x=x1, y=y1, w=w1)
    cat2 = treecorr.Catalog(x=x2, y=y2, w=w2, k=k2)

    min_sep = 1.
    max_sep = 50.
    nbins = 50
    bin_size = np.log(max_sep / min_sep) / nbins
    nk = treecorr.NKCorrelation(min_sep=min_sep,
                                max_sep=max_sep,
                                nbins=nbins,
                                brute=True)
    nk.process(cat1, cat2)

    true_npairs = np.zeros(nbins, dtype=int)
    true_weight = np.zeros(nbins, dtype=float)
    true_xi = np.zeros(nbins, dtype=float)
    for i in range(ngal):
        # It's hard to do all the pairs at once with numpy operations (although maybe possible).
        # But we can at least do all the pairs for each entry in cat1 at once with arrays.
        rsq = (x1[i] - x2)**2 + (y1[i] - y2)**2
        r = np.sqrt(rsq)

        ww = w1[i] * w2
        xi = ww * k2

        index = np.floor(np.log(r / min_sep) / bin_size).astype(int)
        mask = (index >= 0) & (index < nbins)
        np.add.at(true_npairs, index[mask], 1)
        np.add.at(true_weight, index[mask], ww[mask])
        np.add.at(true_xi, index[mask], xi[mask])

    true_xi /= true_weight

    print('true_npairs = ', true_npairs)
    print('diff = ', nk.npairs - true_npairs)
    np.testing.assert_array_equal(nk.npairs, true_npairs)

    print('true_weight = ', true_weight)
    print('diff = ', nk.weight - true_weight)
    np.testing.assert_allclose(nk.weight, true_weight, rtol=1.e-5, atol=1.e-8)

    print('true_xi = ', true_xi)
    print('nk.xi = ', nk.xi)
    np.testing.assert_allclose(nk.xi, true_xi, rtol=1.e-4, atol=1.e-8)

    try:
        import fitsio
    except ImportError:
        print('Skipping FITS tests, since fitsio is not installed')
        return

    # Check that running via the corr2 script works correctly.
    config = treecorr.config.read_config('configs/nk_direct.yaml')
    cat1.write(config['file_name'])
    cat2.write(config['file_name2'])
    treecorr.corr2(config)
    data = fitsio.read(config['nk_file_name'])
    np.testing.assert_allclose(data['r_nom'], nk.rnom)
    np.testing.assert_allclose(data['npairs'], nk.npairs)
    np.testing.assert_allclose(data['weight'], nk.weight)
    np.testing.assert_allclose(data['kappa'], nk.xi, rtol=1.e-3)

    # Invalid with only one file_name
    del config['file_name2']
    with assert_raises(TypeError):
        treecorr.corr2(config)
    config['file_name2'] = 'data/nk_direct_cat2.fits'
    # Invalid to request compensated if no rand_file
    config['nk_statistic'] = 'compensated'
    with assert_raises(TypeError):
        treecorr.corr2(config)

    # Repeat with bin_slop = 0, since the code flow is different from brute=True
    # And don't do any top-level recursion so we actually test not going to the leaves.
    nk = treecorr.NKCorrelation(min_sep=min_sep,
                                max_sep=max_sep,
                                nbins=nbins,
                                bin_slop=0,
                                max_top=0)
    nk.process(cat1, cat2)
    np.testing.assert_array_equal(nk.npairs, true_npairs)
    np.testing.assert_allclose(nk.weight, true_weight, rtol=1.e-5, atol=1.e-8)
    np.testing.assert_allclose(nk.xi, true_xi, rtol=1.e-4, atol=1.e-8)

    # Check a few basic operations with a NKCorrelation object.
    do_pickle(nk)

    nk2 = nk.copy()
    nk2 += nk
    np.testing.assert_allclose(nk2.npairs, 2 * nk.npairs)
    np.testing.assert_allclose(nk2.weight, 2 * nk.weight)
    np.testing.assert_allclose(nk2.meanr, 2 * nk.meanr)
    np.testing.assert_allclose(nk2.meanlogr, 2 * nk.meanlogr)
    np.testing.assert_allclose(nk2.xi, 2 * nk.xi)

    nk2.clear()
    nk2 += nk
    np.testing.assert_allclose(nk2.npairs, nk.npairs)
    np.testing.assert_allclose(nk2.weight, nk.weight)
    np.testing.assert_allclose(nk2.meanr, nk.meanr)
    np.testing.assert_allclose(nk2.meanlogr, nk.meanlogr)
    np.testing.assert_allclose(nk2.xi, nk.xi)

    ascii_name = 'output/nk_ascii.txt'
    nk.write(ascii_name, precision=16)
    nk3 = treecorr.NKCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins)
    nk3.read(ascii_name)
    np.testing.assert_allclose(nk3.npairs, nk.npairs)
    np.testing.assert_allclose(nk3.weight, nk.weight)
    np.testing.assert_allclose(nk3.meanr, nk.meanr)
    np.testing.assert_allclose(nk3.meanlogr, nk.meanlogr)
    np.testing.assert_allclose(nk3.xi, nk.xi)

    with assert_raises(TypeError):
        nk2 += config
    nk4 = treecorr.NKCorrelation(min_sep=min_sep / 2,
                                 max_sep=max_sep,
                                 nbins=nbins)
    with assert_raises(ValueError):
        nk2 += nk4
    nk5 = treecorr.NKCorrelation(min_sep=min_sep,
                                 max_sep=max_sep * 2,
                                 nbins=nbins)
    with assert_raises(ValueError):
        nk2 += nk5
    nk6 = treecorr.NKCorrelation(min_sep=min_sep,
                                 max_sep=max_sep,
                                 nbins=nbins * 2)
    with assert_raises(ValueError):
        nk2 += nk6

    fits_name = 'output/nk_fits.fits'
    nk.write(fits_name)
    nk4 = treecorr.NKCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins)
    nk4.read(fits_name)
    np.testing.assert_allclose(nk4.npairs, nk.npairs)
    np.testing.assert_allclose(nk4.weight, nk.weight)
    np.testing.assert_allclose(nk4.meanr, nk.meanr)
    np.testing.assert_allclose(nk4.meanlogr, nk.meanlogr)
    np.testing.assert_allclose(nk4.xi, nk.xi)
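
Both test_direct examples above use the same logarithmic binning: with bin_size = log(max_sep/min_sep)/nbins, a pair at separation r lands in bin floor(log(r/min_sep)/bin_size), and pairs whose index falls outside [0, nbins) are dropped by the mask. A quick standalone check of that mapping:

import numpy as np

min_sep, max_sep, nbins = 1., 50., 50
bin_size = np.log(max_sep / min_sep) / nbins
r = np.array([0.5, 1.0, 7.0, 49.9, 60.0])
index = np.floor(np.log(r / min_sep) / bin_size).astype(int)
mask = (index >= 0) & (index < nbins)
print(index)     # -> -9, 0, 24, 49, 52
print(mask)      # -> False, True, True, True, False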
Code example #15
File: test_reader.py  Project: ztq1996/TreeCorr
def test_fits_reader():
    try:
        import fitsio
    except ImportError:
        print('Skipping FitsReader tests, since fitsio not installed.')
        return

    get_from_wiki('Aardvark.fit')
    r = FitsReader(os.path.join('data', 'Aardvark.fit'))

    # Check things not allowed if not in context
    with assert_raises(RuntimeError):
        r.read(['RA'], slice(0, 10, 2), 1)
    with assert_raises(RuntimeError):
        r.read('RA')
    with assert_raises(RuntimeError):
        r.row_count('DEC', 1)
    with assert_raises(RuntimeError):
        r.row_count()
    with assert_raises(RuntimeError):
        r.names(1)
    with assert_raises(RuntimeError):
        r.names()
    with assert_raises(RuntimeError):
        1 in r

    with r:
        assert_raises(ValueError, r.check_valid_ext, 'invalid')
        assert_raises(ValueError, r.check_valid_ext, 0)
        r.check_valid_ext('AARDWOLF')
        r.check_valid_ext(1)

        # Default ext is 1
        assert r.default_ext == 1

        # Default ext is "in" reader
        assert 1 in r

        # Probably can slice, but depends on installed fitsio version
        assert r.can_slice == (fitsio.__version__ > '1.0.6')

        s = slice(0, 10, 2)
        for ext in [1, 'AARDWOLF']:
            data = r.read(['RA'], s, ext)
            dec = r.read('DEC', s, ext)
            assert data['RA'].size == 5
            assert dec.size == 5

            assert r.row_count('RA', ext) == 390935
            assert r.row_count('GAMMA1', ext) == 390935
            assert set(r.names(ext)) == set(
                "INDEX RA DEC Z EPSILON GAMMA1 GAMMA2 KAPPA MU".split())
            assert set(r.names(ext)) == set(r.names())

        # Can read without slice or ext to use defaults
        assert r.row_count() == 390935
        g2 = r.read('GAMMA2')
        assert len(g2) == 390935
        d = r.read(['KAPPA', 'MU'])
        assert len(d['KAPPA']) == 390935
        assert len(d['MU']) == 390935

        # check we can also index with an integer array, not just a slice
        d = r.read(['DEC'], np.arange(10), 'AARDWOLF')
        assert d.size == 10

        if sys.version_info < (3, ): return  # mock only available on python 3
        from unittest import mock

    # Again check things not allowed if not in context
    with assert_raises(RuntimeError):
        r.read(['RA'], slice(0, 10, 2), 1)
    with assert_raises(RuntimeError):
        r.read('RA')
    with assert_raises(RuntimeError):
        r.row_count('DEC', 1)
    with assert_raises(RuntimeError):
        r.row_count()
    with assert_raises(RuntimeError):
        r.names(1)
    with assert_raises(RuntimeError):
        r.names()
    with assert_raises(RuntimeError):
        1 in r

    # Regardless of the system's fitsio version, check the two cases in code.
    with mock.patch('fitsio.__version__', '1.0.6'):
        with FitsReader(os.path.join('data', 'Aardvark.fit')) as r:
            assert not r.can_slice
    with mock.patch('fitsio.__version__', '1.1.0'):
        with FitsReader(os.path.join('data', 'Aardvark.fit')) as r:
            assert r.can_slice
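
FitsReader is a thin wrapper around fitsio, so outside of TreeCorr the same rows can be read directly. Roughly the equivalent direct fitsio calls are shown below, as an illustrative sketch assuming the same file and extension layout as in the test above:

import numpy as np
import fitsio

with fitsio.FITS('data/Aardvark.fit') as f:
    hdu = f['AARDWOLF']                              # or f[1]; the same extension here
    data = hdu.read(columns=['RA'], rows=np.arange(0, 10, 2))
    nrows = hdu.get_nrows()                          # 390935 for this catalog
    cols = hdu.get_colnames()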
Code example #16
File: test_twod.py  Project: rmjarvis/TreeCorr
def test_twod():
    try:
        from scipy.spatial.distance import pdist, squareform
    except ImportError:
        print('Skipping test_twod, since it uses scipy, and scipy is not installed.')
        return

    # N random points in 2 dimensions
    rng = np.random.RandomState(8675309)
    N = 200
    x = rng.uniform(-20, 20, N)
    y = rng.uniform(-20, 20, N)
    
    # Give the points a multivariate Gaussian random field for kappa and gamma
    L1 = [[0.33, 0.09], [-0.01, 0.26]]  # Some arbitrary correlation matrix
    invL1 = np.linalg.inv(L1)
    dists = pdist(np.array([x,y]).T, metric='mahalanobis', VI=invL1)
    K = np.exp(-0.5 * dists**2)
    K = squareform(K)
    np.fill_diagonal(K, 1.)

    A = 2.3
    kappa = rng.multivariate_normal(np.zeros(N), K*(A**2))

    # Add some noise
    sigma = A/10.
    kappa += rng.normal(scale=sigma, size=N)
    kappa_err = np.ones_like(kappa) * sigma

    # Make gamma too
    gamma1 = rng.multivariate_normal(np.zeros(N), K*(A**2))
    gamma1 += rng.normal(scale=sigma, size=N)
    gamma2 = rng.multivariate_normal(np.zeros(N), K*(A**2))
    gamma2 += rng.normal(scale=sigma, size=N)
    gamma = gamma1 + 1j * gamma2
    gamma_err = kappa_err

    # Calculate the 2D correlation using brute force
    max_sep = 21.
    nbins = 21
    xi_brut = corr2d(x, y, kappa, kappa, w=None, rmax=max_sep, bins=nbins)

    cat1 = treecorr.Catalog(x=x, y=y, k=kappa, g1=gamma1, g2=gamma2)
    kk = treecorr.KKCorrelation(min_sep=0., max_sep=max_sep, nbins=nbins, bin_type='TwoD',
                                brute=True)

    # First the simplest case to get right: cross correlation of the catalog with itself.
    kk.process(cat1, cat1)

    print('max abs diff = ',np.max(np.abs(kk.xi - xi_brut)))
    print('max rel diff = ',np.max(np.abs(kk.xi - xi_brut)/np.abs(kk.xi)))
    np.testing.assert_allclose(kk.xi, xi_brut, atol=1.e-7)

    # Auto-correlation should do the same thing.
    kk.process(cat1)
    print('max abs diff = ',np.max(np.abs(kk.xi - xi_brut)))
    print('max rel diff = ',np.max(np.abs(kk.xi - xi_brut)/np.abs(kk.xi)))
    np.testing.assert_allclose(kk.xi, xi_brut, atol=1.e-7)

    # Repeat with weights.
    xi_brut = corr2d(x, y, kappa, kappa, w=1./kappa_err**2, rmax=max_sep, bins=nbins)
    cat2 = treecorr.Catalog(x=x, y=y, k=kappa, g1=gamma1, g2=gamma2, w=1./kappa_err**2)
    # NB. Testing that min_sep = 0 is default
    kk = treecorr.KKCorrelation(max_sep=max_sep, nbins=nbins, bin_type='TwoD', brute=True)
    kk.process(cat2, cat2)
    print('max abs diff = ',np.max(np.abs(kk.xi - xi_brut)))
    print('max rel diff = ',np.max(np.abs(kk.xi - xi_brut)/np.abs(kk.xi)))
    np.testing.assert_allclose(kk.xi, xi_brut, atol=1.e-7)

    kk.process(cat2)
    np.testing.assert_allclose(kk.xi, xi_brut, atol=1.e-7)

    # Check GG
    xi_brut = corr2d(x, y, gamma, np.conj(gamma), rmax=max_sep, bins=nbins)
    # Equivalent bin_size = 2.  Check omitting nbins
    gg = treecorr.GGCorrelation(max_sep=max_sep, bin_size=2., bin_type='TwoD', brute=True)
    gg.process(cat1)
    print('max abs diff = ',np.max(np.abs(gg.xip - xi_brut)))
    print('max rel diff = ',np.max(np.abs(gg.xip - xi_brut)/np.abs(gg.xip)))
    np.testing.assert_allclose(gg.xip, xi_brut, atol=2.e-7)

    xi_brut = corr2d(x, y, gamma, np.conj(gamma), w=1./kappa_err**2, rmax=max_sep, bins=nbins)
    # Check omitting max_sep
    gg = treecorr.GGCorrelation(bin_size=2, nbins=nbins, bin_type='TwoD', brute=True)
    gg.process(cat2)
    print('max abs diff = ',np.max(np.abs(gg.xip - xi_brut)))
    print('max rel diff = ',np.max(np.abs(gg.xip - xi_brut)/np.abs(gg.xip)))
    np.testing.assert_allclose(gg.xip, xi_brut, atol=2.e-7)

    # Check NK
    xi_brut = corr2d(x, y, np.ones_like(kappa), kappa, rmax=max_sep, bins=nbins)
    # Check slightly larger bin_size gets rounded down
    nk = treecorr.NKCorrelation(max_sep=max_sep, bin_size=2.05, bin_type='TwoD', brute=True)
    nk.process(cat1, cat1)
    print('max abs diff = ',np.max(np.abs(nk.xi - xi_brut)))
    print('max rel diff = ',np.max(np.abs(nk.xi - xi_brut)/np.abs(nk.xi)))
    np.testing.assert_allclose(nk.xi, xi_brut, atol=1.e-7)

    xi_brut = corr2d(x, y, np.ones_like(kappa), kappa, w=1./kappa_err**2, rmax=max_sep, bins=nbins)
    # Check very small, but non-zero min_sep
    nk = treecorr.NKCorrelation(min_sep=1.e-6, max_sep=max_sep, nbins=nbins, bin_type='TwoD', brute=True)
    nk.process(cat2, cat2)
    print('max abs diff = ',np.max(np.abs(nk.xi - xi_brut)))
    print('max rel diff = ',np.max(np.abs(nk.xi - xi_brut)/np.abs(nk.xi)))
    np.testing.assert_allclose(nk.xi, xi_brut, atol=1.e-7)

    # Check NN
    xi_brut, counts = corr2d(x, y, np.ones_like(kappa), np.ones_like(kappa),
                             rmax=max_sep, bins=nbins, return_counts=True)
    nn = treecorr.NNCorrelation(max_sep=max_sep, nbins=nbins, bin_type='TwoD', brute=True)
    nn.process(cat1)
    print('max abs diff = ',np.max(np.abs(nn.npairs - counts)))
    print('max rel diff = ',np.max(np.abs(nn.npairs - counts)/np.abs(nn.npairs)))
    np.testing.assert_allclose(nn.npairs, counts, atol=1.e-7)

    nn.process(cat1, cat1)
    print('max abs diff = ',np.max(np.abs(nn.npairs - counts)))
    print('max rel diff = ',np.max(np.abs(nn.npairs - counts)/np.abs(nn.npairs)))
    np.testing.assert_allclose(nn.npairs, counts, atol=1.e-7)

    xi_brut, counts = corr2d(x, y, np.ones_like(kappa), np.ones_like(kappa),
                             w=1./kappa_err**2, rmax=max_sep, bins=nbins, return_counts=True)
    nn = treecorr.NNCorrelation(max_sep=max_sep, nbins=nbins, bin_type='TwoD', brute=True)
    nn.process(cat2)
    print('max abs diff = ',np.max(np.abs(nn.weight - counts)))
    print('max rel diff = ',np.max(np.abs(nn.weight - counts)/np.abs(nn.weight)))
    np.testing.assert_allclose(nn.weight, counts, atol=1.e-7)

    nn.process(cat2, cat2)
    print('max abs diff = ',np.max(np.abs(nn.weight - counts)))
    print('max rel diff = ',np.max(np.abs(nn.weight - counts)/np.abs(nn.weight)))
    np.testing.assert_allclose(nn.weight, counts, atol=1.e-7)

    # The other two, NG and KG can't really be checked with the brute force
    # calculator we have here, so we're counting on the above being a sufficient
    # test of all aspects of the twod binning.  I think that it is sufficient, but I
    # admit I would prefer if we had a real test of these other two pairs, along with
    # xi- for GG.

    # Check invalid constructors
    assert_raises(TypeError, treecorr.NNCorrelation, max_sep=max_sep, nbins=nbins, bin_size=2,
                  bin_type='TwoD')
    assert_raises(TypeError, treecorr.NNCorrelation, nbins=nbins, bin_type='TwoD')
    assert_raises(TypeError, treecorr.NNCorrelation, bin_size=2, bin_type='TwoD')
    assert_raises(TypeError, treecorr.NNCorrelation, max_sep=max_sep, bin_type='TwoD')
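
corr2d is a brute-force helper defined at the top of test_twod.py: it bins every ordered pair of points by its (dx, dy) offset on a square grid of half-width rmax and averages the (optionally weighted) product of the two fields in each cell. A rough sketch of such a helper, under the assumption that this matches what the test relies on; the real helper may differ in details such as the handling of empty cells:

import numpy as np

def corr2d_sketch(x, y, k1, k2, w=None, rmax=1., bins=16, return_counts=False):
    """Illustrative brute-force 2D correlation: bin pairs by their (dx, dy) offset."""
    i1, i2 = np.meshgrid(np.arange(len(x)), np.arange(len(x)))
    i1, i2 = i1.ravel(), i2.ravel()
    dx = x[i2] - x[i1]
    dy = y[i2] - y[i1]
    ww = np.ones(len(i1)) if w is None else w[i1] * w[i2]
    hrange = [[-rmax, rmax], [-rmax, rmax]]
    counts = np.histogram2d(dx, dy, bins=bins, range=hrange, weights=ww)[0]
    vv = np.real(k1[i1] * k2[i2]) * ww
    xi = np.histogram2d(dx, dy, bins=bins, range=hrange, weights=vv)[0]
    xi /= np.where(counts > 0, counts, 1.)           # avoid dividing by empty cells
    return (xi, counts) if return_counts else xi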
Code example #17
File: test_periodic.py  Project: rmjarvis/TreeCorr
def test_direct_count():
    # This is essentially the same as test_nn.py:test_direct_count, but using periodic distances.
    # And the points are uniform in the box, so plenty of pairs crossing the edges.

    ngal = 100
    Lx = 50.
    Ly = 80.
    rng = np.random.RandomState(8675309)
    x1 = (rng.random_sample(ngal)-0.5) * Lx
    y1 = (rng.random_sample(ngal)-0.5) * Ly
    cat1 = treecorr.Catalog(x=x1, y=y1)
    x2 = (rng.random_sample(ngal)-0.5) * Lx
    y2 = (rng.random_sample(ngal)-0.5) * Ly
    cat2 = treecorr.Catalog(x=x2, y=y2)

    min_sep = 1.
    max_sep = 50.
    nbins = 50
    dd = treecorr.NNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, bin_slop=0,
                                xperiod=Lx, yperiod=Ly)
    dd.process(cat1, cat2, metric='Periodic')
    print('dd.npairs = ',dd.npairs)

    log_min_sep = np.log(min_sep)
    log_max_sep = np.log(max_sep)
    true_npairs = np.zeros(nbins)
    bin_size = (log_max_sep - log_min_sep) / nbins
    for i in range(ngal):
        for j in range(ngal):
            dx = min(abs(x1[i]-x2[j]), Lx - abs(x1[i]-x2[j]))
            dy = min(abs(y1[i]-y2[j]), Ly - abs(y1[i]-y2[j]))
            rsq = dx**2 + dy**2
            logr = 0.5 * np.log(rsq)
            k = int(np.floor( (logr-log_min_sep) / bin_size ))
            if k < 0: continue
            if k >= nbins: continue
            true_npairs[k] += 1

    print('true_npairs = ',true_npairs)
    print('diff = ',dd.npairs - true_npairs)
    np.testing.assert_array_equal(dd.npairs, true_npairs)

    # Check that running via the corr2 script works correctly.
    file_name1 = os.path.join('data','nn_periodic_data1.dat')
    with open(file_name1, 'w') as fid:
        for i in range(ngal):
            fid.write(('%.20f %.20f\n')%(x1[i],y1[i]))
    file_name2 = os.path.join('data','nn_periodic_data2.dat')
    with open(file_name2, 'w') as fid:
        for i in range(ngal):
            fid.write(('%.20f %.20f\n')%(x2[i],y2[i]))
    nrand = ngal
    rx1 = (rng.random_sample(nrand)-0.5) * Lx
    ry1 = (rng.random_sample(nrand)-0.5) * Ly
    rx2 = (rng.random_sample(nrand)-0.5) * Lx
    ry2 = (rng.random_sample(nrand)-0.5) * Ly
    rcat1 = treecorr.Catalog(x=rx1, y=ry1)
    rcat2 = treecorr.Catalog(x=rx2, y=ry2)
    rand_file_name1 = os.path.join('data','nn_periodic_rand1.dat')
    with open(rand_file_name1, 'w') as fid:
        for i in range(nrand):
            fid.write(('%.20f %.20f\n')%(rx1[i],ry1[i]))
    rand_file_name2 = os.path.join('data','nn_periodic_rand2.dat')
    with open(rand_file_name2, 'w') as fid:
        for i in range(nrand):
            fid.write(('%.20f %.20f\n')%(rx2[i],ry2[i]))
    rr = treecorr.NNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, bin_slop=0,
                                verbose=0, xperiod=Lx, yperiod=Ly)
    rr.process(rcat1,rcat2, metric='Periodic')
    xi, varxi = dd.calculateXi(rr)
    print('xi = ',xi)

    # Do this via the corr2 function.
    config = treecorr.config.read_config('configs/nn_periodic.yaml')
    logger = treecorr.config.setup_logger(2)
    treecorr.corr2(config, logger)
    corr2_output = np.genfromtxt(os.path.join('output','nn_periodic.out'), names=True,
                                    skip_header=1)
    np.testing.assert_allclose(corr2_output['r_nom'], dd.rnom, rtol=1.e-3)
    np.testing.assert_allclose(corr2_output['DD'], dd.npairs, rtol=1.e-3)
    np.testing.assert_allclose(corr2_output['npairs'], dd.npairs, rtol=1.e-3)
    np.testing.assert_allclose(corr2_output['RR'], rr.npairs, rtol=1.e-3)
    np.testing.assert_allclose(corr2_output['xi'], xi, rtol=1.e-3)

    # If we don't give a period, it's an error.
    rr = treecorr.NNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins)
    with assert_raises(ValueError):
        rr.process(rcat1,rcat2, metric='Periodic')

    # Or if we only give one kind of period
    rr = treecorr.NNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, xperiod=3)
    with assert_raises(ValueError):
        rr.process(rcat1,rcat2, metric='Periodic')
    rr = treecorr.NNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, yperiod=3)
    with assert_raises(ValueError):
        rr.process(rcat1,rcat2, metric='Periodic')

    # If we give a period but then don't use the Periodic metric, that's also an error.
    rr = treecorr.NNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, period=3)
    with assert_raises(ValueError):
        rr.process(rcat1,rcat2)
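
A note on the brute-force check in this example: the dx/dy computation implements the minimum-image convention for periodic distances. Purely as an illustrative sketch (not part of TreeCorr or its test suite; the helper name periodic_separation is hypothetical), the same pairwise distances could be computed with vectorized numpy like this:

import numpy as np

def periodic_separation(x1, y1, x2, y2, Lx, Ly):
    # Minimum-image separations on a periodic box: each coordinate difference
    # is wrapped into [-L/2, L/2] before forming the Euclidean distance,
    # matching the min(|d|, L - |d|) logic in the explicit double loop above.
    dx = np.abs(x1[:, None] - x2[None, :])
    dy = np.abs(y1[:, None] - y2[None, :])
    dx = np.minimum(dx, Lx - dx)
    dy = np.minimum(dy, Ly - dy)
    return np.sqrt(dx**2 + dy**2)

Histogramming np.log of these separations with nbins bins between log(min_sep) and log(max_sep) should then reproduce true_npairs, up to how pairs that land exactly on a bin boundary are assigned.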
Code example #18
File: test_config.py  Project: joezuntz/TreeCorr
def test_util():
    # Test some error handling in utility functions that shouldn't be possible to get to
    # in normal running, so we need to call things explicitly to get the coverage.

    # First some I/O sanity checks
    a = np.array([1, 2, 3])
    b = np.array([4, 5, 6])
    file_name = 'junk.out'
    with assert_raises(ValueError):
        treecorr.util.gen_write(file_name, ['a', 'b'], [a])
    with assert_raises(ValueError):
        treecorr.util.gen_write(file_name, ['a', 'b'], [a, b, a])
    with assert_raises(ValueError):
        treecorr.util.gen_write(file_name, ['a'], [a, b])
    with assert_raises(ValueError):
        treecorr.util.gen_write(file_name, [], [])
    with assert_raises(ValueError):
        treecorr.util.gen_write(file_name, ['a', 'b'], [a, b[:1]])
    with assert_raises(ValueError):
        treecorr.util.gen_write(file_name, ['a', 'b'], [a, b],
                                file_type='Invalid')

    with assert_raises(ValueError):
        treecorr.util.gen_read(file_name, file_type='Invalid')

    # Now some places that do sanity checks for invalid coords or metrics which would already
    # be checked in normal operation.
    with assert_raises(ValueError):
        treecorr.util.parse_metric('Euclidean', 'invalid')
    with assert_raises(ValueError):
        treecorr.util.parse_metric('Invalid', 'flat')
    with assert_raises(ValueError):
        treecorr.util.coord_enum('invalid')
    with assert_raises(ValueError):
        treecorr.util.metric_enum('Invalid')
Code example #19
def test_twod():
    try:
        from scipy.spatial.distance import pdist, squareform
    except ImportError:
        print('Skipping test_twod, since it uses scipy, and scipy is not installed.')
        return

    # N random points in 2 dimensions
    rng = np.random.RandomState(8675309)
    N = 200
    x = rng.uniform(-20, 20, N)
    y = rng.uniform(-20, 20, N)
    
    # Give the points a multivariate Gaussian random field for kappa and gamma
    L1 = [[0.33, 0.09], [-0.01, 0.26]]  # Some arbitrary correlation matrix
    invL1 = np.linalg.inv(L1)
    dists = pdist(np.array([x,y]).T, metric='mahalanobis', VI=invL1)
    K = np.exp(-0.5 * dists**2)
    K = squareform(K)
    np.fill_diagonal(K, 1.)

    A = 2.3
    kappa = rng.multivariate_normal(np.zeros(N), K*(A**2))

    # Add some noise
    sigma = A/10.
    kappa += rng.normal(scale=sigma, size=N)
    kappa_err = np.ones_like(kappa) * sigma

    # Make gamma too
    gamma1 = rng.multivariate_normal(np.zeros(N), K*(A**2))
    gamma1 += rng.normal(scale=sigma, size=N)
    gamma2 = rng.multivariate_normal(np.zeros(N), K*(A**2))
    gamma2 += rng.normal(scale=sigma, size=N)
    gamma = gamma1 + 1j * gamma2
    gamma_err = kappa_err

    # Calculate the 2D correlation using brute force
    max_sep = 21.
    nbins = 21
    xi_brut = corr2d(x, y, kappa, kappa, w=None, rmax=max_sep, bins=nbins)

    cat1 = treecorr.Catalog(x=x, y=y, k=kappa, g1=gamma1, g2=gamma2)
    kk = treecorr.KKCorrelation(min_sep=0., max_sep=max_sep, nbins=nbins, bin_type='TwoD',
                                brute=True)

    # First the simplest case to get right: cross correlation of the catalog with itself.
    kk.process(cat1, cat1)

    print('max abs diff = ',np.max(np.abs(kk.xi - xi_brut)))
    print('max rel diff = ',np.max(np.abs(kk.xi - xi_brut)/np.abs(kk.xi)))
    np.testing.assert_allclose(kk.xi, xi_brut, atol=1.e-7)

    # Auto-correlation should do the same thing.
    kk.process(cat1)
    print('max abs diff = ',np.max(np.abs(kk.xi - xi_brut)))
    print('max rel diff = ',np.max(np.abs(kk.xi - xi_brut)/np.abs(kk.xi)))
    np.testing.assert_allclose(kk.xi, xi_brut, atol=1.e-7)

    # Repeat with weights.
    xi_brut = corr2d(x, y, kappa, kappa, w=1./kappa_err**2, rmax=max_sep, bins=nbins)
    cat2 = treecorr.Catalog(x=x, y=y, k=kappa, g1=gamma1, g2=gamma2, w=1./kappa_err**2)
    # NB. Testing that min_sep=0 is the default
    kk = treecorr.KKCorrelation(max_sep=max_sep, nbins=nbins, bin_type='TwoD', brute=True)
    kk.process(cat2, cat2)
    print('max abs diff = ',np.max(np.abs(kk.xi - xi_brut)))
    print('max rel diff = ',np.max(np.abs(kk.xi - xi_brut)/np.abs(kk.xi)))
    np.testing.assert_allclose(kk.xi, xi_brut, atol=1.e-7)

    kk.process(cat2)
    np.testing.assert_allclose(kk.xi, xi_brut, atol=1.e-7)

    # Check GG
    xi_brut = corr2d(x, y, gamma, np.conj(gamma), rmax=max_sep, bins=nbins)
    # Equivalent bin_size = 2.  Check omitting nbins
    gg = treecorr.GGCorrelation(max_sep=max_sep, bin_size=2., bin_type='TwoD', brute=True)
    gg.process(cat1)
    print('max abs diff = ',np.max(np.abs(gg.xip - xi_brut)))
    print('max rel diff = ',np.max(np.abs(gg.xip - xi_brut)/np.abs(gg.xip)))
    np.testing.assert_allclose(gg.xip, xi_brut, atol=2.e-7)

    xi_brut = corr2d(x, y, gamma, np.conj(gamma), w=1./kappa_err**2, rmax=max_sep, bins=nbins)
    # Check omitting max_sep
    gg = treecorr.GGCorrelation(bin_size=2, nbins=nbins, bin_type='TwoD', brute=True)
    gg.process(cat2)
    print('max abs diff = ',np.max(np.abs(gg.xip - xi_brut)))
    print('max rel diff = ',np.max(np.abs(gg.xip - xi_brut)/np.abs(gg.xip)))
    np.testing.assert_allclose(gg.xip, xi_brut, atol=2.e-7)

    # Check NK
    xi_brut = corr2d(x, y, np.ones_like(kappa), kappa, rmax=max_sep, bins=nbins)
    # Check that a slightly larger bin_size gets rounded down
    nk = treecorr.NKCorrelation(max_sep=max_sep, bin_size=2.05, bin_type='TwoD', brute=True)
    nk.process(cat1, cat1)
    print('max abs diff = ',np.max(np.abs(nk.xi - xi_brut)))
    print('max rel diff = ',np.max(np.abs(nk.xi - xi_brut)/np.abs(nk.xi)))
    np.testing.assert_allclose(nk.xi, xi_brut, atol=1.e-7)

    xi_brut = corr2d(x, y, np.ones_like(kappa), kappa, w=1./kappa_err**2, rmax=max_sep, bins=nbins)
    # Check a very small, but non-zero min_sep
    nk = treecorr.NKCorrelation(min_sep=1.e-6, max_sep=max_sep, nbins=nbins, bin_type='TwoD', brute=True)
    nk.process(cat2, cat2)
    print('max abs diff = ',np.max(np.abs(nk.xi - xi_brut)))
    print('max rel diff = ',np.max(np.abs(nk.xi - xi_brut)/np.abs(nk.xi)))
    np.testing.assert_allclose(nk.xi, xi_brut, atol=1.e-7)

    # Check NN
    xi_brut, counts = corr2d(x, y, np.ones_like(kappa), np.ones_like(kappa),
                             rmax=max_sep, bins=nbins, return_counts=True)
    nn = treecorr.NNCorrelation(max_sep=max_sep, nbins=nbins, bin_type='TwoD', brute=True)
    nn.process(cat1)
    print('max abs diff = ',np.max(np.abs(nn.npairs - counts)))
    print('max rel diff = ',np.max(np.abs(nn.npairs - counts)/np.abs(nn.npairs)))
    np.testing.assert_allclose(nn.npairs, counts, atol=1.e-7)

    nn.process(cat1, cat1)
    print('max abs diff = ',np.max(np.abs(nn.npairs - counts)))
    print('max rel diff = ',np.max(np.abs(nn.npairs - counts)/np.abs(nn.npairs)))
    np.testing.assert_allclose(nn.npairs, counts, atol=1.e-7)

    xi_brut, counts = corr2d(x, y, np.ones_like(kappa), np.ones_like(kappa),
                             w=1./kappa_err**2, rmax=max_sep, bins=nbins, return_counts=True)
    nn = treecorr.NNCorrelation(max_sep=max_sep, nbins=nbins, bin_type='TwoD', brute=True)
    nn.process(cat2)
    print('max abs diff = ',np.max(np.abs(nn.weight - counts)))
    print('max rel diff = ',np.max(np.abs(nn.weight - counts)/np.abs(nn.weight)))
    np.testing.assert_allclose(nn.weight, counts, atol=1.e-7)

    nn.process(cat2, cat2)
    print('max abs diff = ',np.max(np.abs(nn.weight - counts)))
    print('max rel diff = ',np.max(np.abs(nn.weight - counts)/np.abs(nn.weight)))
    np.testing.assert_allclose(nn.weight, counts, atol=1.e-7)

    # The other two, NG and KG, can't really be checked with the brute-force
    # calculator we have here, so we're counting on the above being a sufficient
    # test of all aspects of the TwoD binning.  I think that it is sufficient, but I
    # admit I would prefer it if we had a real test of these other two pairs, along
    # with xi- for GG.

    # Check invalid constructors
    assert_raises(TypeError, treecorr.NNCorrelation, max_sep=max_sep, nbins=nbins, bin_size=2,
                  bin_type='TwoD')
    assert_raises(TypeError, treecorr.NNCorrelation, nbins=nbins, bin_type='TwoD')
    assert_raises(TypeError, treecorr.NNCorrelation, bin_size=2, bin_type='TwoD')
    assert_raises(TypeError, treecorr.NNCorrelation, max_sep=max_sep, bin_type='TwoD')
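
The corr2d helper used throughout this test is defined earlier in test_twod.py and is not included in this excerpt. Purely as a reconstruction of the idea (the actual helper may differ in details such as complex handling and edge conventions; the name corr2d_sketch is my own), a brute-force 2D-binned estimator could look like this:

import numpy as np

def corr2d_sketch(x, y, k1, k2, w=None, rmax=20., bins=20, return_counts=False):
    # Brute-force two-dimensional binning: every ordered pair (i, j), including
    # i == j, is binned by its separation vector (dx, dy) on a bins x bins grid
    # covering [-rmax, rmax] in each direction.
    if w is None:
        w = np.ones_like(x)
    dx = (x[:, None] - x[None, :]).ravel()
    dy = (y[:, None] - y[None, :]).ravel()
    ww = (w[:, None] * w[None, :]).ravel()
    # For real fields (kappa) this is just k_i * k_j; for shears, passing
    # gamma and conj(gamma) makes the real part the analogue of xi+.
    vv = ww * np.real(k1[:, None] * k2[None, :]).ravel()
    edges = np.linspace(-rmax, rmax, bins + 1)
    wsum, _, _ = np.histogram2d(dx, dy, bins=[edges, edges], weights=ww)
    vsum, _, _ = np.histogram2d(dx, dy, bins=[edges, edges], weights=vv)
    xi = np.zeros_like(wsum)
    np.divide(vsum, wsum, out=xi, where=wsum > 0)  # weighted mean per 2D bin
    if return_counts:
        counts, _, _ = np.histogram2d(dx, dy, bins=[edges, edges])
        return xi, counts
    return xi

This mirrors the idea behind the 'TwoD' bin_type, which bins pairs by their separation vector on a square grid, so nbins here counts bins per axis rather than in total.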
Code example #20
File: test_periodic.py  Project: rmjarvis/TreeCorr
def test_3pt():
    # Test a direct calculation of the 3pt function with the Periodic metric.

    from test_nnn import is_ccw

    ngal = 50
    Lx = 250.
    Ly = 180.
    rng = np.random.RandomState(8675309)
    x = (rng.random_sample(ngal)-0.5) * Lx
    y = (rng.random_sample(ngal)-0.5) * Ly
    cat = treecorr.Catalog(x=x, y=y)

    min_sep = 1.
    max_sep = 40.  # This only really makes sense if max_sep < L/4 for all L.
    nbins = 50
    min_u = 0.13
    max_u = 0.89
    nubins = 10
    min_v = 0.13
    max_v = 0.59
    nvbins = 10

    ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
                                  min_u=min_u, max_u=max_u, nubins=nubins,
                                  min_v=min_v, max_v=max_v, nvbins=nvbins,
                                  bin_slop=0, xperiod=Lx, yperiod=Ly, brute=True)
    ddd.process(cat, metric='Periodic', num_threads=1)
    #print('ddd.ntri = ',ddd.ntri)

    log_min_sep = np.log(min_sep)
    log_max_sep = np.log(max_sep)
    true_ntri = np.zeros( (nbins, nubins, 2*nvbins) )
    bin_size = (log_max_sep - log_min_sep) / nbins
    ubin_size = (max_u-min_u) / nubins
    vbin_size = (max_v-min_v) / nvbins
    for i in range(ngal):
        for j in range(i+1,ngal):
            for k in range(j+1,ngal):
                xi = x[i]
                xj = x[j]
                xk = x[k]
                yi = y[i]
                yj = y[j]
                yk = y[k]
                #print(i,j,k,xi,yi,xj,yj,xk,yk)
                xi,xj = wrap(xi, xj, Lx, xk)
                #print('  ',xi,xj,xk)
                xi,xk = wrap(xi, xk, Lx, xj)
                #print('  ',xi,xj,xk)
                xj,xk = wrap(xj, xk, Lx, xi)
                #print('  ',xi,xj,xk)
                yi,yj = wrap(yi, yj, Ly, yk)
                #print('  ',yi,yj,yk)
                yi,yk = wrap(yi, yk, Ly, yj)
                #print('  ',yi,yj,yk)
                yj,yk = wrap(yj, yk, Ly, yi)
                #print('  ',yi,yj,yk)
                #print('->',xi,yi,xj,yj,xk,yk)
                dij = np.sqrt((xi-xj)**2 + (yi-yj)**2)
                dik = np.sqrt((xi-xk)**2 + (yi-yk)**2)
                djk = np.sqrt((xj-xk)**2 + (yj-yk)**2)
                if dij == 0.: continue
                if dik == 0.: continue
                if djk == 0.: continue
                ccw = True
                if dij < dik:
                    if dik < djk:
                        d3 = dij; d2 = dik; d1 = djk;
                        ccw = is_ccw(xi,yi,xj,yj,xk,yk)
                    elif dij < djk:
                        d3 = dij; d2 = djk; d1 = dik;
                        ccw = is_ccw(xj,yj,xi,yi,xk,yk)
                    else:
                        d3 = djk; d2 = dij; d1 = dik;
                        ccw = is_ccw(xj,yj,xk,yk,xi,yi)
                else:
                    if dij < djk:
                        d3 = dik; d2 = dij; d1 = djk;
                        ccw = is_ccw(xi,yi,xk,yk,xj,yj)
                    elif dik < djk:
                        d3 = dik; d2 = djk; d1 = dij;
                        ccw = is_ccw(xk,yk,xi,yi,xj,yj)
                    else:
                        d3 = djk; d2 = dik; d1 = dij;
                        ccw = is_ccw(xk,yk,xj,yj,xi,yi)

                #print('d1,d2,d3 = ',d1,d2,d3)
                r = d2
                u = d3/d2
                v = (d1-d2)/d3
                if r < min_sep or r >= max_sep: continue
                if u < min_u or u >= max_u: continue
                if v < min_v or v >= max_v: continue
                if not ccw:
                    v = -v
                #print('r,u,v = ',r,u,v)
                kr = int(np.floor( (np.log(r)-log_min_sep) / bin_size ))
                ku = int(np.floor( (u-min_u) / ubin_size ))
                if v > 0:
                    kv = int(np.floor( (v-min_v) / vbin_size )) + nvbins
                else:
                    kv = int(np.floor( (v-(-max_v)) / vbin_size ))
                #print('kr,ku,kv = ',kr,ku,kv)
                assert 0 <= kr < nbins
                assert 0 <= ku < nubins
                assert 0 <= kv < 2*nvbins
                true_ntri[kr,ku,kv] += 1
                #print('good.', true_ntri[kr,ku,kv])


    #print('true_ntri => ',true_ntri)
    #print('diff = ',ddd.ntri - true_ntri)
    mask = np.where(true_ntri > 0)
    #print('ddd.ntri[mask] = ',ddd.ntri[mask])
    #print('true_ntri[mask] = ',true_ntri[mask])
    #print('diff[mask] = ',(ddd.ntri - true_ntri)[mask])
    mask2 = np.where(ddd.ntri > 0)
    #print('ddd.ntri[mask2] = ',ddd.ntri[mask2])
    #print('true_ntri[mask2] = ',true_ntri[mask2])
    #print('diff[mask2] = ',(ddd.ntri - true_ntri)[mask2])
    np.testing.assert_array_equal(ddd.ntri, true_ntri)

    # If we don't give a period, it's an error.
    ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
                                  min_u=min_u, max_u=max_u, nubins=nubins,
                                  min_v=min_v, max_v=max_v, nvbins=nvbins)
    with assert_raises(ValueError):
        ddd.process(cat, metric='Periodic')

    # Or if we only give one kind of period
    ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
                                  min_u=min_u, max_u=max_u, nubins=nubins,
                                  min_v=min_v, max_v=max_v, nvbins=nvbins,
                                  xperiod=3)
    with assert_raises(ValueError):
        ddd.process(cat, metric='Periodic')
    ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
                                  min_u=min_u, max_u=max_u, nubins=nubins,
                                  min_v=min_v, max_v=max_v, nvbins=nvbins,
                                  yperiod=3)
    with assert_raises(ValueError):
        ddd.process(cat, metric='Periodic')

    # If we give a period but then don't use the Periodic metric, that's also an error.
    ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
                                  min_u=min_u, max_u=max_u, nubins=nubins,
                                  min_v=min_v, max_v=max_v, nvbins=nvbins,
                                  period=3)
    with assert_raises(ValueError):
        ddd.process(cat)
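
The wrap helper used in the triple loop above is defined elsewhere in test_periodic.py and is not shown in this excerpt. A plausible sketch of its behavior, assuming it shifts two coordinates by at most one period so that each lies within half a period of the third (reference) coordinate, would be the following (wrap_sketch is a hypothetical stand-in, not the actual helper):

def wrap_sketch(c1, c2, L, ref):
    # Shift c1 and c2 by +/- L where needed so that each ends up within L/2
    # of the reference coordinate, giving all three triangle vertices a
    # consistent periodic image before measuring the side lengths.
    def _shift(c):
        if c - ref > L / 2.:
            return c - L
        elif c - ref < -L / 2.:
            return c + L
        return c
    return _shift(c1), _shift(c2)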
Code example #21
File: test_ggg.py  Project: zchvsre/TreeCorr
def test_direct():
    # If the catalogs are small enough, we can do a direct calculation to see if it comes out right.
    # This should exactly match the treecorr result if brute=True.

    ngal = 100
    s = 10.
    rng = np.random.RandomState(8675309)
    x = rng.normal(0,s, (ngal,) )
    y = rng.normal(0,s, (ngal,) )
    w = rng.random_sample(ngal)
    g1 = rng.normal(0,0.2, (ngal,) )
    g2 = rng.normal(0,0.2, (ngal,) )

    cat = treecorr.Catalog(x=x, y=y, w=w, g1=g1, g2=g2)

    min_sep = 1.
    bin_size = 0.2
    nrbins = 10
    nubins = 5
    nvbins = 5
    max_sep = min_sep * np.exp(nrbins * bin_size)
    ggg = treecorr.GGGCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins, brute=True)
    ggg.process(cat, num_threads=2)

    true_ntri = np.zeros((nrbins, nubins, 2*nvbins), dtype=int)
    true_weight = np.zeros((nrbins, nubins, 2*nvbins), dtype=float)
    true_gam0 = np.zeros((nrbins, nubins, 2*nvbins), dtype=complex)
    true_gam1 = np.zeros((nrbins, nubins, 2*nvbins), dtype=complex)
    true_gam2 = np.zeros((nrbins, nubins, 2*nvbins), dtype=complex)
    true_gam3 = np.zeros((nrbins, nubins, 2*nvbins), dtype=complex)
    for i in range(ngal):
        for j in range(i+1,ngal):
            for k in range(j+1,ngal):
                d12 = np.sqrt((x[i]-x[j])**2 + (y[i]-y[j])**2)
                d23 = np.sqrt((x[j]-x[k])**2 + (y[j]-y[k])**2)
                d31 = np.sqrt((x[k]-x[i])**2 + (y[k]-y[i])**2)

                d3, d2, d1 = sorted([d12, d23, d31])
                rindex = np.floor(np.log(d2/min_sep) / bin_size).astype(int)
                if rindex < 0 or rindex >= nrbins: continue

                if [d1, d2, d3] == [d23, d31, d12]: ii,jj,kk = i,j,k
                elif [d1, d2, d3] == [d23, d12, d31]: ii,jj,kk = i,k,j
                elif [d1, d2, d3] == [d31, d12, d23]: ii,jj,kk = j,k,i
                elif [d1, d2, d3] == [d31, d23, d12]: ii,jj,kk = j,i,k
                elif [d1, d2, d3] == [d12, d23, d31]: ii,jj,kk = k,i,j
                elif [d1, d2, d3] == [d12, d31, d23]: ii,jj,kk = k,j,i
                else: assert False
                # Now use ii, jj, kk rather than i,j,k, to get the indices
                # that correspond to the points in the right order.

                u = d3/d2
                v = (d1-d2)/d3
                if (x[jj]-x[ii])*(y[kk]-y[ii]) < (x[kk]-x[ii])*(y[jj]-y[ii]):
                    v = -v

                uindex = np.floor(u / bin_size).astype(int)
                assert 0 <= uindex < nubins
                vindex = np.floor((v+1) / bin_size).astype(int)
                assert 0 <= vindex < 2*nvbins

                # Rotate shears to coordinates where line connecting to center is horizontal.
                cenx = (x[i] + x[j] + x[k])/3.
                ceny = (y[i] + y[j] + y[k])/3.

                expmialpha1 = (x[ii]-cenx) - 1j*(y[ii]-ceny)
                expmialpha1 /= abs(expmialpha1)
                expmialpha2 = (x[jj]-cenx) - 1j*(y[jj]-ceny)
                expmialpha2 /= abs(expmialpha2)
                expmialpha3 = (x[kk]-cenx) - 1j*(y[kk]-ceny)
                expmialpha3 /= abs(expmialpha3)

                www = w[i] * w[j] * w[k]
                g1p = (g1[ii] + 1j*g2[ii]) * expmialpha1**2
                g2p = (g1[jj] + 1j*g2[jj]) * expmialpha2**2
                g3p = (g1[kk] + 1j*g2[kk]) * expmialpha3**2
                gam0 = www * g1p * g2p * g3p
                gam1 = www * np.conjugate(g1p) * g2p * g3p
                gam2 = www * g1p * np.conjugate(g2p) * g3p
                gam3 = www * g1p * g2p * np.conjugate(g3p)

                true_ntri[rindex,uindex,vindex] += 1
                true_weight[rindex,uindex,vindex] += www
                true_gam0[rindex,uindex,vindex] += gam0
                true_gam1[rindex,uindex,vindex] += gam1
                true_gam2[rindex,uindex,vindex] += gam2
                true_gam3[rindex,uindex,vindex] += gam3

    pos = true_weight > 0
    true_gam0[pos] /= true_weight[pos]
    true_gam1[pos] /= true_weight[pos]
    true_gam2[pos] /= true_weight[pos]
    true_gam3[pos] /= true_weight[pos]

    np.testing.assert_array_equal(ggg.ntri, true_ntri)
    np.testing.assert_allclose(ggg.weight, true_weight, rtol=1.e-5, atol=1.e-8)
    np.testing.assert_allclose(ggg.gam0r, true_gam0.real, rtol=1.e-5, atol=1.e-8)
    np.testing.assert_allclose(ggg.gam0i, true_gam0.imag, rtol=1.e-5, atol=1.e-8)
    np.testing.assert_allclose(ggg.gam1r, true_gam1.real, rtol=1.e-5, atol=1.e-8)
    np.testing.assert_allclose(ggg.gam1i, true_gam1.imag, rtol=1.e-5, atol=1.e-8)
    np.testing.assert_allclose(ggg.gam2r, true_gam2.real, rtol=1.e-5, atol=1.e-8)
    np.testing.assert_allclose(ggg.gam2i, true_gam2.imag, rtol=1.e-5, atol=1.e-8)
    np.testing.assert_allclose(ggg.gam3r, true_gam3.real, rtol=1.e-5, atol=1.e-8)
    np.testing.assert_allclose(ggg.gam3i, true_gam3.imag, rtol=1.e-5, atol=1.e-8)
    np.testing.assert_allclose(ggg.gam0, true_gam0, rtol=1.e-5, atol=1.e-8)
    np.testing.assert_allclose(ggg.gam1, true_gam1, rtol=1.e-5, atol=1.e-8)
    np.testing.assert_allclose(ggg.gam2, true_gam2, rtol=1.e-5, atol=1.e-8)
    np.testing.assert_allclose(ggg.gam3, true_gam3, rtol=1.e-5, atol=1.e-8)

    try:
        import fitsio
    except ImportError:
        print('Skipping FITS tests, since fitsio is not installed')
        return

    # Check that running via the corr3 script works correctly.
    config = treecorr.config.read_config('configs/ggg_direct.yaml')
    cat.write(config['file_name'])
    treecorr.corr3(config)
    data = fitsio.read(config['ggg_file_name'])
    np.testing.assert_allclose(data['r_nom'], ggg.rnom.flatten())
    np.testing.assert_allclose(data['u_nom'], ggg.u.flatten())
    np.testing.assert_allclose(data['v_nom'], ggg.v.flatten())
    np.testing.assert_allclose(data['ntri'], ggg.ntri.flatten())
    np.testing.assert_allclose(data['weight'], ggg.weight.flatten())
    np.testing.assert_allclose(data['gam0r'], ggg.gam0r.flatten(), rtol=1.e-3)
    np.testing.assert_allclose(data['gam0i'], ggg.gam0i.flatten(), rtol=1.e-3)
    np.testing.assert_allclose(data['gam1r'], ggg.gam1r.flatten(), rtol=1.e-3)
    np.testing.assert_allclose(data['gam1i'], ggg.gam1i.flatten(), rtol=1.e-3)
    np.testing.assert_allclose(data['gam2r'], ggg.gam2r.flatten(), rtol=1.e-3)
    np.testing.assert_allclose(data['gam2i'], ggg.gam2i.flatten(), rtol=1.e-3)
    np.testing.assert_allclose(data['gam3r'], ggg.gam3r.flatten(), rtol=1.e-3)
    np.testing.assert_allclose(data['gam3i'], ggg.gam3i.flatten(), rtol=1.e-3)

    # Also check the "cross" calculation.  (Real cross doesn't work, but this should.)
    ggg = treecorr.GGGCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins, brute=True)
    ggg.process(cat, cat, cat, num_threads=2)
    np.testing.assert_array_equal(ggg.ntri, true_ntri)
    np.testing.assert_allclose(ggg.weight, true_weight, rtol=1.e-5, atol=1.e-8)
    np.testing.assert_allclose(ggg.gam0r, true_gam0.real, rtol=1.e-5, atol=1.e-8)
    np.testing.assert_allclose(ggg.gam0i, true_gam0.imag, rtol=1.e-5, atol=1.e-8)
    np.testing.assert_allclose(ggg.gam1r, true_gam1.real, rtol=1.e-5, atol=1.e-8)
    np.testing.assert_allclose(ggg.gam1i, true_gam1.imag, rtol=1.e-5, atol=1.e-8)
    np.testing.assert_allclose(ggg.gam2r, true_gam2.real, rtol=1.e-5, atol=1.e-8)
    np.testing.assert_allclose(ggg.gam2i, true_gam2.imag, rtol=1.e-5, atol=1.e-8)
    np.testing.assert_allclose(ggg.gam3r, true_gam3.real, rtol=1.e-5, atol=1.e-8)
    np.testing.assert_allclose(ggg.gam3i, true_gam3.imag, rtol=1.e-5, atol=1.e-8)

    config['file_name2'] = config['file_name']
    config['file_name3'] = config['file_name']
    treecorr.corr3(config)
    data = fitsio.read(config['ggg_file_name'])
    np.testing.assert_allclose(data['r_nom'], ggg.rnom.flatten())
    np.testing.assert_allclose(data['u_nom'], ggg.u.flatten())
    np.testing.assert_allclose(data['v_nom'], ggg.v.flatten())
    np.testing.assert_allclose(data['ntri'], ggg.ntri.flatten())
    np.testing.assert_allclose(data['weight'], ggg.weight.flatten())
    np.testing.assert_allclose(data['gam0r'], ggg.gam0r.flatten(), rtol=1.e-3)
    np.testing.assert_allclose(data['gam0i'], ggg.gam0i.flatten(), rtol=1.e-3)
    np.testing.assert_allclose(data['gam1r'], ggg.gam1r.flatten(), rtol=1.e-3)
    np.testing.assert_allclose(data['gam1i'], ggg.gam1i.flatten(), rtol=1.e-3)
    np.testing.assert_allclose(data['gam2r'], ggg.gam2r.flatten(), rtol=1.e-3)
    np.testing.assert_allclose(data['gam2i'], ggg.gam2i.flatten(), rtol=1.e-3)
    np.testing.assert_allclose(data['gam3r'], ggg.gam3r.flatten(), rtol=1.e-3)
    np.testing.assert_allclose(data['gam3i'], ggg.gam3i.flatten(), rtol=1.e-3)

    # Repeat with bin_slop = 0, since the code flow is different from brute=True.
    # And don't do any top-level recursion so we actually test not going to the leaves.
    ggg = treecorr.GGGCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins,
                                  bin_slop=0, max_top=0)
    ggg.process(cat)
    np.testing.assert_array_equal(ggg.ntri, true_ntri)
    np.testing.assert_allclose(ggg.weight, true_weight, rtol=1.e-5, atol=1.e-8)
    np.testing.assert_allclose(ggg.gam0r, true_gam0.real, rtol=1.e-5, atol=1.e-8)
    np.testing.assert_allclose(ggg.gam0i, true_gam0.imag, rtol=1.e-5, atol=1.e-4)
    np.testing.assert_allclose(ggg.gam1r, true_gam1.real, rtol=1.e-3, atol=1.e-4)
    np.testing.assert_allclose(ggg.gam1i, true_gam1.imag, rtol=1.e-3, atol=1.e-4)
    np.testing.assert_allclose(ggg.gam2r, true_gam2.real, rtol=1.e-3, atol=1.e-4)
    np.testing.assert_allclose(ggg.gam2i, true_gam2.imag, rtol=1.e-3, atol=1.e-4)
    np.testing.assert_allclose(ggg.gam3r, true_gam3.real, rtol=1.e-3, atol=1.e-4)
    np.testing.assert_allclose(ggg.gam3i, true_gam3.imag, rtol=1.e-3, atol=1.e-4)

    # Check a few basic operations with a GGGCorrelation object.
    do_pickle(ggg)

    ggg2 = ggg.copy()
    ggg2 += ggg
    np.testing.assert_allclose(ggg2.ntri, 2*ggg.ntri)
    np.testing.assert_allclose(ggg2.weight, 2*ggg.weight)
    np.testing.assert_allclose(ggg2.meand1, 2*ggg.meand1)
    np.testing.assert_allclose(ggg2.meand2, 2*ggg.meand2)
    np.testing.assert_allclose(ggg2.meand3, 2*ggg.meand3)
    np.testing.assert_allclose(ggg2.meanlogd1, 2*ggg.meanlogd1)
    np.testing.assert_allclose(ggg2.meanlogd2, 2*ggg.meanlogd2)
    np.testing.assert_allclose(ggg2.meanlogd3, 2*ggg.meanlogd3)
    np.testing.assert_allclose(ggg2.meanu, 2*ggg.meanu)
    np.testing.assert_allclose(ggg2.meanv, 2*ggg.meanv)
    np.testing.assert_allclose(ggg2.gam0r, 2*ggg.gam0r)
    np.testing.assert_allclose(ggg2.gam0i, 2*ggg.gam0i)
    np.testing.assert_allclose(ggg2.gam1r, 2*ggg.gam1r)
    np.testing.assert_allclose(ggg2.gam1i, 2*ggg.gam1i)
    np.testing.assert_allclose(ggg2.gam2r, 2*ggg.gam2r)
    np.testing.assert_allclose(ggg2.gam2i, 2*ggg.gam2i)
    np.testing.assert_allclose(ggg2.gam3r, 2*ggg.gam3r)
    np.testing.assert_allclose(ggg2.gam3i, 2*ggg.gam3i)

    ggg2.clear()
    ggg2 += ggg
    np.testing.assert_allclose(ggg2.ntri, ggg.ntri)
    np.testing.assert_allclose(ggg2.weight, ggg.weight)
    np.testing.assert_allclose(ggg2.meand1, ggg.meand1)
    np.testing.assert_allclose(ggg2.meand2, ggg.meand2)
    np.testing.assert_allclose(ggg2.meand3, ggg.meand3)
    np.testing.assert_allclose(ggg2.meanlogd1, ggg.meanlogd1)
    np.testing.assert_allclose(ggg2.meanlogd2, ggg.meanlogd2)
    np.testing.assert_allclose(ggg2.meanlogd3, ggg.meanlogd3)
    np.testing.assert_allclose(ggg2.meanu, ggg.meanu)
    np.testing.assert_allclose(ggg2.meanv, ggg.meanv)
    np.testing.assert_allclose(ggg2.gam0r, ggg.gam0r)
    np.testing.assert_allclose(ggg2.gam0i, ggg.gam0i)
    np.testing.assert_allclose(ggg2.gam1r, ggg.gam1r)
    np.testing.assert_allclose(ggg2.gam1i, ggg.gam1i)
    np.testing.assert_allclose(ggg2.gam2r, ggg.gam2r)
    np.testing.assert_allclose(ggg2.gam2i, ggg.gam2i)
    np.testing.assert_allclose(ggg2.gam3r, ggg.gam3r)
    np.testing.assert_allclose(ggg2.gam3i, ggg.gam3i)

    ascii_name = 'output/ggg_ascii.txt'
    ggg.write(ascii_name, precision=16)
    ggg3 = treecorr.GGGCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins)
    ggg3.read(ascii_name)
    np.testing.assert_allclose(ggg3.ntri, ggg.ntri)
    np.testing.assert_allclose(ggg3.weight, ggg.weight)
    np.testing.assert_allclose(ggg3.meand1, ggg.meand1)
    np.testing.assert_allclose(ggg3.meand2, ggg.meand2)
    np.testing.assert_allclose(ggg3.meand3, ggg.meand3)
    np.testing.assert_allclose(ggg3.meanlogd1, ggg.meanlogd1)
    np.testing.assert_allclose(ggg3.meanlogd2, ggg.meanlogd2)
    np.testing.assert_allclose(ggg3.meanlogd3, ggg.meanlogd3)
    np.testing.assert_allclose(ggg3.meanu, ggg.meanu)
    np.testing.assert_allclose(ggg3.meanv, ggg.meanv)
    np.testing.assert_allclose(ggg3.gam0r, ggg.gam0r)
    np.testing.assert_allclose(ggg3.gam0i, ggg.gam0i)
    np.testing.assert_allclose(ggg3.gam1r, ggg.gam1r)
    np.testing.assert_allclose(ggg3.gam1i, ggg.gam1i)
    np.testing.assert_allclose(ggg3.gam2r, ggg.gam2r)
    np.testing.assert_allclose(ggg3.gam2i, ggg.gam2i)
    np.testing.assert_allclose(ggg3.gam3r, ggg.gam3r)
    np.testing.assert_allclose(ggg3.gam3i, ggg.gam3i)

    fits_name = 'output/ggg_fits.fits'
    ggg.write(fits_name)
    ggg4 = treecorr.GGGCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins)
    ggg4.read(fits_name)
    np.testing.assert_allclose(ggg4.ntri, ggg.ntri)
    np.testing.assert_allclose(ggg4.weight, ggg.weight)
    np.testing.assert_allclose(ggg4.meand1, ggg.meand1)
    np.testing.assert_allclose(ggg4.meand2, ggg.meand2)
    np.testing.assert_allclose(ggg4.meand3, ggg.meand3)
    np.testing.assert_allclose(ggg4.meanlogd1, ggg.meanlogd1)
    np.testing.assert_allclose(ggg4.meanlogd2, ggg.meanlogd2)
    np.testing.assert_allclose(ggg4.meanlogd3, ggg.meanlogd3)
    np.testing.assert_allclose(ggg4.meanu, ggg.meanu)
    np.testing.assert_allclose(ggg4.meanv, ggg.meanv)
    np.testing.assert_allclose(ggg4.gam0r, ggg.gam0r)
    np.testing.assert_allclose(ggg4.gam0i, ggg.gam0i)
    np.testing.assert_allclose(ggg4.gam1r, ggg.gam1r)
    np.testing.assert_allclose(ggg4.gam1i, ggg.gam1i)
    np.testing.assert_allclose(ggg4.gam2r, ggg.gam2r)
    np.testing.assert_allclose(ggg4.gam2i, ggg.gam2i)
    np.testing.assert_allclose(ggg4.gam3r, ggg.gam3r)
    np.testing.assert_allclose(ggg4.gam3i, ggg.gam3i)

    with assert_raises(TypeError):
        ggg2 += config
    ggg5 = treecorr.GGGCorrelation(min_sep=min_sep/2, bin_size=bin_size, nbins=nrbins)
    with assert_raises(ValueError):
        ggg2 += ggg5
    ggg6 = treecorr.GGGCorrelation(min_sep=min_sep, bin_size=bin_size/2, nbins=nrbins)
    with assert_raises(ValueError):
        ggg2 += ggg6
    ggg7 = treecorr.GGGCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins*2)
    with assert_raises(ValueError):
        ggg2 += ggg7
    ggg8 = treecorr.GGGCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins,
                                   min_u=0.1)
    with assert_raises(ValueError):
        ggg2 += ggg8
    ggg0 = treecorr.GGGCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins,
                                   max_u=0.1)
    with assert_raises(ValueError):
        ggg2 += ggg0
    ggg10 = treecorr.GGGCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins,
                                   nubins=nrbins*2)
    with assert_raises(ValueError):
        ggg2 += ggg10
    ggg11 = treecorr.GGGCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins,
                                   min_v=0.1)
    with assert_raises(ValueError):
        ggg2 += ggg11
    ggg12 = treecorr.GGGCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins,
                                   max_v=0.1)
    with assert_raises(ValueError):
        ggg2 += ggg12
    ggg13 = treecorr.GGGCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins,
                                   nvbins=nrbins*2)
    with assert_raises(ValueError):
        ggg2 += ggg13

    # It is currently not implemented to give only cat2 or cat3
    with assert_raises(NotImplementedError):
        ggg.process(cat, cat2=cat)
    with assert_raises(NotImplementedError):
        ggg.process(cat, cat3=cat)
    with assert_raises(NotImplementedError):
        ggg.process_cross21(cat, cat)
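
A note on the expmialpha factors in the loop above: they implement the standard spin-2 projection, rotating each shear into the frame defined by the line from the triangle centroid to that vertex. The following tiny check (illustrative only, not from the test suite) confirms that multiplying by expmialpha**2 is the same as rotating the shear by -2*alpha:

import numpy as np

# A vertex at position angle alpha from the centroid, carrying some shear.
alpha = 0.7
g = 0.03 + 0.01j

# Unit phase exp(-i*alpha), built from the offset vector as in the test above.
offset = np.cos(alpha) + 1j*np.sin(alpha)
expmialpha = np.conj(offset) / np.abs(offset)

# For a spin-2 quantity, multiplying by expmialpha**2 rotates it by -2*alpha,
# i.e. it expresses the shear relative to the centroid direction.
assert np.isclose(g * expmialpha**2, g * np.exp(-2j*alpha))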
Code example #22
File: test_nk.py  Project: rmjarvis/TreeCorr
def test_direct():
    # If the catalogs are small enough, we can do a direct calculation to see if it comes out right.
    # This should exactly match the treecorr result if brute=True.

    ngal = 200
    s = 10.
    rng = np.random.RandomState(8675309)
    x1 = rng.normal(0,s, (ngal,) )
    y1 = rng.normal(0,s, (ngal,) )
    w1 = rng.random_sample(ngal)

    x2 = rng.normal(0,s, (ngal,) )
    y2 = rng.normal(0,s, (ngal,) )
    w2 = rng.random_sample(ngal)
    k2 = rng.normal(0,3, (ngal,) )

    cat1 = treecorr.Catalog(x=x1, y=y1, w=w1)
    cat2 = treecorr.Catalog(x=x2, y=y2, w=w2, k=k2)

    min_sep = 1.
    max_sep = 50.
    nbins = 50
    bin_size = np.log(max_sep/min_sep) / nbins
    nk = treecorr.NKCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, brute=True)
    nk.process(cat1, cat2)

    true_npairs = np.zeros(nbins, dtype=int)
    true_weight = np.zeros(nbins, dtype=float)
    true_xi = np.zeros(nbins, dtype=float)
    for i in range(ngal):
        # It's hard to do all the pairs at once with numpy operations (although maybe possible).
        # But we can at least do all the pairs for each entry in cat1 at once with arrays.
        rsq = (x1[i]-x2)**2 + (y1[i]-y2)**2
        r = np.sqrt(rsq)
        logr = np.log(r)

        ww = w1[i] * w2
        xi = ww * k2

        index = np.floor(np.log(r/min_sep) / bin_size).astype(int)
        mask = (index >= 0) & (index < nbins)
        np.add.at(true_npairs, index[mask], 1)
        np.add.at(true_weight, index[mask], ww[mask])
        np.add.at(true_xi, index[mask], xi[mask])

    true_xi /= true_weight

    print('true_npairs = ',true_npairs)
    print('diff = ',nk.npairs - true_npairs)
    np.testing.assert_array_equal(nk.npairs, true_npairs)

    print('true_weight = ',true_weight)
    print('diff = ',nk.weight - true_weight)
    np.testing.assert_allclose(nk.weight, true_weight, rtol=1.e-5, atol=1.e-8)

    print('true_xi = ',true_xi)
    print('nk.xi = ',nk.xi)
    np.testing.assert_allclose(nk.xi, true_xi, rtol=1.e-4, atol=1.e-8)

    try:
        import fitsio
    except ImportError:
        print('Skipping FITS tests, since fitsio is not installed')
        return

    # Check that running via the corr2 script works correctly.
    config = treecorr.config.read_config('configs/nk_direct.yaml')
    cat1.write(config['file_name'])
    cat2.write(config['file_name2'])
    treecorr.corr2(config)
    data = fitsio.read(config['nk_file_name'])
    np.testing.assert_allclose(data['r_nom'], nk.rnom)
    np.testing.assert_allclose(data['npairs'], nk.npairs)
    np.testing.assert_allclose(data['weight'], nk.weight)
    np.testing.assert_allclose(data['kappa'], nk.xi, rtol=1.e-3)

    # Invalid with only one file_name
    del config['file_name2']
    with assert_raises(TypeError):
        treecorr.corr2(config)
    config['file_name2'] = 'data/nk_direct_cat2.fits'
    # Invalid to request compensated if no rand_file
    config['nk_statistic'] = 'compensated'
    with assert_raises(TypeError):
        treecorr.corr2(config)

    # Repeat with bin_slop = 0, since the code flow is different from brute=True.
    # And don't do any top-level recursion so we actually test not going to the leaves.
    nk = treecorr.NKCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, bin_slop=0,
                                max_top=0)
    nk.process(cat1, cat2)
    np.testing.assert_array_equal(nk.npairs, true_npairs)
    np.testing.assert_allclose(nk.weight, true_weight, rtol=1.e-5, atol=1.e-8)
    np.testing.assert_allclose(nk.xi, true_xi, rtol=1.e-4, atol=1.e-8)

    # Check a few basic operations with an NKCorrelation object.
    do_pickle(nk)

    nk2 = nk.copy()
    nk2 += nk
    np.testing.assert_allclose(nk2.npairs, 2*nk.npairs)
    np.testing.assert_allclose(nk2.weight, 2*nk.weight)
    np.testing.assert_allclose(nk2.meanr, 2*nk.meanr)
    np.testing.assert_allclose(nk2.meanlogr, 2*nk.meanlogr)
    np.testing.assert_allclose(nk2.xi, 2*nk.xi)

    nk2.clear()
    nk2 += nk
    np.testing.assert_allclose(nk2.npairs, nk.npairs)
    np.testing.assert_allclose(nk2.weight, nk.weight)
    np.testing.assert_allclose(nk2.meanr, nk.meanr)
    np.testing.assert_allclose(nk2.meanlogr, nk.meanlogr)
    np.testing.assert_allclose(nk2.xi, nk.xi)

    ascii_name = 'output/nk_ascii.txt'
    nk.write(ascii_name, precision=16)
    nk3 = treecorr.NKCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins)
    nk3.read(ascii_name)
    np.testing.assert_allclose(nk3.npairs, nk.npairs)
    np.testing.assert_allclose(nk3.weight, nk.weight)
    np.testing.assert_allclose(nk3.meanr, nk.meanr)
    np.testing.assert_allclose(nk3.meanlogr, nk.meanlogr)
    np.testing.assert_allclose(nk3.xi, nk.xi)

    with assert_raises(TypeError):
        nk2 += config
    nk4 = treecorr.NKCorrelation(min_sep=min_sep/2, max_sep=max_sep, nbins=nbins)
    with assert_raises(ValueError):
        nk2 += nk4
    nk5 = treecorr.NKCorrelation(min_sep=min_sep, max_sep=max_sep*2, nbins=nbins)
    with assert_raises(ValueError):
        nk2 += nk5
    nk6 = treecorr.NKCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins*2)
    with assert_raises(ValueError):
        nk2 += nk6

    fits_name = 'output/nk_fits.fits'
    nk.write(fits_name)
    nk4 = treecorr.NKCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins)
    nk4.read(fits_name)
    np.testing.assert_allclose(nk4.npairs, nk.npairs)
    np.testing.assert_allclose(nk4.weight, nk.weight)
    np.testing.assert_allclose(nk4.meanr, nk.meanr)
    np.testing.assert_allclose(nk4.meanlogr, nk.meanlogr)
    np.testing.assert_allclose(nk4.xi, nk.xi)
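
A small aside on the np.add.at calls in the brute-force loop above: unbuffered addition is needed because plain fancy-index += only counts each repeated bin index once. A minimal, self-contained illustration (not TreeCorr-specific):

import numpy as np

counts = np.zeros(3)
index = np.array([0, 1, 1, 2, 2, 2])

# Buffered fancy indexing silently collapses repeated indices...
counts[index] += 1
print(counts)            # [1. 1. 1.]

# ...whereas np.add.at accumulates every occurrence, which is what the
# per-bin npairs/weight/xi sums in the checks above require.
counts[:] = 0
np.add.at(counts, index, 1)
print(counts)            # [1. 2. 3.]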
Code example #23
File: test_config.py  Project: rmjarvis/TreeCorr
def test_check():
    """Test checking the validity of config values.
    """
    # First a simple case with no conflicts
    config1 = treecorr.read_config('configs/kg.yaml')
    valid_params = treecorr.corr2_valid_params
    config2 = treecorr.config.check_config(config1.copy(), valid_params)

    # Just check a few values
    assert config2['x_col'] == '1'
    assert config2['k_col'] == ['3', '0']
    assert config2['verbose'] == 1
    assert config2['kg_file_name'] == 'output/kg.out'

    # Will also have other parameters filled from the valid_params dict
    for key in config2:
        assert key in valid_params
        if key in config1:
            if isinstance(config1[key], list):
                assert [str(v) for v in config2[key]] == [str(v) for v in config1[key]]
            else:
                assert config2[key] == config1[key] or str(config2[key]) == str(config1[key])
        else:
            assert config2[key] == valid_params[key][2]

    # Check list of bool
    config1['flip_g1'] = [True, 0]
    config2 = treecorr.config.check_config(config1.copy(), valid_params)
    assert config2['flip_g1'] == [True, False]

    # Longer names are allowed
    config1['x_units'] = 'arcminutes'
    config1['y_units'] = 'arcminute'
    config2 = treecorr.config.check_config(config1.copy(), valid_params)
    assert config2['x_units'] == 'arcmin'
    assert config2['y_units'] == 'arcmin'

    # Also other aliases, but you need to list them explicitly.
    config1['reverse_g1'] = True
    with assert_raises(TypeError):
        treecorr.config.check_config(config1.copy(), valid_params)
    config2 = treecorr.config.check_config(config1.copy(), valid_params,
                                           aliases={'reverse_g1' : 'flip_g1'})
    assert config2['flip_g1'] == True
    assert 'reverse_g1' not in config2
    del config1['reverse_g1']

    # Invalid values raise errors
    config1['verbose'] = -1
    with assert_raises(ValueError):
        treecorr.config.check_config(config1.copy(), valid_params)
    config1['verbose'] = 1
    config1['metric'] = 'hyperbolic'
    with assert_raises(ValueError):
        treecorr.config.check_config(config1.copy(), valid_params)
    del config1['metric']

    # With a logger, aliases emit a warning.
    config1['n2_file_name'] = 'output/n2.out'
    with CaptureLog() as cl:
        config2 = treecorr.config.check_config(config1.copy(), valid_params, logger=cl.logger,
                                               aliases={'n2_file_name' : 'nn_file_name'})
    assert "The parameter n2_file_name is deprecated." in cl.output
    assert "You should use nn_file_name instead." in cl.output

    # corr2 has a list of standard aliases
    # It is currently empty, but let's mock it up to test the functionality.
    if sys.version_info < (3,): return  # mock only available on python 3
    from unittest import mock
    with mock.patch('treecorr.corr2_aliases', {'n2_file_name' : 'nn_file_name'}):
        config2 = treecorr.config.check_config(config1.copy(), valid_params,
                                               aliases=treecorr.corr2_aliases)
    assert 'n2_file_name' not in config2
    assert config2['nn_file_name'] == 'output/n2.out'
    del config1['n2_file_name']
Code example #24
File: test_catalog.py  Project: yuchenpang/TreeCorr
def test_lru():
    print('Start test_lru')
    f = lambda x: x+1
    size = 10
    # Test that a cache of the correct size gets created
    cache = treecorr.util.LRU_Cache(f, maxsize=size)
    assert len(cache.cache) == size
    # Insert f(0) = 1 into cache and check that we can get it back
    assert cache(0) == f(0)
    assert cache(0) == f(0)

    # Manually manipulate cache so we can check for hit
    cache.cache[(0,)][3] = 2
    assert cache(0) == 2

    # Insert (and check) 1 thru size into cache.  This should bump out the (0,).
    for i in range(1, size+1):
        assert cache(i) == f(i)
    assert (0,) not in cache.cache

    # Test non-destructive cache expansion
    newsize = 20
    cache.resize(newsize)
    for i in range(1, size+1):
        assert (i,) in cache.cache
        assert cache(i) == f(i)
    assert len(cache.cache) == 20

    # Add new items until the (1,) gets bumped
    for i in range(size+1, newsize+2):
        assert cache(i) == f(i)
    assert (1,) not in cache.cache

    # "Resize" to same size does nothing.
    cache.resize(newsize)
    assert len(cache.cache) == 20
    assert (1,) not in cache.cache
    for i in range(2, newsize+2):
        assert (i,) in cache.cache

    # Test mostly non-destructive cache contraction.
    # Already bumped (0,) and (1,), so (2,) should be the first to get bumped
    for i in range(newsize-1, size, -1):
        assert (newsize - (i - 1),) in cache.cache
        cache.resize(i)
        assert (newsize - (i - 1),) not in cache.cache

    # Check that it works with size=0
    cache.resize(0)
    print('cache.cache = ',cache.cache)
    print('cache.root = ',cache.root)
    assert cache.root[0] == cache.root
    assert cache.root[1] == cache.root
    for i in range(10):
        assert cache(i) == f(i)
    print('=> cache.cache = ',cache.cache)
    print('=> cache.root = ',cache.root)
    assert cache.root[0] == cache.root
    assert cache.root[1] == cache.root

    assert_raises(ValueError, cache.resize, -20)
    print('Done test_lru')
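
For readers unfamiliar with the pattern being tested: treecorr.util.LRU_Cache memoizes a function with bounded storage, much like the standard-library decorator. The first few assertions above could be mimicked with functools.lru_cache (illustration only; the TreeCorr class additionally supports resize, which the stdlib version does not):

from functools import lru_cache

@lru_cache(maxsize=10)
def f(x):
    return x + 1

assert f(0) == 1                      # miss: computed and stored
assert f(0) == 1                      # hit: served from the cache
info = f.cache_info()
assert info.hits == 1 and info.misses == 1 and info.currsize == 1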
Code example #25
File: test_config.py  Project: rmjarvis/TreeCorr
def test_read():
    """Test different ways of reading a config file.
    """
    # The config files for nn_list are designed to hit all the major features here.
    # Tests that use these config files are in test_nn.py:test_list()

    config1 = treecorr.config.read_config('configs/nn_list1.yaml')
    assert config1 == {
        'file_list': 'data/nn_list_data_files.txt',
        'rand_file_list': 'data/nn_list_rand_files.txt',
        'x_col': 1,
        'y_col': 2,
        'verbose': 1,
        'min_sep': 1.,
        'max_sep': 25.,
        'bin_size': 0.10,
        'nn_file_name': 'output/nn_list1.out',
        'nn_statistic': 'simple',
    }

    config2 = treecorr.config.read_config('configs/nn_list2.json')
    assert config2 == {
        'file_list': 'data/nn_list_data_files.txt',
        'rand_file_name': 'data/nn_list_randx.dat',
        'x_col': 1,
        'y_col': 2,
        'verbose': 1,
        'min_sep': 1.,
        'max_sep': 25.,
        'bin_size': 0.10,
        'nn_file_name': 'output/nn_list2.out',
        'nn_statistic': 'simple',
    }

    config3 = treecorr.config.read_config('configs/nn_list3.params')
    assert config3 == {
        'file_name': 'data/nn_list_datax.dat',
        'rand_file_name': ['data/nn_list_rand0.dat', 'data/nn_list_rand1.dat',
                           'data/nn_list_rand2.dat'],
        'x_col': '1',
        'y_col': '2',
        'verbose': '1',
        'min_sep': '1.',
        'max_sep': '25.',
        'bin_size': '0.10',
        'nn_file_name': 'output/nn_list3.out',
        'nn_statistic': 'simple',
    }

    config4 = treecorr.config.read_config('configs/nn_list4.config', file_type='yaml')
    assert config4 == {
        'file_list': 'data/nn_list_data_files.txt',
        'rand_file_list': 'data/nn_list_rand_files.txt',
        'file_list2': 'data/nn_list_data_files.txt',
        'rand_file_name2': 'data/nn_list_randx.dat',
        'x_col': 1,
        'y_col': 2,
        'verbose': 1,
        'min_sep': 1.,
        'max_sep': 25.,
        'bin_size': 0.10,
        'nn_file_name': 'output/nn_list4.out',
        'nn_statistic': 'simple',
    }

    config5 = treecorr.config.read_config('configs/nn_list5.config', file_type='json')
    assert config5 == {
        'file_list': 'data/nn_list_data_files.txt',
        'rand_file_name': 'data/nn_list_randx.dat',
        'file_name2': 'data/nn_list_datax.dat',
        'rand_file_list2': 'data/nn_list_rand_files.txt',
        'x_col': 1,
        'y_col': 2,
        'verbose': 1,
        'min_sep': 1.,
        'max_sep': 25.,
        'bin_size': 0.10,
        'nn_file_name': 'output/nn_list5.out',
        'nn_statistic': 'simple',
    }

    config6 = treecorr.config.read_config('configs/nn_list6.config', file_type='params')
    assert config6 == {
        'file_name': ['data/nn_list_data0.dat', 'data/nn_list_data1.dat', 'data/nn_list_data2.dat'],
        'rand_file_name': ['data/nn_list_rand0.dat', 'data/nn_list_rand1.dat', 'data/nn_list_rand2.dat'],
        'file_list2': 'data/nn_list_data_files.txt',
        'rand_file_list2': 'data/nn_list_rand_files.txt',
        'x_col': '1',
        'y_col': '2',
        'verbose': '1',
        'min_sep': '1.',
        'max_sep': '25.',
        'bin_size': '0.10',
        'nn_file_name': 'nn_list6.out',
        'nn_statistic': 'simple',
    }

    with assert_raises(ValueError):
        treecorr.config.read_config('configs/nn_list6.config', file_type='simple')
    with assert_raises(ValueError):
        treecorr.config.read_config('configs/nn_list6.config')
Code example #26
File: test_index.py  Project: rmjarvis/TreeCorr
def test_get_near():

    nobj = 100000
    rng = np.random.RandomState(8675309)
    x = rng.random_sample(nobj)   # All from 0..1
    y = rng.random_sample(nobj)
    z = rng.random_sample(nobj)
    w = rng.random_sample(nobj)
    use = rng.randint(30, size=nobj).astype(float)
    w[use == 0] = 0

    x0 = 0.5
    y0 = 0.8
    z0 = 0.3
    sep = 0.03

    # Put a small cluster inside our search radius
    x[100:130] = rng.normal(x0+0.03, 0.001, 30)
    y[100:130] = rng.normal(y0-0.02, 0.001, 30)
    z[100:130] = rng.normal(z0+0.01, 0.001, 30)

    # Put another small cluster right on the edge of our search radius
    x[500:550] = rng.normal(x0+sep, 0.001, 50)
    y[500:550] = rng.normal(y0, 0.001, 50)
    z[500:550] = rng.normal(z0, 0.001, 50)

    # Start with flat coords

    cat = treecorr.Catalog(x=x, y=y, w=w, g1=w, g2=w, k=w)
    field = cat.getNField()

    t0 = time.time()
    i1 = np.where(((x-x0)**2 + (y-y0)**2 < sep**2) & (w > 0))[0]
    t1 = time.time()
    i2 = field.get_near(x=x0, y=y0, sep=sep)
    t2 = time.time()
    i3 = field.get_near(x0, y0, sep)
    t3 = time.time()
    print('i1 = ',i1[:20],'  time = ',t1-t0)
    print('i2 = ',i2[:20],'  time = ',t2-t1)
    print('i3 = ',i3[:20],'  time = ',t3-t2)
    np.testing.assert_array_equal(i2, i1)
    np.testing.assert_array_equal(i3, i1)
    #assert t2-t1 < t1-t0  # These don't always pass.  The tree version is usually a bit faster,
    #assert t3-t2 < t1-t0  # but not by much and not always.  So don't require it in unit test.

    # Invalid ways to specify x,y,sep
    assert_raises(TypeError, field.get_near)
    assert_raises(TypeError, field.get_near, x0)
    assert_raises(TypeError, field.get_near, x0, y0)
    assert_raises(TypeError, field.get_near, x0, y0, sep, sep)
    assert_raises(TypeError, field.get_near, x=x0, y=y0)
    assert_raises(TypeError, field.get_near, x=x0, sep=sep)
    assert_raises(TypeError, field.get_near, y=y0, sep=sep)
    assert_raises(TypeError, field.get_near, x=x0, y=y0, z=x0, sep=sep)
    assert_raises(TypeError, field.get_near, ra=x0, dec=y0, sep=sep)
    assert_raises(TypeError, field.get_near, coord.CelestialCoord.from_xyz(x0,y0,x0), sep=sep)

    # Check G and K
    kfield = cat.getKField(min_size=0.01, max_size=sep, min_top=5)
    gfield = cat.getGField(min_size=0.05, max_size=sep, max_top=2)
    i4 = kfield.get_near(x0, y0, sep=sep)
    i5 = gfield.get_near(x0, y0, sep=sep)
    np.testing.assert_array_equal(i4, i1)
    np.testing.assert_array_equal(i5, i1)

    # 3D coords

    r = np.sqrt(x*x+y*y+z*z)
    dec = np.arcsin(z/r) * coord.radians / coord.degrees
    ra = np.arctan2(y,x) * coord.radians / coord.degrees

    cat = treecorr.Catalog(ra=ra, dec=dec, r=r, ra_units='deg', dec_units='deg',
                           w=w, g1=w, g2=w, k=w)
    field = cat.getNField()

    t0 = time.time()
    i1 = np.where(((x-x0)**2 + (y-y0)**2 + (z-z0)**2 < sep**2) & (w > 0))[0]
    t1 = time.time()
    i2 = field.get_near(x=x0, y=y0, z=z0, sep=sep)
    t2 = time.time()
    c = coord.CelestialCoord.from_xyz(x0,y0,z0)
    r0 = np.sqrt(x0**2+y0**2+z0**2)
    i3 = field.get_near(ra=c.ra, dec=c.dec, r=r0, sep=sep)
    t3 = time.time()
    print('i1 = ',i1[:20],'  time = ',t1-t0)
    print('i2 = ',i2[:20],'  time = ',t2-t1)
    print('i3 = ',i3[:20],'  time = ',t3-t2)
    np.testing.assert_array_equal(i2, i1)
    np.testing.assert_array_equal(i3, i1)
    #assert t2-t1 < t1-t0
    #assert t3-t2 < t1-t0

    # Invalid ways to specify x,y,z,sep
    ra0 = c.ra / coord.degrees
    dec0 = c.dec / coord.degrees
    assert_raises(TypeError, field.get_near)
    assert_raises(TypeError, field.get_near, x0)
    assert_raises(TypeError, field.get_near, x0, y0)
    assert_raises(TypeError, field.get_near, x0, y0, z0)
    assert_raises(TypeError, field.get_near, x=x0)
    assert_raises(TypeError, field.get_near, x=x0, y=y0)
    assert_raises(TypeError, field.get_near, x=x0, y=y0, z=z0)
    assert_raises(TypeError, field.get_near, ra=ra0)
    assert_raises(TypeError, field.get_near, ra=ra0, dec=dec0)
    assert_raises(TypeError, field.get_near, ra=ra0, dec=dec0, r=r0)
    assert_raises(TypeError, field.get_near, ra0, dec0, sep, ra_units='deg')
    assert_raises(TypeError, field.get_near, ra0, dec0, sep, dec_units='deg')
    assert_raises(TypeError, field.get_near, ra0, dec0, sep, sep_units='rad')
    assert_raises(TypeError, field.get_near, ra0, dec0, sep, ra_units='deg', dec_units='deg')
    assert_raises(TypeError, field.get_near, ra0, dec0, sep, ra_units='deg', sep_units='rad')
    assert_raises(TypeError, field.get_near, ra0, dec0, sep, dec_units='deg', sep_units='rad')
    assert_raises(TypeError, field.get_near, ra0, dec0, sep, sep,
                  ra_units='deg', dec_units='deg', sep_units='rad')
    assert_raises(TypeError, field.get_near, ra=ra0)
    assert_raises(TypeError, field.get_near, dec=dec0)
    assert_raises(TypeError, field.get_near, ra=ra0, dec=dec0)
    assert_raises(TypeError, field.get_near, ra=ra0, dec=dec0, sep=sep)
    assert_raises(TypeError, field.get_near, ra=ra0, dec=dec0, sep=sep, ra_units='deg')
    assert_raises(TypeError, field.get_near, ra=ra0, dec=dec0, sep=sep, dec_units='deg')
    assert_raises(TypeError, field.get_near, ra=ra0, dec=dec0, sep=sep,
                  ra_units='deg', dec_units='deg')
    assert_raises(TypeError, field.get_near, ra, dec=dec, sep=sep,
                  ra_units='deg', dec_units='deg', sep_units='rad')
    assert_raises(TypeError, field.get_near, ra, dec=dec, sep=sep,
                  ra_units='deg', dec_units='deg', sep_units='rad')
    assert_raises(TypeError, field.get_near, c)
    assert_raises(TypeError, field.get_near, c, r=r0)
    assert_raises(TypeError, field.get_near, c, r=r0, sep=sep, sep_units='rad')
    assert_raises(TypeError, field.get_near, c, r0)
    assert_raises(TypeError, field.get_near, c, r0, sep=sep, sep_units='rad')
    assert_raises(TypeError, field.get_near, c, r0, sep, sep_units='rad')
    assert_raises(TypeError, field.get_near, c, r0, sep, 'deg')
    assert_raises(TypeError, field.get_near, c, r0, sep, sep_unit='deg')
    assert_raises(TypeError, field.get_near, c, r0, sep, sep_units='deg',
                  ra_units='deg', dec_units='deg')
    assert_raises(TypeError, field.get_near, c.ra)
    assert_raises(TypeError, field.get_near, c.ra, c.dec)
    assert_raises(TypeError, field.get_near, c.ra, c.dec, r=r0)
    assert_raises(TypeError, field.get_near, c.ra, c.dec, r0)
    assert_raises(TypeError, field.get_near, c.ra, c.dec, r0, sep=sep, extra=4)
    assert_raises(TypeError, field.get_near, c.ra, c.dec, r0, sep, extra=4)
    assert_raises(TypeError, field.get_near, c.ra, c.dec, r0, sep, sep)

    # Check G and K
    kfield = cat.getKField(min_size=0.01, max_size=sep, min_top=5)
    gfield = cat.getGField(min_size=0.05, max_size=sep, max_top=2)
    i4 = kfield.get_near(c, r0, sep)
    i5 = gfield.get_near(c.ra, c.dec, r0, sep=sep)
    np.testing.assert_array_equal(i4, i1)
    np.testing.assert_array_equal(i5, i1)

    # Spherical
    cat = treecorr.Catalog(ra=ra, dec=dec, ra_units='deg', dec_units='deg',
                           w=w, g1=w, g2=w, k=w)
    field = cat.getNField()

    x /= r
    y /= r
    z /= r
    c = coord.CelestialCoord.from_xyz(x0,y0,z0)
    x0,y0,z0 = c.get_xyz()
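    # On the unit sphere, two points separated by an angle theta lie a chord
    # distance 2*sin(theta/2) apart, so an angular search of radius sep is
    # equivalent to a 3-d search within this chord length.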
    r0 = 2 * np.sin(sep / 2)  # length of chord subtending sep radians.
    t0 = time.time()
    i1 = np.where(((x-x0)**2 + (y-y0)**2 + (z-z0)**2 < r0**2) & (w > 0))[0]
    t1 = time.time()
    i2 = field.get_near(c, sep=sep, sep_units='rad')
    t2 = time.time()
    i3 = field.get_near(ra=c.ra.rad, dec=c.dec.rad, ra_units='radians', dec_units='radians',
                        sep=sep * coord.radians)
    t3 = time.time()
    print('i1 = ',i1[:20],'  time = ',t1-t0)
    print('i2 = ',i2[:20],'  time = ',t2-t1)
    print('i3 = ',i3[:20],'  time = ',t3-t2)
    np.testing.assert_array_equal(i2, i1)
    np.testing.assert_array_equal(i3, i1)
    #assert t2-t1 < t1-t0
    #assert t3-t2 < t1-t0

    # Invalid ways to specify ra,dec,sep
    assert_raises(TypeError, field.get_near)
    assert_raises(TypeError, field.get_near, ra0)
    assert_raises(TypeError, field.get_near, ra0, dec0)
    assert_raises(TypeError, field.get_near, ra0, dec0, sep)
    assert_raises(TypeError, field.get_near, ra0, dec0, sep, ra_units='deg')
    assert_raises(TypeError, field.get_near, ra0, dec0, sep, dec_units='deg')
    assert_raises(TypeError, field.get_near, ra0, dec0, sep, sep_units='rad')
    assert_raises(TypeError, field.get_near, ra0, dec0, sep, ra_units='deg', dec_units='deg')
    assert_raises(TypeError, field.get_near, ra0, dec0, sep, ra_units='deg', sep_units='rad')
    assert_raises(TypeError, field.get_near, ra0, dec0, sep, dec_units='deg', sep_units='rad')
    assert_raises(TypeError, field.get_near, ra0, dec0, sep, sep,
                  ra_units='deg', dec_units='deg', sep_units='rad')
    assert_raises(TypeError, field.get_near, ra=ra0)
    assert_raises(TypeError, field.get_near, dec=dec0)
    assert_raises(TypeError, field.get_near, ra=ra0, dec=dec0)
    assert_raises(TypeError, field.get_near, ra=ra0, dec=dec0, sep=sep)
    assert_raises(TypeError, field.get_near, ra=ra0, dec=dec0, sep=sep, ra_units='deg')
    assert_raises(TypeError, field.get_near, ra=ra0, dec=dec0, sep=sep, dec_units='deg')
    assert_raises(TypeError, field.get_near, ra=ra0, dec=dec0, sep=sep,
                  ra_units='deg', dec_units='deg')
    assert_raises(TypeError, field.get_near, ra0, dec=dec0, sep=sep,
                  ra_units='deg', dec_units='deg', sep_units='rad')
    assert_raises(TypeError, field.get_near, c)
    assert_raises(TypeError, field.get_near, c, sep)
    assert_raises(TypeError, field.get_near, c, sep, 'deg')
    assert_raises(TypeError, field.get_near, c, sep, sep_unit='deg')
    assert_raises(TypeError, field.get_near, c, sep, sep_units='deg',
                  ra_units='deg', dec_units='deg')

    # Check G and K with other allowed argument patterns.
    kfield = cat.getKField(min_size=0.01, max_size=sep, min_top=5)
    gfield = cat.getGField(min_size=0.05, max_size=sep, max_top=2)
    i4 = gfield.get_near(c, sep*coord.radians/coord.degrees, sep_units='deg')
    i5 = kfield.get_near(c.ra, c.dec, sep*coord.radians)
    np.testing.assert_array_equal(i4, i1)
    np.testing.assert_array_equal(i5, i1)
Code example #27
0
File: test_config.py Project: rmjarvis/TreeCorr
def test_util():
    # Test some error handling in utility functions that shouldn't be possible to get to
    # in normal running, so we need to call things explicitly to get the coverage.

    # First some I/O sanity checks
    a = np.array([1,2,3])
    b = np.array([4,5,6])
    file_name = 'junk.out'
    with assert_raises(ValueError):
        treecorr.util.gen_write(file_name, ['a', 'b'], [a])
    with assert_raises(ValueError):
        treecorr.util.gen_write(file_name, ['a', 'b'], [a, b, a])
    with assert_raises(ValueError):
        treecorr.util.gen_write(file_name, ['a'], [a, b])
    with assert_raises(ValueError):
        treecorr.util.gen_write(file_name, [], [])
    with assert_raises(ValueError):
        treecorr.util.gen_write(file_name, ['a', 'b'], [a, b[:1]])
    with assert_raises(ValueError):
        treecorr.util.gen_write(file_name, ['a', 'b'], [a, b], file_type='Invalid')

    with assert_raises(ValueError):
        treecorr.util.gen_read(file_name, file_type='Invalid')

    # Now some places that do sanity checks for invalid coords or metrics which would already
    # be checked in normal operation.
    with assert_raises(ValueError):
        treecorr.util.parse_metric('Euclidean', 'invalid')
    with assert_raises(ValueError):
        treecorr.util.parse_metric('Invalid', 'flat')
    with assert_raises(ValueError):
        treecorr.util.coord_enum('invalid')
    with assert_raises(ValueError):
        treecorr.util.metric_enum('Invalid')
Code example #28
0
File: test_config.py Project: joezuntz/TreeCorr
def test_read():
    """Test different ways of reading a config file.
    """
    # The config files for nn_list are designed to hit all the major features here.
    # Tests that use these config files are in test_nn.py:test_list()

    config1 = treecorr.config.read_config('configs/nn_list1.yaml')
    assert config1 == {
        'file_list': 'data/nn_list_data_files.txt',
        'rand_file_list': 'data/nn_list_rand_files.txt',
        'x_col': 1,
        'y_col': 2,
        'verbose': 1,
        'min_sep': 1.,
        'max_sep': 25.,
        'bin_size': 0.10,
        'nn_file_name': 'output/nn_list1.out',
        'nn_statistic': 'simple',
    }

    config2 = treecorr.config.read_config('configs/nn_list2.json')
    assert config2 == {
        'file_list': 'data/nn_list_data_files.txt',
        'rand_file_name': 'data/nn_list_randx.dat',
        'x_col': 1,
        'y_col': 2,
        'verbose': 1,
        'min_sep': 1.,
        'max_sep': 25.,
        'bin_size': 0.10,
        'nn_file_name': 'output/nn_list2.out',
        'nn_statistic': 'simple',
    }

    config3 = treecorr.config.read_config('configs/nn_list3.params')
    assert config3 == {
        'file_name': 'data/nn_list_datax.dat',
        'rand_file_name': [
            'data/nn_list_rand0.dat', 'data/nn_list_rand1.dat',
            'data/nn_list_rand2.dat'
        ],
        'x_col': '1',
        'y_col': '2',
        'verbose': '1',
        'min_sep': '1.',
        'max_sep': '25.',
        'bin_size': '0.10',
        'nn_file_name': 'output/nn_list3.out',
        'nn_statistic': 'simple',
    }

    config4 = treecorr.config.read_config('configs/nn_list4.config',
                                          file_type='yaml')
    assert config4 == {
        'file_list': 'data/nn_list_data_files.txt',
        'rand_file_list': 'data/nn_list_rand_files.txt',
        'file_list2': 'data/nn_list_data_files.txt',
        'rand_file_name2': 'data/nn_list_randx.dat',
        'x_col': 1,
        'y_col': 2,
        'verbose': 1,
        'min_sep': 1.,
        'max_sep': 25.,
        'bin_size': 0.10,
        'nn_file_name': 'output/nn_list4.out',
        'nn_statistic': 'simple',
    }

    config5 = treecorr.config.read_config('configs/nn_list5.config',
                                          file_type='json')
    assert config5 == {
        'file_list': 'data/nn_list_data_files.txt',
        'rand_file_name': 'data/nn_list_randx.dat',
        'file_name2': 'data/nn_list_datax.dat',
        'rand_file_list2': 'data/nn_list_rand_files.txt',
        'x_col': 1,
        'y_col': 2,
        'verbose': 1,
        'min_sep': 1.,
        'max_sep': 25.,
        'bin_size': 0.10,
        'nn_file_name': 'output/nn_list5.out',
        'nn_statistic': 'simple',
    }

    config6 = treecorr.config.read_config('configs/nn_list6.config',
                                          file_type='params')
    assert config6 == {
        'file_name': [
            'data/nn_list_data0.dat', 'data/nn_list_data1.dat',
            'data/nn_list_data2.dat'
        ],
        'rand_file_name': [
            'data/nn_list_rand0.dat', 'data/nn_list_rand1.dat',
            'data/nn_list_rand2.dat'
        ],
        'file_list2': 'data/nn_list_data_files.txt',
        'rand_file_list2': 'data/nn_list_rand_files.txt',
        'x_col': '1',
        'y_col': '2',
        'verbose': '1',
        'min_sep': '1.',
        'max_sep': '25.',
        'bin_size': '0.10',
        'nn_file_name': 'nn_list6.out',
        'nn_statistic': 'simple',
    }

    with assert_raises(ValueError):
        treecorr.config.read_config('configs/nn_list6.config',
                                    file_type='simple')
    with assert_raises(ValueError):
        treecorr.config.read_config('configs/nn_list6.config')
Code example #29
0
File: test_reader.py Project: ztq1996/TreeCorr
def test_hdf_reader():
    try:
        import h5py
    except ImportError:
        print('Skipping HdfReader tests, since h5py not installed.')
        return

    get_from_wiki('Aardvark.hdf5')
    r = HdfReader(os.path.join('data', 'Aardvark.hdf5'))

    # Check things not allowed if not in context
    with assert_raises(RuntimeError):
        r.read(['RA'], slice(0, 10, 2), '/')
    with assert_raises(RuntimeError):
        r.read('RA')
    with assert_raises(RuntimeError):
        r.row_count('DEC', '/')
    with assert_raises(RuntimeError):
        r.row_count('DEC')
    with assert_raises(RuntimeError):
        r.names('/')
    with assert_raises(RuntimeError):
        r.names()
    with assert_raises(RuntimeError):
        '/' in r

    with r:

        # '/' is the only extension in this file.
        # TODO: Add an hdf5 example with other valid choices for ext
        assert_raises(ValueError, r.check_valid_ext, 'invalid')
        r.check_valid_ext('/')

        # Default ext is '/'
        assert r.default_ext == '/'

        # Default ext is "in" reader
        assert '/' in r

        # Can always slice
        assert r.can_slice

        s = slice(0, 10, 2)
        data = r.read(['RA'], s)
        dec = r.read('DEC', s)
        assert data['RA'].size == 5
        assert dec.size == 5

        assert r.row_count('RA') == 390935
        assert r.row_count('RA', '/') == 390935
        assert r.row_count('GAMMA1') == 390935
        # Unlike the other readers, this needs a column name.
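        # (Presumably because each column is a separate HDF5 dataset, there is no
        # single table whose length could be returned without naming a column.)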
        assert_raises(TypeError, r.row_count)
        assert set(r.names()) == set(
            "INDEX RA DEC Z EPSILON GAMMA1 GAMMA2 KAPPA MU".split())
        assert set(r.names('/')) == set(r.names())

    # Again check things not allowed if not in context
    with assert_raises(RuntimeError):
        r.read(['RA'], slice(0, 10, 2), '/')
    with assert_raises(RuntimeError):
        r.read('RA')
    with assert_raises(RuntimeError):
        r.row_count('DEC', '/')
    with assert_raises(RuntimeError):
        r.row_count('DEC')
    with assert_raises(RuntimeError):
        r.names('/')
    with assert_raises(RuntimeError):
        r.names()
    with assert_raises(RuntimeError):
        '/' in r
Code example #30
0
File: test_config.py Project: joezuntz/TreeCorr
def test_check():
    """Test checking the validity of config values.
    """
    # First a simple case with no conflicts
    config1 = treecorr.read_config('configs/kg.yaml')
    valid_params = treecorr.corr2_valid_params
    config2 = treecorr.config.check_config(config1.copy(), valid_params)

    # Just check a few values
    assert config2['x_col'] == '1'
    assert config2['k_col'] == ['3', '0']
    assert config2['verbose'] == 1
    assert config2['kg_file_name'] == 'output/kg.out'

    # Will also have other parameters filled from the valid_params dict
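    # (As the assert in the else branch below shows, element [2] of each
    # valid_params entry is the default used when the key is absent from config1.)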
    for key in config2:
        assert key in valid_params
        if key in config1:
            if isinstance(config1[key], list):
                assert [str(v) for v in config2[key]] == [str(v) for v in config1[key]]
            else:
                assert config2[key] == config1[key] or str(config2[key]) == str(config1[key])
        else:
            assert config2[key] == valid_params[key][2]

    # Check list of bool
    config1['flip_g1'] = [True, 0]
    config2 = treecorr.config.check_config(config1.copy(), valid_params)
    assert config2['flip_g1'] == [True, False]

    # Longer names are allowed
    config1['x_units'] = 'arcminutes'
    config1['y_units'] = 'arcminute'
    config2 = treecorr.config.check_config(config1.copy(), valid_params)
    assert config2['x_units'] == 'arcmin'
    assert config2['y_units'] == 'arcmin'

    # Also other aliases, but you need to list them explicitly.
    config1['reverse_g1'] = True
    with assert_raises(TypeError):
        treecorr.config.check_config(config1.copy(), valid_params)
    config2 = treecorr.config.check_config(config1.copy(),
                                           valid_params,
                                           aliases={'reverse_g1': 'flip_g1'})
    assert config2['flip_g1'] == True
    assert 'reverse_g1' not in config2
    del config1['reverse_g1']

    # Invalid values raise errors
    config1['verbose'] = -1
    with assert_raises(ValueError):
        treecorr.config.check_config(config1.copy(), valid_params)
    config1['verbose'] = 1
    config1['metric'] = 'hyperbolic'
    with assert_raises(ValueError):
        treecorr.config.check_config(config1.copy(), valid_params)
    del config1['metric']

    # With a logger, aliases emit a warning.
    config1['n2_file_name'] = 'output/n2.out'
    with CaptureLog() as cl:
        config2 = treecorr.config.check_config(
            config1.copy(),
            valid_params,
            logger=cl.logger,
            aliases={'n2_file_name': 'nn_file_name'})
    assert "The parameter n2_file_name is deprecated." in cl.output
    assert "You should use nn_file_name instead." in cl.output

    # corr2 has a list of standard aliases
    # It is currently empty, but let's mock it up to test the functionality.
    if sys.version_info < (3, ): return  # mock only available on python 3
    from unittest import mock
    with mock.patch('treecorr.corr2_aliases',
                    {'n2_file_name': 'nn_file_name'}):
        config2 = treecorr.config.check_config(config1.copy(),
                                               valid_params,
                                               aliases=treecorr.corr2_aliases)
    assert 'n2_file_name' not in config2
    assert config2['nn_file_name'] == 'output/n2.out'
    del config1['n2_file_name']
Code example #31
0
File: test_kmeans.py Project: zchvsre/TreeCorr
def test_init_kmpp():
    # Test the init=kmeans++ option

    ngal = 100000
    s = 1.
    rng = np.random.RandomState(8675309)
    x = rng.normal(0, s, (ngal, ))
    y = rng.normal(0, s, (ngal, ))
    z = rng.normal(0, s, (ngal, ))
    cat = treecorr.Catalog(x=x, y=y, z=z)
    xyz = np.array([x, y, z]).T

    # Skip the refine_centers step.
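    # kmeans_initialize_centers and kmeans_assign_patches only pick the initial
    # centers and assign points to them; run_kmeans below does the full iteration,
    # so its total inertia (sum of squared distances of points to their patch
    # centers) should come out lower.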
    print('3d with init=kmeans++')
    npatch = 10
    field = cat.getNField()
    cen1 = field.kmeans_initialize_centers(npatch, 'kmeans++')
    #print('cen = ',cen1)
    assert cen1.shape == (npatch, 3)
    p1 = field.kmeans_assign_patches(cen1)
    print('patches = ', np.unique(p1))
    assert len(p1) == cat.ntot
    assert min(p1) == 0
    assert max(p1) == npatch - 1

    inertia1 = np.array(
        [np.sum((xyz[p1 == i] - cen1[i])**2) for i in range(npatch)])
    counts1 = np.array([np.sum(p1 == i) for i in range(npatch)])
    print('counts = ', counts1)
    print('rms counts = ', np.std(counts1))
    print('total inertia = ', np.sum(inertia1))

    # Now run the normal way
    # Use higher max_iter to make sure the iterative refinement fully converges.
    p2 = field.run_kmeans(npatch, init='kmeans++', max_iter=1000)
    cen2 = np.array([xyz[p2 == i].mean(axis=0) for i in range(npatch)])
    inertia2 = np.array(
        [np.sum((xyz[p2 == i] - cen2[i])**2) for i in range(npatch)])
    counts2 = np.array([np.sum(p2 == i) for i in range(npatch)])
    print('rms counts => ', np.std(counts2))
    print('total inertia => ', np.sum(inertia2))
    assert np.sum(inertia2) < np.sum(inertia1)

    # Use a field with lots of top level cells
    print('3d with init=kmeans++, min_top=10')
    field = cat.getNField(min_top=10)
    cen1 = field.kmeans_initialize_centers(npatch, 'kmeans++')
    #print('cen = ',cen1)
    assert cen1.shape == (npatch, 3)
    p1 = field.kmeans_assign_patches(cen1)
    print('patches = ', np.unique(p1))
    assert len(p1) == cat.ntot
    assert min(p1) == 0
    assert max(p1) == npatch - 1

    inertia1 = np.array(
        [np.sum((xyz[p1 == i] - cen1[i])**2) for i in range(npatch)])
    counts1 = np.array([np.sum(p1 == i) for i in range(npatch)])
    print('counts = ', counts1)
    print('rms counts = ', np.std(counts1))
    print('total inertia = ', np.sum(inertia1))

    # Now run the normal way
    p2 = field.run_kmeans(npatch, init='kmeans++', max_iter=1000)
    cen2 = np.array([xyz[p2 == i].mean(axis=0) for i in range(npatch)])
    inertia2 = np.array(
        [np.sum((xyz[p2 == i] - cen2[i])**2) for i in range(npatch)])
    counts2 = np.array([np.sum(p2 == i) for i in range(npatch)])
    print('rms counts => ', np.std(counts2))
    print('total inertia => ', np.sum(inertia2))
    assert np.sum(inertia2) < np.sum(inertia1)

    # Repeat in 2d
    print('2d with init=kmeans++')
    cat = treecorr.Catalog(x=x, y=y)
    xy = np.array([x, y]).T
    field = cat.getNField()
    cen1 = field.kmeans_initialize_centers(npatch, 'kmeans++')
    #print('cen = ',cen1)
    assert cen1.shape == (npatch, 2)
    p1 = field.kmeans_assign_patches(cen1)
    print('patches = ', np.unique(p1))
    assert len(p1) == cat.ntot
    assert min(p1) == 0
    assert max(p1) == npatch - 1

    inertia1 = np.array(
        [np.sum((xy[p1 == i] - cen1[i])**2) for i in range(npatch)])
    counts1 = np.array([np.sum(p1 == i) for i in range(npatch)])
    print('counts = ', counts1)
    print('rms counts = ', np.std(counts1))
    print('total inertia = ', np.sum(inertia1))

    # Now run the normal way
    p2 = field.run_kmeans(npatch, init='kmeans++', max_iter=1000)
    cen2 = np.array([xy[p2 == i].mean(axis=0) for i in range(npatch)])
    inertia2 = np.array(
        [np.sum((xy[p2 == i] - cen2[i])**2) for i in range(npatch)])
    counts2 = np.array([np.sum(p2 == i) for i in range(npatch)])
    print('rms counts => ', np.std(counts2))
    print('total inertia => ', np.sum(inertia2))
    assert np.sum(inertia2) < np.sum(inertia1)

    # Repeat in spherical
    print('spher with init=kmeans++')
    ra, dec = coord.CelestialCoord.xyz_to_radec(x, y, z)
    cat = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad')
    xyz = np.array([cat.x, cat.y, cat.z]).T
    field = cat.getNField()
    cen1 = field.kmeans_initialize_centers(npatch, 'kmeans++')
    #print('cen = ',cen1)
    assert cen1.shape == (npatch, 3)
    p1 = field.kmeans_assign_patches(cen1)
    print('patches = ', np.unique(p1))
    assert len(p1) == cat.ntot
    assert min(p1) == 0
    assert max(p1) == npatch - 1

    inertia1 = np.array(
        [np.sum((xyz[p1 == i] - cen1[i])**2) for i in range(npatch)])
    counts1 = np.array([np.sum(p1 == i) for i in range(npatch)])
    print('counts = ', counts1)
    print('rms counts = ', np.std(counts1))
    print('total inertia = ', np.sum(inertia1))

    # Now run the normal way
    p2 = field.run_kmeans(npatch, init='kmeans++', max_iter=1000)
    cen2 = np.array([xyz[p2 == i].mean(axis=0) for i in range(npatch)])
    inertia2 = np.array(
        [np.sum((xyz[p2 == i] - cen2[i])**2) for i in range(npatch)])
    counts2 = np.array([np.sum(p2 == i) for i in range(npatch)])
    print('rms counts => ', np.std(counts2))
    print('total inertia => ', np.sum(inertia2))
    assert np.sum(inertia2) < np.sum(inertia1)

    with assert_raises(ValueError):
        field.kmeans_initialize_centers(npatch=ngal * 2, init='kmeans++')
    with assert_raises(ValueError):
        field.kmeans_initialize_centers(npatch=ngal + 1, init='kmeans++')
    with assert_raises(ValueError):
        field.kmeans_initialize_centers(npatch=0, init='kmeans++')
    with assert_raises(ValueError):
        field.kmeans_initialize_centers(npatch=-100, init='kmeans++')

    # Should be valid to give npatch = 1, although not particularly useful.
    cen_1 = field.kmeans_initialize_centers(npatch=1, init='kmeans++')
    p_1 = field.kmeans_assign_patches(cen_1)
    np.testing.assert_equal(p_1, np.zeros(ngal))

    # If same number of patches as galaxies, each galaxy gets a patch.
    # (This is stupid of course, but check that it doesn't fail.)
    # Do this with fewer points though, since it's not particularly fast with N=10^5.
    n = 100
    cat = treecorr.Catalog(ra=ra[:n],
                           dec=dec[:n],
                           ra_units='rad',
                           dec_units='rad')
    field = cat.getNField()
    cen_n = field.kmeans_initialize_centers(npatch=n, init='kmeans++')
    p_n = field.kmeans_assign_patches(cen_n)
    np.testing.assert_equal(sorted(p_n), list(range(n)))
Code example #32
0
File: test_catalog.py Project: rmjarvis/TreeCorr
def test_ascii():

    nobj = 5000
    rng = np.random.RandomState(8675309)
    x = rng.random_sample(nobj)
    y = rng.random_sample(nobj)
    z = rng.random_sample(nobj)
    ra = rng.random_sample(nobj)
    dec = rng.random_sample(nobj)
    r = rng.random_sample(nobj)
    wpos = rng.random_sample(nobj)
    g1 = rng.random_sample(nobj)
    g2 = rng.random_sample(nobj)
    k = rng.random_sample(nobj)

    # Some elements have both w and wpos = 0.
    w = wpos.copy()
    use = rng.randint(30, size=nobj).astype(float)
    w[use == 0] = 0
    wpos[use == 0] = 0

    # Others just have w = 0
    use = rng.randint(30, size=nobj).astype(float)
    w[use == 0] = 0

    flags = np.zeros(nobj).astype(int)
    for flag in [ 1, 2, 4, 8, 16 ]:
        sub = rng.random_sample(nobj) < 0.1
        flags[sub] = np.bitwise_or(flags[sub], flag)

    file_name = os.path.join('data','test.dat')
    with open(file_name, 'w') as fid:
        # These are intentionally in a different order from the order we parse them.
        fid.write('# ra,dec,x,y,k,g1,g2,w,z,r,wpos,flag\n')
        for i in range(nobj):
            fid.write((('%.8f '*11)+'%d\n')%(
                ra[i],dec[i],x[i],y[i],k[i],g1[i],g2[i],w[i],z[i],r[i],wpos[i],flags[i]))

    # Check basic input
    config = {
        'x_col' : 3,
        'y_col' : 4,
        'z_col' : 9,
        'x_units' : 'rad',
        'y_units' : 'rad',
        'w_col' : 8,
        'wpos_col' : 11,
        'k_col' : 5,
        'g1_col' : 6,
        'g2_col' : 7,
        'kk_file_name' : 'kk.out',  # These make sure k and g are required.
        'gg_file_name' : 'gg.out',
    }
    cat1 = treecorr.Catalog(file_name, config)
    np.testing.assert_almost_equal(cat1.x, x)
    np.testing.assert_almost_equal(cat1.y, y)
    np.testing.assert_almost_equal(cat1.z, z)
    np.testing.assert_almost_equal(cat1.w, w)
    np.testing.assert_almost_equal(cat1.g1, g1)
    np.testing.assert_almost_equal(cat1.g2, g2)
    np.testing.assert_almost_equal(cat1.k, k)
    np.testing.assert_almost_equal(cat1.wpos, wpos)

    assert_raises(TypeError, treecorr.Catalog, file_name)
    assert_raises(TypeError, treecorr.Catalog, file_name, config, x=x)
    assert_raises(TypeError, treecorr.Catalog, file_name, config, y=y)
    assert_raises(TypeError, treecorr.Catalog, file_name, config, z=z)
    assert_raises(TypeError, treecorr.Catalog, file_name, config, ra=ra)
    assert_raises(TypeError, treecorr.Catalog, file_name, config, dec=dec)
    assert_raises(TypeError, treecorr.Catalog, file_name, config, r=r)
    assert_raises(TypeError, treecorr.Catalog, file_name, config, g2=g2)
    assert_raises(TypeError, treecorr.Catalog, file_name, config, k=k)
    assert_raises(TypeError, treecorr.Catalog, file_name, config, w=w)
    assert_raises(TypeError, treecorr.Catalog, file_name, config, wpos=wpos)
    assert_raises(TypeError, treecorr.Catalog, file_name, config, flag=flag)
    assert_raises(ValueError, treecorr.Catalog, file_name, config, file_type='Invalid')

    assert_raises(TypeError, treecorr.Catalog, file_name, config, x_col=-1)
    assert_raises(TypeError, treecorr.Catalog, file_name, config, x_col=100)
    assert_raises(TypeError, treecorr.Catalog, file_name, config, y_col=-1)
    assert_raises(TypeError, treecorr.Catalog, file_name, config, y_col=100)
    assert_raises(TypeError, treecorr.Catalog, file_name, config, z_col=-1)
    assert_raises(TypeError, treecorr.Catalog, file_name, config, z_col=100)
    assert_raises(TypeError, treecorr.Catalog, file_name, config, w_col=-1)
    assert_raises(TypeError, treecorr.Catalog, file_name, config, w_col=100)
    assert_raises(TypeError, treecorr.Catalog, file_name, config, wpos_col=-1)
    assert_raises(TypeError, treecorr.Catalog, file_name, config, wpos_col=100)
    assert_raises(TypeError, treecorr.Catalog, file_name, config, k_col=-1)
    assert_raises(TypeError, treecorr.Catalog, file_name, config, k_col=100)
    assert_raises(TypeError, treecorr.Catalog, file_name, config, g1_col=-1)
    assert_raises(TypeError, treecorr.Catalog, file_name, config, g1_col=100)
    assert_raises(TypeError, treecorr.Catalog, file_name, config, g2_col=-1)
    assert_raises(TypeError, treecorr.Catalog, file_name, config, g2_col=100)
    assert_raises(TypeError, treecorr.Catalog, file_name, config, flag_col=-1)
    assert_raises(TypeError, treecorr.Catalog, file_name, config, flag_col=100)
    assert_raises(TypeError, treecorr.Catalog, file_name, config, ra_col=4)
    assert_raises(TypeError, treecorr.Catalog, file_name, config, dec_col=4)
    assert_raises(TypeError, treecorr.Catalog, file_name, config, r_col=4)
    assert_raises(TypeError, treecorr.Catalog, file_name, config, x_col=0)
    assert_raises(TypeError, treecorr.Catalog, file_name, config, y_col=0)
    assert_raises(TypeError, treecorr.Catalog, file_name, config, x_col=0, y_col=0, z_col=0)

    # Check flags
    config['flag_col'] = 12
    print('config = ',config)
    cat2 = treecorr.Catalog(file_name, config, file_type='ASCII')
    np.testing.assert_almost_equal(cat2.w[flags==0], w[flags==0])
    np.testing.assert_almost_equal(cat2.w[flags!=0], 0.)

    # Check ok_flag
    config['ok_flag'] = 4
    cat3 = treecorr.Catalog(file_name, config)
    np.testing.assert_almost_equal(cat3.w[np.logical_or(flags==0, flags==4)],
                                      w[np.logical_or(flags==0, flags==4)])
    np.testing.assert_almost_equal(cat3.w[np.logical_and(flags!=0, flags!=4)], 0.)

    # Check ignore_flag
    del config['ok_flag']
    config['ignore_flag'] = 16
    cat4 = treecorr.Catalog(file_name, config)
    np.testing.assert_almost_equal(cat4.w[flags < 16], w[flags < 16])
    np.testing.assert_almost_equal(cat4.w[flags >= 16], 0.)

    # If weight is missing, automatically make it when there are flags
    del config['w_col']
    del config['wpos_col']
    cat4 = treecorr.Catalog(file_name, config)
    np.testing.assert_almost_equal(cat4.w[flags < 16], 1.)
    np.testing.assert_almost_equal(cat4.w[flags >= 16], 0.)
    config['w_col'] = 8  # Put them back for later.
    config['wpos_col'] = 11

    # Check different units for x,y
    config['x_units'] = 'arcsec'
    config['y_units'] = 'arcsec'
    del config['z_col']
    cat5 = treecorr.Catalog(file_name, config)
    np.testing.assert_almost_equal(cat5.x, x * (pi/180./3600.))
    np.testing.assert_almost_equal(cat5.y, y * (pi/180./3600.))

    config['x_units'] = 'arcmin'
    config['y_units'] = 'arcmin'
    cat5 = treecorr.Catalog(file_name, config)
    np.testing.assert_almost_equal(cat5.x, x * (pi/180./60.))
    np.testing.assert_almost_equal(cat5.y, y * (pi/180./60.))

    config['x_units'] = 'deg'
    config['y_units'] = 'deg'
    cat5 = treecorr.Catalog(file_name, config)
    np.testing.assert_almost_equal(cat5.x, x * (pi/180.))
    np.testing.assert_almost_equal(cat5.y, y * (pi/180.))

    del config['x_units']  # Default is radians
    del config['y_units']
    cat5 = treecorr.Catalog(file_name, config)
    np.testing.assert_almost_equal(cat5.x, x)
    np.testing.assert_almost_equal(cat5.y, y)

    # Check ra,dec
    del config['x_col']
    del config['y_col']
    config['ra_col'] = 1
    config['dec_col'] = 2
    config['r_col'] = 10
    config['ra_units'] = 'rad'
    config['dec_units'] = 'rad'
    cat6 = treecorr.Catalog(file_name, config)
    np.testing.assert_almost_equal(cat6.ra, ra)
    np.testing.assert_almost_equal(cat6.dec, dec)

    config['ra_units'] = 'deg'
    config['dec_units'] = 'deg'
    cat6 = treecorr.Catalog(file_name, config)
    np.testing.assert_almost_equal(cat6.ra, ra * (pi/180.))
    np.testing.assert_almost_equal(cat6.dec, dec * (pi/180.))

    config['ra_units'] = 'hour'
    config['dec_units'] = 'deg'
    cat6 = treecorr.Catalog(file_name, config)
    np.testing.assert_almost_equal(cat6.ra, ra * (pi/12.))
    np.testing.assert_almost_equal(cat6.dec, dec * (pi/180.))

    assert_raises(TypeError, treecorr.Catalog, file_name, config, ra_col=-1)
    assert_raises(TypeError, treecorr.Catalog, file_name, config, ra_col=100)
    assert_raises(TypeError, treecorr.Catalog, file_name, config, dec_col=-1)
    assert_raises(TypeError, treecorr.Catalog, file_name, config, dec_col=100)
    assert_raises(TypeError, treecorr.Catalog, file_name, config, r_col=-1)
    assert_raises(TypeError, treecorr.Catalog, file_name, config, r_col=100)
    assert_raises(TypeError, treecorr.Catalog, file_name, config, x_col=4)
    assert_raises(TypeError, treecorr.Catalog, file_name, config, y_col=4)
    assert_raises(TypeError, treecorr.Catalog, file_name, config, z_col=4)

    # Check using a different delimiter, comment marker
    csv_file_name = os.path.join('data','test.csv')
    with open(csv_file_name, 'w') as fid:
        # These are intentionally in a different order from the order we parse them.
        fid.write('% This file uses commas for its delimiter\n')
        fid.write('% And more than one header line.\n')
        fid.write('% Plus some extra comment lines every so often.\n')
        fid.write('% And we use a weird comment marker to boot.\n')
        fid.write('% ra,dec,x,y,k,g1,g2,w,flag\n')
        for i in range(nobj):
            fid.write((('%.8f,'*11)+'%d\n')%(
                ra[i],dec[i],x[i],y[i],k[i],g1[i],g2[i],w[i],z[i],r[i],wpos[i],flags[i]))
            if i%100 == 0:
                fid.write('%%%% Line %d\n'%i)
    config['delimiter'] = ','
    config['comment_marker'] = '%'
    cat7 = treecorr.Catalog(csv_file_name, config)
    np.testing.assert_almost_equal(cat7.ra, ra * (pi/12.))
    np.testing.assert_almost_equal(cat7.dec, dec * (pi/180.))
    np.testing.assert_almost_equal(cat7.r, r)
    np.testing.assert_almost_equal(cat7.g1, g1)
    np.testing.assert_almost_equal(cat7.g2, g2)
    np.testing.assert_almost_equal(cat7.w[flags < 16], w[flags < 16])
    np.testing.assert_almost_equal(cat7.w[flags >= 16], 0.)

    # Check flip_g1, flip_g2
    del config['delimiter']
    del config['comment_marker']
    config['flip_g1'] = True
    cat8 = treecorr.Catalog(file_name, config)
    np.testing.assert_almost_equal(cat8.g1, -g1)
    np.testing.assert_almost_equal(cat8.g2, g2)

    config['flip_g2'] = 'true'
    cat8 = treecorr.Catalog(file_name, config)
    np.testing.assert_almost_equal(cat8.g1, -g1)
    np.testing.assert_almost_equal(cat8.g2, -g2)

    config['flip_g1'] = 'n'
    config['flip_g2'] = 'yes'
    cat8 = treecorr.Catalog(file_name, config)
    np.testing.assert_almost_equal(cat8.g1, g1)
    np.testing.assert_almost_equal(cat8.g2, -g2)

    # Check overriding values with kwargs
    cat8 = treecorr.Catalog(file_name, config, flip_g1=True, flip_g2=False)
    np.testing.assert_almost_equal(cat8.g1, -g1)
    np.testing.assert_almost_equal(cat8.g2, g2)

    # Check copy command
    cat9 = cat8.copy()
    np.testing.assert_almost_equal(cat9.ra, cat8.ra)
    np.testing.assert_almost_equal(cat9.dec, cat8.dec)
    np.testing.assert_almost_equal(cat9.r, cat8.r)
    np.testing.assert_almost_equal(cat9.g1, cat8.g1)
    np.testing.assert_almost_equal(cat9.g2, cat8.g2)
    np.testing.assert_almost_equal(cat9.w, cat8.w)

    # Swapping w and wpos leads to zeros being copied from wpos to w
    cat10 = treecorr.Catalog(file_name, config, w_col=11, wpos_col=8, flag_col=0)
    np.testing.assert_almost_equal(cat10.wpos, w)
    np.testing.assert_almost_equal(cat10.w, w)

    # And if there is wpos, but no w, copy over the zeros, but not the other values
    with CaptureLog() as cl:
        cat10 = treecorr.Catalog(file_name, config, w_col=0, wpos_col=11, flag_col=0,
                                 logger=cl.logger)
    np.testing.assert_almost_equal(cat10.wpos, wpos)
    np.testing.assert_almost_equal(cat10.w[wpos==0], 0)
    np.testing.assert_almost_equal(cat10.w[wpos!=0], 1)
    assert 'Some wpos values are zero, setting w=0 for these points' in cl.output

    do_pickle(cat1)
    do_pickle(cat2)
    do_pickle(cat3)
    do_pickle(cat4)
    do_pickle(cat5)
    do_pickle(cat6)
    do_pickle(cat7)
    do_pickle(cat8)
    do_pickle(cat9)
    do_pickle(cat10)
Code example #33
0
File: test_catalog.py Project: rmjarvis/TreeCorr
def test_direct():

    nobj = 5000
    rng = np.random.RandomState(8675309)
    x = rng.random_sample(nobj)
    y = rng.random_sample(nobj)
    ra = rng.random_sample(nobj)
    dec = rng.random_sample(nobj)
    w = rng.random_sample(nobj)
    g1 = rng.random_sample(nobj)
    g2 = rng.random_sample(nobj)
    k = rng.random_sample(nobj)

    cat1 = treecorr.Catalog(x=x, y=y, w=w, g1=g1, g2=g2, k=k)
    np.testing.assert_almost_equal(cat1.x, x)
    np.testing.assert_almost_equal(cat1.y, y)
    np.testing.assert_almost_equal(cat1.w, w)
    np.testing.assert_almost_equal(cat1.g1, g1)
    np.testing.assert_almost_equal(cat1.g2, g2)
    np.testing.assert_almost_equal(cat1.k, k)

    cat2 = treecorr.Catalog(ra=ra, dec=dec, w=w, g1=g1, g2=g2, k=k,
                            ra_units='hours', dec_units='degrees')
    np.testing.assert_almost_equal(cat2.ra, ra * coord.hours / coord.radians)
    np.testing.assert_almost_equal(cat2.dec, dec * coord.degrees / coord.radians)
    np.testing.assert_almost_equal(cat2.w, w)
    np.testing.assert_almost_equal(cat2.g1, g1)
    np.testing.assert_almost_equal(cat2.g2, g2)
    np.testing.assert_almost_equal(cat2.k, k)

    do_pickle(cat1)
    do_pickle(cat2)

    assert_raises(TypeError, treecorr.Catalog, x=x)
    assert_raises(TypeError, treecorr.Catalog, y=y)
    assert_raises(TypeError, treecorr.Catalog, z=x)
    assert_raises(TypeError, treecorr.Catalog, r=x)
    assert_raises(TypeError, treecorr.Catalog, x=x, y=y, r=x)
    assert_raises(TypeError, treecorr.Catalog, x=x, y=y, ra=ra, dec=dec)
    assert_raises(TypeError, treecorr.Catalog, x=x, y=y, ra=ra, dec=dec,
                  ra_units='hours', dec_units='degrees')
    assert_raises(TypeError, treecorr.Catalog, x=x, y=y, ra_units='hours')
    assert_raises(TypeError, treecorr.Catalog, x=x, y=y, dec_units='degrees')
    assert_raises(TypeError, treecorr.Catalog, ra=ra, ra_units='hours')
    assert_raises(TypeError, treecorr.Catalog, dec=dec, dec_units='degrees')
    assert_raises(TypeError, treecorr.Catalog, x=x, y=y, g1=g1)
    assert_raises(TypeError, treecorr.Catalog, x=x, y=y, g2=g2)
    assert_raises(TypeError, treecorr.Catalog, ra=ra, dec=dec,
                  ra_units='hours', dec_units='degrees', x_units='arcmin')
    assert_raises(TypeError, treecorr.Catalog, ra=ra, dec=dec,
                  ra_units='hours', dec_units='degrees', y_units='arcmin')
    assert_raises(TypeError, treecorr.Catalog, x=x, y=y, x_units='arcmin')
    assert_raises(TypeError, treecorr.Catalog, x=x, y=y, y_units='arcmin')
    assert_raises(TypeError, treecorr.Catalog, x=x, y=y, z=x, z_units='arcmin')

    assert_raises(ValueError, treecorr.Catalog, x=x, y=y[4:])
    assert_raises(ValueError, treecorr.Catalog, x=x, y=y, z=x[4:])
    assert_raises(ValueError, treecorr.Catalog, x=x, y=y, w=w[4:])
    assert_raises(ValueError, treecorr.Catalog, x=x, y=y, w=w, wpos=w[4:])
    assert_raises(ValueError, treecorr.Catalog, x=x, y=y, w=w, g1=g1[4:], g2=g2[4:])
    assert_raises(ValueError, treecorr.Catalog, x=x, y=y, w=w, g1=g1[4:], g2=g2)
    assert_raises(ValueError, treecorr.Catalog, x=x, y=y, w=w, g1=g1, g2=g2[4:])
    assert_raises(ValueError, treecorr.Catalog, x=x, y=y, w=w, k=k[4:])
    assert_raises(ValueError, treecorr.Catalog, ra=ra, dec=dec[4:], w=w, g1=g1, g2=g2, k=k,
                  ra_units='hours', dec_units='degrees')
    assert_raises(ValueError, treecorr.Catalog, ra=ra[4:], dec=dec, w=w, g1=g1, g2=g2, k=k,
                  ra_units='hours', dec_units='degrees')
    assert_raises(ValueError, treecorr.Catalog, ra=ra, dec=dec, r=x[4:], w=w, g1=g1, g2=g2, k=k,
                  ra_units='hours', dec_units='degrees')
    assert_raises(ValueError, treecorr.Catalog, x=[], y=[])
    assert_raises(ValueError, treecorr.Catalog, x=x, y=y, w=np.zeros_like(x))
Code example #34
0
File: test_kkk.py Project: rmjarvis/TreeCorr
def test_direct():
    # If the catalogs are small enough, we can do a direct calculation to see if it comes out right.
    # This should exactly match the treecorr result if brute=True.

    ngal = 100
    s = 10.
    rng = np.random.RandomState(8675309)
    x = rng.normal(0,s, (ngal,) )
    y = rng.normal(0,s, (ngal,) )
    w = rng.random_sample(ngal)
    kap = rng.normal(0,3, (ngal,) )

    cat = treecorr.Catalog(x=x, y=y, w=w, k=kap)

    min_sep = 1.
    bin_size = 0.2
    nrbins = 10
    nubins = 5
    nvbins = 5
    max_sep = min_sep * np.exp(nrbins * bin_size)
    kkk = treecorr.KKKCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins, brute=True)
    kkk.process(cat, num_threads=2)

    true_ntri = np.zeros((nrbins, nubins, 2*nvbins), dtype=int)
    true_weight = np.zeros((nrbins, nubins, 2*nvbins), dtype=float)
    true_zeta = np.zeros((nrbins, nubins, 2*nvbins), dtype=float)
    for i in range(ngal):
        for j in range(i+1,ngal):
            for k in range(j+1,ngal):
                d12 = np.sqrt((x[i]-x[j])**2 + (y[i]-y[j])**2)
                d23 = np.sqrt((x[j]-x[k])**2 + (y[j]-y[k])**2)
                d31 = np.sqrt((x[k]-x[i])**2 + (y[k]-y[i])**2)

                d3, d2, d1 = sorted([d12, d23, d31])
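                # sorted() is ascending, so d1 >= d2 >= d3 here; the separation
                # bins are logarithmic in the middle side length d2.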
                rindex = np.floor(np.log(d2/min_sep) / bin_size).astype(int)
                if rindex < 0 or rindex >= nrbins: continue

                if [d1, d2, d3] == [d23, d31, d12]: ii,jj,kk = i,j,k
                elif [d1, d2, d3] == [d23, d12, d31]: ii,jj,kk = i,k,j
                elif [d1, d2, d3] == [d31, d12, d23]: ii,jj,kk = j,k,i
                elif [d1, d2, d3] == [d31, d23, d12]: ii,jj,kk = j,i,k
                elif [d1, d2, d3] == [d12, d23, d31]: ii,jj,kk = k,i,j
                elif [d1, d2, d3] == [d12, d31, d23]: ii,jj,kk = k,j,i
                else: assert False
                # Now use ii, jj, kk rather than i,j,k, to get the indices
                # that correspond to the points in the right order.

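                # Shape parameters for the three-point binning: u = d3/d2 is in
                # [0,1] and v = +-(d1-d2)/d3 is in [-1,1], with the sign set by
                # the triangle's orientation (the cross-product test below).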
                u = d3/d2
                v = (d1-d2)/d3
                if (x[jj]-x[ii])*(y[kk]-y[ii]) < (x[kk]-x[ii])*(y[jj]-y[ii]):
                    v = -v

                uindex = np.floor(u / bin_size).astype(int)
                assert 0 <= uindex < nubins
                vindex = np.floor((v+1) / bin_size).astype(int)
                assert 0 <= vindex < 2*nvbins

                www = w[i] * w[j] * w[k]
                zeta = www * kap[i] * kap[j] * kap[k]

                true_ntri[rindex,uindex,vindex] += 1
                true_weight[rindex,uindex,vindex] += www
                true_zeta[rindex,uindex,vindex] += zeta

    pos = true_weight > 0
    true_zeta[pos] /= true_weight[pos]

    np.testing.assert_array_equal(kkk.ntri, true_ntri)
    np.testing.assert_allclose(kkk.weight, true_weight, rtol=1.e-5, atol=1.e-8)
    np.testing.assert_allclose(kkk.zeta, true_zeta, rtol=1.e-5, atol=1.e-8)

    try:
        import fitsio
    except ImportError:
        print('Skipping FITS tests, since fitsio is not installed')
        return

    # Check that running via the corr3 script works correctly.
    config = treecorr.config.read_config('configs/kkk_direct.yaml')
    cat.write(config['file_name'])
    treecorr.corr3(config)
    data = fitsio.read(config['kkk_file_name'])
    np.testing.assert_allclose(data['r_nom'], kkk.rnom.flatten())
    np.testing.assert_allclose(data['u_nom'], kkk.u.flatten())
    np.testing.assert_allclose(data['v_nom'], kkk.v.flatten())
    np.testing.assert_allclose(data['ntri'], kkk.ntri.flatten())
    np.testing.assert_allclose(data['weight'], kkk.weight.flatten())
    np.testing.assert_allclose(data['zeta'], kkk.zeta.flatten(), rtol=1.e-3)

    # Also check the "cross" calculation.  (Real cross doesn't work, but this should.)
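    # Giving the same catalog as all three arguments should reproduce the
    # auto-correlation results computed above.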
    kkk = treecorr.KKKCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins, brute=True)
    kkk.process(cat, cat, cat, num_threads=2)
    np.testing.assert_array_equal(kkk.ntri, true_ntri)
    np.testing.assert_allclose(kkk.weight, true_weight, rtol=1.e-5, atol=1.e-8)
    np.testing.assert_allclose(kkk.zeta, true_zeta, rtol=1.e-5, atol=1.e-8)

    config['file_name2'] = config['file_name']
    config['file_name3'] = config['file_name']
    treecorr.corr3(config)
    data = fitsio.read(config['kkk_file_name'])
    np.testing.assert_allclose(data['r_nom'], kkk.rnom.flatten())
    np.testing.assert_allclose(data['u_nom'], kkk.u.flatten())
    np.testing.assert_allclose(data['v_nom'], kkk.v.flatten())
    np.testing.assert_allclose(data['ntri'], kkk.ntri.flatten())
    np.testing.assert_allclose(data['weight'], kkk.weight.flatten())
    np.testing.assert_allclose(data['zeta'], kkk.zeta.flatten(), rtol=1.e-3)

    # Repeat with binslop = 0
    # And don't do any top-level recursion so we actually test not going to the leaves.
    kkk = treecorr.KKKCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins,
                                  bin_slop=0, max_top=0)
    kkk.process(cat)
    np.testing.assert_array_equal(kkk.ntri, true_ntri)
    np.testing.assert_allclose(kkk.weight, true_weight, rtol=1.e-5, atol=1.e-8)
    np.testing.assert_allclose(kkk.zeta, true_zeta, rtol=1.e-5, atol=1.e-8)

    # Check a few basic operations with a KKKCorrelation object.
    do_pickle(kkk)

    kkk2 = kkk.copy()
    kkk2 += kkk
    np.testing.assert_allclose(kkk2.ntri, 2*kkk.ntri)
    np.testing.assert_allclose(kkk2.weight, 2*kkk.weight)
    np.testing.assert_allclose(kkk2.meand1, 2*kkk.meand1)
    np.testing.assert_allclose(kkk2.meand2, 2*kkk.meand2)
    np.testing.assert_allclose(kkk2.meand3, 2*kkk.meand3)
    np.testing.assert_allclose(kkk2.meanlogd1, 2*kkk.meanlogd1)
    np.testing.assert_allclose(kkk2.meanlogd2, 2*kkk.meanlogd2)
    np.testing.assert_allclose(kkk2.meanlogd3, 2*kkk.meanlogd3)
    np.testing.assert_allclose(kkk2.meanu, 2*kkk.meanu)
    np.testing.assert_allclose(kkk2.meanv, 2*kkk.meanv)
    np.testing.assert_allclose(kkk2.zeta, 2*kkk.zeta)

    kkk2.clear()
    kkk2 += kkk
    np.testing.assert_allclose(kkk2.ntri, kkk.ntri)
    np.testing.assert_allclose(kkk2.weight, kkk.weight)
    np.testing.assert_allclose(kkk2.meand1, kkk.meand1)
    np.testing.assert_allclose(kkk2.meand2, kkk.meand2)
    np.testing.assert_allclose(kkk2.meand3, kkk.meand3)
    np.testing.assert_allclose(kkk2.meanlogd1, kkk.meanlogd1)
    np.testing.assert_allclose(kkk2.meanlogd2, kkk.meanlogd2)
    np.testing.assert_allclose(kkk2.meanlogd3, kkk.meanlogd3)
    np.testing.assert_allclose(kkk2.meanu, kkk.meanu)
    np.testing.assert_allclose(kkk2.meanv, kkk.meanv)
    np.testing.assert_allclose(kkk2.zeta, kkk.zeta)

    ascii_name = 'output/kkk_ascii.txt'
    kkk.write(ascii_name, precision=16)
    kkk3 = treecorr.KKKCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins)
    kkk3.read(ascii_name)
    np.testing.assert_allclose(kkk3.ntri, kkk.ntri)
    np.testing.assert_allclose(kkk3.weight, kkk.weight)
    np.testing.assert_allclose(kkk3.meand1, kkk.meand1)
    np.testing.assert_allclose(kkk3.meand2, kkk.meand2)
    np.testing.assert_allclose(kkk3.meand3, kkk.meand3)
    np.testing.assert_allclose(kkk3.meanlogd1, kkk.meanlogd1)
    np.testing.assert_allclose(kkk3.meanlogd2, kkk.meanlogd2)
    np.testing.assert_allclose(kkk3.meanlogd3, kkk.meanlogd3)
    np.testing.assert_allclose(kkk3.meanu, kkk.meanu)
    np.testing.assert_allclose(kkk3.meanv, kkk.meanv)
    np.testing.assert_allclose(kkk3.zeta, kkk.zeta)

    fits_name = 'output/kkk_fits.fits'
    kkk.write(fits_name)
    kkk4 = treecorr.KKKCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins)
    kkk4.read(fits_name)
    np.testing.assert_allclose(kkk4.ntri, kkk.ntri)
    np.testing.assert_allclose(kkk4.weight, kkk.weight)
    np.testing.assert_allclose(kkk4.meand1, kkk.meand1)
    np.testing.assert_allclose(kkk4.meand2, kkk.meand2)
    np.testing.assert_allclose(kkk4.meand3, kkk.meand3)
    np.testing.assert_allclose(kkk4.meanlogd1, kkk.meanlogd1)
    np.testing.assert_allclose(kkk4.meanlogd2, kkk.meanlogd2)
    np.testing.assert_allclose(kkk4.meanlogd3, kkk.meanlogd3)
    np.testing.assert_allclose(kkk4.meanu, kkk.meanu)
    np.testing.assert_allclose(kkk4.meanv, kkk.meanv)
    np.testing.assert_allclose(kkk4.zeta, kkk.zeta)

    with assert_raises(TypeError):
        kkk2 += config
    kkk5 = treecorr.KKKCorrelation(min_sep=min_sep/2, bin_size=bin_size, nbins=nrbins)
    with assert_raises(ValueError):
        kkk2 += kkk5
    kkk6 = treecorr.KKKCorrelation(min_sep=min_sep, bin_size=bin_size/2, nbins=nrbins)
    with assert_raises(ValueError):
        kkk2 += kkk6
    kkk7 = treecorr.KKKCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins*2)
    with assert_raises(ValueError):
        kkk2 += kkk7
    kkk8 = treecorr.KKKCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins,
                                   min_u=0.1)
    with assert_raises(ValueError):
        kkk2 += kkk8
    kkk0 = treecorr.KKKCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins,
                                   max_u=0.1)
    with assert_raises(ValueError):
        kkk2 += kkk0
    kkk10 = treecorr.KKKCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins,
                                   nubins=nrbins*2)
    with assert_raises(ValueError):
        kkk2 += kkk10
    kkk11 = treecorr.KKKCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins,
                                   min_v=0.1)
    with assert_raises(ValueError):
        kkk2 += kkk11
    kkk12 = treecorr.KKKCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins,
                                   max_v=0.1)
    with assert_raises(ValueError):
        kkk2 += kkk12
    kkk13 = treecorr.KKKCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins,
                                   nvbins=nrbins*2)
    with assert_raises(ValueError):
        kkk2 += kkk13

    # Currently not implemented to only have cat2 or cat3
    with assert_raises(NotImplementedError):
        kkk.process(cat, cat2=cat)
    with assert_raises(NotImplementedError):
        kkk.process(cat, cat3=cat)
    with assert_raises(NotImplementedError):
        kkk.process_cross21(cat, cat)
Code example #35
0
File: test_reader.py Project: ztq1996/TreeCorr
def _test_ascii_reader(r, has_names=True):
    # Same tests for AsciiReader and PandasReader

    # Check things not allowed if not in context
    with assert_raises(RuntimeError):
        r.read([1, 3, 9], None)
    with assert_raises(RuntimeError):
        r.read([1, 3, 9])
    with assert_raises(RuntimeError):
        r.read('ra')
    with assert_raises(RuntimeError):
        r.row_count(1, None)
    with assert_raises(RuntimeError):
        r.row_count()
    with assert_raises(RuntimeError):
        r.names(None)
    with assert_raises(RuntimeError):
        r.names()

    with r:
        # None is the only valid ext.
        assert_raises(ValueError, r.check_valid_ext, 'invalid')
        assert_raises(ValueError, r.check_valid_ext, '0')
        assert_raises(ValueError, r.check_valid_ext, 1)
        r.check_valid_ext(None)
        assert r.default_ext == None

        # Default ext is "in" reader
        assert None in r

        # Can always slice
        assert r.can_slice

        # cols are: ra, dec, x, y, k, g1, g2, w, z, r, wpos, flag
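        # Columns can be addressed by 1-based index; names() always includes the
        # stringified indices '1'..'12' (and the header names, when available).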
        s = slice(0, 10, 2)
        data = r.read([1, 3, 9], s)
        dec = r.read(2, s)
        assert sorted(data.keys()) == [1, 3, 9]
        assert data[1].size == 5
        assert data[3].size == 5
        assert data[9].size == 5
        assert dec.size == 5
        # Check a few random values
        assert data[1][0] == 0.34044927  # ra, row 1
        assert data[3][4] == 0.01816738  # x, row 9
        assert data[9][3] == 0.79008204  # z, row 7

        assert r.row_count(1, None) == 20
        assert r.row_count() == 20
        assert r.ncols == 12
        for i in range(12):
            assert str(i + 1) in r.names()

        all_data = r.read(range(1, r.ncols + 1))
        assert len(all_data) == 12
        assert len(all_data[1]) == 20
        assert r.row_count() == 20

        # Check reading specific rows
        s2 = np.array([0, 6, 8])
        data2 = r.read([1, 3, 9], s2)
        dec2 = r.read(2, s2)
        assert sorted(data2.keys()) == [1, 3, 9]
        assert data2[1].size == 3
        assert data2[3].size == 3
        assert data2[9].size == 3
        assert dec2.size == 3
        # Check the same values in this selection
        assert data2[1][0] == 0.34044927  # ra, row 1
        assert data2[3][2] == 0.01816738  # x, row 9
        assert data2[9][1] == 0.79008204  # z, row 7

        if not has_names:
            return
        # Repeat with column names
        data = r.read(['ra', 'x', 'z'], s)
        dec = r.read('dec', s)
        assert sorted(data.keys()) == ['ra', 'x', 'z']
        assert data['ra'].size == 5
        assert data['x'].size == 5
        assert data['z'].size == 5
        assert dec.size == 5
        # Check the same random values
        assert data['ra'][0] == 0.34044927
        assert data['x'][4] == 0.01816738
        assert data['z'][3] == 0.79008204

        assert r.row_count('ra', None) == 20
        assert r.row_count() == 20
        assert r.ncols == 12
        names = [
            'ra', 'dec', 'x', 'y', 'k', 'g1', 'g2', 'w', 'z', 'r', 'wpos',
            'flag'
        ]
        for name in names:
            assert name in r.names()

        all_data = r.read(names)
        assert len(all_data) == 12
        assert len(all_data['ra']) == 20
        assert r.row_count() == 20

        # Check reading specific rows
        data2 = r.read(['ra', 'x', 'z'], s2)
        dec2 = r.read('dec', s2)
        assert sorted(data2.keys()) == ['ra', 'x', 'z']
        assert data2['ra'].size == 3
        assert data2['x'].size == 3
        assert data2['z'].size == 3
        assert dec2.size == 3
        assert data2['ra'][0] == 0.34044927
        assert data2['x'][2] == 0.01816738
        assert data2['z'][1] == 0.79008204

    # Again check things not allowed if not in context
    with assert_raises(RuntimeError):
        r.read([1, 3, 9], None)
    with assert_raises(RuntimeError):
        r.read([1, 3, 9])
    with assert_raises(RuntimeError):
        r.read('ra')
    with assert_raises(RuntimeError):
        r.row_count(1, None)
    with assert_raises(RuntimeError):
        r.row_count()
    with assert_raises(RuntimeError):
        r.names(None)
    with assert_raises(RuntimeError):
        r.names()
Code example #36
0
File: test_catalog.py Project: rmjarvis/TreeCorr
def test_lru():
    f = lambda x: x+1
    size = 10
    # Test correct size cache gets created
    cache = treecorr.util.LRU_Cache(f, maxsize=size)
    assert len(cache.cache) == size
    assert cache.size == size
    assert cache.count == 0
    # Insert f(0) = 1 into cache and check that we can get it back
    assert cache(0) == f(0)
    assert cache.size == size
    assert cache.count == 1
    assert cache(0) == f(0)
    assert cache.size == size
    assert cache.count == 1

    # Manually manipulate cache so we can check for hit
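    # (The entry is keyed by the argument tuple (0,); slot [3] holds the stored
    # result, so overwriting it changes what cache(0) returns without calling f.)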
    cache.cache[(0,)][3] = 2
    cache.count += 1
    assert cache(0) == 2
    assert cache.count == 2

    # Insert (and check) 1 thru size into cache.  This should bump out the (0,).
    for i in range(1, size+1):
        assert cache(i) == f(i)
    assert (0,) not in cache.cache
    assert cache.size == size
    assert cache.count == size

    # Test non-destructive cache expansion
    newsize = 20
    cache.resize(newsize)
    for i in range(1, size+1):
        assert (i,) in cache.cache
        assert cache(i) == f(i)
    assert len(cache.cache) == newsize
    assert cache.size == newsize
    assert cache.count == size

    # Add new items until the (1,) gets bumped
    for i in range(size+1, newsize+2):
        assert cache(i) == f(i)
    assert (1,) not in cache.cache
    assert cache.size == newsize
    assert cache.count == newsize

    # "Resize" to same size does nothing.
    cache.resize(newsize)
    assert len(cache.cache) == newsize
    assert cache.size == newsize
    assert cache.count == newsize
    assert (1,) not in cache.cache
    for i in range(2, newsize+2):
        assert (i,) in cache.cache
    assert cache.size == newsize
    assert cache.count == newsize

    # Test mostly non-destructive cache contraction.
    # Already bumped (0,) and (1,), so (2,) should be the first to get bumped
    for i in range(newsize-1, size, -1):
        assert (newsize - (i - 1),) in cache.cache
        cache.resize(i)
        assert (newsize - (i - 1),) not in cache.cache

    # Check that it works with size=0
    cache.resize(0)
    print('cache.cache = ',cache.cache)
    print('cache.root = ',cache.root)
    assert cache.root[0] == cache.root
    assert cache.root[1] == cache.root
    assert cache.size == 0
    assert cache.count == 0
    for i in range(10):
        assert cache(i) == f(i)
    print('=> cache.cache = ',cache.cache)
    print('=> cache.root = ',cache.root)
    assert cache.root[0] == cache.root
    assert cache.root[1] == cache.root
    assert cache.size == 0
    assert cache.count == 0

    assert_raises(ValueError, cache.resize, -20)
Code example #37
0
def test_direct():
    # If the catalogs are small enough, we can do a direct calculation to see if it comes out right.
    # This should exactly match the treecorr result if brute=True.

    ngal = 100
    s = 10.
    rng = np.random.RandomState(8675309)
    x = rng.normal(0, s, (ngal, ))
    y = rng.normal(0, s, (ngal, ))
    w = rng.random_sample(ngal)
    kap = rng.normal(0, 3, (ngal, ))

    cat = treecorr.Catalog(x=x, y=y, w=w, k=kap)

    min_sep = 1.
    bin_size = 0.2
    nrbins = 10
    nubins = 5
    nvbins = 5
    max_sep = min_sep * np.exp(nrbins * bin_size)
    kkk = treecorr.KKKCorrelation(min_sep=min_sep,
                                  bin_size=bin_size,
                                  nbins=nrbins,
                                  brute=True)
    kkk.process(cat, num_threads=2)

    true_ntri = np.zeros((nrbins, nubins, 2 * nvbins), dtype=int)
    true_weight = np.zeros((nrbins, nubins, 2 * nvbins), dtype=float)
    true_zeta = np.zeros((nrbins, nubins, 2 * nvbins), dtype=float)
    for i in range(ngal):
        for j in range(i + 1, ngal):
            for k in range(j + 1, ngal):
                d12 = np.sqrt((x[i] - x[j])**2 + (y[i] - y[j])**2)
                d23 = np.sqrt((x[j] - x[k])**2 + (y[j] - y[k])**2)
                d31 = np.sqrt((x[k] - x[i])**2 + (y[k] - y[i])**2)

                d3, d2, d1 = sorted([d12, d23, d31])
                rindex = np.floor(np.log(d2 / min_sep) / bin_size).astype(int)
                if rindex < 0 or rindex >= nrbins: continue

                if [d1, d2, d3] == [d23, d31, d12]: ii, jj, kk = i, j, k
                elif [d1, d2, d3] == [d23, d12, d31]: ii, jj, kk = i, k, j
                elif [d1, d2, d3] == [d31, d12, d23]: ii, jj, kk = j, k, i
                elif [d1, d2, d3] == [d31, d23, d12]: ii, jj, kk = j, i, k
                elif [d1, d2, d3] == [d12, d23, d31]: ii, jj, kk = k, i, j
                elif [d1, d2, d3] == [d12, d31, d23]: ii, jj, kk = k, j, i
                else: assert False
                # Now use ii, jj, kk rather than i,j,k, to get the indices
                # that correspond to the points in the right order.

                u = d3 / d2
                v = (d1 - d2) / d3
                if (x[jj] - x[ii]) * (y[kk] - y[ii]) < (x[kk] - x[ii]) * (
                        y[jj] - y[ii]):
                    v = -v

                uindex = np.floor(u / bin_size).astype(int)
                assert 0 <= uindex < nubins
                vindex = np.floor((v + 1) / bin_size).astype(int)
                assert 0 <= vindex < 2 * nvbins

                www = w[i] * w[j] * w[k]
                zeta = www * kap[i] * kap[j] * kap[k]

                true_ntri[rindex, uindex, vindex] += 1
                true_weight[rindex, uindex, vindex] += www
                true_zeta[rindex, uindex, vindex] += zeta

    pos = true_weight > 0
    true_zeta[pos] /= true_weight[pos]
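
    # The loop above parameterizes each triangle the same way the binning does:
    # with the sides sorted so that d1 >= d2 >= d3, triangles are binned in
    # (log d2, u, v), where u = d3/d2 lies in [0,1] and v = +-(d1-d2)/d3 lies in
    # [-1,1], the sign of v encoding the triangle's orientation.  For example, a
    # (hypothetical) triangle with d1=5, d2=4, d3=3 has u = 0.75 and |v| = 1/3.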

    np.testing.assert_array_equal(kkk.ntri, true_ntri)
    np.testing.assert_allclose(kkk.weight, true_weight, rtol=1.e-5, atol=1.e-8)
    np.testing.assert_allclose(kkk.zeta, true_zeta, rtol=1.e-5, atol=1.e-8)

    try:
        import fitsio
    except ImportError:
        print('Skipping FITS tests, since fitsio is not installed')
        return

    # Check that running via the corr3 script works correctly.
    config = treecorr.config.read_config('configs/kkk_direct.yaml')
    cat.write(config['file_name'])
    treecorr.corr3(config)
    data = fitsio.read(config['kkk_file_name'])
    np.testing.assert_allclose(data['r_nom'], kkk.rnom.flatten())
    np.testing.assert_allclose(data['u_nom'], kkk.u.flatten())
    np.testing.assert_allclose(data['v_nom'], kkk.v.flatten())
    np.testing.assert_allclose(data['ntri'], kkk.ntri.flatten())
    np.testing.assert_allclose(data['weight'], kkk.weight.flatten())
    np.testing.assert_allclose(data['zeta'], kkk.zeta.flatten(), rtol=1.e-3)
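
    # A minimal configs/kkk_direct.yaml compatible with this call might look
    # roughly like the following (an illustrative guess; the actual file in the
    # repo may differ):
    #
    #     file_name: data/kkk_direct.fits
    #     kkk_file_name: output/kkk_direct.fits
    #     min_sep: 1.
    #     bin_size: 0.2
    #     nbins: 10
    #     brute: true
    #     verbose: 0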

    # Also check the "cross" calculation.  (Real cross doesn't work, but this should.)
    kkk = treecorr.KKKCorrelation(min_sep=min_sep,
                                  bin_size=bin_size,
                                  nbins=nrbins,
                                  brute=True)
    kkk.process(cat, cat, cat, num_threads=2)
    np.testing.assert_array_equal(kkk.ntri, true_ntri)
    np.testing.assert_allclose(kkk.weight, true_weight, rtol=1.e-5, atol=1.e-8)
    np.testing.assert_allclose(kkk.zeta, true_zeta, rtol=1.e-5, atol=1.e-8)

    config['file_name2'] = config['file_name']
    config['file_name3'] = config['file_name']
    treecorr.corr3(config)
    data = fitsio.read(config['kkk_file_name'])
    np.testing.assert_allclose(data['r_nom'], kkk.rnom.flatten())
    np.testing.assert_allclose(data['u_nom'], kkk.u.flatten())
    np.testing.assert_allclose(data['v_nom'], kkk.v.flatten())
    np.testing.assert_allclose(data['ntri'], kkk.ntri.flatten())
    np.testing.assert_allclose(data['weight'], kkk.weight.flatten())
    np.testing.assert_allclose(data['zeta'], kkk.zeta.flatten(), rtol=1.e-3)

    # Repeat with binslop = 0
    # And don't do any top-level recursion so we actually test not going to the leaves.
    kkk = treecorr.KKKCorrelation(min_sep=min_sep,
                                  bin_size=bin_size,
                                  nbins=nrbins,
                                  bin_slop=0,
                                  max_top=0)
    kkk.process(cat)
    np.testing.assert_array_equal(kkk.ntri, true_ntri)
    np.testing.assert_allclose(kkk.weight, true_weight, rtol=1.e-5, atol=1.e-8)
    np.testing.assert_allclose(kkk.zeta, true_zeta, rtol=1.e-5, atol=1.e-8)

    # Check a few basic operations with a KKKCorrelation object.
    do_pickle(kkk)

    kkk2 = kkk.copy()
    kkk2 += kkk
    np.testing.assert_allclose(kkk2.ntri, 2 * kkk.ntri)
    np.testing.assert_allclose(kkk2.weight, 2 * kkk.weight)
    np.testing.assert_allclose(kkk2.meand1, 2 * kkk.meand1)
    np.testing.assert_allclose(kkk2.meand2, 2 * kkk.meand2)
    np.testing.assert_allclose(kkk2.meand3, 2 * kkk.meand3)
    np.testing.assert_allclose(kkk2.meanlogd1, 2 * kkk.meanlogd1)
    np.testing.assert_allclose(kkk2.meanlogd2, 2 * kkk.meanlogd2)
    np.testing.assert_allclose(kkk2.meanlogd3, 2 * kkk.meanlogd3)
    np.testing.assert_allclose(kkk2.meanu, 2 * kkk.meanu)
    np.testing.assert_allclose(kkk2.meanv, 2 * kkk.meanv)
    np.testing.assert_allclose(kkk2.zeta, 2 * kkk.zeta)

    kkk2.clear()
    kkk2 += kkk
    np.testing.assert_allclose(kkk2.ntri, kkk.ntri)
    np.testing.assert_allclose(kkk2.weight, kkk.weight)
    np.testing.assert_allclose(kkk2.meand1, kkk.meand1)
    np.testing.assert_allclose(kkk2.meand2, kkk.meand2)
    np.testing.assert_allclose(kkk2.meand3, kkk.meand3)
    np.testing.assert_allclose(kkk2.meanlogd1, kkk.meanlogd1)
    np.testing.assert_allclose(kkk2.meanlogd2, kkk.meanlogd2)
    np.testing.assert_allclose(kkk2.meanlogd3, kkk.meanlogd3)
    np.testing.assert_allclose(kkk2.meanu, kkk.meanu)
    np.testing.assert_allclose(kkk2.meanv, kkk.meanv)
    np.testing.assert_allclose(kkk2.zeta, kkk.zeta)

    ascii_name = 'output/kkk_ascii.txt'
    kkk.write(ascii_name, precision=16)
    kkk3 = treecorr.KKKCorrelation(min_sep=min_sep,
                                   bin_size=bin_size,
                                   nbins=nrbins)
    kkk3.read(ascii_name)
    np.testing.assert_allclose(kkk3.ntri, kkk.ntri)
    np.testing.assert_allclose(kkk3.weight, kkk.weight)
    np.testing.assert_allclose(kkk3.meand1, kkk.meand1)
    np.testing.assert_allclose(kkk3.meand2, kkk.meand2)
    np.testing.assert_allclose(kkk3.meand3, kkk.meand3)
    np.testing.assert_allclose(kkk3.meanlogd1, kkk.meanlogd1)
    np.testing.assert_allclose(kkk3.meanlogd2, kkk.meanlogd2)
    np.testing.assert_allclose(kkk3.meanlogd3, kkk.meanlogd3)
    np.testing.assert_allclose(kkk3.meanu, kkk.meanu)
    np.testing.assert_allclose(kkk3.meanv, kkk.meanv)
    np.testing.assert_allclose(kkk3.zeta, kkk.zeta)

    fits_name = 'output/kkk_fits.fits'
    kkk.write(fits_name)
    kkk4 = treecorr.KKKCorrelation(min_sep=min_sep,
                                   bin_size=bin_size,
                                   nbins=nrbins)
    kkk4.read(fits_name)
    np.testing.assert_allclose(kkk4.ntri, kkk.ntri)
    np.testing.assert_allclose(kkk4.weight, kkk.weight)
    np.testing.assert_allclose(kkk4.meand1, kkk.meand1)
    np.testing.assert_allclose(kkk4.meand2, kkk.meand2)
    np.testing.assert_allclose(kkk4.meand3, kkk.meand3)
    np.testing.assert_allclose(kkk4.meanlogd1, kkk.meanlogd1)
    np.testing.assert_allclose(kkk4.meanlogd2, kkk.meanlogd2)
    np.testing.assert_allclose(kkk4.meanlogd3, kkk.meanlogd3)
    np.testing.assert_allclose(kkk4.meanu, kkk.meanu)
    np.testing.assert_allclose(kkk4.meanv, kkk.meanv)
    np.testing.assert_allclose(kkk4.zeta, kkk.zeta)

    with assert_raises(TypeError):
        kkk2 += config
    kkk5 = treecorr.KKKCorrelation(min_sep=min_sep / 2,
                                   bin_size=bin_size,
                                   nbins=nrbins)
    with assert_raises(ValueError):
        kkk2 += kkk5
    kkk6 = treecorr.KKKCorrelation(min_sep=min_sep,
                                   bin_size=bin_size / 2,
                                   nbins=nrbins)
    with assert_raises(ValueError):
        kkk2 += kkk6
    kkk7 = treecorr.KKKCorrelation(min_sep=min_sep,
                                   bin_size=bin_size,
                                   nbins=nrbins * 2)
    with assert_raises(ValueError):
        kkk2 += kkk7
    kkk8 = treecorr.KKKCorrelation(min_sep=min_sep,
                                   bin_size=bin_size,
                                   nbins=nrbins,
                                   min_u=0.1)
    with assert_raises(ValueError):
        kkk2 += kkk8
    kkk9 = treecorr.KKKCorrelation(min_sep=min_sep,
                                   bin_size=bin_size,
                                   nbins=nrbins,
                                   max_u=0.1)
    with assert_raises(ValueError):
        kkk2 += kkk9
    kkk10 = treecorr.KKKCorrelation(min_sep=min_sep,
                                    bin_size=bin_size,
                                    nbins=nrbins,
                                    nubins=nrbins * 2)
    with assert_raises(ValueError):
        kkk2 += kkk10
    kkk11 = treecorr.KKKCorrelation(min_sep=min_sep,
                                    bin_size=bin_size,
                                    nbins=nrbins,
                                    min_v=0.1)
    with assert_raises(ValueError):
        kkk2 += kkk11
    kkk12 = treecorr.KKKCorrelation(min_sep=min_sep,
                                    bin_size=bin_size,
                                    nbins=nrbins,
                                    max_v=0.1)
    with assert_raises(ValueError):
        kkk2 += kkk12
    kkk13 = treecorr.KKKCorrelation(min_sep=min_sep,
                                    bin_size=bin_size,
                                    nbins=nrbins,
                                    nvbins=nrbins * 2)
    with assert_raises(ValueError):
        kkk2 += kkk13

    # Providing only cat2 or cat3 is not currently implemented.
    with assert_raises(NotImplementedError):
        kkk.process(cat, cat2=cat)
    with assert_raises(NotImplementedError):
        kkk.process(cat, cat3=cat)
    with assert_raises(NotImplementedError):
        kkk.process_cross21(cat, cat)
Code example #38
0
File: test_catalog.py Project: rmjarvis/TreeCorr
def test_fits():
    try:
        import fitsio
    except ImportError:
        print('Skipping FITS tests, since fitsio is not installed')
        return

    get_from_wiki('Aardvark.fit')
    file_name = os.path.join('data','Aardvark.fit')
    config = treecorr.read_config('Aardvark.yaml')
    config['verbose'] = 1
    config['kk_file_name'] = 'kk.fits'
    config['gg_file_name'] = 'gg.fits'

    # Just test a few random particular values
    cat1 = treecorr.Catalog(file_name, config)
    np.testing.assert_equal(len(cat1.ra), 390935)
    np.testing.assert_equal(cat1.nobj, 390935)
    np.testing.assert_almost_equal(cat1.ra[0], 56.4195 * (pi/180.))
    np.testing.assert_almost_equal(cat1.ra[390934], 78.4782 * (pi/180.))
    np.testing.assert_almost_equal(cat1.dec[290333], 83.1579 * (pi/180.))
    np.testing.assert_almost_equal(cat1.g1[46392], 0.0005066675)
    np.testing.assert_almost_equal(cat1.g2[46392], -0.0001006742)
    np.testing.assert_almost_equal(cat1.k[46392], -0.0008628797)

    assert_raises(ValueError, treecorr.Catalog, file_name, config, ra_col='invalid')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, dec_col='invalid')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, r_col='invalid')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, w_col='invalid')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, wpos_col='invalid')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, flag_col='invalid')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, g1_col='invalid')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, g2_col='invalid')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, k_col='invalid')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, ra_col='0')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, dec_col='0')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, x_col='x')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, y_col='y')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, z_col='z')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, ra_col='0', dec_col='0')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, g1_col='0')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, g2_col='0')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, k_col='0')
    assert_raises(TypeError, treecorr.Catalog, file_name, config, x_units='arcmin')
    assert_raises(TypeError, treecorr.Catalog, file_name, config, y_units='arcmin')
    del config['ra_units']
    assert_raises(TypeError, treecorr.Catalog, file_name, config)
    del config['dec_units']
    assert_raises(TypeError, treecorr.Catalog, file_name, config, ra_units='deg')

    # The catalog doesn't have x, y, or w, but test that functionality as well.
    del config['ra_col']
    del config['dec_col']
    config['x_col'] = 'RA'
    config['y_col'] = 'DEC'
    config['w_col'] = 'MU'
    config['flag_col'] = 'INDEX'
    config['ignore_flag'] = 64
    cat2 = treecorr.Catalog(file_name, config)
    np.testing.assert_almost_equal(cat2.x[390934], 78.4782, decimal=4)
    np.testing.assert_almost_equal(cat2.y[290333], 83.1579, decimal=4)
    np.testing.assert_almost_equal(cat2.w[46392], 0.)        # index = 1200379
    np.testing.assert_almost_equal(cat2.w[46393], 0.9995946) # index = 1200386
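
    # With ignore_flag=64, any row whose flag column (here INDEX) has the 64 bit
    # set gets its weight zeroed rather than being dropped:
    # 1200379 & 64 == 64, so that row's w becomes 0, while 1200386 & 64 == 0,
    # so that row keeps its MU value as the weight.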

    assert_raises(ValueError, treecorr.Catalog, file_name, config, x_col='invalid')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, y_col='invalid')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, z_col='invalid')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, ra_col='invalid')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, dec_col='invalid')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, r_col='invalid')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, w_col='invalid')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, wpos_col='invalid')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, flag_col='invalid')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, g1_col='invalid')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, g2_col='invalid')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, k_col='invalid')

    # Test using a limited set of rows
    config['first_row'] = 101
    config['last_row'] = 50000
    cat3 = treecorr.Catalog(file_name, config)
    np.testing.assert_equal(len(cat3.x), 49900)
    np.testing.assert_equal(cat3.ntot, 49900)
    np.testing.assert_equal(cat3.nobj, sum(cat3.w != 0))
    np.testing.assert_equal(cat3.sumw, sum(cat3.w))
    np.testing.assert_equal(cat3.sumw, sum(cat2.w[100:50000]))
    np.testing.assert_almost_equal(cat3.g1[46292], 0.0005066675)
    np.testing.assert_almost_equal(cat3.g2[46292], -0.0001006742)
    np.testing.assert_almost_equal(cat3.k[46292], -0.0008628797)
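
    # first_row/last_row are 1-based and inclusive, so rows 101..50000 give
    # 50000 - 101 + 1 = 49900 objects, matching the length and sumw checks above
    # (cat2.w[100:50000] covers the same rows in 0-based slicing).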

    cat4 = treecorr.read_catalogs(config, key='file_name', is_rand=True)[0]
    np.testing.assert_equal(len(cat4.x), 49900)
    np.testing.assert_equal(cat4.ntot, 49900)
    np.testing.assert_equal(cat4.nobj, sum(cat4.w != 0))
    np.testing.assert_equal(cat4.sumw, sum(cat4.w))
    np.testing.assert_equal(cat4.sumw, sum(cat2.w[100:50000]))
    assert cat4.g1 is None
    assert cat4.g2 is None
    assert cat4.k is None

    do_pickle(cat1)
    do_pickle(cat2)
    do_pickle(cat3)
    do_pickle(cat4)

    assert_raises(ValueError, treecorr.Catalog, file_name, config, first_row=-10)
    assert_raises(ValueError, treecorr.Catalog, file_name, config, first_row=0)
    assert_raises(ValueError, treecorr.Catalog, file_name, config, first_row=60000)
    assert_raises(ValueError, treecorr.Catalog, file_name, config, first_row=50001)

    assert_raises(TypeError, treecorr.read_catalogs, config)
    assert_raises(TypeError, treecorr.read_catalogs, config, key='file_name', list_key='file_name')

    # Even if the gg output file is not given, it is still invalid to have only one of g1, g2.
    del config['gg_file_name']
    assert_raises(ValueError, treecorr.Catalog, file_name, config, g1_col='0')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, g2_col='0')
Code example #39
0
def test_direct_count():
    # This is essentially the same as test_nn.py:test_direct_count, but using periodic distances.
    # The points are uniform in the box, so there are plenty of pairs crossing the edges.

    ngal = 100
    Lx = 50.
    Ly = 80.
    rng = np.random.RandomState(8675309)
    x1 = (rng.random_sample(ngal)-0.5) * Lx
    y1 = (rng.random_sample(ngal)-0.5) * Ly
    cat1 = treecorr.Catalog(x=x1, y=y1)
    x2 = (rng.random_sample(ngal)-0.5) * Lx
    y2 = (rng.random_sample(ngal)-0.5) * Ly
    cat2 = treecorr.Catalog(x=x2, y=y2)

    min_sep = 1.
    max_sep = 50.
    nbins = 50
    dd = treecorr.NNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, bin_slop=0,
                                xperiod=Lx, yperiod=Ly)
    dd.process(cat1, cat2, metric='Periodic')
    print('dd.npairs = ',dd.npairs)

    log_min_sep = np.log(min_sep)
    log_max_sep = np.log(max_sep)
    true_npairs = np.zeros(nbins)
    bin_size = (log_max_sep - log_min_sep) / nbins
    for i in range(ngal):
        for j in range(ngal):
            dx = min(abs(x1[i]-x2[j]), Lx - abs(x1[i]-x2[j]))
            dy = min(abs(y1[i]-y2[j]), Ly - abs(y1[i]-y2[j]))
            rsq = dx**2 + dy**2
            logr = 0.5 * np.log(rsq)
            k = int(np.floor( (logr-log_min_sep) / bin_size ))
            if k < 0: continue
            if k >= nbins: continue
            true_npairs[k] += 1

    print('true_npairs = ',true_npairs)
    print('diff = ',dd.npairs - true_npairs)
    np.testing.assert_array_equal(dd.npairs, true_npairs)
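
    # The wrapping above is the usual minimum-image convention: the separation in
    # each coordinate is the shorter of the direct distance and the distance
    # through the periodic boundary.  E.g. with Lx = 50, points at x = -24 and
    # x = +24 are 48 apart directly, but only min(48, 50-48) = 2 apart here.
    assert min(abs(-24 - 24), Lx - abs(-24 - 24)) == 2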

    # Check that running via the corr2 script works correctly.
    file_name1 = os.path.join('data','nn_periodic_data1.dat')
    with open(file_name1, 'w') as fid:
        for i in range(ngal):
            fid.write(('%.20f %.20f\n')%(x1[i],y1[i]))
    file_name2 = os.path.join('data','nn_periodic_data2.dat')
    with open(file_name2, 'w') as fid:
        for i in range(ngal):
            fid.write(('%.20f %.20f\n')%(x2[i],y2[i]))
    nrand = ngal
    rx1 = (rng.random_sample(nrand)-0.5) * Lx
    ry1 = (rng.random_sample(nrand)-0.5) * Ly
    rx2 = (rng.random_sample(nrand)-0.5) * Lx
    ry2 = (rng.random_sample(nrand)-0.5) * Ly
    rcat1 = treecorr.Catalog(x=rx1, y=ry1)
    rcat2 = treecorr.Catalog(x=rx2, y=ry2)
    rand_file_name1 = os.path.join('data','nn_periodic_rand1.dat')
    with open(rand_file_name1, 'w') as fid:
        for i in range(nrand):
            fid.write(('%.20f %.20f\n')%(rx1[i],ry1[i]))
    rand_file_name2 = os.path.join('data','nn_periodic_rand2.dat')
    with open(rand_file_name2, 'w') as fid:
        for i in range(nrand):
            fid.write(('%.20f %.20f\n')%(rx2[i],ry2[i]))
    rr = treecorr.NNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, bin_slop=0,
                                verbose=0, xperiod=Lx, yperiod=Ly)
    rr.process(rcat1,rcat2, metric='Periodic')
    xi, varxi = dd.calculateXi(rr)
    print('xi = ',xi)
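
    # With only rr supplied (no dr/rd), calculateXi should reduce to the simple
    # estimator xi = (DD - RR) / RR, with each pair count normalized by its
    # total number of pairs, rather than the full Landy-Szalay estimator.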

    # Do this via the corr2 function.
    config = treecorr.config.read_config('configs/nn_periodic.yaml')
    logger = treecorr.config.setup_logger(2)
    treecorr.corr2(config, logger)
    corr2_output = np.genfromtxt(os.path.join('output','nn_periodic.out'), names=True,
                                    skip_header=1)
    np.testing.assert_allclose(corr2_output['r_nom'], dd.rnom, rtol=1.e-3)
    np.testing.assert_allclose(corr2_output['DD'], dd.npairs, rtol=1.e-3)
    np.testing.assert_allclose(corr2_output['npairs'], dd.npairs, rtol=1.e-3)
    np.testing.assert_allclose(corr2_output['RR'], rr.npairs, rtol=1.e-3)
    np.testing.assert_allclose(corr2_output['xi'], xi, rtol=1.e-3)

    # If we don't give a period, that's an error.
    rr = treecorr.NNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins)
    with assert_raises(ValueError):
        rr.process(rcat1,rcat2, metric='Periodic')

    # Or if we give only one kind of period.
    rr = treecorr.NNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, xperiod=3)
    with assert_raises(ValueError):
        rr.process(rcat1,rcat2, metric='Periodic')
    rr = treecorr.NNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, yperiod=3)
    with assert_raises(ValueError):
        rr.process(rcat1,rcat2, metric='Periodic')

    # If we give a period but then don't use the Periodic metric, that's also an error.
    rr = treecorr.NNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, period=3)
    with assert_raises(ValueError):
        rr.process(rcat1,rcat2)
Code example #40
0
File: test_catalog.py Project: rmjarvis/TreeCorr
def test_field():
    # Test making various kinds of fields
    # Note: This is mostly just a coverage test to make sure there aren't any errors
    # when doing this manually.  The real functionality tests of using the fields are
    # all elsewhere.

    ngal = 2000
    s = 10.
    rng = np.random.RandomState(8675309)
    x = rng.normal(222,50, (ngal,) )
    y = rng.normal(138,20, (ngal,) )
    z = rng.normal(912,130, (ngal,) )
    w = rng.normal(1.3, 0.1, (ngal,) )

    ra = rng.normal(11.34, 0.9, (ngal,) )
    dec = rng.normal(-48.12, 4.3, (ngal,) )
    r = rng.normal(1024, 230, (ngal,) )

    k = rng.normal(0,s, (ngal,) )
    g1 = rng.normal(0,s, (ngal,) )
    g2 = rng.normal(0,s, (ngal,) )

    cat1 = treecorr.Catalog(x=x, y=y, z=z, g1=g1, g2=g2, k=k)
    cat2 = treecorr.Catalog(ra=ra, dec=dec, ra_units='hour', dec_units='deg',
                            w=w, g1=g1, g2=g2, k=k)
    cat2.logger = None
    cat3 = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2, k=k, w=w)
    cat4 = treecorr.Catalog(x=x, y=y, w=w)
    logger = treecorr.config.setup_logger(1)

    assert cat1.field is None  # Before calling get*Field, this is None.
    assert cat2.field is None
    assert cat3.field is None

    t0 = time.time()
    nfield1 = cat1.getNField()
    nfield2 = cat2.getNField(0.01, 1)
    nfield3 = cat3.getNField(1,300, logger=logger)
    t1 = time.time()
    nfield1b = cat1.getNField()
    nfield2b = cat2.getNField(0.01, 1)
    nfield3b = cat3.getNField(1,300, logger=logger)
    t2 = time.time()
    assert cat1.nfields.count == 1
    assert cat2.nfields.count == 1
    assert cat3.nfields.count == 1
    assert cat1.nfields.last_value is nfield1
    assert cat2.nfields.last_value is nfield2
    assert cat3.nfields.last_value is nfield3
    assert cat1.field is nfield1
    assert cat2.field is nfield2
    assert cat3.field is nfield3
    # The second time, they should already be made and taken from the cache, so much faster.
    print('nfield: ',t1-t0,t2-t1)
    assert t2-t1 < t1-t0
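
    # The speedup is because the second round of calls presumably hits the
    # per-catalog field cache (keyed on the get*Field arguments), so the very
    # same Field objects are returned without rebuilding the trees.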

    t0 = time.time()
    gfield1 = cat1.getGField()
    gfield2 = cat2.getGField(0.01, 1)
    gfield3 = cat3.getGField(1,300, logger=logger)
    t1 = time.time()
    gfield1b = cat1.getGField()
    gfield2b = cat2.getGField(0.01, 1)
    gfield3b = cat3.getGField(1,300, logger=logger)
    t2 = time.time()
    assert_raises(TypeError, cat4.getGField)
    assert cat1.gfields.count == 1
    assert cat2.gfields.count == 1
    assert cat3.gfields.count == 1
    assert cat1.field is gfield1
    assert cat2.field is gfield2
    assert cat3.field is gfield3
    print('gfield: ',t1-t0,t2-t1)
    assert t2-t1 < t1-t0

    t0 = time.time()
    kfield1 = cat1.getKField()
    kfield2 = cat2.getKField(0.01, 1)
    kfield3 = cat3.getKField(1,300, logger=logger)
    t1 = time.time()
    kfield1b = cat1.getKField()
    kfield2b = cat2.getKField(0.01, 1)
    kfield3b = cat3.getKField(1,300, logger=logger)
    t2 = time.time()
    assert_raises(TypeError, cat4.getKField)
    assert cat1.kfields.count == 1
    assert cat2.kfields.count == 1
    assert cat3.kfields.count == 1
    assert cat1.field is kfield1
    assert cat2.field is kfield2
    assert cat3.field is kfield3
    print('kfield: ',t1-t0,t2-t1)
    assert t2-t1 < t1-t0

    t0 = time.time()
    nsimplefield1 = cat1.getNSimpleField()
    nsimplefield2 = cat2.getNSimpleField()
    nsimplefield3 = cat3.getNSimpleField(logger=logger)
    t1 = time.time()
    nsimplefield1b = cat1.getNSimpleField()
    nsimplefield2b = cat2.getNSimpleField()
    nsimplefield3b = cat3.getNSimpleField(logger=logger)
    t2 = time.time()
    assert cat1.nsimplefields.count == 1
    assert cat2.nsimplefields.count == 1
    assert cat3.nsimplefields.count == 1
    assert cat1.field is kfield1   # SimpleFields don't supplant the field attribute
    assert cat2.field is kfield2
    assert cat3.field is kfield3
    print('nsimplefield: ',t1-t0,t2-t1)
    assert t2-t1 < t1-t0

    t0 = time.time()
    gsimplefield1 = cat1.getGSimpleField()
    gsimplefield2 = cat2.getGSimpleField()
    gsimplefield3 = cat3.getGSimpleField(logger=logger)
    t1 = time.time()
    gsimplefield1b = cat1.getGSimpleField()
    gsimplefield2b = cat2.getGSimpleField()
    gsimplefield3b = cat3.getGSimpleField(logger=logger)
    t2 = time.time()
    assert_raises(TypeError, cat4.getGSimpleField)
    assert cat1.gsimplefields.count == 1
    assert cat2.gsimplefields.count == 1
    assert cat3.gsimplefields.count == 1
    assert cat1.field is kfield1   # SimpleFields don't supplant the field attribute
    assert cat2.field is kfield2
    assert cat3.field is kfield3
    print('gsimplefield: ',t1-t0,t2-t1)
    assert t2-t1 < t1-t0

    t0 = time.time()
    ksimplefield1 = cat1.getKSimpleField()
    ksimplefield2 = cat2.getKSimpleField()
    ksimplefield3 = cat3.getKSimpleField(logger=logger)
    t1 = time.time()
    ksimplefield1b = cat1.getKSimpleField()
    ksimplefield2b = cat2.getKSimpleField()
    ksimplefield3b = cat3.getKSimpleField(logger=logger)
    t2 = time.time()
    assert_raises(TypeError, cat4.getKSimpleField)
    assert cat1.ksimplefields.count == 1
    assert cat2.ksimplefields.count == 1
    assert cat3.ksimplefields.count == 1
    assert cat1.field is kfield1   # SimpleFields don't supplant the field attribute
    assert cat2.field is kfield2
    assert cat3.field is kfield3
    print('ksimplefield: ',t1-t0,t2-t1)
    assert t2-t1 < t1-t0

    # By default, only one is saved.  Check resize_cache option.
    cat1.resize_cache(3)
    assert cat1.nfields.size == 3
    assert cat1.kfields.size == 3
    assert cat1.gfields.size == 3
    assert cat1.nsimplefields.size == 3
    assert cat1.ksimplefields.size == 3
    assert cat1.gsimplefields.size == 3
    assert cat1.nfields.count == 1
    assert cat1.kfields.count == 1
    assert cat1.gfields.count == 1
    assert cat1.nsimplefields.count == 1
    assert cat1.ksimplefields.count == 1
    assert cat1.gsimplefields.count == 1
    assert cat1.field is kfield1

    t0 = time.time()
    nfield1 = cat1.getNField()
    nfield2 = cat1.getNField(0.01, 1)
    nfield3 = cat1.getNField(1,300, logger=logger)
    t1 = time.time()
    nfield1b = cat1.getNField()
    nfield2b = cat1.getNField(0.01, 1)
    nfield3b = cat1.getNField(1,300, logger=logger)
    t2 = time.time()
    assert cat1.nfields.count == 3
    print('after resize(3) nfield: ',t1-t0,t2-t1)
    assert t2-t1 < t1-t0
    assert nfield1b is nfield1
    assert nfield2b is nfield2
    assert nfield3b is nfield3
    assert nfield1 in cat1.nfields.values()
    assert nfield2 in cat1.nfields.values()
    assert nfield3 in cat1.nfields.values()
    assert cat1.nfields.last_value is nfield3
    assert cat1.field is nfield3

    # clear_cache will manually remove them.
    cat1.clear_cache()
    print('values = ',cat1.nfields.values())
    print('len(cache) = ',len(cat1.nfields.cache))
    assert len(cat1.nfields.values()) == 0
    assert cat1.nfields.count == 0
    assert cat1.gfields.count == 0
    assert cat1.kfields.count == 0
    assert cat1.nsimplefields.count == 0
    assert cat1.gsimplefields.count == 0
    assert cat1.ksimplefields.count == 0
    assert cat1.field is None

    # Can also resize to 0
    cat1.resize_cache(0)
    assert cat1.nfields.count == 0
    assert cat1.nfields.size == 0
    t0 = time.time()
    nfield1 = cat1.getNField()
    nfield2 = cat1.getNField(0.01, 1)
    nfield3 = cat1.getNField(1,300, logger=logger)
    t1 = time.time()
    nfield1b = cat1.getNField()
    nfield2b = cat1.getNField(0.01, 1)
    nfield3b = cat1.getNField(1,300, logger=logger)
    t2 = time.time()
    # This time, not much time difference.
    print('after resize(0) nfield: ',t1-t0,t2-t1)
    assert cat1.nfields.count == 0
    assert nfield1b is not nfield1
    assert nfield2b is not nfield2
    assert nfield3b is not nfield3
    assert len(cat1.nfields.values()) == 0
    assert cat1.nfields.last_value is None

    # The field still holds this, since it hasn't been garbage collected.
    assert cat1.field is nfield3b
    del nfield3b  # Delete the version from this scope so it can be garbage collected.
    print('before garbage collection: cat1.field = ',cat1.field)
    gc.collect()
    print('after garbage collection: cat1.field = ',cat1.field)
    assert cat1.field is None

    # Check NotImplementedError for base classes.
    assert_raises(NotImplementedError, treecorr.Field)
    assert_raises(NotImplementedError, treecorr.SimpleField)
Code example #41
0
File: test_kmeans.py Project: rmjarvis/TreeCorr
def test_init_kmpp():
    # Test the init=kmeans++ option

    ngal = 100000
    s = 1.
    rng = np.random.RandomState(8675309)
    x = rng.normal(0,s, (ngal,) )
    y = rng.normal(0,s, (ngal,) )
    z = rng.normal(0,s, (ngal,) )
    cat = treecorr.Catalog(x=x, y=y, z=z)
    xyz = np.array([x, y, z]).T

    # Skip the refine_centers step.
    print('3d with init=kmeans++')
    npatch = 10
    field = cat.getNField()
    cen1 = field.kmeans_initialize_centers(npatch, 'kmeans++')
    #print('cen = ',cen1)
    assert cen1.shape == (npatch, 3)
    p1 = field.kmeans_assign_patches(cen1)
    print('patches = ',np.unique(p1))
    assert len(p1) == cat.ntot
    assert min(p1) == 0
    assert max(p1) == npatch-1
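
    # kmeans++ seeding picks the first center at random and each subsequent
    # center with probability proportional to the squared distance from the
    # nearest center chosen so far, which spreads the initial centers out.
    # The full run_kmeans call below should therefore only improve on these
    # seeds, which is what the rms-count and inertia comparisons check.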

    inertia1 = np.array([np.sum((xyz[p1==i] - cen1[i])**2) for i in range(npatch)])
    counts1 = np.array([np.sum(p1==i) for i in range(npatch)])
    print('counts = ',counts1)
    print('rms counts = ',np.std(counts1))
    print('total inertia = ',np.sum(inertia1))

    # Now run the normal way
    # Use a higher max_iter so the iteration has plenty of room to converge.
    p2 = field.run_kmeans(npatch, init='kmeans++', max_iter=1000)
    cen2 = np.array([xyz[p2==i].mean(axis=0) for i in range(npatch)])
    inertia2 = np.array([np.sum((xyz[p2==i] - cen2[i])**2) for i in range(npatch)])
    counts2 = np.array([np.sum(p2==i) for i in range(npatch)])
    print('rms counts => ',np.std(counts2))
    print('total inertia => ',np.sum(inertia2))
    assert np.std(counts2) < np.std(counts1)
    assert np.sum(inertia2) < np.sum(inertia1)

    # Use a field with lots of top level cells
    print('3d with init=kmeans++, min_top=10')
    field = cat.getNField(min_top=10)
    cen1 = field.kmeans_initialize_centers(npatch, 'kmeans++')
    #print('cen = ',cen1)
    assert cen1.shape == (npatch, 3)
    p1 = field.kmeans_assign_patches(cen1)
    print('patches = ',np.unique(p1))
    assert len(p1) == cat.ntot
    assert min(p1) == 0
    assert max(p1) == npatch-1

    inertia1 = np.array([np.sum((xyz[p1==i] - cen1[i])**2) for i in range(npatch)])
    counts1 = np.array([np.sum(p1==i) for i in range(npatch)])
    print('counts = ',counts1)
    print('rms counts = ',np.std(counts1))
    print('total inertia = ',np.sum(inertia1))

    # Now run the normal way
    p2 = field.run_kmeans(npatch, init='kmeans++', max_iter=1000)
    cen2 = np.array([xyz[p2==i].mean(axis=0) for i in range(npatch)])
    inertia2 = np.array([np.sum((xyz[p2==i] - cen2[i])**2) for i in range(npatch)])
    counts2 = np.array([np.sum(p2==i) for i in range(npatch)])
    print('rms counts => ',np.std(counts2))
    print('total inertia => ',np.sum(inertia2))
    assert np.std(counts2) < np.std(counts1)
    assert np.sum(inertia2) < np.sum(inertia1)

    # Repeat in 2d
    print('2d with init=kmeans++')
    cat = treecorr.Catalog(x=x, y=y)
    xy = np.array([x, y]).T
    field = cat.getNField()
    cen1 = field.kmeans_initialize_centers(npatch, 'kmeans++')
    #print('cen = ',cen1)
    assert cen1.shape == (npatch, 2)
    p1 = field.kmeans_assign_patches(cen1)
    print('patches = ',np.unique(p1))
    assert len(p1) == cat.ntot
    assert min(p1) == 0
    assert max(p1) == npatch-1

    inertia1 = np.array([np.sum((xy[p1==i] - cen1[i])**2) for i in range(npatch)])
    counts1 = np.array([np.sum(p1==i) for i in range(npatch)])
    print('counts = ',counts1)
    print('rms counts = ',np.std(counts1))
    print('total inertia = ',np.sum(inertia1))

    # Now run the normal way
    p2 = field.run_kmeans(npatch, init='kmeans++', max_iter=1000)
    cen2 = np.array([xy[p2==i].mean(axis=0) for i in range(npatch)])
    inertia2 = np.array([np.sum((xy[p2==i] - cen2[i])**2) for i in range(npatch)])
    counts2 = np.array([np.sum(p2==i) for i in range(npatch)])
    print('rms counts => ',np.std(counts2))
    print('total inertia => ',np.sum(inertia2))
    assert np.std(counts2) < np.std(counts1)
    assert np.sum(inertia2) < np.sum(inertia1)

    # Repeat in spherical
    print('spher with init=kmeans++')
    ra, dec = coord.CelestialCoord.xyz_to_radec(x,y,z)
    cat = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad')
    xyz = np.array([cat.x, cat.y, cat.z]).T
    field = cat.getNField()
    cen1 = field.kmeans_initialize_centers(npatch, 'kmeans++')
    #print('cen = ',cen1)
    assert cen1.shape == (npatch, 3)
    p1 = field.kmeans_assign_patches(cen1)
    print('patches = ',np.unique(p1))
    assert len(p1) == cat.ntot
    assert min(p1) == 0
    assert max(p1) == npatch-1

    inertia1 = np.array([np.sum((xyz[p1==i] - cen1[i])**2) for i in range(npatch)])
    counts1 = np.array([np.sum(p1==i) for i in range(npatch)])
    print('counts = ',counts1)
    print('rms counts = ',np.std(counts1))
    print('total inertia = ',np.sum(inertia1))

    # Now run the normal way
    p2 = field.run_kmeans(npatch, init='kmeans++', max_iter=1000)
    cen2 = np.array([xyz[p2==i].mean(axis=0) for i in range(npatch)])
    inertia2 = np.array([np.sum((xyz[p2==i] - cen2[i])**2) for i in range(npatch)])
    counts2 = np.array([np.sum(p2==i) for i in range(npatch)])
    print('rms counts => ',np.std(counts2))
    print('total inertia => ',np.sum(inertia2))
    assert np.std(counts2) < np.std(counts1)
    assert np.sum(inertia2) < np.sum(inertia1)

    with assert_raises(ValueError):
        field.kmeans_initialize_centers(npatch=ngal*2, init='kmeans++')
    with assert_raises(ValueError):
        field.kmeans_initialize_centers(npatch=ngal+1, init='kmeans++')
    with assert_raises(ValueError):
        field.kmeans_initialize_centers(npatch=0, init='kmeans++')
    with assert_raises(ValueError):
        field.kmeans_initialize_centers(npatch=-100, init='kmeans++')

    # Should be valid to give npatch = 1, although not particularly useful.
    cen_1 = field.kmeans_initialize_centers(npatch=1, init='kmeans++')
    p_1 = field.kmeans_assign_patches(cen_1)
    np.testing.assert_equal(p_1, np.zeros(ngal))

    # If same number of patches as galaxies, each galaxy gets a patch.
    # (This is stupid of course, but check that it doesn't fail.)
    # Do this with fewer points though, since it's not particularly fast with N=10^5.
    n = 100
    cat = treecorr.Catalog(ra=ra[:n], dec=dec[:n], ra_units='rad', dec_units='rad')
    field = cat.getNField()
    cen_n = field.kmeans_initialize_centers(npatch=n, init='kmeans++')
    p_n = field.kmeans_assign_patches(cen_n)
    np.testing.assert_equal(sorted(p_n), list(range(n)))