Code example #1
import argparse
import os

import numpy as np
import zebu
# Import path assumed from the dsigma package layout used by this project.
from dsigma.jackknife import add_continous_fields, jackknife_field_centers

# Reconstructed argument parser: only the last help string survives in the
# original listing, so the option names and defaults below are assumptions.
parser = argparse.ArgumentParser()
parser.add_argument('--stage', type=int, help='stage of the analysis',
                    default=0)
parser.add_argument('--source-bin', type=int, help='tomographic source bin',
                    default=0)
parser.add_argument('--unit-response', action='store_true',
                    help='use shapes w/o response bias, i.e. unit response')
parser.add_argument('--region', type=int, help='region of the sky', default=1)
args = parser.parse_args()

# %%

output_directory = os.path.join('region_{}'.format(args.region), 'precompute')

os.makedirs(output_directory, exist_ok=True)

try:
    centers = np.genfromtxt(os.path.join(output_directory, 'centers.csv'))
except OSError:
    table_l = zebu.read_mock_data('random', 0)
    table_l = add_continous_fields(table_l, distance_threshold=1)
    centers = jackknife_field_centers(table_l, n_jk=100)
    np.savetxt(os.path.join(output_directory, 'centers.csv'), centers)

# %%

source_magnification = args.stage >= 2
lens_magnification = args.stage >= 3
fiber_assignment = args.stage >= 4

if args.stage == 0:
    survey_list = ['gen']
else:
    if args.source_bin < 4:
        survey_list = ['des', 'hsc', 'kids']
    else:
        # Assumed completion (the listing is truncated here); only KiDS has
        # a fifth tomographic source bin.
        survey_list = ['kids']
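
The `--stage` option above acts as a cumulative switch: each stage keeps the effects of all previous stages and enables one more. A minimal sketch of that mapping, restating the flags from the listing rather than adding anything new:

# Stage 0 is the idealized 'gen' survey; each higher stage adds one effect.
for stage in range(5):
    print(stage, dict(source_magnification=stage >= 2,
                      lens_magnification=stage >= 3,
                      fiber_assignment=stage >= 4))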
Code example #2
File: run.py  Project: rama270677/Zebu
    # Reconstructed opening, mirroring the randoms call below: the original
    # listing starts mid-call here.
    table_l_pre = precompute_catalog(table_l,
                                     table_s,
                                     rp_bins,
                                     n_jobs=40,
                                     comoving=False,
                                     table_c=table_c,
                                     cosmology=FlatLambdaCDM(H0=70, Om0=0.3))
    print('Working on randoms in bin {}...'.format(lens_bin + 1))
    table_r_pre = precompute_catalog(table_r,
                                     table_s,
                                     rp_bins,
                                     n_jobs=40,
                                     comoving=False,
                                     table_c=table_c,
                                     cosmology=FlatLambdaCDM(H0=70, Om0=0.3))

    # Create the jackknife fields.
    table_l_pre = add_continous_fields(table_l_pre, distance_threshold=2)
    centers = jackknife_field_centers(table_l_pre, 100)
    table_l_pre = add_jackknife_fields(table_l_pre, centers)
    table_r_pre = add_jackknife_fields(table_r_pre, centers)

    kwargs = {
        'return_table': True,
        'shear_bias_correction': True,
        'random_subtraction': True,
        'photo_z_dilution_correction': True,
        'table_r': table_r_pre
    }

    result = excess_surface_density(table_l_pre, **kwargs)
    kwargs['return_table'] = False
    ds_cov = jackknife_resampling(excess_surface_density, table_l_pre,
                                  **kwargs)
    result['ds_err'] = np.sqrt(np.diag(ds_cov))
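
Downstream fits consume the jackknife covariance `ds_cov` computed above. A quick sanity check, shown here as a minimal numpy sketch that is not part of the original script, is to normalize it into a correlation matrix:

import numpy as np

# Normalize the covariance into a correlation matrix for inspection.
ds_err = np.sqrt(np.diag(ds_cov))
ds_corr = ds_cov / np.outer(ds_err, ds_err)
assert np.allclose(np.diag(ds_corr), 1.0)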
Code example #3
File: prepare.py  Project: dr-guangtou/jianbing
def prepare_lens_catalog(cat,
                         src,
                         rp_bins=None,
                         calib=None,
                         z_min=0.19,
                         z_max=0.52,
                         z='z',
                         ra='ra',
                         dec='dec',
                         comoving=False,
                         n_jobs=4,
                         field=None,
                         w_sys=None,
                         r_max_mpc=2.0,
                         verbose=True,
                         col_used=None,
                         cosmology=None):
    """Prepare the lens catalog:
        1. Select lenses in the right redshift range defined by `z_min` < z <= `z_max`.
        2. Match to the source catalog using the KDTree. Matching radius is defined as `r_max_mpc`.
        3. Prepare the catalog for pre-computation: adding `field` and lense weight if necessary.
    """
    # Generate a KDTree to match
    src_tree = catalog.catalog_to_kdtree(src, 'ra', 'dec')

    # Cosmology parameters
    if cosmology is None:
        cosmology = FlatLambdaCDM(H0=70.0, Om0=0.3)

    # Radial bins
    if rp_bins is None:
        rp_bins = np.logspace(np.log10(0.1), np.log10(20), 11)

    # Redshift cut
    cat_use = cat[(cat[z] > z_min) & (cat[z] <= z_max)]
    if len(cat_use) < 1:
        print("# No useful objects left after the redshift cut!")
        return
    if verbose:
        print("# {:d} / {:d} objects left after the redshift cut".format(
            len(cat_use), len(cat)))

    # Match to the source catalog
    # Maximum matching radius in deg
    r_max_deg = (cosmology.arcsec_per_kpc_proper(cat_use[z]) *
                 (r_max_mpc * u.Mpc).to(u.kpc)).to(u.degree).value

    # Maximum matching radius in the 3-D Cartesian coordinates used by the KDTree
    r_max_3d = np.sqrt(2 - 2 * np.cos(np.deg2rad(r_max_deg)))

    # Get the KDTree of the lens catalog
    cat_kdtree = catalog.catalog_to_kdtree(cat_use, ra, dec)

    cat_index = list(
        itertools.chain(
            *src_tree.query_ball_tree(cat_kdtree, r=r_max_3d.max())))
    cat_use = cat_use[np.unique(np.asarray(cat_index))]

    if len(cat_use) < 1:
        print("# No useful objects left after the source catalog match!")
        return
    if verbose:
        print("# {:d} / {:d} objects left after the source catalog match!".
              format(len(cat_use), len(cat)))

    # Standardize the coordinate column names.
    cat_use.rename_column(ra, 'ra')
    cat_use.rename_column(dec, 'dec')

    # Assign contiguous sky fields if none are provided.
    if field is None:
        cat_pre = add_continous_fields(cat_use,
                                       n_samples=10000,
                                       distance_threshold=1.0)
        field = 'field'
    else:
        # `cat_pre` must also be defined when a `field` column is supplied.
        cat_pre = cat_use

    # Add a placeholder for the systematic weight if necessary
    if w_sys is None:
        w_sys = 1.0

    # Organize the columns that need to be transferred
    if col_used is None:
        col_used = cat_pre.colnames

    for col in ['ra', 'dec', 'z', z, field, 'w_sys']:
        if col in col_used:
            col_used.remove(col)

    col_kwargs = {}
    for col in col_used:
        col_kwargs[col.lower()] = col

    # Get the catalog ready for dsigma
    cat_pre = helpers.dsigma_table(cat_pre,
                                   'lens',
                                   ra='ra',
                                   dec='dec',
                                   z=z,
                                   field=field,
                                   w_sys=w_sys,
                                   **col_kwargs)

    # Pre-computation for the lenses
    cat_pre = add_precompute_results(cat_pre,
                                     src,
                                     rp_bins,
                                     table_c=calib,
                                     cosmology=cosmology,
                                     comoving=comoving,
                                     n_jobs=n_jobs)

    # Remove the ones with no useful lensing information
    cat_pre['n_s_tot'] = np.sum(cat_pre['sum 1'], axis=1)
    cat_pre = cat_pre[cat_pre['n_s_tot'] > 0]

    return cat_pre
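
A minimal usage sketch for `prepare_lens_catalog`. The input tables `cat_lens`, `cat_src`, and `cat_calib` are hypothetical placeholders for catalogs loaded elsewhere; the radial bins simply restate the function's default:

import numpy as np

# Hypothetical call; cat_lens, cat_src, and cat_calib are assumed inputs.
rp_bins = np.logspace(np.log10(0.1), np.log10(20), 11)
cat_pre = prepare_lens_catalog(cat_lens, cat_src, rp_bins=rp_bins,
                               calib=cat_calib, z_min=0.19, z_max=0.52,
                               n_jobs=4)
if cat_pre is not None:
    print('{:d} lenses carry lensing information'.format(len(cat_pre)))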
Code example #4
        if table_c_part is not None:  # guard assumed; the snippet starts mid-block
            table_l_part = table_l_part[
                table_l_part['z'] < np.amax(table_c_part['z_l_max'])]

        if len(table_l_part) == 0:
            continue

        if np.amin(table_l_part['z']) >= np.amax(table_s_part['z_l_max']):
            continue

        add_precompute_results(table_l_part, table_s_part, rp_bins,
                               **precompute_kwargs)

        # Create the jackknife fields.
        table_l_part['n_s_tot'] = np.sum(table_l_part['sum 1'], axis=1)
        table_l_part = table_l_part[table_l_part['n_s_tot'] > 0]
        table_l_part = add_continous_fields(table_l_part, distance_threshold=2)
        centers = jackknife_field_centers(table_l_part, 100)
        table_l_part = add_jackknife_fields(table_l_part, centers)

        stacking_kwargs['return_table'] = True
        result = excess_surface_density(table_l_part, **stacking_kwargs)
        stacking_kwargs['return_table'] = False
        ds_cov = jackknife_resampling(excess_surface_density, table_l_part,
                                      **stacking_kwargs)
        result['ds_err'] = np.sqrt(np.diag(ds_cov))

        fname_base = '{}_l{}_s{}'.format(args.survey.lower(), lens_bin,
                                         source_bin)

        np.savetxt(os.path.join('results', fname_base + '_cov.csv'), ds_cov)
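
For reference, a hypothetical read-back of the covariance saved above, inverting the `np.savetxt` call; the `fname_base` value is just an example combination:

import os
import numpy as np

# Reload a saved covariance matrix and recover the per-bin errors.
fname_base = 'hsc_l0_s0'  # example survey/lens-bin/source-bin combination
ds_cov = np.genfromtxt(os.path.join('results', fname_base + '_cov.csv'))
ds_err = np.sqrt(np.diag(ds_cov))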
Code example #5
File: wlensing.py  Project: dr-guangtou/jianbing
def stack_dsigma_profile(lens,
                         rand,
                         mask=None,
                         n_rand=None,
                         use_dsigma=False,
                         bootstrap=False,
                         n_boot=500,
                         jackknife=True,
                         n_jobs=None,
                         n_jk=45):
    """Get the DeltaSigma profile of a sample of lens."""
    # Check to see the setup for lens and random
    assert np.all(lens.meta['bins'] == rand.meta['bins'])
    assert lens.meta['H0'] == rand.meta['H0']
    assert lens.meta['Om0'] == rand.meta['Om0']
    assert (lens['n_s_tot'] > 0).sum() == len(lens)
    assert (rand['n_s_tot'] > 0).sum() == len(rand)

    # Apply the mask
    lens_use = lens if mask is None else lens[mask]

    # Randomly downsample the random objects if necessary
    if n_rand is not None and n_rand < len(rand):
        rand_use = Table(np.random.choice(rand, size=n_rand, replace=False))
        rand_use.meta = rand.meta
    else:
        rand_use = rand

    # Get the stacked lensing profiles
    if use_dsigma:
        # Correction configuration for the HSC-style DeltaSigma measurement
        kwargs = {
            'return_table': True,
            'shear_bias_correction': True,
            'shear_responsivity_correction': True,
            'selection_bias_correction': True,
            'boost_correction': False,
            'random_subtraction': True,
            'photo_z_dilution_correction': True,
            'rotation': False,
            'table_r': rand_use
        }

        result = excess_surface_density(lens_use, **kwargs)
    else:
        result = Table()
        result['ds'] = dsigma_no_wsys(lens_use, rand_use)

    if jackknife:
        if n_jk <= 5:
            raise Exception(
                "Number of jackknife fields is too small, should be > 5")

        if len(lens_use) <= 5:
            print("Number of lenses <= 5, cannot use jackknife resampling")
            jackknife = False
        elif len(lens_use) <= n_jk - 5:
            # Deal with small samples by reducing the number of fields.
            n_jk = len(lens_use) - 5

    # Skipped if the sample was too small for jackknife resampling above.
    if jackknife:
        # Add consistent jackknife fields to both the lens and random catalogs
        add_continous_fields(lens_use, distance_threshold=2)
        centers = jackknife_field_centers(lens_use, n_jk, weight='n_s_tot')
        add_jackknife_fields(lens_use, centers)
        add_jackknife_fields(rand_use, centers)

        # Estimate the covariance matrix using jackknife resampling
        cov_jk = dsigma_jk_resample(lens_use, rand_use, n_jobs=n_jobs)

        result['ds_err_jk'] = np.sqrt(np.diag(cov_jk))
        result.meta['cov_jk'] = cov_jk
        result.meta['s2n_jk'] = np.sqrt(
            np.dot(result['ds'].T.dot(np.linalg.inv(cov_jk)), result['ds']))

    # Estimate the covariance matrix using Bootstrap resampling
    if bootstrap:
        cov_bt = dsigma_bootstrap(lens_use,
                                  rand_use,
                                  n_boot=n_boot,
                                  n_jobs=n_jobs)

        result['ds_err_bt'] = np.sqrt(np.diag(cov_bt))
        result.meta['cov_bt'] = cov_bt
        result.meta['s2n_bt'] = np.sqrt(
            np.dot(result['ds'].T.dot(np.linalg.inv(cov_bt)), result['ds']))

    return result
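
A minimal sketch of calling `stack_dsigma_profile`, assuming `lens_pre` and `rand_pre` are precomputed dsigma tables (for instance, outputs of `prepare_lens_catalog` above) with matching radial bins and cosmology in their metadata:

# Hypothetical usage; lens_pre and rand_pre are assumed precomputed tables.
result = stack_dsigma_profile(lens_pre, rand_pre, jackknife=True, n_jk=45)
print(result['ds'])            # stacked DeltaSigma profile
print(result['ds_err_jk'])     # jackknife errors from the covariance diagonal
print(result.meta['s2n_jk'])   # total signal-to-noise estimate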