def make_unitless(xspec_arr, radius_arr=None, ndim=None):
    """multiply by (surface area of the unit sphere in ndim) / (2 pi)^ndim * |k|^ndim
    (e.g. k^3 / (2 pi^2) in 3D)
    """
    if radius_arr is None:
        radius_arr = binning.radius_array(xspec_arr)

    if ndim is None:
        ndim = xspec_arr.ndim

    # surface area of the unit sphere in ndim dimensions: 2 pi^(ndim/2) / gamma(ndim/2)
    factor = 2. * math.pi ** (ndim / 2.) / scipy.special.gamma(ndim / 2.)
    factor /= (2. * math.pi) ** ndim
    return xspec_arr * radius_arr ** ndim * factor
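

# A quick sanity check (a hedged sketch, not part of the original module): for
# ndim = 3 the factor above reduces to 4 pi / (2 pi)^3 = 1 / (2 pi^2), so the
# result is the dimensionless k^3 P(k) / (2 pi^2) quoted in the docstring.
def _check_unitless_factor_3d():
    import math
    import scipy.special
    factor = 2. * math.pi ** (3 / 2.) / scipy.special.gamma(3 / 2.)
    factor /= (2. * math.pi) ** 3
    assert abs(factor - 1. / (2. * math.pi ** 2)) < 1e-12
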
def sum_window(argt):
    """A given bin in 2D k-space (labelled by bin_index_2d) is the sum over a
    "washer" in 3D k-space, a band in k_parallel and an annulus in k_x, k_y.
    Let all of the 3D bins in k_space be indexed by bin_3d. The window function
    is centered at k_3d=0, and let these indices defining the center of the 3d
    volume be given in center_3d.
    TODO: replace this with NlogN convolution
    TODO: implement 0-padded roll instead of np.roll, algebra.roll_zeropad()
    """
    (filename, bin_index_2d, k_2d, bin_3d, center_3d) = argt
    # load the cross-power of the weighting functions
    xspec = algebra.make_vect(algebra.load(filename))
    windowsum = algebra.zeros_like(xspec)

    # bin_3d: array of 3D k-space index triples that fall in this 2D bin
    num_3dbins_in_2dbin = bin_3d.shape[0]

    print "%d: summing over %d bins" % (bin_index_2d, num_3dbins_in_2dbin)

    for bin_3dind in range(num_3dbins_in_2dbin):
        # TODO: is this sign right, does it matter?
        off = bin_3d[bin_3dind] - center_3d
        #print off
        windowsum += np.roll(np.roll(np.roll(xspec, off[0], axis=0),
                                     off[1], axis=1),
                             off[2], axis=2)

    k_perp_arr = binning.radius_array(xspec, zero_axes=[0])
    k_parallel_arr = binning.radius_array(xspec, zero_axes=[1, 2])
    kx_2d = copy.deepcopy(k_2d)
    ky_2d = copy.deepcopy(k_2d)
    counts_histo_2d, binavg_2d = binning.bin_an_array_2d(windowsum,
                                                         k_perp_arr,
                                                         k_parallel_arr,
                                                         kx_2d, ky_2d)

    return (bin_index_2d, counts_histo_2d, binavg_2d)
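

# A minimal sketch of the zero-padded roll mentioned in the TODO above,
# assuming only numpy; the project's algebra.roll_zeropad() may differ in name
# and signature.
def _roll_zeropad_sketch(arr, shift, axis):
    """Shift arr by shift along axis, filling the vacated entries with zeros
    instead of wrapping them around (unlike np.roll).
    """
    import numpy as np
    if shift == 0:
        return arr.copy()
    out = np.zeros_like(arr)
    src = [slice(None)] * arr.ndim
    dst = [slice(None)] * arr.ndim
    if shift > 0:
        src[axis] = slice(0, -shift)
        dst[axis] = slice(shift, None)
    else:
        src[axis] = slice(-shift, None)
        dst[axis] = slice(0, shift)
    out[tuple(dst)] = arr[tuple(src)]
    return out
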
def calculate_mixing(weight_file1, weight_file2, bins, xspec_fileout,
                     mixing_fileout,
                     unitless=False, refinement=2, pad=5, order=1,
                     window='blackman', zero_pad=False, identity_test=False):
    print "loading the weights and converting to physical coordinates"
    weight1_obs = algebra.make_vect(algebra.load(weight_file1))
    weight1 = bh.repackage_kiyo(pg.physical_grid(
                                weight1_obs,
                                refinement=refinement,
                                pad=pad, order=order))

    weight2_obs = algebra.make_vect(algebra.load(weight_file2))
    weight2 = bh.repackage_kiyo(pg.physical_grid(
                                weight2_obs,
                                refinement=refinement,
                                pad=pad, order=order))

    if window:
        window_function = fftutil.window_nd(weight1.shape, name=window)
        weight1 *= window_function
        weight2 *= window_function

    print "calculating the cross-power of the spatial weighting functions"
    arr1 = algebra.ones_like(weight1)
    arr2 = algebra.ones_like(weight2)

    # no window applied here (applied above)
    xspec = pe.cross_power_est(weight1, weight2, arr1, arr2,
                               window=None, nonorm=True)

    # for each point in the cube, find |k|, k_perp, k_parallel
    # TODO: speed this up by using one direct numpy call (not limiting)
    k_mag_arr = binning.radius_array(xspec)
    k_perp_arr = binning.radius_array(xspec, zero_axes=[0])
    k_parallel_arr = binning.radius_array(xspec, zero_axes=[1, 2])

    if unitless:
        xspec = pe.make_unitless(xspec, radius_arr=k_mag_arr)

    # NOTE: assuming lowest k bin has only one point in 3D k-space
    # could make this floor of dimensions divided by 2 also
    center_3d = np.transpose(np.transpose(np.where(k_mag_arr == 0.))[0])

    # In the estimator, we divide by sum(w1 * w2) to get most of the effect
    # of the weighting. The mixing matrix here can be thought of as a
    # correction to that diagonal-only estimate.
    leakage_ratio = xspec[center_3d[0], center_3d[1], center_3d[2]] / \
                    np.sum(weight1 * weight2)
    print "power leakage ratio: %10.5g" % leakage_ratio

    xspec /= np.sum(weight1 * weight2)

    print "partitioning the 3D kspace up into the 2D k bins"
    (kflat, ret_indices) = bin_indices_2d(k_perp_arr, k_parallel_arr,
                                          bins, bins)

    # perform a test where the window function is a delta function at the
    # origin so that the mixing matrix is identity
    if identity_test:
        xspec = algebra.zeros_like(xspec)
        xspec[center_3d[0], center_3d[1], center_3d[2]] = 1.

    # now save the window cross-power for downstream pooled users
    algebra.save(xspec_fileout, xspec)

    runlist = []
    for bin_index in range(kflat.shape[0]):
        bin_3d = ret_indices[repr(bin_index)]
        if bin_3d is not None:
            runlist.append((xspec_fileout, bin_index, bins, bin_3d, center_3d))

    # leave a few cores free, but never ask for fewer than one worker process
    pool = multiprocessing.Pool(processes=max(1, multiprocessing.cpu_count() - 4))
    # the longest runs get pushed to the end; randomize for better job packing
    random.shuffle(runlist)
    results = pool.map(sum_window, runlist)
    #gnuplot_single_slice(runlist[0])  # for troubleshooting

    # now save the results for post-processing
    params = {"unitless": unitless, "refinement": refinement, "pad": pad,
              "order": order, "window": window, "zero_pad": zero_pad,
              "identity_test": identity_test, "weight_file1": weight_file1,
              "weight_file2": weight_file2, "bins": bins}

    outshelve = shelve.open(mixing_fileout, "n")
    outshelve["params"] = params        # parameters for this run
    outshelve["weight1"] = weight1      # weight map 1
    outshelve["weight2"] = weight2      # weight map 2
    outshelve["xspec"] = xspec          # copy of the weight spectra
    outshelve["kflat"] = kflat          # 2D k bin vector
    outshelve["bins_3d"] = ret_indices  # indices to k3d for a 2d k bin
    outshelve["results"] = results      # mixing matrix columns
    outshelve.close()
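

# A hedged sketch (not part of the original pipeline) of reading back the
# shelve written above: "results" holds one
# (bin_index_2d, counts_histo_2d, binavg_2d) tuple per populated 2D k bin, as
# returned by sum_window; the project's actual post-processing may differ.
def _load_mixing_columns_sketch(mixing_fileout):
    import shelve
    mixingshelve = shelve.open(mixing_fileout, "r")
    columns = {}
    for (bin_index_2d, counts_histo_2d, binavg_2d) in mixingshelve["results"]:
        # each binavg_2d gives one column of the 2D mixing matrix
        columns[bin_index_2d] = (counts_histo_2d, binavg_2d)
    mixingshelve.close()
    return columns
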
def calculate_xspec(cube1, cube2, weight1, weight2,
                    window="blackman", unitless=True, bins=None,
                    truncate=False, nbins=40, logbins=True, return_3d=False):

    print "finding the signal power spectrum"
    pwrspec3d_signal = cross_power_est(cube1, cube2, weight1, weight2,
                                       window=window)

    radius_arr = binning.radius_array(pwrspec3d_signal)

    # find the k_perp by not including k_nu in the distance
    radius_arr_perp = binning.radius_array(pwrspec3d_signal,
                                           zero_axes=[0])

    # find the k_parallel by not including k_RA,Dec in the distance
    radius_arr_parallel = binning.radius_array(pwrspec3d_signal,
                                               zero_axes=[1, 2])

    if bins is None:
        bins = binning.suggest_bins(pwrspec3d_signal,
                                    truncate=truncate,
                                    logbins=logbins,
                                    nbins=nbins,
                                    radius_arr=radius_arr)

    if unitless:
        print "making the power spectrum unitless"
        pwrspec3d_signal = make_unitless(pwrspec3d_signal,
                                         radius_arr=radius_arr)

    print "calculating the 2D histogram"
    # TODO: do better independent binning; for now:
    bins_x = copy.deepcopy(bins)
    bins_y = copy.deepcopy(bins)
    counts_histo_2d, binavg_2d = binning.bin_an_array_2d(pwrspec3d_signal,
                                                         radius_arr_perp,
                                                         radius_arr_parallel,
                                                         bins_x, bins_y)

    print "calculating the 1D histogram"
    counts_histo, binavg = binning.bin_an_array(pwrspec3d_signal, bins,
                                                radius_arr=radius_arr)

    bin_left_x, bin_center_x, bin_right_x = binning.bin_edges(bins_x,
                                                              log=logbins)

    bin_left_y, bin_center_y, bin_right_y = binning.bin_edges(bins_y,
                                                              log=logbins)

    bin_left, bin_center, bin_right = binning.bin_edges(bins, log=logbins)

    pwrspec2d_product = {}
    pwrspec2d_product['bin_x_left'] = bin_left_x
    pwrspec2d_product['bin_x_center'] = bin_center_x
    pwrspec2d_product['bin_x_right'] = bin_right_x
    pwrspec2d_product['bin_y_left'] = bin_left_y
    pwrspec2d_product['bin_y_center'] = bin_center_y
    pwrspec2d_product['bin_y_right'] = bin_right_y
    pwrspec2d_product['counts_histo'] = counts_histo_2d
    pwrspec2d_product['binavg'] = binavg_2d

    pwrspec1d_product = {}
    pwrspec1d_product['bin_left'] = bin_left
    pwrspec1d_product['bin_center'] = bin_center
    pwrspec1d_product['bin_right'] = bin_right
    pwrspec1d_product['counts_histo'] = counts_histo
    pwrspec1d_product['binavg'] = binavg

    if not return_3d:
        return pwrspec2d_product, pwrspec1d_product
    else:
        return pwrspec3d_signal, pwrspec2d_product, pwrspec1d_product
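

# A minimal usage sketch (not from the original module): dump the 1D binned
# spectrum returned by calculate_xspec above as
# "k_left k_center k_right counts power" rows.
def _print_1d_pwrspec_sketch(pwrspec1d_product):
    entries = zip(pwrspec1d_product['bin_left'],
                  pwrspec1d_product['bin_center'],
                  pwrspec1d_product['bin_right'],
                  pwrspec1d_product['counts_histo'],
                  pwrspec1d_product['binavg'])
    for (kleft, kcenter, kright, counts, power) in entries:
        print "%10.5g %10.5g %10.5g %10.5g %10.5g" % \
              (kleft, kcenter, kright, counts, power)
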
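
# A variant of calculate_xspec that does the 1D binning first and frees the
# radius arrays between steps (del + gc.collect) to reduce peak memory use.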
def calculate_xspec(cube1, cube2, weight1, weight2,
                    window="blackman", unitless=True, bins=None,
                    truncate=False, nbins=40, logbins=True, return_3d=False):

    print "finding the signal power spectrum"
    pwrspec3d_signal = cross_power_est(cube1, cube2, weight1, weight2,
                                       window=window)

    radius_arr = binning.radius_array(pwrspec3d_signal)

    if bins is None:
        bins = binning.suggest_bins(pwrspec3d_signal,
                                    truncate=truncate,
                                    logbins=logbins,
                                    nbins=nbins,
                                    radius_arr=radius_arr)

    if unitless:
        print "making the power spectrum unitless"
        pwrspec3d_signal = make_unitless(pwrspec3d_signal,
                                         radius_arr=radius_arr)

    print "calculating the 1D histogram"
    counts_histo, binavg = binning.bin_an_array(pwrspec3d_signal, bins,
                                                radius_arr=radius_arr)

    del radius_arr
    gc.collect()

    print "calculating the 2D histogram"
    # find the k_perp by not including k_nu in the distance
    radius_arr_perp = binning.radius_array(pwrspec3d_signal,
                                           zero_axes=[0])

    # find the k_parallel by not including k_RA,Dec in the distance
    radius_arr_parallel = binning.radius_array(pwrspec3d_signal,
                                               zero_axes=[1, 2])

    # TODO: do better independent binning; for now:
    bins_x = copy.deepcopy(bins)
    bins_y = copy.deepcopy(bins)
    counts_histo_2d, binavg_2d = binning.bin_an_array_2d(pwrspec3d_signal,
                                                         radius_arr_perp,
                                                         radius_arr_parallel,
                                                         bins_x, bins_y)

    del radius_arr_perp
    del radius_arr_parallel
    gc.collect()

    bin_left_x, bin_center_x, bin_right_x = binning.bin_edges(bins_x,
                                                              log=logbins)

    bin_left_y, bin_center_y, bin_right_y = binning.bin_edges(bins_y,
                                                              log=logbins)

    bin_left, bin_center, bin_right = binning.bin_edges(bins, log=logbins)

    pwrspec2d_product = {}
    pwrspec2d_product['bin_x_left'] = bin_left_x
    pwrspec2d_product['bin_x_center'] = bin_center_x
    pwrspec2d_product['bin_x_right'] = bin_right_x
    pwrspec2d_product['bin_y_left'] = bin_left_y
    pwrspec2d_product['bin_y_center'] = bin_center_y
    pwrspec2d_product['bin_y_right'] = bin_right_y
    pwrspec2d_product['counts_histo'] = counts_histo_2d
    pwrspec2d_product['binavg'] = binavg_2d

    pwrspec1d_product = {}
    pwrspec1d_product['bin_left'] = bin_left
    pwrspec1d_product['bin_center'] = bin_center
    pwrspec1d_product['bin_right'] = bin_right
    pwrspec1d_product['counts_histo'] = counts_histo
    pwrspec1d_product['binavg'] = binavg

    if not return_3d:
        return pwrspec2d_product, pwrspec1d_product
    else:
        return pwrspec3d_signal, pwrspec2d_product, pwrspec1d_product