Example #1
def generate_windows(window="blackman"):
    datapath_db = data_paths.DataPath()
    # first generate a window for the full physical volume
    filename = datapath_db.fetch('simideal_15hr_physical', intend_read=True,
                                 pick='1')
    print filename
    pcube = algebra.make_vect(algebra.load(filename))
    pwindow = algebra.make_vect(fftutil.window_nd(pcube.shape, name=window),
                                axis_names=('freq', 'ra', 'dec'))
    pwindow.copy_axis_info(pcube)
    print pwindow.shape
    algebra.save("physical_window.npy", pwindow)

    # now generate one for the observed region and project onto the physical
    # volume.
    filename = datapath_db.fetch('simideal_15hr_beam', intend_read=True,
                                 pick='1')
    print filename
    ocube = algebra.make_vect(algebra.load(filename))
    owindow = algebra.make_vect(fftutil.window_nd(ocube.shape, name=window),
                                axis_names=('freq', 'ra', 'dec'))
    owindow.copy_axis_info(ocube)
    print owindow.shape
    print owindow.axes
    algebra.save("observed_window.npy", owindow)
    pwindow = physical_gridding.physical_grid(owindow, refinement=2)
    print pwindow.shape
    algebra.save("observed_window_physreg.npy", pwindow)
Example #2
def cross_power_est_highmem(arr1, arr2, weight1, weight2,
                    window="blackman", nonorm=False):
    """Calculate the cross-power spectrum of a two nD fields.

    The arrays must be identical and have the same length (physically
    and in pixel number) along each axis.

    Same goal as above without the emphasis on saving memory.
    This is the "tried and true" legacy function.
    """
    if window:
        window_function = fftutil.window_nd(arr1.shape, name=window)
        weight1 *= window_function
        weight2 *= window_function

    warr1 = arr1 * weight1
    warr2 = arr2 * weight2
    ndim = arr1.ndim

    fft_arr1 = np.fft.fftshift(np.fft.fftn(warr1))
    fft_arr2 = np.fft.fftshift(np.fft.fftn(warr2))
    xspec = fft_arr1 * fft_arr2.conj()
    xspec = xspec.real

    # correct for the weighting
    product_weight = weight1 * weight2
    xspec /= np.sum(product_weight)

    # make the axes
    k_axes = tuple(["k_" + axis_name for axis_name in arr1.axes])
    xspec_arr = algebra.make_vect(xspec, axis_names=k_axes)

    info = {'axes': k_axes, 'type': 'vect'}
    width = np.zeros(ndim)
    for axis_index in range(ndim):
        n_axis = arr1.shape[axis_index]
        axis_name = arr1.axes[axis_index]
        axis_vector = arr1.get_axis(axis_name)
        delta_axis = abs(axis_vector[1] - axis_vector[0])
        width[axis_index] = delta_axis

        k_axis = np.fft.fftshift(np.fft.fftfreq(n_axis, d=delta_axis))
        k_axis *= 2. * math.pi
        delta_k_axis = abs(k_axis[1] - k_axis[0])

        k_name = k_axes[axis_index]
        info[k_name + "_delta"] = delta_k_axis
        info[k_name + "_centre"] = 0.
        #print k_axis
        #print k_name, n_axis, delta_axis

    xspec_arr.info = info
    #print xspec_arr.get_axis("k_dec")

    if not nonorm:
        xspec_arr *= width.prod()

    return xspec_arr
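The axis loop above converts each pixel spacing into its Fourier-conjugate axis via k = 2*pi*fftshift(fftfreq(n, d=delta_x)), so the bin width is delta_k = 2*pi / (n * delta_x). A small stand-alone check of that relation, with illustrative numbers only:

import math
import numpy as np

n_axis = 64
delta_axis = 0.5  # illustrative pixel spacing

k_axis = np.fft.fftshift(np.fft.fftfreq(n_axis, d=delta_axis)) * 2. * math.pi
delta_k_axis = abs(k_axis[1] - k_axis[0])

# delta_k = 2 * pi / (n * delta_x)
assert np.isclose(delta_k_axis, 2. * math.pi / (n_axis * delta_axis))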
Example #4
    def estimate_ps_3d(self, window="blackman"):

        window_function = fftutil.window_nd(self.nbox1.shape, name=window)
        self.nbox1 *= window_function
        self.nbox2 *= window_function

        self.ibox1 *= self.nbox1
        self.ibox2 *= self.nbox2

        self.subtract_mean()

        normal = (self.nbox1 * self.nbox2).flatten().sum()
        delta_v = self.boxunit**3

        iput_1 = np.zeros(self.boxshape, dtype=complex)
        #oput_1 = np.zeros(self.boxshape, dtype=complex)
        #plan_1 = FFTW.Plan(iput_1, oput_1, direction='forward', flags=['measure'])
        iput_1.imag = 0.
        iput_1.real = self.ibox1
        #FFTW.execute(plan_1)
        oput_1 = np.fft.fftn(iput_1)

        iput_2 = np.zeros(self.boxshape, dtype=complex)
        #oput_2 = np.zeros(self.boxshape, dtype=complex)
        #plan_2 = FFTW.Plan(iput_2, oput_2, direction='forward', flags=['measure'])
        iput_2.imag = 0.
        iput_2.real = self.ibox2
        #FFTW.execute(plan_2)
        oput_2 = np.fft.fftn(iput_2)

        oput_1 = np.fft.fftshift(oput_1)
        oput_2 = np.fft.fftshift(oput_2)

        self.ps_3d  = (oput_1 * oput_2.conj()).real
        self.ps_3d *= delta_v/normal

        del iput_1
        del iput_2
        del oput_1
        del oput_2
        gc.collect()
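The commented-out lines are remnants of an FFTW-based code path that was replaced by numpy's fftn. Note that np.fft.fftn accepts real input directly, so the explicit complex copies of the boxes are not strictly required; a quick check of that equivalence (a stand-alone sketch, not part of the class):

import numpy as np

# sketch: fftn of a real box equals fftn of its explicit complex copy
box = np.random.standard_normal((8, 16, 16))
assert np.allclose(np.fft.fftn(box.astype(complex)), np.fft.fftn(box))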
Example #5
def calculate_mixing(weight_file1, weight_file2, bins, xspec_fileout,
                     mixing_fileout,
                     unitless=False, refinement=2, pad=5, order=1,
                     window='blackman', zero_pad=False, identity_test=False):
    print "loading the weights and converting to physical coordinates"
    weight1_obs = algebra.make_vect(algebra.load(weight_file1))
    weight1 = bh.repackage_kiyo(pg.physical_grid(
                                weight1_obs,
                                refinement=refinement,
                                pad=pad, order=order))

    weight2_obs = algebra.make_vect(algebra.load(weight_file2))
    weight2 = bh.repackage_kiyo(pg.physical_grid(
                                weight2_obs,
                                refinement=refinement,
                                pad=pad, order=order))

    if window:
        window_function = fftutil.window_nd(weight1.shape, name=window)
        weight1 *= window_function
        weight2 *= window_function

    print "calculating the cross-power of the spatial weighting functions"
    arr1 = algebra.ones_like(weight1)
    arr2 = algebra.ones_like(weight2)

    # no window applied here (applied above)
    xspec = pe.cross_power_est(weight1, weight2, arr1, arr2,
                               window=None, nonorm=True)

    # for each point in the cube, find |k|, k_perp, k_parallel
    # TODO: speed this up by using one direct numpy call (not limiting)
    k_mag_arr = binning.radius_array(xspec)
    k_perp_arr = binning.radius_array(xspec, zero_axes=[0])
    k_parallel_arr = binning.radius_array(xspec, zero_axes=[1, 2])

    if unitless:
        xspec = pe.make_unitless(xspec, radius_arr=k_mag_arr)

    # NOTE: assuming lowest k bin has only one point in 3D k-space
    # could make this floor of dimensions divided by 2 also
    center_3d = np.transpose(np.transpose(np.where(k_mag_arr == 0.))[0])

    # In the estimator, we divide by sum(w1 * w2) to remove most of the effect
    # of the weighting. The mixing matrix here can be thought of as a correction
    # to that diagonal-only estimate.
    leakage_ratio = xspec[center_3d[0], center_3d[1], center_3d[2]] / \
                    np.sum(weight1 * weight2)
    print "power leakage ratio: %10.5g" % leakage_ratio

    xspec /= np.sum(weight1 * weight2)

    print "partitioning the 3D kspace up into the 2D k bins"
    (kflat, ret_indices) = bin_indices_2d(k_perp_arr, k_parallel_arr,
                                          bins, bins)

    # perform a test where the window function is a delta function at the
    # origin so that the mixing matrix is identity
    if identity_test:
        xspec = algebra.zeros_like(xspec)
        xspec[center_3d[0], center_3d[1], center_3d[2]] = 1.

    # now save the window cross-power for downstream pooled users
    algebra.save(xspec_fileout, xspec)

    runlist = []
    for bin_index in range(kflat.shape[0]):
        bin_3d = ret_indices[repr(bin_index)]
        if bin_3d is not None:
            runlist.append((xspec_fileout, bin_index, bins, bin_3d, center_3d))

    pool = multiprocessing.Pool(processes=(multiprocessing.cpu_count() - 4))
    # the longest runs get pushed to the end; randomize for better job packing
    random.shuffle(runlist)
    results = pool.map(sum_window, runlist)
    #gnuplot_single_slice(runlist[0])  # for troubleshooting

    # now save the results for post-processing
    params = {"unitless": unitless, "refinement": refinement, "pad": pad,
              "order": order, "window": window, "zero_pad": zero_pad,
              "identity_test": identity_test, "weight_file1": weight_file1,
              "weight_file2": weight_file2, "bins": bins}

    outshelve = shelve.open(mixing_fileout, "n")
    outshelve["params"] = params        # parameters for this run
    outshelve["weight1"] = weight1      # weight map 1
    outshelve["weight2"] = weight2      # weight map 2
    outshelve["xspec"] = xspec          # copy of the weight spectra
    outshelve["kflat"] = kflat          # 2D k bin vector
    outshelve["bins_3d"] = ret_indices  # indices to k3d for a 2d k bin
    outshelve["results"] = results      # mixing matrix columns
    outshelve.close()
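bin_indices_2d and sum_window are defined elsewhere in the package. Judging only from how the return values are used above (a flat bin vector plus a dict keyed by repr(bin_index), with None for empty bins), a hedged numpy-only sketch of the 2-D binning step might look like this; bin_indices_2d_sketch is a hypothetical stand-in, not the package's implementation:

import numpy as np

def bin_indices_2d_sketch(k_perp_arr, k_parallel_arr, bins_perp, bins_parallel):
    """Hedged sketch: map each 3-D k cell to a flat (k_perp, k_parallel) bin."""
    n_perp = len(bins_perp) - 1
    n_par = len(bins_parallel) - 1
    # digitize against the bin edges; subtract 1 so in-range values start at 0
    perp_idx = np.digitize(k_perp_arr.ravel(),
                           bins_perp).reshape(k_perp_arr.shape) - 1
    par_idx = np.digitize(k_parallel_arr.ravel(),
                          bins_parallel).reshape(k_parallel_arr.shape) - 1
    in_range = ((perp_idx >= 0) & (perp_idx < n_perp) &
                (par_idx >= 0) & (par_idx < n_par))
    kflat = np.arange(n_perp * n_par)
    ret_indices = {}
    for flat_bin in kflat:
        cells = np.where(in_range & (perp_idx * n_par + par_idx == flat_bin))
        ret_indices[repr(flat_bin)] = cells if cells[0].size else None
    return kflat, ret_indices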
Example #7
def calculate_mixing(weight_file1,
                     weight_file2,
                     bins,
                     xspec_fileout,
                     mixing_fileout,
                     unitless=False,
                     refinement=2,
                     pad=5,
                     order=1,
                     window='blackman',
                     zero_pad=False,
                     identity_test=False):
    print "loading the weights and converting to physical coordinates"
    weight1_obs = algebra.make_vect(algebra.load(weight_file1))
    weight1 = bh.repackage_kiyo(
        pg.physical_grid(weight1_obs,
                         refinement=refinement,
                         pad=pad,
                         order=order))

    weight2_obs = algebra.make_vect(algebra.load(weight_file2))
    weight2 = bh.repackage_kiyo(
        pg.physical_grid(weight2_obs,
                         refinement=refinement,
                         pad=pad,
                         order=order))

    if window:
        window_function = fftutil.window_nd(weight1.shape, name=window)
        weight1 *= window_function
        weight2 *= window_function

    print "calculating the cross-power of the spatial weighting functions"
    arr1 = algebra.ones_like(weight1)
    arr2 = algebra.ones_like(weight2)

    # no window applied here (applied above)
    xspec = pe.cross_power_est(weight1,
                               weight2,
                               arr1,
                               arr2,
                               window=None,
                               nonorm=True)

    # for each point in the cube, find |k|, k_perp, k_parallel
    # TODO: speed this up by using one direct numpy call (not limiting)
    k_mag_arr = binning.radius_array(xspec)
    k_perp_arr = binning.radius_array(xspec, zero_axes=[0])
    k_parallel_arr = binning.radius_array(xspec, zero_axes=[1, 2])

    if unitless:
        xspec = pe.make_unitless(xspec, radius_arr=k_mag_arr)

    # NOTE: assuming lowest k bin has only one point in 3D k-space
    # could make this floor of dimensions divided by 2 also
    center_3d = np.transpose(np.transpose(np.where(k_mag_arr == 0.))[0])

    # In the estimator, we divide by sum(w1 * w2) to remove most of the effect
    # of the weighting. The mixing matrix here can be thought of as a correction
    # to that diagonal-only estimate.
    leakage_ratio = xspec[center_3d[0], center_3d[1], center_3d[2]] / \
                    np.sum(weight1 * weight2)
    print "power leakage ratio: %10.5g" % leakage_ratio

    xspec /= np.sum(weight1 * weight2)

    print "partitioning the 3D kspace up into the 2D k bins"
    (kflat, ret_indices) = bin_indices_2d(k_perp_arr, k_parallel_arr, bins,
                                          bins)

    # perform a test where the window function is a delta function at the
    # origin so that the mixing matrix is identity
    if identity_test:
        xspec = algebra.zeros_like(xspec)
        xspec[center_3d[0], center_3d[1], center_3d[2]] = 1.

    # now save the window cross-power for downstream pooled users
    algebra.save(xspec_fileout, xspec)

    runlist = []
    for bin_index in range(kflat.shape[0]):
        bin_3d = ret_indices[repr(bin_index)]
        if bin_3d is not None:
            runlist.append((xspec_fileout, bin_index, bins, bin_3d, center_3d))

    pool = multiprocessing.Pool(processes=(multiprocessing.cpu_count() - 4))
    # the longest runs get pushed to the end; randomize for better job packing
    random.shuffle(runlist)
    results = pool.map(sum_window, runlist)
    #gnuplot_single_slice(runlist[0])  # for troubleshooting

    # now save the results for post-processing
    params = {
        "unitless": unitless,
        "refinement": refinement,
        "pad": pad,
        "order": order,
        "window": window,
        "zero_pad": zero_pad,
        "identity_test": identity_test,
        "weight_file1": weight_file1,
        "weight_file2": weight_file2,
        "bins": bins
    }

    outshelve = shelve.open(mixing_fileout, "n")
    outshelve["params"] = params  # parameters for this run
    outshelve["weight1"] = weight1  # weight map 1
    outshelve["weight2"] = weight2  # weight map 2
    outshelve["xspec"] = xspec  # copy of the weight spectra
    outshelve["kflat"] = kflat  # 2D k bin vector
    outshelve["bins_3d"] = ret_indices  # indices to k3d for a 2d k bin
    outshelve["results"] = results  # mixing matrix columns
    outshelve.close()
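For reference, a downstream script would read the shelve output back using the same keys written above; a minimal sketch (the filename is hypothetical):

import shelve

mixing = shelve.open("mixing_fileout.shelve", "r")  # hypothetical filename
print(mixing["params"]["window"])   # e.g. 'blackman'
kflat = mixing["kflat"]             # 2-D k bin vector
mixing_columns = mixing["results"]  # one mixing-matrix column per 2-D k bin
mixing.close()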
Example #8
def cross_power_est(arr1, arr2, weight1, weight2, window="blackman"):
    """Calculate the radially average cross-power spectrum of a two nD fields.

    The arrays must be identical and have the same length (physically
    and in pixel number) along each axis.

    Parameters
    ----------
    arr1, arr2: np.ndarray
        The cubes to calculate the cross-power spectrum of; pass the same
        cube twice to get the standard (auto-)power spectrum.
    window: string or None
        Name of an additional window to apply (e.g. "blackman"); if None or
        empty, no extra window is applied.
    """
    if window:
        window_function = fftutil.window_nd(arr1.shape, name=window)
        weight1 *= window_function
        weight2 *= window_function

    warr1 = arr1 * weight1
    warr2 = arr2 * weight2
    ndim = arr1.ndim

    fft_arr1 = np.fft.fftshift(np.fft.fftn(warr1))
    fft_arr2 = np.fft.fftshift(np.fft.fftn(warr2))
    xspec = fft_arr1 * fft_arr2.conj()
    xspec = xspec.real

    # correct for the weighting
    product_weight = weight1 * weight2
    xspec /= np.sum(product_weight)

    # make the axes
    k_axes = tuple(["k_" + axis_name for axis_name in arr1.axes])
    xspec_arr = algebra.make_vect(xspec, axis_names=k_axes)

    info = {'axes': k_axes, 'type': 'vect'}
    width = np.zeros(ndim)
    for axis_index in range(ndim):
        n_axis = arr1.shape[axis_index]
        axis_name = arr1.axes[axis_index]
        axis_vector = arr1.get_axis(axis_name)
        delta_axis = abs(axis_vector[1] - axis_vector[0])
        width[axis_index] = delta_axis

        k_axis = np.fft.fftshift(np.fft.fftfreq(n_axis, d=delta_axis))
        k_axis *= 2. * math.pi
        delta_k_axis = abs(k_axis[1] - k_axis[0])

        k_name = k_axes[axis_index]
        info[k_name + "_delta"] = delta_k_axis
        info[k_name + "_centre"] = 0.
        #print k_axis
        #print k_name, n_axis, delta_axis

    xspec_arr.info = info
    #print xspec_arr.get_axis("k_dec")

    xspec_arr *= width.prod()

    return xspec_arr
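As a sanity check on the normalization used in these estimators (divide by sum(w1*w2), then multiply by the product of the pixel widths), the zero-k cell for a constant field c with unit weights and no window should equal c**2 times the box volume. A numpy-only check of that identity, with illustrative grid values:

import numpy as np

shape = (4, 8, 8)
deltas = (1.0, 0.5, 0.5)   # illustrative pixel widths
c = 3.0
field = np.full(shape, c)

n_cells = field.size
volume = n_cells * np.prod(deltas)

xspec = np.abs(np.fft.fftshift(np.fft.fftn(field)))**2
xspec *= np.prod(deltas) / n_cells   # /sum(w1*w2) with unit weights, * width.prod()

dc_cell = tuple(n // 2 for n in shape)  # fftshift moves k=0 to the centre
assert np.isclose(xspec[dc_cell], c**2 * volume)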