Example #1
    def load_pairs(self):
        r"""load the set of map/noise pairs specified by keys handed to the
        database. This sets up operations on the quadratic product
            Q = map1^T noise_inv1 B noise_inv2 map2
        """
        par = self.params
        (self.pairlist, pairdict) = dp.cross_maps(par['map1'], par['map2'],
                                             par['noise_inv1'],
                                             par['noise_inv2'],
                                             noise_inv_suffix=";noise_weight",
                                             verbose=False,
                                             tack_on=self.tack_on_input,
                                             db_to_use=self.datapath_db)

        for pairitem in self.pairlist:
            pdict = pairdict[pairitem]
            print "-" * 80
            dp.print_dictionary(pdict, sys.stdout,
                                key_list=['map1', 'noise_inv1',
                                          'map2', 'noise_inv2'])

            map1 = algebra.make_vect(algebra.load(pdict['map1']))
            map2 = algebra.make_vect(algebra.load(pdict['map2']))
            if par['simfile'] is not None:
                print "adding %s with multiplier %s" % (par['simfile'],
                                                        par['sim_multiplier'])

                sim = algebra.make_vect(algebra.load(par['simfile']))
                sim *= par['sim_multiplier']
                print sim.shape, map1.shape
            else:
                sim = algebra.zeros_like(map1)

            noise_inv1 = algebra.make_vect(algebra.load(pdict['noise_inv1']))
            noise_inv2 = algebra.make_vect(algebra.load(pdict['noise_inv2']))

            pair = map_pair.MapPair(map1 + sim, map2 + sim,
                                    noise_inv1, noise_inv2,
                                    self.freq_list)

            pair.set_names(pdict['tag1'], pdict['tag2'])

            pair.params = self.params
            self.pairs[pairitem] = pair

            if par['subtract_inputmap_from_sim'] or \
               par['subtract_sim_from_inputmap']:
                if par['subtract_inputmap_from_sim']:
                    pair_parallel_track = map_pair.MapPair(map1, map2,
                                                  noise_inv1, noise_inv2,
                                                  self.freq_list)

                if par['subtract_sim_from_inputmap']:
                    pair_parallel_track = map_pair.MapPair(sim, sim,
                                                  noise_inv1, noise_inv2,
                                                  self.freq_list)

                pair_parallel_track.set_names(pdict['tag1'], pdict['tag2'])
                pair_parallel_track.params = self.params
                self.pairs_parallel_track[pairitem] = pair_parallel_track
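For orientation, here is a sketch of the structure dp.cross_maps is expected to return, inferred only from how the loop above consumes it; the pair name and file paths are illustrative placeholders, not real database entries.

# Hypothetical example of the (pairlist, pairdict) pair returned by dp.cross_maps.
# Keys mirror what load_pairs reads: map/noise file paths plus short section tags.
pairlist = ['A_with_B']
pairdict = {
    'A_with_B': {
        'map1': '/path/to/sec_A_cleaned_clean_map_I.npy',
        'noise_inv1': '/path/to/sec_A_noise_weight_I.npy',
        'map2': '/path/to/sec_B_cleaned_clean_map_I.npy',
        'noise_inv2': '/path/to/sec_B_noise_weight_I.npy',
        'tag1': 'A',
        'tag2': 'B',
    },
}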
Example #2
    def load_pairs(self):
        r"""load the set of map/noise pairs specified by keys handed to the
        database. This sets up operations on the quadratic product
            Q = map1^T noise_inv1 B noise_inv2 map2
        """
        par = self.params
        (self.pairlist,
         pairdict) = dp.cross_maps(par['map1'],
                                   par['map2'],
                                   par['noise_inv1'],
                                   par['noise_inv2'],
                                   noise_inv_suffix=";noise_weight",
                                   verbose=False,
                                   tack_on=self.tack_on_input,
                                   db_to_use=self.datapath_db)

        for pairitem in self.pairlist:
            pdict = pairdict[pairitem]
            print "-" * 80
            dp.print_dictionary(
                pdict,
                sys.stdout,
                key_list=['map1', 'noise_inv1', 'map2', 'noise_inv2'])

            map1 = algebra.make_vect(algebra.load(pdict['map1']))
            map2 = algebra.make_vect(algebra.load(pdict['map2']))
            if par['simfile'] is not None:
                print "adding %s with multiplier %s" % (par['simfile'],
                                                        par['sim_multiplier'])

                sim = algebra.make_vect(algebra.load(par['simfile']))
                sim *= par['sim_multiplier']
                print sim.shape, map1.shape
            else:
                sim = algebra.zeros_like(map1)

            noise_inv1 = algebra.make_vect(algebra.load(pdict['noise_inv1']))
            noise_inv2 = algebra.make_vect(algebra.load(pdict['noise_inv2']))

            pair = map_pair.MapPair(map1 + sim, map2 + sim, noise_inv1,
                                    noise_inv2, self.freq_list)

            pair.set_names(pdict['tag1'], pdict['tag2'])

            pair.params = self.params
            self.pairs[pairitem] = pair

            if par['subtract_inputmap_from_sim'] or \
               par['subtract_sim_from_inputmap']:
                if par['subtract_inputmap_from_sim']:
                    pair_parallel_track = map_pair.MapPair(
                        map1, map2, noise_inv1, noise_inv2, self.freq_list)

                if par['subtract_sim_from_inputmap']:
                    pair_parallel_track = map_pair.MapPair(
                        sim, sim, noise_inv1, noise_inv2, self.freq_list)

                pair_parallel_track.set_names(pdict['tag1'], pdict['tag2'])
                pair_parallel_track.params = self.params
                self.pairs_parallel_track[pairitem] = pair_parallel_track
 def make_combined(self, map_list, targetroot, file):
     imap_temp = algebra.make_vect(algebra.load(map_list[0]))
     cumulative_product = algebra.zeros_like(imap_temp)
     cumulative_weight  = algebra.zeros_like(imap_temp)
     for dir in map_list:
         imap = algebra.make_vect(algebra.load(dir))
         wmap = algebra.make_vect(algebra.load(dir.replace('clean_map', 'noise_inv')))
         cumulative_product += imap*wmap
         cumulative_weight  += wmap
     algebra.compressed_array_summary(cumulative_weight, "weight map")
     algebra.compressed_array_summary(cumulative_product, "product map")
 
     cumulative_weight[cumulative_weight < 1.e-20] = 0.
     cumulative_product[cumulative_weight < 1.e-20] = 0.
 
     cumulative_weight[cumulative_weight == 0] = np.inf
     cumulative_weight[np.isnan(cumulative_weight)] = np.inf
     newmap = cumulative_product / cumulative_weight
     cumulative_weight[np.isinf(cumulative_weight)] = 0.
 
     # if the new map is nan or inf, set it and the weights to zero
     nan_array = np.isnan(newmap)
     newmap[nan_array] = 0.
     cumulative_product[nan_array] = 0.
     cumulative_weight[nan_array] = 0.
     inf_array = np.isinf(newmap)
     newmap[inf_array] = 0.
     cumulative_product[inf_array] = 0.
     cumulative_weight[inf_array] = 0.
     algebra.compressed_array_summary(newmap, "new map")
     algebra.compressed_array_summary(cumulative_product,"final map * weight")
     algebra.compressed_array_summary(cumulative_weight, "final weight map")
 
     combined_map_file = targetroot + file
     combined_weight_file = targetroot + file.replace('map', 'weight')
     print combined_map_file
     print combined_weight_file
     algebra.save(combined_map_file, newmap)
     algebra.save(combined_weight_file, cumulative_weight)
def sum_window(argt):
    """A given bin in 2D k-space (labelled by bin_index_2d) is the sum over a
    "washer" in 3D k-space, a band in k_parallel and an annulus in k_x, k_y.
    Let all of the 3D bins in k-space be indexed by bin_3d. The window function
    is centered at k_3d=0; the indices defining the center of the 3D volume are
    given in center_3d.
    TODO: replace this with NlogN convolution
    TODO: implement 0-padded roll instead of np.roll, algebra.roll_zeropad()
    """
    (filename, bin_index_2d, k_2d, bin_3d, center_3d) = argt
    # load the cross-power of the weighting functions
    xspec = algebra.make_vect(algebra.load(filename))
    windowsum = algebra.zeros_like(xspec)

    num_3dbins_in_2dbin = bin_3d.shape[0]

    print "%d: summing over %d bins" % (bin_index_2d, num_3dbins_in_2dbin)

    for bin_3dind in range(num_3dbins_in_2dbin):
        # TODO: is this sign right, does it matter?
        off = bin_3d[bin_3dind] - center_3d
        #print off
        windowsum += np.roll(np.roll(np.roll(xspec, off[0], axis=0),
                                     off[1], axis=1),
                             off[2], axis=2)

    k_perp_arr = binning.radius_array(xspec, zero_axes=[0])
    k_parallel_arr = binning.radius_array(xspec, zero_axes=[1, 2])
    kx_2d = copy.deepcopy(k_2d)
    ky_2d = copy.deepcopy(k_2d)
    counts_histo_2d, binavg_2d = binning.bin_an_array_2d(windowsum,
                                                         k_perp_arr,
                                                         k_parallel_arr,
                                                         kx_2d, ky_2d)

    return (bin_index_2d, counts_histo_2d, binavg_2d)
Example #5
def sum_window(argt):
    """A given bin in 2D k-space (labelled by bin_index_2d) is the sum over a
    "washer" in 3D k-space, a band in k_parallel and an annulus in k_x, k_y.
    Let all of the 3D bins in k-space be indexed by bin_3d. The window function
    is centered at k_3d=0; the indices defining the center of the 3D volume are
    given in center_3d.
    TODO: replace this with NlogN convolution
    TODO: implement 0-padded roll instead of np.roll, algebra.roll_zeropad()
    """
    (filename, bin_index_2d, k_2d, bin_3d, center_3d) = argt
    # load the cross-power of the weighting functions
    xspec = algebra.make_vect(algebra.load(filename))
    windowsum = algebra.zeros_like(xspec)

    num_3dbins_in_2dbin = bin_3d.shape[0]

    print "%d: summing over %d bins" % (bin_index_2d, num_3dbins_in_2dbin)

    for bin_3dind in range(num_3dbins_in_2dbin):
        # TODO: is this sign right, does it matter?
        off = bin_3d[bin_3dind] - center_3d
        #print off
        windowsum += np.roll(np.roll(np.roll(xspec, off[0], axis=0),
                                     off[1],
                                     axis=1),
                             off[2],
                             axis=2)

    k_perp_arr = binning.radius_array(xspec, zero_axes=[0])
    k_parallel_arr = binning.radius_array(xspec, zero_axes=[1, 2])
    kx_2d = copy.deepcopy(k_2d)
    ky_2d = copy.deepcopy(k_2d)
    counts_histo_2d, binavg_2d = binning.bin_an_array_2d(
        windowsum, k_perp_arr, k_parallel_arr, kx_2d, ky_2d)

    return (bin_index_2d, counts_histo_2d, binavg_2d)
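The TODO in the docstring mentions a zero-padded roll, algebra.roll_zeropad(), which is not shown in these examples. A minimal numpy sketch of that idea (an assumption about what such a helper would do, not the pipeline's implementation) could look like this:

import numpy as np

def roll_zeropad(arr, shift, axis):
    # Like np.roll, but samples shifted past the edge are dropped and the
    # vacated positions are filled with zeros instead of wrapping around.
    result = np.zeros_like(arr)
    n = arr.shape[axis]
    if abs(shift) >= n:
        return result
    if shift == 0:
        result[...] = arr
        return result
    src = [slice(None)] * arr.ndim
    dst = [slice(None)] * arr.ndim
    if shift > 0:
        src[axis] = slice(0, n - shift)
        dst[axis] = slice(shift, n)
    else:
        src[axis] = slice(-shift, n)
        dst[axis] = slice(0, n + shift)
    result[tuple(dst)] = arr[tuple(src)]
    return result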
def calculate_mixing(weight_file1, weight_file2, bins, xspec_fileout,
                     mixing_fileout,
                     unitless=False, refinement=2, pad=5, order=1,
                     window='blackman', zero_pad=False, identity_test=False):
    print "loading the weights and converting to physical coordinates"
    weight1_obs = algebra.make_vect(algebra.load(weight_file1))
    weight1 = bh.repackage_kiyo(pg.physical_grid(
                                weight1_obs,
                                refinement=refinement,
                                pad=pad, order=order))

    weight2_obs = algebra.make_vect(algebra.load(weight_file2))
    weight2 = bh.repackage_kiyo(pg.physical_grid(
                                weight2_obs,
                                refinement=refinement,
                                pad=pad, order=order))

    if window:
        window_function = fftutil.window_nd(weight1.shape, name=window)
        weight1 *= window_function
        weight2 *= window_function

    print "calculating the cross-power of the spatial weighting functions"
    arr1 = algebra.ones_like(weight1)
    arr2 = algebra.ones_like(weight2)

    # no window applied here (applied above)
    xspec = pe.cross_power_est(weight1, weight2, arr1, arr2,
                               window=None, nonorm=True)

    # for each point in the cube, find |k|, k_perp, k_parallel
    # TODO: speed this up by using one direct numpy call (not limiting)
    k_mag_arr = binning.radius_array(xspec)
    k_perp_arr = binning.radius_array(xspec, zero_axes=[0])
    k_parallel_arr = binning.radius_array(xspec, zero_axes=[1, 2])

    if unitless:
        xspec = pe.make_unitless(xspec, radius_arr=k_mag_arr)

    # NOTE: assuming the lowest k bin has only one point in 3D k-space;
    # could also use the floor of the dimensions divided by 2
    center_3d = np.transpose(np.transpose(np.where(k_mag_arr == 0.))[0])

    # In the estimator, we divide by sum(w1 * w2) to get most of the effect
    # of the weighting. The mixing matrix here can be thought of as a correction
    # to that diagonal-only estimate.
    leakage_ratio = xspec[center_3d[0], center_3d[1], center_3d[2]] / \
                    np.sum(weight1 * weight2)
    print "power leakage ratio: %10.5g" % leakage_ratio

    xspec /= np.sum(weight1 * weight2)

    print "partitioning the 3D kspace up into the 2D k bins"
    (kflat, ret_indices) = bin_indices_2d(k_perp_arr, k_parallel_arr,
                                          bins, bins)

    # perform a test where the window function is a delta function at the
    # origin so that the mixing matrix is identity
    if identity_test:
        xspec = algebra.zeros_like(xspec)
        xspec[center_3d[0], center_3d[1], center_3d[2]] = 1.

    # now save the window cross-power for downstream pooled users
    algebra.save(xspec_fileout, xspec)

    runlist = []
    for bin_index in range(kflat.shape[0]):
        bin_3d = ret_indices[repr(bin_index)]
        if bin_3d is not None:
            runlist.append((xspec_fileout, bin_index, bins, bin_3d, center_3d))

    pool = multiprocessing.Pool(processes=(multiprocessing.cpu_count() - 4))
    # the longest runs get pushed to the end; randomize for better job packing
    random.shuffle(runlist)
    results = pool.map(sum_window, runlist)
    #gnuplot_single_slice(runlist[0])  # for troubleshooting

    # now save the results for post-processing
    params = {"unitless": unitless, "refinement": refinement, "pad": pad,
              "order": order, "window": window, "zero_pad": zero_pad,
              "identity_test": identity_test, "weight_file1": weight_file1,
              "weight_file2": weight_file2, "bins": bins}

    outshelve = shelve.open(mixing_fileout, "n")
    outshelve["params"] = params        # parameters for this run
    outshelve["weight1"] = weight1      # weight map 1
    outshelve["weight2"] = weight2      # weight map 2
    outshelve["xspec"] = xspec          # copy of the weight spectra
    outshelve["kflat"] = kflat          # 2D k bin vector
    outshelve["bins_3d"] = ret_indices  # indices to k3d for a 2d k bin
    outshelve["results"] = results      # mixing matrix columns
    outshelve.close()
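Downstream consumers presumably reopen the shelve written above and stack the per-bin window sums into a mixing matrix. A hedged sketch of that post-processing step (the filename and the row convention are assumptions, not taken from the source):

import shelve
import numpy as np

# Reopen the shelve written by calculate_mixing (filename is illustrative).
mixing_shelve = shelve.open("mixing_fileout.shelve", "r")
kflat = mixing_shelve["kflat"]          # flattened 2D k-bin vector
results = mixing_shelve["results"]      # (bin_index_2d, counts_histo_2d, binavg_2d) per bin
mixing_shelve.close()

# One row per 2D k bin; bins with no 3D cells assigned were never run and stay zero.
mixing_matrix = np.zeros((kflat.shape[0],) + results[0][2].shape)
for bin_index_2d, counts_histo_2d, binavg_2d in results:
    mixing_matrix[bin_index_2d] = binavg_2d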
Example #7
def combine_maps_driver(inputmap_dict, inputweight_dict, output_dict,
                        fullcov=False, datapath_db=None):
    r"""Combine a list of weights, maps specified by their database keys
    """
    if datapath_db is None:
        datapath_db = data_paths.DataPath()

    signal_list = []
    weight_list = []
    for mapkey in inputmap_dict:
        signalfile = inputmap_dict[mapkey]
        weightfile = inputweight_dict[mapkey]
        print "loading pair: %s %s" % (signalfile, weightfile)
        signal_list.append(algebra.make_vect(algebra.load(signalfile)))

        if fullcov:
            raw_weight = algebra.make_mat(
                            algebra.open_memmap(weightfile))
            raw_weight = raw_weight.mat_diag()
        else:
            raw_weight = algebra.make_vect(algebra.load(weightfile))

        # zero out any messy stuff
        raw_weight[raw_weight < 1.e-20] = 0.
        raw_weight[np.isnan(raw_weight)] = 0.
        raw_weight[np.isinf(raw_weight)] = 0.
        weight_list.append(raw_weight)

    prodmap = []
    for mapind in range(0, len(signal_list)):
        prodmap.append(signal_list[mapind] * weight_list[mapind])

    print "CHECK THESE: %d %d %d" % (len(signal_list), len(weight_list),
                                     len(prodmap))

    cumulative_product = algebra.zeros_like(prodmap[0])
    cumulative_weight = algebra.zeros_like(prodmap[0])
    for mapind in range(0, len(signal_list)):
        cumulative_product += prodmap[mapind]
        cumulative_weight += weight_list[mapind]

    algebra.compressed_array_summary(cumulative_weight, "weight map")
    algebra.compressed_array_summary(cumulative_product, "product map")

    newmap = cumulative_product / cumulative_weight

    cumulative_weight[cumulative_weight < 1.e-20] = 0.
    cumulative_product[cumulative_weight < 1.e-20] = 0.

    # if the new map is nan or inf, set it and the weights to zero
    nan_array = np.isnan(newmap)
    newmap[nan_array] = 0.
    cumulative_product[nan_array] = 0.
    cumulative_weight[nan_array] = 0.
    inf_array = np.isinf(newmap)
    newmap[inf_array] = 0.
    cumulative_product[inf_array] = 0.
    cumulative_weight[inf_array] = 0.
    algebra.compressed_array_summary(newmap, "new map")
    algebra.compressed_array_summary(cumulative_product, "final map * weight")
    algebra.compressed_array_summary(cumulative_weight, "final weight map")

    print output_dict
    algebra.save(output_dict['map'], newmap)
    algebra.save(output_dict['product'], cumulative_product)
    algebra.save(output_dict['weight'], cumulative_weight)
    algebra.save(output_dict['ones'], algebra.ones_like(newmap))
Example #8
    def apply(self, alg_ob, wrap=False, cval=0, right_apply=False) :
        """Apply the beam, as a linear operator, to vector or matrix.

        This operation is equivalent to matrix multiplication by the beam
        matrix.  The matrix multiplication can be performed with the beam
        matrix on either the left or the right.  The beam matrix is a symmetric
        matrix.

        Parameters
        ----------
        alg_ob : `vect` or `mat` subclass.
            Object to which the beam will be applied.  This object must live in
            map space.  If it is a vect, alg_ob.axes must be ('freq', 'ra',
            'dec').  If it is a mat, and `right_apply` is False, then
            alg_ob.row_names() must return ('freq', 'ra', 'dec').  If
            `right_apply` is True, then alg_ob.col_names() should return
            this tuple.  Also, the metadata for these three axes must be set
            (see `algebra.alg_object.set_axis_info`).
        wrap : bool
            If True, use periodic boundary conditions for the
            convolution. Otherwise zero pad (default).
        right_apply : bool
            Whether to apply the beam operator from the left (False,
            default) or from the right (True).  If `alg_ob` is a vect subclass,
            this has no effect (because the beam matrix is symmetric).

        Returns
        -------
        out : `vect` or `mat` subclass with the same shape as `alg_ob`.
            Convolved map or matrix.

        Notes
        -----
        The missing feature here is the ability to preallocate memory or to be
        able to overwrite the input alg_ob in place.  This will be important
        for large matrices that we can only hold in memory one at a time.
        Also this would be pretty easy to implement.
        """

        if (('freq' not in alg_ob.axes)
            or ('ra' not in alg_ob.axes)
            or ('dec' not in alg_ob.axes)) :
                raise ce.DataError("Beam operation only works in frequency, "
                                   "ra, dec coords.")
        
        # Allocate memory for the output.
        out = algebra.zeros_like(alg_ob)
        # Figure out the pixel sizes (in real degrees).
        dfreq = abs(alg_ob.info['freq_delta'])
        dra = abs(alg_ob.info['ra_delta'])
        dra /= sp.cos(alg_ob.info['dec_centre']*sp.pi/180)
        ddec = abs(alg_ob.info['dec_delta'])
        # Figure out the convolution mode.
        if wrap :
            mode = 'wrap'
        else :
            mode = 'constant'
        # Loop over frequencies and do convolution one frequency at a time.
        freq_array = alg_ob.get_axis('freq')
        for ii, freq in enumerate(freq_array) :
            # How wide the kernel has to be.
            width = self.kernal_size(freq)
            # Make sure the dimensions are an odd number of pixels.
            nkx = width//abs(dra)
            if nkx%2 == 0 :
                nkx += 1
            nky = width//abs(ddec)
            if nky%2 == 0 :
                nky += 1
            # Calculate kernel lags.
            lagsx = (sp.arange(nkx, dtype=float) - (nkx - 1)//2)*dra
            lagsy = (sp.arange(nky, dtype=float) - (nky - 1)//2)*ddec
            lags_sq = lagsx[:, None]**2 + lagsy[None, :]**2
            # Make gaussian beam profile.
            kernal = dra*ddec*self.beam_function(lags_sq, freq, 
                                                 squared_delta=True)
#            print kernal.shape, dra, nkx
            if isinstance(alg_ob, algebra.vect) :
                if alg_ob.axes != ('freq', 'ra', 'dec') :
                    raise ce.DataError("Vector axis names must be exactly "
                                       "('freq', 'ra', 'dec')")
                # Do the convolution.
                convolve(alg_ob[ii, ...], kernal, out[ii], mode=mode,
                         cval=cval)
            elif isinstance(alg_ob, algebra.mat) :
                # If applying from the left, loop over columns and convolve
                # over rows.  If applying from the right, do the opposite.
                if right_apply :
                    if alg_ob.col_names() != ('freq', 'ra', 'dec') :
                        raise ce.DataError("Matrix column axis names must be "
                                           "exactly ('freq', 'ra', 'dec')")
                    iterator = alg_ob.iter_row_index()
                else :
                    if alg_ob.row_names() != ('freq', 'ra', 'dec') :
                        raise ce.DataError("Matrix row axis names must be "
                                           "exactly ('freq', 'ra', 'dec')")
                    iterator = alg_ob.iter_col_index()
                for index in iterator :
                    sub_mat = alg_ob[index] # A view.
                    # Pick out this frequency.
                    sub_mat = sub_mat[ii, ...]
                    # make a view of the output array.
                    sub_out = out[index]
                    sub_out = sub_out[ii, ...]
                    # Do the convolution.
                    convolve(sub_mat, kernal, sub_out, mode=mode,
                             cval=cval)
        return out
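A hedged usage sketch for the method above (not from the source): build a small map vect, set the axis metadata that apply() reads, and convolve it with a beam object exposing this method. The make_vect/set_axis_info calls and the numerical axis values are assumptions following the algebra conventions referenced in the docstring.

import numpy as np

# Assumed setup: `algebra` is the map-algebra module used above, and `beam_op`
# is a beam object (constructed elsewhere) that provides this apply() method.
fake_map = algebra.make_vect(np.zeros((4, 32, 16)),
                             axis_names=('freq', 'ra', 'dec'))
fake_map.set_axis_info('freq', 800.0e6, 1.0e6)  # centre [Hz], delta (assumed signature)
fake_map.set_axis_info('ra', 218.0, -0.075)     # centre [deg], delta
fake_map.set_axis_info('dec', 2.0, 0.075)
smoothed = beam_op.apply(fake_map)              # equivalent to left-applying the beam matrix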
    def save_data(self, n_modes):
        prodmap_list = []
        weight_list = []

        n_modes = "%dmodes" % n_modes
        for pairitem in self.pairlist:
            pair = self.pairs[pairitem]
            (tag1, tag2) = (pair.map1_name, pair.map2_name)
            clnoise = "cleaned_noise_inv"
            map1_file = "%s/sec_%s_cleaned_clean_map_I_with_%s_%s.npy" % \
                            (self.output_root, tag1, tag2, n_modes)
            map2_file = "%s/sec_%s_cleaned_clean_map_I_with_%s_%s.npy" % \
                            (self.output_root, tag2, tag1, n_modes)
            noise_inv1_file = "%s/sec_%s_%s_I_with_%s_%s.npy" % \
                            (self.output_root, tag1, clnoise, tag2, n_modes)
            noise_inv2_file = "%s/sec_%s_%s_I_with_%s_%s.npy" % \
                            (self.output_root, tag2, clnoise, tag1, n_modes)
            modes1_file = "%s/sec_%s_modes_clean_map_I_with_%s_%s.npy" % \
                            (self.output_root, tag1, tag2, n_modes)
            modes2_file = "%s/sec_%s_modes_clean_map_I_with_%s_%s.npy" % \
                            (self.output_root, tag2, tag1, n_modes)

            if self.params['subtract_inputmap_from_sim'] or \
               self.params['subtract_sim_from_inputmap']:
                map1 = pair.map1 - self.pairs_parallel_track[pairitem].map1

                map2 = pair.map2 - self.pairs_parallel_track[pairitem].map2
            else:
                map1 = copy.deepcopy(pair.map1)
                map2 = copy.deepcopy(pair.map2)

            prodmap_list.append(map1 * pair.noise_inv1)
            prodmap_list.append(map2 * pair.noise_inv2)
            weight_list.append(pair.noise_inv1)
            weight_list.append(pair.noise_inv2)

            algebra.save(map1_file, map1)
            algebra.save(map2_file, map2)
            algebra.save(noise_inv1_file, pair.noise_inv1)
            algebra.save(noise_inv2_file, pair.noise_inv2)
            algebra.save(modes1_file, pair.left_modes)
            algebra.save(modes2_file, pair.right_modes)

        cumulative_product = algebra.zeros_like(prodmap_list[0])
        cumulative_weight = algebra.zeros_like(prodmap_list[0])
        for mapind in range(0, len(prodmap_list)):
            cumulative_product += prodmap_list[mapind]
            cumulative_weight += weight_list[mapind]

        algebra.compressed_array_summary(cumulative_weight, "weight map")
        algebra.compressed_array_summary(cumulative_product, "product map")

        cumulative_weight[cumulative_weight < 1.e-20] = 0.
        cumulative_product[cumulative_weight < 1.e-20] = 0.

        newmap = cumulative_product / cumulative_weight

        # if the new map is nan or inf, set it and the weights to zero
        nan_array = np.isnan(newmap)
        newmap[nan_array] = 0.
        cumulative_product[nan_array] = 0.
        cumulative_weight[nan_array] = 0.
        inf_array = np.isinf(newmap)
        newmap[inf_array] = 0.
        cumulative_product[inf_array] = 0.
        cumulative_weight[inf_array] = 0.
        algebra.compressed_array_summary(newmap, "new map")
        algebra.compressed_array_summary(cumulative_product, "final map * weight")
        algebra.compressed_array_summary(cumulative_weight, "final weight map")

        combined = "combined_clean"
        combined_map_file = "%s/%s_map_%s.npy" % \
                            (self.output_root, combined, n_modes)
        combined_weight_file = "%s/%s_weight_%s.npy" % \
                            (self.output_root, combined, n_modes)
        combined_product_file = "%s/%s_product_%s.npy" % \
                            (self.output_root, combined, n_modes)
        combined_ones_file = "%s/%s_ones_%s.npy" % \
                            (self.output_root, combined, n_modes)

        algebra.save(combined_map_file, newmap)
        algebra.save(combined_product_file, cumulative_product)
        algebra.save(combined_weight_file, cumulative_weight)
        algebra.save(combined_ones_file, algebra.ones_like(newmap))
Example #10
 def execute(self, nprocesses=1):
     """Worker funciton."""
     params = self.params
     # Make parent directory and write parameter file.
     kiyopy.utils.mkparents(params['output_root'])
     parse_ini.write_params(params,
                            params['output_root'] + 'params.ini',
                            prefix=prefix)
     save_noise_diag = params['save_noise_diag']
     in_root = params['input_root']
     all_out_fname_list = []
     all_in_fname_list = []
     # Figure out what the band names are.
     bands = params['bands']
     if not bands:
         map_files = glob.glob(in_root + 'dirty_map_' + pol_str + "_*.npy")
         bands = []
         root_len = len(in_root + 'dirty_map_')
         for file_name in map_files:
             bands.append(file_name[root_len:-4])
     # Loop over files to process.
     for pol_str in params['polarizations']:
         for band in bands:
             if band == -1:
                 band_str = ''
             else:
                 band_str = "_" + repr(band)
             dmap_fname = (in_root + 'dirty_map_' + pol_str + band_str +
                           '.npy')
             all_in_fname_list.append(
                 kiyopy.utils.abbreviate_file_path(dmap_fname))
             # Load the dirty map and the noise matrix.
             dirty_map = algebra.load(dmap_fname)
             dirty_map = algebra.make_vect(dirty_map)
             if dirty_map.axes != ('freq', 'ra', 'dec'):
                 msg = ("Expeced dirty map to have axes ('freq',"
                        "'ra', 'dec'), but it has axes: " +
                        str(dirty_map.axes))
                 raise ce.DataError(msg)
             shape = dirty_map.shape
             # Initialize the clean map.
             clean_map = algebra.info_array(sp.zeros(dirty_map.shape))
             clean_map.info = dict(dirty_map.info)
             clean_map = algebra.make_vect(clean_map)
             # If needed, initialize a map for the noise diagonal.
             if save_noise_diag:
                 noise_diag = algebra.zeros_like(clean_map)
             if params["from_eig"]:
                 # Solving from eigen decomposition of the noise instead of
                 # the noise itself.
                 # Load in the decomposition.
                  evects_fname = (in_root + 'noise_evects_' + pol_str +
                                  band_str + '.npy')
                 if self.feedback > 1:
                     print "Using dirty map: " + dmap_fname
                     print "Using eigenvectors: " + evects_fname
                 evects = algebra.open_memmap(evects_fname, 'r')
                 evects = algebra.make_mat(evects)
                 evals_inv_fname = (in_root + 'noise_evalsinv_' + pol_str +
                                    "_" + repr(band) + '.npy')
                 evals_inv = algebra.load(evals_inv_fname)
                 evals_inv = algebra.make_mat(evals_inv)
                 # Solve for the map.
                 if params["save_noise_diag"]:
                     clean_map, noise_diag = solve_from_eig(
                         evals_inv, evects, dirty_map, True, self.feedback)
                 else:
                     clean_map = solve_from_eig(evals_inv, evects,
                                                dirty_map, False,
                                                self.feedback)
                 # Delete the eigen vectors to recover memory.
                 del evects
             else:
                 # Solving from the noise.
                 noise_fname = (in_root + 'noise_inv_' + pol_str +
                                band_str + '.npy')
                 if self.feedback > 1:
                     print "Using dirty map: " + dmap_fname
                     print "Using noise inverse: " + noise_fname
                 all_in_fname_list.append(
                     kiyopy.utils.abbreviate_file_path(noise_fname))
                 noise_inv = algebra.open_memmap(noise_fname, 'r')
                 noise_inv = algebra.make_mat(noise_inv)
                  # Two cases for the noise.  If it's the same shape as the map
                 # then the noise is diagonal.  Otherwise, it should be
                 # block diagonal in frequency.
                 if noise_inv.ndim == 3:
                     if noise_inv.axes != ('freq', 'ra', 'dec'):
                         msg = ("Expeced noise matrix to have axes "
                                "('freq', 'ra', 'dec'), but it has: " +
                                str(noise_inv.axes))
                         raise ce.DataError(msg)
                     # Noise inverse can fit in memory, so copy it.
                     noise_inv_memory = sp.array(noise_inv, copy=True)
                     # Find the non-singular (covered) pixels.
                     max_information = noise_inv_memory.max()
                     good_data = noise_inv_memory > 1.0e-10 * max_information
                     # Make the clean map.
                     clean_map[good_data] = (dirty_map[good_data] /
                                             noise_inv_memory[good_data])
                     if save_noise_diag:
                         noise_diag[good_data] = \
                                 1/noise_inv_memory[good_data]
                 elif noise_inv.ndim == 5:
                     if noise_inv.axes != ('freq', 'ra', 'dec', 'ra',
                                           'dec'):
                         msg = ("Expeced noise matrix to have axes "
                                "('freq', 'ra', 'dec', 'ra', 'dec'), "
                                "but it has: " + str(noise_inv.axes))
                         raise ce.DataError(msg)
                     # Arrange the dirty map as a vector.
                     dirty_map_vect = sp.array(dirty_map)  # A view.
                     dirty_map_vect.shape = (shape[0], shape[1] * shape[2])
                     frequencies = dirty_map.get_axis('freq') / 1.0e6
                     # Allocate memory only once.
                     noise_inv_freq = sp.empty(
                         (shape[1], shape[2], shape[1], shape[2]),
                         dtype=float)
                     if self.feedback > 1:
                         print "Inverting noise matrix."
                     # Block diagonal in frequency so loop over frequencies.
                     for ii in xrange(dirty_map.shape[0]):
                         if self.feedback > 1:
                             print "Frequency: ", "%5.1f" % (
                                 frequencies[ii]),
                         if self.feedback > 2:
                             print ", start mmap read:",
                             sys.stdout.flush()
                         noise_inv_freq[...] = noise_inv[ii, ...]
                         if self.feedback > 2:
                             print "done, start eig:",
                             sys.stdout.flush()
                         noise_inv_freq.shape = (shape[1] * shape[2],
                                                 shape[1] * shape[2])
                         # Solve the map making equation by diagonalization.
                         noise_inv_diag, Rot = sp.linalg.eigh(
                             noise_inv_freq, overwrite_a=True)
                         if self.feedback > 2:
                             print "done",
                         map_rotated = sp.dot(Rot.T, dirty_map_vect[ii])
                         # Zero out infinite noise modes.
                         bad_modes = (noise_inv_diag <
                                      1.0e-5 * noise_inv_diag.max())
                         if self.feedback > 1:
                             print ", discarded: ",
                             print "%4.1f" % (100.0 * sp.sum(bad_modes) /
                                              bad_modes.size),
                             print "% of modes",
                         if self.feedback > 2:
                             print ", start rotations:",
                             sys.stdout.flush()
                         map_rotated[bad_modes] = 0.
                         noise_inv_diag[bad_modes] = 1.0
                         # Solve for the clean map and rotate back.
                         map_rotated /= noise_inv_diag
                         map = sp.dot(Rot, map_rotated)
                         if self.feedback > 2:
                             print "done",
                             sys.stdout.flush()
                         # Fill the clean array.
                         map.shape = (shape[1], shape[2])
                         clean_map[ii, ...] = map
                         if save_noise_diag:
                             # Using C = R Lambda R^T
                             # where Lambda = diag(1/noise_inv_diag).
                             temp_noise_diag = 1 / noise_inv_diag
                             temp_noise_diag[bad_modes] = 0
                             # Multiply R by the diagonal eigenvalue matrix.
                             # Broadcasting does equivalent of mult by diag
                             # matrix.
                             temp_mat = Rot * temp_noise_diag
                             # Multiply by R^T, but only calculate the
                             # diagonal elements.
                             for jj in range(shape[1] * shape[2]):
                                 temp_noise_diag[jj] = sp.dot(
                                     temp_mat[jj, :], Rot[jj, :])
                             temp_noise_diag.shape = (shape[1], shape[2])
                             noise_diag[ii, ...] = temp_noise_diag
                         # Return workspace memory to original shape.
                         noise_inv_freq.shape = (shape[1], shape[2],
                                                 shape[1], shape[2])
                         if self.feedback > 1:
                             print ""
                             sys.stdout.flush()
                 elif noise_inv.ndim == 6:
                     if save_noise_diag:
                         # OLD WAY.
                         #clean_map, noise_diag, chol = solve(noise_inv,
                         #        dirty_map, True, feedback=self.feedback)
                         # NEW WAY.
                         clean_map, noise_diag, noise_inv_diag, chol = \
                                   solve(noise_fname, noise_inv, dirty_map,
                                   True, feedback=self.feedback)
                     else:
                         # OLD WAY.
                         #clean_map, chol = solve(noise_inv, dirty_map,
                         #            False, feedback=self.feedback)
                         # NEW WAY.
                         clean_map, noise_inv_diag, chol = \
                                   solve(noise_fname, noise_inv, dirty_map,
                                   False, feedback=self.feedback)
                     if params['save_cholesky']:
                         chol_fname = (params['output_root'] + 'chol_' +
                                       pol_str + band_str + '.npy')
                         sp.save(chol_fname, chol)
                     if params['save_noise_inv_diag']:
                         noise_inv_diag_fname = (params['output_root'] +
                                                 'noise_inv_diag_' +
                                                 pol_str + band_str +
                                                 '.npy')
                         algebra.save(noise_inv_diag_fname, noise_inv_diag)
                     # Delete the cholesky to recover memory.
                     del chol
                 else:
                     raise ce.DataError("Noise matrix has bad shape.")
                  # In all cases delete the noise object to recover memory.
                 del noise_inv
             # Write the clean map to file.
             out_fname = (params['output_root'] + 'clean_map_' + pol_str +
                          band_str + '.npy')
             if self.feedback > 1:
                 print "Writing clean map to: " + out_fname
             algebra.save(out_fname, clean_map)
             all_out_fname_list.append(
                 kiyopy.utils.abbreviate_file_path(out_fname))
             if save_noise_diag:
                 noise_diag_fname = (params['output_root'] + 'noise_diag_' +
                                     pol_str + band_str + '.npy')
                 algebra.save(noise_diag_fname, noise_diag)
                 all_out_fname_list.append(
                     kiyopy.utils.abbreviate_file_path(noise_diag_fname))
              # Check the clean map for failure.
             if not sp.alltrue(sp.isfinite(clean_map)):
                 n_bad = sp.sum(sp.logical_not(sp.isfinite(clean_map)))
                 msg = ("Non finite entries found in clean map. Solve"
                        " failed. %d out of %d entries bad" %
                        (n_bad, clean_map.size))
                 raise RuntimeError(msg)
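In the save_noise_diag branch above, the diagonal of the map covariance C = R * diag(1/noise_inv_diag) * R^T is computed with a broadcasting multiply followed by row-wise dot products, avoiding the full matrix product. A small standalone check of that identity (a sketch for illustration, not part of the pipeline):

import numpy as np

# Random orthogonal R and positive eigenvalues lam; compare the diagonal of
# R * diag(lam) * R^T computed directly against the broadcasting shortcut.
rng = np.random.RandomState(0)
rot = np.linalg.qr(rng.randn(6, 6))[0]
lam = rng.rand(6)
full_diag = np.diag(np.dot(rot * lam, rot.T))       # forms the full 6x6 matrix
fast_diag = np.einsum('ij,j,ij->i', rot, lam, rot)  # diagonal only, as in the loop above
assert np.allclose(full_diag, fast_diag)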
Example #11
    def save_data(self, n_modes):
        prodmap_list = []
        weight_list = []

        n_modes = "%dmodes" % n_modes
        for pairitem in self.pairlist:
            pair = self.pairs[pairitem]
            (tag1, tag2) = (pair.map1_name, pair.map2_name)
            clnoise = "cleaned_noise_inv"
            map1_file = "%s/sec_%s_cleaned_clean_map_I_with_%s_%s.npy" % \
                            (self.output_root, tag1, tag2, n_modes)
            noise_inv1_file = "%s/sec_%s_%s_I_with_%s_%s.npy" % \
                            (self.output_root, tag1, clnoise, tag2, n_modes)
            modes1_file = "%s/sec_%s_modes_clean_map_I_with_%s_%s.npy" % \
                            (self.output_root, tag1, tag2, n_modes)

            #if pair.map1.shape == pair.map2.shape:
            map2_file = "%s/sec_%s_cleaned_clean_map_I_with_%s_%s.npy" % \
                            (self.output_root, tag2, tag1, n_modes)
            noise_inv2_file = "%s/sec_%s_%s_I_with_%s_%s.npy" % \
                            (self.output_root, tag2, clnoise, tag1, n_modes)
            modes2_file = "%s/sec_%s_modes_clean_map_I_with_%s_%s.npy" % \
                            (self.output_root, tag2, tag1, n_modes)

            if self.params['subtract_inputmap_from_sim'] or \
               self.params['subtract_sim_from_inputmap']:
                map1 = pair.map1 - self.pairs_parallel_track[pairitem].map1
                map2 = pair.map2 - self.pairs_parallel_track[pairitem].map2
            elif self.params['subtract_realmap_from_sim']:
                if not os.path.exists(self.params['realmap_dir']):
                    print "Error: Real map directory does not exists"
                    exit()
                else:
                    realmap_file = "%s/sec_%s_cleaned_clean_map_I_with_%s_%s.npy"%\
                                   (self.params['realmap_dir'], tag1, tag2, n_modes)
                    realmap = algebra.make_vect(algebra.load(realmap_file))
                    print "Subtract realmap from result"
                    map1 = copy.deepcopy(pair.map1) - realmap
                    map2 = copy.deepcopy(pair.map2)
                    if map2.shape == map1.shape:
                        map2 -= realmap
            else:
                map1 = copy.deepcopy(pair.map1)
                map2 = copy.deepcopy(pair.map2)

            prodmap_list.append(map1 * pair.noise_inv1)
            prodmap_list.append(map2 * pair.noise_inv2)
            weight_list.append(pair.noise_inv1)
            weight_list.append(pair.noise_inv2)

            if self.params['save_section']:
                algebra.save(map1_file, map1)
                algebra.save(noise_inv1_file, pair.noise_inv1)
                algebra.save(modes1_file, pair.left_modes)

                if pair.map1.shape == pair.map2.shape:
                    algebra.save(map2_file, map2)
                    algebra.save(noise_inv2_file, pair.noise_inv2)
                    algebra.save(modes2_file, pair.right_modes)

            #if map2.shape[0] == 3*map1.shape[0]:
            #    #source_dict = {}
            #    #source_dict['map'] = map2_file
            #    #source_dict['weight'] = noise_inv2_file
            #    map_dict = {}
            #    map_dict['map'] = map2
            #    map_dict['weight'] = pair.noise_inv2
            #    target_dict = {}
            #    target_dict['imap'] = map2_file.replace('_'+tag2, '_'+tag2+'_I')
            #    target_dict['qmap'] = map2_file.replace('_'+tag2, '_'+tag2+'_Q')
            #    target_dict['umap'] = map2_file.replace('_'+tag2, '_'+tag2+'_U')
            #    target_dict['imap_weight'] =\
            #                    noise_inv2_file.replace('_'+tag2, '_'+tag2+'_I')
            #    target_dict['qmap_weight'] =\
            #                    noise_inv2_file.replace('_'+tag2, '_'+tag2+'_Q')
            #    target_dict['umap_weight'] =\
            #                    noise_inv2_file.replace('_'+tag2, '_'+tag2+'_U')
            #    divide_iqu_map(map_dict=map_dict, target_dict=target_dict)

        if map1.shape != map2.shape:
            print "Shape of map1 and map2 are different, can not get combined map."
        else:
            cumulative_product = algebra.zeros_like(prodmap_list[0])
            cumulative_weight = algebra.zeros_like(prodmap_list[0])
            for mapind in range(0, len(prodmap_list)):
                cumulative_product += prodmap_list[mapind]
                cumulative_weight += weight_list[mapind]

            algebra.compressed_array_summary(cumulative_weight, "weight map")
            algebra.compressed_array_summary(cumulative_product, "product map")

            cumulative_weight[cumulative_weight < 1.e-20] = 0.
            cumulative_product[cumulative_weight < 1.e-20] = 0.

            newmap = cumulative_product / cumulative_weight

            # if the new map is nan or inf, set it and the weights to zero
            nan_array = np.isnan(newmap)
            newmap[nan_array] = 0.
            cumulative_product[nan_array] = 0.
            cumulative_weight[nan_array] = 0.
            inf_array = np.isinf(newmap)
            newmap[inf_array] = 0.
            cumulative_product[inf_array] = 0.
            cumulative_weight[inf_array] = 0.
            algebra.compressed_array_summary(newmap, "new map")
            algebra.compressed_array_summary(cumulative_product,"final map * weight")
            algebra.compressed_array_summary(cumulative_weight, "final weight map")

            combined = "combined_clean"
            combined_map_file = "%s/%s_map_%s.npy" % \
                                (self.output_root, combined, n_modes)
            combined_weight_file = "%s/%s_weight_%s.npy" % \
                                (self.output_root, combined, n_modes)
            combined_product_file = "%s/%s_product_%s.npy" % \
                                (self.output_root, combined, n_modes)
            combined_ones_file = "%s/%s_ones_%s.npy" % \
                                (self.output_root, combined, n_modes)

            algebra.save(combined_map_file, newmap)
            algebra.save(combined_product_file, cumulative_product)
            algebra.save(combined_weight_file, cumulative_weight)
            algebra.save(combined_ones_file, algebra.ones_like(newmap))
Example #12
    def load_pairs(self):
        r"""load the set of map/noise pairs specified by keys handed to the
        database. This sets up operations on the quadratic product
            Q = map1^T noise_inv1 B noise_inv2 map2
        """
        par = self.params
        if not par['pairlist']:
            if par['calc_diagnal']:
                noise_inv_suffix = ";noise_inv"
            else:
                noise_inv_suffix = ";noise_weight"
            (self.pairlist, pairdict) = dp.cross_maps(par['map1'], par['map2'],
                                                 par['noise_inv1'],
                                                 par['noise_inv2'],
                                                 noise_inv_suffix=noise_inv_suffix,
                                                 verbose=False,
                                                 db_to_use=self.datapath_db)
        else:
            self.pairlist = par['pairlist']
            pairdict = par['pairdict']

        for pairitem in self.pairlist:
            pdict = pairdict[pairitem]
            print "-" * 80
            dp.print_dictionary(pdict, sys.stdout,
                                key_list=['map1', 'noise_inv1',
                                          'map2', 'noise_inv2'])

            # map1 & noise_inv1
            map1 = algebra.make_vect(algebra.load(pdict['map1']))
            if par['simfile1'] is not None:
                print "adding %s with multiplier %s" % (par['simfile1'],
                                                        par['sim_multiplier'])

                sim1 = algebra.make_vect(algebra.load(par['simfile1']))
                sim1 *= par['sim_multiplier']
            else:
                sim1 = algebra.zeros_like(map1)
            if not par['no_weights']:
                noise_inv1 = wrap_find_weight(pdict['noise_inv1'],
                                regenerate=par['regenerate_noise_inv'],
                                calc_diagnal = par['calc_diagnal'])
            else:
                noise_inv1 = algebra.ones_like(map1)

            # map2 & noise_inv2
            #if pairitem == 'I_with_E':
            if len(self.freq_list2) == 4*len(self.freq_list1):
                '''For IQUV case'''
                print 'Construct E map using I Q U V'
                iquvdict = {}
                iquvdict['imap'] = pdict['map2'].replace('_E', '_I')
                iquvdict['qmap'] = pdict['map2'].replace('_E', '_Q')
                iquvdict['umap'] = pdict['map2'].replace('_E', '_U')
                iquvdict['vmap'] = pdict['map2'].replace('_E', '_V')
                iquvdict['imap_weight'] = pdict['noise_inv2'].replace('_E', '_I')
                iquvdict['qmap_weight'] = pdict['noise_inv2'].replace('_E', '_Q')
                iquvdict['umap_weight'] = pdict['noise_inv2'].replace('_E', '_U')
                iquvdict['vmap_weight'] = pdict['noise_inv2'].replace('_E', '_V')
                map_dict = extend_iquv_map(source_dict=iquvdict)
                map2 = map_dict['map']
                noise_inv2 = map_dict['weight']

                sim2 = copy.deepcopy(sim1)
                map_dict = {}
                map_dict['imap'] = sim2
                map_dict['qmap'] = algebra.zeros_like(sim1)
                map_dict['umap'] = algebra.zeros_like(sim1)
                map_dict['vmap'] = algebra.zeros_like(sim1)
                map_dict = extend_iquv_map(map_dict=map_dict)
                sim2 = map_dict['map']
            elif len(self.freq_list2) == 3*len(self.freq_list1):
                '''For IQU case'''
                print 'Construct E map using I Q U'
                iquvdict = {}
                iquvdict['imap'] = pdict['map2'].replace('_E', '_I')
                iquvdict['qmap'] = pdict['map2'].replace('_E', '_Q')
                iquvdict['umap'] = pdict['map2'].replace('_E', '_U')
                iquvdict['imap_weight'] = pdict['noise_inv2'].replace('_E', '_I')
                iquvdict['qmap_weight'] = pdict['noise_inv2'].replace('_E', '_Q')
                iquvdict['umap_weight'] = pdict['noise_inv2'].replace('_E', '_U')
                map_dict = extend_iqu_map(source_dict=iquvdict)
                map2 = map_dict['map']
                noise_inv2 = map_dict['weight']

                sim2 = copy.deepcopy(sim1)
                map_dict = {}
                map_dict['imap'] = sim2
                map_dict['qmap'] = algebra.zeros_like(sim1)
                map_dict['umap'] = algebra.zeros_like(sim1)
                map_dict = extend_iqu_map(map_dict=map_dict)
                sim2 = map_dict['map']
            else:
                '''For common case'''
                map2 = algebra.make_vect(algebra.load(pdict['map2']))
                if par['simfile2'] is not None:
                    print "adding %s with multiplier %s" % (par['simfile2'],
                                                            par['sim_multiplier'])
                    sim2 = algebra.make_vect(algebra.load(par['simfile2']))
                    sim2 *= par['sim_multiplier']
                else:
                    sim2 = algebra.zeros_like(map2)
                if not par['no_weights']:
                    noise_inv2 = wrap_find_weight(pdict['noise_inv2'],
                                    regenerate=par['regenerate_noise_inv'],
                                    calc_diagnal = par['calc_diagnal'])
                else:
                    noise_inv2 = algebra.ones_like(map2)

            #if self.params['clip_weight_percent'] is not None:
            #    print "Note: your are clipping the weight maps"
            #    mask1 = self.define_weightmask(noise_inv1, 
            #                percentile=self.params['clip_weight_percent'])
            #    mask2 = self.define_weightmask(noise_inv2, 
            #                percentile=self.params['clip_weight_percent'])
            #    noise_inv1 = self.saturate_weight(noise_inv1, mask1)
            #    noise_inv2 = self.saturate_weight(noise_inv2, mask2)

            pair = map_pair.MapPair(map1 + sim1, map2 + sim2,
                                    noise_inv1, noise_inv2,
                                    self.freq_list1, self.freq_list2)
            pair.set_names(pdict['tag1'], pdict['tag2'])

            pair.params = self.params
            self.pairs[pairitem] = pair

            if par['subtract_inputmap_from_sim'] or \
               par['subtract_sim_from_inputmap']:
                if par['subtract_inputmap_from_sim']:
                    pair_parallel_track = map_pair.MapPair(map1, map2,
                                                  noise_inv1, noise_inv2,
                                                  self.freq_list1, self.freq_list2)

                if par['subtract_sim_from_inputmap']:
                    pair_parallel_track = map_pair.MapPair(sim1, sim2,
                                                  noise_inv1, noise_inv2,
                                                  self.freq_list1, self.freq_list2)

                pair_parallel_track.set_names(pdict['tag1'], pdict['tag2'])
                pair_parallel_track.params = self.params
                self.pairs_parallel_track[pairitem] = pair_parallel_track
Example #13
    def execute(self, nprocesses=1) :
        """Worker funciton."""
        params = self.params
        # Make parent directory and write parameter file.
        kiyopy.utils.mkparents(params['output_root'])
        parse_ini.write_params(params, params['output_root'] + 'params.ini',
                               prefix='mm_')
        save_noise_diag = params['save_noise_diag']
        in_root = params['input_root']
        all_out_fname_list = []
        all_in_fname_list = []
        # Loop over files to process.
        for pol_str in params['polarizations']:
            dmap_fname = in_root + 'dirty_map_' + pol_str + '.npy'
            noise_fname = in_root + 'noise_inv_' + pol_str + '.npy'
            all_in_fname_list.append(
                kiyopy.utils.abbreviate_file_path(dmap_fname))
            all_in_fname_list.append(
                kiyopy.utils.abbreviate_file_path(noise_fname))
            # Load the dirty map and the noise matrix.
            dirty_map = algebra.load(dmap_fname)
            dirty_map = algebra.make_vect(dirty_map)
            if dirty_map.axes != ('freq', 'ra', 'dec') :
                raise ce.DataError("Expeced dirty map to have axes "
                                   "('freq', 'ra', 'dec'), but it has axes: "
                                   + str(dirty_map.axes))
            shape = dirty_map.shape
            noise_inv = algebra.open_memmap(noise_fname, 'r')
            noise_inv = algebra.make_mat(noise_inv)
            # Initialize the clean map.
            clean_map = algebra.info_array(sp.zeros(dirty_map.shape))
            clean_map.info = dict(dirty_map.info)
            clean_map = algebra.make_vect(clean_map)
            # If needed, initialize a map for the noise diagonal.
            if save_noise_diag :
                noise_diag = algebra.zeros_like(clean_map)
            # Two cases for the noise.  If it's the same shape as the map then
            # the noise is diagonal.  Otherwise, it should be block diagonal in
            # frequency.
            if noise_inv.ndim == 3 :
                if noise_inv.axes != ('freq', 'ra', 'dec') :
                    raise ce.DataError("Expeced noise matrix to have axes "
                                       "('freq', 'ra', 'dec'), but it has: "
                                       + str(noise_inv.axes))
                # Noise inverse can fit in memory, so copy it.
                noise_inv_memory = sp.array(noise_inv, copy=True)
                # Find the non-singular (covered) pixels.
                max_information = noise_inv_memory.max()
                good_data = noise_inv_memory > 1.0e-10*max_information
                # Make the clean map.
                clean_map[good_data] = (dirty_map[good_data] 
                                        / noise_inv_memory[good_data])
                if save_noise_diag :
                    noise_diag[good_data] = 1/noise_inv_memory[good_data]
            elif noise_inv.ndim == 5 :
                if noise_inv.axes != ('freq', 'ra', 'dec', 'ra', 'dec') :
                    raise ce.DataError("Expeced noise matrix to have axes "
                                       "('freq', 'ra', 'dec', 'ra', 'dec'), "
                                       "but it has: "
                                       + str(noise_inv.axes))
                # Arrange the dirty map as a vector.
                dirty_map_vect = sp.array(dirty_map) # A copy, as a plain array.
                dirty_map_vect.shape = (shape[0], shape[1]*shape[2])
                frequencies = dirty_map.get_axis('freq')/1.0e6
                # Allocate memory only once.
                noise_inv_freq = sp.empty((shape[1], shape[2], shape[1],
                                           shape[2]), dtype=float)
                if self.feedback > 1 :
                    print "Inverting noise matrix."
                # Block diagonal in frequency so loop over frequencies.
                for ii in xrange(dirty_map.shape[0]) :
                    if self.feedback > 1:
                        print "Frequency: ", "%5.1f"%(frequencies[ii]),
                    if self.feedback > 2:
                        print ", start mmap read:",
                        sys.stdout.flush()
                    noise_inv_freq[...] = noise_inv[ii, ...]
                    if self.feedback > 2:
                        print "done, start eig:",
                        sys.stdout.flush()
                    noise_inv_freq.shape = (shape[1]*shape[2],
                                            shape[1]*shape[2])
                    # Solve the map making equation by diagonalization.
                    noise_inv_diag, Rot = sp.linalg.eigh(noise_inv_freq, 
                                                         overwrite_a=True)
                    if self.feedback > 2:
                        print "done",
                    map_rotated = sp.dot(Rot.T, dirty_map_vect[ii])
                    # Zero out infinite noise modes.
                    bad_modes = noise_inv_diag < 1.0e-5*noise_inv_diag.max()
                    if self.feedback > 1:
                        print ", discarded: ",
                        print "%4.1f"%(100.0*sp.sum(bad_modes)/bad_modes.size),
                        print "% of modes",
                    if self.feedback > 2:
                        print ", start rotations:",
                        sys.stdout.flush()
                    map_rotated[bad_modes] = 0.
                    noise_inv_diag[bad_modes] = 1.0
                    # Solve for the clean map and rotate back.
                    map_rotated /= noise_inv_diag
                    map = sp.dot(Rot, map_rotated)
                    if self.feedback > 2:
                        print "done",
                        sys.stdout.flush()
                    # Fill the clean array.
                    map.shape = (shape[1], shape[2])
                    clean_map[ii, ...] = map
                    if save_noise_diag :
                        # Using C = R Lambda R^T 
                        # where Lambda = diag(1/noise_inv_diag).
                        temp_noise_diag = 1/noise_inv_diag
                        temp_noise_diag[bad_modes] = 0
                        # Multiply R by the diagonal eigenvalue matrix.
                        # Broadcasting does equivalent of mult by diag matrix.
                        temp_mat = Rot*temp_noise_diag
                        # Multiply by R^T, but only calculate the diagonal
                        # elements.
                        for jj in range(shape[1]*shape[2]) :
                            temp_noise_diag[jj] = sp.dot(temp_mat[jj,:], 
                                                         Rot[jj,:])
                        temp_noise_diag.shape = (shape[1], shape[2])
                        noise_diag[ii, ...] = temp_noise_diag
                    # Return workspace memory to original shape.
                    noise_inv_freq.shape = (shape[1], shape[2],
                                            shape[1], shape[2])
                    if self.feedback > 1:
                        print ""
                        sys.stdout.flush()
            elif noise_inv.ndim == 6 :
                raise NotImplementedError("Full noise matrix not yet "
                                          "implemented.  Best we can do is "
                                          "block diagonal in frequency.")
            else :
                raise ce.DataError("Noise matrix has bad shape.")
            # Write the clean map to file.
            out_fname = params['output_root'] + 'clean_map_' + pol_str + '.npy'
            algebra.save(out_fname, clean_map)
            all_out_fname_list.append(
                kiyopy.utils.abbreviate_file_path(out_fname))
            if save_noise_diag :
                noise_diag_fname = (params['output_root'] + 'noise_diag_'
                                    + pol_str + '.npy')
                algebra.save(noise_diag_fname, noise_diag)
                all_out_fname_list.append(
                    kiyopy.utils.abbreviate_file_path(noise_diag_fname))

        # Finally update the history object.
        history = hist.read(in_root + 'history.hist')
        history.add('Read map and noise files:', all_in_fname_list)
        history.add('Converted dirty map to clean map.', all_out_fname_list)
        h_fname = params['output_root'] + "history.hist"
        history.write(h_fname)
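
The loop above solves the per-frequency map-making equation by diagonalization: rotate the dirty map into the eigenbasis of the noise inverse, discard modes carrying essentially no information, divide by the eigenvalues, and rotate back; the noise diagonal then follows from C = R diag(1/eigenvalues) R^T. Below is a minimal, self-contained sketch of that one-frequency step; the helper name, mode cut, and toy data are illustrative only, not part of the pipeline.

import numpy as np
import scipy.linalg

def solve_one_frequency(noise_inv_freq, dirty_map_freq, mode_cut=1.0e-5):
    # noise_inv_freq: (npix, npix) noise inverse for a single frequency slice.
    # dirty_map_freq: (npix,) dirty map for the same slice, flattened.
    noise_inv_diag, rot = scipy.linalg.eigh(noise_inv_freq)
    map_rotated = np.dot(rot.T, dirty_map_freq)
    # Zero the modes with (almost) no information, as in the loop above.
    bad_modes = noise_inv_diag < mode_cut * noise_inv_diag.max()
    map_rotated[bad_modes] = 0.
    noise_inv_diag[bad_modes] = 1.
    map_rotated /= noise_inv_diag
    clean = np.dot(rot, map_rotated)
    # Noise diagonal from C = R diag(1/eigenvalues) R^T, keeping only diag(C).
    noise_evals = 1. / noise_inv_diag
    noise_evals[bad_modes] = 0.
    noise_diag = np.sum(rot * noise_evals[None, :] * rot, axis=1)
    return clean, noise_diag

# Toy problem: diagonal noise inverse with one (nearly) unconstrained pixel.
noise_inv = np.diag([2., 4., 5., 1.0e-12])
dirty = np.array([2., 8., 15., 7.])
clean, noise_diag = solve_one_frequency(noise_inv, dirty)
# clean      -> [1., 2., 3., 0.]   (the singular pixel is zeroed, not blown up)
# noise_diag -> [0.5, 0.25, 0.2, 0.]
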
Beispiel #14
0
    def apply(self, alg_ob, mode="constant", cval=0, right_apply=False):
        """Apply the beam, as a linear operator, to vector or matrix.

        This operation is equivalent to matrix multiplication by the beam
        matrix.  The matrix multiplication can be performed with the beam
        matrix on either the left or the right.  The beam matrix is a symmetric
        matrix.

        Parameters
        ----------
        alg_ob: `vect` or `mat` subclass.
            Object to which the beam will be applied.  This object must live in
            map space.  If it is a vect, alg_ob.axes must be ('freq', 'ra',
            'dec').  If it is a mat, and `right_apply` is False, then
            alg_ob.row_names() must return ('freq', 'ra', 'dec').  If
            `right_apply` is True, then alg_ob.col_names() should return
            this tuple.  Also, the meta data for these three axes must be set
            (see `algebra.alg_object.set_axis_info`).
        mode: string
            Boundary condition handed to the convolution ('constant',
            'reflect', 'wrap', ...).  The default, 'constant', zero pads
            with `cval`.
        cval: float
            Fill value used beyond the map edges when `mode` is 'constant'.
        right_apply: bool
            Whether to apply the beam operator from the left (False,
            default) or from the right (True).  If `alg_ob` is a vect subclass,
            this has no effect (because the beam matrix is symmetric).

        Returns
        -------
        out: `vect` or `mat` subclass same shape as `alg_ob`.
            Convolved map or matrix.

        Notes
        -----
        The missing feature here is the ability to preallocate memory or to be
        able to overwrite the input alg_ob in place.  This will be important
        for large matrices that we can only hold in memory one at a time.
        Also this would be pretty easy to implement.
        """

        if ((not 'freq' in alg_ob.axes)
            or (not 'ra' in alg_ob.axes)
            or (not 'dec' in alg_ob.axes)):
            raise ce.DataError("Beam operation only works in frequency, "
                               "ra, dec, coords.")

        out = algebra.zeros_like(alg_ob)

        # Figure out the pixel sizes (in real degrees).
        dra = abs(alg_ob.info['ra_delta'])
        dra /= sp.cos(alg_ob.info['dec_centre'] * sp.pi / 180.)
        ddec = abs(alg_ob.info['dec_delta'])

        # Loop over frequencies and do convolution one frequency at a time.
        freq_array = alg_ob.get_axis('freq')
        for ii, freq in enumerate(freq_array):
            width = self.kernel_size(freq)

            # Make sure the dimensions are an odd number of pixels.
            nkx = width // abs(dra)
            if nkx % 2 == 0:
                nkx += 1

            nky = width // abs(ddec)
            if nky % 2 == 0:
                nky += 1

            # Calculate kernel lags.
            lagsx = (sp.arange(nkx, dtype=float) - (nkx - 1) // 2) * dra
            lagsy = (sp.arange(nky, dtype=float) - (nky - 1) // 2) * ddec
            lags_sq = lagsx[:, None] ** 2. + lagsy[None, :] ** 2.

            kernel = dra * ddec * self.beam_function(lags_sq, freq,
                                                     squared_delta=True)

            if isinstance(alg_ob, algebra.vect):
                if alg_ob.axes != ('freq', 'ra', 'dec'):
                    raise ce.DataError("Vector axis names must be exactly "
                                       "('freq', 'ra', 'dec')")

                convolve(alg_ob[ii, ...], kernel, output=out[ii], mode=mode,
                         cval=cval)

            elif isinstance(alg_ob, algebra.mat):
                # If applying from the left, loop over columns and convolve
                # over rows.  If applying from the right, do the opposite.
                if right_apply:
                    if alg_ob.col_names() != ('freq', 'ra', 'dec'):
                        raise ce.DataError("Matrix column axis names must be "
                                           "exactly ('freq', 'ra', 'dec')")

                    iterator = alg_ob.iter_row_index()

                else:
                    if alg_ob.row_names() != ('freq', 'ra', 'dec'):
                        raise ce.DataError("Matrix row axis names must be "
                                           "exactly ('freq', 'ra', 'dec')")

                    iterator = alg_ob.iter_col_index()

                for index in iterator:
                    sub_mat = alg_ob[index]
                    # Pick out this frequency.
                    sub_mat = sub_mat[ii, ...]
                    # make a view of the output array.
                    sub_out = out[index]
                    sub_out = sub_out[ii, ...]

                    convolve(sub_mat, kernel, sub_out, mode=mode, cval=cval)
        return out
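
The kernel construction inside the frequency loop is the core of this method: pixel sizes come from the map's axis info, the kernel dimensions are forced to be odd, and the beam function is evaluated on the squared lags before convolving each frequency slice. Below is a rough, self-contained sketch of one slice, assuming a Gaussian beam and a scipy.ndimage-style convolution; the pixel sizes, beam width, and map are illustrative numbers only.

import numpy as np
from scipy.ndimage import convolve

dra, ddec = 0.07, 0.07      # pixel sizes in degrees (illustrative)
width = 0.35                # kernel extent, stands in for self.kernel_size(freq)
sigma = 0.1                 # assumed Gaussian beam sigma in degrees (illustrative)

# Force an odd number of kernel pixels in each direction.
nkx = int(width // dra)
if nkx % 2 == 0:
    nkx += 1
nky = int(width // ddec)
if nky % 2 == 0:
    nky += 1

# Kernel lags and the (assumed Gaussian) beam evaluated on the squared lags.
lagsx = (np.arange(nkx, dtype=float) - (nkx - 1) // 2) * dra
lagsy = (np.arange(nky, dtype=float) - (nky - 1) // 2) * ddec
lags_sq = lagsx[:, None] ** 2 + lagsy[None, :] ** 2
kernel = dra * ddec * np.exp(-0.5 * lags_sq / sigma ** 2) / (2. * np.pi * sigma ** 2)

one_slice = np.random.rand(32, 32)   # one frequency plane of a map
smoothed = convolve(one_slice, kernel, mode='constant', cval=0.0)
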
Beispiel #15
0
def solve_from_eig(noise_evalsinv, noise_evects, dirty_map,
                   return_noise_diag=False, feedback=0):
    """Converts a dirty map to a clean map using the eigen decomposition of the
    noise inverse.
    """
    
    # Check the shapes.
    if noise_evects.ndim != 4:
        raise ValueError("Expected 4D array for 'noise_evects`.")
    if noise_evalsinv.shape != (noise_evects.shape[-1],):
        raise ValueError("Wrong number of eigenvalues.")
    if dirty_map.shape != noise_evects.shape[:-1]:
        raise ValueError("Dirty map and noise don't have matching dimensions.")
    if dirty_map.size != noise_evects.shape[-1]:
        raise ValueError("Eigen space not the same total size as map space.")
    n = noise_evalsinv.shape[0]
    nf = dirty_map.shape[0]
    nr = dirty_map.shape[1]
    nd = dirty_map.shape[2]
    # Copy the eigenvalues.
    noise_evalsinv = noise_evalsinv.copy()
    # Find poorly constrained modes and zero them out.
    bad_inds = noise_evalsinv < 1./constants.T_huge**2
    n_bad = sp.sum(bad_inds)
    if feedback > 1:
        print ("Discarding %d modes of %d. %f percent." 
               % (n_bad, n, 100. * n_bad / n))
    noise_evalsinv[bad_inds] = 1.
    # Rotate the dirty map into the diagonal noise space.
    if feedback > 1:
        print "Rotating map to eigenspace."
    map_rot = sp.zeros(n, dtype=sp.float64)
    for ii in xrange(nf):
        for jj in xrange(nr):
            for kk in xrange(nd):
                tmp = noise_evects[ii,jj,kk,:].copy()
                map_rot += dirty_map[ii,jj,kk] * tmp
    # Zero out the poorly constrained modes, then divide by the noise-inverse
    # eigenvalues (i.e. multiply by the diagonal noise in this basis).
    map_rot[bad_inds] = 0
    map_rot = map_rot / noise_evalsinv
    # Now rotate back to the original space.
    if feedback > 1:
        print "Rotating back to map space."
    clean_map = algebra.zeros_like(dirty_map)
    for ii in xrange(nf):
        for jj in xrange(nr):
            for kk in xrange(nd):
                tmp = noise_evects[ii,jj,kk,:].copy()
                clean_map[ii,jj,kk] = sp.sum(map_rot * tmp)
    if return_noise_diag:
        if feedback > 1:
            print "Getting noise diagonal."
        noise_diag = algebra.zeros_like(dirty_map)
        noise_evals = 1. / noise_evalsinv
        noise_evals[bad_inds] = constants.T_huge**2
        for ii in xrange(nf):
            for jj in xrange(nr):
                for kk in xrange(nd):
                    tmp = noise_evects[ii,jj,kk,:].copy()
                    noise_diag[ii,jj,kk] = sp.sum(tmp**2 * noise_evals)
        return clean_map, noise_diag
    else:
        return clean_map
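
A hedged usage sketch for solve_from_eig follows. It assumes this module and its algebra import are available, and that algebra.make_vect accepts an axis_names keyword (an assumption, not shown above). The eigenvector array packs one eigenvector per trailing index, reshaped onto the (freq, ra, dec) grid, so noise_evects[..., q] is the q-th mode.

import numpy as np
import scipy.linalg

nf, nr, nd = 2, 3, 3
n = nf * nr * nd                       # eigen space size must equal map size

# Toy noise inverse on the flattened map: diagonal and well conditioned.
noise_inv_flat = np.diag(np.linspace(1.0, 2.0, n))
evalsinv, evects = scipy.linalg.eigh(noise_inv_flat)
noise_evects = evects.reshape(nf, nr, nd, n)

# axis_names keyword assumed; in the pipeline the dirty map is loaded instead.
dirty_map = algebra.make_vect(np.random.rand(nf, nr, nd),
                              axis_names=('freq', 'ra', 'dec'))

clean_map = solve_from_eig(evalsinv, noise_evects, dirty_map)
clean_map, noise_diag = solve_from_eig(evalsinv, noise_evects, dirty_map,
                                       return_noise_diag=True, feedback=0)
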
Beispiel #16
0
 def execute(self, nprocesses=1) :
     """Worker funciton."""
     params = self.params
     # Make parent directory and write parameter file.
     kiyopy.utils.mkparents(params['output_root'])
     parse_ini.write_params(params, params['output_root'] + 'params.ini',
                            prefix=prefix)
     save_noise_diag = params['save_noise_diag']
     in_root = params['input_root']
     all_out_fname_list = []
     all_in_fname_list = []
     # Figure out what the band names are.
     bands = params['bands']
     if not bands:
         map_files = glob.glob(in_root + 'dirty_map_' + pol_str + "_*.npy")
         bands = []
         root_len = len(in_root + 'dirty_map_')
         for file_name in map_files:
             bands.append(file_name[root_len:-4])
     # Loop over files to process.
     for pol_str in params['polarizations']:
         for band in bands:
             if band == -1:
                 band_str = ''
             else:
                 band_str =  "_" + repr(band)
             dmap_fname = (in_root + 'dirty_map_' + pol_str + 
                           band_str + '.npy')
             all_in_fname_list.append(
                 kiyopy.utils.abbreviate_file_path(dmap_fname))
             # Load the dirty map and the noise matrix.
             dirty_map = algebra.load(dmap_fname)
             dirty_map = algebra.make_vect(dirty_map)
             if dirty_map.axes != ('freq', 'ra', 'dec') :
                 msg = ("Expeced dirty map to have axes ('freq',"
                        "'ra', 'dec'), but it has axes: "
                        + str(dirty_map.axes))
                 raise ce.DataError(msg)
             shape = dirty_map.shape
             # Initialize the clean map.
             clean_map = algebra.info_array(sp.zeros(dirty_map.shape))
             clean_map.info = dict(dirty_map.info)
             clean_map = algebra.make_vect(clean_map)
             # If needed, initialize a map for the noise diagonal.
             if save_noise_diag :
                 noise_diag = algebra.zeros_like(clean_map)
             if params["from_eig"]:
                 # Solving from eigen decomposition of the noise instead of
                 # the noise itself.
                 # Load in the decomposition.
                 evects_fname = (in_root + 'noise_evects_' + pol_str
                                 + band_str + '.npy')
                 if self.feedback > 1:
                     print "Using dirty map: " + dmap_fname
                     print "Using eigenvectors: " + evects_fname
                 evects = algebra.open_memmap(evects_fname, 'r')
                 evects = algebra.make_mat(evects)
                 evals_inv_fname = (in_root + 'noise_evalsinv_' + pol_str
                                    + "_" + repr(band) + '.npy')
                 evals_inv = algebra.load(evals_inv_fname)
                 evals_inv = algebra.make_mat(evals_inv)
                 # Solve for the map.
                 if params["save_noise_diag"]:
                     clean_map, noise_diag = solve_from_eig(evals_inv,
                                 evects, dirty_map, True, self.feedback)
                 else:
                     clean_map = solve_from_eig(evals_inv,
                                 evects, dirty_map, False, self.feedback)
                 # Delete the eigen vectors to recover memory.
                 del evects
             else:
                 # Solving from the noise.
                 noise_fname = (in_root + 'noise_inv_' + pol_str +
                                band_str + '.npy')
                 if self.feedback > 1:
                     print "Using dirty map: " + dmap_fname
                     print "Using noise inverse: " + noise_fname
                 all_in_fname_list.append(
                     kiyopy.utils.abbreviate_file_path(noise_fname))
                 noise_inv = algebra.open_memmap(noise_fname, 'r')
                 noise_inv = algebra.make_mat(noise_inv)
                 # Two cases for the noise.  If its the same shape as the map
                 # then the noise is diagonal.  Otherwise, it should be
                 # block diagonal in frequency.
                 if noise_inv.ndim == 3 :
                     if noise_inv.axes != ('freq', 'ra', 'dec') :
                         msg = ("Expeced noise matrix to have axes "
                                 "('freq', 'ra', 'dec'), but it has: "
                                 + str(noise_inv.axes))
                         raise ce.DataError(msg)
                     # Noise inverse can fit in memory, so copy it.
                     noise_inv_memory = sp.array(noise_inv, copy=True)
                     # Find the non-singular (covered) pixels.
                     max_information = noise_inv_memory.max()
                     good_data = noise_inv_memory > 1.0e-10*max_information
                     # Make the clean map.
                     clean_map[good_data] = (dirty_map[good_data] 
                                             / noise_inv_memory[good_data])
                     if save_noise_diag :
                         noise_diag[good_data] = \
                                 1/noise_inv_memory[good_data]
                 elif noise_inv.ndim == 5 :
                     if noise_inv.axes != ('freq', 'ra', 'dec', 'ra',
                                           'dec'):
                         msg = ("Expeced noise matrix to have axes "
                                "('freq', 'ra', 'dec', 'ra', 'dec'), "
                                "but it has: " + str(noise_inv.axes))
                         raise ce.DataError(msg)
                     # Arrange the dirty map as a vector.
                     dirty_map_vect = sp.array(dirty_map) # A copy, as a plain array.
                     dirty_map_vect.shape = (shape[0], shape[1]*shape[2])
                     frequencies = dirty_map.get_axis('freq')/1.0e6
                     # Allocate memory only once.
                     noise_inv_freq = sp.empty((shape[1], shape[2], 
                                     shape[1], shape[2]), dtype=float)
                     if self.feedback > 1 :
                         print "Inverting noise matrix."
                     # Block diagonal in frequency so loop over frequencies.
                     for ii in xrange(dirty_map.shape[0]) :
                         if self.feedback > 1:
                             print "Frequency: ", "%5.1f"%(frequencies[ii]),
                         if self.feedback > 2:
                             print ", start mmap read:",
                             sys.stdout.flush()
                         noise_inv_freq[...] = noise_inv[ii, ...]
                         if self.feedback > 2:
                             print "done, start eig:",
                             sys.stdout.flush()
                         noise_inv_freq.shape = (shape[1]*shape[2],
                                                 shape[1]*shape[2])
                         # Solve the map making equation by diagonalization.
                         noise_inv_diag, Rot = sp.linalg.eigh(
                             noise_inv_freq, overwrite_a=True)
                         if self.feedback > 2:
                             print "done",
                         map_rotated = sp.dot(Rot.T, dirty_map_vect[ii])
                         # Zero out infinite noise modes.
                         bad_modes = (noise_inv_diag
                                      < 1.0e-5 * noise_inv_diag.max())
                         if self.feedback > 1:
                             print ", discarded: ",
                             print "%4.1f" % (100.0 * sp.sum(bad_modes) 
                                              / bad_modes.size),
                             print "% of modes",
                         if self.feedback > 2:
                             print ", start rotations:",
                             sys.stdout.flush()
                         map_rotated[bad_modes] = 0.
                         noise_inv_diag[bad_modes] = 1.0
                         # Solve for the clean map and rotate back.
                         map_rotated /= noise_inv_diag
                         map = sp.dot(Rot, map_rotated)
                         if self.feedback > 2:
                             print "done",
                             sys.stdout.flush()
                         # Fill the clean array.
                         map.shape = (shape[1], shape[2])
                         clean_map[ii, ...] = map
                         if save_noise_diag :
                             # Using C = R Lambda R^T 
                             # where Lambda = diag(1/noise_inv_diag).
                             temp_noise_diag = 1/noise_inv_diag
                             temp_noise_diag[bad_modes] = 0
                             # Multiply R by the diagonal eigenvalue matrix.
                             # Broadcasting does equivalent of mult by diag
                             # matrix.
                             temp_mat = Rot*temp_noise_diag
                             # Multiply by R^T, but only calculate the
                             # diagonal elements.
                             for jj in range(shape[1]*shape[2]) :
                                 temp_noise_diag[jj] = sp.dot(
                                     temp_mat[jj,:], Rot[jj,:])
                             temp_noise_diag.shape = (shape[1], shape[2])
                             noise_diag[ii, ...] = temp_noise_diag
                         # Return workspace memory to original shape.
                         noise_inv_freq.shape = (shape[1], shape[2],
                                                 shape[1], shape[2])
                         if self.feedback > 1:
                             print ""
                             sys.stdout.flush()
                 elif noise_inv.ndim == 6 :
                     if save_noise_diag:
                         # OLD WAY.
                         #clean_map, noise_diag, chol = solve(noise_inv,
                         #        dirty_map, True, feedback=self.feedback)
                         # NEW WAY.
                         clean_map, noise_diag, noise_inv_diag, chol = \
                                   solve(noise_fname, noise_inv, dirty_map,
                                   True, feedback=self.feedback)
                     else:
                         # OLD WAY.
                         #clean_map, chol = solve(noise_inv, dirty_map, 
                         #            False, feedback=self.feedback)
                         # NEW WAY.
                         clean_map, noise_inv_diag, chol = \
                                   solve(noise_fname, noise_inv, dirty_map,
                                   False, feedback=self.feedback)
                     if params['save_cholesky']:
                         chol_fname = (params['output_root'] + 'chol_'
                                     + pol_str + band_str + '.npy')
                         sp.save(chol_fname, chol)
                     if params['save_noise_inv_diag']:
                         noise_inv_diag_fname = (params['output_root'] +
                                    'noise_inv_diag_' + pol_str + band_str 
                                    + '.npy')
                         algebra.save(noise_inv_diag_fname, noise_inv_diag)
                     # Delete the cholesky to recover memory.
                     del chol
                 else :
                     raise ce.DataError("Noise matrix has bad shape.")
                 # In all cases delete the noise object to recover memory.
                 del noise_inv
             # Write the clean map to file.
             out_fname = (params['output_root'] + 'clean_map_'
                          + pol_str + band_str + '.npy')
             if self.feedback > 1:
                 print "Writing clean map to: " + out_fname
             algebra.save(out_fname, clean_map)
             all_out_fname_list.append(
                 kiyopy.utils.abbreviate_file_path(out_fname))
             if save_noise_diag :
                 noise_diag_fname = (params['output_root'] + 'noise_diag_'
                                     + pol_str + band_str + '.npy')
                 algebra.save(noise_diag_fname, noise_diag)
                 all_out_fname_list.append(
                     kiyopy.utils.abbreviate_file_path(noise_diag_fname))
             # Check the clean map for failure.
             if not sp.alltrue(sp.isfinite(clean_map)):
                 n_bad = sp.sum(sp.logical_not(sp.isfinite(clean_map)))
                 msg = ("Non finite entries found in clean map. Solve"
                        " failed. %d out of %d entries bad" 
                        % (n_bad, clean_map.size)) 
                 raise RuntimeError(msg)
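
For reference, this is how the polarization and band labels are folded into the file names the loop above reads and writes; the paths and label values here are illustrative only.

in_root, out_root = 'maps/in/', 'maps/out/'
pol_str, band = 'I', 800
band_str = '' if band == -1 else '_' + repr(band)

dmap_fname = in_root + 'dirty_map_' + pol_str + band_str + '.npy'
noise_fname = in_root + 'noise_inv_' + pol_str + band_str + '.npy'
out_fname = out_root + 'clean_map_' + pol_str + band_str + '.npy'
# e.g. 'maps/in/dirty_map_I_800.npy' -> 'maps/out/clean_map_I_800.npy'
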
Beispiel #17
0
def calculate_mixing(weight_file1,
                     weight_file2,
                     bins,
                     xspec_fileout,
                     mixing_fileout,
                     unitless=False,
                     refinement=2,
                     pad=5,
                     order=1,
                     window='blackman',
                     zero_pad=False,
                     identity_test=False):
    print "loading the weights and converting to physical coordinates"
    weight1_obs = algebra.make_vect(algebra.load(weight_file1))
    weight1 = bh.repackage_kiyo(
        pg.physical_grid(weight1_obs,
                         refinement=refinement,
                         pad=pad,
                         order=order))

    weight2_obs = algebra.make_vect(algebra.load(weight_file2))
    weight2 = bh.repackage_kiyo(
        pg.physical_grid(weight2_obs,
                         refinement=refinement,
                         pad=pad,
                         order=order))

    if window:
        window_function = fftutil.window_nd(weight1.shape, name=window)
        weight1 *= window_function
        weight2 *= window_function

    print "calculating the cross-power of the spatial weighting functions"
    arr1 = algebra.ones_like(weight1)
    arr2 = algebra.ones_like(weight2)

    # no window applied here (applied above)
    xspec = pe.cross_power_est(weight1,
                               weight2,
                               arr1,
                               arr2,
                               window=None,
                               nonorm=True)

    # for each point in the cube, find |k|, k_perp, k_parallel
    # TODO: speed this up by using one direct numpy call (not limiting)
    k_mag_arr = binning.radius_array(xspec)
    k_perp_arr = binning.radius_array(xspec, zero_axes=[0])
    k_parallel_arr = binning.radius_array(xspec, zero_axes=[1, 2])

    if unitless:
        xspec = pe.make_unitless(xspec, radius_arr=k_mag_arr)

    # NOTE: assuming lowest k bin has only one point in 3D k-space
    # could make this floor of dimensions divided by 2 also
    center_3d = np.transpose(np.transpose(np.where(k_mag_arr == 0.))[0])

    # In the estimator, we divide by sum(w1 * w2) to remove most of the effect
    # of the weighting.  The mixing matrix here can be thought of as a
    # correction to that diagonal-only estimate.
    leakage_ratio = xspec[center_3d[0], center_3d[1], center_3d[2]] / \
                    np.sum(weight1 * weight2)
    print "power leakage ratio: %10.5g" % leakage_ratio

    xspec /= np.sum(weight1 * weight2)

    print "partitioning the 3D kspace up into the 2D k bins"
    (kflat, ret_indices) = bin_indices_2d(k_perp_arr, k_parallel_arr, bins,
                                          bins)

    # perform a test where the window function is a delta function at the
    # origin so that the mixing matrix is identity
    if identity_test:
        xspec = algebra.zeros_like(xspec)
        xspec[center_3d[0], center_3d[1], center_3d[2]] = 1.

    # now save the window cross-power for downstream pooled users
    algebra.save(xspec_fileout, xspec)

    runlist = []
    for bin_index in range(kflat.shape[0]):
        bin_3d = ret_indices[repr(bin_index)]
        if bin_3d is not None:
            runlist.append((xspec_fileout, bin_index, bins, bin_3d, center_3d))

    pool = multiprocessing.Pool(processes=(multiprocessing.cpu_count() - 4))
    # the longest runs get pushed to the end; randomize for better job packing
    random.shuffle(runlist)
    results = pool.map(sum_window, runlist)
    #gnuplot_single_slice(runlist[0])  # for troubleshooting

    # now save the results for post-processing
    params = {
        "unitless": unitless,
        "refinement": refinement,
        "pad": pad,
        "order": order,
        "window": window,
        "zero_pad": zero_pad,
        "identity_test": identity_test,
        "weight_file1": weight_file1,
        "weight_file2": weight_file2,
        "bins": bins
    }

    outshelve = shelve.open(mixing_fileout, "n")
    outshelve["params"] = params  # parameters for this run
    outshelve["weight1"] = weight1  # weight map 1
    outshelve["weight2"] = weight2  # weight map 2
    outshelve["xspec"] = xspec  # copy of the weight spectra
    outshelve["kflat"] = kflat  # 2D k bin vector
    outshelve["bins_3d"] = ret_indices  # indices to k3d for a 2d k bin
    outshelve["results"] = results  # mixing matrix columns
    outshelve.close()
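
The per-bin mixing-matrix columns at the end are computed with a runlist/Pool pattern: one job per 2D k bin, shuffled so the slow bins do not all land on the same worker. Below is a stripped-down, runnable sketch of just that pattern; sum_window_stub is a stand-in for the real sum_window worker, and the job contents are illustrative.

import multiprocessing
import random

def sum_window_stub(args):
    # Stand-in for sum_window(): one job per 2D k bin; the real worker returns
    # one column of the mixing matrix, here we just echo the bin index.
    xspec_file, bin_index = args
    return bin_index

if __name__ == '__main__':
    runlist = [('xspec.npy', idx) for idx in range(16)]
    random.shuffle(runlist)   # long bins cluster at the end; shuffling packs jobs better
    nproc = max(multiprocessing.cpu_count() - 4, 1)
    pool = multiprocessing.Pool(processes=nproc)
    results = pool.map(sum_window_stub, runlist)
    pool.close()
    pool.join()
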
Beispiel #18
0
    def save_data(self, n_modes):
        prodmap_list = []
        weight_list = []

        n_modes = "%dmodes" % n_modes
        for pairitem in self.pairlist:
            pair = self.pairs[pairitem]
            (tag1, tag2) = (pair.map1_name, pair.map2_name)
            clnoise = "cleaned_noise_inv"
            map1_file = "%s/sec_%s_cleaned_clean_map_I_with_%s_%s.npy" % \
                            (self.output_root, tag1, tag2, n_modes)
            map2_file = "%s/sec_%s_cleaned_clean_map_I_with_%s_%s.npy" % \
                            (self.output_root, tag2, tag1, n_modes)
            noise_inv1_file = "%s/sec_%s_%s_I_with_%s_%s.npy" % \
                            (self.output_root, tag1, clnoise, tag2, n_modes)
            noise_inv2_file = "%s/sec_%s_%s_I_with_%s_%s.npy" % \
                            (self.output_root, tag2, clnoise, tag1, n_modes)
            modes1_file = "%s/sec_%s_modes_clean_map_I_with_%s_%s.npy" % \
                            (self.output_root, tag1, tag2, n_modes)
            modes2_file = "%s/sec_%s_modes_clean_map_I_with_%s_%s.npy" % \
                            (self.output_root, tag2, tag1, n_modes)

            if self.params['subtract_inputmap_from_sim'] or \
               self.params['subtract_sim_from_inputmap']:
                map1 = pair.map1 - self.pairs_parallel_track[pairitem].map1

                map2 = pair.map2 - self.pairs_parallel_track[pairitem].map2
            else:
                map1 = copy.deepcopy(pair.map1)
                map2 = copy.deepcopy(pair.map2)

            prodmap_list.append(map1 * pair.noise_inv1)
            prodmap_list.append(map2 * pair.noise_inv2)
            weight_list.append(pair.noise_inv1)
            weight_list.append(pair.noise_inv2)

            algebra.save(map1_file, map1)
            algebra.save(map2_file, map2)
            algebra.save(noise_inv1_file, pair.noise_inv1)
            algebra.save(noise_inv2_file, pair.noise_inv2)
            algebra.save(modes1_file, pair.left_modes)
            algebra.save(modes2_file, pair.right_modes)

        cumulative_product = algebra.zeros_like(prodmap_list[0])
        cumulative_weight = algebra.zeros_like(prodmap_list[0])
        for mapind in range(0, len(prodmap_list)):
            cumulative_product += prodmap_list[mapind]
            cumulative_weight += weight_list[mapind]

        algebra.compressed_array_summary(cumulative_weight, "weight map")
        algebra.compressed_array_summary(cumulative_product, "product map")

        cumulative_weight[cumulative_weight < 1.e-20] = 0.
        cumulative_product[cumulative_weight < 1.e-20] = 0.

        newmap = cumulative_product / cumulative_weight

        # If the new map is NaN or inf, set it and the weights to zero.
        nan_array = np.isnan(newmap)
        newmap[nan_array] = 0.
        cumulative_product[nan_array] = 0.
        cumulative_weight[nan_array] = 0.
        inf_array = np.isinf(newmap)
        newmap[inf_array] = 0.
        cumulative_product[inf_array] = 0.
        cumulative_weight[inf_array] = 0.
        algebra.compressed_array_summary(newmap, "new map")
        algebra.compressed_array_summary(cumulative_product,
                                         "final map * weight")
        algebra.compressed_array_summary(cumulative_weight, "final weight map")

        combined = "combined_clean"
        combined_map_file = "%s/%s_map_%s.npy" % \
                            (self.output_root, combined, n_modes)
        combined_weight_file = "%s/%s_weight_%s.npy" % \
                            (self.output_root, combined, n_modes)
        combined_product_file = "%s/%s_product_%s.npy" % \
                            (self.output_root, combined, n_modes)
        combined_ones_file = "%s/%s_ones_%s.npy" % \
                            (self.output_root, combined, n_modes)

        algebra.save(combined_map_file, newmap)
        algebra.save(combined_product_file, cumulative_product)
        algebra.save(combined_weight_file, cumulative_weight)
        algebra.save(combined_ones_file, algebra.ones_like(newmap))
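
The combination step above is an inverse-noise-weighted average of the per-pair product maps, with tiny weights and any non-finite pixels zeroed. Here is a small, self-contained sketch of just that arithmetic; the helper name and toy arrays are illustrative.

import numpy as np

def combine_weighted(maps, weights, weight_floor=1.0e-20):
    # Weighted combination as above: sum(w_i * m_i) / sum(w_i), then zero
    # anything with negligible weight or a non-finite result.
    product = np.zeros_like(maps[0])
    weight = np.zeros_like(maps[0])
    for this_map, this_weight in zip(maps, weights):
        product += this_map * this_weight
        weight += this_weight
    product[weight < weight_floor] = 0.
    weight[weight < weight_floor] = 0.
    with np.errstate(divide='ignore', invalid='ignore'):
        combined = product / weight
    bad = ~np.isfinite(combined)
    combined[bad] = 0.
    product[bad] = 0.
    weight[bad] = 0.
    return combined, product, weight

maps = [np.array([[1., 2.], [3., 4.]]), np.array([[3., 2.], [1., 0.]])]
weights = [np.array([[1., 1.], [1., 0.]]), np.array([[1., 1.], [3., 0.]])]
combined, product, weight = combine_weighted(maps, weights)
# combined -> [[2., 2.], [1.5, 0.]]; the unobserved pixel stays zero.
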
Beispiel #19
0
def solve_from_eig(noise_evalsinv,
                   noise_evects,
                   dirty_map,
                   return_noise_diag=False,
                   feedback=0):
    """Converts a dirty map to a clean map using the eigen decomposition of the
    noise inverse.
    """

    # Check the shapes.
    if noise_evects.ndim != 4:
        raise ValueError("Expected 4D array for 'noise_evects`.")
    if noise_evalsinv.shape != (noise_evects.shape[-1], ):
        raise ValueError("Wrong number of eigenvalues.")
    if dirty_map.shape != noise_evects.shape[:-1]:
        raise ValueError("Dirty map and noise don't have matching dimensions.")
    if dirty_map.size != noise_evects.shape[-1]:
        raise ValueError("Eigen space not the same total size as map space.")
    n = noise_evalsinv.shape[0]
    nf = dirty_map.shape[0]
    nr = dirty_map.shape[1]
    nd = dirty_map.shape[2]
    # Copy the eigenvalues.
    noise_evalsinv = noise_evalsinv.copy()
    # Find poorly constrained modes and zero them out.
    bad_inds = noise_evalsinv < 1. / constants.T_huge**2
    n_bad = sp.sum(bad_inds)
    if feedback > 1:
        print("Discarding %d modes of %d. %f percent." %
              (n_bad, n, 100. * n_bad / n))
    noise_evalsinv[bad_inds] = 1.
    # Rotate the dirty map into the diagonal noise space.
    if feedback > 1:
        print "Rotating map to eigenspace."
    map_rot = sp.zeros(n, dtype=sp.float64)
    for ii in xrange(nf):
        for jj in xrange(nr):
            for kk in xrange(nd):
                tmp = noise_evects[ii, jj, kk, :].copy()
                map_rot += dirty_map[ii, jj, kk] * tmp
    # Zero out the poorly constrained modes, then divide by the noise-inverse
    # eigenvalues (i.e. multiply by the diagonal noise in this basis).
    map_rot[bad_inds] = 0
    map_rot = map_rot / noise_evalsinv
    # Now rotate back to the original space.
    if feedback > 1:
        print "Rotating back to map space."
    clean_map = algebra.zeros_like(dirty_map)
    for ii in xrange(nf):
        for jj in xrange(nr):
            for kk in xrange(nd):
                tmp = noise_evects[ii, jj, kk, :].copy()
                clean_map[ii, jj, kk] = sp.sum(map_rot * tmp)
    if return_noise_diag:
        if feedback > 1:
            print "Getting noise diagonal."
        noise_diag = algebra.zeros_like(dirty_map)
        noise_evals = 1. / noise_evalsinv
        noise_evals[bad_inds] = constants.T_huge**2
        for ii in xrange(nf):
            for jj in xrange(nr):
                for kk in xrange(nd):
                    tmp = noise_evects[ii, jj, kk, :].copy()
                    noise_diag[ii, jj, kk] = sp.sum(tmp**2 * noise_evals)
        return clean_map, noise_diag
    else:
        return clean_map