def noise_inv_to_weight(noiseinvlist_in, weightlist_in):
    # reference_noise_inv and reference_weight are assumed to be module-level
    # path variables defined elsewhere in the source file.
    print reference_noise_inv, reference_weight
    noise_inv = algebra.make_mat(algebra.open_memmap(reference_noise_inv, mode='r'))
    noise_inv_diag = noise_inv.mat_diag()
    algebra.save(reference_weight, noise_inv_diag)
    print "done with reference weights"

    for (noiseinv_item, weight_item) in zip(noiseinvlist_in, weightlist_in):
        print noiseinv_item, weight_item
        noise_inv = algebra.make_mat(algebra.open_memmap(noiseinv_item, mode='r'))
        noise_inv_diag = noise_inv.mat_diag()
        algebra.save(weight_item, noise_inv_diag)
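
A minimal usage sketch (the file names are hypothetical; algebra is the map-algebra module used throughout these examples):

noiseinv_list = ["sec_A_noise_inv.npy", "sec_B_noise_inv.npy"]
weight_list = ["sec_A_noise_weight.npy", "sec_B_noise_weight.npy"]
noise_inv_to_weight(noiseinv_list, weight_list)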
Example No. 2
    def process_noise_inv(self, filename, regenerate=True):
        r"""buffer reading the noise inverse files for speed and also
        save to a file in the intermediate output path.

        If the cached file exists as an intermediate product, load it else
        produce it.
        """
        if filename not in self.noisefiledict:
            basename = filename.split("/")[-1].split(".npy")[0]
            filename_diag = "%s/%s_diag.npy" % \
                           (self.output_root, basename)
            exists = os.access(filename_diag, os.F_OK)
            if exists and not regenerate:
                print "loading pre-diagonalized noise: " + filename_diag
                self.noisefiledict[filename] = algebra.make_vect(
                    algebra.load(filename_diag))
            else:
                print "loading noise: " + filename
                # TODO: have this be smarter about reading various noise cov
                # inputs
                noise_inv = algebra.make_mat(
                    algebra.open_memmap(filename, mode='r'))
                self.noisefiledict[filename] = noise_inv.mat_diag()
                #self.noisefiledict[filename] = algebra.make_vect(
                #                               algebra.load(filename))
                algebra.save(filename_diag, self.noisefiledict[filename])

        return copy.deepcopy(self.noisefiledict[filename])
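
Note that with the default regenerate=True the cached _diag.npy is always rebuilt; a caller that wants the cache hit must pass regenerate=False. A minimal sketch, assuming a hypothetical instance and path:

# cleaner is a hypothetical instance of the class defining this method
weight = cleaner.process_noise_inv("maps/sec_A_noise_inv.npy",
                                   regenerate=False)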
Example No. 3
    def process_noise_inv(self, filename, regenerate=True):
        r"""buffer reading the noise inverse files for speed and also
        save to a file in the intermediate output path.

        If the cached file exists as an intermediate product, load it else
        produce it.
        """
        if filename not in self.noisefiledict:
            basename = filename.split("/")[-1].split(".npy")[0]
            filename_diag = "%s/%s_diag.npy" % (self.output_root, basename)
            exists = os.access(filename_diag, os.F_OK)
            if exists and not regenerate:
                print "loading pre-diagonalized noise: " + filename_diag
                self.noisefiledict[filename] = algebra.make_vect(algebra.load(filename_diag))
            else:
                print "loading noise: " + filename
                # TODO: have this be smarter about reading various noise cov
                # inputs
                noise_inv = algebra.make_mat(algebra.open_memmap(filename, mode="r"))
                self.noisefiledict[filename] = noise_inv.mat_diag()
                # self.noisefiledict[filename] = algebra.make_vect(
                #                               algebra.load(filename))
                algebra.save(filename_diag, self.noisefiledict[filename])

        return copy.deepcopy(self.noisefiledict[filename])
Example No. 4
def noise_inv_to_weight(noiseinvlist_in, weightlist_in):
    for (noiseinv_item, weight_item) in zip(noiseinvlist_in, weightlist_in):
        print noiseinv_item, weight_item
        noise_inv = algebra.make_mat(
            algebra.open_memmap(noiseinv_item, mode='r'))
        noise_inv_diag = noise_inv.mat_diag()
        algebra.save(weight_item, noise_inv_diag)
Example No. 5
 def test_tri_copy_memmap(self):
     self.noise_inv.shape = (self.size, self.size)
     noise_mem = al.open_memmap("testout.npy", mode='w+',
                                shape=self.noise_inv.shape)
     noise_mem[...] = self.noise_inv
     tri_noise_inv = _c.up_tri_copy(noise_mem)
     for ii in range(self.size):
         self.assertTrue(sp.allclose(tri_noise_inv[ii,ii:],
                                     self.noise_inv[ii,ii:]))
Example No. 6
 def test_tri_copy_memmap(self):
     self.noise_inv.shape = (self.size, self.size)
     noise_mem = al.open_memmap("testout.npy",
                                mode='w+',
                                shape=self.noise_inv.shape)
     noise_mem[...] = self.noise_inv
     tri_noise_inv = _c.up_tri_copy(noise_mem)
     for ii in range(self.size):
         self.assertTrue(
             sp.allclose(tri_noise_inv[ii, ii:], self.noise_inv[ii, ii:]))
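
For reference, a pure-numpy stand-in for the property this test asserts (up_tri_copy itself is a compiled extension, so this is only a sketch of its contract on the upper triangle):

import numpy as np

def up_tri_copy_reference(mat):
    # Keep the upper triangle (row ii from column ii onward), which is all
    # the assertions above check; entries below the diagonal are unspecified.
    return np.triu(np.asarray(mat))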
Example No. 7
def find_weight_re_diagonal(filename):
    r"""Rather than read the full noise_inv and find its diagonal, cache the
    diagonal values.

    Note that the .info does not get shelved (the class needs to be made
    serializable). Return the info separately.
    """
    print "loading noise: " + filename
    noise_inv = algebra.make_mat(algebra.open_memmap(filename, mode='r'))
    noise_inv_diag = noise_inv.mat_diag()

    return noise_inv_diag, noise_inv_diag.info
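
A sketch of the shelving pattern the docstring describes (shelve is the standard-library module; the file names are hypothetical). The plain array and its .info dict are stored separately because the info-carrying array class does not serialize:

import shelve
import numpy as np

diag, diag_info = find_weight_re_diagonal("sec_A_noise_inv.npy")
cache = shelve.open("weight_cache.shelve")
cache["sec_A"] = (np.asarray(diag), dict(diag_info))
cache.close()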
Example No. 8
def convert_noiseinv_to_weight(mapkey):
    datapath_db = dp.DataPath()
    filedb = datapath_db.fetch(mapkey)[1]
    map_cases = datapath_db.fileset_cases(mapkey, "section;maptype")

    for section in map_cases['section']:
        noiseinv_file = filedb[section + ";noise_inv"]
        noiseweight_file = filedb[section + ";noise_weight"]
        print noiseinv_file, noiseweight_file

        noise_inv = algebra.make_mat(algebra.open_memmap(noiseinv_file, mode='r'))
        noise_inv_diag = noise_inv.mat_diag()
        algebra.save(noiseweight_file, noise_inv_diag)
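
A usage sketch with a hypothetical file-db key:

convert_noiseinv_to_weight("GBT_15hr_map")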
Example No. 9
 def execute(self, nprocesses):
     params = self.params
     # Make parent directory and write parameter file.
     kiyopy.utils.mkparents(params['output_root'])
     parse_ini.write_params(params,
                            params['output_root'] + 'params.ini',
                            prefix=prefix)
     in_root = params['input_root']
     # Figure out what the band names are.
     bands = params['bands']
      if not bands:
          # Use the first requested polarization to discover the band names
          # on disk (pol_str is otherwise undefined until the loop below).
          pol_str = params['polarizations'][0]
          map_files = glob.glob(in_root + pol_str + "_*.npy")
          bands = []
          root_len = len(in_root)
          for file_name in map_files:
              bands.append(file_name[root_len:-4])
     # Loop over polarizations.
     for pol_str in params['polarizations']:
         # Read in all the maps to be glued.
         maps = []
         for band in bands:
             band_map_fname = (in_root + pol_str + "_" + repr(band) +
                               '.npy')
             if self.feedback > 1:
                 print "Read using map: " + band_map_fname
             if params['mat_diag']:
                 if self.feedback > 1:
                     print "Treating as a matrix, getting diagonal."
                 band_map = al.open_memmap(band_map_fname, mode='r')
                 band_map = al.make_mat(band_map)
                 band_map = band_map.mat_diag()
             else:
                 band_map = al.load(band_map_fname)
                 band_map = al.make_vect(band_map)
             if band_map.axes != ('freq', 'ra', 'dec'):
                 msg = ("Expeced maps to have axes ('freq',"
                        "'ra', 'dec'), but it has axes: " +
                        str(band_map.axes))
                 raise ce.DataError(msg)
             maps.append(band_map)
         # Now glue them together.
         out_map = glue(maps)
         out_fname = (params['output_root'] + pol_str + "_" + "all" +
                      '.npy')
         if self.feedback > 1:
             print "Writing glued map to: " + out_fname
         al.save(out_fname, out_map)
Example No. 10
 def execute(self, nprocesses):
     params = self.params
     # Make parent directory and write parameter file.
     kiyopy.utils.mkparents(params['output_root'])
     parse_ini.write_params(params, params['output_root'] + 'params.ini',
                            prefix=prefix)
     in_root = params['input_root']        
     # Figure out what the band names are.
     bands = params['bands']
      if not bands:
          # Use the first requested polarization to discover the band names
          # on disk (pol_str is otherwise undefined until the loop below).
          pol_str = params['polarizations'][0]
          map_files = glob.glob(in_root + pol_str + "_*.npy")
          bands = []
          root_len = len(in_root)
          for file_name in map_files:
              bands.append(file_name[root_len:-4])
     # Loop over polarizations.
     for pol_str in params['polarizations']:
         # Read in all the maps to be glued.
         maps = []
         for band in bands:
             band_map_fname = (in_root + pol_str + "_" +
                           repr(band) + '.npy')
             if self.feedback > 1:
                 print "Read using map: " + band_map_fname
             if params['mat_diag']:
                 if self.feedback > 1:
                     print "Treating as a matrix, getting diagonal."
                 band_map = al.open_memmap(band_map_fname, mode='r')
                 band_map = al.make_mat(band_map)
                 band_map = band_map.mat_diag()
             else:
                 band_map = al.load(band_map_fname)
                 band_map = al.make_vect(band_map)
             if band_map.axes != ('freq', 'ra', 'dec') :
                 msg = ("Expeced maps to have axes ('freq',"
                        "'ra', 'dec'), but it has axes: "
                        + str(band_map.axes))
                 raise ce.DataError(msg)
             maps.append(band_map)
         # Now glue them together.
         out_map = glue(maps)
         out_fname = (params['output_root']
                      + pol_str + "_" + "all" + '.npy')
         if self.feedback > 1:
             print "Writing glued map to: " + out_fname
         al.save(out_fname, out_map)
Example No. 11
def extract(in_dir, out_dir) :
    """Searches for noise_inv files, extracts the diagonal and writes it out.
    """
    
    files = glob.glob(in_dir + '/*noise_inv*.npy')
    for file_path in files:
        if 'noise_inv_diag' in file_path:
            continue
        file_name = file_path[len(in_dir):]
        print file_name
        parts = file_name.split('noise_inv')
        if len(parts) != 2:
            raise RuntimeError("'noise_inv' appears in file name more than"
                               " once.  Wasn't prepared for this.")
        out_path = out_dir + '/' + parts[0] + 'noise_inv_diag' + parts[1]
        mat = al.open_memmap(file_path, 'r')
        mat = al.make_mat(mat)
        mat_diag = mat.mat_diag()
        al.save(out_path, mat_diag)
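
A usage sketch (directory names are hypothetical):

extract("maps_in", "maps_out")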
Example No. 12
    def execute(self):
        '''Clean the maps of foregrounds, save the results, and get the
        autocorrelation.'''

        params = self.params
        freq_list = sp.array(params['freq_list'], dtype=int)
        lags = sp.array(params['lags'])

        # Write parameter file.
        kiyopy.utils.mkparents(params['output_root'])
        parse_ini.write_params(params, params['output_root'] + 'params.ini',
                               prefix=prefix)

        # Get the map data from file as well as the noise inverse.
        if len(params['file_middles']) == 1:
            fmid_name = params['file_middles'][0]
            params['file_middles'] = (fmid_name, fmid_name)

        if len(params['file_middles']) >= 2:
            # Deal with multiple files.
            num_maps = len(params['file_middles'])
            maps = []
            noise_invs = []

            # Load all maps and noises once.
            for map_index in range(0, num_maps):
                map_file = (params['input_root'] +
                            params['file_middles'][map_index] +
                            params['input_end_map'])

                print "Loading map %d of %d." % (map_index + 1, num_maps)

                map_in = algebra.make_vect(algebra.load(map_file))

                maps.append(map_in)
                if not params["no_weights"]:
                    noise_file = (params['input_root'] +
                                  params['file_middles'][map_index] +
                                  params['input_end_noise'])

                    print "Loading noise %d of %d." % (map_index + 1, num_maps)

                    noise_inv = algebra.make_mat(
                                    algebra.open_memmap(noise_file, mode='r'))

                    noise_inv = noise_inv.mat_diag()
                else:
                    noise_inv = algebra.ones_like(map_in)

                noise_invs.append(noise_inv)

            pairs = []
            # Make pairs with deepcopies to not make mutability mistakes.
            for map1_index in range(0, num_maps):
                for map2_index in range(0, num_maps):
                    if (map2_index > map1_index):
                        map1 = copy.deepcopy(maps[map1_index])
                        map2 = copy.deepcopy(maps[map2_index])
                        noise_inv1 = copy.deepcopy(noise_invs[map1_index])
                        noise_inv2 = copy.deepcopy(noise_invs[map2_index])

                        pair = map_pair.MapPair(map1, map2,
                                                noise_inv1, noise_inv2,
                                                freq_list)

                        pair.lags = lags
                        pair.params = params

                        # Keep track of the names of maps in pairs so
                        # it knows what to save later.
                        pair.set_names(params['file_middles'][map1_index],
                                       params['file_middles'][map2_index])
                        pairs.append(pair)

            num_map_pairs = len(pairs)
            print "%d map pairs created from %d maps." % (len(pairs), num_maps)

        # Hold a reference in self.
        self.pairs = pairs

        # Get maps/ noise inv ready for running.
        if params["convolve"]:
            for pair in pairs:
                pair.degrade_resolution()

        if params['factorizable_noise']:
            for pair in pairs:
                pair.make_noise_factorizable()

        if params['sub_weighted_mean']:
            for pair in pairs:
                pair.subtract_weighted_mean()

        self.pairs = pairs
        # Since correlating takes so long, if you already have the svds
        # you can skip this first correlation [since that's all it's really
        # for and it is the same no matter how many modes you want].
        # Note: map_pairs will not have anything saved in 'fore_corr' if you
        # skip this correlation.
        if not params['skip_fore_corr']:
            # Correlate the maps with multiprocessing. Note that the
            # correlations are saved to file separately then loaded in
            # together because that's (one way) how multiprocessing works.
            runlist = [(pairs[pair_index],
                        params['output_root'],
                        pair_index, False) for
                        pair_index in range(0, num_map_pairs)]

            pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())
            pool.map(multiproc, runlist)

            # Load the correlations and save them to each pair. The pairs that
            # got passed to multiproc are not the same ones as ones in
            # self.pairs, so this must be done to have actual values.
            print "Loading map pairs back into program."
            file_name = params['output_root']
            file_name += "map_pair_for_freq_slices_fore_corr_"

            fore_pairs = []
            for count in range(0, num_map_pairs):
                print "Loading correlation for pair %d" % (count)
                pickle_handle = open(file_name + str(count) + ".pkl", "r")
                correlate_results = cPickle.load(pickle_handle)
                pairs[count].fore_corr = correlate_results[0]
                pairs[count].fore_counts = correlate_results[1]
                fore_pairs.append(pairs[count])
                pickle_handle.close()

            self.fore_pairs = copy.deepcopy(fore_pairs)
            # With this, you do not need fore_pairs anymore.
            self.pairs = copy.deepcopy(fore_pairs)

            pairs = self.pairs

            # Get foregrounds.

            # svd_info_list keeps track of all of the modes of all maps in
            # all pairs. This means if you want to subtract a different number
            # of modes for the same maps/noises/frequencies, you have the modes
            # already saved and do not need to run the first correlation again.
            svd_info_list = []
            for pair in pairs:
                vals, modes1, modes2 = cf.get_freq_svd_modes(pair.fore_corr,
                                                          len(freq_list))
                pair.vals = vals

                # Save ALL of the modes for reference.
                pair.all_modes1 = modes1
                pair.all_modes2 = modes2
                svd_info = (vals, modes1, modes2)
                svd_info_list.append(svd_info)

                # Save only the modes you want to subtract.
                n_modes = params['modes']
                pair.modes1 = modes1[:n_modes]
                pair.modes2 = modes2[:n_modes]

            self.svd_info_list = svd_info_list
            self.pairs = pairs

            if params['save_svd_info']:
                io_wrap.save_pickle(self.svd_info_list, params['svd_file'])
        else:
            # The first correlation and svd has been skipped.
            # This means you already have the modes so you can just load
            # them from file.
            self.svd_info_list = io_wrap.load_pickle(params['svd_file'])
            # Set the svd info to the pairs.
            for i in range(0, len(pairs)):
                svd_info = self.svd_info_list[i]
                pairs[i].vals = svd_info[0]
                pairs[i].all_modes1 = svd_info[1]
                pairs[i].all_modes2 = svd_info[2]
                n_modes = params['modes']
                pairs[i].modes1 = svd_info[1][:n_modes]
                pairs[i].modes2 = svd_info[2][:n_modes]

            self.pairs = pairs

        # Subtract foregrounds.
        for pair_index in range(0, len(pairs)):
            pairs[pair_index].subtract_frequency_modes(pairs[pair_index].modes1,
                pairs[pair_index].modes2)

        # Save cleaned clean maps, cleaned noises, and modes.
        self.save_data(save_maps=params['save_maps'],
                       save_noises=params['save_noises'],
                       save_modes=params['save_modes'])

        # Finish if this was just first pass.
        if params['first_pass_only']:
            self.pairs = pairs
            return

        # Correlate the cleaned maps.
        # Here we could calculate the power spectrum instead eventually.
        runlist = [(pairs[pair_index],
                    params['output_root'],
                    pair_index, True) for
                    pair_index in range(0, num_map_pairs)]

        pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())
        pool.map(multiproc, runlist)

        print "Loading map pairs back into program."
        file_name = params['output_root']
        file_name += "map_pair_for_freq_slices_corr_"

        temp_pair_list = []
        for count in range(0, num_map_pairs):
            print "Loading correlation for pair %d" % (count)
            pickle_handle = open(file_name + str(count) + ".pkl", "r")
            correlate_results = cPickle.load(pickle_handle)
            pairs[count].corr = correlate_results[0]
            pairs[count].counts = correlate_results[1]
            temp_pair_list.append(pairs[count])
            pickle_handle.close()

        self.pairs = copy.deepcopy(temp_pair_list)

        # Get the average correlation and its standard deviation.
        corr_list = []
        for pair in self.pairs:
            corr_list.append(pair.corr)

        self.corr_final, self.corr_std = cf.get_corr_and_std_3d(corr_list)

        if params['pickle_slices']:
            pickle_slices(self)

        return
Example No. 13
def noise_inv_to_weight(uncal_weightlist):
    # Note: filename, filename_diag, and self.noisefiledict are assumed to be
    # defined by surrounding context omitted from this snippet; the pattern
    # matches the other noise_inv-to-weight examples above.
    noise_inv = algebra.make_mat(
        algebra.open_memmap(filename, mode='r'))
    self.noisefiledict[filename] = noise_inv.mat_diag()
    algebra.save(filename_diag, self.noisefiledict[filename])
Example No. 14
        # Iterating through an n-dimensional array produces slices along
        # the first axis. This is equivalent to data[i,:,:] in this case
        for data_slice in data:

            # The formatting string writes the values out in left-justified
            # columns 7 characters wide with 2 decimal places.
            np.savetxt(outfile, data_slice, fmt='%-7.2f')

            # Writing out a break to indicate different slices...
            outfile.write('# New slice\n')


if __name__ == "__main__":
    if len(sys.argv) == 2:
        # Argument should just be a .npy file.
        array = algebra.load(sys.argv[1])
        out_fname = sys.argv[1].split('/')[-1][:-4] + '.txt'
        tofile(out_fname, array)
    elif len(sys.argv) == 3 and sys.argv[1] == "diag":
        # Second argument should be a .npy file that should be interpreted as a
        # matrix and we want to save the diagonal.
        mat = algebra.open_memmap(sys.argv[2])
        mat = algebra.make_mat(mat)
        array = mat.mat_diag()
        out_fname = sys.argv[2].split('/')[-1][:-4] + '.txt'
        tofile(out_fname, array)
    else:
        print("Usage : python alg2txt.py [input file] or"
              " python alg2txt.py diag [input file]")
Example No. 15
def combine_maps(param_dict, fullcov=False, verbose=False):
    """combines a list of maps as a weighted mean using a specified list of
    inverse covariance weights
    fullcov indicates that it is not just the diagonal and should be squashed
    """
    print param_dict
    covlist = param_dict["covlist"]
    try:
        mul_cov_list = zip(covlist, param_dict["multiply_cov"])
        print "using user-specified covariance multipliers" + \
               repr(param_dict["multiply_cov"])
    except KeyError:
        mul_cov_list = zip(covlist, [1.] * len(covlist))

    maps = []
    for tagname in param_dict["maplist"]:
        if verbose:
            print tagname
        maps.append(algebra.make_vect(
                    algebra.load(param_dict["root_data"] + tagname + ".npy")))

    weights = []
    for cov_entry in mul_cov_list:
        if verbose:
            print cov_entry
        (tagname, multiplier) = cov_entry

        if fullcov:
            raw_weight = algebra.make_mat(
                            algebra.open_memmap(param_dict["root_cov"] + \
                                                tagname + ".npy", mode='r'))
            raw_weight = raw_weight.mat_diag()
        else:
            raw_weight = algebra.make_vect(algebra.load(
                                param_dict["root_cov"] + tagname + ".npy"))

        # zero out any messy stuff
        raw_weight *= multiplier
        raw_weight[raw_weight < 1.e-20] = 0.
        raw_weight[np.isnan(raw_weight)] = 0.
        raw_weight[np.isinf(raw_weight)] = 0.
        weights.append(raw_weight)

    prodmap = []
    for mapind in range(0, len(maps)):
        prodmap.append(maps[mapind] * weights[mapind])

    for mapind in range(1, len(maps)):
        prodmap[0] += prodmap[mapind]
        weights[0] += weights[mapind]

    algebra.compressed_array_summary(weights[0], "weight map")
    algebra.compressed_array_summary(prodmap[0], "product map")

    newmap = prodmap[0] / weights[0]

    newweights = weights[0]
    newweights[newweights < 1.e-20] = 0.
    # if the new map is nan or inf, set it and the weights to zero
    nan_array = np.isnan(newmap)
    newmap[nan_array] = 0.
    newweights[nan_array] = 0.
    inf_array = np.isinf(newmap)
    newmap[inf_array] = 0.
    newweights[inf_array] = 0.
    algebra.compressed_array_summary(newmap, "new map")
    algebra.compressed_array_summary(newweights, "final weight map")

    return (newmap, newweights, prodmap[0])
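
A toy check of the weighted-mean logic above, with hypothetical 1-D arrays standing in for the maps and weights:

import numpy as np

maps = [np.array([1., 2.]), np.array([3., 2.])]
weights = [np.array([1., 4.]), np.array([3., 4.])]
combined = sum(m * w for m, w in zip(maps, weights)) / sum(weights)
# combined -> [2.5, 2.0]; zero-weight pixels would still need the
# nan/inf cleanup performed above.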
Example No. 16
    def execute(self):
        '''Clean the maps of foregrounds, save the results, and get the
        autocorrelation.'''

        params = self.params
        freq_list = sp.array(params['freq_list'], dtype=int)
        lags = sp.array(params['lags'])

        # Write parameter file.
        kiyopy.utils.mkparents(params['output_root'])
        parse_ini.write_params(params,
                               params['output_root'] + 'params.ini',
                               prefix=prefix)

        # Get the map data from file as well as the noise inverse.
        if len(params['file_middles']) == 1:
            fmid_name = params['file_middles'][0]
            params['file_middles'] = (fmid_name, fmid_name)

        if len(params['file_middles']) >= 2:
            # Deal with multiple files.
            num_maps = len(params['file_middles'])
            maps = []
            noise_invs = []

            # Load all maps and noises once.
            for map_index in range(0, num_maps):
                map_file = (params['input_root'] +
                            params['file_middles'][map_index] +
                            params['input_end_map'])

                print "Loading map %d of %d." % (map_index + 1, num_maps)

                map_in = algebra.make_vect(algebra.load(map_file))

                maps.append(map_in)
                if not params["no_weights"]:
                    noise_file = (params['input_root'] +
                                  params['file_middles'][map_index] +
                                  params['input_end_noise'])

                    print "Loading noise %d of %d." % (map_index + 1, num_maps)

                    noise_inv = algebra.make_mat(
                        algebra.open_memmap(noise_file, mode='r'))

                    noise_inv = noise_inv.mat_diag()
                else:
                    noise_inv = algebra.ones_like(map_in)

                noise_invs.append(noise_inv)

            pairs = []
            # Make pairs with deepcopies to not make mutability mistakes.
            for map1_index in range(0, num_maps):
                for map2_index in range(0, num_maps):
                    if (map2_index > map1_index):
                        map1 = copy.deepcopy(maps[map1_index])
                        map2 = copy.deepcopy(maps[map2_index])
                        noise_inv1 = copy.deepcopy(noise_invs[map1_index])
                        noise_inv2 = copy.deepcopy(noise_invs[map2_index])

                        pair = map_pair.MapPair(map1, map2, noise_inv1,
                                                noise_inv2, freq_list)

                        pair.lags = lags
                        pair.params = params

                        # Keep track of the names of maps in pairs so
                        # it knows what to save later.
                        pair.set_names(params['file_middles'][map1_index],
                                       params['file_middles'][map2_index])
                        pairs.append(pair)

            num_map_pairs = len(pairs)
            print "%d map pairs created from %d maps." % (len(pairs), num_maps)

        # Hold a reference in self.
        self.pairs = pairs

        # Get maps/ noise inv ready for running.
        if params["convolve"]:
            for pair in pairs:
                pair.degrade_resolution()

        if params['factorizable_noise']:
            for pair in pairs:
                pair.make_noise_factorizable()

        if params['sub_weighted_mean']:
            for pair in pairs:
                pair.subtract_weighted_mean()

        self.pairs = pairs
        # Since correlating takes so long, if you already have the svds
        # you can skip this first correlation [since that's all it's really
        # for and it is the same no matter how many modes you want].
        # Note: map_pairs will not have anything saved in 'fore_corr' if you
        # skip this correlation.
        if not params['skip_fore_corr']:
            # Correlate the maps with multiprocessing. Note that the
            # correlations are saved to file separately then loaded in
            # together because that's (one way) how multiprocessing works.
            fore_pairs = []
            processes_list = []
            for pair_index in range(0, num_map_pairs):
                # Calls 1 multiproc (which governs the correlating) for each
                # pair on a new CPU so you can have all pairs working at once.
                multi = multiprocessing.Process(target=multiproc,
                                                args=([
                                                    pairs[pair_index],
                                                    params['output_root'],
                                                    pair_index, False
                                                ]))

                processes_list.append(multi)

                multi.start()

            # Waits for all correlations to finish before continuing.
            while True in [multi.is_alive() for multi in processes_list]:
                print "processing"
                time.sleep(5)

            # just to be safe
            time.sleep(1)

            # more concise call, but multiprocessing does not behave well with
            # complex objects...........
            #runlist = [(pair_index,
            #            params['output_root'],
            #            False) for
            #            pair_index in range(0, num_map_pairs)]
            #pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())
            #pool.map(self.multiproc, runlist)

            # Load the correlations and save them to each pair. The pairs that
            # got passed to multiproc are not the same ones as ones in
            # self.pairs, so this must be done to have actual values.
            print "Loading map pairs back into program."
            file_name = params['output_root']
            file_name += "map_pair_for_freq_slices_fore_corr_"

            for count in range(0, num_map_pairs):
                print "Loading correlation for pair %d" % (count)
                pickle_handle = open(file_name + str(count) + ".pkl", "r")
                correlate_results = cPickle.load(pickle_handle)
                pairs[count].fore_corr = correlate_results[0]
                pairs[count].fore_counts = correlate_results[1]
                fore_pairs.append(pairs[count])
                pickle_handle.close()

            self.fore_pairs = copy.deepcopy(fore_pairs)
            # With this, you do not need fore_pairs anymore.
            self.pairs = copy.deepcopy(fore_pairs)

            pairs = self.pairs

            # Get foregrounds.

            # svd_info_list keeps track of all of the modes of all maps in
            # all pairs. This means if you want to subtract a different number
            # of modes for the same maps/noises/frequencies, you have the modes
            # already saved and do not need to run the first correlation again.
            svd_info_list = []
            for pair in pairs:
                vals, modes1, modes2 = cf.get_freq_svd_modes(
                    pair.fore_corr, len(freq_list))
                pair.vals = vals

                # Save ALL of the modes for reference.
                pair.all_modes1 = modes1
                pair.all_modes2 = modes2
                svd_info = (vals, modes1, modes2)
                svd_info_list.append(svd_info)

                # Save only the modes you want to subtract.
                n_modes = params['modes']
                pair.modes1 = modes1[:n_modes]
                pair.modes2 = modes2[:n_modes]

            self.svd_info_list = svd_info_list
            self.pairs = pairs

            if params['save_svd_info']:
                ft.save_pickle(self.svd_info_list, params['svd_file'])
        else:
            # The first correlation and svd has been skipped.
            # This means you already have the modes so you can just load
            # them from file.
            self.svd_info_list = ft.load_pickle(params['svd_file'])
            # Set the svd info to the pairs.
            for i in range(0, len(pairs)):
                svd_info = self.svd_info_list[i]
                pairs[i].vals = svd_info[0]
                pairs[i].all_modes1 = svd_info[1]
                pairs[i].all_modes2 = svd_info[2]
                n_modes = params['modes']
                pairs[i].modes1 = svd_info[1][:n_modes]
                pairs[i].modes2 = svd_info[2][:n_modes]

            self.pairs = pairs

        # Subtract foregrounds.
        for pair_index in range(0, len(pairs)):
            pairs[pair_index].subtract_frequency_modes(
                pairs[pair_index].modes1, pairs[pair_index].modes2)

        # Save cleaned clean maps, cleaned noises, and modes.
        self.save_data(save_maps=params['save_maps'],
                       save_noises=params['save_noises'],
                       save_modes=params['save_modes'])

        # Finish if this was just first pass.
        if params['first_pass_only']:
            self.pairs = pairs
            return

        # Correlate the cleaned maps.
        # Here we could calculate the power spectrum instead eventually.
        temp_pair_list = []
        processes_list = []
        for pair_index in range(0, num_map_pairs):
            multi = multiprocessing.Process(target=multiproc,
                                            args=([
                                                pairs[pair_index],
                                                params['output_root'],
                                                pair_index, True
                                            ]))

            processes_list.append(multi)
            multi.start()

        while True in [multi.is_alive() for multi in processes_list]:
            print "processing"
            time.sleep(5)

        # just to be safe
        time.sleep(1)

        # ugh, would really rather use the implementation below except
        # multiprocessing does not behave well with complex objects.
        #runlist = [(pairs[pair_index],
        #            params['output_root'],
        #            pair_index, True) for
        #            pair_index in range(0, num_map_pairs)]

        #pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())
        #pool.map(multiproc, runlist)

        print "Loading map pairs back into program."
        file_name = params['output_root']
        file_name += "map_pair_for_freq_slices_corr_"

        for count in range(0, num_map_pairs):
            print "Loading correlation for pair %d" % (count)
            pickle_handle = open(file_name + str(count) + ".pkl", "r")
            correlate_results = cPickle.load(pickle_handle)
            pairs[count].corr = correlate_results[0]
            pairs[count].counts = correlate_results[1]
            temp_pair_list.append(pairs[count])
            pickle_handle.close()

        self.pairs = copy.deepcopy(temp_pair_list)

        # Get the average correlation and its standard deviation.
        corr_list = []
        for pair in self.pairs:
            corr_list.append(pair.corr)

        self.corr_final, self.corr_std = cf.get_corr_and_std_3d(corr_list)

        if params['pickle_slices']:
            ft.save_pickle(self, self.params['output_root'] + \
                                 'New_Slices_object.pkl')

        return
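
As a design note, the polling loop above can be replaced by Process.join(), which blocks until each child exits; a sketch with the same multiproc target and argument convention as above:

procs = [multiprocessing.Process(target=multiproc,
                                 args=(pairs[pair_index],
                                       params['output_root'],
                                       pair_index, False))
         for pair_index in range(0, num_map_pairs)]
for proc in procs:
    proc.start()
for proc in procs:
    proc.join()  # no sleep/poll needed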
Example No. 17
def noise_inv_to_weight(noiseinvlist_in, weightlist_in):
    for (noiseinv_item, weight_item) in zip(noiseinvlist_in, weightlist_in):
        print noiseinv_item, weight_item
        noise_inv = algebra.make_mat(algebra.open_memmap(noiseinv_item, mode='r'))
        noise_inv_diag = noise_inv.mat_diag()
        algebra.save(weight_item, noise_inv_diag)
Exemplo n.º 18
0
    def execute(self, nprocesses=1) :
        """Function that does all the work.
        
        Parameters
        ----------
        nprocesses : int
            Number of threads to use.  So far this program is not threaded.
            This argument is only included for compatibility with other
            modules.
        """
        params = self.params

        #### Front end.  Read in the power spectrum, convert it to 2d correlation
        # function, etc.
        # corr_function = f(params['unit_system'], params['power_file_name'],
        #                   params['power_format'])

        # Temporary fake correlation function to make the rest of the code run.
        corr_function = lambda rho, f : (1.0e6/(rho**2 + 0.001)
                                         /(abs(f) + 1.0e3))

        #### Units.  Figure out the axes in the proper units etc.
        # Start by getting the map from which we will be getting our axes.
        map = algebra.open_memmap(params["map_file"], mode='r')
        map = algebra.make_vect(map)
        if map.axes != ('freq', 'ra', 'dec') :
            raise ce.DataError("Expeceted map to be in freq-ra-dec space.")
        # Next figure out all the axes.
        nfreq = map.shape[0]
        nra = map.shape[1]
        ndec = map.shape[2]
        freq = map.get_axis('freq')
        ra = map.get_axis('ra')*sp.cos(sp.pi*map.info['dec_centre']/180.0)
        dec = map.get_axis('dec')
        # Coordinate-dependent conversion factors.
        system = params['unit_system']
        if system == "deg-freq" :
            z = freq
            z_width = sp.ones(nfreq)*map.info['freq_delta']
            Drho_Ddeg = sp.ones(nfreq)
        elif system == "Mpc/h-Mpc/h" :
            # Do a bunch of cosmology-dependent stuff.
            # Not written yet, so bail here.
            return
        else :
            raise ValueError('Unit system must be one of "deg-freq", '
                             '"Mpc/h-Mpc/h", "deg-log_freq" or "Mpc/h-mu".')
        # Get the beam object.
        # The following is temporary.  Eventually need to read beam data from
        # file.
        beam_freq = sp.arange(600.0e6, 1000.0e6, 50.0e6)
        beam_width = 0.3*600.0e6/beam_freq
        Beam = beam.GaussianBeam(beam_width, beam_freq)
        # Figure out the range of angular lags we need to consider in our
        # coordinate system.
        # First the max lag.
        # This is inefficient if the range of Drho_Ddeg is very wide.
        max_lag = (max(ra) - min(ra))**2 + (max(dec) - min(dec))**2
        max_lag = sp.sqrt(max_lag)*max(Drho_Ddeg)
        # Figure out the minimum lag step.
        lag_step = min([abs(map.info['ra_delta']) *
                        sp.cos(sp.pi*map.info['dec_centre']/180.0), 
                        abs(map.info['dec_delta'])])
        if max_lag/lag_step > 10*(ndec + nra) :
            raise RuntimeError("Dynamic range is very large.  There will be "
                               "too many integrals to do.")
        # There is probably a more efficient lag binning than this... piecewise
        # linear then log?
        divisions = 10.0
        lag = sp.arange(0.0, max_lag + lag_step/divisions, lag_step/divisions)
        sq_lag = lag**2
        nlag = len(lag)

        #### Integral.  Loop over all possible lags and do the integral.
        # Allocate memory for all f,f',lag combinations.
        integrals = sp.empty((nfreq, nfreq, nlag), dtype=float)
        if self.feedback >= 2 :
            print "Starting integrals."
        for find1 in xrange(nfreq) :
            for find2 in xrange(nfreq) :
                for lind in xrange(nlag) :
                    # Get separation in radial direction.
                    delta_z = abs(z[find1] - z[find2])
                    # Get the window functions.
                    W, rho_limits = Beam.angular_real_space_window(freq[find1],
                            freq[find2], return_limits=True)
                    Q, z_limits = Beam.radial_real_space_window(z_width[find1],
                            z_width[find2], return_limits=True)
                    # Construct the integrand.  (Renamed from "int", which
                    # shadowed the builtin.)
                    int_func = integrand(corr_function, W, Q, lag[lind], delta_z)
                    # Integrate.
                    # Riemann sum is the most efficient integration algorithm
                    # known to mankind.
                    z_vals = sp.arange(z_limits[0], z_limits[1], 
                                       (z_limits[1] - z_limits[0])/20)
                    z_vals = z_vals[None, :]
                    rho_vals = sp.arange(rho_limits[0], rho_limits[1], 
                                       (rho_limits[1] - rho_limits[0])/20)
                    rho_vals = rho_vals[:, None]
                    result = (sp.sum(int_func(rho_vals, z_vals))
                              * (z_limits[1] - z_limits[0])/20
                              * (rho_limits[1] - rho_limits[0])/20)
                    # Store the result.
                    integrals[find1, find2, lind] = result
        if self.feedback >= 2 :
            print "Integrals done."

        #### Assignment.  Allocate memory, loop over elements and assign.
        # Allocate memory and build final covariance matrix object.
        covariance = algebra.open_memmap(params["out_file_name"], 
                        mode='w+', dtype=float, shape=(nfreq, nra, ndec, nfreq,
                        nra, ndec))
        covariance = algebra.make_mat(covariance, axis_names=("freq", "ra", 
                        "dec","freq", "ra", "dec"), 
                        row_axes=(0, 1, 2), col_axes=(3, 4, 5))
        covariance.copy_axis_info(map)
        # Make a matrix of angular pairwise lags.
        sq_angles = (dec[None, :, None, None] - dec[None, None, None, :])**2
        sq_angles = sq_angles + \
                (ra[:, None, None, None] - ra[None, None, :, None])**2
        if self.feedback >= 2 :
            print "Filling covariance matrix by interpolation."
        # Now fill in the elements by interpolating integrals.
        for find1 in xrange(nfreq) :
            for find2 in xrange(nfreq) :
                # The pairwise angular lags in the unit system.
                this_sq_lag = (sq_angles*(Drho_Ddeg[find1] + 
                                                Drho_Ddeg[find2])**2/4)
                # The interpolation function.  Perhaps there is a better
                # algorithm than cubic?
                interpolator = interpolate.interp1d(sq_lag,
                                integrals[find1,find2,:], bounds_error=True,
                                kind='cubic')
                covariance[find1,:,:,find2,:,:] = interpolator(this_sq_lag)
        del covariance, map
        if self.feedback >= 2 :
            print "Done"
Example No. 19
def combine_maps(source_key, alt_weight=None,
                 signal='map', weight='weight', divider_token=";",
                 fullcov=False):
    r"""
    `source_key` is the file db key for the maps to combine
    `alt_weight` is an optional alternate weighting for the maps
    `signal` is the tag in the file db entry for the signal maps
    `weight` is the tag in the file db entry for the N^-1 weights
    `fullcov` uses a memory map for large N^-1 and pulls the diagonal
    `divider_token` is the token that divides the map section name
            from the data type e.g. "A_with_B;noise_inv"
    """
    datapath_db = data_paths.DataPath()
    source_fdb = datapath_db.fetch(source_key, intend_read=True,
                                   silent=True)
    source_fdict = source_fdb[1]

    # accumulate all the files to combine
    weightkeys = {}
    signalkeys = {}
    for filekey in source_fdb[0]:
        if divider_token in filekey:
            data_type = filekey.split(divider_token)[1]
            map_section = filekey.split(divider_token)[0]

            if data_type == signal:
                signalkeys[map_section] = source_fdict[filekey]

            if data_type == weight:
                weightkeys[map_section] = source_fdict[filekey]

    signal_list = []
    weight_list = []
    for mapkey in signalkeys:
        signalfile = signalkeys[mapkey]
        weightfile = weightkeys[mapkey]
        print "loading pair: %s %s" % (signalfile, weightfile)
        signal_list.append(algebra.make_vect(algebra.load(signalfile)))

        if fullcov:
            raw_weight = algebra.make_mat(
                            algebra.open_memmap(weightfile))
            raw_weight = raw_weight.mat_diag()
        else:
            raw_weight = algebra.make_vect(algebra.load(weightfile))

        # zero out any messy stuff (should have been done already)
        raw_weight[raw_weight < 1.e-20] = 0.
        raw_weight[np.isnan(raw_weight)] = 0.
        raw_weight[np.isinf(raw_weight)] = 0.
        weight_list.append(raw_weight)

    prodmap = []
    for mapind in range(0, len(signal_list)):
        prodmap.append(signal_list[mapind] * weight_list[mapind])

    for mapind in range(1, len(signal_list)):
        prodmap[0] += prodmap[mapind]
        weight_list[0] += weight_list[mapind]

    algebra.compressed_array_summary(weight_list[0], "weight map")
    algebra.compressed_array_summary(prodmap[0], "product map")

    newmap = prodmap[0] / weight_list[0]

    newweights = weight_list[0]
    prodmap = prodmap[0]

    newweights[newweights < 1.e-20] = 0.
    prodmap[newweights < 1.e-20] = 0.

    # if the new map is nan or inf, set it and the weights to zero
    nan_array = np.isnan(newmap)
    newmap[nan_array] = 0.
    prodmap[nan_array] = 0.
    newweights[nan_array] = 0.
    inf_array = np.isinf(newmap)
    newmap[inf_array] = 0.
    prodmap[inf_array] = 0.
    newweights[inf_array] = 0.
    algebra.compressed_array_summary(newmap, "new map")
    algebra.compressed_array_summary(prodmap, "final map * weight")
    algebra.compressed_array_summary(newweights, "final weight map")

    # write out the three maps (combined map, map * weight, weight)
    signal_out = source_fdict['combined_map']
    prodmap_out = source_fdict['combined_mapxweight']
    weight_out = source_fdict['combined_weight']

    # alternately
    #signal_out = "%s/%s_signal.npy" % (outputdir, source_key)
    #prodmap_out = "%s/%s_product.npy" % (outputdir, source_key)
    #weight_out = "%s/%s_weight.npy" % (outputdir, source_key)

    algebra.save(signal_out, newmap)
    algebra.save(prodmap_out, prodmap)
    algebra.save(weight_out, newweights)
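
A small sketch of the divider_token convention described in the docstring, using its own example key:

filekey = "A_with_B;noise_inv"
map_section, data_type = filekey.split(";")
# map_section == "A_with_B", data_type == "noise_inv"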
Example No. 20
        # Iterating through an n-dimensional array produces slices along
        # the first axis. This is equivalent to data[i,:,:] in this case
        for data_slice in data:

            # The formatting string writes the values out in left-justified
            # columns 7 characters wide with 2 decimal places.
            np.savetxt(outfile, data_slice, fmt="%-7.2f")

            # Writing out a break to indicate different slices...
            outfile.write("# New slice\n")


if __name__ == "__main__":
    if len(sys.argv) == 2:
        # Argument should just be a .npy file.
        array = algebra.load(sys.argv[1])
        out_fname = sys.argv[1].split("/")[-1][:-4] + ".txt"
        tofile(out_fname, array)
    elif len(sys.argv) == 3 and sys.argv[1] == "diag":
        # Second argument should be a .npy file that should be interpreted as a
        # matrix and we want to save the diagonal.
        mat = algebra.open_memmap(sys.argv[2])
        mat = algebra.make_mat(mat)
        array = mat.mat_diag()
        out_fname = sys.argv[2].split("/")[-1][:-4] + ".txt"
        tofile(out_fname, array)
    else:
        print("Usage : python alg2txt.py [input file] or" " python alg2txt.py diag [input file]")
Example No. 21
    def execute(self, nprocesses=1) :
        """Worker funciton."""
        params = self.params
        # Make parent directory and write parameter file.
        kiyopy.utils.mkparents(params['output_root'])
        parse_ini.write_params(params, params['output_root'] + 'params.ini',
                               prefix='mm_')
        save_noise_diag = params['save_noise_diag']
        in_root = params['input_root']
        all_out_fname_list = []
        all_in_fname_list = []
        # Loop over files to process.
        for pol_str in params['polarizations']:
            dmap_fname = in_root + 'dirty_map_' + pol_str + '.npy'
            noise_fname = in_root + 'noise_inv_' + pol_str + '.npy'
            all_in_fname_list.append(
                kiyopy.utils.abbreviate_file_path(dmap_fname))
            all_in_fname_list.append(
                kiyopy.utils.abbreviate_file_path(noise_fname))
            # Load the dirty map and the noise matrix.
            dirty_map = algebra.load(dmap_fname)
            dirty_map = algebra.make_vect(dirty_map)
            if dirty_map.axes != ('freq', 'ra', 'dec') :
                raise ce.DataError("Expeced dirty map to have axes "
                                   "('freq', 'ra', 'dec'), but it has axes: "
                                   + str(dirty_map.axes))
            shape = dirty_map.shape
            noise_inv = algebra.open_memmap(noise_fname, 'r')
            noise_inv = algebra.make_mat(noise_inv)
            # Initialize the clean map.
            clean_map = algebra.info_array(sp.zeros(dirty_map.shape))
            clean_map.info = dict(dirty_map.info)
            clean_map = algebra.make_vect(clean_map)
            # If needed, initialize a map for the noise diagonal.
            if save_noise_diag :
                noise_diag = algebra.zeros_like(clean_map)
            # Two cases for the noise.  If it's the same shape as the map then
            # the noise is diagonal.  Otherwise, it should be block diagonal in
            # frequency.
            if noise_inv.ndim == 3 :
                if noise_inv.axes != ('freq', 'ra', 'dec') :
                    raise ce.DataError("Expeced noise matrix to have axes "
                                       "('freq', 'ra', 'dec'), but it has: "
                                       + str(noise_inv.axes))
                # Noise inverse can fit in memory, so copy it.
                noise_inv_memory = sp.array(noise_inv, copy=True)
                # Find the non-singular (covered) pixels: those carrying
                # appreciable inverse-noise information.
                max_information = noise_inv_memory.max()
                good_data = noise_inv_memory > 1.0e-10*max_information
                # Make the clean map.
                clean_map[good_data] = (dirty_map[good_data] 
                                        / noise_inv_memory[good_data])
                if save_noise_diag :
                    noise_diag[good_data] = 1/noise_inv_memory[good_data]
            elif noise_inv.ndim == 5 :
                if noise_inv.axes != ('freq', 'ra', 'dec', 'ra', 'dec') :
                    raise ce.DataError("Expeced noise matrix to have axes "
                                       "('freq', 'ra', 'dec', 'ra', 'dec'), "
                                       "but it has: "
                                       + str(noise_inv.axes))
                # Arrange the dirty map as a vector.
                dirty_map_vect = sp.array(dirty_map) # A copy, as a plain array.
                dirty_map_vect.shape = (shape[0], shape[1]*shape[2])
                frequencies = dirty_map.get_axis('freq')/1.0e6
                # Allocate memory only once.
                noise_inv_freq = sp.empty((shape[1], shape[2], shape[1],
                                           shape[2]), dtype=float)
                if self.feedback > 1 :
                    print "Inverting noise matrix."
                # Block diagonal in frequency so loop over frequencies.
                for ii in xrange(dirty_map.shape[0]) :
                    if self.feedback > 1:
                        print "Frequency: ", "%5.1f"%(frequencies[ii]),
                    if self.feedback > 2:
                        print ", start mmap read:",
                        sys.stdout.flush()
                    noise_inv_freq[...] = noise_inv[ii, ...]
                    if self.feedback > 2:
                        print "done, start eig:",
                        sys.stdout.flush()
                    noise_inv_freq.shape = (shape[1]*shape[2],
                                            shape[1]*shape[2])
                    # Solve the map making equation by diagonalization.
                    noise_inv_diag, Rot = sp.linalg.eigh(noise_inv_freq, 
                                                         overwrite_a=True)
                    if self.feedback > 2:
                        print "done",
                    map_rotated = sp.dot(Rot.T, dirty_map_vect[ii])
                    # Zero out infinite noise modes.
                    bad_modes = noise_inv_diag < 1.0e-5*noise_inv_diag.max()
                    if self.feedback > 1:
                        print ", discarded: ",
                        print "%4.1f"%(100.0*sp.sum(bad_modes)/bad_modes.size),
                        print "% of modes",
                    if self.feedback > 2:
                        print ", start rotations:",
                        sys.stdout.flush()
                    map_rotated[bad_modes] = 0.
                    noise_inv_diag[bad_modes] = 1.0
                    # Solve for the clean map and rotate back.
                    map_rotated /= noise_inv_diag
                    map = sp.dot(Rot, map_rotated)
                    if self.feedback > 2:
                        print "done",
                        sys.stdout.flush()
                    # Fill the clean array.
                    map.shape = (shape[1], shape[2])
                    clean_map[ii, ...] = map
                    if save_noise_diag :
                        # Using C = R Lambda R^T 
                        # where Lambda = diag(1/noise_inv_diag).
                        temp_noise_diag = 1/noise_inv_diag
                        temp_noise_diag[bad_modes] = 0
                        # Multiply R by the diagonal eigenvalue matrix.
                        # Broadcasting does equivalent of mult by diag matrix.
                        temp_mat = Rot*temp_noise_diag
                        # Multiply by R^T, but only calculate the diagonal
                        # elements.
                        for jj in range(shape[1]*shape[2]) :
                            temp_noise_diag[jj] = sp.dot(temp_mat[jj,:], 
                                                         Rot[jj,:])
                        temp_noise_diag.shape = (shape[1], shape[2])
                        noise_diag[ii, ...] = temp_noise_diag
                    # Return workspace memory to original shape.
                    noise_inv_freq.shape = (shape[1], shape[2],
                                            shape[1], shape[2])
                    if self.feedback > 1:
                        print ""
                        sys.stdout.flush()
            elif noise_inv.ndim == 6 :
                raise NotImplementedError("Full noise matrix not yet "
                                          "implemented.  Best we can do is "
                                          "block diagonal in frequency.")
            else :
                raise ce.DataError("Noise matrix has bad shape.")
            # Write the clean map to file.
            out_fname = params['output_root'] + 'clean_map_' + pol_str + '.npy'
            algebra.save(out_fname, clean_map)
            all_out_fname_list.append(
                kiyopy.utils.abbreviate_file_path(out_fname))
            if save_noise_diag :
                noise_diag_fname = (params['output_root'] + 'noise_diag_'
                                    + pol_str + '.npy')
                algebra.save(noise_diag_fname, noise_diag)
                all_out_fname_list.append(
                    kiyopy.utils.abbreviate_file_path(noise_diag_fname))

        # Finally update the history object.
        history = hist.read(in_root + 'history.hist')
        history.add('Read map and noise files:', all_in_fname_list)
        history.add('Converted dirty map to clean map.', all_out_fname_list)
        h_fname = params['output_root'] + "history.hist"
        history.write(h_fname)
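
A sketch of the noise_diag shortcut above: the diagonal of C = R Lambda R^T (Lambda = diag(1/noise_inv_diag)) can be computed without forming the full matrix, which is what the row-by-row dot products do:

import numpy as np

n = 4
rot = np.linalg.qr(np.random.randn(n, n))[0]  # stand-in rotation matrix
lam = np.random.rand(n) + 0.1                 # stand-in diagonal of Lambda
diag_fast = np.sum(rot * lam[None, :] * rot, axis=1)
diag_full = np.diag(np.dot(rot * lam[None, :], rot.T))
assert np.allclose(diag_fast, diag_full)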
Example No. 22
def combine_maps(param_dict, fullcov=False, verbose=False):
    """combines a list of maps as a weighted mean using a specified list of
    inverse covariance weights
    fullcov indicates that it is not just the diagonal and should be squashed
    """
    print param_dict
    covlist = param_dict["covlist"]
    try:
        mul_cov_list = zip(covlist, param_dict["multiply_cov"])
        print "using user-specified covariance multipliers" + repr(param_dict["multiply_cov"])
    except KeyError:
        mul_cov_list = zip(covlist, [1.0] * len(covlist))

    maps = []
    for tagname in param_dict["maplist"]:
        if verbose:
            print tagname
        maps.append(algebra.make_vect(algebra.load(param_dict["root_data"] + tagname + ".npy")))

    weights = []
    for cov_entry in mul_cov_list:
        if verbose:
            print cov_entry
        (tagname, multiplier) = cov_entry

        if fullcov:
            raw_weight = algebra.make_mat(algebra.open_memmap(param_dict["root_cov"] + tagname + ".npy", mode="r"))
            raw_weight = raw_weight.mat_diag()
        else:
            raw_weight = algebra.make_vect(algebra.load(param_dict["root_cov"] + tagname + ".npy"))

        # zero out any messy stuff
        raw_weight *= multiplier
        raw_weight[raw_weight < 1.0e-20] = 0.0
        raw_weight[np.isnan(raw_weight)] = 0.0
        raw_weight[np.isinf(raw_weight)] = 0.0
        weights.append(raw_weight)

    prodmap = []
    for mapind in range(0, len(maps)):
        prodmap.append(maps[mapind] * weights[mapind])

    for mapind in range(1, len(maps)):
        prodmap[0] += prodmap[mapind]
        weights[0] += weights[mapind]

    algebra.compressed_array_summary(weights[0], "weight map")
    algebra.compressed_array_summary(prodmap[0], "product map")

    newmap = prodmap[0] / weights[0]

    newweights = weights[0]
    newweights[newweights < 1.0e-20] = 0.0
    # if the new map is nan or inf, set it and the weights to zero
    nan_array = np.isnan(newmap)
    newmap[nan_array] = 0.0
    newweights[nan_array] = 0.0
    inf_array = np.isinf(newmap)
    newmap[inf_array] = 0.0
    newweights[inf_array] = 0.0
    algebra.compressed_array_summary(newmap, "new map")
    algebra.compressed_array_summary(newweights, "final weight map")

    return (newmap, newweights, prodmap[0])
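The core of combine_maps is an inverse-variance weighted mean. As a minimal self-contained sketch (a hypothetical helper on plain numpy arrays, without the algebra container types):

import numpy as np

def weighted_mean_maps(maps, weights):
    # Accumulate sum(w_i * m_i) and sum(w_i), then divide wherever the
    # total weight is non-zero; uncovered pixels stay zero.
    prod_sum = np.zeros_like(maps[0])
    weight_sum = np.zeros_like(weights[0])
    for map_i, weight_i in zip(maps, weights):
        prod_sum += map_i * weight_i
        weight_sum += weight_i
    covered = weight_sum > 0.
    combined = np.zeros_like(prod_sum)
    combined[covered] = prod_sum[covered] / weight_sum[covered]
    return combined, weight_sum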
Exemplo n.º 23
def combine_maps_driver(inputmap_dict, inputweight_dict, output_dict,
                        fullcov=False, datapath_db=None):
    r"""Combine a list of weights, maps specified by their database keys
    """
    if datapath_db is None:
        datapath_db = data_paths.DataPath()

    signal_list = []
    weight_list = []
    for mapkey in inputmap_dict:
        signalfile = inputmap_dict[mapkey]
        weightfile = inputweight_dict[mapkey]
        print "loading pair: %s %s" % (signalfile, weightfile)
        signal_list.append(algebra.make_vect(algebra.load(signalfile)))

        if fullcov:
            raw_weight = algebra.make_mat(
                            algebra.open_memmap(weightfile))
            raw_weight = raw_weight.mat_diag()
        else:
            raw_weight = algebra.make_vect(algebra.load(weightfile))

        # zero out any messy stuff
        raw_weight[raw_weight < 1.e-20] = 0.
        raw_weight[np.isnan(raw_weight)] = 0.
        raw_weight[np.isinf(raw_weight)] = 0.
        weight_list.append(raw_weight)

    prodmap = []
    for mapind in range(0, len(signal_list)):
        prodmap.append(signal_list[mapind] * weight_list[mapind])

    print "CHECK THESE: %d %d %d" % (len(signal_list), len(weight_list),
                                     len(prodmap))

    cumulative_product = algebra.zeros_like(prodmap[0])
    cumulative_weight = algebra.zeros_like(prodmap[0])
    for mapind in range(0, len(signal_list)):
        cumulative_product += prodmap[mapind]
        cumulative_weight += weight_list[mapind]

    algebra.compressed_array_summary(cumulative_weight, "weight map")
    algebra.compressed_array_summary(cumulative_product, "product map")

    newmap = cumulative_product / cumulative_weight

    cumulative_weight[cumulative_weight < 1.e-20] = 0.
    cumulative_product[cumulative_weight < 1.e-20] = 0.

    # if the new map is nan or inf, set it and the weights to zero
    nan_array = np.isnan(newmap)
    newmap[nan_array] = 0.
    cumulative_product[nan_array] = 0.
    cumulative_weight[nan_array] = 0.
    inf_array = np.isinf(newmap)
    newmap[inf_array] = 0.
    cumulative_product[inf_array] = 0.
    cumulative_weight[inf_array] = 0.
    algebra.compressed_array_summary(newmap, "new map")
    algebra.compressed_array_summary(cumulative_product, "final map * weight")
    algebra.compressed_array_summary(cumulative_weight, "final weight map")

    print output_dict
    algebra.save(output_dict['map'], newmap)
    algebra.save(output_dict['product'], cumulative_product)
    algebra.save(output_dict['weight'], cumulative_weight)
    algebra.save(output_dict['ones'], algebra.ones_like(newmap))
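A hypothetical call to combine_maps_driver, with illustrative keys and file paths only (real ones come from the data_paths database):

combine_maps_driver(
    {"secA": "maps/secA_clean_map_I.npy",
     "secB": "maps/secB_clean_map_I.npy"},
    {"secA": "maps/secA_noise_inv_diag_I.npy",
     "secB": "maps/secB_noise_inv_diag_I.npy"},
    {"map": "out/combined_map.npy",
     "product": "out/combined_product.npy",
     "weight": "out/combined_weight.npy",
     "ones": "out/combined_ones.npy"},
    fullcov=False)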
Exemplo n.º 24
    def execute(self, nprocesses=1):
        """Function that acctually does the work.

        The nprocesses parameter does not do anything yet.  It is just there
        for compatibility with the pipeline manager.
        """
        params = self.params
        kiyopy.utils.mkparents(params["output_root"])
        parse_ini.write_params(params, params["output_root"] + "params.ini", prefix="mm_")
        # Rename some commonly used parameters.
        map_shape = params["map_shape"]
        spacing = params["pixel_spacing"]
        algorithm = params["noise_model"]
        noise_root = params["noise_parameters_input_root"]
        ra_spacing = -spacing / sp.cos(params["field_centre"][1] * sp.pi / 180.0)
        if not algorithm in ("grid", "diag_file", "disjoint_scans"):
            raise ValueError("Invalid noise model: " + algorithm)
        if len(params["IFs"]) != 1:
            raise ce.FileParameterTypeError("Can only process a single IF.")

        # Set up to iterate over the pol states.
        npol = 2  # This will be reset when we read the first data block.
        pol_ind = 0

        all_file_names = []

        while pol_ind < npol:
            # Flag for the first block processed (will allocate memory on the
            # first iteration).
            first_block = True
            # Loop over the files to process.
            try:
                for file_middle in params["file_middles"]:
                    input_fname = params["input_root"] + file_middle + params["input_end"]
                    # Read in the data, and loop over data blocks.
                    Reader = fitsGBT.Reader(input_fname, feedback=self.feedback)
                    Blocks = Reader.read(params["scans"], params["IFs"])

                    # Calculate the time variance at each frequency.  This will
                    # be used as weights in most algorithms.
                    if not algorithm == "grid":
                        if not noise_root == "None":
                            # We have measured variance.
                            noise_pars = sp.load(noise_root + file_middle + ".npy")
                            var = noise_pars[params["IFs"][0], pol_ind, 0, :]
                        else:
                            # We need to measure the variance.
                            var = tools.calc_time_var_file(Blocks, pol_ind, 0)
                            # Convert from masked array to array.
                            var = var.filled(9999.0)
                    else:
                        var = 1.0
                    weight = 1 / var

                    for Data in Blocks:
                        dims = Data.dims
                        # On first pass set up the map parameters.
                        if first_block:
                            shape = map_shape + (dims[-1],)
                            Data.calc_freq()
                            centre_freq = Data.freq[dims[-1] // 2]
                            delta_freq = Data.field["CDELT1"]
                            if pol_ind == 0:
                                # Figure out the length of the polarization
                                # loop.
                                npol = dims[1]
                                # Accumulate the data history.
                                history = hist.History(Data.history)
                            # Get the current polarization integer.
                            this_pol = Data.field["CRVAL4"][pol_ind]
                            # Check that we even want to make a dirty map for
                            # this polarization.
                            if (not utils.polint2str(this_pol) in params["polarizations"]) and params["polarizations"]:
                                # Break to the end of the polarization loop.
                                raise ce.NextIteration()
                            # Allocate memory for the map.
                            map_data = sp.zeros(shape, dtype=float)
                            map_data = algebra.make_vect(map_data, axis_names=("ra", "dec", "freq"))
                            # Allocate memory for the inverse map noise.
                            if algorithm in ("grid", "diag_file"):
                                noise_inv = sp.zeros(shape, dtype=float)
                                noise_inv = algebra.make_mat(
                                    noise_inv, axis_names=("ra", "dec", "freq"), row_axes=(0, 1, 2), col_axes=(0, 1, 2)
                                )
                            elif algorithm in ("disjoint_scans", "ds_grad"):
                                # At each frequency use full N^2 noise matrix,
                                # but assume each frequency has uncorrelated
                                # noise. This is a big matrix so make sure it
                                # is reasonable.
                                size = shape[0] ** 2 * shape[1] ** 2 * shape[2]
                                if size > 4e9:  # 16 GB of float32s.
                                    raise RuntimeError("Map size too big. "
                                                       "Asked for a lot "
                                                       "of memory.")
                                noise_inv = sp.zeros(shape[0:2] + shape, dtype=sp.float32)
                                noise_inv = algebra.make_mat(
                                    noise_inv,
                                    axis_names=("ra", "dec", "ra", "dec", "freq"),
                                    row_axes=(0, 1, 4),
                                    col_axes=(2, 3, 4),
                                )
                                # Allocate memory for temporary data. Hold the
                                # number of times each pixel in this scan is
                                # hit. Factor of 2 longer in time in case some
                                # scans are longer than first block (guppi).
                                pixel_hits = sp.empty((2 * dims[0], dims[-1]))
                            first_block = False
                        else:
                            if pol_ind == 0:
                                history.merge(Data)
                        # Figure out the pointing pixel index and the frequency
                        # indices.
                        Data.calc_pointing()
                        ra_inds = tools.calc_inds(Data.ra, params["field_centre"][0], shape[0], ra_spacing)
                        dec_inds = tools.calc_inds(
                            Data.dec, params["field_centre"][1], shape[1], params["pixel_spacing"]
                        )
                        data = Data.data[:, pol_ind, 0, :]
                        if algorithm in ("grid", "diag_file"):
                            add_data_2_map(data, ra_inds, dec_inds, map_data, noise_inv, weight)
                        elif algorithm in ("disjoint_scans",):
                            add_data_2_map(data - ma.mean(data, 0), ra_inds, dec_inds, map_data, None, weight)
                            pixel_hits[:] = 0
                            pixel_list = pixel_counts(data, ra_inds, dec_inds, pixel_hits, map_shape=shape[0:2])
                            add_scan_noise(pixel_list, pixel_hits, var, noise_inv)
                        # End Blocks for loop.
                    # End file name for loop.
                # Now write the dirty maps out for this polarization.
                # Use memmaps for this since we want to reorganize data
                # and write at the same time.
                # New maps will have the frequency axis as slowly varying, for
                # future efficiency.
                map_file_name = params["output_root"] + "dirty_map_" + utils.polint2str(this_pol) + ".npy"
                mfile = algebra.open_memmap(map_file_name, mode="w+", shape=(shape[2],) + shape[:2])
                map_mem = algebra.make_vect(mfile, axis_names=("freq", "ra", "dec"))
                # And the noise matrix.
                noise_file_name = params["output_root"] + "noise_inv_" + utils.polint2str(this_pol) + ".npy"
                if algorithm in ("disjoint_scans", "ds_grad"):
                    mfile = algebra.open_memmap(noise_file_name, mode="w+", shape=(shape[2],) + shape[:2] * 2)
                    noise_mem = algebra.make_mat(
                        mfile, axis_names=("freq", "ra", "dec", "ra", "dec"), row_axes=(0, 1, 2), col_axes=(0, 3, 4)
                    )
                else:
                    mfile = algebra.open_memmap(noise_file_name, mode="w+", shape=(shape[2],) + shape[:2])
                    noise_mem = algebra.make_mat(
                        mfile, axis_names=("freq", "ra", "dec"), row_axes=(0, 1, 2), col_axes=(0, 1, 2)
                    )
                # Give the data arrays axis information.
                map_mem.set_axis_info("freq", centre_freq, delta_freq)
                map_mem.set_axis_info("ra", params["field_centre"][0], ra_spacing)
                map_mem.set_axis_info("dec", params["field_centre"][1], params["pixel_spacing"])
                noise_mem.set_axis_info("freq", centre_freq, delta_freq)
                noise_mem.set_axis_info("ra", params["field_centre"][0], ra_spacing)
                noise_mem.set_axis_info("dec", params["field_centre"][1], params["pixel_spacing"])
                # Copy the data to the memory maps after rearranging.
                # sp.rollaxis should return a view, so this should
                # be memory efficient.
                map_mem[...] = sp.rollaxis(map_data, -1)
                noise_mem[...] = sp.rollaxis(noise_inv, -1)

                # Free up all that memory and flush memory maps to file.
                del mfile, map_mem, noise_mem, map_data, noise_inv

                # Save the file names for the history.
                all_file_names.append(kiyopy.utils.abbreviate_file_path(map_file_name))
                all_file_names.append(kiyopy.utils.abbreviate_file_path(noise_file_name))
            except ce.NextIteration:
                pass
            pol_ind += 1
            # End polarization for loop.
        history.add("Made dirty map.", all_file_names)
        h_file_name = params["output_root"] + "history.hist"
        history.write(h_file_name)
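add_data_2_map is not included in this listing; for the diagonal-noise ("grid" and "diag_file") models it presumably accumulates inverse-variance-weighted samples into pixels, along these lines (an assumption for illustration, not the pipeline's actual routine):

def add_data_2_map_sketch(data, ra_inds, dec_inds, map_data, noise_inv,
                          weight):
    # data: (n_time, n_freq); map_data and noise_inv: (n_ra, n_dec, n_freq).
    # weight is 1/var per frequency, or a scalar for the 'grid' model.
    for tt in xrange(data.shape[0]):
        ii, jj = ra_inds[tt], dec_inds[tt]
        if 0 <= ii < map_data.shape[0] and 0 <= jj < map_data.shape[1]:
            map_data[ii, jj, :] += weight * data[tt, :]
            noise_inv[ii, jj, :] += weight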
Exemplo n.º 25
 def execute(self, nprocesses=1) :
     """Worker funciton."""
     params = self.params
     # Make parent directory and write parameter file.
     kiyopy.utils.mkparents(params['output_root'])
     parse_ini.write_params(params, params['output_root'] + 'params.ini',
                            prefix=prefix)
     save_noise_diag = params['save_noise_diag']
     in_root = params['input_root']
     all_out_fname_list = []
     all_in_fname_list = []
     # Figure out what the band names are.
     bands = params['bands']
     if not bands:
         # Infer the band names from the dirty map files of the first
         # polarization (pol_str is not defined until the loop below).
         first_pol = params['polarizations'][0]
         map_files = glob.glob(in_root + 'dirty_map_' + first_pol + "_*.npy")
         bands = []
         root_len = len(in_root + 'dirty_map_' + first_pol + '_')
         for file_name in map_files:
             bands.append(file_name[root_len:-4])
     # Loop over files to process.
     for pol_str in params['polarizations']:
         for band in bands:
             if band == -1:
                 band_str = ''
             else:
                 band_str =  "_" + repr(band)
             dmap_fname = (in_root + 'dirty_map_' + pol_str + 
                           band_str + '.npy')
             all_in_fname_list.append(
                 kiyopy.utils.abbreviate_file_path(dmap_fname))
             # Load the dirty map and the noise matrix.
             dirty_map = algebra.load(dmap_fname)
             dirty_map = algebra.make_vect(dirty_map)
             if dirty_map.axes != ('freq', 'ra', 'dec') :
                 msg = ("Expeced dirty map to have axes ('freq',"
                        "'ra', 'dec'), but it has axes: "
                        + str(dirty_map.axes))
                 raise ce.DataError(msg)
             shape = dirty_map.shape
             # Initialize the clean map.
             clean_map = algebra.info_array(sp.zeros(dirty_map.shape))
             clean_map.info = dict(dirty_map.info)
             clean_map = algebra.make_vect(clean_map)
             # If needed, initialize a map for the noise diagonal.
             if save_noise_diag :
                 noise_diag = algebra.zeros_like(clean_map)
             if params["from_eig"]:
                 # Solving from eigen decomposition of the noise instead of
                 # the noise itself.
                 # Load in the decomposition.
                 evects_fname = (in_root + 'noise_evects_' + pol_str +
                                 band_str + '.npy')
                 if self.feedback > 1:
                     print "Using dirty map: " + dmap_fname
                     print "Using eigenvectors: " + evects_fname
                 evects = algebra.open_memmap(evects_fname, 'r')
                 evects = algebra.make_mat(evects)
                 evals_inv_fname = (in_root + 'noise_evalsinv_' + pol_str
                                    + band_str + '.npy')
                 evals_inv = algebra.load(evals_inv_fname)
                 evals_inv = algebra.make_mat(evals_inv)
                 # Solve for the map.
                 if params["save_noise_diag"]:
                     clean_map, noise_diag = solve_from_eig(evals_inv,
                                 evects, dirty_map, True, self.feedback)
                 else:
                     clean_map = solve_from_eig(evals_inv,
                                 evects, dirty_map, False, self.feedback)
                 # Delete the eigen vectors to recover memory.
                 del evects
             else:
                 # Solving from the noise.
                 noise_fname = (in_root + 'noise_inv_' + pol_str +
                                band_str + '.npy')
                 if self.feedback > 1:
                     print "Using dirty map: " + dmap_fname
                     print "Using noise inverse: " + noise_fname
                 all_in_fname_list.append(
                     kiyopy.utils.abbreviate_file_path(noise_fname))
                 noise_inv = algebra.open_memmap(noise_fname, 'r')
                 noise_inv = algebra.make_mat(noise_inv)
                 # Two cases for the noise.  If it's the same shape as the map
                 # then the noise is diagonal.  Otherwise, it should be
                 # block diagonal in frequency.
                 if noise_inv.ndim == 3 :
                     if noise_inv.axes != ('freq', 'ra', 'dec') :
                         msg = ("Expeced noise matrix to have axes "
                                 "('freq', 'ra', 'dec'), but it has: "
                                 + str(noise_inv.axes))
                         raise ce.DataError(msg)
                     # Noise inverse can fit in memory, so copy it.
                     noise_inv_memory = sp.array(noise_inv, copy=True)
                     # Find the non-singular (covered) pixels.
                     max_information = noise_inv_memory.max()
                     good_data = noise_inv_memory > 1.0e-10*max_information
                     # Make the clean map.
                     clean_map[good_data] = (dirty_map[good_data] 
                                             / noise_inv_memory[good_data])
                     if save_noise_diag :
                         noise_diag[good_data] = \
                                 1/noise_inv_memory[good_data]
                 elif noise_inv.ndim == 5 :
                     if noise_inv.axes != ('freq', 'ra', 'dec', 'ra',
                                           'dec'):
                         msg = ("Expeced noise matrix to have axes "
                                "('freq', 'ra', 'dec', 'ra', 'dec'), "
                                "but it has: " + str(noise_inv.axes))
                         raise ce.DataError(msg)
                     # Arrange the dirty map as a vector.
                     dirty_map_vect = sp.array(dirty_map) # A copy.
                     dirty_map_vect.shape = (shape[0], shape[1]*shape[2])
                     frequencies = dirty_map.get_axis('freq')/1.0e6
                     # Allocate memory only once.
                     noise_inv_freq = sp.empty((shape[1], shape[2], 
                                     shape[1], shape[2]), dtype=float)
                     if self.feedback > 1 :
                         print "Inverting noise matrix."
                     # Block diagonal in frequency so loop over frequencies.
                     for ii in xrange(dirty_map.shape[0]) :
                         if self.feedback > 1:
                             print "Frequency: ", "%5.1f"%(frequencies[ii]),
                         if self.feedback > 2:
                             print ", start mmap read:",
                             sys.stdout.flush()
                         noise_inv_freq[...] = noise_inv[ii, ...]
                         if self.feedback > 2:
                             print "done, start eig:",
                             sys.stdout.flush()
                         noise_inv_freq.shape = (shape[1]*shape[2],
                                                 shape[1]*shape[2])
                         # Solve the map making equation by diagonalization.
                         noise_inv_diag, Rot = sp.linalg.eigh(
                             noise_inv_freq, overwrite_a=True)
                         if self.feedback > 2:
                             print "done",
                         map_rotated = sp.dot(Rot.T, dirty_map_vect[ii])
                         # Zero out infinite noise modes.
                         bad_modes = (noise_inv_diag
                                      < 1.0e-5 * noise_inv_diag.max())
                         if self.feedback > 1:
                             print ", discarded: ",
                             print "%4.1f" % (100.0 * sp.sum(bad_modes) 
                                              / bad_modes.size),
                             print "% of modes",
                         if self.feedback > 2:
                             print ", start rotations:",
                             sys.stdout.flush()
                         map_rotated[bad_modes] = 0.
                         noise_inv_diag[bad_modes] = 1.0
                         # Solve for the clean map and rotate back.
                         map_rotated /= noise_inv_diag
                         map = sp.dot(Rot, map_rotated)
                         if self.feedback > 2:
                             print "done",
                             sys.stdout.flush()
                         # Fill the clean array.
                         map.shape = (shape[1], shape[2])
                         clean_map[ii, ...] = map
                         if save_noise_diag :
                             # Using C = R Lambda R^T 
                             # where Lambda = diag(1/noise_inv_diag).
                             temp_noise_diag = 1/noise_inv_diag
                             temp_noise_diag[bad_modes] = 0
                             # Multiply R by the diagonal eigenvalue matrix.
                             # Broadcasting does equivalent of mult by diag
                             # matrix.
                             temp_mat = Rot*temp_noise_diag
                             # Multiply by R^T, but only calculate the
                             # diagonal elements.
                             for jj in range(shape[1]*shape[2]) :
                                 temp_noise_diag[jj] = sp.dot(
                                     temp_mat[jj,:], Rot[jj,:])
                             temp_noise_diag.shape = (shape[1], shape[2])
                             noise_diag[ii, ...] = temp_noise_diag
                         # Return workspace memory to original shape.
                         noise_inv_freq.shape = (shape[1], shape[2],
                                                 shape[1], shape[2])
                         if self.feedback > 1:
                             print ""
                             sys.stdout.flush()
                 elif noise_inv.ndim == 6 :
                     if save_noise_diag:
                         # OLD WAY.
                         #clean_map, noise_diag, chol = solve(noise_inv,
                         #        dirty_map, True, feedback=self.feedback)
                         # NEW WAY.
                         clean_map, noise_diag, noise_inv_diag, chol = \
                                   solve(noise_fname, noise_inv, dirty_map,
                                   True, feedback=self.feedback)
                     else:
                         # OLD WAY.
                         #clean_map, chol = solve(noise_inv, dirty_map, 
                         #            False, feedback=self.feedback)
                         # NEW WAY.
                         clean_map, noise_inv_diag, chol = \
                                   solve(noise_fname, noise_inv, dirty_map,
                                   False, feedback=self.feedback)
                     if params['save_cholesky']:
                         chol_fname = (params['output_root'] + 'chol_'
                                     + pol_str + band_str + '.npy')
                         sp.save(chol_fname, chol)
                     if params['save_noise_inv_diag']:
                         noise_inv_diag_fname = (params['output_root'] +
                                    'noise_inv_diag_' + pol_str + band_str 
                                    + '.npy')
                         algebra.save(noise_inv_diag_fname, noise_inv_diag)
                     # Delete the cholesky to recover memory.
                     del chol
                 else :
                     raise ce.DataError("Noise matrix has bad shape.")
                 # In all cases delete the noise object to recover memory.
                 del noise_inv
             # Write the clean map to file.
             out_fname = (params['output_root'] + 'clean_map_'
                          + pol_str + band_str + '.npy')
             if self.feedback > 1:
                 print "Writing clean map to: " + out_fname
             algebra.save(out_fname, clean_map)
             all_out_fname_list.append(
                 kiyopy.utils.abbreviate_file_path(out_fname))
             if save_noise_diag :
                 noise_diag_fname = (params['output_root'] + 'noise_diag_'
                                     + pol_str + band_str + '.npy')
                 algebra.save(noise_diag_fname, noise_diag)
                 all_out_fname_list.append(
                     kiyopy.utils.abbreviate_file_path(noise_diag_fname))
             # Check the clean map for failure.
             if not sp.alltrue(sp.isfinite(clean_map)):
                 n_bad = sp.sum(sp.logical_not(sp.isfinite(clean_map)))
                 msg = ("Non finite entries found in clean map. Solve"
                        " failed. %d out of %d entries bad" 
                        % (n_bad, clean_map.size)) 
                 raise RuntimeError(msg)
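For reference, each pass of the frequency loop above solves one block of the map-making equation by diagonalization: decompose the inverse-noise block as N^{-1} = R Lambda R^T, project out near-singular modes, rescale, and rotate back. A standalone numpy sketch of one frequency slice:

import numpy as np

def solve_one_freq(noise_inv_block, dirty_vec, thresh=1.0e-5):
    # noise_inv_block: (n_pix, n_pix) symmetric; dirty_vec: (n_pix,).
    eigvals, rot = np.linalg.eigh(noise_inv_block)
    rotated = np.dot(rot.T, dirty_vec)
    bad = eigvals < thresh * eigvals.max()  # effectively infinite noise
    rotated[bad] = 0.
    eigvals[bad] = 1.  # dummy value; the bad modes are already zeroed
    return np.dot(rot, rotated / eigvals)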
Exemplo n.º 26
 def execute(self, nprocesses=1):
     """Worker funciton."""
     params = self.params
     # Make parent directory and write parameter file.
     kiyopy.utils.mkparents(params['output_root'])
     parse_ini.write_params(params,
                            params['output_root'] + 'params.ini',
                            prefix=prefix)
     save_noise_diag = params['save_noise_diag']
     in_root = params['input_root']
     all_out_fname_list = []
     all_in_fname_list = []
     # Figure out what the band names are.
     bands = params['bands']
     if not bands:
         # Infer the band names from the dirty map files of the first
         # polarization (pol_str is not defined until the loop below).
         first_pol = params['polarizations'][0]
         map_files = glob.glob(in_root + 'dirty_map_' + first_pol + "_*.npy")
         bands = []
         root_len = len(in_root + 'dirty_map_' + first_pol + '_')
         for file_name in map_files:
             bands.append(file_name[root_len:-4])
     # Loop over files to process.
     for pol_str in params['polarizations']:
         for band in bands:
             if band == -1:
                 band_str = ''
             else:
                 band_str = "_" + repr(band)
             dmap_fname = (in_root + 'dirty_map_' + pol_str + band_str +
                           '.npy')
             all_in_fname_list.append(
                 kiyopy.utils.abbreviate_file_path(dmap_fname))
             # Load the dirty map and the noise matrix.
             dirty_map = algebra.load(dmap_fname)
             dirty_map = algebra.make_vect(dirty_map)
             if dirty_map.axes != ('freq', 'ra', 'dec'):
                 msg = ("Expeced dirty map to have axes ('freq',"
                        "'ra', 'dec'), but it has axes: " +
                        str(dirty_map.axes))
                 raise ce.DataError(msg)
             shape = dirty_map.shape
             # Initialize the clean map.
             clean_map = algebra.info_array(sp.zeros(dirty_map.shape))
             clean_map.info = dict(dirty_map.info)
             clean_map = algebra.make_vect(clean_map)
             # If needed, initialize a map for the noise diagonal.
             if save_noise_diag:
                 noise_diag = algebra.zeros_like(clean_map)
             if params["from_eig"]:
                 # Solving from eigen decomposition of the noise instead of
                 # the noise itself.
                 # Load in the decomposition.
                 evects_fname = (in_root + 'noise_evects_' + pol_str +
                                 band_str + '.npy')
                 if self.feedback > 1:
                     print "Using dirty map: " + dmap_fname
                     print "Using eigenvectors: " + evects_fname
                 evects = algebra.open_memmap(evects_fname, 'r')
                 evects = algebra.make_mat(evects)
                 evals_inv_fname = (in_root + 'noise_evalsinv_' + pol_str +
                                    band_str + '.npy')
                 evals_inv = algebra.load(evals_inv_fname)
                 evals_inv = algebra.make_mat(evals_inv)
                 # Solve for the map.
                 if params["save_noise_diag"]:
                     clean_map, noise_diag = solve_from_eig(
                         evals_inv, evects, dirty_map, True, self.feedback)
                 else:
                     clean_map = solve_from_eig(evals_inv, evects,
                                                dirty_map, False,
                                                self.feedback)
                 # Delete the eigen vectors to recover memory.
                 del evects
             else:
                 # Solving from the noise.
                 noise_fname = (in_root + 'noise_inv_' + pol_str +
                                band_str + '.npy')
                 if self.feedback > 1:
                     print "Using dirty map: " + dmap_fname
                     print "Using noise inverse: " + noise_fname
                 all_in_fname_list.append(
                     kiyopy.utils.abbreviate_file_path(noise_fname))
                 noise_inv = algebra.open_memmap(noise_fname, 'r')
                 noise_inv = algebra.make_mat(noise_inv)
                 # Two cases for the noise.  If it's the same shape as the map
                 # then the noise is diagonal.  Otherwise, it should be
                 # block diagonal in frequency.
                 if noise_inv.ndim == 3:
                     if noise_inv.axes != ('freq', 'ra', 'dec'):
                         msg = ("Expeced noise matrix to have axes "
                                "('freq', 'ra', 'dec'), but it has: " +
                                str(noise_inv.axes))
                         raise ce.DataError(msg)
                     # Noise inverse can fit in memory, so copy it.
                     noise_inv_memory = sp.array(noise_inv, copy=True)
                     # Find the non-singular (covered) pixels.
                     max_information = noise_inv_memory.max()
                     good_data = noise_inv_memory > 1.0e-10 * max_information
                     # Make the clean map.
                     clean_map[good_data] = (dirty_map[good_data] /
                                             noise_inv_memory[good_data])
                     if save_noise_diag:
                         noise_diag[good_data] = \
                                 1/noise_inv_memory[good_data]
                 elif noise_inv.ndim == 5:
                     if noise_inv.axes != ('freq', 'ra', 'dec', 'ra',
                                           'dec'):
                         msg = ("Expeced noise matrix to have axes "
                                "('freq', 'ra', 'dec', 'ra', 'dec'), "
                                "but it has: " + str(noise_inv.axes))
                         raise ce.DataError(msg)
                     # Arrange the dirty map as a vector.
                     dirty_map_vect = sp.array(dirty_map)  # A copy.
                     dirty_map_vect.shape = (shape[0], shape[1] * shape[2])
                     frequencies = dirty_map.get_axis('freq') / 1.0e6
                     # Allocate memory only once.
                     noise_inv_freq = sp.empty(
                         (shape[1], shape[2], shape[1], shape[2]),
                         dtype=float)
                     if self.feedback > 1:
                         print "Inverting noise matrix."
                     # Block diagonal in frequency so loop over frequencies.
                     for ii in xrange(dirty_map.shape[0]):
                         if self.feedback > 1:
                             print "Frequency: ", "%5.1f" % (
                                 frequencies[ii]),
                         if self.feedback > 2:
                             print ", start mmap read:",
                             sys.stdout.flush()
                         noise_inv_freq[...] = noise_inv[ii, ...]
                         if self.feedback > 2:
                             print "done, start eig:",
                             sys.stdout.flush()
                         noise_inv_freq.shape = (shape[1] * shape[2],
                                                 shape[1] * shape[2])
                         # Solve the map making equation by diagonalization.
                         noise_inv_diag, Rot = sp.linalg.eigh(
                             noise_inv_freq, overwrite_a=True)
                         if self.feedback > 2:
                             print "done",
                         map_rotated = sp.dot(Rot.T, dirty_map_vect[ii])
                         # Zero out infinite noise modes.
                         bad_modes = (noise_inv_diag <
                                      1.0e-5 * noise_inv_diag.max())
                         if self.feedback > 1:
                             print ", discarded: ",
                             print "%4.1f" % (100.0 * sp.sum(bad_modes) /
                                              bad_modes.size),
                             print "% of modes",
                         if self.feedback > 2:
                             print ", start rotations:",
                             sys.stdout.flush()
                         map_rotated[bad_modes] = 0.
                         noise_inv_diag[bad_modes] = 1.0
                         # Solve for the clean map and rotate back.
                         map_rotated /= noise_inv_diag
                         map = sp.dot(Rot, map_rotated)
                         if self.feedback > 2:
                             print "done",
                             sys.stdout.flush()
                         # Fill the clean array.
                         map.shape = (shape[1], shape[2])
                         clean_map[ii, ...] = map
                         if save_noise_diag:
                             # Using C = R Lambda R^T
                             # where Lambda = diag(1/noise_inv_diag).
                             temp_noise_diag = 1 / noise_inv_diag
                             temp_noise_diag[bad_modes] = 0
                             # Multiply R by the diagonal eigenvalue matrix.
                             # Broadcasting does equivalent of mult by diag
                             # matrix.
                             temp_mat = Rot * temp_noise_diag
                             # Multiply by R^T, but only calculate the
                             # diagonal elements.
                             for jj in range(shape[1] * shape[2]):
                                 temp_noise_diag[jj] = sp.dot(
                                     temp_mat[jj, :], Rot[jj, :])
                             temp_noise_diag.shape = (shape[1], shape[2])
                             noise_diag[ii, ...] = temp_noise_diag
                         # Return workspace memory to original shape.
                         noise_inv_freq.shape = (shape[1], shape[2],
                                                 shape[1], shape[2])
                         if self.feedback > 1:
                             print ""
                             sys.stdout.flush()
                 elif noise_inv.ndim == 6:
                     if save_noise_diag:
                         # OLD WAY.
                         #clean_map, noise_diag, chol = solve(noise_inv,
                         #        dirty_map, True, feedback=self.feedback)
                         # NEW WAY.
                         clean_map, noise_diag, noise_inv_diag, chol = \
                                   solve(noise_fname, noise_inv, dirty_map,
                                   True, feedback=self.feedback)
                     else:
                         # OLD WAY.
                         #clean_map, chol = solve(noise_inv, dirty_map,
                         #            False, feedback=self.feedback)
                         # NEW WAY.
                         clean_map, noise_inv_diag, chol = \
                                   solve(noise_fname, noise_inv, dirty_map,
                                   False, feedback=self.feedback)
                     if params['save_cholesky']:
                         chol_fname = (params['output_root'] + 'chol_' +
                                       pol_str + band_str + '.npy')
                         sp.save(chol_fname, chol)
                     if params['save_noise_inv_diag']:
                         noise_inv_diag_fname = (params['output_root'] +
                                                 'noise_inv_diag_' +
                                                 pol_str + band_str +
                                                 '.npy')
                         algebra.save(noise_inv_diag_fname, noise_inv_diag)
                     # Delete the cholesky to recover memory.
                     del chol
                 else:
                     raise ce.DataError("Noise matrix has bad shape.")
                 # In all cases delete the noise object to recover memory.
                 del noise_inv
             # Write the clean map to file.
             out_fname = (params['output_root'] + 'clean_map_' + pol_str +
                          band_str + '.npy')
             if self.feedback > 1:
                 print "Writing clean map to: " + out_fname
             algebra.save(out_fname, clean_map)
             all_out_fname_list.append(
                 kiyopy.utils.abbreviate_file_path(out_fname))
             if save_noise_diag:
                 noise_diag_fname = (params['output_root'] + 'noise_diag_' +
                                     pol_str + band_str + '.npy')
                 algebra.save(noise_diag_fname, noise_diag)
                 all_out_fname_list.append(
                     kiyopy.utils.abbreviate_file_path(noise_diag_fname))
             # Check the clean map for failure.
             if not sp.alltrue(sp.isfinite(clean_map)):
                 n_bad = sp.sum(sp.logical_not(sp.isfinite(clean_map)))
                 msg = ("Non finite entries found in clean map. Solve"
                        " failed. %d out of %d entries bad" %
                        (n_bad, clean_map.size))
                 raise RuntimeError(msg)