Example #1
    def load_pairs(self, regenerate=True):
        r"""load the set of map/noise pairs specified by keys handed to the
        database. This sets up operations on the quadratic product
            Q = map1^T noise_inv1 B noise_inv2 map2
        """
        par = self.params
        (self.pairlist, pairdict) = dp.cross_maps(par['map1'],
                                                  par['map2'],
                                                  par['noise_inv1'],
                                                  par['noise_inv2'],
                                                  verbose=False)

        for pairitem in self.pairlist:
            pdict = pairdict[pairitem]
            print "-" * 80
            dp.print_dictionary(
                pdict,
                sys.stdout,
                key_list=['map1', 'noise_inv1', 'map2', 'noise_inv2'])

            map1 = algebra.make_vect(algebra.load(pdict['map1']))
            map2 = algebra.make_vect(algebra.load(pdict['map2']))
            sim = algebra.make_vect(algebra.load(par['simfile']))

            if not par['no_weights']:
                noise_inv1 = self.process_noise_inv(pdict['noise_inv1'],
                                                    regenerate=regenerate)

                noise_inv2 = self.process_noise_inv(pdict['noise_inv2'],
                                                    regenerate=regenerate)
            else:
                noise_inv1 = algebra.ones_like(map1)
                noise_inv2 = algebra.ones_like(map2)

            pair = map_pair.MapPair(map1 + sim, map2 + sim, noise_inv1,
                                    noise_inv2, self.freq_list)

            pair.set_names(pdict['tag1'], pdict['tag2'])
            pair.lags = self.lags
            pair.params = self.params
            self.pairs[pairitem] = pair

            pair_nosim = map_pair.MapPair(map1, map2, noise_inv1, noise_inv2,
                                          self.freq_list)

            pair_nosim.set_names(pdict['tag1'], pdict['tag2'])
            pair_nosim.lags = self.lags
            pair_nosim.params = self.params
            self.pairs_nosim[pairitem] = pair_nosim
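
Example #1 relies on dp.cross_maps only through the structure of pairdict. For orientation, a minimal sketch of that bookkeeping (a hypothetical helper, not the real implementation, which also validates the database keys):

def cross_maps_sketch(map1keys, map2keys, noise1keys, noise2keys):
    # Pair every map1 section with every other map2 section; each entry
    # carries the four data keys plus the two section tags.
    pairlist = []
    pairdict = {}
    for tag1, map1, noise1 in zip("ABCD", map1keys, noise1keys):
        for tag2, map2, noise2 in zip("ABCD", map2keys, noise2keys):
            if tag1 == tag2:
                continue  # keep only cross pairs
            name = "%s_with_%s" % (tag1, tag2)
            pairlist.append(name)
            pairdict[name] = {'map1': map1, 'noise_inv1': noise1,
                              'map2': map2, 'noise_inv2': noise2,
                              'tag1': tag1, 'tag2': tag2}
    return pairlist, pairdict
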
Example #2
def map_pair_cal(uncal_maplist, uncal_weightlist, calfactor_outlist,
                 dirtymap_inlist, dirtymap_outlist,
                 convolve=True, factorizable_noise=False,
                 sub_weighted_mean=True, freq_list=range(256)):

    # reference_clean and reference_weight are module-level paths to the
    # reference map and weight against which the others are calibrated
    map1file = reference_clean
    weight1file = reference_weight
    #map1file = uncal_maplist.pop(0)
    #weight1file = uncal_weightlist.pop(0)
    #calfactor_outlist.pop(0)
    #dirtymap_out0 = dirtymap_outlist.pop(0)
    #dirtymap_in0 = dirtymap_inlist.pop(0)

    # do nothing to the reference map
    #ref_dirtymap = algebra.make_vect(algebra.load(dirtymap_in0))
    #algebra.save(dirtymap_out0, ref_dirtymap)

    # load maps into pairs
    svdout = shelve.open("correlation_pairs_v2.shelve")
    for (map2file, weight2file, calfactor_outfile,
         dirty_infile, dirty_outfile) in zip(uncal_maplist,
                                             uncal_weightlist,
                                             calfactor_outlist,
                                             dirtymap_inlist,
                                             dirtymap_outlist):

        print(map1file, weight1file, map2file, weight2file)

        pair = map_pair.MapPair(map1file, map2file,
                                weight1file, weight2file,
                                freq_list, avoid_db=True)

        if factorizable_noise:
            pair.make_noise_factorizable()

        if sub_weighted_mean:
            pair.subtract_weighted_mean()

        if convolve:
            pair.degrade_resolution()

        (corr, counts) = pair.correlate()
        svd_info = ce.get_freq_svd_modes(corr, len(freq_list))
        svdout[map2file] = svd_info

        # write out the first left/right modes and the per-frequency cal factor
        leftmode = svd_info[1][0]
        rightmode = svd_info[2][0]
        calfactor = leftmode / rightmode

        with open(calfactor_outfile, "w") as facout:
            for outvals in zip(leftmode, rightmode, calfactor):
                facout.write("%10.15g %10.15g %10.15g\n" % outvals)

        newmap = algebra.make_vect(algebra.load(dirty_infile))
        newmap[freq_list, :, :] *= calfactor[:, np.newaxis, np.newaxis]
        algebra.save(dirty_outfile, newmap)
        print(dirty_outfile)

    svdout.close()
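
The calibration above divides the first left SVD mode by the first right mode. A minimal numpy sketch of why that ratio recovers a per-frequency relative gain when the two maps share one sky signal (synthetic gains, not pipeline data):

import numpy as np

g1 = 1.0 + 0.1 * np.random.randn(8)   # hypothetical gains, map 1
g2 = 1.0 + 0.1 * np.random.randn(8)   # hypothetical gains, map 2

# a shared signal makes the freq-freq cross-correlation nearly rank-1
corr = np.outer(g1, g2)

u, s, vt = np.linalg.svd(corr)
calfactor = u[:, 0] / vt[0, :]         # leftmode / rightmode, as above

# equal to g1 / g2 up to a single overall constant
print(calfactor / calfactor[0] * (g1[0] / g2[0]) - g1 / g2)  # ~ zeros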
Example #3
def call_xspec_run(map1_key, map2_key,
                   noiseinv1_key, noiseinv2_key):
    r"""a free-standing function which calls the xspec analysis
    batteries included
    """

    # define the bad frequency list
    cutlist = [6, 7, 8, 15, 16, 18, 19, 20, 21, 22, 37, 103, 104, 105, 106,
               107, 108, 130, 131, 132, 133, 134, 237, 244, 254, 255]

    # visual inspection for wacky data #1
    augmented = [177, 194, 195, 196, 197, 198, 201, 204, 209, 213, 229]
    for entry in augmented:
        cutlist.append(entry)

    # visual inspection for wacky data #2
    augmented = [80, 171, 175, 179, 182, 183, 187, 212, 218, 219]
    for entry in augmented:
        cutlist.append(entry)

    # visual inspection of weights
    badweights = [133, 189, 192, 193, 194, 195, 196, 197, 198, 208, 209, 213,
                  233]
    for entry in badweights:
        cutlist.append(entry)

    freq = list(range(256))
    for entry in cutlist:
        try:
            freq.remove(entry)
        except ValueError:
            print("can't cut %d" % entry)

    print("%d of %d freq slices removed" % (256 - len(freq), 256))
    print(freq)

    # define the 1D spectral bins
    #nbins=40
    #bins = np.logspace(math.log10(0.00702349679605685),
    #                   math.log10(2.81187396154818),
    #                   num=(nbins + 1), endpoint=True)
    bins = np.logspace(math.log10(0.00765314),
                       math.log10(2.49977141),
                       num=35, endpoint=True)

    # initialize and calculate the xspec
    simpair = mp.MapPair(map1_key, map2_key,
                         noiseinv1_key, noiseinv2_key,
                         freq)

    # subtract_mean, degrade_resolution, and n_modes are module-level settings
    if subtract_mean:
        simpair.subtract_weighted_mean()

    if degrade_resolution:
        simpair.degrade_resolution()

    if n_modes is not None:
        print("mode subtraction not implemented yet")

    retval = simpair.pwrspec_summary()

    return retval
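
Note that cutlist accumulates duplicates (133, for instance, appears in both the base list and badweights), which is what triggers the "can't cut" messages above. A duplicate-safe equivalent, as a sketch:

cutset = set(cutlist)
freq = [f for f in range(256) if f not in cutset]
print("%d of 256 freq slices removed" % len(cutset))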
Example #4
def call_xspec_run(map1_key, map2_key,
                   noiseinv1_key, noiseinv2_key,
                   inifile=None):
    r"""a free-standing function which calls the xspec analysis
    """
    params_init = {
        "unitless": True,
        "return_3d": False,
        "truncate": False,
        "window": None,
        "refinement": 2,
        "pad": 5,
        "order": 2,
        "freq_list": tuple(range(256)),
        "bins": [0.00765314, 2.49977141, 35]
    }
    prefix = 'xs_'

    params = parse_ini.parse(inifile, params_init, prefix=prefix)
    if inifile is None:
        print "WARNING: no ini file for pwrspec estimation"

    # initialize and calculate the xspec
    simpair = mp.MapPair(map1_key, map2_key,
                         noiseinv1_key, noiseinv2_key,
                         params['freq_list'], avoid_db=True)

    # params['bins'] gives [min, max, num] for the log-spaced 1D bins
    bparam = params['bins']
    bins = np.logspace(math.log10(bparam[0]),
                       math.log10(bparam[1]),
                       num=int(bparam[2]), endpoint=True)

    retval = simpair.pwrspec_summary(window=params['window'],
                                     unitless=params['unitless'],
                                     bins=bins,
                                     truncate=params['truncate'],
                                     refinement=params['refinement'],
                                     pad=params['pad'],
                                     order=params['order'],
                                     return_3d=params['return_3d'])

    return retval
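
For reference, a hypothetical ini file for this entry point, assuming kiyopy-style ini files in which each parameter is a Python assignment named prefix plus key (here 'xs_'); the values shown are illustrative only:

# hypothetical contents of an xspec ini file
xs_unitless = True
xs_return_3d = False
xs_window = 'blackman'
xs_freq_list = tuple(range(32, 224))
xs_bins = [0.00765314, 2.49977141, 35]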
Example #5
    def execute(self):
        '''Clean the maps of foregrounds, save the results, and get the
        autocorrelation.'''

        params = self.params
        freq_list = sp.array(params['freq_list'], dtype=int)
        lags = sp.array(params['lags'])

        # Write parameter file.
        kiyopy.utils.mkparents(params['output_root'])
        parse_ini.write_params(params,
                               params['output_root'] + 'params.ini',
                               prefix=prefix)

        # Get the map data from file as well as the noise inverse.
        if len(params['file_middles']) == 1:
            fmid_name = params['file_middles'][0]
            params['file_middles'] = (fmid_name, fmid_name)

        if len(params['file_middles']) >= 2:
            # Deal with multiple files.
            num_maps = len(params['file_middles'])
            maps = []
            noise_invs = []

            # Load all maps and noises once.
            for map_index in range(0, num_maps):
                map_file = (params['input_root'] +
                            params['file_middles'][map_index] +
                            params['input_end_map'])

                print "Loading map %d of %d." % (map_index + 1, num_maps)

                map_in = algebra.make_vect(algebra.load(map_file))

                maps.append(map_in)
                if not params["no_weights"]:
                    noise_file = (params['input_root'] +
                                  params['file_middles'][map_index] +
                                  params['input_end_noise'])

                    print "Loading noise %d of %d." % (map_index + 1, num_maps)

                    noise_inv = algebra.make_mat(
                        algebra.open_memmap(noise_file, mode='r'))

                    noise_inv = noise_inv.mat_diag()
                else:
                    noise_inv = algebra.ones_like(map_in)

                noise_invs.append(noise_inv)

            pairs = []
            # Make pairs with deepcopies to not make mutability mistakes.
            for map1_index in range(0, num_maps):
                for map2_index in range(0, num_maps):
                    if map2_index > map1_index:
                        map1 = copy.deepcopy(maps[map1_index])
                        map2 = copy.deepcopy(maps[map2_index])
                        noise_inv1 = copy.deepcopy(noise_invs[map1_index])
                        noise_inv2 = copy.deepcopy(noise_invs[map2_index])

                        pair = map_pair.MapPair(map1, map2, noise_inv1,
                                                noise_inv2, freq_list)

                        pair.lags = lags
                        pair.params = params

                        # Keep track of the names of maps in pairs so
                        # it knows what to save later.
                        pair.set_names(params['file_middles'][map1_index],
                                       params['file_middles'][map2_index])
                        pairs.append(pair)

            num_map_pairs = len(pairs)
            print "%d map pairs created from %d maps." % (len(pairs), num_maps)

        # Hold a reference in self.
        self.pairs = pairs

        # Get the maps and noise inverses ready for running.
        if params["convolve"]:
            for pair in pairs:
                pair.degrade_resolution()

        if params['factorizable_noise']:
            for pair in pairs:
                pair.make_noise_factorizable()

        if params['sub_weighted_mean']:
            for pair in pairs:
                pair.subtract_weighted_mean()

        self.pairs = pairs
        # Correlating takes a long time, so if you already have the SVDs you
        # can skip this first correlation; it exists only to produce them and
        # gives the same modes no matter how many you later subtract.
        # Note: map_pairs will have nothing saved in 'fore_corr' if you
        # skip this correlation.
        if not params['skip_fore_corr']:
            # Correlate the maps with multiprocessing. Note that the
            # correlations are saved to file separately then loaded in
            # together because that's (one way) how multiprocessing works.
            fore_pairs = []
            processes_list = []
            for pair_index in range(0, num_map_pairs):
                # Calls 1 multiproc (which governs the correlating) for each
                # pair on a new CPU so you can have all pairs working at once.
                multi = multiprocessing.Process(target=multiproc,
                                                args=(pairs[pair_index],
                                                      params['output_root'],
                                                      pair_index, False))

                processes_list.append(multi)

                multi.start()

            # Waits for all correlations to finish before continuing.
            while any(multi.is_alive() for multi in processes_list):
                print("processing")
                time.sleep(5)

            # just to be safe
            time.sleep(1)

            # A more concise call, but multiprocessing does not behave well
            # with complex objects:
            #runlist = [(pair_index,
            #            params['output_root'],
            #            False) for
            #            pair_index in range(0, num_map_pairs)]
            #pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())
            #pool.map(self.multiproc, runlist)
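
            # For reference, a minimal sketch of the worker this loop targets
            # (hypothetical; the real module-level multiproc also handles
            # naming and errors):
            #   def multiproc(pair, save_dir, pair_index, final):
            #       tag = "corr" if final else "fore_corr"
            #       corr, counts = pair.correlate()
            #       ft.save_pickle((corr, counts),
            #                      save_dir + "map_pair_for_freq_slices_%s_%d.pkl"
            #                      % (tag, pair_index))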

            # Load the correlations and save them to each pair. The pairs that
            # got passed to multiproc are not the same ones as ones in
            # self.pairs, so this must be done to have actual values.
            print "Loading map pairs back into program."
            file_name = params['output_root']
            file_name += "map_pair_for_freq_slices_fore_corr_"

            for count in range(0, num_map_pairs):
                print("Loading correlation for pair %d" % count)
                with open(file_name + str(count) + ".pkl", "rb") as pickle_handle:
                    correlate_results = pickle.load(pickle_handle)
                pairs[count].fore_corr = correlate_results[0]
                pairs[count].fore_counts = correlate_results[1]
                fore_pairs.append(pairs[count])

            self.fore_pairs = copy.deepcopy(fore_pairs)
            # With this, you do not need fore_pairs anymore.
            self.pairs = copy.deepcopy(fore_pairs)

            pairs = self.pairs

            # Get foregrounds.

            # svd_info_list keeps track of all of the modes of all maps in
            # all pairs. This means if you want to subtract a different number
            # of modes for the same maps/noises/frequencies, you have the modes
            # already saved and do not need to run the first correlation again.
            svd_info_list = []
            for pair in pairs:
                vals, modes1, modes2 = cf.get_freq_svd_modes(
                    pair.fore_corr, len(freq_list))
                pair.vals = vals

                # Save ALL of the modes for reference.
                pair.all_modes1 = modes1
                pair.all_modes2 = modes2
                svd_info = (vals, modes1, modes2)
                svd_info_list.append(svd_info)

                # Save only the modes you want to subtract.
                n_modes = params['modes']
                pair.modes1 = modes1[:n_modes]
                pair.modes2 = modes2[:n_modes]

            self.svd_info_list = svd_info_list
            self.pairs = pairs

            if params['save_svd_info']:
                ft.save_pickle(self.svd_info_list, params['svd_file'])
        else:
            # The first correlation and svd has been skipped.
            # This means you already have the modes so you can just load
            # them from file.
            self.svd_info_list = ft.load_pickle(params['svd_file'])
            # Set the svd info to the pairs.
            for i in range(0, len(pairs)):
                svd_info = self.svd_info_list[i]
                pairs[i].vals = svd_info[0]
                pairs[i].all_modes1 = svd_info[1]
                pairs[i].all_modes2 = svd_info[2]
                n_modes = params['modes']
                pairs[i].modes1 = svd_info[1][:n_modes]
                pairs[i].modes2 = svd_info[2][:n_modes]

            self.pairs = pairs

        # Subtract foregrounds.
        for pair_index in range(0, len(pairs)):
            pairs[pair_index].subtract_frequency_modes(
                pairs[pair_index].modes1, pairs[pair_index].modes2)

        # Save cleaned clean maps, cleaned noises, and modes.
        self.save_data(save_maps=params['save_maps'],
                       save_noises=params['save_noises'],
                       save_modes=params['save_modes'])

        # Finish if this was just first pass.
        if params['first_pass_only']:
            self.pairs = pairs
            return

        # Correlate the cleaned maps.
        # Here we could calculate the power spectrum instead eventually.
        temp_pair_list = []
        processes_list = []
        for pair_index in range(0, num_map_pairs):
            multi = multiprocessing.Process(target=multiproc,
                                            args=(pairs[pair_index],
                                                  params['output_root'],
                                                  pair_index, True))

            processes_list.append(multi)
            multi.start()

        while any(multi.is_alive() for multi in processes_list):
            print("processing")
            time.sleep(5)

        # just to be safe
        time.sleep(1)

        # Would rather use the pool-based implementation below, but
        # multiprocessing does not behave well with complex objects:
        #runlist = [(pairs[pair_index],
        #            params['output_root'],
        #            pair_index, True) for
        #            pair_index in range(0, num_map_pairs)]

        #pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())
        #pool.map(multiproc, runlist)

        print "Loading map pairs back into program."
        file_name = params['output_root']
        file_name += "map_pair_for_freq_slices_corr_"

        for count in range(0, num_map_pairs):
            print("Loading correlation for pair %d" % count)
            with open(file_name + str(count) + ".pkl", "rb") as pickle_handle:
                correlate_results = pickle.load(pickle_handle)
            pairs[count].corr = correlate_results[0]
            pairs[count].counts = correlate_results[1]
            temp_pair_list.append(pairs[count])

        self.pairs = copy.deepcopy(temp_pair_list)

        # Get the average correlation and its standard deviation.
        corr_list = []
        for pair in self.pairs:
            corr_list.append(pair.corr)

        self.corr_final, self.corr_std = cf.get_corr_and_std_3d(corr_list)
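
        # cf.get_corr_and_std_3d presumably stacks the per-pair correlation
        # volumes and reduces across pairs, roughly:
        #   stack = np.array(corr_list)
        #   corr_final, corr_std = stack.mean(axis=0), stack.std(axis=0)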

        if params['pickle_slices']:
            ft.save_pickle(self, self.params['output_root'] +
                           'New_Slices_object.pkl')

        return
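
For intuition, a minimal sketch of what subtract_frequency_modes amounts to per line of sight (hypothetical shapes and helper name; the real MapPair method also records the subtracted amplitudes and handles the weights):

import numpy as np

def subtract_modes_sketch(cube, modes):
    # cube: (n_freq, nx, ny) map; modes: iterable of length-n_freq vectors
    flat = cube.reshape(cube.shape[0], -1).copy()
    for mode in modes:
        mode = mode / np.linalg.norm(mode)
        # remove the projection of every line of sight onto this mode
        flat -= np.outer(mode, mode @ flat)
    return flat.reshape(cube.shape)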