Code Example #1
File: pair_set.py Project: OMGitsHongyu/analysis_IM
    def load_pairs(self):
        r"""load the set of map/noise pairs specified by keys handed to the
        database. This sets up operations on the quadratic product
            Q = map1^T noise_inv1 B noise_inv2 map2
        """
        par = self.params
        (self.pairlist, pairdict) = dp.cross_maps(par['map1'], par['map2'],
                                             par['noise_inv1'],
                                             par['noise_inv2'],
                                             noise_inv_suffix=";noise_weight",
                                             verbose=False,
                                             tack_on=self.tack_on_input,
                                             db_to_use=self.datapath_db)

        for pairitem in self.pairlist:
            pdict = pairdict[pairitem]
            print "-" * 80
            dp.print_dictionary(pdict, sys.stdout,
                                key_list=['map1', 'noise_inv1',
                                          'map2', 'noise_inv2'])

            map1 = algebra.make_vect(algebra.load(pdict['map1']))
            map2 = algebra.make_vect(algebra.load(pdict['map2']))
            if par['simfile'] is not None:
                print "adding %s with multiplier %s" % (par['simfile'],
                                                        par['sim_multiplier'])

                sim = algebra.make_vect(algebra.load(par['simfile']))
                sim *= par['sim_multiplier']
                print sim.shape, map1.shape
            else:
                sim = algebra.zeros_like(map1)

            noise_inv1 = algebra.make_vect(algebra.load(pdict['noise_inv1']))
            noise_inv2 = algebra.make_vect(algebra.load(pdict['noise_inv2']))

            pair = map_pair.MapPair(map1 + sim, map2 + sim,
                                    noise_inv1, noise_inv2,
                                    self.freq_list)

            pair.set_names(pdict['tag1'], pdict['tag2'])

            pair.params = self.params
            self.pairs[pairitem] = pair

            if par['subtract_inputmap_from_sim'] or \
               par['subtract_sim_from_inputmap']:
                if par['subtract_inputmap_from_sim']:
                    pair_parallel_track = map_pair.MapPair(map1, map2,
                                                  noise_inv1, noise_inv2,
                                                  self.freq_list)

                if par['subtract_sim_from_inputmap']:
                    pair_parallel_track = map_pair.MapPair(sim, sim,
                                                  noise_inv1, noise_inv2,
                                                  self.freq_list)

                pair_parallel_track.set_names(pdict['tag1'], pdict['tag2'])
                pair_parallel_track.params = self.params
                self.pairs_parallel_track[pairitem] = pair_parallel_track
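
dp.cross_maps is a database helper whose essential output is a list of pair names plus, for each pair, the map/noise file keys and section tags. Below is an illustrative sketch of the pairing logic only (not the real dp.cross_maps), using the "X_with_Y" naming that appears in later examples:

import itertools

def cross_pairs_sketch(sections=("A", "B", "C", "D")):
    """Ordered cross pairs of map sections, named like 'A_with_B' (illustration)."""
    pairlist = []
    pairdict = {}
    for sec1, sec2 in itertools.permutations(sections, 2):
        name = "%s_with_%s" % (sec1, sec2)
        pairlist.append(name)
        pairdict[name] = {"tag1": sec1, "tag2": sec2}
    return pairlist, pairdict

pairlist, pairdict = cross_pairs_sketch()   # 4 sections -> 12 ordered pairs
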
Code Example #2
def generate_proc_sim(input_file, weightfile, output_file,
                      meansub=False, degrade=False):
    r"""make the maps with various combinations of beam conv/meansub"""
    print "%s -> %s (beam, etc.)" % (input_file, output_file)
    simmap = algebra.make_vect(algebra.load(input_file))

    if degrade:
        print "performing common resolution convolution"
        beam_data = sp.array([0.316148488246, 0.306805630985, 0.293729620792,
                 0.281176247549, 0.270856788455, 0.26745856078,
                 0.258910010848, 0.249188429031])
        freq_data = sp.array([695, 725, 755, 785, 815, 845, 875, 905],
                             dtype=float)
        freq_data *= 1.0e6
        beam_diff = sp.sqrt(max(1.1 * beam_data) ** 2 - (beam_data) ** 2)
        common_resolution = beam.GaussianBeam(beam_diff, freq_data)
        # Convolve to a common resolution.
        simmap = common_resolution.apply(simmap)

    if meansub:
        print "performing mean subtraction"
        noise_inv = algebra.make_vect(algebra.load(weightfile))
        means = sp.sum(sp.sum(noise_inv * simmap, -1), -1)
        means /= sp.sum(sp.sum(noise_inv, -1), -1)
        means.shape += (1, 1)
        simmap -= means
        # the weights will be zero in some places
        simmap[noise_inv < 1.e-20] = 0.

    # extra sanity?
    simmap[np.isinf(simmap)] = 0.
    simmap[np.isnan(simmap)] = 0.

    print "saving to" + output_file
    algebra.save(output_file, simmap)
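
The mean-subtraction step removes the noise-weighted spatial mean from every frequency slice. A minimal NumPy sketch of the same arithmetic on plain arrays (hypothetical shapes; the real code operates on algebra vectors):

import numpy as np

# hypothetical (freq, ra, dec) cubes standing in for simmap and noise_inv
simmap = np.random.standard_normal((8, 16, 16))
noise_inv = np.random.uniform(0., 1., simmap.shape)

# noise-weighted spatial mean, one value per frequency slice
means = np.sum(noise_inv * simmap, axis=(1, 2)) / np.sum(noise_inv, axis=(1, 2))
simmap -= means[:, np.newaxis, np.newaxis]

# zero the result wherever the weights are (effectively) zero
simmap[noise_inv < 1.e-20] = 0.
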
Code Example #3
def generate_windows(window="blackman"):
    datapath_db = data_paths.DataPath()
    # first generate a window for the full physical volume
    filename = datapath_db.fetch('simideal_15hr_physical', intend_read=True,
                                 pick='1')
    print filename
    pcube = algebra.make_vect(algebra.load(filename))
    pwindow = algebra.make_vect(fftutil.window_nd(pcube.shape, name=window),
                                axis_names=('freq', 'ra', 'dec'))
    pwindow.copy_axis_info(pcube)
    print pwindow.shape
    algebra.save("physical_window.npy", pwindow)

    # now generate one for the observed region and project onto the physical
    # volume.
    filename = datapath_db.fetch('simideal_15hr_beam', intend_read=True,
                                 pick='1')
    print filename
    ocube = algebra.make_vect(algebra.load(filename))
    owindow = algebra.make_vect(fftutil.window_nd(ocube.shape, name=window),
                                axis_names=('freq', 'ra', 'dec'))
    owindow.copy_axis_info(ocube)
    print owindow.shape
    print owindow.axes
    algebra.save("observed_window.npy", owindow)
    pwindow = physical_gridding.physical_grid(owindow, refinement=2)
    print pwindow.shape
    algebra.save("observed_window_physreg.npy", pwindow)
Code Example #4
def add_sim_radio():
    """script: go through a list of simulations and add those to a selected map
    """
    root_file = "/mnt/raid-project/gmrt/eswitzer/wiggleZ/"
    radio_file = root_file + "modetest_combined_maps/combined_41-73_cleaned_clean_15.npy"
    root_sim = "/mnt/raid-project/gmrt/calinliv/wiggleZ/simulations/test100/"
    root_out = root_file + "simulations_plus_data/"
    radio_data = algebra.make_vect(algebra.load(radio_file))

    for simindex in range(1,101):
        simname = root_sim + "simulated_signal_map_" + \
                  repr(simindex)+"_with_beam.npy"
        filename = root_out + "simulated_signal_plusdata_map_" + \
                   repr(simindex)+"_with_beam.npy"
        simoutname = root_out + "simulated_signal_map_" + \
                   repr(simindex)+"_with_beam.npy"

        sim_data = algebra.make_vect(algebra.load(simname))
        sim_data /= 1000.
        outmap = copy.deepcopy(radio_data)
        outmap += sim_data

        algebra.save(filename, outmap)
        algebra.save(simoutname, sim_data)

        print filename
Code Example #5
def plot_difference(filename1, filename2, title, sigmarange=6., sigmacut=None,
                    transverse=False, outputdir="./", multiplier=1000.,
                    logscale=False, fractional=False,
                    ignore=None, diff_filename="./difference.npy"):
    """make movies of the difference of two maps (assuming same dimensions)"""
    map1 = algebra.make_vect(algebra.load(filename1))
    map2 = algebra.make_vect(algebra.load(filename2))

    if fractional:
        difftitle = "fractional diff."
        dmap = (map1 - map2) / map1 * 100.
    else:
        difftitle = "difference"
        dmap = map1 - map2

    algebra.save(diff_filename, dmap)

    make_cube_movie(diff_filename,
                       difftitle, cube_frame_dir, sigmarange=6.,
                       sigmacut=sigmacut, outputdir=outputdir, ignore=ignore,
                       multiplier=multiplier, transverse=transverse,
                       logscale=False)

    make_cube_movie(filename1,
                       title, cube_frame_dir, sigmarange=sigmarange,
                       sigmacut=sigmacut, outputdir=outputdir, ignore=ignore,
                       multiplier=multiplier, transverse=transverse,
                       logscale=logscale, filetag_suffix="_1")

    make_cube_movie(filename2,
                       title, cube_frame_dir, sigmarange=sigmarange,
                       sigmacut=sigmacut, outputdir=outputdir, ignore=ignore,
                       multiplier=multiplier, transverse=transverse,
                       logscale=logscale, filetag_suffix="_2")
Code Example #6
def calculate_xspec_file(cube1_file,
                         cube2_file,
                         bins,
                         weight1_file=None,
                         weight2_file=None,
                         truncate=False,
                         window="blackman",
                         return_3d=False,
                         unitless=True):

    cube1 = algebra.make_vect(algebra.load(cube1_file))
    cube2 = algebra.make_vect(algebra.load(cube2_file))

    if weight1_file is None:
        weight1 = algebra.ones_like(cube1)
    else:
        weight1 = algebra.make_vect(algebra.load(weight1_file))

    if weight2_file is None:
        weight2 = algebra.ones_like(cube2)
    else:
        weight2 = algebra.make_vect(algebra.load(weight2_file))

    print cube1.shape, cube2.shape, weight1.shape, weight2.shape
    return calculate_xspec(cube1,
                           cube2,
                           weight1,
                           weight2,
                           bins=bins,
                           window=window,
                           unitless=unitless,
                           truncate=truncate,
                           return_3d=return_3d)
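
calculate_xspec itself is not shown here; its core is a weighted cross-power spectrum binned in |k|. A self-contained NumPy sketch of that operation (toy normalization, unit cell size, no window or unitless corrections):

import numpy as np

def xspec_sketch(cube1, cube2, weight1, weight2, bins):
    """Spherically averaged cross-power of two weighted cubes (toy version)."""
    w1 = weight1 / weight1.mean()
    w2 = weight2 / weight2.mean()
    xpower = (np.fft.fftn(cube1 * w1) *
              np.conj(np.fft.fftn(cube2 * w2))).real / cube1.size

    # |k| in cycles per pixel for every FFT cell
    kx, ky, kz = np.meshgrid(*[np.fft.fftfreq(n) for n in cube1.shape],
                             indexing="ij")
    kgrid = np.sqrt(kx ** 2 + ky ** 2 + kz ** 2).flatten()

    which_bin = np.digitize(kgrid, bins)
    power_1d = np.zeros(len(bins) - 1)
    counts_1d = np.zeros(len(bins) - 1, dtype=int)
    flat_power = xpower.flatten()
    for ibin in range(1, len(bins)):
        sel = which_bin == ibin
        counts_1d[ibin - 1] = sel.sum()
        if counts_1d[ibin - 1] > 0:
            power_1d[ibin - 1] = flat_power[sel].mean()

    return power_1d, counts_1d

cube1 = np.random.standard_normal((16, 16, 16))
cube2 = cube1 + 0.1 * np.random.standard_normal(cube1.shape)
bins = np.linspace(0., 0.9, 10)
power_1d, counts_1d = xspec_sketch(cube1, cube2,
                                   np.ones_like(cube1), np.ones_like(cube2), bins)
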
Code Example #7
File: pair_set.py Project: wheeyeon/analysis_IM
    def load_pairs(self):
        r"""load the set of map/noise pairs specified by keys handed to the
        database. This sets up operations on the quadratic product
            Q = map1^T noise_inv1 B noise_inv2 map2
        """
        par = self.params
        (self.pairlist,
         pairdict) = dp.cross_maps(par['map1'],
                                   par['map2'],
                                   par['noise_inv1'],
                                   par['noise_inv2'],
                                   noise_inv_suffix=";noise_weight",
                                   verbose=False,
                                   tack_on=self.tack_on_input,
                                   db_to_use=self.datapath_db)

        for pairitem in self.pairlist:
            pdict = pairdict[pairitem]
            print "-" * 80
            dp.print_dictionary(
                pdict,
                sys.stdout,
                key_list=['map1', 'noise_inv1', 'map2', 'noise_inv2'])

            map1 = algebra.make_vect(algebra.load(pdict['map1']))
            map2 = algebra.make_vect(algebra.load(pdict['map2']))
            if par['simfile'] is not None:
                print "adding %s with multiplier %s" % (par['simfile'],
                                                        par['sim_multiplier'])

                sim = algebra.make_vect(algebra.load(par['simfile']))
                sim *= par['sim_multiplier']
                print sim.shape, map1.shape
            else:
                sim = algebra.zeros_like(map1)

            noise_inv1 = algebra.make_vect(algebra.load(pdict['noise_inv1']))
            noise_inv2 = algebra.make_vect(algebra.load(pdict['noise_inv2']))

            pair = map_pair.MapPair(map1 + sim, map2 + sim, noise_inv1,
                                    noise_inv2, self.freq_list)

            pair.set_names(pdict['tag1'], pdict['tag2'])

            pair.params = self.params
            self.pairs[pairitem] = pair

            if par['subtract_inputmap_from_sim'] or \
               par['subtract_sim_from_inputmap']:
                if par['subtract_inputmap_from_sim']:
                    pair_parallel_track = map_pair.MapPair(
                        map1, map2, noise_inv1, noise_inv2, self.freq_list)

                if par['subtract_sim_from_inputmap']:
                    pair_parallel_track = map_pair.MapPair(
                        sim, sim, noise_inv1, noise_inv2, self.freq_list)

                pair_parallel_track.set_names(pdict['tag1'], pdict['tag2'])
                pair_parallel_track.params = self.params
                self.pairs_parallel_track[pairitem] = pair_parallel_track
Code Example #8
    def realize_simulation(self):
        """do basic handling to call Richard's simulation code
        this produces self.sim_map and self.sim_map_phys
        """
        if self.scenario == "nostr":
            print "running dd+vv and no streaming case"
            simobj = corr21cm.Corr21cm.like_kiyo_map(self.template_map)
            maps = simobj.get_kiyo_field_physical(refinement=self.refinement)

        else:
            if self.scenario == "str":
                print "running dd+vv and streaming simulation"
                simobj = corr21cm.Corr21cm.like_kiyo_map(
                    self.template_map, sigma_v=self.streaming_dispersion)

                maps = simobj.get_kiyo_field_physical(
                    refinement=self.refinement)

            if self.scenario == "ideal":
                print "running dd-only and no mean simulation"
                simobj = corr21cm.Corr21cm.like_kiyo_map(self.template_map)
                maps = simobj.get_kiyo_field_physical(
                    refinement=self.refinement,
                    density_only=True,
                    no_mean=True,
                    no_evolution=True)

        (gbtsim, gbtphys, physdim) = maps

        # process the physical-space map
        self.sim_map_phys = algebra.make_vect(gbtphys,
                                              axis_names=('freq', 'ra', 'dec'))
        pshp = self.sim_map_phys.shape

        # define the axes of the physical map; several alternatives are commented
        info = {}
        info['axes'] = ('freq', 'ra', 'dec')
        info['type'] = 'vect'
        info['freq_delta'] = abs(physdim[0] - physdim[1]) / float(pshp[0] - 1)
        info['freq_centre'] = physdim[0] + info['freq_delta'] * float(
            pshp[0] // 2)
        #        'freq_centre': abs(physdim[0] + physdim[1]) / 2.,

        info['ra_delta'] = abs(physdim[2]) / float(pshp[1] - 1)
        #info['ra_centre'] = info['ra_delta'] * float(pshp[1] // 2)
        #        'ra_centre': abs(physdim[2]) / 2.,
        info['ra_centre'] = 0.

        info['dec_delta'] = abs(physdim[3]) / float(pshp[2] - 1)
        #info['dec_centre'] = info['dec_delta'] * float(pshp[2] // 2)
        #        'dec_centre': abs(physdim[3]) / 2.,
        info['dec_centre'] = 0.

        self.sim_map_phys.info = info

        # process the map in observation coordinates
        self.sim_map = algebra.make_vect(gbtsim,
                                         axis_names=('freq', 'ra', 'dec'))
        self.sim_map.copy_axis_info(self.template_map)
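
The info dictionary pins each axis down by a centre value and a spacing. Assuming the convention suggested by the set_axis_info calls in later examples (the centre value corresponds to index n // 2), the axis values can be reconstructed as in this sketch:

import numpy as np

def axis_from_info(n, centre, delta):
    """Axis values assuming the 'centre' value sits at index n // 2 (assumed convention)."""
    return centre + delta * (np.arange(n) - n // 2)

# example: a physical frequency axis spanning two hypothetical band edges
n_freq = 64
phys_lo, phys_hi = 700., 900.
freq_delta = abs(phys_lo - phys_hi) / float(n_freq - 1)
freq_centre = phys_lo + freq_delta * float(n_freq // 2)
freq_axis = axis_from_info(n_freq, freq_centre, freq_delta)
assert np.isclose(freq_axis[0], phys_lo) and np.isclose(freq_axis[-1], phys_hi)
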
Code Example #9
def find_avg_fsky(map_key, tack_on=None, refinement=2, pad=5, order=1):
    """Take all the pairs that enter the autopower, open their weight files and
    find the fsky for each data treatment.
    In a pipeline:
        fsky = find_avg_fsky(self.params["map_key"],
                            tack_on=self.params["tack_on"],
                            refinement=self.params["refinement"],
                            pad=self.params["pad"],
                            order=self.params["order"])
    """
    fsky = {}
    datapath_db = dp.DataPath()

    map_cases = datapath_db.fileset_cases(map_key, "pair;type;treatment")

    # This code is essentially verbatim for the permutation in the real
    # autopower
    unique_pairs = dp.GBTauto_cross_pairs(map_cases['pair'],
                                          map_cases['pair'],
                                          cross_sym="_with_")

    treatment_list = map_cases['treatment']

    for treatment in treatment_list:
        for item in unique_pairs:
            dbkeydict = {}
            mapset0 = (map_key, item[0], treatment)
            mapset1 = (map_key, item[1], treatment)
            dbkeydict['noiseinv1_key'] = "%s:%s;noise_inv;%s" % mapset0
            dbkeydict['noiseinv2_key'] = "%s:%s;noise_inv;%s" % mapset1
            files = dp.convert_dbkeydict_to_filedict(dbkeydict,
                                                     datapath_db=datapath_db,
                                                     tack_on=tack_on)

            print files['noiseinv1_key'], files['noiseinv2_key']
            weight1 = algebra.make_vect(algebra.load(files['noiseinv1_key']))
            weight2 = algebra.make_vect(algebra.load(files['noiseinv2_key']))

            physweight1 = bh.repackage_kiyo(
                pg.physical_grid(weight1,
                                 refinement=refinement,
                                 pad=pad,
                                 order=order))

            physweight2 = bh.repackage_kiyo(
                pg.physical_grid(weight2,
                                 refinement=refinement,
                                 pad=pad,
                                 order=order))

            #fsky = np.sum(physweight1 * physweight2)**2
            #fsky /= np.sum(physweight1**2 * physweight2**2)
            #fsky /= float(physweight1.size)
            fsky = np.sum(weight1 * weight2)**2
            fsky /= np.sum(weight1**2 * weight2**2)
            fsky /= float(weight1.size)
            print "volume factor in noise weight: ", fsky

    return fsky
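
The quantity printed at the end is an effective observed-volume fraction built from the two weight cubes, fsky = (sum w1*w2)^2 / (N * sum w1^2*w2^2). A standalone NumPy check of that formula:

import numpy as np

def effective_fraction(weight1, weight2):
    """(sum w1*w2)^2 / (N * sum w1^2*w2^2): 1.0 for uniform weights, smaller
    when the weight is concentrated in a subvolume."""
    fsky = np.sum(weight1 * weight2) ** 2
    fsky /= np.sum(weight1 ** 2 * weight2 ** 2)
    fsky /= float(weight1.size)
    return fsky

weight = np.zeros((4, 8, 8))
weight[:, :4, :] = 1.                     # uniform weight over half the volume
assert np.isclose(effective_fraction(weight, weight), 0.5)
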
Code Example #10
def add_manual_mask(source_key, cut_freq_list=None,
                    signal_name='map', noise_inv_name='noise_inv',
                    weight_name='weight', divider_token=";"):
    r"""
    `source_key` is the file db key for the maps to combine
    `signal_name` is the tag in the file db entry for the signal maps
    `noise_inv_name` is the tag in the file db entry for the N^-1 weights
    `weight_name` is the tag in the file db entry for the weights to write out
    `divider_token` is the token that divides the map section name
            from the data type e.g. "A_with_B;noise_inv"
    """
    datapath_db = data_paths.DataPath()
    source_fdb = datapath_db.fetch(source_key, silent=True)
    source_fdict = source_fdb[1]

    # accumulate all the files to combine
    noise_inv_keys = {}
    weight_keys = {}
    signal_keys = {}
    for filekey in source_fdb[0]:
        if divider_token in filekey:
            data_type = filekey.split(divider_token)[1]
            map_section = filekey.split(divider_token)[0]

            if data_type == signal_name:
                signal_keys[map_section] = source_fdict[filekey]

            if data_type == noise_inv_name:
                noise_inv_keys[map_section] = source_fdict[filekey]

            if data_type == weight_name:
                weight_keys[map_section] = source_fdict[filekey]

    for mapkey in signal_keys:
        signal_file = signal_keys[mapkey]
        noise_inv_file = noise_inv_keys[mapkey]
        weight_file = weight_keys[mapkey]
        print "loading pair: %s %s -> %s" % \
                (signal_file, noise_inv_file, weight_file)
        signal_map = algebra.make_vect(algebra.load(signal_file))
        weightmap = algebra.make_vect(algebra.load(noise_inv_file))

        # set the new weights to zero where the N^-1 is small
        # or the signal map is inf or nan
        weightmap[np.isnan(weightmap)] = 0.
        weightmap[np.isinf(weightmap)] = 0.
        weightmap[np.isnan(signal_map)] = 0.
        weightmap[np.isinf(signal_map)] = 0.
        weightmap[weightmap < 1.e-20] = 0.

        if cut_freq_list is not None:
            for cutindex in cut_freq_list:
                weightmap[cutindex, :, :] = 0.

        # could also determine the filename here, outside of the database
        #outputdir = datapath_db.fetch_parent(source_key, return_path=True)
        #weight_out = "%s/%s" % (outputdir, source_key)
        algebra.compressed_array_summary(weightmap, "new weight map")
        algebra.save(weight_file, weightmap)
Code Example #11
def map_pair_cal(uncal_maplist, uncal_weightlist, calfactor_outlist,
                 dirtymap_inlist, dirtymap_outlist,
                 convolve=True, factorizable_noise=True,
                 sub_weighted_mean=True, freq_list=range(256)):

    map1file = uncal_maplist.pop(0)
    weight1file = uncal_weightlist.pop(0)
    calfactor_outlist.pop(0)
    dirtymap_out0 = dirtymap_outlist.pop(0)
    dirtymap_in0 = dirtymap_inlist.pop(0)

    # do nothing to the reference map
    ref_dirtymap = algebra.make_vect(algebra.load(dirtymap_in0))
    algebra.save(dirtymap_out0, ref_dirtymap)

    # load maps into pairs
    svdout = shelve.open("correlation_pairs.shelve")
    for map2file, weight2file, calfactor_outfile, \
        dirty_infile, dirty_outfile in zip(uncal_maplist, \
            uncal_weightlist, calfactor_outlist,
            dirtymap_inlist, dirtymap_outlist):

        print map1file, weight1file, map2file, weight2file

        pair = map_pair.MapPair(map1file, map2file,
                                weight1file, weight2file,
                                freq_list, avoid_db=True)

        if factorizable_noise:
            pair.make_noise_factorizable()

        if sub_weighted_mean:
            pair.subtract_weighted_mean()

        if convolve:
            pair.degrade_resolution()

        (corr, counts) = pair.correlate()
        svd_info = ce.get_freq_svd_modes(corr, len(freq_list))
        svdout[map2file] = svd_info

        # write out the left right and cal factors
        leftmode = svd_info[1][0]
        rightmode = svd_info[2][0]
        calfactor = leftmode/rightmode

        facout = open(calfactor_outfile, "w")
        for outvals in zip(leftmode, rightmode, calfactor):
            facout.write("%10.15g %10.15g %10.15g\n" % outvals)

        facout.close()

        newmap = algebra.make_vect(algebra.load(dirty_infile))
        newmap[freq_list, :, :] *= calfactor[:,np.newaxis,np.newaxis]
        algebra.save(dirty_outfile, newmap)
        print dirty_outfile

    svdout.close()
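
ce.get_freq_svd_modes is not listed here; judging from how its output is used, the calibration factor is the ratio of the leading left and right singular vectors of the frequency-frequency cross-correlation matrix. A hedged NumPy sketch of that step (toy correlation matrix, assumed return layout):

import numpy as np

def leading_svd_modes(corr):
    """Leading left/right frequency modes of an (nfreq x nfreq) correlation matrix."""
    u, s, vt = np.linalg.svd(corr)
    return s, u[:, 0], vt[0, :]

# toy cross-correlation of two maps that differ by a per-frequency gain g
nfreq = 8
g = 1.0 + 0.05 * np.random.standard_normal(nfreq)
corr = np.outer(g, np.ones(nfreq))        # rank-1: gain error on one side only

s, leftmode, rightmode = leading_svd_modes(corr)
# ratio of the dominant modes tracks g up to an overall normalization
calfactor = leftmode / rightmode
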
Code Example #12
File: map_pair.py Project: wheeyeon/analysis_IM
    def __init__(self,
                 map1,
                 map2,
                 noise_inv1,
                 noise_inv2,
                 freq,
                 input_filenames=False,
                 conv_factor=1.1):
        r"""
        arguments: map1, map2, noise_inv1, noise_inv2, freq
        conv_factor is the factor by which to multiply the largest beam
        in the convolution to a common resolution
        """
        if input_filenames:
            self.map1 = algebra.make_vect(algebra.load(map1))
            self.map2 = algebra.make_vect(algebra.load(map2))
            if noise_inv1:
                print "loading noise1 file: " + noise_inv1
                self.noise_inv1 = algebra.make_vect(algebra.load(noise_inv1))
            else:
                print "WARNING: map1 has unity weight; no file given"
                self.noise_inv1 = algebra.ones_like(self.map1)

            if noise_inv2:
                print "loading noise2 file: " + noise_inv2
                self.noise_inv2 = algebra.make_vect(algebra.load(noise_inv2))
            else:
                print "WARNING: map2 has unity weight; no file given"
                self.noise_inv2 = algebra.ones_like(self.map2)

        else:
            self.map1 = map1
            self.map2 = map2
            self.noise_inv1 = noise_inv1
            self.noise_inv2 = noise_inv2

        self.freq = freq
        self.conv_factor = conv_factor

        # maps in physical coordinates (derived)
        self.phys_map1 = None
        self.phys_map2 = None
        self.phys_noise_inv1 = None
        self.phys_noise_inv2 = None

        # give infinite noise to masked bands
        self.sanitize()

        # Set attributes.
        self.left_modes = 0
        self.right_modes = 0
        # For saving, to keep track of each mapname.
        self.map1_name = ''
        self.map2_name = ''
        # Which section [A, B, C, D...] the maps is from.
        self.map1_code = ''
        self.map2_code = ''
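
A minimal usage sketch for this constructor, assuming the analysis_IM modules (map_pair, algebra) are importable as in the examples above; the file paths, section tags, and frequency range here are hypothetical:

# hypothetical map/weight cubes in the analysis_IM .npy format
map1_file = "sec_A_cleaned_clean_map_I.npy"
map2_file = "sec_B_cleaned_clean_map_I.npy"
weight1_file = "sec_A_cleaned_noise_inv_I.npy"
weight2_file = "sec_B_cleaned_noise_inv_I.npy"
freq_list = range(40, 216)               # frequency slices to keep (hypothetical)

pair = map_pair.MapPair(map1_file, map2_file,
                        weight1_file, weight2_file,
                        freq_list, input_filenames=True)
pair.set_names("A", "B")
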
Code Example #13
    def subtract_frequency_modes_slow(self, modes1, modes2=None):
        r"""Subtract frequency mode from the map.

        Parameters
        ----------
        modes1: list of 1D arrays.
            Arrays must be the same length as self.freq.  Modes to subtract out
            of map one.
        modes2: list of 1D arrays.
            Modes to subtract out of map 2.  If `None` set to `modes1`.

        """

        if modes2 == None:
            modes2 = modes1

        map1 = self.map1
        map2 = self.map2
        freq = self.freq

        # First map.
        outmap_left = sp.empty((len(modes1), ) + map1.shape[1:])
        outmap_left = algebra.make_vect(outmap_left,
                                        axis_names=('freq', 'ra', 'dec'))
        outmap_left.copy_axis_info(map1)
        for ira in range(map1.shape[1]):
            for jdec in range(map1.shape[2]):
                # if sp.any(map1.data.mask[ira, jdec, freq]):
                #    continue
                # else:
                for mode_index, mode_vector in enumerate(modes1):
                    # v.shape = freq.shape
                    mode_vector = mode_vector.reshape(freq.shape)
                    # amp = sp.sum(mode_vector*map1.data[ira, jdec, freq])
                    amp = sp.dot(mode_vector, map1[freq, ira, jdec])
                    map1[freq, ira, jdec] -= amp * mode_vector
                    outmap_left[mode_index, ira, jdec] = amp
        self.left_modes = outmap_left

        # Second map.
        outmap_right = sp.empty((len(modes2), ) + map2.shape[1:])
        outmap_right = algebra.make_vect(outmap_right,
                                         axis_names=('freq', 'ra', 'dec'))
        outmap_right.copy_axis_info(map2)
        for ira in range(map2.shape[1]):
            for jdec in range(map2.shape[2]):
                # if sp.any(map2.data.mask[ira, jdec, freq]):
                #    continue
                # else:
                for mode_index, mode_vector in enumerate(modes2):
                    # mode_vector.shape = freq.shape
                    mode_vector = mode_vector.reshape(freq.shape)
                    amp = sp.dot(mode_vector, map2[freq, ira, jdec])
                    map2[freq, ira, jdec] -= amp * mode_vector
                    outmap_right[mode_index, ira, jdec] = amp
        self.right_modes = outmap_right
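
The double loop over (ra, dec) projects each line of sight onto a frequency mode and removes that component. The same operation can be written without per-pixel loops; a NumPy sketch on plain arrays:

import numpy as np

def subtract_frequency_mode(cube, mode):
    """Remove one frequency mode from every (ra, dec) line of sight.

    cube: (n_freq, n_ra, n_dec) array; mode: length n_freq unit vector.
    Returns the cleaned cube and the (n_ra, n_dec) map of mode amplitudes."""
    amp = np.tensordot(mode, cube, axes=(0, 0))        # (n_ra, n_dec)
    cleaned = cube - mode[:, None, None] * amp[None, :, :]
    return cleaned, amp

cube = np.random.standard_normal((16, 8, 8))
mode = np.ones(16) / np.sqrt(16.)
cleaned, amp = subtract_frequency_mode(cube, mode)
# the cleaned cube has no projection left along the mode
assert np.allclose(np.tensordot(mode, cleaned, axes=(0, 0)), 0.)
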
Code Example #14
File: noisemk.py Project: YichaoLi/PowerMaker
	def process_map(self, imap_fname, nmap_fname, ii, mock_fname=None):
		params = self.params
		sigma = params['sigma']
		mu = params['mu']
		out_root = params['output_root']
		in_root = params['input_root']
		
		imap = algebra.load(in_root + imap_fname)
		imap = algebra.make_vect(imap)
		#print imap.flatten().mean()
		imap = imap - imap.flatten().mean()
		if imap.axes != ('freq', 'ra', 'dec') :
			raise ce.DataError('AXES ERROR!')

		print ' :: Set Noise to Gaussian'
		np.random.seed()
		nmap = algebra.info_array(
			sigma*np.random.randn(imap.shape[0],imap.shape[1], imap.shape[2])+mu)
		nmap.axes = imap.axes
		nmap = algebra.make_vect(nmap)
		nmap.info = imap.info
		if nmap.axes != ('freq', 'ra', 'dec') :
			raise ce.DataError('AXES ERROR!')

		## add noise to map ##
		imap = imap + nmap
		non0 = nmap.nonzero()
		nmap[non0] = (1./sigma)**2

		#if mock_fname != None:
		#	mmap = algebra.info_array(
		#		2.*np.random.randn(imap.shape[0],imap.shape[1], imap.shape[2])-0.5)
		#	mmap.axes = imap.axes
		#	mmap = algebra.make_vect(mmap)
		#	box, nbox, mbox = self.fill(imap, nmap, mmap)
		#	pkrm_nfname = out_root + 'fftbox_' +  mock_fname
		#	algebra.save(pkrm_nfname, mbox)
		#else:
		#	box, nbox = self.fill(imap, nmap)

		hr = params['hr']
		mid = params['mid']
		last = params['last']
		pol_str = params['polarizations'][0]
		end = pol_str
		if len(last)!=0:
			end = end + last[ii]
		end = end + '_' + str(ii)
		imap_fname = hr[ii] + mid[0] + end + '.npy'
		nmap_fname = hr[ii] + mid[1] + end + '.npy'

		pkrm_fname = out_root + imap_fname
		algebra.save(pkrm_fname, imap)

		pkrm_nfname = out_root + nmap_fname
		algebra.save(pkrm_nfname, nmap)
Code Example #15
    def realize_simulation(self):
        """do basic handling to call Richard's simulation code
        this produces self.sim_map and self.sim_map_phys
        """
        if self.scenario == "nostr":
            print "running dd+vv and no streaming case"
            simobj = corr21cm.Corr21cm.like_kiyo_map(self.template_map)
            maps = simobj.get_kiyo_field_physical(refinement=self.refinement)

        else:
            if self.scenario == "str":
                print "running dd+vv and streaming simulation"
                simobj = corr21cm.Corr21cm.like_kiyo_map(self.template_map,
                                           sigma_v=self.streaming_dispersion)

                maps = simobj.get_kiyo_field_physical(refinement=self.refinement)

            if self.scenario == "ideal":
                print "running dd-only and no mean simulation"
                simobj = corr21cm.Corr21cm.like_kiyo_map(self.template_map)
                maps = simobj.get_kiyo_field_physical(
                                            refinement=self.refinement,
                                            density_only=True,
                                            no_mean=True,
                                            no_evolution=True)

        (gbtsim, gbtphys, physdim) = maps

        # process the physical-space map
        self.sim_map_phys = algebra.make_vect(gbtphys, axis_names=('freq', 'ra', 'dec'))
        pshp = self.sim_map_phys.shape

        # define the axes of the physical map; several alternatives are commented
        info = {}
        info['axes'] = ('freq', 'ra', 'dec')
        info['type'] = 'vect'
        info['freq_delta'] = abs(physdim[0] - physdim[1]) / float(pshp[0] - 1)
        info['freq_centre'] = physdim[0] + info['freq_delta'] * float(pshp[0] // 2)
        #        'freq_centre': abs(physdim[0] + physdim[1]) / 2.,

        info['ra_delta'] = abs(physdim[2]) / float(pshp[1] - 1)
        #info['ra_centre'] = info['ra_delta'] * float(pshp[1] // 2)
        #        'ra_centre': abs(physdim[2]) / 2.,
        info['ra_centre'] = 0.

        info['dec_delta'] = abs(physdim[3]) / float(pshp[2] - 1)
        #info['dec_centre'] = info['dec_delta'] * float(pshp[2] // 2)
        #        'dec_centre': abs(physdim[3]) / 2.,
        info['dec_centre'] = 0.

        self.sim_map_phys.info = info

        # process the map in observation coordinates
        self.sim_map = algebra.make_vect(gbtsim, axis_names=('freq', 'ra', 'dec'))
        self.sim_map.copy_axis_info(self.template_map)
Code Example #16
def test_scheme(template_file, sim_filename1, sim_filename2):
    r"""look at some differences between maps"""
    template_map = algebra.make_vect(algebra.load(template_file))
    gbtsim1 = realize_simulation(template_map, scenario="streaming", seed=5489, refinement=1.0)
    gbtsim2 = realize_simulation(template_map, seed=5489, refinement=1.0)

    sim_map1 = algebra.make_vect(gbtsim1, axis_names=("freq", "ra", "dec"))
    sim_map2 = algebra.make_vect(gbtsim2, axis_names=("freq", "ra", "dec"))
    sim_map1.copy_axis_info(template_map)
    sim_map2.copy_axis_info(template_map)
    algebra.save(sim_filename1, sim_map1)
    algebra.save(sim_filename2, sim_map2)
Code Example #17
File: pair_set.py Project: astrofanlee/project_TL
def divide_iqu_map(source_dict=None, target_dict=None, map_dict=None):
    if source_dict != None:
        iqu        = algebra.make_vect(algebra.load(source_dict['map']))
        iqu_weight = algebra.make_vect(algebra.load(source_dict['weight']))
    elif map_dict != None:
        iqu        = algebra.make_vect(map_dict['map'])
        iqu_weight = algebra.make_vect(map_dict['weight'])
    else:
        print "Error: Can not find iqu map"

    nfreq = iqu.shape[0]/3

    imap = algebra.make_vect(iqu[ 0*nfreq : 1*nfreq, ...])
    qmap = algebra.make_vect(iqu[ 1*nfreq : 2*nfreq, ...])
    umap = algebra.make_vect(iqu[ 2*nfreq : 3*nfreq, ...])

    imap.info = iqu.info
    qmap.info = iqu.info
    umap.info = iqu.info

    imap.copy_axis_info(iqu)
    qmap.copy_axis_info(iqu)
    umap.copy_axis_info(iqu)

    imap_weight = algebra.make_vect(iqu_weight[ 0*nfreq : 1*nfreq, ...])
    qmap_weight = algebra.make_vect(iqu_weight[ 1*nfreq : 2*nfreq, ...])
    umap_weight = algebra.make_vect(iqu_weight[ 2*nfreq : 3*nfreq, ...])

    imap_weight.info = iqu_weight.info
    qmap_weight.info = iqu_weight.info
    umap_weight.info = iqu_weight.info

    imap_weight.copy_axis_info(iqu_weight)
    qmap_weight.copy_axis_info(iqu_weight)
    umap_weight.copy_axis_info(iqu_weight)

    if target_dict != None:
        algebra.save(target_dict['imap'], imap)
        algebra.save(target_dict['qmap'], qmap)
        algebra.save(target_dict['umap'], umap)

        algebra.save(target_dict['imap_weight'], imap_weight)
        algebra.save(target_dict['qmap_weight'], qmap_weight)
        algebra.save(target_dict['umap_weight'], umap_weight)
    else:
        map_dict = {}
        map_dict['imap'] = imap
        map_dict['qmap'] = qmap
        map_dict['umap'] = umap
        map_dict['imap_weight'] = imap_weight
        map_dict['qmap_weight'] = qmap_weight
        map_dict['umap_weight'] = umap_weight
        return map_dict
Code Example #18
File: functions.py Project: astrofanlee/project_TL
def getmap(imap_fname, nmap_fname, mmap_fname=None, half=None):
    """
    get the matrix of intensity map and noise map
    """
    #in_root = params['input_root']

    imap = algebra.load(imap_fname)
    imap = algebra.make_vect(imap)

    if half!=None:
        imap = getmap_halfz(imap, half)
    #print "--The neam value for imap is:",imap.flatten().mean(),"--"
    #imap = imap - imap.flatten().mean()
    if imap.axes != ('freq', 'ra', 'dec') :
        raise ce.DataError('AXES ERROR!')

    try:
        nmap = algebra.load(nmap_fname)
        nmap = algebra.make_vect(nmap)

        if half!=None:
            nmap = getmap_halfz(nmap, half)

        bad = nmap<1.e-5*nmap.flatten().max()
        nmap[bad] = 0.
        non0 = nmap.nonzero()
        #imap[non0] = imap[non0]/nmap[non0]
    except IOError:
        print 'NO Noise File :: Set Noise to One'
        nmap = algebra.info_array(sp.ones(imap.shape))
        nmap.axes = imap.axes
        nmap = algebra.make_vect(nmap)
    nmap.info = imap.info
    if nmap.axes != ('freq', 'ra', 'dec') :
        raise ce.DataError('AXES ERROR!')

    if mmap_fname != None:
        try:
            mmap = algebra.load(mmap_fname)
            mmap = algebra.make_vect(mmap)
            if half!=None:
                mmap = getmap_halfz(mmap, half)
        except IOError:
            print 'NO Mock File :: Make it!'
            mmap = algebra.info_array(
                2.*np.random.rand(imap.shape[0],imap.shape[1], imap.shape[2])-0.5)
            mmap.axes = imap.axes
            mmap = algebra.make_vect(mmap)
        
        return imap, nmap, mmap
    else:
        return imap, nmap
Code Example #19
    def __init__(self,
                 map1,
                 map2,
                 noise_inv1,
                 noise_inv2,
                 freq,
                 input_filenames=False):
        r"""
        arguments: map1, map2, noise_inv1, noise_inv2, freq
        """
        if input_filenames:
            self.map1 = algebra.make_vect(algebra.load(map1))
            self.map2 = algebra.make_vect(algebra.load(map2))
            self.noise_inv1 = algebra.make_vect(algebra.load(noise_inv1))
            self.noise_inv2 = algebra.make_vect(algebra.load(noise_inv2))
        else:
            self.map1 = map1
            self.map2 = map2
            self.noise_inv1 = noise_inv1
            self.noise_inv2 = noise_inv2

        # older method that uses the database
        #     self.datapath_db = data_paths.DataPath()
        #     self.map1 = self.datapath_db.fetch_multi(map1)
        #     self.map2 = self.datapath_db.fetch_multi(map2)
        #     self.noise_inv1 = self.datapath_db.fetch_multi(noise_inv1)
        #     self.noise_inv2 = self.datapath_db.fetch_multi(noise_inv2)

        self.freq = freq

        # set the physical-dimension maps to None
        self.phys_map1 = None
        self.phys_map2 = None
        self.phys_noise_inv1 = None
        self.phys_noise_inv2 = None

        # Give infinite noise to unconsidered frequencies
        self.sanitize()

        # Set attributes.
        self.counts = 0
        self.modes1 = 0
        self.modes2 = 0
        self.left_modes = 0
        self.right_modes = 0
        # For saving, to keep track of each mapname.
        self.map1_name = ''
        self.map2_name = ''
        # Which section [A, B, C, D...] the maps is from.
        self.map1_code = ''
        self.map2_code = ''
Code Example #20
File: map_pair.py Project: OMGitsHongyu/analysis_IM
    def __init__(self, map1, map2, noise_inv1, noise_inv2, freq,
                 input_filenames=False, conv_factor=1.1):
        r"""
        arguments: map1, map2, noise_inv1, noise_inv2, freq
        conv_factor is the factor by which to multiply the largest beam
        in the convolution to a common resolution
        """
        if input_filenames:
            self.map1 = algebra.make_vect(algebra.load(map1))
            self.map2 = algebra.make_vect(algebra.load(map2))
            if noise_inv1:
                print "loading noise1 file: " + noise_inv1
                self.noise_inv1 = algebra.make_vect(algebra.load(noise_inv1))
            else:
                print "WARNING: map1 has unity weight; no file given"
                self.noise_inv1 = algebra.ones_like(self.map1)

            if noise_inv2:
                print "loading noise2 file: " + noise_inv2
                self.noise_inv2 = algebra.make_vect(algebra.load(noise_inv2))
            else:
                print "WARNING: map2 has unity weight; no file given"
                self.noise_inv2 = algebra.ones_like(self.map2)

        else:
            self.map1 = map1
            self.map2 = map2
            self.noise_inv1 = noise_inv1
            self.noise_inv2 = noise_inv2

        self.freq = freq
        self.conv_factor = conv_factor

        # maps in physical coordinates (derived)
        self.phys_map1 = None
        self.phys_map2 = None
        self.phys_noise_inv1 = None
        self.phys_noise_inv2 = None

        # give infinite noise to masked bands
        self.sanitize()

        # Set attributes.
        self.left_modes = 0
        self.right_modes = 0
        # For saving, to keep track of each mapname.
        self.map1_name = ''
        self.map2_name = ''
        # Which section [A, B, C, D...] the maps is from.
        self.map1_code = ''
        self.map2_code = ''
Code Example #21
    def load_pairs(self, regenerate=True):
        r"""load the set of map/noise pairs specified by keys handed to the
        database. This sets up operations on the quadratic product
            Q = map1^T noise_inv1 B noise_inv2 map2
        """
        par = self.params
        (self.pairlist, pairdict) = dp.cross_maps(par['map1'],
                                                  par['map2'],
                                                  par['noise_inv1'],
                                                  par['noise_inv2'],
                                                  verbose=False)

        for pairitem in self.pairlist:
            pdict = pairdict[pairitem]
            print "-" * 80
            dp.print_dictionary(
                pdict,
                sys.stdout,
                key_list=['map1', 'noise_inv1', 'map2', 'noise_inv2'])

            map1 = algebra.make_vect(algebra.load(pdict['map1']))
            map2 = algebra.make_vect(algebra.load(pdict['map2']))
            sim = algebra.make_vect(algebra.load(par['simfile']))

            if not par['no_weights']:
                noise_inv1 = self.process_noise_inv(pdict['noise_inv1'],
                                                    regenerate=regenerate)

                noise_inv2 = self.process_noise_inv(pdict['noise_inv2'],
                                                    regenerate=regenerate)
            else:
                noise_inv1 = algebra.ones_like(map1)
                noise_inv2 = algebra.ones_like(map2)

            pair = map_pair.MapPair(map1 + sim, map2 + sim, noise_inv1,
                                    noise_inv2, self.freq_list)

            pair.set_names(pdict['tag1'], pdict['tag2'])
            pair.lags = self.lags
            pair.params = self.params
            self.pairs[pairitem] = pair

            pair_nosim = map_pair.MapPair(map1, map2, noise_inv1, noise_inv2,
                                          self.freq_list)

            pair_nosim.set_names(pdict['tag1'], pdict['tag2'])
            pair_nosim.lags = self.lags
            pair_nosim.params = self.params
            self.pairs_nosim[pairitem] = pair_nosim
Code Example #22
def test_scheme(template_file, sim_filename1, sim_filename2):
    r"""look at some differences between maps"""
    template_map = algebra.make_vect(algebra.load(template_file))
    gbtsim1 = realize_simulation(template_map,
                                 scenario='streaming',
                                 seed=5489,
                                 refinement=1.)
    gbtsim2 = realize_simulation(template_map, seed=5489, refinement=1.)

    sim_map1 = algebra.make_vect(gbtsim1, axis_names=('freq', 'ra', 'dec'))
    sim_map2 = algebra.make_vect(gbtsim2, axis_names=('freq', 'ra', 'dec'))
    sim_map1.copy_axis_info(template_map)
    sim_map2.copy_axis_info(template_map)
    algebra.save(sim_filename1, sim_map1)
    algebra.save(sim_filename2, sim_map2)
Code Example #23
 def get_map(self):
     map = sp.zeros((self.nf, self.nra, self.ndec))
     map = al.make_vect(map, ('freq', 'ra', 'dec'))
     map.set_axis_info('freq', 800e6, 1e6)
     map.set_axis_info('ra', 21, self.map_size / self.nra)
     map.set_axis_info('dec', 0, self.map_size / self.ndec)
     return map
Code Example #24
 def setUp(self):
     Reader = fitsGBT.Reader("./testdata/testfile_guppi_combined.fits",
                             feedback=0)
     self.Blocks = Reader.read((), 1)
     Data = self.Blocks[0]
     Data.calc_freq()
     params = {'dm_deweight_time_slope': True}
     Maker = dirty_map.DirtyMapMaker(params, feedback=0)
     n_chan = Data.dims[-1]
     Maker.n_chan = n_chan
     Maker.pols = (1, 2, 3, 4)
     Maker.pol_ind = 0
     Maker.band_centres = (Data.freq[Data.dims[-1] // 2], )
     Maker.band_ind = 0
     map = sp.zeros((Data.dims[-1], 32, 15))
     map = al.make_vect(map, ('freq', 'ra', 'dec'))
     map.set_axis_info('freq', Data.freq[Data.dims[-1] // 2],
                       Data.field['CRVAL1'])
     map.set_axis_info('ra', 218, 0.075)
     map.set_axis_info('dec', 2, 0.075)
     Maker.map = map
     self.Maker = Maker
     # The variances of each channel.
     self.norms = (sp.arange(1., 2., 0.25)[:, None] *
                   (sp.arange(1., 2., 1. / n_chan)[None, :]))
     for Data in self.Blocks:
         Data.data[...] = random.randn(*Data.data.shape)
         Data.data *= sp.sqrt(self.norms[:, None, :])
         Data.data += 50.
Code Example #25
File: data_paths.py Project: astrofanlee/project_TL
    def fetch_multi(self, data_obj, db_token="db:", silent=False,
                    intend_read=True):
        r"""Handle various sorts of file pointers/data
        if `data_obj`
            is an array, return a deep copy of it
            is a string:
                if it begins with "db:" -- string after db is a db key
                otherwise assume it is a file and try to open it
        """
        if isinstance(data_obj, str):
            if data_obj[0:len(db_token)] == db_token:
                db_key = data_obj[len(db_token):]
                filename = self.fetch(db_key, intend_read=intend_read,
                                      silent=silent)
            else:
                filename = data_obj
                prefix = "non-db filename "
                ft.path_properties(filename, intend_read=intend_read,
                                   is_file=True,
                                   prefix=prefix, silent=silent)

            ret_data = algebra.make_vect(algebra.load(filename))
        else:
            ret_data = copy.deepcopy(data_obj)

        return ret_data
Code Example #26
def repackage_pickle_as_shelve(pklfile, shelvefile):
    """Take pickled output from Liviu's code and combine the data from various
    sources into a common shelve file. [script, not production]
    """
    print pklfile
    f = open(pklfile, "r")
    F = cPickle.load(f)
    f.close()

    # Setting axis info after pickling.
    map_file = F.params["input_root"] + "sec_A_15hr_41-73_clean_map_I.npy"
    exMap = algebra.make_vect(algebra.load(map_file))
    for Pair in F.Pairs:
        Pair.Map1.info = exMap.info
        Pair.Map2.info = exMap.info
        Pair.Noise_inv1.info = exMap.info
        Pair.Noise_inv2.info = exMap.info

    for corrindex in range(6):
        shelvename = shelvefile + "_" + repr(corrindex) + ".shelve"
        corr_shelve = shelve.open(shelvename)
        print shelvename
        corr_shelve["corr"] = F.Pairs[corrindex].corr
        corr_shelve["counts"] = F.Pairs[corrindex].counts
        corr_shelve["freq_axis"] = F.Pairs[corrindex].Map1.get_axis("freq")
        corr_shelve["params"] = F.params
        corr_shelve.close()
Code Example #27
File: mktmp.py Project: wheeyeon/analysis_IM
def mktmp(rgn_i,rgn_j,rgn_k,srgn_i1,srgn_i2,srgn_j1,srgn_j2,srgn_k1,srgn_k2,outfilename):
    """Write to disk a file representing an empty matrix of given dimensions. Also write an identically
    shaped array of booleans, which are true if the index points to the subregion.
    rgn_i/j/k  : the dimensions of the full region to be simulated
        srgn_i/j/k : the dimensions of the deep integration subregion
    outfilename: the name of the file to be created
    """


    regiontype = np.zeros((rgn_i,rgn_j,rgn_k), bool)

    array = np.zeros((rgn_i,rgn_j,rgn_k))

    for i in range(0, rgn_i):
        for j in range(0, rgn_j):
            for k in range(0, rgn_k):
                # True only when (i, j, k) lies inside the subregion bounds
                if (i >= (srgn_i1 - 1) and i <= (srgn_i2 - 1) and
                        j >= (srgn_j1 - 1) and j <= (srgn_j2 - 1) and
                        k >= (srgn_k1 - 1) and k <= (srgn_k2 - 1)):
                    regiontype[i, j, k] = True
                else:
                    regiontype[i, j, k] = False

    region=algebra.info_array(array)
    regiontypename = 'bool' + outfilename
    np.save(regiontypename, regiontype)
    algebra.save(outfilename,region)
    print "done"
    template_map = algebra.make_vect(algebra.load(outfilename))
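
The triple loop just marks a rectangular subregion; with NumPy slicing the same boolean mask can be built directly. A sketch using the same 1-based, inclusive bounds convention as the arguments above:

import numpy as np

def subregion_mask(rgn_i, rgn_j, rgn_k,
                   srgn_i1, srgn_i2, srgn_j1, srgn_j2, srgn_k1, srgn_k2):
    """True inside the (1-based, inclusive) subregion bounds, False elsewhere."""
    regiontype = np.zeros((rgn_i, rgn_j, rgn_k), bool)
    regiontype[srgn_i1 - 1:srgn_i2, srgn_j1 - 1:srgn_j2, srgn_k1 - 1:srgn_k2] = True
    return regiontype

mask = subregion_mask(10, 10, 10, 3, 5, 3, 5, 3, 5)
assert mask.sum() == 3 * 3 * 3
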
Code Example #28
File: simulate_gbt.py Project: wheeyeon/analysis_IM
    def execute_assembledir(self):
        # link the weights through to the simulation directory
        for (weight_file_in, weight_file_out) in \
                zip(self.input_weight_maps, self.output_weight_maps):
            os.symlink(weight_file_in, weight_file_out)
            os.symlink(weight_file_in + ".meta", weight_file_out + ".meta")

        signalfile = self.output_root + self.output_signal
        signalmap = algebra.make_vect(algebra.load(signalfile))
        signalmap *= self.multiplier

        # now load the signal simulation add thermal noise and save
        for (thermal_file, mapfile) in \
                zip(self.output_thermal, self.output_maps):
            thermalmap = algebra.make_vect(algebra.load(thermal_file))
            algebra.save(mapfile, signalmap + thermalmap)
Code Example #29
File: pca.py Project: POFK/ICA_learning
def ReadMeta(data_path):
    """return  freq ra dec"""
    data = algebra.make_vect(algebra.load(data_path))
    freq = data.get_axis("freq")
    ra = data.get_axis("ra")
    dec = data.get_axis("dec")
    return freq, ra, dec
Code Example #30
    def process_noise_inv(self, filename, regenerate=True):
        r"""buffer reading the noise inverse files for speed and also
        save to a file in the intermediate output path.

        If the cached file exists as an intermediate product, load it; otherwise
        produce it.
        """
        if filename not in self.noisefiledict:
            basename = filename.split("/")[-1].split(".npy")[0]
            filename_diag = "%s/%s_diag.npy" % (self.output_root, basename)
            exists = os.access(filename_diag, os.F_OK)
            if exists and not regenerate:
                print "loading pre-diagonalized noise: " + filename_diag
                self.noisefiledict[filename] = algebra.make_vect(algebra.load(filename_diag))
            else:
                print "loading noise: " + filename
                # TODO: have this be smarter about reading various noise cov
                # inputs
                noise_inv = algebra.make_mat(algebra.open_memmap(filename, mode="r"))
                self.noisefiledict[filename] = noise_inv.mat_diag()
                # self.noisefiledict[filename] = algebra.make_vect(
                #                               algebra.load(filename))
                algebra.save(filename_diag, self.noisefiledict[filename])

        return copy.deepcopy(self.noisefiledict[filename])
Code Example #31
    def deactivated_test_extreme_index(self):
        """Set of parameters know to have cased issues in the past with
        numerical stability."""

        nf = 40
        nt = 150
        n = nf * nt
        dt = 0.26214
        BW = 1. / dt / 2.
        time_stream = sp.zeros((nf, nt))
        time_stream = al.make_vect(time_stream, axis_names=("freq", "time"))
        time = dt * (sp.arange(nt) + 50)
        N = dirty_map.Noise(time_stream, time)
        # Thermal.
        thermal = sp.zeros(nf, dtype=float) + 0.0002 * BW * 2.
        thermal[22] = dirty_map.T_infinity**2
        N.add_thermal(thermal)
        # Time mean and slope.
        N.deweight_time_mean()
        N.deweight_time_slope()
        # Extreme index over_f bit.
        mode = -sp.ones(nf, dtype=float) / sp.sqrt(nf - 1)
        mode[22] = 0
        # Parameters measured from one of the data sets.  Known to screw things
        # up.
        #N.add_over_f_freq_mode(8.128e-7, -4.586, 1.0, 1.422e-7, mode, True)
        N.add_over_f_freq_mode(0.001729, -0.777, 1.0, 1e-8, mode, True)
        #N.orthogonalize_modes()
        N.finalize()
        # Check if the fast inverse works.
        N_mat = N.get_mat()
        N_mat.shape = (n, n)
        N_inv = N.get_inverse()
        N_inv.shape = (n, n)
Code Example #32
    def deactivated_test_extreme_index(self):
        """Set of parameters know to have cased issues in the past with
        numerical stability."""

        nf = 40
        nt = 150
        n = nf * nt
        dt = 0.26214
        BW = 1. / dt / 2.
        time_stream = sp.zeros((nf, nt))
        time_stream = al.make_vect(time_stream, axis_names=("freq", "time"))
        time = dt * (sp.arange(nt) + 50)
        N = dirty_map.Noise(time_stream, time)
        # Thermal.
        thermal = sp.zeros(nf, dtype=float) + 0.0002 * BW * 2.
        thermal[22] = dirty_map.T_infinity**2
        N.add_thermal(thermal)
        # Time mean and slope.
        N.deweight_time_mean()
        N.deweight_time_slope()
        # Extreme index over_f bit.
        mode = -sp.ones(nf, dtype=float) / sp.sqrt(nf - 1)
        mode[22] = 0
        # Parameters measured from one of the data sets.  Known to screw things
        # up.
        #N.add_over_f_freq_mode(8.128e-7, -4.586, 1.0, 1.422e-7, mode, True)
        N.add_over_f_freq_mode(0.001729, -0.777, 1.0, 1e-8, mode, True)
        #N.orthogonalize_modes()
        N.finalize()
        # Check if the fast inverse works.
        N_mat = N.get_mat()
        N_mat.shape = (n, n)
        N_inv = N.get_inverse()
        N_inv.shape = (n, n)
Code Example #33
    def __init__(self, parameter_file=None, params_dict=None, feedback=0):
        self.params = params_dict
        if parameter_file:
            self.params = parse_ini.parse(parameter_file,
                                          params_init,
                                          prefix=prefix)

        self.output_file = self.params['output_file']
        self.delta_temp_file = self.params['delta_temp_file']
        self.total_integration = self.params['total_integration']
        self.weight_map = algebra.make_vect(
            algebra.load(self.params['weight_file']))

        self.max_stdev = self.params['max_stdev']

        # set the random seed
        if (self.params['seed'] < 0):
            # The usual seed is not fine enough for parallel jobs
            randsource = open("/dev/random", "rb")
            self.seed = struct.unpack("I", randsource.read(4))[0]
            #self.seed = abs(long(outfile_physical.__hash__()))
        else:
            self.seed = self.params['seed']

        random.seed(self.seed)
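
Reading four bytes from /dev/random and unpacking them as an unsigned int gives a per-process seed; os.urandom does the same thing portably. A small sketch of that pattern:

import os
import random
import struct

# 4 bytes of system entropy -> unsigned 32-bit seed (portable analogue of
# reading /dev/random directly, as in the code above)
seed = struct.unpack("I", os.urandom(4))[0]
random.seed(seed)
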
Code Example #34
    def fetch_multi(self,
                    data_obj,
                    db_token="db:",
                    silent=False,
                    intend_read=False):
        r"""Handle various sorts of file pointers/data
        if `data_obj`
            is an array, return a deep copy of it
            is a string:
                if it begins with "db:" -- string after db is a db key
                otherwise assume it is a file and try to open it
        """
        if isinstance(data_obj, str):
            if data_obj[0:len(db_token)] == db_token:
                db_key = data_obj[len(db_token):]
                filename = self.fetch(db_key,
                                      intend_read=intend_read,
                                      silent=silent)
            else:
                filename = data_obj
                prefix = "non-db filename "
                ft.path_properties(filename,
                                   intend_read=intend_read,
                                   is_file=True,
                                   prefix=prefix,
                                   silent=silent)

            ret_data = algebra.make_vect(algebra.load(filename))
        else:
            ret_data = copy.deepcopy(data_obj)

        return ret_data
Code Example #35
    def setUp(self) :
        # Read in just to figure out the band structure.
        this_test_file = 'testdata/testfile_guppi_rotated.fits'
        Reader = fitsGBT.Reader(this_test_file, feedback=0)
        Blocks = Reader.read((0,),())
        bands = ()
        for Data in Blocks:
            n_chan = Data.dims[3]
            Data.calc_freq()
            freq = Data.freq
            delta = abs(sp.mean(sp.diff(freq)))
            centre = freq[n_chan//2]
            band = int(centre/1e6)
            bands += (band,)
            map = sp.zeros((n_chan, 15, 11))
            map = algebra.make_vect(map, axis_names=('freq', 'ra', 'dec'))
            map.set_axis_info('freq', centre, -delta)
            map.set_axis_info('ra', 218, -0.2)
            map.set_axis_info('dec', 2, 0.2)
            algebra.save('./testout_clean_map_I_' + str(band) + '.npy', map)

        self.params = {'sm_input_root' : 'testdata/',
                       'sm_file_middles' : ("testfile",),
                       'sm_input_end' : "_guppi_rotated.fits",
                       'sm_output_root' : "./testout_",
                       'sm_output_end' : "_sub.fits",
                       'sm_solve_for_gain' : True,
                       'sm_gain_output_end' : 'gain.pickle',
                       'sm_map_input_root' : './testout_',
                       'sm_map_type' : 'clean_map_',
                       'sm_map_polarizations' : ('I',),
                       'sm_map_bands' : bands
                       }
Code Example #36
    def setUp(self):
        # Read in just to figure out the band structure.
        this_test_file = 'testdata/testfile_guppi_rotated.fits'
        Reader = fitsGBT.Reader(this_test_file, feedback=0)
        Blocks = Reader.read((0, ), ())
        bands = ()
        for Data in Blocks:
            n_chan = Data.dims[3]
            Data.calc_freq()
            freq = Data.freq
            delta = abs(sp.mean(sp.diff(freq)))
            centre = freq[n_chan // 2]
            band = int(centre / 1e6)
            bands += (band, )
            map = sp.zeros((n_chan, 15, 11))
            map = algebra.make_vect(map, axis_names=('freq', 'ra', 'dec'))
            map.set_axis_info('freq', centre, -delta)
            map.set_axis_info('ra', 218, -0.2)
            map.set_axis_info('dec', 2, 0.2)
            algebra.save('./testout_clean_map_I_' + str(band) + '.npy', map)

        self.params = {
            'sm_input_root': 'testdata/',
            'sm_file_middles': ("testfile", ),
            'sm_input_end': "_guppi_rotated.fits",
            'sm_output_root': "./testout_",
            'sm_output_end': "_sub.fits",
            'sm_solve_for_gain': True,
            'sm_gain_output_end': 'gain.pickle',
            'sm_map_input_root': './testout_',
            'sm_map_type': 'clean_map_',
            'sm_map_polarizations': ('I', ),
            'sm_map_bands': bands
        }
Code example #37
0
def map_pair_cal(uncal_maplist, uncal_weightlist, calfactor_outlist,
                 dirtymap_inlist, dirtymap_outlist,
                 convolve=True, factorizable_noise=False,
                 sub_weighted_mean=True, freq_list=range(256)):
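    # reference_clean and reference_weight are assumed to be module-level
    # globals naming the calibration reference map and its weight file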

    map1file = reference_clean
    weight1file = reference_weight
    #map1file = uncal_maplist.pop(0)
    #weight1file = uncal_weightlist.pop(0)
    #calfactor_outlist.pop(0)
    #dirtymap_out0 = dirtymap_outlist.pop(0)
    #dirtymap_in0 = dirtymap_inlist.pop(0)

    # do nothing to the reference map
    #ref_dirtymap = algebra.make_vect(algebra.load(dirtymap_in0))
    #algebra.save(dirtymap_out0, ref_dirtymap)

    # load maps into pairs
    svdout = shelve.open("correlation_pairs_v2.shelve")
    for map2file, weight2file, calfactor_outfile, \
        dirty_infile, dirty_outfile in zip(uncal_maplist, \
            uncal_weightlist, calfactor_outlist,
            dirtymap_inlist, dirtymap_outlist):

        print map1file, weight1file, map2file, weight2file

        pair = map_pair.MapPair(map1file, map2file,
                                weight1file, weight2file,
                                freq_list, avoid_db=True)

        if factorizable_noise:
            pair.make_noise_factorizable()

        if sub_weighted_mean:
            pair.subtract_weighted_mean()

        if convolve:
            pair.degrade_resolution()

        (corr, counts) = pair.correlate()
        svd_info = ce.get_freq_svd_modes(corr, len(freq_list))
        svdout[map2file] = svd_info

        # write out the left right and cal factors
        leftmode = svd_info[1][0]
        rightmode = svd_info[2][0]
        calfactor = leftmode/rightmode

        facout = open(calfactor_outfile, "w")
        for outvals in zip(leftmode, rightmode, calfactor):
            facout.write("%10.15g %10.15g %10.15g\n" % outvals)

        facout.close()

        newmap = algebra.make_vect(algebra.load(dirty_infile))
        newmap[freq_list, :, :] *= calfactor[:,np.newaxis,np.newaxis]
        algebra.save(dirty_outfile, newmap)
        print dirty_outfile

    svdout.close()
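
A hypothetical invocation (all file names and the frequency range are invented
for illustration): each uncalibrated section map is correlated against the
reference map named by the module-level variables above, and a per-frequency
gain plus a recalibrated dirty map are written out for each section.

sections = ["B", "C", "D"]
map_pair_cal(uncal_maplist=["sec_%s_clean_map_I.npy" % s for s in sections],
             uncal_weightlist=["sec_%s_noise_weight_I.npy" % s for s in sections],
             calfactor_outlist=["calfactor_%s.txt" % s for s in sections],
             dirtymap_inlist=["sec_%s_dirty_map_I.npy" % s for s in sections],
             dirtymap_outlist=["sec_%s_dirty_map_cal_I.npy" % s for s in sections],
             convolve=True, sub_weighted_mean=True,
             freq_list=range(40, 216))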
Code example #38
0
    def make_opt_sim(self):
        r"""this produces self.sim_map_optsim"""

        print "making sim of optically-selected galaxies"
        selection_function = \
                self.datapath_db.fetch_multi(self.params['selection_file'])

        poisson_vect = np.vectorize(np.random.poisson)

        print self.sim_map_delta

        mean_num_gal = (self.sim_map_delta + 1.) * selection_function
        print mean_num_gal

        self.sim_map_optsim = poisson_vect(mean_num_gal)
        self.sim_map_optsim = \
            algebra.make_vect(self.sim_map_optsim.astype(float), axis_names=('freq', 'ra', 'dec'))

        if self.params['optcatalog_file']:
            optical_catalog = \
                self.datapath_db.fetch_multi(self.params['optcatalog_file'])

            # convert from delta to N
            optical_catalog = (optical_catalog + 1.) * selection_function

            print np.sum(optical_catalog), np.sum(self.sim_map_optsim)

        self.sim_map_optsim = self.sim_map_optsim / selection_function - 1.

        self.sim_map_optsim.copy_axis_info(self.sim_map_delta)
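
The heart of the optical simulation above is a per-voxel Poisson draw with mean
(delta + 1) * selection_function, converted back to an overdensity afterwards.
A self-contained numpy sketch of just that step (shapes and values made up):

import numpy as np

sim_map_delta = np.random.normal(0., 0.2, size=(4, 8, 8))  # fake overdensity cube
selection_function = np.full((4, 8, 8), 5.)                # fake mean galaxy counts

mean_num_gal = (sim_map_delta + 1.) * selection_function
num_gal = np.random.poisson(np.clip(mean_num_gal, 0., None))  # clip guards delta < -1
optsim_delta = num_gal / selection_function - 1.              # back to an overdensity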
Code example #39
0
def repackage_pickle_as_shelve(pklfile, shelvefile):
    """Take pickled output from Liviu's code and combine the data from various
    sources into a common shelve file. [script, not production]
    """
    print pklfile
    f = open(pklfile, "r")
    F = cPickle.load(f)
    f.close()

    # Setting axis info after pickling.
    map_file = F.params["input_root"] + "sec_A_15hr_41-73_clean_map_I.npy"
    exMap = algebra.make_vect(algebra.load(map_file))
    for Pair in F.Pairs:
        Pair.Map1.info = exMap.info
        Pair.Map2.info = exMap.info
        Pair.Noise_inv1.info = exMap.info
        Pair.Noise_inv2.info = exMap.info

    for corrindex in range(6):
        shelvename = shelvefile + "_" + repr(corrindex) + ".shelve"
        corr_shelve = shelve.open(shelvename)
        print shelvename
        corr_shelve["corr"] = F.Pairs[corrindex].corr
        corr_shelve["counts"] = F.Pairs[corrindex].counts
        corr_shelve["freq_axis"] = F.Pairs[corrindex].Map1.get_axis('freq')
        corr_shelve["params"] = F.params
        corr_shelve.close()
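
Reading one of the resulting shelve files back is a plain dictionary lookup
(the file name here is hypothetical):

import shelve

corr_shelve = shelve.open("some_output_0.shelve")
corr = corr_shelve["corr"]
counts = corr_shelve["counts"]
freq_axis = corr_shelve["freq_axis"]
params = corr_shelve["params"]
corr_shelve.close()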
Code example #40
0
    def process_noise_inv(self, filename, regenerate=True):
        r"""buffer reading the noise inverse files for speed and also
        save to a file in the intermediate output path.

        If the cached file already exists as an intermediate product, load it;
        otherwise produce and save it.
        """
        if filename not in self.noisefiledict:
            basename = filename.split("/")[-1].split(".npy")[0]
            filename_diag = "%s/%s_diag.npy" % \
                           (self.output_root, basename)
            exists = os.access(filename_diag, os.F_OK)
            if exists and not regenerate:
                print "loading pre-diagonalized noise: " + filename_diag
                self.noisefiledict[filename] = algebra.make_vect(
                    algebra.load(filename_diag))
            else:
                print "loading noise: " + filename
                # TODO: have this be smarter about reading various noise cov
                # inputs
                noise_inv = algebra.make_mat(
                    algebra.open_memmap(filename, mode='r'))
                self.noisefiledict[filename] = noise_inv.mat_diag()
                #self.noisefiledict[filename] = algebra.make_vect(
                #                               algebra.load(filename))
                algebra.save(filename_diag, self.noisefiledict[filename])

        return copy.deepcopy(self.noisefiledict[filename])
Code example #41
0
    def setUp(self):
        # Make a positive definite noise matrix, clean map, and dirty_map.
        self.nra = 10
        self.ndec = 5
        self.nf = 20
        self.shape = (self.nf, self.nra, self.ndec)
        self.size = self.nra * self.ndec * self.nf
        # Clean map.
        clean_map = sp.empty(self.shape, dtype=float)
        clean_map = al.make_vect(clean_map, axis_names=('freq', 'ra', 'dec'))
        clean_map[...] = sp.sin(sp.arange(self.nf))[:, None, None]
        clean_map *= sp.cos(sp.arange(self.nra))[:, None]
        clean_map *= sp.cos(sp.arange(self.ndec))
        # Noise inverse matrix.
        noise_inv = sp.empty(self.shape * 2, dtype=float)
        noise_inv = al.make_mat(noise_inv,
                                axis_names=('freq', 'ra', 'dec') * 2,
                                row_axes=(0, 1, 2),
                                col_axes=(3, 4, 5))
        rand_mat = rand.randn(*((self.size,) * 2))
        information_factor = 1.e6  # K**-2
        rand_mat = sp.dot(rand_mat, rand_mat.transpose()) * information_factor
        noise_inv.flat[...] = rand_mat.flat
        # Dirty map.
        dirty_map = al.partial_dot(noise_inv, clean_map)
        # Store in self.
        self.clean_map = clean_map
        self.noise_inv = noise_inv
        self.dirty_map = dirty_map
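
As a sanity check one might add to a test using this fixture (a sketch only;
the method name is invented, and it assumes al.partial_dot of this fully dense
mat against the vect reduces to the flattened matrix-vector product), the
clean map is recoverable from the dirty map by solving the dense system:

import numpy as np

def test_fixture_consistency(self):
    # flatten the block operator to a 2-D matrix and solve noise_inv * c = d for c
    noise_inv_2d = np.asarray(self.noise_inv).reshape(self.size, self.size)
    dirty_1d = np.asarray(self.dirty_map).reshape(self.size)
    recovered = np.linalg.solve(noise_inv_2d, dirty_1d).reshape(self.shape)
    assert np.allclose(recovered, np.asarray(self.clean_map))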
Code example #42
0
    def setUp(self):
        Reader = fitsGBT.Reader("./testdata/testfile_guppi_combined.fits",
                                feedback=0)
        self.Blocks = Reader.read((), 1)
        Data = self.Blocks[0]
        Data.calc_freq()
        params = {'dm_deweight_time_slope': True}
        Maker = dirty_map.DirtyMapMaker(params, feedback=0)
        n_chan = Data.dims[-1]
        Maker.n_chan = n_chan
        Maker.pols = (1, 2, 3, 4)
        Maker.pol_ind = 0
        Maker.band_centres = (Data.freq[Data.dims[-1]//2],)
        Maker.band_ind = 0
        map = sp.zeros((Data.dims[-1], 32, 15))
        map = al.make_vect(map, ('freq', 'ra', 'dec'))
        map.set_axis_info('freq', Data.freq[Data.dims[-1]//2],
                          Data.field['CRVAL1'])
        map.set_axis_info('ra', 218, 0.075)
        map.set_axis_info('dec', 2, 0.075)
        Maker.map = map
        self.Maker = Maker
        # The variances of each channel.
        self.norms = (sp.arange(1., 2., 0.25)[:, None]
                      * (sp.arange(1., 2., 1./n_chan)[None, :]))
        for Data in self.Blocks:
            Data.data[...] = random.randn(*Data.data.shape)
            Data.data *= sp.sqrt(self.norms[:, None, :])
            Data.data += 50.
Code example #44
0
    def get_map(self):
        map = sp.zeros((self.nf, self.nra, self.ndec))
        map = al.make_vect(map, ('freq', 'ra', 'dec'))
        map.set_axis_info('freq', 800e6, 1e6)
        map.set_axis_info('ra', 21, self.map_size / self.nra)
        map.set_axis_info('dec', 0, self.map_size / self.ndec)
        return map
Code example #46
0
File: read.py Project: POFK/ICA_learning
def ReadMeta(data_path):
    '''Return the freq, ra and dec axes of a map file.'''
    data = algebra.make_vect(algebra.load(data_path))
    freq = data.get_axis('freq')
    ra = data.get_axis('ra')
    dec = data.get_axis('dec')
    return freq, ra, dec
Code example #47
0
File: pair_set.py Project: astrofanlee/project_TL
def extend_iqu_map(source_dict=None, target_dict=None, map_dict=None):
    if source_dict != None:
        imap = algebra.make_vect(algebra.load(source_dict['imap']))
        qmap = algebra.make_vect(algebra.load(source_dict['qmap']))
        umap = algebra.make_vect(algebra.load(source_dict['umap']))

        if source_dict.has_key('imap_weight'):
            imap_weight = algebra.make_vect(algebra.load(source_dict['imap_weight']))
            qmap_weight = algebra.make_vect(algebra.load(source_dict['qmap_weight']))
            umap_weight = algebra.make_vect(algebra.load(source_dict['umap_weight']))
        elif source_dict.has_key('imap_inv'):
            imap_weight, info = find_weight_re_diagnal(source_dict['imap_inv'])
            qmap_weight, info = find_weight_re_diagnal(source_dict['qmap_inv'])
            umap_weight, info = find_weight_re_diagnal(source_dict['umap_inv'])
        else:
            print 'Warning: no weight'
            imap_weight = algebra.ones_like(imap)
            qmap_weight = algebra.ones_like(imap)
            umap_weight = algebra.ones_like(imap)
    elif map_dict != None:
        imap = map_dict['imap']
        qmap = map_dict['qmap']
        umap = map_dict['umap']

        if 'imap_weight' in map_dict.keys():
            imap_weight = map_dict['imap_weight']
            qmap_weight = map_dict['qmap_weight']
            umap_weight = map_dict['umap_weight']
        else:
            print 'Warning: no weight'
            imap_weight = algebra.ones_like(imap)
            qmap_weight = algebra.ones_like(imap)
            umap_weight = algebra.ones_like(imap)
    else:
        print "Error: Can not find I Q U maps"
        exit()

    iqu = algebra.info_array(imap.tolist() + qmap.tolist() + umap.tolist())
    iqu = algebra.make_vect(iqu)
    iqu.info = imap.info
    iqu.copy_axis_info(imap)

    iqu_weight = algebra.info_array(imap_weight.tolist() + 
                                    qmap_weight.tolist() + 
                                    umap_weight.tolist())
    iqu_weight = algebra.make_vect(iqu_weight)
    iqu_weight.info = imap_weight.info
    iqu_weight.copy_axis_info(imap_weight)

    if target_dict != None:
        algebra.save(target_dict['map'], iqu)
        algebra.save(target_dict['weight'], iqu_weight)
    else:
        map_dict = {}
        map_dict['map']    = iqu
        map_dict['weight'] = iqu_weight
        return map_dict
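
A hypothetical file-based call (all paths invented for illustration): the I, Q
and U maps and their weights are concatenated along the frequency axis and
written out as a single combined map/weight pair.

source_dict = {'imap': 'sec_A_clean_map_I.npy',
               'qmap': 'sec_A_clean_map_Q.npy',
               'umap': 'sec_A_clean_map_U.npy',
               'imap_weight': 'sec_A_noise_weight_I.npy',
               'qmap_weight': 'sec_A_noise_weight_Q.npy',
               'umap_weight': 'sec_A_noise_weight_U.npy'}
target_dict = {'map': 'sec_A_clean_map_IQU.npy',
               'weight': 'sec_A_noise_weight_IQU.npy'}
extend_iqu_map(source_dict=source_dict, target_dict=target_dict)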
Code example #48
0
    def produce_delta_map(self, optical_file, optical_selection_file):
        map_optical = algebra.make_vect(algebra.load(optical_file))
        map_nbar = algebra.make_vect(algebra.load(optical_selection_file))

        old_settings = np.seterr(invalid="ignore", under="ignore")
        map_delta = map_optical / map_nbar - 1.
        np.seterr(**old_settings)

        # TODO: also consider setting the nbar to zero outside of galaxies?
        map_delta[np.isinf(map_delta)] = 0.
        map_delta[np.isnan(map_delta)] = 0.
        # if e.g. nbar is zero, then set the point as if there were no galaxies
        # downstream, nbar=0 should coincide with zero weight anyway
        #map_delta[np.isinf(map_delta)] = -1.
        #map_delta[np.isnan(map_delta)] = -1.

        return map_delta
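
The inf/nan handling above matters exactly where the selection function is
zero; a small self-contained illustration (values made up):

import numpy as np

map_optical = np.array([4., 0., 2.])
map_nbar = np.array([2., 0., 0.])        # zero selection in the last two pixels

old_settings = np.seterr(divide="ignore", invalid="ignore")
map_delta = map_optical / map_nbar - 1.  # -> [1., nan, inf]
np.seterr(**old_settings)

map_delta[np.isinf(map_delta)] = 0.
map_delta[np.isnan(map_delta)] = 0.      # -> [1., 0., 0.]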
Code example #49
0
def cross_power_est_highmem(arr1, arr2, weight1, weight2,
                    window="blackman", nonorm=False):
    """Calculate the cross-power spectrum of a two nD fields.

    The arrays must be identical and have the same length (physically
    and in pixel number) along each axis.

    Same goal as above without the emphasis on saving memory.
    This is the "tried and true" legacy function.
    """
    if window:
        window_function = fftutil.window_nd(arr1.shape, name=window)
        weight1 *= window_function
        weight2 *= window_function

    warr1 = arr1 * weight1
    warr2 = arr2 * weight2
    ndim = arr1.ndim

    fft_arr1 = np.fft.fftshift(np.fft.fftn(warr1))
    fft_arr2 = np.fft.fftshift(np.fft.fftn(warr2))
    xspec = fft_arr1 * fft_arr2.conj()
    xspec = xspec.real

    # correct for the weighting
    product_weight = weight1 * weight2
    xspec /= np.sum(product_weight)

    # make the axes
    k_axes = tuple(["k_" + axis_name for axis_name in arr1.axes])
    xspec_arr = algebra.make_vect(xspec, axis_names=k_axes)

    info = {'axes': k_axes, 'type': 'vect'}
    width = np.zeros(ndim)
    for axis_index in range(ndim):
        n_axis = arr1.shape[axis_index]
        axis_name = arr1.axes[axis_index]
        axis_vector = arr1.get_axis(axis_name)
        delta_axis = abs(axis_vector[1] - axis_vector[0])
        width[axis_index] = delta_axis

        k_axis = np.fft.fftshift(np.fft.fftfreq(n_axis, d=delta_axis))
        k_axis *= 2. * math.pi
        delta_k_axis = abs(k_axis[1] - k_axis[0])

        k_name = k_axes[axis_index]
        info[k_name + "_delta"] = delta_k_axis
        info[k_name + "_centre"] = 0.
        #print k_axis
        #print k_name, n_axis, delta_axis

    xspec_arr.info = info
    #print xspec_arr.get_axis("k_dec")

    if not nonorm:
        xspec_arr *= width.prod()

    return xspec_arr
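
A hypothetical invocation, assuming the algebra module is imported as in the
surrounding examples (the map and weight paths are invented): the result is an
algebra vect whose axes are k_freq, k_ra and k_dec, with the k-space axis info
filled in by the routine.

map1 = algebra.make_vect(algebra.load("sec_A_cleaned_clean_map_I.npy"))
map2 = algebra.make_vect(algebra.load("sec_B_cleaned_clean_map_I.npy"))
weight1 = algebra.make_vect(algebra.load("sec_A_noise_weight_I.npy"))
weight2 = algebra.make_vect(algebra.load("sec_B_noise_weight_I.npy"))

xspec = cross_power_est_highmem(map1, map2, weight1, weight2,
                                window="blackman")
print xspec.shape, xspec.get_axis("k_freq")[:5]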
Code example #50
0
def find_map_region(
    min_ra,
    max_ra,
    min_dec,
    max_dec,
    target_sample=0.25,
    multiplier=16,
    search_start=0,
    max_freq=700.391,
    min_freq=899.609,
    n_freq=256,
    exact_freq=True,
):
    r"""target 0.25 pixels/FWHM"""
    # do multiples of 16 so that 256 freq * 16N is a multiple of 4096

    ra_sampling = target_sample * 2.0  # initial value that will not fail
    n_ra = search_start
    while ra_sampling > target_sample:
        n_ra += multiplier
        ra_sampling = find_map_dimensions(
            define_map_region(min_freq, max_freq, min_ra, max_ra, min_dec, max_dec, n_freq, n_ra, 32, exact_freq=True),
            silent=True,
        )
        ra_sampling = ra_sampling[0]

    dec_sampling = target_sample * 2.0
    n_dec = search_start
    while dec_sampling > target_sample:
        n_dec += multiplier
        dec_sampling = find_map_dimensions(
            define_map_region(
                min_freq, max_freq, min_ra, max_ra, min_dec, max_dec, n_freq, n_ra, n_dec, exact_freq=True
            ),
            silent=True,
        )
        dec_sampling = dec_sampling[1]

    ra_sample_ratio = target_sample / ra_sampling
    dec_sample_ratio = target_sample / dec_sampling

    print "n_ra=%d, samp=%g, ratio=%g" % (n_ra, ra_sampling, target_sample / ra_sampling)

    print "n_dec=%d, samp=%g, ratio=%g" % (n_dec, dec_sampling, target_sample / dec_sampling)

    print "original ra=(%g,%g), dec=(%g,%g)" % (min_ra, max_ra, min_dec, max_dec)

    template_map = define_map_region(min_freq, max_freq, min_ra, max_ra, min_dec, max_dec, n_freq, n_ra, n_dec)
    # now expand the map a bit so that pixels are exactly 0.25 deg
    info = template_map.info

    blank = sp.zeros((n_freq, n_ra, n_dec))
    info["ra_delta"] *= ra_sample_ratio
    info["dec_delta"] *= dec_sample_ratio
    map_prod = algebra.make_vect(blank, axis_names=("freq", "ra", "dec"))
    map_prod.info = info

    return map_prod
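
A hypothetical call covering a small patch (all coordinates invented): the
return value is an empty algebra vect template whose ra/dec pixel spacing has
been rescaled so the sampling exactly matches the requested target.

template = find_map_region(min_ra=214., max_ra=223., min_dec=-1., max_dec=5.,
                           target_sample=0.25, multiplier=16)
print template.shape
print template.info['ra_delta'], template.info['dec_delta']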
Code example #52
0
def template_map_axes(filename):
    """Open a numpy array map and extract its axis/etc. information
    """
    print "using the volume template file: " + filename
    template_map = algebra.make_vect(algebra.load(filename))
    freq_axis = template_map.get_axis('freq')
    ra_axis = template_map.get_axis('ra')
    dec_axis = template_map.get_axis('dec')
    return (freq_axis, ra_axis, dec_axis, template_map.shape, template_map)
Code example #53
0
def add_sim_to_data(simkey, datakey, replace=False):
    datapath_db = data_paths.DataPath()

    mapA_file = datapath_db.fetch(datakey + ":A;clean_map", intend_read=True)
    mapB_file = datapath_db.fetch(datakey + ":B;clean_map", intend_read=True)
    mapC_file = datapath_db.fetch(datakey + ":C;clean_map", intend_read=True)
    mapD_file = datapath_db.fetch(datakey + ":D;clean_map", intend_read=True)
    simfile = datapath_db.fetch(simkey + ":1", intend_read=True)

    simmap = algebra.make_vect(algebra.load(simfile))

    mapset = [mapA_file, mapB_file, mapC_file, mapD_file]
    for mapfile in mapset:
        print mapfile, simfile
        origmap = algebra.make_vect(algebra.load(mapfile))
        if replace:
            algebra.save(mapfile, simmap)
        else:
            algebra.save(mapfile, origmap + simmap)
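
A hypothetical invocation (both database keys are invented): with the default
replace=False the simulation is added on top of each section's clean map in
place, so this would normally be run against scratch copies of the database
entries.

add_sim_to_data("sim_15hr", "GBT_15hr_map", replace=False)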