Example no. 1
    def load_lib(self, libname='', driver=None):
        """Read a CKC library which has been pre-convolved to be close to your
        resolution.  This library should be stored as an HDF5 file, with the
        datasets ``wavelengths``, ``parameters`` and ``spectra``.  These are
        ndarrays of shape (nwave,), (nmodels,), and (nmodels, nwave)
        respectively.  The ``parameters`` array is a structured array.  Spectra
        with no fluxes > 1e-32 are removed from the library if the library is
        kept in memory.
        """
        import h5py
        f = h5py.File(libname, "r", driver=driver)
        self._wave = np.array(f['wavelengths'])
        self._libparams = np.array(f['parameters'])

        if self._in_memory:
            self._spectra = np.array(f['spectra'])
            f.close()
            # Filter library so that only existing spectra are included
            maxf = np.max(self._spectra, axis=1)
            good = maxf > 1e-32
            self._libparams = self._libparams[good]
            self._spectra = self._spectra[good, :]
        else:
            self._spectra = f['spectra']

        if self.logify_Z:
            from numpy.lib import recfunctions as rfn
            self._libparams['Z'] = np.log10(self._libparams['Z'])
            # rename_fields returns a new array; reassign to keep the renamed view
            self._libparams = rfn.rename_fields(self._libparams, {'Z': 'logZ'})
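A minimal standalone sketch (toy array, hypothetical metallicity values) of why the result above has to be reassigned: rename_fields does not rename in place, it returns a new view with the renamed dtype and leaves its input untouched.

import numpy as np
from numpy.lib import recfunctions as rfn

params = np.array([(0.0190,), (0.0001,)], dtype=[('Z', float)])
renamed = rfn.rename_fields(params, {'Z': 'logZ'})
print(params.dtype.names)   # ('Z',)    -- the original array keeps its name
print(renamed.dtype.names)  # ('logZ',) -- only the returned view is renamed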
Example no. 2
def read_halos_Rockstar_SO(*args, **kwargs):
    halos = read_halos_Rockstar(*args, **kwargs)
    halos = rfn.rename_fields(halos, {'m':'m_rockstar', 'alt_m':'alt_m_rockstar',
                                      'N':'N_rockstar', 'alt_N':'alt_N_rockstar'})
    halos = rfn.rename_fields(halos, {'m_SO':'m', 'alt_m_SO':'alt_m',
                                      'N_SO':'N', 'alt_N_SO':'alt_N'})
    return halos
Example no. 4
    def load_data(self):
        """Loads the data, which is described by the dataset.

        Note: This does not call the ``prepare_data`` method! It only loads
              the data, as the method name says.

        Returns
        -------
        data : numpy record ndarray
            A numpy record ndarray holding the monte-carlo data.
        """
        pathfilenames = self.pathfilenames

        if isinstance(pathfilenames, str):
            pathfilenames = [pathfilenames]

        pathfilename = pathfilenames[0]
        assert_file_exists(pathfilename)
        data = np.load(pathfilename)
        for i in range(1, len(pathfilenames)):
            pathfilename = pathfilenames[i]
            assert_file_exists(pathfilename)
            data = np.append(data, np.load(pathfilename))

        data = np_rfn.rename_fields(data, self._mc_field_name_renaming_dict)

        return data
Example no. 5
    def __init__(self, query=None, table=None):
        # Either query or table, but not both (query xor table).
        if (query and table is not None) or\
           (not query and table is None):
            raise Exception('Either the query or the table parameter is '
                            'required. But not both.')
        if table is not None:
            # Use table if it was provided.
            self.table = table
        else:
            # Otherwise use the query to create the table.
            self.table = SDSS.query_sql(query)
        self.catalog = np.array(self.table)
        self.catalog = rename_fields(self.catalog, {'objID': 'id'})

        # Calculate B and V like the VizieR data.
        # Use Robert Lupton's derived equations found here:
        # http://www.sdss3.org/dr8/algorithms/sdssUBVRITransform.php

        g = self.catalog['g']
        r = self.catalog['r']

        B = g + 0.3130 * (g - r) + 0.2271  # sigma = 0.0107
        V = g - 0.5784 * (g - r) - 0.0038  # sigma = 0.0054

        self.catalog = append_fields(self.catalog, 'B', B)
        self.catalog = append_fields(self.catalog, 'V', V)
Example no. 6
def load_and_prepare_data(pathfilenames):
    """Loads the data file(s), renames fields and applies diffuse dataset cuts.

    Parameters
    ----------
    pathfilenames : str | sequence of str
        The file name(s), including path(s), of the monte-carlo data file(s).

    Returns
    -------
    data : numpy record ndarray
        Loaded and prepared monte-carlo data.
    """
    if isinstance(pathfilenames, basestring):
        pathfilenames = [pathfilenames]
    pathfilename = pathfilenames[0]
    assert_file_exists(pathfilename)
    data = np.load(pathfilename)
    for i in range(1, len(pathfilenames)):
        pathfilename = pathfilenames[i]
        assert_file_exists(pathfilename)
        data = np.append(data, np.load(pathfilename))

    # Rename fields based on MC_keys dictionary.
    data = np_rfn.rename_fields(data, CFG['MC_keys'])

    # Apply diffuse dataset cuts.
    data = diffuse_cuts(data)

    return data
Example no. 7
            def get_price(self, order_book_id, start, end):
                """
                :param order_book_id: e.g. 000002.XSHE
                :param start: 20160101
                :param end: 20160201
                :returns:
                :rtype: numpy.rec.array
                """
                # start = get_date_from_int(start)
                # end = get_date_from_int(end)
                # bar_count = (end - start).days

                # TODO: this is slow, make it run faster
                bar_count = 1000
                origin_bars = bars = self.history_bars(order_book_id,
                                                       bar_count, "1d")

                dtype = copy.deepcopy(bars.dtype)
                names = list(dtype.names)
                names[0] = "date"
                dtype.names = names
                bars = rfn.rename_fields(bars, {"datetime": "date"})
                bars["date"] = origin_bars["datetime"] / 1000000

                return bars
Example no. 8
def load_labels(fname='cocotools/coords/standard_labels.csv',
                usecols=None):
    """Load the standard labels file; return a record array with the atlas.
    """
    # Converters for the fields we're interested in: strings without whitespace
    # and floats
    cleanstr = lambda x: str(x).strip()
    conv = {0:cleanstr, 1:cleanstr, 2:cleanstr, 3:cleanstr, 4:float, 5:float,
            6:float, 7:cleanstr, 8:cleanstr, 9:int}
    # Exclude comments column by default.
    usecols = range(7)+[8] if usecols is None else usecols
    with open(fname) as f:
        # Read the first line for the names
        all_names = [n.strip() for n in f.next().split(',')]
        # genfromtxt is buggy, it gets confused with commas inside strings
        # use the stdlib csv reader and rebuild lines with | as separator,
        # which genfromtxt can then use
        data = ['|'.join(x) for x in list(csv.reader(f))]
        # Older versions of numpy need an actual filehandle, not an arbitrary
        # iterable.
        with tempfile.TemporaryFile() as fdata:
            fdata.write('\n'.join(data))
            fdata.seek(0)
            # Now, let genfromtxt iterate over the rest of the file
            araw = np.genfromtxt(fdata, delimiter='|', usecols=usecols,
                                 converters=conv)
    # Rename the fields so the dtype has more useful names than f0, f1, etc.
    # Also make it a recarray for more convenient use further down
    names = [all_names[i] for i in usecols]
    renamer = dict(zip(araw.dtype.names, names))
    atlas = rfn.rename_fields(araw, renamer).view(np.recarray)

    # Flip y axis to have frontal areas on the left in axial projections
    #atlas.y *= -1
    return atlas
Example no. 9
 def load_as_recarr(self, filename, fields=None, formatmarker='# format:'):
     """Warning: if there's missing value, it will be filled with default value.
         See: https://docs.scipy.org/doc/numpy/user/basics.rec.html
         numpy.lib.recfunctions.merge_arrays()
             -1 for integers
             -1.0 for floating point numbers
             '-' for characters
             '-1' for strings
             True for boolean values
     """
     cols = self.load(filename, fields=fields, formatmarker=formatmarker)
     _fields, cols_data = cols.keys(), cols.values()  # None fields is updated during self.load
     cols_type = [
         self.name2fmter[field_name]._type_ for field_name in _fields
     ]
     #for name,x,t in zip(fields, cols_data, cols_type):
     #    print '***', name,x,t
     cols_nparr = [
         np.array(x, dtype=t)
         for name, x, t in zip(_fields, cols_data, cols_type)
     ]
     unnamed_recarr = rfn.merge_arrays(cols_nparr,
                                       flatten=True,
                                       usemask=False).view(np.recarray)
     return rfn.rename_fields(
         unnamed_recarr, dict(zip(unnamed_recarr.dtype.names, _fields)))
Example no. 10
def filter_on_fields(to_filter,
                     for_filter,
                     filter_fields,
                     filter_fields_2=None,
                     return_selection=False):
    """
    Returns entries of to_filter whose combination of the filter_fields
    values is present in for_filter. filter_fields_2: names of filter_fields in
    for_filter (if different than in to_filter).
    If return_selection, will instead return a boolean selection array for to_filter.
    """
    a = np.array(fields_view(to_filter, filter_fields))
    if filter_fields_2 is None:
        filter_fields_2 = filter_fields
    b = np.array(fields_view(for_filter, filter_fields_2))
    # Rename the fields, if needed
    # If only one field is selected, this won't be needed (and would return None instead of working)
    if not isinstance(filter_fields, str) and len(filter_fields) > 1:
        b = recfunctions.rename_fields(
            b, dict(zip(filter_fields_2, filter_fields)))
    selection = np.in1d(a, b)
    if return_selection:
        return selection
    else:
        return to_filter[selection]
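A self-contained sketch (toy arrays, made-up field names) of the core trick in filter_on_fields: np.in1d can only match structured rows when both sides carry identical field names and dtypes, which is exactly what the rename_fields call on the second array guarantees.

import numpy as np
from numpy.lib import recfunctions

a = np.array([(1, 2.0), (3, 4.0), (5, 6.0)], dtype=[('run', '<i8'), ('val', '<f8')])
b = np.array([(1, 2.0), (5, 6.0)], dtype=[('run_id', '<i8'), ('value', '<f8')])
b = recfunctions.rename_fields(b, {'run_id': 'run', 'value': 'val'})
selection = np.in1d(a, b)  # rows of a whose (run, val) pair also appears in b
print(a[selection])        # [(1, 2.) (5, 6.)]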
Example no. 11
 def show(self, header, newdata):
     # print("{0} new, {1} stations".format(header['num_sources'][0], header['num_stations'][0]))
     if newdata.shape[0] > 0:
         newdata = rename_fields(newdata, {'t':'time'})
         newdata['time'] -= self._t_offset
         self._dataq.append(newdata)
         self.send("B4D_LMAnewsources_live")
Example no. 12
def load_catalogue(path):

    sources = np.load(path)

    # Maintain backwards-compatibility

    maps = [
        ("ra", "ra_rad"),
        ("dec", "dec_rad"),
        ("Relative Injection Weight", "injection_weight_modifier"),
        ("Distance (Mpc)", "distance_mpc"),
        ("Name", "source_name"),
        ("Ref Time (MJD)", "ref_time_mjd"),
        ("Start Time (MJD)", "start_time_mjd"),
        ("End Time (MJD)", "end_time_mjd"),
    ]

    for (old_key, new_key) in maps:

        if old_key in sources.dtype.names:

            sources = rename_fields(sources, {old_key: new_key})

    if "base_weight" not in sources.dtype.names:

        base_weight = np.ones(len(sources))

        sources = append_fields(sources,
                                "base_weight",
                                base_weight,
                                usemask=False,
                                dtypes=[float])

    # Check that ra and dec are really in radians!

    if max(sources["ra_rad"]) > 2.0 * np.pi:
        raise Exception("Sources have Right Ascension values greater than 2 "
                        "pi. Are you sure you're not using degrees rather "
                        "than radians?")

    if max(abs(sources["dec_rad"])) > np.pi / 2.0:
        raise Exception("Sources have Declination values exceeding "
                        "+/- pi/2. Are you sure you're not using degrees "
                        "rather than radians?")

    # Check that all sources have a unique name

    if len(set(sources["source_name"])) < len(sources["source_name"]):

        raise Exception("Some sources in catalogue do not have unique "
                        "names. Please assign unique names to each source.")

    # Rescale 'base_weight'
    # sources["base_weight"] /= np.mean(sources["base_weight"])

    # Order sources
    sources = np.sort(sources, order="distance_mpc")

    return sources
Example no. 13
 def format_data_keys(exp, mc):
     exp = np_rfn.rename_fields(exp, {
         'logE': 'log_energy',
         'angErr': 'ang_err'
     })
     mc = np_rfn.rename_fields(
         mc, {
             'trueAzi': 'true_azi',
             'trueZen': 'true_zen',
             'logE': 'log_energy',
             'angErr': 'ang_err',
             'trueE': 'true_energy',
             'trueRa': 'true_ra',
             'trueDec': 'true_dec',
             'ow': 'mcweight'
         })
     return (exp, mc)
Example no. 14
def test_Osiris_Dev_Hdf5_ParticleFile_is_valid_backend(
        make_prt_file: Callable[[str, np.ndarray, Optional[str]], Path]):
    data = unstructured_to_structured(np.random.random((10, 4)))
    data = rename_fields(data, {"f0": "q"})

    prt_path = make_prt_file("osiris_dev_particles_hdf5", data)

    assert Osiris_Dev_Hdf5_ParticleFile.is_valid_backend(prt_path)
Example no. 15
def write_gfas(filename, data, indir=None, nside=None, survey="?",
               gaiaepoch=None):
    """Write a catalogue of Guide/Focus/Alignment targets.

    Parameters
    ----------
    filename : :class:`str`
        Output file name.
    data  : :class:`~numpy.ndarray`
        Array of GFAs to write to file.
    indir : :class:`str`, optional, defaults to None.
        Name of input Legacy Survey Data Release directory, write to header
        of output file if passed (and if not None).
    nside: :class:`int`, defaults to None.
        If passed, add a column to the GFAs array populated with HEALPixels
        at resolution `nside`.
    survey : :class:`str`, optional, defaults to "?"
        Written to output file header as the keyword `SURVEY`.
    gaiaepoch: :class:`float`, defaults to None
        Gaia proper motion reference epoch. If not None, write to header of
        output file. If None, default to an epoch of 2015.5.
    """
    # ADM rename 'TYPE' to 'MORPHTYPE'.
    data = rfn.rename_fields(data, {'TYPE': 'MORPHTYPE'})

    # ADM create header to include versions, etc.
    hdr = fitsio.FITSHDR()
    depend.setdep(hdr, 'desitarget', desitarget_version)
    depend.setdep(hdr, 'desitarget-git', gitversion())

    if indir is not None:
        depend.setdep(hdr, 'input-data-release', indir)
        # ADM note that if 'dr' is not in the indir DR
        # ADM directory structure, garbage will
        # ADM be rewritten gracefully in the header.
        drstring = 'dr'+indir.split('dr')[-1][0]
        depend.setdep(hdr, 'photcat', drstring)

    # ADM add HEALPix column, if requested by input.
    if nside is not None:
        theta, phi = np.radians(90-data["DEC"]), np.radians(data["RA"])
        hppix = hp.ang2pix(nside, theta, phi, nest=True)
        data = rfn.append_fields(data, 'HPXPIXEL', hppix, usemask=False)
        hdr['HPXNSIDE'] = nside
        hdr['HPXNEST'] = True

    # ADM add the type of survey (main, or commissioning "cmx") to the header.
    hdr["SURVEY"] = survey

    # ADM add the Gaia reference epoch, or pass 2015.5 if not included.
    hdr['REFEPOCH'] = {'name': 'REFEPOCH',
                       'value': 2015.5,
                       'comment': "Gaia Proper Motion Reference Epoch"}
    if gaiaepoch is not None:
        hdr['REFEPOCH'] = gaiaepoch

    fitsio.write(filename, data, extname='GFA_TARGETS', header=hdr, clobber=True)
Example no. 16
 def test_rename_fields(self):
     # Test rename fields
     a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
                  dtype=[('a', int),
                         ('b', [('ba', float), ('bb', (float, 2))])])
     test = rename_fields(a, {'a': 'A', 'bb': 'BB'})
     newdtype = [('A', int), ('b', [('ba', float), ('BB', (float, 2))])]
     control = a.view(newdtype)
     assert_equal(test.dtype, newdtype)
     assert_equal(test, control)
Example no. 19
 def test_rename_fields(self):
     # Test rename fields
     a = np.array(
         [(1, (2, [3.0, 30.0])), (4, (5, [6.0, 60.0]))],
         dtype=[("a", int), ("b", [("ba", float), ("bb", (float, 2))])],
     )
     test = rename_fields(a, {"a": "A", "bb": "BB"})
     newdtype = [("A", int), ("b", [("ba", float), ("BB", (float, 2))])]
     control = a.view(newdtype)
     assert_equal(test.dtype, newdtype)
     assert_equal(test, control)
Example no. 20
def read_photometry_file(header_path):
    """Read simulated light-curves from file

    Files are expected in pairs of a header file (`*HEAD.fits`) with target
    meta data and a photometry file (`*PHOT.fits`) with simulated light-curves.

    Args:
        header_path (str): Path of the header file

    Returns:
        - An array of data from the header file
        - An array of data from the photometry file
    """

    # Map of column names to names that snmachine/sncosmo will recognize
    column_name_mapping = {
        'MJD': 'mjd',
        'FLT': 'filter',
        'FIELD': 'field',
        'PHOTFLAG': 'photflag',
        'PHOTPROB': 'photprob',
        'FLUXCAL': 'flux',
        'FLUXCALERR': 'flux_error',
        'PSF_SIG1': 'psf_sig1',
        'SKY_SIG': 'sky_sig',
        'ZEROPT': 'zp',
        'SIM_MAGOBS': 'sim_magobs'
    }

    # Data types to use when reading in the photometry file
    dtypes = [
        ('MJD', '>f8'), ('FLT', 'U10'), ('FIELD', '|S12'),
        ('PHOTFLAG', '>i4'), ('PHOTPROB', '>f4'),
        ('FLUXCAL', '>f4'), ('FLUXCALERR', '>f4'),
        ('PSF_SIG1', '>f4'), ('SKY_SIG', '>f4'),
        ('ZEROPT', '>f4'), ('SIM_MAGOBS', '>f4')
    ]

    with fits.open(header_path) as header_hdulist:
        meta_data = header_hdulist[1].data

    phot_file_path = header_path.replace('HEAD', 'PHOT')
    with fits.open(phot_file_path) as photometry_hdulist:
        # Typecasting to an array avoids astropy bugs/performance issues
        phot_data = np.array(photometry_hdulist[1].data, dtype=dtypes)

        # Rename columns to sncosmo friendly format
        phot_data = rfn.rename_fields(phot_data, column_name_mapping)

        # Rename filters to sncosmo friendly format
        lowercase = np.char.strip(np.char.lower(phot_data['filter']))
        phot_data['filter'] = np.char.add('lsst', lowercase)

    return meta_data, phot_data
Example no. 21
    def updateNames(self, rename: dict) -> None:
        datas = self.field("laserdata")
        for i in range(len(datas)):
            remove = [
                name for name in datas[i].dtype.names if name not in rename
            ]
            datas[i] = rfn.drop_fields(datas[i], remove, usemask=False)
            datas[i] = rfn.rename_fields(datas[i], rename)

        self.setField("laserdata", datas)
        self.setElidedNames(datas[0].dtype.names)
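A hedged, self-contained sketch (hypothetical field names, zero-filled data) of the drop-then-rename pattern used above: every field not listed in the rename map is dropped, and the surviving fields are renamed in one pass.

import numpy as np
from numpy.lib import recfunctions as rfn

rename = {"P31": "phosphorus", "Fe56": "iron"}
data = np.zeros(2, dtype=[("P31", float), ("Fe56", float), ("scrap", float)])
remove = [name for name in data.dtype.names if name not in rename]
data = rfn.drop_fields(data, remove, usemask=False)
data = rfn.rename_fields(data, rename)
print(data.dtype.names)  # ('phosphorus', 'iron')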
Example no. 22
def grl_loader(season):

    if isinstance(season.grl_path, list):
        grl = np.sort(np.array(
            np.concatenate([np.load(x) for x in season.grl_path])),
                      order="run")
    else:
        grl = np.load(season.grl_path)

    # Check if bad runs are found in GRL
    try:
        if np.sum(~grl["good_i3"]) == 0:
            pass
        else:
            logger.error("Trying to load", season)
            logger.error("The following runs are included:")
            logger.error(grl[~grl["good_i3"]])
            raise Exception("Runs marked as 'bad' are found in Good Run List")
    except ValueError:
        logger.warning(
            "No field called 'good_i3' found in GoodRunList. "
            "Cannot check if all runs in GoodRunList are actually good.")

    if "length" not in grl.dtype.names:

        if "livetime" in grl.dtype.names:
            grl = rename_fields(grl, {"livetime": "length"})
        else:
            raise Exception("No recognised Livetime field found in "
                            "GoodRunList. (Searched for 'livetime' and "
                            "'length')")

    # Check if there are events in runs not found in GRL

    exp_data = season.get_exp_data()
    if "run" in exp_data.dtype.names:
        bad_runs = [x for x in set(exp_data["run"]) if x not in grl["run"]]
        if len(bad_runs) > 0:
            raise Exception(
                "Trying to use GoodRunList, but events in data have "
                "runs that are not included on this GoodRunList. \n"
                "Please check to make sure both the GoodRunList, "
                "and the event selection, are correct. \n" +
                "The following runs are affected: \n" + str(bad_runs))

    # Sometimes, inexplicably, the runs come in a random order rather than
    # ascending order. This deals with that.

    grl = np.sort(grl, order="run")

    del exp_data

    return grl
Example no. 23
def write_fom_targets(targets, FoM, desi_target, bgs_target, mws_target):
    """Return new targets array with added/renamed columns including ELG Figure of Merit

    Args:
        targets: numpy structured array of targets
        FoM: Figure of Merit calculated by apply_XD_globalerror
        desi_target: 1D array of target selection bit flags
        bgs_target: 1D array of target selection bit flags
        mws_target: 1D array of target selection bit flags

    Returns:
        New targets structured array with those changes

    Notes:

        Finalize target list by:

        * renaming OBJID -> BRICK_OBJID (it is only unique within a brick)
        * Adding new columns:

          - TARGETID: unique ID across all bricks
          - FoM: ELG XD Figure of Merit
          - DESI_TARGET: target selection flags
          - MWS_TARGET: target selection flags
          - BGS_TARGET: target selection flags
    """
    ntargets = len(targets)
    assert ntargets == len(FoM)
    assert ntargets == len(desi_target)
    assert ntargets == len(bgs_target)
    assert ntargets == len(mws_target)

    #- OBJID in tractor files is only unique within the brick; rename and
    #- create a new unique TARGETID
    targets = rfn.rename_fields(targets, {'OBJID': 'BRICK_OBJID'})
    targetid = targets['BRICKID'].astype(
        np.int64) * 1000000 + targets['BRICK_OBJID']

    #- Add new columns: TARGETID, TARGETFLAG, NUMOBS
    targets = rfn.append_fields(
        targets,
        ['TARGETID', 'DESI_TARGET', 'BGS_TARGET', 'MWS_TARGET', 'FOM'],
        [targetid, desi_target, bgs_target, mws_target, FoM],
        usemask=False)

    io.write_targets('FoM.fits',
                     targets,
                     qso_selection='irrelevant',
                     sandboxcuts=True)

    print('{} targets written to {}'.format(len(targets), 'FoM.fits'))

    return
Example no. 24
def read_nest_output_device_data_from_ascii_to_dict(filepath):
    """This function reads data from a NEST recording device ascii file into an events dictionary
       Arguments:
        - filepath: absolute or relative path to the file (string)
       Returns:
        the events dictionary of the recorded data
    """
    recarray = rename_fields(
        np.genfromtxt(filepath, names=True, skip_header=2), {
            "sender": "senders",
            "time_ms": "times"
        })
    return {name: ensure_list(recarray[name]) for name in recarray.dtype.names}
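A self-contained sketch (toy two-column text, no skip_header) of the same pattern: genfromtxt takes field names from the header row, and rename_fields maps them onto the keys the caller expects.

import io
import numpy as np
from numpy.lib.recfunctions import rename_fields

txt = io.StringIO("sender time_ms\n1 0.1\n2 0.3\n")
recarray = np.genfromtxt(txt, names=True)
recarray = rename_fields(recarray, {"sender": "senders", "time_ms": "times"})
print(recarray["senders"], recarray["times"])  # [1. 2.] [0.1 0.3]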
Example no. 25
    def load_array(self, d, file):

        import time
        t0 = time.time()

        if self.params['has_sheared'] & (file == 'shapefile'):
            d['flags_1p'] = 'flags_select_1p'
            d['flags_1m'] = 'flags_select_1m'
            d['flags_2p'] = 'flags_select_2p'
            d['flags_2m'] = 'flags_select_2m'

        if self.params['pdf_type'] == 'pdf':
            keys = [
                key for key in d.keys()
                if (d[key] is not None) & (key != 'pzstack')
            ]
        else:
            keys = [key for key in d.keys() if (d[key] is not None)]

        if 'objid' in keys:
            dtypes = [('objid', 'i8')]
        else:
            raise ValueError('missing object id in ' + file)
        dtypes += [(key, 'f8') for key in keys if (key != 'objid')]
        if self.params['pdf_type'] == 'pdf':
            dtypes += [('pzstack_' + str(i), 'f8')
                       for i in range(len(self.params['pdf_z']))]

        fits = fio.FITS(self.params[file])[-1]
        array = fits.read(columns=[d[key] for key in keys])

        array = rename_fields(array, {v: k for k, v in d.items()})

        if ('weight' not in array.dtype.names) & (file == 'shapefile'):
            array = append_fields(array,
                                  'weight',
                                  np.ones(len(array)),
                                  usemask=False)

        if self.params['pdf_type'] == 'pdf':
            for i in range(len(self.params['pdf_z'])):
                array['pzstack' + str(i)] = fits.read(columns=d['pzstack'] +
                                                      str(i))

        if np.any(np.diff(array['objid']) < 1):
            raise ValueError('misordered or duplicate ids in ' + file)

        return array
Example no. 26
def test_Osiris_Dev_Hdf5_ParticleFile_properties(
        make_prt_file: Callable[[str, np.ndarray, Optional[str]], Path]):
    data = unstructured_to_structured(np.random.random((10, 4)))
    data = rename_fields(data, {"f0": "q"})

    prt_path = make_prt_file("osiris_dev_particles_hdf5",
                             data,
                             name="some particles")
    backend = Osiris_Dev_Hdf5_ParticleFile(prt_path)

    assert backend.name == "osiris_dev_particles_hdf5"
    assert backend.location == prt_path
    assert backend.dataset_name == "some_particles"
    assert backend.dataset_label == "some particles"

    assert backend.quantity_names == ["f1", "f2", "f3", "q"]
    assert backend.quantity_labels == [
        "f1 label",
        "f2 label",
        "f3 label",
        "q label",
    ]
    assert backend.quantity_units == [
        "f1 unit",
        "f2 unit",
        "f3 unit",
        "q unit",
    ]

    assert backend.shape == (10, )
    assert backend.dtype == np.dtype([
        ("f1", float),
        ("f2", float),
        ("f3", float),
        ("q", float),
    ])

    # taken from function 'make_osiris_444_particles_hdf'
    assert backend.iteration == 12345
    assert np.isclose(backend.time_step, -321.9)
    assert backend.time_unit == "time unit"

    # check reading of data
    for indexing in (np.s_[0], np.s_[-1], np.s_[:], np.s_[3:7], np.s_[4:1]):
        expected_data = data[indexing]
        np.testing.assert_array_equal(backend.get_data((indexing, )),
                                      expected_data)
Example no. 27
    def parse_data():

        logger = logging.Logger("default_logger")
        logger.setLevel("DEBUG")

        with fits.open(os.path.join(alertstack_data_dir, "table-4LAC-DR2-h.fits")) as hdul: # table_4LAC
            cat = hdul[1].data
        cat = np.sort(cat, order="Energy_Flux100")[::-1]#Energy_Flux100

        logging.info("Selecting blazars from 4FGL catalogue")

        blazar_class = ["bll", "BLL", "fsrq", "FSRQ", "bcu", "BCU"]

        logging.info("Using all sources from class {0}".format(blazar_class))
        #
        mask = np.array([x["CLASS"] in blazar_class for x in cat])
        blazars = np.array(cat[mask])

        cut_e = -11.6
        mask_e = np.array([x["Energy_Flux100"]>10**cut_e for x in blazars])
        blazars = np.array(blazars[mask_e])

        maps = [
            ("RAJ2000", "ra_rad"),
            ("DEJ2000", "dec_rad"),
        ]

        for (old_key, new_key) in maps:

            blazars = rename_fields(blazars, {old_key: new_key})

        mask_GP = [is_outside_GP(blazars['ra_rad'][i],blazars['dec_rad'][i]) for i in range(len(blazars))]
        blazars = blazars[mask_GP] 
        
        ####################
        #fluxes = blazars['Energy_Flux100'].byteswap().newbyteorder()
        #blazar_df = pd.DataFrame({'flux': fluxes})
        #blazar_df['bins'] = pd.cut(blazar_df['flux'],50)
        #blazar_df2 = blazar_df.groupby('bins').agg({'flux': sum, 'bins': 'count'}).rename(columns = {'bins': 'count', 'flux':'background'}).reset_index()
        #blazar_df2['flux2'] = (len(blazars)/sum(blazar_df2['background']))*blazar_df2['background']
        #blazar_df2['s/b'] = blazar_df2['flux2']/(blazar_df2['count'])
        #blazar_df = blazar_df.merge(blazar_df2,how='left',on='bins')
        #blazars = np.lib.recfunctions.append_fields(blazars, 's/b', blazar_df['s/b'].values)

        logging.info("Found {0} sources in total".format(len(blazars)))

        return blazars
Example no. 28
def load_labels(fname='cocotools/coords/standard_labels.csv', usecols=None):
    """Load the standard labels file; return a record array with the atlas.
    """
    # Converters for the fields we're interested in: strings without whitespace
    # and floats
    cleanstr = lambda x: str(x).strip()
    conv = {
        0: cleanstr,
        1: cleanstr,
        2: cleanstr,
        3: cleanstr,
        4: float,
        5: float,
        6: float,
        7: cleanstr,
        8: cleanstr,
        9: int
    }
    # Exclude comments column by default.
    usecols = range(7) + [8] if usecols is None else usecols
    with open(fname) as f:
        # Read the first line for the names
        all_names = [n.strip() for n in f.next().split(',')]
        # genfromtxt is buggy, it gets confused with commas inside strings
        # use the stdlib csv reader and rebuild lines with | as separator,
        # which genfromtxt can then use
        data = ['|'.join(x) for x in list(csv.reader(f))]
        # Older versions of numpy need an actual filehandle, not an arbitrary
        # iterable.
        with tempfile.TemporaryFile() as fdata:
            fdata.write('\n'.join(data))
            fdata.seek(0)
            # Now, let genfromtxt iterate over the rest of the file
            araw = np.genfromtxt(fdata,
                                 delimiter='|',
                                 usecols=usecols,
                                 converters=conv)
    # Rename the fields so the dtype has more useful names than f0, f1, etc.
    # Also make it a recarray for more convenient use further down
    names = [all_names[i] for i in usecols]
    renamer = dict(zip(araw.dtype.names, names))
    atlas = rfn.rename_fields(araw, renamer).view(np.recarray)

    # Flip y axis to have frontal areas on the left in axial projections
    #atlas.y *= -1
    return atlas
Example no. 29
def filter_on_fields(to_filter, for_filter, filter_fields, filter_fields_2=None, return_selection=False):
    """Returns entries of to_filter whose combination of the filter_fields values are present in for_filter.
    filter_fields_2: names of filter_fields in for_filter (if different than in to_filter)
    If return_selection, will instead return boolean selection array for to_filter
    """
    a = np.array(fields_view(to_filter, filter_fields))
    if filter_fields_2 is None:
        filter_fields_2 = filter_fields
    b = np.array(fields_view(for_filter, filter_fields_2))
    # Rename the fields, if needed
    # If only one field is selected, this won't be needed (and would return None instead of working)
    if not isinstance(filter_fields, str) and len(filter_fields) > 1:
        b = recfunctions.rename_fields(b, dict(zip(filter_fields_2, filter_fields)))
    selection = np.in1d(a, b)
    if return_selection:
        return selection
    else:
        return to_filter[selection]
Example no. 30
    def test_complex_dataset(self):
        """
        Tests for a dataset containing recorded data for a multiple
        configurations.
        """
        # define multiple configurations for one dataset
        self.mod.knobs.n_configs = 3

        # -- dset with 1 shotnum                                    ----
        self.mod.knobs.sn_size = 1
        self.assertInRangeSN()
        self.assertOutRangeSN()

        # -- typical dset with sequential shot numbers              ----
        self.mod.knobs.sn_size = 100
        self.assertInRangeSN()
        self.assertOutRangeSN()

        # -- dset with non-sequential shot numbers                  ----
        self.mod.knobs.sn_size = 100
        data = self.cgroup["Run time list"][...]
        sn_arr = np.append(
            np.arange(5, 25, dtype=np.uint32),
            np.append(np.arange(51, 111, dtype=np.uint32),
                      np.arange(150, 170, dtype=np.uint32)),
        )
        data["Shot number"][0::3] = sn_arr
        data["Shot number"][1::3] = sn_arr
        data["Shot number"][2::3] = sn_arr
        del self.cgroup["Run time list"]
        self.cgroup.create_dataset("Run time list", data=data)
        self.assertInRangeSN()
        self.assertOutRangeSN()

        # -- dset without a configuration fields                    ----
        self.mod.knobs.sn_size = 50
        data = self.cgroup["Run time list"][...]
        data = rfn.rename_fields(data, {"Configuration name": "oops"})
        del self.cgroup["Run time list"]
        self.cgroup.create_dataset("Run time list", data=data)
        cdset = self.cgroup["Run time list"]
        with self.assertRaises(ValueError):
            build_shotnum_dset_relation(np.empty(5, dtype=np.uint32), cdset,
                                        "Shot number", self.map, "config01")
Example no. 31
def rename(array: np.ndarray, original: Union[list, str],
           new: Union[list, str]) -> np.ndarray:
    """
    Rename the structured array.

    :param array: np.ndarray(structured), array that houses the columns/fields to be renamed.
    :param original: list or str, the original column/s to be renamed.
    :param new: list or str, the new column/s names.
    :return : np.ndarray(structured), the renamed structured array.
    """

    if isinstance(original, str):
        original = [original]
        new = [new]
    mapping = {}
    for ori, ne in zip(original, new):
        mapping[ori] = ne

    return rfn.rename_fields(array, mapping)
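Hypothetical usage of the rename() helper above (assuming it and its rfn import are in scope), showing both the single-string and the list call forms on a small structured array.

import numpy as np

arr = np.zeros(3, dtype=[('objID', 'i8'), ('gMag', 'f8')])
arr = rename(arr, 'objID', 'id')        # single field as a plain string
arr = rename(arr, ['gMag'], ['g_mag'])  # list form
print(arr.dtype.names)  # ('id', 'g_mag')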
Example no. 32
def make_atlas(kind=pht00, usecols=None):
    """Load the standard labels files returning a record array with the atlas.
    """

    # Converters for the fields we're interested in: strings without whitespace
    # and floats
    fname = kind['fname']

    if fname.endswith('txt'):
        from matplotlib import mlab
        return mlab.csv2rec(fname, delimiter='\t')

    conv = kind['converters']
    usecols = kind['cols'] if usecols is None else usecols

    with open(fname) as f:
        # Read the first line for the names
        all_names = [n.strip() for n in f.next().split(',')]
        # genfromtxt is buggy, it gets confused with commas inside strings
        # use the stdlib csv reader and rebuild lines with | as separator,
        # which genfromtxt can then use
        data = ['|'.join(x) for x in list(csv.reader(f))]
        # Older versions of numpy need an actual filehandle, not an arbitrary
        # iterable.
        with tempfile.TemporaryFile() as fdata:
            fdata.write('\n'.join(data))
            fdata.seek(0)
            # Now, let genfromtxt iterate over the rest of the file
            araw = np.genfromtxt(fdata,
                                 delimiter='|',
                                 usecols=usecols,
                                 converters=conv)

    # Rename the fields so the dtype has more useful names than f0, f1, etc.
    # Also make it a recarray for more convenient use further down
    names = [all_names[i] for i in usecols]
    renamer = dict(zip(araw.dtype.names, names))
    atlas = rfn.rename_fields(araw, renamer).view(np.recarray)

    # Flip y axis to have frontal areas on the left in axial projections
    #atlas.y *= -1
    return atlas
Example no. 33
    def test_add_start_and_length_modifications(self):
        """test add_raw_start_and_raw_length_to_events and add_start_and_length_to_events methods"""

        # get input data
        sampling_freq = self.dna_handle.sample_rate
        start_time = self.dna_handle.raw_attributes['start_time']
        event_table = self.dna_handle.get_basecall_data()
        self.assertTrue(check_event_table_time(event_table),
                        "Invalid initial start times")

        # add raw fields
        event_table = add_raw_start_and_raw_length_to_events(
            event_table, start_time, sampling_freq)
        raw_starts = event_table['raw_start'].tolist()
        raw_lengths = event_table['raw_length'].tolist()

        # save old fields
        event_table = rename_fields(event_table, {
            'start': 'original_start',
            'length': 'original_length'
        })

        # add non-raw fields
        event_table = add_start_and_length_to_events(event_table, start_time,
                                                     sampling_freq)
        self.assertTrue(
            check_event_table_time(event_table,
                                   min_difference=1.0 / sampling_freq),
            "Invalid modified start times")

        # get fields
        original_starts = event_table['original_start'].tolist()
        original_lengths = event_table['original_length'].tolist()
        starts = event_table['start'].tolist()
        lengths = event_table['length'].tolist()

        # compare elementwise
        places = int(math.log10(sampling_freq)) + 1
        for original_start, start in zip(original_starts, starts):
            self.assertAlmostEqual(original_start, start, places=places)
        for original_length, length in zip(original_lengths, lengths):
            self.assertAlmostEqual(original_length, length, places=places)
Example no. 34
def finalize(targets, desi_target, bgs_target, mws_target, sky=0):
    """Return new targets array with added/renamed columns

    Args:
        targets: numpy structured array of targets
        kwargs: colname=array of columns to add
        desi_target: 1D array of target selection bit flags
        bgs_target: 1D array of target selection bit flags
        mws_target: 1D array of target selection bit flags
        sky: Pass 1 to indicate these are blank sky targets

    Returns new targets structured array with those changes

    Finalize target list by:
      * renaming OBJID -> BRICK_OBJID (it is only unique within a brick)
      * Adding new columns:

        - TARGETID: unique ID across all bricks
        - LVM_TARGET: target selection flags
        - MWS_TARGET: target selection flags
        - BGS_TARGET: target selection flags
    """
    ntargets = len(targets)
    assert ntargets == len(desi_target)
    assert ntargets == len(bgs_target)
    assert ntargets == len(mws_target)

    #- OBJID in tractor files is only unique within the brick; rename and
    #- create a new unique TARGETID
    targets = rfn.rename_fields(targets, {'OBJID':'BRICK_OBJID'})
    targetid = encode_targetid(objid=targets['BRICK_OBJID'],
                               brickid=targets['BRICKID'],
                               release=targets['RELEASE'],
                               sky=sky)

    #- Add new columns: TARGETID, TARGETFLAG, NUMOBS
    targets = rfn.append_fields(targets,
        ['TARGETID', 'LVM_TARGET', 'BGS_TARGET', 'MWS_TARGET'],
        [targetid, desi_target, bgs_target, mws_target], usemask=False)

    return targets
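A hedged, self-contained illustration (toy dtype, made-up values, encode_targetid omitted) of the rename-then-append pattern in finalize: rename_fields relabels OBJID without touching the other columns, and append_fields(..., usemask=False) attaches the new columns to the same rows.

import numpy as np
from numpy.lib import recfunctions as rfn

targets = np.zeros(3, dtype=[('OBJID', 'i4'), ('BRICKID', 'i4')])
targets = rfn.rename_fields(targets, {'OBJID': 'BRICK_OBJID'})
targets = rfn.append_fields(targets,
                            ['TARGETID', 'LVM_TARGET'],
                            [np.arange(3, dtype='i8'), np.zeros(3, dtype='i8')],
                            usemask=False)
print(targets.dtype.names)  # ('BRICK_OBJID', 'BRICKID', 'TARGETID', 'LVM_TARGET')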
Example no. 35
def GetCOSMOS23(maxdist=10.0):

    morph = esutil.io.read('cosmos_morph_cassata_1.1.fits')
    morph = rec.rename_fields(morph, {'RA':'ra', 'DEC':'dec'})
    cuts = GetFiducialCuts() 
    cut = XYCuts(morph['MAG_AUTO_ACS'], np.log10(morph['R_HALF']*0.03), cuts)
    morph = morph[cut]

    m1 = np.zeros( (len(morph),2) )
    m1[:,0] = morph['ra']
    m1[:,1] = morph['dec']
    np.savetxt('morph-pre-mask.txt', m1)

    #enrique = np.loadtxt('zCOSMOS-rndnb104Ia20.004-22.525.023.024.0zCOSMOS-Bext3.dat')
    #e = np.zeros( len(enrique), dtype=[('ra',np.float32), ('dec',np.float32)])
    #e['ra'] = enrique[:,2]
    #e['dec'] = enrique[:,1]

    enrique = np.loadtxt('zCOSMOS-mask.dat')
    e = np.zeros( len(enrique), dtype=[('ra',np.float32), ('dec',np.float32)])
    e['ra'] = enrique[:,1]
    e['dec'] = enrique[:,0]


    masked_morph = ApplyMaskRandoms(morph, e, cat_ra='ra', cat_dec='dec', rand_ra='ra', rand_dec='dec', maxdist=maxdist)
    GeoCuts = GetGeometryCuts()
    mcut = XYCuts(masked_morph['ra'], masked_morph['dec'], GeoCuts)
    masked_morph = masked_morph[mcut]
    
    m2 = np.zeros( (len(masked_morph),2) )
    m2[:,0] = masked_morph['ra']
    m2[:,1] = masked_morph['dec']
    np.savetxt('morph-post-mask.txt', m2)


    ecut = XYCuts(e['ra'], e['dec'], GeoCuts)
    e = e[ecut]

    u = UniformRandom(GeoCuts)
    u = ApplyMaskRandoms(u, e, cat_ra='ra', cat_dec='dec', rand_ra='ra', rand_dec='dec', maxdist=maxdist)
    return masked_morph, e, u
Example no. 36
    def parse_data():

        logger = logging.Logger("default_logger")
        logger.setLevel("DEBUG")

        with fits.open(os.path.join(alertstack_data_dir,
                                    "table_4LAC.fits")) as hdul:
            cat = hdul[1].data
        cat = np.sort(cat, order="Flux1000")[::-1]

        logging.info("Selecting blazars from 4FGL catalogue")

        blazar_class = ["bll", "BLL", "fsrq", "FSRQ", "bcu", "BCU"]

        logging.info("Using all sources from class {0}".format(blazar_class))
        #
        mask = np.array([x["CLASS"] in blazar_class for x in cat])
        blazars = np.array(cat[mask])

        cut_e = -11.6
        mask_e = np.array([x["Energy_Flux100"] > 10**cut_e for x in blazars])
        blazars = np.array(blazars[mask_e])

        maps = [
            ("RAJ2000", "ra_rad"),
            ("DEJ2000", "dec_rad"),
        ]

        for (old_key, new_key) in maps:

            blazars = rename_fields(blazars, {old_key: new_key})

        mask_GP = [
            is_outside_GP(blazars['ra_rad'][i], blazars['dec_rad'][i])
            for i in range(len(blazars))
        ]
        blazars = blazars[mask_GP]

        logging.info("Found {0} sources in total".format(len(blazars)))

        return blazars
Example no. 37
def make_atlas(kind=pht00, usecols=None):
    """Load the standard labels files returning a record array with the atlas.
    """

    # Converters for the fields we're interested in: strings without whitespace
    # and floats
    fname = kind['fname']

    if fname.endswith('txt'):
        from matplotlib import mlab
        return mlab.csv2rec(fname, delimiter='\t')
    
    conv = kind['converters']
    usecols = kind['cols'] if usecols is None else usecols
    
    with open(fname) as f:
        # Read the first line for the names
        all_names = [n.strip() for n in f.next().split(',')]
        # genfromtxt is buggy, it gets confused with commas inside strings
        # use the stdlib csv reader and rebuild lines with | as separator,
        # which genfromtxt can then use
        data = ['|'.join(x) for x in list(csv.reader(f))]
        # Older versions of numpy need an actual filehandle, not an arbitrary
        # iterable.
        with tempfile.TemporaryFile() as fdata:
            fdata.write('\n'.join(data))
            fdata.seek(0)
            # Now, let genfromtxt iterate over the rest of the file
            araw = np.genfromtxt(fdata, delimiter='|', usecols=usecols,
                                 converters=conv)

    # Rename the fields so the dtype has more useful names than f0, f1, etc.
    # Also make it a recarray for more convenient use further down
    names = [all_names[i] for i in usecols]
    renamer = dict(zip(araw.dtype.names, names))
    atlas = rfn.rename_fields(araw, renamer).view(np.recarray)

    # Flip y axis to have frontal areas on the left in axial projections
    #atlas.y *= -1
    return atlas
Example no. 38
def finalize(targets, desi_target, bgs_target, mws_target):
    """Return new targets array with added/renamed columns
    
    Args:
        targets: numpy structured array of targets
        kwargs: colname=array of columns to add
        desi_target: 1D array of target selection bit flags
        bgs_target: 1D array of target selection bit flags
        mws_target: 1D array of target selection bit flags
        
    Returns new targets structured array with those changes
    
    Finalize target list by:
      * renaming OBJID -> BRICK_OBJID (it is only unique within a brick)
      * Adding new columns:
    
        - TARGETID: unique ID across all bricks
        - DESI_TARGET: target selection flags
        - MWS_TARGET: target selection flags
        - BGS_TARGET: target selection flags        
    """
    ntargets = len(targets)
    assert ntargets == len(desi_target)
    assert ntargets == len(bgs_target)
    assert ntargets == len(mws_target)
    
    #- OBJID in tractor files is only unique within the brick; rename and
    #- create a new unique TARGETID
    targets = rfn.rename_fields(targets, {'OBJID':'BRICK_OBJID'})
    targetid = targets['BRICKID'].astype(np.int64)*1000000 + targets['BRICK_OBJID']

    #- Add new columns: TARGETID, TARGETFLAG, NUMOBS
    targets = rfn.append_fields(targets,
        ['TARGETID', 'DESI_TARGET', 'BGS_TARGET', 'MWS_TARGET'],
        [targetid, desi_target, bgs_target, mws_target], usemask=False)

    return targets
Example no. 39
File: mod.py Project: kknet/rqalpha
            def get_price(self, order_book_id, start, end):
                """
                :param order_book_id: e.g. 000002.XSHE
                :param start: 20160101
                :param end: 20160201
                :returns:
                :rtype: numpy.rec.array
                """
                # start = get_date_from_int(start)
                # end = get_date_from_int(end)
                # bar_count = (end - start).days

                # TODO: this is slow, make it run faster
                bar_count = 1000
                origin_bars = bars = self.history_bars(order_book_id, bar_count, "1d")

                dtype = copy.deepcopy(bars.dtype)
                names = list(dtype.names)
                names[0] = "date"
                dtype.names = names
                bars = rfn.rename_fields(bars, {"datetime": "date"})
                bars["date"] = origin_bars["datetime"] / 1000000

                return bars
Example no. 40
    bdir = '/gpfs01/astro/workarea/esuchyta/git-repos/BalrogDirs/2015-Nov/BalrogDB/%s'%(name)
    dfile = os.path.join(bdir, '%s-des.fits'%(name))
    outdir = os.path.join(ddir, name)

    if MPI.COMM_WORLD.Get_rank()==0:
        if not os.path.exists(outdir):
            os.makedirs(outdir)

    if not os.path.exists(zzfile):
        if MPI.COMM_WORLD.Get_rank()==0:
            z = fitsio.read(zfile, ext=-1)
            z = np.sort(z, order='COADD_OBJECTS_ID')
            names = {}
            for n in z.dtype.names:
                names[n] = n.lower()
            z = rec.rename_fields(z, names)
            f = fitsio.FITS(zzfile, 'rw')
            f.write(z)


    if MPI.COMM_WORLD.Get_rank()==0:
        size = fitsio.read_header(zzfile, ext=-1)['NAXIS2']
        z = fitsio.read(zzfile, ext=-1, columns=['COADD_OBJECTS_ID'])
        num = size // inc
        print(num)
        starts = np.arange(num)*inc
        ends = starts + inc
        mod = size%inc
        if mod != 0:
            starts = np.append(starts, ends[-1])
            ends = np.append(ends, ends[-1]+mod)
Example no. 41
def get_triggers(channel, etg, segments, cache=None, snr=None, frange=None,
                 columns=None, raw=False, **kwargs):
    """Get triggers for the given channel
    """
    # get table from etg
    try:
        Table = TABLE[etg.lower()]
    except KeyError as e:
        e.args = ('Unknown ETG %r, cannot map to LIGO_LW Table class' % etg,)
        raise
    tablename = strip_table_name(Table.tableName)
    # get default columns for this table
    if columns is None:
        for key in COLUMNS:
            if issubclass(Table, key):
                columns = COLUMNS[key][:]
                break
    if 'channel' in columns:
        columns.remove('channel')  # columns is a list, so remove by value

    # find triggers
    if cache is None:
        cache = find_trigger_files(channel, etg, segments, **kwargs)

    # read cache
    trigs = lsctables.New(Table, columns=columns)
    cache = cache.unique()
    cache.sort(key=lambda x: x.segment[0])
    for segment in segments:
        if len(cache.sieve(segment=segment)):
            if tablename.endswith('_inspiral'):
                filt = lambda t: float(t.get_end()) in segment
            else:
                filt = lambda t: float(t.get_peak()) in segment
            trigs.extend(Table.read(cache.sieve(segment=segment), filt=filt))

    # format table as numpy.recarray
    recarray = trigs.to_recarray(columns=columns)

    # filter
    if snr is not None:
        recarray = recarray[recarray['snr'] >= snr]
    if tablename.endswith('_burst') and frange is not None:
        recarray = recarray[
            (recarray['peak_frequency'] >= frange[0]) &
            (recarray['peak_frequency'] < frange[1])]

    # return basic table if 'raw'
    if raw:
        return recarray

    # otherwise spend the rest of this function converting functions to
    # something useful for the hveto core analysis
    addfields = {}
    dropfields = []

    # append channel to all events
    columns.append('channel')
    addfields['channel'] = numpy.repeat(channel, recarray.shape[0])

    # rename frequency column
    if tablename.endswith('_burst'):
        recarray = recfunctions.rename_fields(
            recarray, {'peak_frequency': 'frequency'})
        idx = columns.index('peak_frequency')
        columns.pop(idx)
        columns.insert(idx, 'frequency')

    # map time to its own column
    if tablename.endswith('_inspiral'):
        tcols = ['end_time', 'end_time_ns']
    elif tablename.endswith('_burst'):
        tcols = ['peak_time', 'peak_time_ns']
    else:
        tcols = None
    if tcols:
        times = recarray[tcols[0]] + recarray[tcols[1]] * 1e-9
        addfields['time'] = times
        dropfields.extend(tcols)
        columns = ['time'] + columns[2:]

    # add and remove fields as required
    if addfields:
        names, data = zip(*addfields.items())
        recarray = recfunctions.rec_append_fields(recarray, names, data)
        recarray = recfunctions.rec_drop_fields(recarray, dropfields)

    return recarray[columns]
Example no. 42
radec  = s2dec('01 10 16.2','-02 18 50')
q0107b = rf.rec_append_fields(q0107b,'RA' ,radec[0]*np.ones(len(q0107b)))
q0107b = rf.rec_append_fields(q0107b,'DEC',radec[1]*np.ones(len(q0107b)))
radec  = s2dec('01 10 14.52','-02 16 57.5')
q0107c = rf.rec_append_fields(q0107c,'RA' ,radec[0]*np.ones(len(q0107c)))
q0107c = rf.rec_append_fields(q0107c,'DEC',radec[1]*np.ones(len(q0107c)))
radec = s2dec('10 22 18.99','01 32 18.8')
q1022 = rf.rec_append_fields(q1022,'RA' ,radec[0]*np.ones(len(q1022)))
q1022 = rf.rec_append_fields(q1022,'DEC',radec[1]*np.ones(len(q1022)))


#read galaxies
VVDSF10 = readtxt('/home/ntejos/catalogs/VVDS/VVDS_F10.cat',readnames=True)
VVDSF14 = readtxt('/home/ntejos/catalogs/VVDS/VVDS_F14.cat',readnames=True)
VVDSF22 = readtxt('/home/ntejos/catalogs/VVDS/VVDS_F22.cat',readnames=True) 
VVDSF10 = rf.rename_fields(VVDSF10,{'Z':'ZGAL','MAG_AUTO_I':'MAG','ALPHA_J2000':'RA','DELTA_J2000':'DEC'})
VVDSF14 = rf.rename_fields(VVDSF14,{'Z':'ZGAL','MAG_AUTO_I':'MAG','ALPHA_J2000':'RA','DELTA_J2000':'DEC'})
VVDSF22 = rf.rename_fields(VVDSF22,{'Z':'ZGAL','MAG_AUTO_I':'MAG','ALPHA_J2000':'RA','DELTA_J2000':'DEC'})

names    = 'RA,DEC,ZGAL,MAG'
usecols  = [0,1,2,3]
VIMOSQ1  = readtxt('/home/ntejos/catalogs/Q0107/VIMOS.txt',names=names,usecols=usecols,comment='#')
DEIMOSQ1 = readtxt('/home/ntejos/catalogs/Q0107/DEIMOS.txt',names=names,usecols=usecols,comment='#')
CFHTQ1   = readtxt('/home/ntejos/catalogs/Q0107/CFHT.txt',names=names,usecols=usecols,comment='#')
GMOSQ1   = readtxt('/home/ntejos/catalogs/Q0107/GMOS.txt',names=names,usecols=usecols,comment='#')
VIMOSF1005 = readtxt('/home/ntejos/catalogs/J1005/VIMOS.txt',names=names,usecols=usecols,comment='#')
VIMOSF1022 = readtxt('/home/ntejos/catalogs/J1022/VIMOS.txt',names=names,usecols=usecols,comment='#')


#clean galaxy catalog
galaxies=[VVDSF10,VVDSF14,VVDSF22,VIMOSF1005,VIMOSF1022]
Example no. 43
def GetCOSMOS21(maxdist=10.0, usemorph=False, jfile=None, hfile=None, usepz=False):
    
    if hfile is not None:
        morph = esutil.io.read(hfile)
        morph = rec.rename_fields(morph, {'RA':'ra', 'DEC':'dec'})

    elif jfile is not None:
        morph = esutil.io.read(jfile)
        print(len(morph))

    elif usemorph:
        morph = esutil.io.read('cosmos_morph_cassata_1.1.fits')
        morph = rec.rename_fields(morph, {'RA':'ra', 'DEC':'dec'})
    
    elif usepz:
        morph = esutil.io.read('cosmos_zphot_mag25.fits')
        morph = rec.rename_fields(morph, {'imag':'i_mag', 'rmag':'r_mag'})
        cut = (morph['auto_flag'] > -1)

    else:
        morph = esutil.io.read('cosmos_phot_20060103.fits')
        morph = rec.rename_fields(morph, {'RA':'ra', 'DEC':'dec'})
        #cut = (morph['star']==0) & (morph['auto_flag']==1) & (morph['i_mask']==0)
        #cut = (morph['star']==0) & (morph['auto_flag']==1) & (morph['i_mask']==0) & (morph['blend_mask']==0)
        #cut = (morph['star']==0) & (morph['auto_flag']==1) & (morph['blend_mask']==0) & (morph['i_mask']==0) & (morph['z_mask']==0) & (morph['V_mask']==0) & (morph['B_mask']==0)
        cut = (morph['blend_mask'] == 0) &  (morph['star'] == 0) & (morph['auto_flag'] > -1)
        #cut = (morph['star']==0) & (morph['i_mask']==0)
        morph = morph[cut]

    '''
    fig, ax = plt.subplots(1,1)
    ax.scatter(morph['ra'], morph['dec'], s=0.2, lw=0)
    plt.show()
    sys.exit()
    '''

    '''
    cuts = GetBrightCuts() 

    #cut = XYCuts(morph['MAG_AUTO_ACS'], np.log10(morph['R_HALF']*0.03), cuts)
    cut = XYCuts(morph['i_mag_auto'], morph['i_mag_auto'], cuts)
    morph = morph[cut]
    '''

    enrique = np.loadtxt('zCOSMOS-mask.dat')
    e = np.zeros( len(enrique), dtype=[('ra',np.float32), ('dec',np.float32)])
    e['ra'] = enrique[:,1]
    e['dec'] = enrique[:,0]

    """
    masked_morph = ApplyMaskRandoms(morph, e, cat_ra='ra', cat_dec='dec', rand_ra='ra', rand_dec='dec', maxdist=maxdist)

    if jfile is not None:
        GeoCuts = GetEGCuts()
        mcut = XYCuts(masked_morph['ra'], masked_morph['dec'], GeoCuts)
        masked_morph = masked_morph[mcut]
        r = RectArea()
        u = UniformRandom(r)
        u = ApplyMaskRandoms(u, e, cat_ra='ra', cat_dec='dec', rand_ra='ra', rand_dec='dec', maxdist=maxdist)

    elif usemorph:
        GeoCuts = GetGeometryCuts()
        mcut = XYCuts(masked_morph['ra'], masked_morph['dec'], GeoCuts)
        masked_morph = masked_morph[mcut]
        ecut = XYCuts(e['ra'], e['dec'], GeoCuts)
        e = e[ecut]
        u = UniformRandom(GeoCuts)
        u = ApplyMaskRandoms(u, e, cat_ra='ra', cat_dec='dec', rand_ra='ra', rand_dec='dec', maxdist=maxdist)
    else:
        r = RectArea()
        u = UniformRandom(r)
        u = ApplyMaskRandoms(u, e, cat_ra='ra', cat_dec='dec', rand_ra='ra', rand_dec='dec', maxdist=maxdist)

    return masked_morph, e, u

    #return masked_morph, e
    """

    if hfile is not None:
        GeoCuts = GetEGCuts()
    elif jfile is not None:
        GeoCuts = GetEGCuts()
    elif usemorph:
        GeoCuts = GetGeometryCuts()
    else:
        GeoCuts = GetEGCuts()
    
    mcut = XYCuts(morph['ra'], morph['dec'], GeoCuts)
    morph = morph[mcut]
    u = UniformRandom(GeoCuts)
    u = ApplyMaskRandoms(u, e, cat_ra='ra', cat_dec='dec', rand_ra='ra', rand_dec='dec', maxdist=maxdist)
    morph = ApplyMaskRandoms(morph, e, cat_ra='ra', cat_dec='dec', rand_ra='ra', rand_dec='dec', maxdist=maxdist)

    return morph, e, u