Example #1
    def _get_uncertainties(self, star_group_size):
        """
        Retrieve uncertainties on fitted parameters from the fitter
        object.

        Parameters
        ----------
        star_group_size : int
            Number of stars in the given group.

        Returns
        -------
        unc_tab : `~astropy.table.QTable`
            A table which contains uncertainties on the fitted parameters.
            The uncertainties are reported as one standard deviation.
        """
        unc_tab = QTable()
        for param_name in self.psf_model.param_names:
            if not self.psf_model.fixed[param_name]:
                unc_tab.add_column(
                    Column(name=param_name + "_unc",
                           data=np.empty(star_group_size)))

        k = 0
        n_fit_params = len(unc_tab.colnames)
        param_cov = self.fitter.fit_info.get('param_cov', None)
        if param_cov is None:
            raise AttributeError('The fitter does not provide a parameter '
                                 'covariance matrix ("param_cov"), so '
                                 'uncertainties cannot be computed.')
        for i in range(star_group_size):
            unc_tab[i] = np.sqrt(np.diag(param_cov))[k:k + n_fit_params]
            k = k + n_fit_params

        return unc_tab
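The covariance-to-uncertainty reshaping above is easiest to see in isolation. A minimal standalone sketch (the 6x6 covariance matrix and the flux/x_0/y_0 parameter names are invented for illustration):

import numpy as np
from astropy.table import QTable, Column

# Hypothetical group: 2 stars with 3 free parameters each, so the fitter
# returns a (6, 6) covariance matrix whose diagonal holds the variances.
rng = np.random.default_rng(0)
a = rng.normal(size=(6, 6))
param_cov = a @ a.T  # any symmetric positive semi-definite matrix will do

star_group_size, n_fit_params = 2, 3
unc_tab = QTable()
for name in ('flux', 'x_0', 'y_0'):
    unc_tab.add_column(Column(name=name + '_unc',
                              data=np.empty(star_group_size)))

# one standard deviation per parameter = sqrt of the covariance diagonal,
# consumed in contiguous blocks of n_fit_params per star
sigmas = np.sqrt(np.diag(param_cov))
for i in range(star_group_size):
    unc_tab[i] = sigmas[i * n_fit_params:(i + 1) * n_fit_params]
print(unc_tab)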
Example #2
    def load_mage_z3(cls, sample='all'):
        """ Load the LLS table from the z~3 MagE survey

        (Fumagalli et al. 2013, ApJ, 775, 78)

        Parameters
        ----------
        sample : str
          Survey sample
            * all -- All
            * non-color -- Restricts to quasars that were *not* color-selected
            * color -- Restricts to quasars that were color-selected

        Returns
        -------
        lls_survey : IGMSurvey
          Includes all quasars observed in the survey
          And all the LLS

        """
        # LLS File
        survey_fil = pyigm_path + '/data/LLS/HD-LLS/fumagalli13_apj775_78_tab1+2.fits'
        tab = Table.read(survey_fil)

        # Rename some columns
        tab.rename_column('RAJ2000', 'RA')
        tab['RA'].unit = u.deg
        tab.rename_column('DEJ2000', 'DEC')
        tab['DEC'].unit = u.deg
        tab.rename_column('zqso', 'Z_QSO')
        tab.rename_column('zlls', 'Z_LLS')
        tab.rename_column('zend', 'Z_START')  # F13 was opposite of POW10
        tab.rename_column('zstart', 'Z_END')  # F13 was opposite of POW10

        # Cut table
        if sample == 'all':
            pass
        elif sample == 'non-color':
            NC = np.array([row['n_Name'][0] == 'N' for row in tab])
            tab = tab[NC]
        elif sample == 'color':
            Clr = np.array([row['n_Name'][0] == 'C' for row in tab])
            tab = tab[Clr]

        # Good LLS
        lls = tab['Z_LLS'] >= tab['Z_START']
        lls_tab = QTable(tab[lls])
        nlls = np.sum(lls)
        # Set NHI to 17.8 (tau>=2)
        lls_tab.add_column(Column([17.8] * nlls, name='NHI'))
        lls_tab.add_column(Column([99.9] * nlls, name='SIGNHI'))

        # Generate survey
        lls_survey = cls.from_sfits(lls_tab)
        lls_survey.ref = 'z3_MagE'
        lls_survey.sightlines = tab

        return lls_survey
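A hedged usage sketch: it assumes pyigm is installed and that this classmethod lives on LLSSurvey in pyigm.surveys.llssurvey (the import path is an assumption, not shown in the snippet above):

from pyigm.surveys.llssurvey import LLSSurvey  # assumed import path

# 'color' restricts to color-selected quasars; 'all' and 'non-color'
# are the other accepted values
mage = LLSSurvey.load_mage_z3(sample='color')
print(mage.ref)              # 'z3_MagE'
print(len(mage.sightlines))  # all quasars observed in the chosen sample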
Example #3
def add_column(tbl: table.QTable, colname: str, dtype, unit):
    """Add a placeholder column of the given dtype (and optional unit) if missing."""
    if colname not in tbl.colnames:
        if dtype is str or isinstance(dtype, str):
            # string column: fix the width at 32 characters
            val = "0" * 32
        else:
            # numeric column: use -999 as a missing-data sentinel
            val = dtype(-999)
        tbl.add_column([val] * len(tbl), name=colname)
        if unit is not None:
            tbl[colname] = tbl[colname] * unit  # attach the unit explicitly
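A short usage sketch of the helper above (the column names and the Jy unit are arbitrary choices):

import astropy.units as u
from astropy import table

tbl = table.QTable({'name': ['a', 'b']})
add_column(tbl, 'flux', float, u.Jy)  # numeric sentinel column with a unit
add_column(tbl, 'note', str, None)    # fixed-width (32-char) string column
print(tbl['flux'])  # [-999., -999.] Jy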
Example #4
File: fetch_pwv.py Project: talister/pwv
def fetch_LCO_weather(site_code,
                      start=None,
                      end=None,
                      interval=600,
                      dbg=False):
    """Fetch LCO weather (temperature and pressure) for LCO <site_code> (e.g. 'ogg')
    between [start] and [end] (defaults to start of current year and now) interpolated
    to a spacing of [interval] seconds (defaults to 600s).
    Returns an AstroPy QTable of UTC datetime and temperature and pressure"""

    site_code = site_code.lower()
    start = start or datetime(datetime.utcnow().year, 1, 1)
    end = end or datetime.utcnow().replace(
        hour=0, minute=0, second=0, microsecond=0)
    nrows = int((end - start) / timedelta(seconds=interval))

    # Construct a Q(uantity)Table and a column of datetime64's from <start> to
    # <end> with [interval] spacing
    table = QTable()
    dt = np.arange(start, end, step=interval, dtype='datetime64[s]')
    aa = Column(dt, name='UTC Datetime')
    table.add_column(aa)

    for quantity in ['temperature', 'pressure']:
        datum = map_quantity_to_LCO_datum(quantity)
        data = query_LCO_telemetry(site_code, start, end, datum)
        if len(data) > 0:
            interp_timestamps, interp_values = interpolate_LCO_telemetry(
                data, interval)
            unit = interp_values[0].unit
            if dbg:
                print(quantity, interp_timestamps[0], interp_timestamps[-1],
                      len(interp_timestamps), len(interp_values))

            # Check if the interpolated dataset starts late or ends early and pad
            # accordingly
            num_before = max(
                int((interp_timestamps[0] - start) /
                    timedelta(seconds=interval)), 0)
            num_after = max(
                int((end - interp_timestamps[-1]) /
                    timedelta(seconds=interval)), 0)
            pad_values = np.pad(interp_values, (num_before, num_after), 'edge')
            # Put units back
            pad_values = pad_values * unit
            if dbg:
                print("Padding by {} before, {} after, new length={}".format(
                    num_before, num_after, len(pad_values)))
            # Trim padded array to right length, turn into a column and add to table
            col = Column(pad_values[0:nrows], name=quantity)
            table.add_column(col)
        else:
            print("Found no data for {} at {} between {}->{}".format(
                datum, site_code, start, end))
    return table
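The reusable piece here is the datetime grid: as the code above does, np.arange accepts Python datetimes once a datetime64 dtype is given, and an integer step is then interpreted in that dtype's unit (seconds). A minimal sketch with made-up dates:

from datetime import datetime
import numpy as np
from astropy.table import QTable, Column

start = datetime(2024, 1, 1)
end = datetime(2024, 1, 2)
interval = 600  # seconds

table = QTable()
dt = np.arange(start, end, step=interval, dtype='datetime64[s]')
table.add_column(Column(dt, name='UTC Datetime'))
print(len(table))  # 144 rows, one per 10-minute step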
Example #5
    def build_table(self):
        """Generate an astropy QTable out of the component.

        Returns
        -------
        comp_tbl : QTable
        """
        if len(self._abslines) == 0:
            return
        comp_tbl = QTable()
        comp_tbl.add_column(Column([iline.wrest.to(u.AA).value
                                    for iline in self._abslines] * u.AA,
                                   name='wrest'))
        for attrib in ['z', 'flag_N', 'logN', 'sig_logN']:
            comp_tbl.add_column(Column([iline.attrib[attrib]
                                        for iline in self._abslines],
                                       name=attrib))
        # Return
        return comp_tbl
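The same pattern works outside the class; a sketch with a stand-in line object (FakeLine and its attrib dict are invented here; in the original, self._abslines holds linetools AbsLine objects):

import astropy.units as u
from astropy.table import QTable, Column

class FakeLine:  # minimal stand-in for an absorption line
    def __init__(self, wrest, z, logN):
        self.wrest = wrest
        self.attrib = {'z': z, 'logN': logN}

lines = [FakeLine(1215.67 * u.AA, 2.1, 14.2),
         FakeLine(1025.72 * u.AA, 2.1, 14.0)]

comp_tbl = QTable()
comp_tbl.add_column(Column([l.wrest.to(u.AA).value for l in lines] * u.AA,
                           name='wrest'))
for attrib in ['z', 'logN']:
    comp_tbl.add_column(Column([l.attrib[attrib] for l in lines], name=attrib))
print(comp_tbl)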
Example #6
    def _model_params2table(self, fit_model, star_group):
        """
        Place fitted parameters into an astropy table.

        Parameters
        ----------
        fit_model : `astropy.modeling.Fittable2DModel` instance
            PSF or PRF model to fit the data. Could be one of the models
            in this package like `~photutils.psf.sandbox.DiscretePRF`,
            `~photutils.psf.IntegratedGaussianPRF`, or any other
            suitable 2D model.

        star_group : `~astropy.table.Table`
            The star group instance.

        Returns
        -------
        param_tab : `~astropy.table.QTable`
            A table that contains the fitted parameters.
        """
        param_tab = QTable()

        for param_tab_name in self._pars_to_output.keys():
            param_tab.add_column(
                Column(name=param_tab_name, data=np.empty(len(star_group))))

        if len(star_group) > 1:
            for i in range(len(star_group)):
                for param_tab_name, param_name in self._pars_to_output.items():
                    # get sub_model corresponding to star with index i as name
                    # name was set in utils.get_grouped_psf_model()
                    # we can't use model['name'] here as that only
                    # searches leaves and we might want an intermediate
                    # node of the tree
                    sub_models = [
                        model for model in fit_model.traverse_postorder()
                        if model.name == i
                    ]
                    if len(sub_models) != 1:
                        raise ValueError('sub_models must have a length of 1')
                    sub_model = sub_models[0]

                    param_tab[param_tab_name][i] = getattr(
                        sub_model, param_name).value
        else:
            for param_tab_name, param_name in self._pars_to_output.items():
                param_tab[param_tab_name] = getattr(fit_model,
                                                    param_name).value

        return param_tab
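The lookup by integer name relies on astropy compound models: traverse_postorder() visits every node, and (per the comments above) get_grouped_psf_model() names each submodel with its star index. A minimal sketch of that lookup, with two 1D Gaussians standing in for grouped PSF submodels:

from astropy.modeling.models import Gaussian1D

# two 'stars', each submodel named with its integer index
group = Gaussian1D(amplitude=1.0, name=0) + Gaussian1D(amplitude=2.0, name=1)

sub_models = [m for m in group.traverse_postorder() if m.name == 1]
assert len(sub_models) == 1
print(sub_models[0].amplitude.value)  # 2.0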
Example #7
def read_in_galaxy(filename, index):
    print("attempting to read in", filename)

    if ".pkl" in filename:
        print("assuming that", filename, "is a pickle file with only a dict of galaxies in it")
        galaxies = pickle.load(open(filename, "rb"))  # pickles must be read in binary mode
        print("for now we're only dealing with one galaxy at a time; taking galaxy #", index)
        assert index in galaxies.keys(), "index %i not in file :-(" % index
        galaxy = galaxies[index]
        col_z = Column(name='z', data=galaxies['z'])
        galaxy.add_column(col_z, 0)
    elif "peter" in filename:
        print("assuming that", filename, "is an ascii file from peter behroozi and gergo popping with Mh,Ms,dMhdt,sfr,Mg,Mhi,Mh2")
        print("also assuming that", filename, "only has one iteration")
        z, Mh, Ms, dMhdt, sfr, Mg, Mhi, Mh2 = np.loadtxt(
            filename,
            usecols=(0, 7*index+1, 7*index+2, 7*index+3, 7*index+4,
                     7*index+5, 7*index+6, 7*index+7),
            unpack=True)
        galaxy = QTable([z, Mh, Ms, dMhdt, sfr, Mg, Mhi, Mh2],
                        names=('z', 'Mh', 'Ms', 'dMhdt', 'sfr', 'Mg', 'Mhi', 'Mh2'))

    ## need to add units
    galaxy['Mh'].unit = u.Msun
    galaxy['Ms'].unit = u.Msun
    galaxy['Mg'].unit = u.Msun
    galaxy['Mhi'].unit = u.Msun
    galaxy['Mh2'].unit = u.Msun
    galaxy['dMhdt'].unit = u.Msun / u.yr
    if 'dMrdt' in galaxy.colnames:
        galaxy['dMrdt'].unit = u.Msun / u.yr
    galaxy['sfr'].unit = u.Msun / u.yr

    ### Helium is not included in the gas masses!!
    galaxy['Mg'] = galaxy['Mg'] * HELIUM_CORR

    ### Let's make a CGM! ###
    galaxy['Mcgm'] = galaxy['Mh']*cosmo.Ob0/cosmo.Om0 - galaxy['Ms'] - galaxy['Mg']

    age = cosmo.age(galaxy['z'])
    col_age = Column(name='age', data=age)
    galaxy.add_column(col_age,1)

    tlb = cosmo.lookback_time(galaxy['z'])
    col_tlb = Column(name='lookback', data=tlb)
    galaxy.add_column(col_tlb,2)

    ## check to make sure sorted so earlier is earlier
    galaxy.sort('age')

    return galaxy
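The age/lookback bookkeeping works on any QTable with a redshift column; a minimal sketch using the Planck15 cosmology (the cosmology choice and redshifts are made up, since the snippet above gets cosmo from elsewhere):

import astropy.units as u
from astropy.cosmology import Planck15 as cosmo
from astropy.table import QTable, Column

galaxy = QTable({'z': [3.0, 1.0, 0.2]})
galaxy.add_column(Column(name='age', data=cosmo.age(galaxy['z'])), 1)
galaxy.add_column(Column(name='lookback',
                         data=cosmo.lookback_time(galaxy['z'])), 2)
galaxy.sort('age')  # earliest epochs (highest z) first
print(galaxy)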
Example #8
    def write_to_ascii(self, outfil, format='ascii.ecsv'):
        """ Write to a text file.

        Parameters
        ----------
        outfil : str
          Filename.
        format : str, optional
          Format passed to `~astropy.table.Table.write` (default
          'ascii.ecsv', which preserves units and column metadata).
        """
        # Convert to astropy Table
        table = QTable([self.wavelength, self.flux],
                       names=('WAVE', 'FLUX'))
        if self.sig_is_set:
            sigclm = Column(self.sig, name='ERROR')
            table.add_column(sigclm)
        if self.co_is_set:
            coclm = Column(self.co, name='CO')
            table.add_column(coclm)

        # Write
        table.write(outfil, format=format)
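ECSV is a good default here because, unlike plain ASCII, it round-trips units and column metadata. A quick self-contained check (the file name and values are arbitrary):

import numpy as np
import astropy.units as u
from astropy.table import QTable, Column

table = QTable([np.linspace(4000., 5000., 5) * u.AA, np.ones(5)],
               names=('WAVE', 'FLUX'))
table.add_column(Column(np.full(5, 0.1), name='ERROR'))
table.write('spec.ecsv', format='ascii.ecsv', overwrite=True)

back = QTable.read('spec.ecsv', format='ascii.ecsv')
print(back['WAVE'].unit)  # Angstrom survives the round trip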
Example #9
    def build_table(self):
        """Generate an astropy QTable out of the abs lines.

        Returns
        -------
        comp_tbl : QTable
        """
        if len(self._abslines) == 0:
            return
        comp_tbl = QTable()
        comp_tbl.add_column(Column([iline.wrest.to(u.AA).value
                                    for iline in self._abslines] * u.AA,
                                   name='wrest'))
        comp_tbl.add_column(Column([iline.z for iline in self._abslines],
                                   name='z'))
        for attrib in ['flag_N', 'logN', 'sig_logN']:
            comp_tbl.add_column(Column([iline.attrib[attrib]
                                        for iline in self._abslines],
                                       name=attrib))
        # Return
        return comp_tbl
Example #10
def find_peaks(data,
               threshold,
               box_size=3,
               footprint=None,
               mask=None,
               border_width=None,
               npeaks=np.inf,
               centroid_func=None,
               error=None,
               wcs=None):
    """
    Find local peaks in an image that are above a specified
    threshold value.

    Peaks are the maxima above the ``threshold`` within a local region.
    The local regions are defined by either the ``box_size`` or
    ``footprint`` parameters.  ``box_size`` defines the local region
    around each pixel as a square box.  ``footprint`` is a boolean array
    where `True` values specify the region shape.

    If multiple pixels within a local region have identical intensities,
    then the coordinates of all such pixels are returned.  Otherwise,
    there will be only one peak pixel per local region.  Thus, the
    defined region effectively imposes a minimum separation between
    peaks unless there are identical peaks within the region.

    If ``centroid_func`` is input, then it will be used to calculate a
    centroid within the defined local region centered on each detected
    peak pixel.  In this case, the centroid will also be returned in the
    output table.

    Parameters
    ----------
    data : array_like
        The 2D array of the image.

    threshold : float or array-like
        The data value or pixel-wise data values to be used for the
        detection threshold. A 2D ``threshold`` must have the same shape
        as ``data``. See `~photutils.segmentation.detect_threshold` for
        one way to create a ``threshold`` image.

    box_size : scalar or tuple, optional
        The size of the local region to search for peaks at every point
        in ``data``.  If ``box_size`` is a scalar, then the region shape
        will be ``(box_size, box_size)``.  Either ``box_size`` or
        ``footprint`` must be defined.  If they are both defined, then
        ``footprint`` overrides ``box_size``.

    footprint : `~numpy.ndarray` of bools, optional
        A boolean array where `True` values describe the local footprint
        region within which to search for peaks at every point in
        ``data``.  ``box_size=(n, m)`` is equivalent to
        ``footprint=np.ones((n, m))``.  Either ``box_size`` or
        ``footprint`` must be defined.  If they are both defined, then
        ``footprint`` overrides ``box_size``.

    mask : array_like, bool, optional
        A boolean mask with the same shape as ``data``, where a `True`
        value indicates the corresponding element of ``data`` is masked.

    border_width : int, optional
        The width in pixels to exclude around the border of the
        ``data``.

    npeaks : int, optional
        The maximum number of peaks to return.  When the number of
        detected peaks exceeds ``npeaks``, the peaks with the highest
        peak intensities will be returned.

    centroid_func : callable, optional
        A callable object (e.g., function or class) that is used to
        calculate the centroid of a 2D array. The ``centroid_func``
        must accept a 2D `~numpy.ndarray`, have a ``mask`` keyword, and
        optionally an ``error`` keyword. The callable object must return
        a tuple of two 1D `~numpy.ndarray` objects, representing the x
        and y centroids, respectively.

    error : array_like, optional
        The 2D array of the 1-sigma errors of the input ``data``.
        ``error`` is used only if ``centroid_func`` is input (the
        ``error`` array is passed directly to the ``centroid_func``).

    wcs : `None` or WCS object, optional
        A world coordinate system (WCS) transformation that
        supports the `astropy shared interface for WCS
        <https://docs.astropy.org/en/stable/wcs/wcsapi.html>`_
        (e.g., `astropy.wcs.WCS`, `gwcs.wcs.WCS`). If `None`, then
        the sky coordinates will not be returned in the output
        `~astropy.table.Table`.

    Returns
    -------
    output : `~astropy.table.Table` or `None`
        A table containing the x and y pixel location of the peaks and
        their values.  If ``centroid_func`` is input, then the table
        will also contain the centroid position.  If no peaks are found
        then `None` is returned.
    """
    from scipy.ndimage import maximum_filter

    data = np.asanyarray(data)

    if np.all(data == data.flat[0]):
        warnings.warn('Input data is constant. No local peaks can be found.',
                      NoDetectionsWarning)
        return None

    if not np.isscalar(threshold):
        threshold = np.asanyarray(threshold)
        if data.shape != threshold.shape:
            raise ValueError('A threshold array must have the same shape as '
                             'the input data.')

    # remove NaN values to avoid runtime warnings
    nan_mask = np.isnan(data)
    if np.any(nan_mask):
        data = np.copy(data)  # ndarray
        data[nan_mask] = np.nanmin(data)

    if footprint is not None:
        data_max = maximum_filter(data,
                                  footprint=footprint,
                                  mode='constant',
                                  cval=0.0)
    else:
        data_max = maximum_filter(data,
                                  size=box_size,
                                  mode='constant',
                                  cval=0.0)

    peak_goodmask = (data == data_max)  # good pixels are True

    if mask is not None:
        mask = np.asanyarray(mask)
        if data.shape != mask.shape:
            raise ValueError('data and mask must have the same shape')
        peak_goodmask = np.logical_and(peak_goodmask, ~mask)

    if border_width is not None:
        for i in range(peak_goodmask.ndim):
            peak_goodmask = peak_goodmask.swapaxes(0, i)
            peak_goodmask[:border_width] = False
            peak_goodmask[-border_width:] = False
            peak_goodmask = peak_goodmask.swapaxes(0, i)

    peak_goodmask = np.logical_and(peak_goodmask, (data > threshold))
    y_peaks, x_peaks = peak_goodmask.nonzero()
    peak_values = data[y_peaks, x_peaks]

    nxpeaks = len(x_peaks)
    if nxpeaks > npeaks:
        idx = np.argsort(peak_values)[::-1][:npeaks]
        x_peaks = x_peaks[idx]
        y_peaks = y_peaks[idx]
        peak_values = peak_values[idx]

    if nxpeaks == 0:
        warnings.warn('No local peaks were found.', NoDetectionsWarning)
        return None

    # construct the output table
    meta = {'version': _get_version_info()}
    colnames = ['x_peak', 'y_peak', 'peak_value']
    coldata = [x_peaks, y_peaks, peak_values]
    table = QTable(coldata, names=colnames, meta=meta)

    if wcs is not None:
        skycoord_peaks = wcs.pixel_to_world(x_peaks, y_peaks)
        table.add_column(skycoord_peaks, name='skycoord_peak', index=2)

    # perform centroiding
    if centroid_func is not None:
        from ..centroids import centroid_sources  # prevents circular import

        if not callable(centroid_func):
            raise TypeError('centroid_func must be a callable object')

        x_centroids, y_centroids = centroid_sources(
            data,
            x_peaks,
            y_peaks,
            box_size=box_size,
            footprint=footprint,
            error=error,
            mask=mask,
            centroid_func=centroid_func)

        table['x_centroid'] = x_centroids
        table['y_centroid'] = y_centroids

        if wcs is not None:
            skycoord_centroids = wcs.pixel_to_world(x_centroids, y_centroids)
            idx = table.colnames.index('y_centroid') + 1
            table.add_column(skycoord_centroids,
                             name='skycoord_centroid',
                             index=idx)

    return table
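A minimal usage sketch, assuming the public photutils import path for this function:

import numpy as np
from photutils.detection import find_peaks  # assumed import path

data = np.zeros((25, 25))
data[7, 7] = 5.0     # two synthetic point sources
data[17, 12] = 3.0

tbl = find_peaks(data, threshold=1.0, box_size=5)
print(tbl)  # x_peak, y_peak, peak_value for the two injected maxima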
Example #11
File: fetch_pwv.py Project: talister/pwv
def read_ascii(filepath, dbg=False):
    """Read single parameter ASCII format files extracted from the GrADS server e.g.
     https://goldsmr4.gesdisc.eosdis.nasa.gov/dods/M2T1NXSLV.ascii?tqv
     or from the output of fetch_merra2_ascii_timeseries()"""

    with open(filepath, 'r') as foo_fh:

        first_line = foo_fh.readline()
        if 'Dataset:' in first_line.strip():
            # JD for 2002-01-01, starting point of aggregation values
            t0 = 2452275.5
            # OpenDAP format file
            data = {}
            for line in foo_fh:
                line = line.rstrip()
                if len(line) == 0:
                    continue
                if dbg: print(line)
                if '[' not in line:
                    chunks = line.split(',')
                    if len(chunks) == 2:
                        quantity = chunks[0]
                        value = chunks[1]
                        try:
                            value = float(value)
                        except ValueError:
                            pass
                        value = [value]
                    else:
                        quantity = chunks[0]
                        value = [float(x.strip()) for x in chunks[1:]]
                    data[quantity] = value
                else:
                    chunks = line.split(',')
                    array_name = chunks[0]
                    value = chunks[1]
                    try:
                        value = float(value)
                    except ValueError:
                        pass
                    index = array_name.find('[')
                    if index > 0:
                        array_name = array_name[0:index]
                    if array_name not in data:
                        # New quantity
                        data[array_name] = []
                    data[array_name].append(value)
        else:
            # GDS format file
            t0 = 1721423.5
            foo_fh.seek(0)  # rewind so the GDS parser sees the whole file

            in_data = False
            data = {}
            for line in foo_fh:
                line = line.rstrip()
                if len(line) == 0:
                    continue
                if dbg: print(line, in_data)
                if line.count(',') == 1:
                    if line[0] != '[':
                        if in_data is True:
                            data[array_name] = array
                            array = []
                            array_name = line.split(',')[0]
                        else:
                            in_data = True
                            array = []
                            array_name = line.split(',')[0]
                            if dbg: print("Created array_name", array_name)
                    else:
                        value = float(line.split(',')[1].strip())
                        array.append(value)
                elif line.count(',') >= 1 and in_data is True:
                    values = [float(x.strip()) for x in line.split(',')]
                    data[array_name] = values
                    in_data = False

    if data.get('time', None) is not None:
        times = np.array(time_index_to_dt(data['time'], t0),
                         dtype='datetime64[s]')
        data['datetime'] = times

    table = QTable()
    for key in data.keys():
        if dbg: print(key, len(data[key]))
        if not isinstance(data[key], float) and len(data[key]) > 1:
            # Skip 1D values such as latitude or longitude
            aa = Column(data[key], name=key)
            table.add_column(aa)
    return table
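The closing dict-to-QTable loop is a generic pattern: keep the multi-element series, skip scalar metadata. Sketched on a made-up parsed result:

import numpy as np
from astropy.table import QTable, Column

data = {'lat': [19.5],                 # single-element metadata: skipped
        'time': [1.0, 2.0, 3.0],
        'tqv': [10.0, 11.0, 12.0]}

table = QTable()
for key in data.keys():
    if not isinstance(data[key], float) and len(data[key]) > 1:
        table.add_column(Column(data[key], name=key))
print(table.colnames)  # ['time', 'tqv']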
Example #12
def get_nuv_cand(outfil=None, known_fil=None):
    '''
    Generate a list of NUV AGN candidates from WISE + GALEX

    Parameters
    ----------
    outfil : str, optional
      Output FITS filename for the candidate table
    known_fil : str, optional
      Filename for a Table of NUV candidates with known redshifts
    '''
    #  Read photometry
    z1_galex_nuv, z1_wise_nuv = uvqs_cand.load_phot(use_NUV=True)

    # COLOR CUTS
    gdc = color_cut(z1_galex_nuv, z1_wise_nuv)
    srt = np.argsort(z1_wise_nuv[gdc]['RA'])
    gdc = gdc[srt]
    w1mw2 = z1_wise_nuv[gdc]['W1MPRO'] - z1_wise_nuv[gdc]['W2MPRO']

    # Generate the QTable
    z1_cand_nuv = QTable(
        [Column(z1_wise_nuv[gdc]['RA'], name='RA', unit=u.degree)])
    z1_cand_nuv.add_column(
        Column(z1_wise_nuv[gdc]['DEC'], name='DEC', unit=u.degree))
    z1_cand_nuv.add_column(Column(z1_wise_nuv[gdc]['W1MPRO'], name='W1'))
    z1_cand_nuv.add_column(Column(z1_wise_nuv[gdc]['W2MPRO'], name='W2'))
    z1_cand_nuv.add_column(
        Column(z1_wise_nuv[gdc]['W1MPRO'] - z1_wise_nuv[gdc]['W2MPRO'],
               name='W1-W2'))
    z1_cand_nuv.add_column(Column(z1_galex_nuv[gdc]['FUV'], name='FUV'))
    z1_cand_nuv.add_column(Column(z1_galex_nuv[gdc]['NUV'], name='NUV'))
    z1_cand_nuv.add_column(
        Column(z1_galex_nuv[gdc]['FUV'] - z1_galex_nuv[gdc]['NUV'],
               name='FUV-NUV'))

    msk = z1_cand_nuv['RA'] == z1_cand_nuv['RA']
    c_z1_cand = SkyCoord(ra=z1_cand_nuv['RA'], dec=z1_cand_nuv['DEC'])

    # Check against z1QSO
    cut_z1qso = uvqs_io.new_z1qso()  # z1QSO with good z, not in milliq
    c_zq = SkyCoord(ra=cut_z1qso['RA'], dec=cut_z1qso['DEC'])
    idx, d2d, d3d = coords.match_coordinates_sky(c_z1_cand,
                                                 c_zq,
                                                 nthneighbor=1)
    bad = np.where(d2d.to('arcsec') < (5. * u.arcsec))[0]
    print('z1q_nuv: Rejecting {:d} quasars from z1QSO'.format(len(bad)))
    msk[bad] = False

    # Check against Flesch+15
    milliq = uvqs_io.load_milliq(good_z=True, good_FUV=True)
    c_mq = SkyCoord(ra=milliq['RA'] * u.degree, dec=milliq['DEC'] * u.degree)
    idx, d2d, d3d = coords.match_coordinates_sky(c_z1_cand,
                                                 c_mq,
                                                 nthneighbor=1)
    # Fill in z
    z = np.zeros(len(c_z1_cand))
    mt = np.where((d2d.to('arcsec') < (5. * u.arcsec)))[0]
    z[mt] = milliq['Z'][idx][mt]
    z1_cand_nuv.add_column(Column(z, name='Z'))

    # Write?
    if known_fil is not None:
        import pdb
        pdb.set_trace()
        xxf.table_to_fits(Table(z1_cand_nuv[mt]),
                          known_fil,
                          compress=True,
                          comment='Known NUV Candidates')
        print('z1qso_nuv.get_cand: Writing {:d} candidates to {:s}'.format(
            len(z1_cand_nuv[mt]), known_fil))

    # Parse out low-z
    bad = np.where((d2d.to('arcsec') < (5. * u.arcsec))
                   & (milliq['Z'][idx] < 0.5))[0]
    print('z1q_nuv: Rejecting {:d} quasars from Flesch+15'.format(len(bad)))
    msk[bad] = False

    # Add priority flags
    z1_cand_nuv = z1_cand_nuv[msk]
    priority = np.zeros(len(z1_cand_nuv), dtype='int')

    gd1 = np.where(z1_cand_nuv['NUV'] < 16.5)[0]
    priority[gd1] = 1
    gd2 = np.where((z1_cand_nuv['NUV'] < 17.0) & (priority == 0))[0]  # parenthesize: & binds tighter than ==
    priority[gd2] = 2
    gd3 = np.where(z1_cand_nuv['NUV'] > 17.0)[0]
    priority[gd3] = 3

    z1_cand_nuv.add_column(Column(priority, name='PRI'))

    # Fill in z1qso observed
    z1qso, cand = z1qa.load_z1qso()  # z1QSO with good z, not in milliq
    c_zn = SkyCoord(ra=z1_cand_nuv['RA'], dec=z1_cand_nuv['DEC'])
    c_zq = SkyCoord(ra=z1qso['RA'] * u.degree, dec=z1qso['DEC'] * u.degree)
    idx, d2d, d3d = coords.match_coordinates_sky(c_zn, c_zq, nthneighbor=1)
    mt = np.where((d2d.to('arcsec') < (5. * u.arcsec)))[0]
    spec_qual = np.zeros(len(z1_cand_nuv), dtype='int')
    z_qual = np.zeros(len(z1_cand_nuv), dtype='int')

    spec_qual[mt] = z1qso['SPEC_QUAL'][idx[mt]]
    z_qual[mt] = z1qso['Z_QUAL'][idx[mt]]

    z1_cand_nuv.add_column(Column(spec_qual, name='SPEC_QUAL'))
    z1_cand_nuv.add_column(Column(z_qual, name='Z_QUAL'))

    # Names
    names = []
    for jj, z1c in enumerate(z1_cand_nuv):
        name = 'z1c_J{:s}{:s}'.format(
            c_zn[jj].ra.to_string(unit=u.hour, pad=True, sep='', precision=2),
            c_zn[jj].dec.to_string(pad=True,
                                   alwayssign=True,
                                   sep='',
                                   precision=2))
        names.append(str(name))
    z1_cand_nuv.add_column(Column(names, name='NAME'))

    # Sort
    srt = np.argsort(z1_cand_nuv['RA'])
    z1_cand_nuv = z1_cand_nuv[srt]

    # Write?
    if outfil is not None:
        xxf.table_to_fits(Table(z1_cand_nuv),
                          outfil,
                          compress=True,
                          comment='z1QSO NUV Candidates')
        print('z1qso_nuv.get_cand: Writing {:d} candidates to {:s}'.format(
            len(z1_cand_nuv), outfil))

    # Return
    return z1_cand_nuv
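The rejection step above is the standard match_coordinates_sky idiom: find each candidate's nearest known source and mask anything within 5 arcsec. In isolation (all coordinates invented):

import numpy as np
import astropy.units as u
from astropy.coordinates import SkyCoord, match_coordinates_sky

cand = SkyCoord(ra=[10.0, 20.0, 30.0] * u.deg, dec=[0.0, 0.0, 0.0] * u.deg)
known = SkyCoord(ra=[10.0003, 50.0] * u.deg, dec=[0.0, 0.0] * u.deg)

idx, d2d, d3d = match_coordinates_sky(cand, known, nthneighbor=1)
msk = np.ones(len(cand), dtype=bool)
msk[d2d < 5.0 * u.arcsec] = False  # drop candidates with a known counterpart
print(cand[msk])  # only the 20 and 30 deg candidates survive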
Example #13
def mk_summary(dlas, prefix, outfil, specpath=None, htmlfil=None):
    """ Loops through the DLA list and generates a Table

    Also pushes the 1D spectra into the folder

    Parameters
    ----------
    dlas : DLASurvey
    prefix : str
    outfil : str
      Name of the output FITS summary file
    htmlfil : str, optional

    Returns
    -------
    """
    #
    if htmlfil is None:
        htmlfil = 'tmp.html'

    # # Constructing
    # QSO, RA/DEC
    cqso = Column(dlas.qso, name='QSO')
    ra = dlas.coord.ra.degree[0]
    dec = dlas.coord.dec.degree[0]
    jname = []
    for abs_sys in dlas._abs_sys:
        jname.append(survey_name(prefix, abs_sys))

    cjname = Column(jname, name='Name')
    cra = Column(ra, name='RA', unit=u.degree)
    cdec = Column(dec, name='DEC', unit=u.degree)
    czem = Column(dlas.zem, name='Z_QSO')

    # Begin the Table
    dla_table = QTable([cjname, cqso, cra, cdec, czem])

    # DLA properties
    czabs = Column(dlas.zabs, name='ZABS')
    cNHI = Column(dlas.NHI, name='logNHI')
    csigNHI = Column(dlas.sig_NHI, name='sig(logNHI)')

    # Add to Table
    dla_table.add_columns([czabs, cNHI, csigNHI])

    # Spectra files
    all_sfiles = []
    for jj, ills in enumerate(dlas._abs_sys):
        sub_spec = mk_1dspec(ills, name=cjname[jj], outpath=specpath)
        # Pad
        while len(sub_spec) < 5:
            sub_spec.append(str('NULL'))
        # Append
        all_sfiles.append(sub_spec)

    cspec = Column(np.array(all_sfiles), name='SPEC_FILES')
    dla_table.add_column(cspec)

    # Sort
    dla_table.sort('RA')

    # Write
    print('Writing {:s}'.format(outfil))
    xxf.table_to_fits(dla_table, outfil)
    print('Writing {:s}'.format(htmlfil))
    Table(dla_table).write(htmlfil)

    return dla_table
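Padding each file list to a fixed length is what lets np.array build a regular 2D string array, which astropy then stores as a single multidimensional SPEC_FILES column. A compact sketch:

import numpy as np
from astropy.table import QTable, Column

all_sfiles = [['a.fits'], ['b.fits', 'c.fits']]
for sub_spec in all_sfiles:          # pad ragged rows out to length 5
    while len(sub_spec) < 5:
        sub_spec.append('NULL')

tbl = QTable()
tbl.add_column(Column(np.array(all_sfiles), name='SPEC_FILES'))
print(tbl['SPEC_FILES'].shape)  # (2, 5): a 5-element string vector per row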
Example #14
def stackLines(galaxy,
               velocity,
               binmap,
               binedge,
               bintype,
               binunit,
               cubeCO=None,
               cubeHCN=None,
               cubeHCOp=None,
               cube13CO=None,
               cubeC18O=None,
               sfrmap=None,
               ltirmap=None,
               maxAbsVel=250.0):
    '''
    Actually do the stacking.

    galaxy: line from degas_base.fits table with galaxy information

    velocity: velocity map used to shift the spectra before stacking

    binmap: bin map

    binedge: bin edges

    bintype: type of bin

    binunit: units of the bins

    cubeCO: CO cube (SpectralCube); the data being stacked

    cubeHCN: HCN cube

    cubeHCOp: HCO+ cube

    cube13CO: 13CO cube

    cubeC18O: C18O cube

    sfrmap: star formation rate map

    ltirmap: LTIR map

    maxAbsVel: maximum absolute velocity to include in the spectrum, in
    km/s. Assumes the line is centered at zero (which should be true
    for stacking).

    Date        Programmer      Description of Changes
    ----------------------------------------------------------------------
    10/29/2020  Yiqing Song     Original Code
    12/03/2020  A.A. Kepley     Added comments and moved bin creation up
                                a level to simplify code.
    12/10/2020  A.A. Kepley     added LTIR calculation
    5/6/2021    A.A. Kepley     modified to fit all lines for one galaxy at
                                once so I can use CO FWHM to calculate upper
                                limits for other lines
    '''

    from signal_id.stacking import bin_by_label as BinByLabel

    # get the relevant info on the galaxy and cube
    D = galaxy['DIST_MPC'] * u.Mpc
    pix_area = (np.radians(np.abs(cubeCO.header['CDELT1'])) *
                D.to(u.kpc))**2  #kpc^2 ## TODO CHECK THIS CONVERSION

    # do the individual line stacks.
    # -----------------------------

    stack = {}
    if cubeCO:
        ## fill in nans with 0 to avoid fft shift issue with nans
        #cubeCO_nonan = cubeCO.with_fill_value(0)

        # do the stack -- making sure the units work out right.

        stack['CO'], labelvals = BinByLabel(cubeCO,
                                            binmap.value,
                                            velocity,
                                            weight_map=None,
                                            return_weights=True)

    if cubeHCN:
        ## fill in nans with 0 to avoid fft shift issue with nans
        #cubeHCN_nonan = cubeHCN.with_fill_value(0)

        # do the stack -- making sure the units work out right.
        stack['HCN'], labelvals = BinByLabel(cubeHCN,
                                             binmap.value,
                                             velocity,
                                             weight_map=None,
                                             return_weights=True)

    if cubeHCOp:

        #cubeHCOp_nonan = cubeHCOp.with_fill_value(0)

        # do the stack -- making sure the units work out right.
        stack['HCOp'], labelvals = BinByLabel(cubeHCOp,
                                              binmap.value,
                                              velocity,
                                              weight_map=None,
                                              return_weights=True)

    if cube13CO:
        #cube13CO_nonan = cube13CO.with_fill_value(0)

        # do the stack -- making sure the units work out right.
        stack['13CO'], labelvals = BinByLabel(cube13CO,
                                              binmap.value,
                                              velocity,
                                              weight_map=None,
                                              return_weights=True)

    if cubeC18O:

        #cubeC18O_nonan = cubeC18O.with_fill_value(0)

        # do the stack -- making sure the units work out right.
        stack['C18O'], labelvals = BinByLabel(cubeC18O,
                                              binmap.value,
                                              velocity,
                                              weight_map=None,
                                              return_weights=True)

    # putting the table together
    # -------------------------

    # first add bins
    t = {
        'bin_lower': binedge[0:-1],
        'bin_upper': binedge[1:],
        'bin_mean': (binedge[0:-1] + binedge[1:]) / 2.0
    }
    total_stack = QTable(t)

    # Then set up the structures for the bin-based profiles, etc.
    spectral_profile = {}
    stack_weights = {}
    for line in ['CO', 'HCN', 'HCOp', '13CO', 'C18O']:
        spectral_profile[line] = np.zeros(
            (len(labelvals), len(stack['CO'][0]['spectral_axis'])))
        stack_weights[line] = np.zeros((len(labelvals)))

    bin_label = np.zeros(len(labelvals))
    bin_area = np.zeros(len(labelvals)) * pix_area.unit
    sfr_mean = np.zeros(len(labelvals)) * sfrmap.unit
    ltir_mean = np.zeros(len(labelvals)) * ltirmap.unit

    spectral_axis = np.zeros(
        (len(labelvals), len(stack['CO'][0]['spectral_axis'])
         )) * stack['CO'][0]['spectral_axis'].unit

    for i in range(len(labelvals)):
        spectral_axis[i, :] = stack['CO'][i]['spectral_axis']

        bin_label[i] = stack['CO'][i]['label']

        ## TODO CHECK THE CALCULATION BELOW FOR BINAREA
        bin_area[i] = float(sum(
            binmap[binmap == bin_label[i]].flatten())) * pix_area
        # calculating the mean SFR as requested
        if sfrmap is not None:
            sfr_mean[i] = np.nanmean(sfrmap[binmap == bin_label[i]])
        # calculate the mean LTIR as requested
        if ltirmap is not None:
            ltir_mean[i] = np.nanmean(ltirmap[binmap == bin_label[i]])

        # get the spectral profiles
        for line in ['CO', 'HCN', 'HCOp', '13CO', 'C18O']:
            if line in stack.keys():
                spectral_profile[line][i, :] = stack[line][i]['spectrum']
                stack_weights[line][i] = stack[line][i]['weights']
            else:
                spectral_profile[line][i, :] = np.full(
                    len(stack['CO'][0]['spectral_axis']), np.nan)
                stack_weights[line][i] = np.nan

    # add above items to the table
    total_stack.add_column(Column(spectral_axis), name='spectral_axis')
    for line in ['CO', 'HCN', 'HCOp', '13CO', 'C18O']:
        total_stack.add_column(
            Column(spectral_profile[line],
                   name='stack_profile_' + line,
                   unit=cubeCO.unit))
        total_stack.add_column(
            Column(stack_weights[line], name='stack_weights_' + line))

    # TODO CHECK UNITS HERE
    total_stack.add_column(Column(bin_area, name='bin_area'))
    total_stack.add_column(Column(
        sfr_mean, name='sfr_mean'))  # Msun/yr/kpc^2 -- units from input image
    total_stack.add_column(Column(sfr_mean * bin_area),
                           name='sfr_total')  # Msun/yr/kpc^2 * kpc^2 = Msun/Yr
    total_stack.add_column(
        Column(ltir_mean),
        name='ltir_mean')  # Lsun/pc^2 -- units from input image
    total_stack.add_column(
        Column(ltir_mean * 1000.0**2 * bin_area),
        name='ltir_total')  # Lsun/pc^2 * (1000 pc/kpc)^2 * kpc^2   = Lsun

    return total_stack
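The per-bin statistics reduce to boolean-mask means over the label map; a small sketch with a 2x2 toy map (values invented):

import numpy as np
import astropy.units as u

binmap = np.array([[1, 1],
                   [2, 2]])
sfrmap = np.array([[1.0, 3.0],
                   [5.0, np.nan]]) * u.Msun / u.yr

labelvals = np.unique(binmap)
sfr_mean = np.zeros(len(labelvals)) * sfrmap.unit
for i, label in enumerate(labelvals):
    sfr_mean[i] = np.nanmean(sfrmap[binmap == label])  # NaNs ignored per bin
print(sfr_mean)  # [2., 5.] solMass / yr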
Example #15
    def nstar(self, image, star_groups):
        """
        Fit, as appropriate, a compound or single model to the given
        ``star_groups``. Groups are fitted sequentially from the
        smallest to the biggest. In each iteration, the previously
        fitted group is subtracted from ``image``.

        Parameters
        ----------
        image : numpy.ndarray
            Background-subtracted image.

        star_groups : `~astropy.table.Table`
            This table must contain the following columns: ``id``,
            ``group_id``, ``x_0``, ``y_0``, ``flux_0``.  ``x_0`` and
            ``y_0`` are initial estimates of the centroids and
            ``flux_0`` is an initial estimate of the flux. Additionally,
            columns named as ``<param_name>_0`` are required if any
            other parameter in the psf model is free (i.e., the
            ``fixed`` attribute of that parameter is ``False``).

        Returns
        -------
        result_tab : `~astropy.table.QTable`
            Astropy table that contains photometry results.

        image : numpy.ndarray
            Residual image.
        """
        result_tab = QTable()
        for param_tab_name in self._pars_to_output.keys():
            result_tab.add_column(Column(name=param_tab_name))

        unc_tab = QTable()
        for param, isfixed in self.psf_model.fixed.items():
            if not isfixed:
                unc_tab.add_column(Column(name=param + "_unc"))

        y, x = np.indices(image.shape)

        star_groups = star_groups.group_by('group_id')
        for n in range(len(star_groups.groups)):
            group_psf = get_grouped_psf_model(self.psf_model,
                                              star_groups.groups[n],
                                              self._pars_to_set)
            usepixel = np.zeros_like(image, dtype=bool)

            for row in star_groups.groups[n]:
                usepixel[overlap_slices(large_array_shape=image.shape,
                                        small_array_shape=self.fitshape,
                                        position=(row['y_0'], row['x_0']),
                                        mode='trim')[0]] = True

            fit_model = self.fitter(group_psf, x[usepixel], y[usepixel],
                                    image[usepixel])
            param_table = self._model_params2table(fit_model,
                                                   star_groups.groups[n])
            result_tab = vstack([result_tab, param_table])

            param_cov = self.fitter.fit_info.get('param_cov', None)
            if param_cov is not None:
                unc_tab = vstack([
                    unc_tab,
                    self._get_uncertainties(len(star_groups.groups[n]))
                ])

            # do not subtract if the fitting did not go well
            try:
                image = subtract_psf(image,
                                     self.psf_model,
                                     param_table,
                                     subshape=self.fitshape)
            except NoOverlapError:
                pass

        if param_cov is not None:
            result_tab = hstack([result_tab, unc_tab])

        return result_tab, image
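The table bookkeeping in nstar() comes down to vstack for rows (one fitted group at a time) and a final hstack for the uncertainty columns; a stripped-down sketch with fabricated numbers:

from astropy.table import QTable, Column, hstack, vstack

group1 = QTable([Column([1.0], name='flux_fit'), Column([10.0], name='x_fit')])
group2 = QTable([Column([2.0], name='flux_fit'), Column([11.0], name='x_fit')])
unc = QTable([Column([0.1, 0.2], name='flux_fit_unc')])

result_tab = vstack([group1, group2])   # rows accumulate group by group
result_tab = hstack([result_tab, unc])  # uncertainties join as extra columns
print(result_tab.colnames)  # ['flux_fit', 'x_fit', 'flux_fit_unc']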
Example #16
File: core.py Project: gianninapr/sbpy
    def apply(self, data, name, unit=None):
        """Apply an arbitrarily shaped sequence as additional column to a
        `~sbpy.data.DataClass` object and reshape it accordingly.

        Parameters
        ----------
        data : list or iterable `~astropy.units.Quantity` object
            Data to be added in a new column in form of a one-dimensional
            list or a two-dimensional nested sequence. Each element in
            ``data`` corresponds to one of the rows in the existing data
            table. If an element of ``data`` is a list, the corresponding
            data table row is repeated the same number of times as there
            are elements in this sublist. If ``data`` is provided as a
            flat list and has the same length as the current data table,
            ``data`` will simply be added as a column to the data table
            and the length of the data table will not change. If ``data``
            is provided as a `~astropy.units.Quantity` object (only
            possible for flat lists), its unit is adopted, unless ``unit``
            is specified (not None).
        name : str
            Name of the new data column.
        unit : `~astropy.units` object or str, optional
            Unit to be applied to the new column. Default:
            `None`

        Returns
        -------
        None

        Note
        ----
        As a result of this method, the length of the underlying data table
        will be the same as the length of the flattened `data` parameter.

        Examples
        --------
        Imagine the following scenario: you obtain photometric measurements
        of the same asteroid over a number of nights. The following
        `~sbpy.data.Ephem` object summarizes the observations:

        >>> from sbpy.data import Ephem
        >>> import astropy.units as u
        >>> obs = Ephem.from_columns([[2451223, 2451224, 2451226]*u.d,
        ...                           [120.1, 121.3, 124.9]*u.deg,
        ...                           [12.4, 12.2, 10.8]*u.deg],
        ...                          names=('JD', 'RA', 'DEC'))
        >>> obs
        <QTable length=3>
            JD       RA     DEC
            d       deg     deg
         float64  float64 float64
        --------- ------- -------
        2451223.0   120.1    12.4
        2451224.0   121.3    12.2
        2451226.0   124.9    10.8

        After analyzing the observations, you would like to add the
        measured apparent V-band magnitudes to this object. You have
        one observation from the first night, two from the second night,
        and three from the third night. Instead of re-creating ``obs``,
        `~sbpy.data.DataClass.apply` offers a convenient way to
        supplement ``obs``:

        >>> obs.apply([[12.1], [12.5, 12.6], [13.5, 13.4, 13.5]],
        ...           name='V', unit='mag')
        >>> obs
        <QTable length=6>
            JD       RA     DEC      V
            d       deg     deg     mag
         float64  float64 float64 float64
        --------- ------- ------- -------
        2451223.0   120.1    12.4    12.1
        2451224.0   121.3    12.2    12.5
        2451224.0   121.3    12.2    12.6
        2451226.0   124.9    10.8    13.5
        2451226.0   124.9    10.8    13.4
        2451226.0   124.9    10.8    13.5

        Note how the data table has been re-arranged and rows have been
        duplicated in order to provide the expected shape.
        """
        _newtable = None

        # strip units off Quantity objects
        if isinstance(data, u.Quantity):
            unit = data.unit
            data = data.value

        if len(data) != len(self.table):
            raise DataClassError('Data parameter must have '
                                 'same length as self._table')

        _newcolumn = array([])
        for i, val in enumerate(data):
            if not isinstance(val, (list, tuple, ndarray)):
                val = [val]
            _newcolumn = hstack([_newcolumn, val])
            # add corresponding row from _table for each element in val
            for j in range(len(val)):
                # initialize new QTable object
                if _newtable is None:
                    _newtable = QTable(self.table[0])
                    continue
                _newtable.add_row(self.table[i])

        # add new column
        _newtable.add_column(Column(_newcolumn, name=name, unit=unit))

        self._table = _newtable
Example #17
class DataClass():
    """`~sbpy.data.DataClass` serves as the base class for all data
    container classes in `sbpy` in order to provide consistent
    functionality throughout all these classes.

    The core of `~sbpy.data.DataClass` is an `~astropy.table.QTable`
    object (referred to as the `data table` below) - a type of
    `~astropy.table.Table` object that supports the `~astropy.units`
    formalism on a per-column base - which already provides most of
    the required functionality. `~sbpy.data.DataClass` objects can be
    manually generated from dictionaries
    (`~sbpy.data.DataClass.from_dict`), `~numpy.array`-like
    (`~sbpy.data.DataClass.from_array`) objects, or directly from
    another `~astropy.table.QTable` object.

    A few high-level functions for table data access or modification
    are provided; other, more complex modifications can be applied to
    the underlying table object (`~sbpy.data.DataClass.table`) directly.

    """

    def __init__(self, data):
        self._table = QTable()
        # self.altkeys = {}  # dictionary for alternative column names

        if (len(data.items()) == 1 and 'table' in data.keys()):
            # single item provided named 'table' -> already Table object
            self._table = QTable(data['table'])
        else:
            # treat kwargs as dictionary
            for key, val in data.items():
                try:
                    unit = val.unit
                    val = val.value
                except AttributeError:
                    unit = None

                # check if val is already list-like
                try:
                    val[0]
                except (TypeError, IndexError):
                    val = [val]

                self._table[key] = Column(val, unit=unit)

    @classmethod
    def from_dict(cls, data):
        """Create `~sbpy.data.DataClass` object from dictionary or list of
        dictionaries.

        Parameters
        ----------
        data : `~collections.OrderedDict`, dictionary, or list (or similar)
             of dictionaries
             Data that will be ingested in the
             `~sbpy.data.DataClass` object. Each dictionary creates a
             row in the data table. Dictionary keys are used as column
             names; corresponding values must be scalar (cannot be
             lists or arrays). If a list of dictionaries is provided,
             all dictionaries have to provide the same set of keys
             (and units, if used at all).

        Returns
        -------
        `DataClass` object

        Examples
        --------
        >>> import astropy.units as u
        >>> from sbpy.data import Orbit
        >>> orb = Orbit.from_dict({'a': 2.7674*u.au,
        ...                        'e': 0.0756,
        ...                        'i': 10.59321*u.deg})

        Since dictionaries have no specific order, the ordering of the
        column in the example above is not defined. If your data table
        requires a specific order, use an ``OrderedDict``:

        >>> from collections import OrderedDict
        >>> orb = Orbit.from_dict(OrderedDict([('a', 2.7674*u.au),
        ...                                    ('e', 0.0756),
        ...                                    ('i', 10.59321*u.deg)]))
        >>> print(orb)
        <QTable length=1>
           a       e       i
           AU             deg
        float64 float64 float64
        ------- ------- --------
         2.7674  0.0756 10.59321
        >>> print(orb.column_names) # doctest: +SKIP
        <TableColumns names=('a','e','i')>
        >>> print(orb.table['a', 'e', 'i'])
          a      e       i
          AU            deg
        ------ ------ --------
        2.7674 0.0756 10.59321

        """
        if isinstance(data, (dict, OrderedDict)):
            return cls(data)
        elif isinstance(data, (list, ndarray, tuple)):
            # build table from first dict and append remaining rows
            tab = cls(data[0])
            for row in data[1:]:
                tab.add_rows(row)
            return tab
        else:
            raise TypeError('this function requires a dictionary or a '
                            'list of dictionaries')

    @classmethod
    def from_array(cls, data, names):
        """Create `~sbpy.data.DataClass` object from list, `~numpy.ndarray`,
        or tuple.

        Parameters
        ----------
        data : list, `~numpy.ndarray`, or tuple
            Data that will be ingested in `DataClass` object. A one
            dimensional sequence will be interpreted as a single row. Each
            element that is itself a sequence will be interpreted as a
            column.
        names : list
            Column names, must have the same number of names as data columns.

        Returns
        -------
        `DataClass` object

        Examples
        --------
        >>> from sbpy.data import DataClass
        >>> import astropy.units as u
        >>> dat = DataClass.from_array([[1, 2, 3]*u.deg,
        ...                             [4, 5, 6]*u.km,
        ...                             ['a', 'b', 'c']],
        ...                            names=('a', 'b', 'c'))
        >>> print(dat.table)
         a   b   c
        deg  km
        --- --- ---
        1.0 4.0   a
        2.0 5.0   b
        3.0 6.0   c

        """

        if isinstance(data, (list, ndarray, tuple)):
            return cls.from_dict(OrderedDict(zip(names, data)))
        else:
            raise TypeError('this function requires a list, tuple or a '
                            'numpy array')

    @classmethod
    def from_table(cls, data):
        """Create `DataClass` object from `~astropy.table.Table` or
        `astropy.table.QTable` object.

        Parameters
        ----------
        data : astropy `Table` object, mandatory
             Data that will be ingested in `DataClass` object.

        Returns
        -------
        `DataClass` object

        Examples
        --------
        >>> from astropy.table import QTable
        >>> import astropy.units as u
        >>> from sbpy.data import DataClass
        >>> tab = QTable([[1,2,3]*u.kg,
        ...               [4,5,6]*u.m/u.s,],
        ...              names=['mass', 'velocity'])
        >>> dat = DataClass.from_table(tab)
        >>> print(dat.table)
        mass velocity
         kg   m / s
        ---- --------
         1.0      4.0
         2.0      5.0
         3.0      6.0
        """

        return cls({'table': data})

    @classmethod
    def from_file(cls, filename, **kwargs):
        """Create `DataClass` object from a file using
        `~astropy.table.Table.read`.

        Parameters
        ----------
        filename : str
             Name of the file that will be read and parsed.
        **kwargs : additional parameters
             Optional parameters that will be passed on to
             `~astropy.table.Table.read`.

        Returns
        -------
        `DataClass` object

        Notes
        -----
        This function is merely a wrapper around
        `~astropy.table.Table.read`. Please refer to the documentation of
        that function for additional information on optional parameters
        and data formats that are available. Furthermore, note that this
        function is not able to identify units. If you want to work with
        `~astropy.units` you have to assign them manually to the object
        columns.

        Examples
        --------
        >>> from sbpy.data import DataClass

        >>> dat = DataClass.from_file('data.txt',
        ...                           format='ascii') # doctest: +SKIP
        """

        data = QTable.read(filename, **kwargs)

        return cls({'table': data})

    def to_file(self, filename, format='ascii', **kwargs):
        """Write object to a file using
        `~astropy.table.Table.write`.

        Parameters
        ----------
        filename : str
             Name of the file that will be written.
        format : str, optional
             Data format in which the file should be written. Default:
             ``ASCII``
        **kwargs : additional parameters
             Optional parameters that will be passed on to
             `~astropy.table.Table.write`.

        Returns
        -------
        None

        Notes
        -----
        This function is merely a wrapper around
        `~astropy.table.Table.write`. Please refer to the
        documentation of that function for additional information on
        optional parameters and data formats that are
        available. Furthermore, note that this function is not able to
        write unit information to the file.

        Examples
        --------
        >>> from sbpy.data import DataClass
        >>> import astropy.units as u
        >>> dat = DataClass.from_array([[1, 2, 3]*u.deg,
        ...                             [4, 5, 6]*u.km,
        ...                             ['a', 'b', 'c']],
        ...                            names=('a', 'b', 'c'))
        >>> dat.to_file('test.txt')

        """

        self._table.write(filename, format=format, **kwargs)

    def __len__(self):
        """Get number of data elements in _table"""
        return len(self._table)

    def __getattr__(self, field):
        """Get attribute from ``self._table`` (columns, rows); checks
        for and may use alternative field names."""

        if field in dir(self):
            return object.__getattribute__(self, field)
        else:
            try:
                field = self._translate_columns(field)[0]
                return self._table[field]
            except (KeyError, IndexError, AttributeError):
                raise AttributeError('Attribute {:s} not available.'.format(
                    field))

    def __repr__(self):
        """Return representation of the underlying data table
        (``self._table.__repr__()``)"""
        return self._table.__repr__()

    def __getitem__(self, ident):
        """Return columns or rows from data table (``self._table``); checks
        for and may use alternative field names."""

        # iterable
        if isinstance(ident, (list, tuple, ndarray)):
            if all([isinstance(i, str) for i in ident]):
                # list of column names
                self = self._convert_columns(ident)
                newkeylist = [self._translate_columns(i)[0] for i in ident]
                ident = newkeylist
                # return as new DataClass object
                return self.from_table(self._table[ident])
            # ignore lists of boolean (masks)
            elif all([isinstance(i, bool) for i in ident]):
                pass
            # ignore lists of integers
            elif all([isinstance(i, int) for i in ident]):
                pass
        # individual strings
        elif isinstance(ident, str):
            self = self._convert_columns(ident)
            ident = self._translate_columns(ident)[0]

        # return as element from self._table
        return self._table[ident]

    def __setitem__(self, *args):
        """Forward ``__setitem__`` to ``self._table``."""
        self._table.__setitem__(*args)

    def _translate_columns(self, target_colnames):
        """Translate target_colnames to the corresponding column names
        present in this object's table. Returns a list of actual column
        names present in this object that corresponds to target_colnames
        (order is preserved). Raises KeyError if not all columns are
        present or one or more columns could not be translated.
        """

        if not isinstance(target_colnames, (list, ndarray, tuple)):
            target_colnames = [target_colnames]

        translated_colnames = deepcopy(target_colnames)
        for idx, colname in enumerate(target_colnames):
            # colname is already a column name in self.table
            if colname in self.column_names:
                continue
            # colname is an alternative column name
            elif colname in sum(conf.fieldnames, []):
                for alt in conf.fieldnames[conf.fieldname_idx[colname]]:
                    # translation available for colname
                    if alt in self.column_names:
                        translated_colnames[idx] = alt
                        break
            # colname is unknown, raise a KeyError
            else:
                raise KeyError('field {:s} not available.'.format(
                    colname))

        return translated_colnames

    def _convert_columns(self, target_colnames):
        """Convert target_colnames, if necessary. Converted columns will be
        added as columns to ``self`` using the field names provided in
        target_colnames. No error is returned by this function if a
        field could not be converted.
        """

        if not isinstance(target_colnames, (list, ndarray, tuple)):
            target_colnames = [target_colnames]

        for colname in target_colnames:
            # ignore, if colname is unknown (KeyError)
            try:
                # ignore if colname has already been converted
                if any([alt in self.column_names for alt
                        in conf.fieldnames[conf.fieldname_idx[colname]]]):
                    continue
                # consider alternative names for colname -> alt
                for alt in conf.fieldnames[conf.fieldname_idx[colname]]:
                    if alt in list(conf.field_eq.keys()):
                        # conversion identified
                        convname = self._translate_columns(
                            list(conf.field_eq[alt].keys())[0])[0]
                        convfunc = list(conf.field_eq[alt].values())[0]
                        if convname in self.column_names:
                            # create new column for the converted field
                            self.add_column(convfunc(self.table[convname]),
                                            colname)
                            break
            except KeyError:
                continue

        return self
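
The two helpers above reduce to a lookup in a registry of alternative field names. A self-contained sketch of that idea; the alias registry below is invented for illustration and is not sbpy's actual ``conf.fieldnames`` content:

fieldnames = [['RA', 'ra'], ['DEC', 'dec'], ['epoch', 'datetime_jd']]
fieldname_idx = {name: idx for idx, group in enumerate(fieldnames)
                 for name in group}

def translate(colname, available):
    """Map colname to whichever of its aliases exists in available."""
    if colname in available:
        return colname
    for alt in fieldnames[fieldname_idx[colname]]:  # KeyError if unknown
        if alt in available:
            return alt
    raise KeyError('field {:s} not available.'.format(colname))

print(translate('ra', ['RA', 'DEC']))  # prints 'RA'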

    @property
    def table(self):
        """Return `~astropy.table.QTable` object containing all data."""
        return self._table

    @property
    def column_names(self):
        """Return a list of all column names in the data table."""
        return self._table.columns

    def add_rows(self, rows, join_type='inner'):
        """Append additional rows to the existing data table. An individual
        row can be provided in list, tuple, `~numpy.ndarray`, or
        dictionary form. Multiple rows can be provided in the form of
        a list, tuple, or `~numpy.ndarray` of individual
        rows. Multiple rows can also be provided in the form of a
        `~astropy.table.QTable` or another `~sbpy.data.DataClass`
        object. Parameter ``join_type`` defines which columns appear
        in the final output table: ``inner`` only keeps those columns
        that appear in both the original table and the rows to be
        added; ``outer`` will keep all columns and populate some with
        placeholders, if necessary. If a row is provided as a list, its
        elements must be in the same order as the table columns. In all
        cases, `~astropy.units` matching those used in the data table
        must be provided in ``rows``.

        Parameters
        ----------
        rows : list, tuple, `~numpy.ndarray`, dict, or `~collections.OrderedDict`
            Data to be appended to the table; each row must provide a
            value for every column of the existing table, using matching
            units.
        join_type : str, optional
            Defines which columns are kept in the output table: ``inner``
            only keeps those columns that appear in both the original
            table and the rows to be added; ``outer`` will keep all
            columns and populate them with placeholders, if necessary.
            Default: ``inner``

        Returns
        -------
        n : int
            Total number of rows in the updated data table.

        Examples
        --------
        >>> from sbpy.data import DataClass
        >>> import astropy.units as u
        >>> dat = DataClass.from_array([[1, 2, 3]*u.Unit('m'),
        ...                             [4, 5, 6]*u.m/u.s,
        ...                             ['a', 'b', 'c']],
        ...                            names=('a', 'b', 'c'))
        >>> dat.add_rows({'a': 5*u.m, 'b': 8*u.m/u.s, 'c': 'e'})
        4
        >>> print(dat.table)
         a    b    c
         m  m / s
        --- ----- ---
        1.0   4.0   a
        2.0   5.0   b
        3.0   6.0   c
        5.0   8.0   e
        >>> dat.add_rows(([6*u.m, 9*u.m/u.s, 'f'],
        ...               [7*u.m, 10*u.m/u.s, 'g']))
        6
        >>> dat.add_rows(dat)
        12

        """
        if isinstance(rows, QTable):
            self._table = vstack([self._table, rows], join_type=join_type)
        elif isinstance(rows, DataClass):
            self._table = vstack([self._table, rows.table],
                                 join_type=join_type)
        elif isinstance(rows, (dict, OrderedDict)):
            try:
                newrow = [rows[colname] for colname in self._table.columns]
            except KeyError as e:
                raise ValueError('data for column {0} missing in row {1}'.
                                 format(e, rows))
            self.add_rows(newrow)
        elif isinstance(rows, (list, ndarray, tuple)):
            if (not isinstance(rows[0], (u.quantity.Quantity, float)) and
                    isinstance(rows[0], (dict, OrderedDict,
                                         list, ndarray, tuple))):
                for subrow in rows:
                    self.add_rows(subrow)
            else:
                self._table.add_row(rows)
        return len(self._table)
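
The docstring example only exercises the default ``inner`` join. A short sketch of ``outer``, where a column missing from one side comes back masked; the column names here are arbitrary:

import astropy.units as u
from astropy.table import QTable
from sbpy.data import DataClass

dat = DataClass.from_array([[1, 2] * u.m], names=['a'])
extra = QTable([[3] * u.m, ['x']], names=['a', 'b'])
dat.add_rows(extra, join_type='outer')  # returns 3
print(dat.table)  # 'b' is masked ('--') in the first two rows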

    def add_column(self, data, name, **kwargs):
        """Append a single column to the current data table. The lenght of
        the input list, `~numpy.ndarray`, or tuple must match the current
        number of rows in the data table.

        Parameters
        ----------
        data : list, `~numpy.ndarray`, or tuple
            Data to be filled into the table; required to have the same
            length as the existing table's number of rows.
        name : str
            Name of the new column; must be different from already existing
            column names.
        **kwargs : additional parameters
            Additional optional parameters will be passed on to
            `~astropy.table.Table.add_column`.

        Returns
        -------
        n : int
            Total number of columns in the updated data table.

        Examples
        --------
        >>> from sbpy.data import DataClass
        >>> import astropy.units as u
        >>> dat = DataClass.from_array([[1, 2, 3]*u.Unit('m'),
        ...                             [4, 5, 6]*u.m/u.s,
        ...                             ['a', 'b', 'c']],
        ...                            names=('a', 'b', 'c'))
        >>> dat.add_column([10, 20, 30]*u.kg, name='d')
        4
        >>> print(dat.table)
         a    b    c   d
         m  m / s      kg
        --- ----- --- ----
        1.0   4.0   a 10.0
        2.0   5.0   b 20.0
        3.0   6.0   c 30.0
        """

        self._table.add_column(Column(data, name=name), **kwargs)
        return len(self.column_names)
Example #19
    def _do_photometry(self, n_start=1):
        """
        Helper function which performs the iterations of the photometry
        process.

        Parameters
        ----------
        n_start : int
            Integer representing the start index of the iteration.  It
            is 1 if init_guesses are None, and 2 otherwise.

        Returns
        -------
        output_table : `~astropy.table.Table` or None
            Table with the photometry results, i.e., centroid and flux
            estimates, along with the initial estimates used to start
            the fitting process.
        """
        output_table = QTable()
        self._define_fit_param_names()

        for (init_parname, fit_parname) in zip(self._pars_to_set.keys(),
                                               self._pars_to_output.keys()):
            output_table.add_column(Column(name=init_parname))
            output_table.add_column(Column(name=fit_parname))

        sources = self.finder(self._residual_image)

        n = n_start
        while ((sources is not None and len(sources) > 0)
               and (self.niters is None or n <= self.niters)):
            positions = np.transpose(
                (sources['xcentroid'], sources['ycentroid']))
            apertures = CircularAperture(positions, r=self.aperture_radius)
            sources['aperture_flux'] = aperture_photometry(
                self._residual_image, apertures)['aperture_sum']

            init_guess_tab = QTable(names=['id', 'x_0', 'y_0', 'flux_0'],
                                    data=[
                                        sources['id'], sources['xcentroid'],
                                        sources['ycentroid'],
                                        sources['aperture_flux']
                                    ])
            self._get_additional_columns(sources, init_guess_tab)

            for param_tab_name, param_name in self._pars_to_set.items():
                if param_tab_name not in ('x_0', 'y_0', 'flux_0'):
                    init_guess_tab.add_column(
                        Column(name=param_tab_name,
                               data=(getattr(self.psf_model, param_name) *
                                     np.ones(len(sources)))))

            star_groups = self.group_maker(init_guess_tab)
            table, self._residual_image = super().nstar(
                self._residual_image, star_groups)

            star_groups = star_groups.group_by('group_id')
            table = hstack([star_groups, table])

            table['iter_detected'] = n * np.ones(table['x_fit'].shape,
                                                 dtype=int)

            output_table = vstack([output_table, table])

            # do not warn if no sources are found beyond the first iteration
            with warnings.catch_warnings():
                warnings.simplefilter('ignore', NoDetectionsWarning)
                sources = self.finder(self._residual_image)

            n += 1

        return output_table
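
For context, this helper implements the detect-fit-subtract loop behind iterative PSF photometry. A hedged usage sketch of the legacy `photutils.psf` front end it appears to belong to (class and argument names follow photutils 1.x; all numeric values are placeholders):

import numpy as np
from astropy.modeling.fitting import LevMarLSQFitter
from photutils.background import MMMBackground
from photutils.detection import DAOStarFinder
from photutils.psf import (DAOGroup, IntegratedGaussianPRF,
                           IterativelySubtractedPSFPhotometry)

phot = IterativelySubtractedPSFPhotometry(
    group_maker=DAOGroup(crit_separation=8.0),   # groups overlapping stars
    bkg_estimator=MMMBackground(),               # per-cutout background
    psf_model=IntegratedGaussianPRF(sigma=2.0),  # placeholder PSF width
    fitter=LevMarLSQFitter(),
    finder=DAOStarFinder(threshold=10.0, fwhm=4.0),
    fitshape=(11, 11),
    niters=3,
    aperture_radius=5.0)

image = np.random.rand(64, 64)  # stands in for real data
result = phot(image)            # astropy Table of fitted positions/fluxes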
Example #20
File: ephem.py Project: sjoset/sbpy
    def from_mpc(cls,
                 targetids,
                 epochs=None,
                 location='500',
                 ra_format=None,
                 dec_format=None,
                 **kwargs):
        """Load ephemerides from the
        `Minor Planet Center <https://minorplanetcenter.net>`_.

        Parameters
        ----------
        targetids : str or iterable of str
            Target identifier, resolvable by the Minor Planet
            Ephemeris Service [MPES]_, e.g., 2P, C/1995 O1, P/Encke,
            (1), 3200, Ceres, and packed designations, for one or more
            targets.

        epochs : `~astropy.time.Time` object, or dictionary, optional
            Request ephemerides at these epochs.  May be a single
            epoch or multiple epochs as `~astropy.time.Time`
            (see :ref:`epochs`) or a dictionary describing a
            linearly-spaced array of epochs. All epochs should be
            provided in UTC; if not, they will be converted to UTC and
            a `~sbpy.data.TimeScaleWarning` will be raised.
            If ``None`` (default), the current date and time will be
            used.

            For the dictionary format, the keys ``start`` (start
            epoch), ``step`` (step size), ``stop`` (end epoch), and/or
            ``number`` (number of epochs total) are used.  Only one of
            ``stop`` and ``number`` may be specified at a time.
            ``step``, ``stop``, and ``number`` are optional. The values
            of ``start`` and ``stop`` must be `~astropy.time.Time` objects.
            ``number`` should be an integer value; ``step`` should be a
            `~astropy.units.Quantity` with an integer value and units of
            seconds, minutes, hours, or days.

        location : various, optional
            Location of the observer as an IAU observatory code
            [OBSCODES]_ (string), a 3-element array of Earth
            longitude, latitude, altitude, or an
            `~astropy.coordinates.EarthLocation`.  Longitude and
            latitude should be parseable by
            `~astropy.coordinates.Angle`, and altitude should be
            parsable by `~astropy.units.Quantity` (with units of
            length).  If ``None``, then the geocenter (code 500) is
            used.

        ra_format : dict, optional
            Format the RA column with
            `~astropy.coordinates.Angle.to_string` using these keyword
            arguments, e.g.,
            ``{'sep': ':', 'unit': 'hourangle', 'precision': 1}``.

        dec_format : dict, optional
            Format the Dec column with
            `~astropy.coordinates.Angle.to_string` using these keyword
            arguments, e.g., ``{'sep': ':', 'precision': 0}``.

        **kwargs
            Additional keyword arguments are passed to
            `~astroquery.mpc.MPC.get_ephemerides`: ``eph_type``,
            ``proper_motion``, ``proper_motion_unit``, ``suppress_daytime``,
            ``suppress_set``, ``perturbed``, ``unc_links``, ``cache``.

        Returns
        -------
        `~Ephem` object
            The resulting object will be populated with columns as
            defined in `~astroquery.mpc.MPC.get_ephemerides`; refer to
            that documentation for information on how to modify the
            list of queried parameters.


        Examples
        --------
        Query a single set of ephemerides of Ceres as observed from
        Maunakea:

        >>> from sbpy.data import Ephem
        >>> from astropy.time import Time
        >>> epoch = Time('2018-05-14', scale='utc')
        >>> eph = Ephem.from_mpc('ceres', epoch, 568) # doctest: +REMOTE_DATA

        Query a range of ephemerides of comet 2P/Encke as observed from
        Maunakea:

        >>> import astropy.units as u
        >>> epochs = {'start': Time('2019-01-01'),
        ...           'step': 1*u.d, 'number': 365}
        >>> eph = Ephem.from_mpc('2P', epochs, 568) # doctest: +REMOTE_DATA

        Notes
        -----
        * All properties are provided in the J2000.0 reference system.
        * See `astroquery.mpc.MPC.get_ephemerides` and the Minor
          Planet Ephemeris Service user's guide [MPES]_ for details,
          including acceptable target names.


        References
        ----------
        .. [MPES] Williams, G. The Minor Planet Ephemeris Service.
           https://minorplanetcenter.net/iau/info/MPES.pdf

        .. [OBSCODES] IAU Minor Planet Center.  List of observatory
           codes. https://minorplanetcenter.net/iau/lists/ObsCodesF.html

        """

        # parameter check

        # if targetids is a list, run separate MPC queries and append
        if not isinstance(targetids, (list, ndarray, tuple)):
            targetids = [targetids]

        _epochs = None  # avoid modifying epochs in-place
        start = None
        if epochs is None:
            _epochs = Time([Time.now()])
        elif isinstance(epochs, Time):
            if not iterable(epochs):
                _epochs = Time([epochs])
            else:
                _epochs = epochs

            if _epochs.scale != 'utc':
                warn(('converting {} epochs to utc for use in '
                      'astroquery.mpc').format(_epochs.scale),
                     TimeScaleWarning)
                _epochs = _epochs.utc
        elif isinstance(epochs, dict):
            _epochs = epochs.copy()
            start = _epochs['start']  # required
            if start.scale != 'utc':
                warn(('converting {} start epoch to utc for use in '
                      'astroquery.mpc').format(start.scale), TimeScaleWarning)
                start = start.utc
            step = _epochs.get('step')
            stop = _epochs.get('stop')
            if stop is not None and stop.scale != 'utc':
                warn(('converting {} stop epoch to utc for use in '
                      'astroquery.mpc').format(stop.scale), TimeScaleWarning)
                stop = stop.utc
            number = _epochs.get('number')

            if step is not None and stop is None:
                step = u.Quantity(step)
                if step.unit not in (u.d, u.h, u.min, u.s):
                    raise QueryError(
                        'step must have units of days, hours, minutes,'
                        ' or seconds')
            if stop is not None:
                if step is not None and number is None:
                    # start and stop both defined, estimate number of steps
                    dt = (Time(stop).jd - Time(start).jd) * u.d
                    number = int((dt / step).decompose()) + 1
                elif step is None and number is not None:
                    step = int(
                        (stop - start).jd * 1440 / (number - 1)) * u.minute
                else:
                    raise QueryError(
                        ('epoch definition unclear; step xor number '
                         'must be provided with start and stop'))
        else:
            raise ValueError('Invalid `epochs` parameter')

        # append ephemerides table for each targetid
        all_eph = None
        for targetid in targetids:
            try:
                # get ephemeris
                if start is None:
                    eph = []
                    for i in range(len(_epochs)):
                        e = MPC.get_ephemeris(targetid,
                                              location=location,
                                              start=Time(_epochs[i],
                                                         scale='utc'),
                                              number=1,
                                              **kwargs)
                        e['Date'] = e['Date'].iso  # for vstack to work
                        eph.append(e)
                    eph = QTable(vstack(eph))
                    eph['Date'] = Time(eph['Date'], scale='utc')
                else:
                    eph = MPC.get_ephemeris(targetid,
                                            location=location,
                                            start=start,
                                            step=step,
                                            number=number,
                                            **kwargs)
            except InvalidQueryError as e:
                raise QueryError('Error raised by astroquery.mpc: {:s}'.format(
                    str(e)))

            # add targetname column
            eph.add_column(Column([targetid] * len(eph), name='Targetname'),
                           index=0)

            if all_eph is None:
                all_eph = eph
            else:
                all_eph = vstack([all_eph, eph])

        all_eph = QTable(all_eph)

        # convert RA and Dec to Angle
        all_eph['RA'] = Angle(all_eph['RA'], all_eph['RA'].unit)
        all_eph['Dec'] = Angle(all_eph['Dec'], all_eph['Dec'].unit)

        if ra_format is not None:
            all_eph['RA'].info.format = lambda x: x.to_string(**ra_format)

        if dec_format is not None:
            all_eph['Dec'].info.format = lambda x: x.to_string(**dec_format)

        return cls.from_table(all_eph)
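
Besides the ``start``/``step``/``number`` form shown in the docstring, the dictionary form also accepts ``start``/``stop``/``number``; the code above then derives the step internally as ``(stop - start) / (number - 1)``, truncated to whole minutes. For example:

from astropy.time import Time
from sbpy.data import Ephem

epochs = {'start': Time('2019-01-01'),
          'stop': Time('2019-01-31'),
          'number': 31}  # one epoch per day; step derived internally
eph = Ephem.from_mpc('2P', epochs, location='568')  # doctest: +REMOTE_DATA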
Example #21
def mk_summary(dlas, prefix, outfil, specpath=None, htmlfil=None):
    """ Loops through the DLA list and generates a Table

    Also pushes the 1D spectra into the folder

    Parameters
    ----------
    dlas : DLASurvey
    prefix : str
      Prefix for the survey names
    outfil : str
      Name of the output FITS summary file
    specpath : str, optional
      Output path for the 1D spectra
    htmlfil : str, optional
      Name of the output HTML file; defaults to 'tmp.html'

    Returns
    -------
    dla_table : `~astropy.table.QTable`
      Summary table of the survey
    """
    #
    if htmlfil is None:
        htmlfil = 'tmp.html'

    # Constructing
    # QSO, RA/DEC
    cqso = Column(dlas.qso, name='QSO')
    ra = dlas.coord.ra.degree
    dec = dlas.coord.dec.degree
    jname = []
    for abs_sys in dlas._abs_sys:
        jname.append(survey_name(prefix, abs_sys))

    cjname = Column(jname, name='Name')
    cra = Column(ra, name='RA', unit=u.degree)
    cdec = Column(dec, name='DEC', unit=u.degree)
    czem = Column(dlas.zem, name='Z_QSO')

    # Begin the Table
    dla_table = QTable([cjname, cqso, cra, cdec, czem])

    # LLS properties
    czabs = Column(dlas.zabs, name='ZABS')
    cNHI = Column(dlas.NHI, name='logNHI')
    csigNHI = Column(dlas.sig_NHI, name='sig(logNHI)')

    # Add to Table
    dla_table.add_columns([czabs, cNHI, csigNHI])

    # Spectra files
    all_sfiles = []
    for jj, ills in enumerate(dlas._abs_sys):
        sub_spec = mk_1dspec(ills, name=cjname[jj], outpath=specpath)
        # Pad the spectra file list to a fixed width of 5 entries
        while len(sub_spec) < 5:
            sub_spec.append('NULL')
        # Append
        all_sfiles.append(sub_spec)

    cspec = Column(np.array(all_sfiles), name='SPEC_FILES')
    dla_table.add_column(cspec)

    # Sort
    dla_table.sort('RA')

    # Write
    print('Writing {:s}'.format(outfil))
    xxf.table_to_fits(dla_table, outfil)
    print('Writing {:s}'.format(htmlfil))
    Table(dla_table).write(htmlfil)

    return dla_table
Example #22
File: geom.py Project: vikasj78/gammapy
    def group_table(self, edges):
        """Compute bin groups table for the map axis, given coarser bin edges.

        Parameters
        ----------
        edges : `~astropy.units.Quantity`
            Group bin edges.

        Returns
        -------
        groups : `~astropy.table.Table`
            Map axis group table.
        """
        # TODO: try to simplify this code
        if self.node_type != "edges":
            raise ValueError("Only edge based map axis can be grouped")

        edges_pix = self.coord_to_pix(edges)
        edges_pix = np.clip(edges_pix, -0.5, self.nbin - 0.5)
        edges_idx = np.round(edges_pix + 0.5) - 0.5
        edges_idx = np.unique(edges_idx)
        edges_ref = self.pix_to_coord(edges_idx)

        groups = QTable()
        groups[f"{self.name}_min"] = edges_ref[:-1]
        groups[f"{self.name}_max"] = edges_ref[1:]

        groups["idx_min"] = (edges_idx[:-1] + 0.5).astype(int)
        groups["idx_max"] = (edges_idx[1:] - 0.5).astype(int)

        if len(groups) == 0:
            raise ValueError("No overlap between reference and target edges.")

        groups["bin_type"] = "normal   "

        edge_idx_start, edge_ref_start = edges_idx[0], edges_ref[0]
        if edge_idx_start > 0:
            underflow = {
                "bin_type": "underflow",
                "idx_min": 0,
                "idx_max": edge_idx_start,
                f"{self.name}_min": self.pix_to_coord(-0.5),
                f"{self.name}_max": edge_ref_start,
            }
            groups.insert_row(0, vals=underflow)

        edge_idx_end, edge_ref_end = edges_idx[-1], edges_ref[-1]

        if edge_idx_end < (self.nbin - 0.5):
            overflow = {
                "bin_type": "overflow",
                "idx_min": edge_idx_end + 1,
                "idx_max": self.nbin - 1,
                f"{self.name}_min": edge_ref_end,
                f"{self.name}_max": self.pix_to_coord(self.nbin - 0.5),
            }
            groups.add_row(vals=overflow)

        group_idx = Column(np.arange(len(groups)))
        groups.add_column(group_idx, name="group_idx", index=0)
        return groups
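
A hedged usage sketch for the method above, assuming a gammapy release of the same vintage as this snippet, where `~gammapy.maps.MapAxis.from_energy_bounds` and ``group_table`` are both available:

import astropy.units as u
from gammapy.maps import MapAxis

axis = MapAxis.from_energy_bounds(1 * u.TeV, 100 * u.TeV, nbin=8,
                                  name="energy")
# regroup the 8 fine bins into 2 coarse groups at 1, 10, and 100 TeV
print(axis.group_table(edges=[1, 10, 100] * u.TeV))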
Example #23
def initialize_galaxy(Nstep):
    """Return an empty galaxy-evolution QTable with Nstep rows per column."""
    galaxy = QTable()
    dummy = np.zeros(Nstep)
    # (column name, unit) pairs for the tracked quantities
    columns = [('age', u.yr),
               ('Ms', u.Msun),
               ('Mg', u.Msun),
               ('Mh', u.Msun),
               ('Mcgm', u.Msun),
               ('dMrdt', u.Msun / u.yr),
               ('sfr', u.Msun / u.yr),
               ('tlogoh', u.m / u.m),  # effectively dimensionless
               ('lookback', u.yr),
               ('dt', u.yr)]
    for name, unit in columns:
        galaxy.add_column(Column(name=name, data=dummy, unit=unit))

    return galaxy
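
Usage is then just a matter of filling the pre-allocated columns; a brief sketch:

import numpy as np
import astropy.units as u

galaxy = initialize_galaxy(100)
galaxy['age'] = np.linspace(0.0, 13.7e9, 100) * u.yr  # replace a column
print(galaxy['age'][:3])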
Example #24
# Print per-annulus statistics for each dataset, rounded for display
nicetable26 = np.vstack((np.round(table26, decimals=2)))
name = Table(nicetable26,
             names=('Pixels', 'Speckles', 'Percent', 'Avg Intensity'))
name.pprint_all()

nicetable = np.vstack((np.round(table38, decimals=2)))
name = Table(nicetable,
             names=('Pixels', 'Speckles', 'Percent', 'Avg Intensity'))
name.pprint_all()

nicetable = np.vstack((np.round(table40, decimals=2)))
name = Table(nicetable,
             names=('Pixels', 'Speckles', 'Percent', 'Avg Intensity'))
name.pprint_all()

nicetable = np.vstack((np.round(table44, decimals=2)))
name = Table(nicetable,
             names=('Pixels', 'Speckles', 'Percent', 'Avg Intensity'))
name.pprint_all()

bigavg = (table26 + table30 + table38 + table40 + table44) / 5

column_len = bigavg.shape[0]

print('\n\n')
"""plotting the final averages"""
t = QTable(np.round(bigavg, decimals=2),
           names=('Pixels', 'Speckles', 'Percent', 'Avg Intensity'))
t.add_column(np.arange(1, 1 + column_len), name='Annulus', index=0)
#print(t)
t.pprint(align='^')
Example #25
File: geom.py Project: gammapy/gammapy
    def group_table(self, edges):
        """Compute bin groups table for the map axis, given coarser bin edges.

        Parameters
        ----------
        edges : `~astropy.units.Quantity`
            Group bin edges.

        Returns
        -------
        groups : `~astropy.table.Table`
            Map axis group table.
        """
        # TODO: try to simplify this code
        if self.node_type != "edges":
            raise ValueError("Only edge based map axis can be grouped")

        edges_pix = self.coord_to_pix(edges)
        edges_pix = np.clip(edges_pix, -0.5, self.nbin - 0.5)
        edges_idx = np.round(edges_pix + 0.5) - 0.5
        edges_idx = np.unique(edges_idx)
        edges_ref = self.pix_to_coord(edges_idx) * self.unit

        groups = QTable()
        groups["{}_min".format(self.name)] = edges_ref[:-1]
        groups["{}_max".format(self.name)] = edges_ref[1:]

        groups["idx_min"] = (edges_idx[:-1] + 0.5).astype(int)
        groups["idx_max"] = (edges_idx[1:] - 0.5).astype(int)

        if len(groups) == 0:
            raise ValueError("No overlap between reference and target edges.")

        groups["bin_type"] = "normal   "

        edge_idx_start, edge_ref_start = edges_idx[0], edges_ref[0]
        if edge_idx_start > 0:
            underflow = {
                "bin_type": "underflow",
                "idx_min": 0,
                "idx_max": edge_idx_start,
                "{}_min".format(self.name): self.pix_to_coord(-0.5) * self.unit,
                "{}_max".format(self.name): edge_ref_start,
            }
            groups.insert_row(0, vals=underflow)

        edge_idx_end, edge_ref_end = edges_idx[-1], edges_ref[-1]

        if edge_idx_end < (self.nbin - 0.5):
            overflow = {
                "bin_type": "overflow",
                "idx_min": edge_idx_end + 1,
                "idx_max": self.nbin - 1,
                "{}_min".format(self.name): edge_ref_end,
                "{}_max".format(self.name): self.pix_to_coord(self.nbin - 0.5)
                * self.unit,
            }
            groups.add_row(vals=overflow)

        group_idx = Column(np.arange(len(groups)))
        groups.add_column(group_idx, name="group_idx", index=0)
        return groups