Example #1
    def observed_fetch(self, table=None, red=True, blu=True):
        """Read a fields_observed.dat file and save the contents.

        Arguments to be passed:
            table (default=None) : path of fields_observed file - default
                                   behaviour is to look in package data 
                                   directory for fields_observed.dat.
            red (default=True)   : boolean which when true specifies that
                                   details of the red concats should be read.
            blu (default=True)   : boolean which when true specifies that
                                   details of the blue concats should be read.
        """
        # Ensure date columns always read in as strings - makes it 
        # possible to validate values that are empty against "".
        conv = {"Hari_dat" : [ascii.convert_numpy(np.str)],
                "ugr_dat" : [ascii.convert_numpy(np.str)]}
        if table is None:
            try:
                self.observed = ascii.read(datadir+'/fields_observed.dat', 
                                        converters=conv)
            except Exception:
                print("Error: No table specified, and no "
                      "fields_observed.dat")
                print("       in module data directory.")
                return 0
        else:
            self.observed = ascii.read(table, converters=conv)
        self.bool_red = red
        self.bool_blu = blu
        return
Example #2
def read(inputfile):
    """Read the given input file and return parsed Astropy Table."""

    # NOTE: You can replace this with meaningful column names if you want.
    colnames = ['col_01', 'col_02', 'col_03', 'col_04', 'col_05',
                'col_06', 'col_07', 'col_08', 'col_09', 'col_10']

    # NOTE: You can change the data types here for each col as you see fit.
    # ps. Tried using defaultdict magic but didn't work, not sure why.
    #     So, we have to define the dictionary explicitly here.
    # This converts everything to float. It uses INTERNAL column names.
    converters = {'col1': [ascii.convert_numpy(float)],
                  'col2': [ascii.convert_numpy(float)],
                  'col3': [ascii.convert_numpy(float)],
                  'col4': [ascii.convert_numpy(float)],
                  'col5': [ascii.convert_numpy(float)],
                  'col6': [ascii.convert_numpy(float)],
                  'col7': [ascii.convert_numpy(float)],
                  'col8': [ascii.convert_numpy(float)],
                  'col9': [ascii.convert_numpy(float)],
                  'col10': [ascii.convert_numpy(float)]}

    tab = ascii.read(inputfile, format='no_header', guess=False,
                     Inputter=RuiterInputter, data_start=0, names=colnames,
                     converters=converters)

    # Now you can science with this data!
    return tab
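
The ten-entry dict above can also be built programmatically rather than written out by hand (addressing the defaultdict note in the comments). A minimal sketch, not from the original source, assuming astropy's internal 'col1'..'col10' column naming:

from astropy.io import ascii

# Same all-float converters mapping, built with a dict comprehension.
converters = {'col{0}'.format(i): [ascii.convert_numpy(float)]
              for i in range(1, 11)}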
Example #3
    def __init__(self, definition_file):
        self._dqcol = 'DQFLAG'
        self._sdcol = 'short'  # SHORT_DESCRIPTION
        self._ldcol = 'long'  # LONG_DESCRIPTION

        # Need to replace ~ with $HOME
        self.tab = ascii.read(os.path.expanduser(definition_file),
                              names=(self._dqcol, self._sdcol, self._ldcol),
                              converters={
                                  self._dqcol:
                                  [ascii.convert_numpy(np.uint16)],
                                  self._sdcol: [ascii.convert_numpy(str)],
                                  self._ldcol: [ascii.convert_numpy(str)]
                              })

        # Another table to store metadata
        self.metadata = ascii.read(self.tab.meta['comments'],
                                   delimiter='=',
                                   format='no_header',
                                   names=['key', 'val'])

        # Ensure table has OK flag to detect good pixel
        self._okflag = 0
        if self._okflag not in self.tab[self._dqcol]:
            self.tab.add_row([self._okflag, 'OK', 'Good pixel'])

        # Sort table in ascending order
        self.tab.sort(self._dqcol)

        # Compile a list of flags
        self._valid_flags = self.tab[self._dqcol]
Example #4
    def observed_fetch(self, table=None, red=True, blu=True):
        """Read a fields_observed.dat file and save the contents.

        Arguments to be passed:
            table (default=None) : path of fields_observed file - default
                                   behaviour is to look in package data 
                                   directory for fields_observed.dat.
            red (default=True)   : boolean which when true specifies that
                                   details of the red concats should be read.
            blu (default=True)   : boolean which when true specifies that
                                   details of the blue concats should be read.
        """
        # Ensure date columns always read in as strings - makes it
        # possible to validate values that are empty against "".
        conv = {
            "Hari_dat": [ascii.convert_numpy(np.str)],
            "ugr_dat": [ascii.convert_numpy(np.str)]
        }
        if table is None:
            try:
                self.observed = ascii.read(datadir + '/fields_observed.dat',
                                           converters=conv)
            except Exception:
                print(
                    "Error: No table specified, and no "
                    "fields_observed.dat")
                print("       in module data directory.")
                return 0
        else:
            self.observed = ascii.read(table, converters=conv)
        self.bool_red = red
        self.bool_blu = blu
        return
Example #5
    def __init__(self, definition_file):
        self._dqcol = 'DQFLAG'
        self._sdcol = 'short'  # SHORT_DESCRIPTION
        self._ldcol = 'long'   # LONG_DESCRIPTION

        # Need to replace ~ with $HOME
        self.tab = ascii.read(
            os.path.expanduser(definition_file),
            names=(self._dqcol, self._sdcol, self._ldcol),
            converters={self._dqcol: [ascii.convert_numpy(np.uint16)],
                        self._sdcol: [ascii.convert_numpy(str)],
                        self._ldcol: [ascii.convert_numpy(str)]})

        # Another table to store metadata
        self.metadata = ascii.read(self.tab.meta['comments'], delimiter='=',
                                   format='no_header', names=['key', 'val'])

        # Ensure table has OK flag to detect good pixel
        self._okflag = 0
        if self._okflag not in self.tab[self._dqcol]:
            self.tab.add_row([self._okflag, 'OK', 'Good pixel'])

        # Sort table in ascending order
        self.tab.sort(self._dqcol)

        # Compile a list of flags
        self._valid_flags = self.tab[self._dqcol]
Example #6
def _convert_csv_to_coordinates(_csv_file, _ra, _dec):
    # Converters for CSV reading
    converters = {'col1': [ascii.convert_numpy(np.float32)], 'col2': [ascii.convert_numpy(np.float32)]}
    _data = ascii.read(_csv_file, converters=converters)

    for i in range(len(_data)):
        _ra.append(_data['ra'][i])
        _dec.append(_data['dec'][i])
Example #7
def _search_nearby_of_tess_target(tic_id):
    # Read GAIA DR2, TIC ID, and TOI as strings to avoid warnings / overflow
    # errors from attempting to convert them to int32 (the default) in some cases.
    return ascii.read(f"https://exofop.ipac.caltech.edu/tess/download_nearbytarget.php?id={tic_id}&output=csv",
                      format="csv",
                      fast_reader=False,
                      converters={
                          "GAIA DR2": [ascii.convert_numpy(np.str)],
                          "TIC ID": [ascii.convert_numpy(np.str)],
                          "TOI": [ascii.convert_numpy(np.str)],
                          })
Example #8
def test_types_from_dat():
    converters = {'a': [ascii.convert_numpy(float)],
                  'e': [ascii.convert_numpy(str)]}

    dat = ascii.read(['a b c d e', '1 1 cat 2.1 4.2'],
                     Reader=ascii.Basic,
                     converters=converters)

    assert dat['a'].dtype.kind == 'f'
    assert dat['b'].dtype.kind == 'i'
    assert dat['c'].dtype.kind in ('S', 'U')
    assert dat['d'].dtype.kind == 'f'
    assert dat['e'].dtype.kind in ('S', 'U')
Example #9
def test_types_from_dat():
    converters = {'a': [ascii.convert_numpy(float)],
                  'e': [ascii.convert_numpy(str)]}

    dat = ascii.read(['a b c d e', '1 1 cat 2.1 4.2'],
                     Reader=ascii.Basic,
                     converters=converters)

    assert dat['a'].dtype.kind == 'f'
    assert dat['b'].dtype.kind == 'i'
    assert dat['c'].dtype.kind in ('S', 'U')
    assert dat['d'].dtype.kind == 'f'
    assert dat['e'].dtype.kind in ('S', 'U')
Example #10
def makeEmptyTable():
    """
    Returns an empty sky model table.
    """
    outlines = ['Z, Z, 0.0, 0.0, 0.0\n']
    colNames = ['Name', 'Type', 'Ra', 'Dec', 'I']
    converters = {}
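    # NOTE: 'numpy_type' is assumed to be a module-level dtype kind string
    # (e.g. 'S' or 'U'), so '{}100'.format(numpy_type) gives a fixed-width
    # string dtype such as 'S100'.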
    nameCol = 'col{0}'.format(colNames.index('Name')+1)
    converters[nameCol] = [ascii.convert_numpy('{}100'.format(numpy_type))]
    typeCol = 'col{0}'.format(colNames.index('Type')+1)
    converters[typeCol] = [ascii.convert_numpy('{}100'.format(numpy_type))]
    table = Table.read(outlines, guess=False, format='ascii.no_header', delimiter=',',
        names=colNames, comment='#', data_start=0, converters=converters)
    table.remove_rows(0)
    return table
Example #11
    def from_leap_seconds_list(cls, file):
        """Create a table from a file like the IETF ``leap-seconds.list``.

        Parameters
        ----------
        file : path-like, optional
            Full local or network path to the file holding leap-second data
            in a format consistent with that used by IETF.  Up to date versions
            can be retrieved from ``iers.IETF_LEAP_SECOND_URL``.

        Notes
        -----
        The file *must* contain the expiration date in a comment line, like
        '# File expires on:  28 June 2020'
        """
        from astropy.io.ascii import convert_numpy  # Here to avoid circular import

        names = ['ntp_seconds', 'tai_utc', 'comment', 'day', 'month', 'year']
        # Note: ntp_seconds does not fit in 32 bit, so causes problems on
        # 32-bit systems without the np.int64 converter.
        self = cls._read_leap_seconds(
            file, names=names, include_names=names[:2],
            converters={'ntp_seconds': [convert_numpy(np.int64)]})
        self['mjd'] = (self['ntp_seconds']/86400 + 15020).round()
        # Note: cannot use Time.ymdhms, since that might require leap seconds.
        isot = Time(self['mjd'], format='mjd', scale='tai').isot
        ymd = np.array([[int(part) for part in t.partition('T')[0].split('-')]
                        for t in isot])
        self['year'], self['month'], self['day'] = ymd.T
        return self
Example #12
def get_ifot(event_type, start=None, stop=None, props=[], columns=[], timeout=TIMEOUT, types={}):
    start = DateTime('1998:001' if start is None else start)
    stop = DateTime(stop)
    event_props = '.'.join([event_type] + props)

    params = odict(r='home',
                   t='qserver',
                   format='tsv',
                   tstart=start.date,
                   tstop=stop.date,
                   e=event_props,
                   ul='7',
                   )
    if columns:
        params['columns'] = ','.join(columns)

    # Get the TSV data for the iFOT event table
    url = ROOTURL + URLS['ifot']
    response = requests.get(url, auth=get_auth(), params=params, timeout=timeout)

    # For Py2 convert from unicode to ASCII str
    text = response.text
    text = re.sub(r'\r\n', ' ', text)
    lines = [x for x in text.split('\t\n') if x.strip()]

    converters = {key: [ascii.convert_numpy(getattr(np, type_))]
                  for key, type_ in types.items()}
    dat = ascii.read(lines, format='tab', guess=False, converters=converters,
                     fill_values=None)
    return dat
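
A hypothetical call sketch (the event type and column name below are made up, not taken from iFOT documentation): the 'types' argument maps column names to numpy dtype names, and get_ifot turns each entry into a convert_numpy converter via getattr(np, type_name), so those columns keep the requested dtype instead of whatever ascii.read would guess.

# Force the hypothetical 'obsid' column to be read as strings.
dat = get_ifot('LOADSEG', start='2022:001', stop='2022:010',
               types={'obsid': 'str_'})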
Example #13
def load_sao() -> Table:
    """Load the SAO-TYC2 cross match."""
    print('Loading SAO-TYC2 cross match')
    xmatch_files = [
        'sao_tyc2_xmatch.csv',
        'sao_tyc2_suppl1_xmatch.csv',
        'sao_tyc2_suppl2_xmatch.csv',
    ]
    data = vstack(
        [
            io_ascii.read(
                XMATCH_DIR/f,
                include_names=['SAO', 'TYC1', 'TYC2', 'TYC3', 'angDist', 'delFlag'],
                format='csv',
                converters={'delFlag': [io_ascii.convert_numpy(str)]},
            ) for f in xmatch_files
        ],
        join_type='exact',
    )

    data = data[data['delFlag'].mask]
    data.remove_column('delFlag')

    parse_tyc_cols(data)

    data = unique(data.group_by(['TYC', 'angDist']), keys=['TYC'])
    data.remove_column('angDist')

    data.add_index('TYC')
    return data
Example #14
File: occweb.py Project: sot/kadi
def get_ifot(event_type, start=None, stop=None, props=[], columns=[], timeout=TIMEOUT, types={}):
    start = DateTime('1998:001' if start is None else start)
    stop = DateTime(stop)
    event_props = '.'.join([event_type] + props)

    params = odict(r='home',
                   t='qserver',
                   format='tsv',
                   tstart=start.date,
                   tstop=stop.date,
                   e=event_props,
                   ul='7',
                   )
    if columns:
        params['columns'] = ','.join(columns)

    # Get the TSV data for the iFOT event table
    url = ROOTURL + URLS['ifot']
    response = requests.get(url, auth=get_auth(), params=params, timeout=timeout)

    # For Py2 convert from unicode to ASCII str
    text = response.text.encode('ascii', 'ignore') if six.PY2 else response.text
    text = re.sub(r'\r\n', ' ', text)
    lines = [x for x in text.split('\t\n') if x.strip()]

    converters = {key: [ascii.convert_numpy(getattr(np, type_))]
                  for key, type_ in types.items()}
    dat = ascii.read(lines, format='tab', guess=False, converters=converters,
                     fill_values=None)
    return dat
Example #15
def return_line_labels(wl, tol=1):
    """Given a wl array, return the nearest n line labels next to the line, that are within
    tolerance = 1 Ang of each point."""

    # for linelist_air.dat, col_starts=[3, 20], col_ends=[17, 28]
    # for linelist_kurucz.dat, col_starts=[3, 13], col_ends=[10, 20]

    lines = ascii.read(
        "linelist_kurucz.dat",
        Reader=ascii.FixedWidth,
        col_starts=[3, 13],
        col_ends=[10, 20],
        converters={"line": [ascii.convert_numpy(np.float)], "element": [ascii.convert_numpy(np.str)]},
        guess=False,
    )
    lines["line"] = 10 * lines["line"]  # Convert from nanometers to AA

    # truncate list to speed execution
    ind = (lines["line"] >= np.min(wl) - tol) & (lines["line"] <= np.max(wl) + tol)
    lines = lines[ind]

    # for each wl, query all known lines that are within tol, add these to the set of known lines
    line_labels = []
    for w in wl:
        # Find nearby wl lines within tol
        ind = (w - tol <= lines["line"]) & (lines["line"] <= w + tol)

        # Truncated lines
        lines_trunc = lines[ind]

        # Sort them by closeness to current pixel
        distances = np.abs(w - lines_trunc["line"])
        distance_ind = np.argsort(distances)

        # Sort lines by closest label
        lines_sort = lines_trunc[distance_ind]

        # Take only 6 lines
        lines_clip = lines_sort[:6]

        # Create a new set
        labels = "\n".join(["{} {:.2f}".format(label, line) for line, label in lines_clip])

        line_labels.append(labels)

    return line_labels
Example #16
def read_detector_pars(filename):
    """Read detector parameters from ASCII file.

    Table must contain 4 columns without header.
    Comment lines are allowed and will be ignored.
    Columns are automatically named:

    #. ``OBSMODE`` - Observation mode.
    #. ``SCALE`` - Pixel scale in arcseconds.
    #. ``NX`` - X dimension in pixels.
    #. ``NY`` - Y dimension in pixels.

    Example::

        # DETECTORS.DAT -- Comments.
        # More comments.
        acs,hrc     0.027   1024  1024
        acs,sbc     0.032   1024  1024
        stis,g140l  0.0244  1024  1024
        stis,g140m  0.0290  1024  1024

    Parameters
    ----------
    filename : str
        Detector parameters filename. Must be ASCII format.

    Returns
    -------
    data : `~astropy.table.Table`
        Data table.

    """
    return ascii.read(filename,
                      guess=False,
                      format='no_header',
                      delimiter=r'\s',
                      names=('OBSMODE', 'SCALE', 'NX', 'NY'),
                      converters={
                          'OBSMODE': [ascii.convert_numpy(str)],
                          'SCALE': [ascii.convert_numpy(float)],
                          'NX': [ascii.convert_numpy(int)],
                          'NY': [ascii.convert_numpy(int)]
                      })
Example #17
    def __getitem__(self, k):
        func, type_ = convert_numpy(str)

        def convert_func(vals):
            """Lowercase the conversion"""
            results = func(vals)
            results = [result.lower() for result in results]
            return results

        return [(convert_func, type_)]
Example #18
def _convert_to_str():
    func, type_ = convert_numpy(str)

    def convert_func(vals):
        """Lowercase the conversion"""
        results = func(vals)
        results = [result.lower() for result in results]
        return results

    return [(convert_func, type_)]
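
A minimal usage sketch (an assumption, not part of the original project) showing how the wrapped converter above plugs into ascii.read: the dict value is a list of (function, type) tuples, exactly what convert_numpy returns, so the lower-cased strings end up in the parsed table. The column name 'name' and the inline data are hypothetical.

from astropy.io import ascii
from astropy.io.ascii import convert_numpy

def _convert_to_str():
    func, type_ = convert_numpy(str)

    def convert_func(vals):
        """Lowercase the conversion"""
        return [result.lower() for result in func(vals)]

    return [(convert_func, type_)]

tab = ascii.read(['name val', 'FOO 1', 'BAR 2'],
                 converters={'name': _convert_to_str()})
# tab['name'] now holds 'foo' and 'bar'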
Example #19
def compare_backstop_history(history, state_key, compare_val=True):
    hist = ascii.read(history, guess=False, format='no_header',
                      converters={'col1': [ascii.convert_numpy(str)]})
    start = DateTime(hist['col1'][0], format='greta') - 1 / 86400.
    stop = DateTime(hist['col1'][-1], format='greta') + 1 / 86400.
    sts = states.get_states(start=start, stop=stop, state_keys=state_key)
    sts = sts[1:]  # Drop the first state (which is continuity at start time)
    assert len(sts) == len(hist)
    assert np.all(DateTime(sts['datestart']).greta == hist['col1'])
    if compare_val:
        assert np.all(sts[state_key] == hist['col3'])
Example #20
def makeEmptyTable():
    """
    Returns an empty sky model table.
    """
    outlines = ['Z, Z, 0.0, 0.0, 0.0\n']
    colNames = ['Name', 'Type', 'Ra', 'Dec', 'I']
    converters = {}
    nameCol = 'col{0}'.format(colNames.index('Name') + 1)
    converters[nameCol] = [ascii.convert_numpy('{}100'.format(numpy_type))]
    typeCol = 'col{0}'.format(colNames.index('Type') + 1)
    converters[typeCol] = [ascii.convert_numpy('{}100'.format(numpy_type))]
    table = Table.read(outlines,
                       guess=False,
                       format='ascii.no_header',
                       delimiter=',',
                       names=colNames,
                       comment='#',
                       data_start=0,
                       converters=converters)
    table.remove_rows(0)
    return table
Example #21
def identify_lines(wi, temp, logg, Z):
    lines = ascii.read("linelist.dat", Reader=ascii.FixedWidth, col_starts=[3,17], col_ends=[16,27],
                       converters={'line': [ascii.convert_numpy(float)],
                                   'element': [ascii.convert_numpy(str)]})
    print(lines.dtype)

    wl = pt.w
    ind = (wl >= wi[0]) & (wl <= wi[1])
    wl = wl[ind]

    combinations = [[temp[0], logg[0], Z[0]],
                    [temp[0], logg[0], Z[1]],
                    [temp[0], logg[1], Z[0]],
                    [temp[0], logg[1], Z[1]],
                    [temp[1], logg[0], Z[0]],
                    [temp[1], logg[0], Z[1]],
                    [temp[1], logg[1], Z[0]],
                    [temp[1], logg[1], Z[1]]]

    #[print(comb) for comb in combinations]
    fluxes = [pt.load_flux_full(*comb, norm=True)[pt.ind][ind] for comb in combinations]

    fig = plt.figure(figsize=(14, 8))
    ax = fig.add_subplot(111)
    ax.xaxis.set_major_formatter(FSF("%.0f"))
    ax.xaxis.set_major_locator(MultipleLocator(1.))
    ax.xaxis.set_minor_locator(MultipleLocator(0.2))

    ind2 = (lines['line'] >= wi[0]) & (lines['line'] <= wi[1])
    for line, label in lines[ind2]:
        ax.axvline(float(line), color="0.5")
        ax.annotate(label, (line, 0.9), xycoords=('data', 'axes fraction'), rotation='vertical', ha='center', va='center')

    for i, fl in enumerate(fluxes):
        ax.plot(wl, fl, label="%s %s %s" % tuple(combinations[i]))

    ax.legend()
    plt.show()
    pass
Example #22
def read_wavecat(filename):
    """Read wavelength catalog from ASCII file.

    Table must contain two columns without header.
    Comment lines are allowed and will be ignored.
    Columns are automatically named:

    #. ``OBSMODE`` - Observation mode.
    #. ``FILENAME`` - Corresponding wavelength table filename
       or parameters.

    Example::

        # WAVECAT.DAT -- Comments.
        # More comments.
        cos,fuv     (900.0,3000.0,1.0)
        cos,nuv     (1000.0,12000.0,1.0)
        acs,hrc     synphot$wavecats/acs.dat
        acs,wfc1    synphot$wavecats/acs.dat

    Parameters
    ----------
    filename : str
        Wavelength catalog filename. Must be ASCII format.

    Returns
    -------
    data : `~astropy.table.Table`
        Data table.

    """
    return ascii.read(filename,
                      names=('OBSMODE', 'FILENAME'),
                      guess=False,
                      format='no_header',
                      converters={
                          'OBSMODE': [ascii.convert_numpy(str)],
                          'FILENAME': [ascii.convert_numpy(str)]
                      })
Example #23
def readDataFile(nanvals, read_mode, id_col, data_file):
    """
    Read input data file.
    """
    # TODO to separate IDs (strings) from the rest of the data, I read the file
    # twice. This is very slow for large files.

    # Identify all these strings as invalid entries.
    fill_msk = [('', '0')] + [(_, '0') for _ in nanvals]
    # Store IDs as strings.
    if read_mode == 'num':
        # Read IDs as strings, not applying the 'fill_msk'
        data = ascii.read(data_file,
                          converters={id_col: [ascii.convert_numpy(str)]},
                          format='no_header')
        # Store IDs
        id_data = data[id_col]
        # Read rest of the data applying the mask
        data = ascii.read(data_file, fill_values=fill_msk, format='no_header')
        # Replace IDs column
        data[id_col] = id_data
    elif read_mode == 'nam':
        # Read IDs as strings, not applying the 'fill_msk'
        data = ascii.read(data_file,
                          converters={id_col: [ascii.convert_numpy(str)]})
        # Store IDs
        try:
            id_data = data[id_col]
        except KeyError:
            raise ValueError(
                "ERROR: the '{}' key could not be found. Check that \n"
                "the 'id' name is properly written, and that all columns \n"
                "have *unique* names\n".format(id_col))
        # Read rest of the data applying the mask
        data = ascii.read(data_file, fill_values=fill_msk)
        # Replace IDs column
        data[id_col] = id_data

    return data
Example #24
def compare_backstop_history(history, state_key, compare_val=True):
    hist = ascii.read(history,
                      guess=False,
                      format='no_header',
                      converters={'col1': [ascii.convert_numpy(str)]})
    start = DateTime(hist['col1'][0], format='greta') - 1 / 86400.
    stop = DateTime(hist['col1'][-1], format='greta') + 1 / 86400.
    sts = states.get_states(start=start, stop=stop, state_keys=state_key)
    sts = sts[1:]  # Drop the first state (which is continuity at start time)
    assert len(sts) == len(hist)
    assert np.all(DateTime(sts['datestart']).greta == hist['col1'])
    if compare_val:
        assert np.all(sts[state_key] == hist['col3'])
Example #25
def read(inputfile):
    """Read the given input file and return parsed Astropy Table."""

    # NOTE: You can replace this with meaningful column names if you want.
    colnames = [
        'col_01', 'col_02', 'col_03', 'col_04', 'col_05', 'col_06', 'col_07',
        'col_08', 'col_09', 'col_10'
    ]

    # NOTE: You can change the data types here for each col as you see fit.
    # ps. Tried using defaultdict magic but didn't work, not sure why.
    #     So, we have to define the dictionary explicitly here.
    # This converts everything to float. It uses INTERNAL column names.
    converters = {
        'col1': [ascii.convert_numpy(float)],
        'col2': [ascii.convert_numpy(float)],
        'col3': [ascii.convert_numpy(float)],
        'col4': [ascii.convert_numpy(float)],
        'col5': [ascii.convert_numpy(float)],
        'col6': [ascii.convert_numpy(float)],
        'col7': [ascii.convert_numpy(float)],
        'col8': [ascii.convert_numpy(float)],
        'col9': [ascii.convert_numpy(float)],
        'col10': [ascii.convert_numpy(float)]
    }

    tab = ascii.read(inputfile,
                     format='no_header',
                     guess=False,
                     Inputter=RuiterInputter,
                     data_start=0,
                     names=colnames,
                     converters=converters)

    # Now you can science with this data!
    return tab
Example #26
    def __init__(self, definition_file):
        self._dqcol = 'DQFLAG'
        self._sdcol = 'SHORT_DESCRIPTION'
        self._ldcol = 'LONG_DESCRIPTION'
        self.tab = ascii.read(
            definition_file,
            names=(self._dqcol, self._sdcol, self._ldcol),
            converters={self._dqcol: [ascii.convert_numpy(np.uint16)],
                        self._sdcol: [ascii.convert_numpy(str)],
                        self._ldcol: [ascii.convert_numpy(str)]})
        self.metadata = ascii.read(self.tab.meta['comments'], delimiter='=',
                                   format='no_header', names=['key', 'val'])

        # Ensure table has OK flag to detect good pixel
        self._okflag = 0
        if self._okflag not in self.tab[self._dqcol]:
            self.tab.add_row([self._okflag, 'OK', 'Good pixel'])

        # Sort table in ascending order
        self.tab.sort(self._dqcol)

        # Compile a list of flags
        self._valid_flags = self.tab[self._dqcol]
        self._total_flags = int(self._valid_flags.sum())
Example #27
def _read_table(filename, ext, dtypes):
    """Generic table reader.

    Parameters
    ----------
    filename : str
        Table filename.
        If suffix is not 'fits' or 'fit', assume ASCII format.

    ext : int
        Data extension.
        This is ignored for ASCII file.

    dtypes : dict
        Dictionary that maps column names to data types.

    Returns
    -------
    data : `~astropy.io.fits.FITS_rec` or `~astropy.table.Table`
        Data table.

    Raises
    ------
    synphot.exceptions.SynphotError
        Failure to parse table.

    """
    # FITS
    if filename.endswith('.fits') or filename.endswith('.fit'):
        with fits.open(filename) as f:
            data = f[ext].data.copy()

        err_str = ''
        for key, val in dtypes.items():
            if not np.issubdtype(data[key].dtype, val):
                err_str += 'Expect {0} to be {1} but get {2}.\n'.format(
                    key, val, data[key].dtype)
        if err_str:
            raise synexceptions.SynphotError(err_str)

    # ASCII
    else:  # pragma: no cover
        converters = dict(
            [[k, ascii.convert_numpy(v)] for k, v in dtypes.items()])
        data = ascii.read(filename, converters=converters)

    return data
Example #28
def _read_table(filename, ext, dtypes):
    """Generic table reader.

    Parameters
    ----------
    filename : str
        Table filename.
        If suffix is not 'fits' or 'fit', assume ASCII format.

    ext : int
        Data extension.
        This is ignored for ASCII file.

    dtypes : dict
        Dictionary that maps column names to data types.

    Returns
    -------
    data : `~astropy.io.fits.FITS_rec` or `~astropy.table.Table`
        Data table.

    Raises
    ------
    synphot.exceptions.SynphotError
        Failure to parse table.

    """
    # FITS
    if filename.endswith('.fits') or filename.endswith('.fit'):
        with fits.open(filename) as f:
            data = f[ext].data.copy()

        err_str = ''
        for key, val in dtypes.items():
            if not np.issubdtype(data[key].dtype, val):
                err_str += 'Expect {0} to be {1} but get {2}.\n'.format(
                    key, val, data[key].dtype)
        if err_str:
            raise synexceptions.SynphotError(err_str)

    # ASCII
    else:  # pragma: no cover
        converters = dict([[k, ascii.convert_numpy(v)]
                           for k, v in dtypes.items()])
        data = ascii.read(filename, converters=converters)

    return data
Example #29
    def get_converters(headline, delimiter=','):
        """Return a list of new names and a dict table of converter functions for the columns
        Column descriptions should look like [name]:datatype
        Returns names (list) and converters (dict)

        ## Arguments

        * `headline` (str): First line of sql output with column info in format "[name]:datatype"

        ## Keyword Arguments

        * `delimiter` (str): delimiter between columns

        ## Returns

        * `names` (list), `converters` (dict)

        """
        pat = re.compile(r'\[(?P<name>[^[]+)\]:(?P<datatype>.+)$')
        # probably need a boolean datatype in this list
        tmap = defaultdict(lambda: str,
                           int=numpy.int32,
                           smallint=numpy.int16,
                           tinyint=numpy.uint8,
                           bigint=numpy.int64,
                           integer=numpy.int64,
                           bit=numpy.uint8,
                           float=numpy.float64,
                           decimal=numpy.float64,
                           real=numpy.float32,
                           datetime=numpy.datetime64)
        cols = headline.split(delimiter)
        converters = {}
        names = []
        for c in cols:
            m = pat.match(c)
            if not m:
                print("Unable to parse column name '{}'".format(c))
            else:
                newname = m.group('name')
                names.append(newname)
                numpy_type = tmap[m.group('datatype').lower()]
                converters[newname] = [ascii.convert_numpy(numpy_type)]
        return names, converters
Example #30
def read_distortion_coeffs_file(filename):
    """Read the file containing the table of distortion coefficients

    Example:

    # NIRCAM distortion coefficient file

    # Source file: jw01144001001_01101_00001_nrcb4_cal.fits
    # Aperture: NRCB4_FULL
    # Filter/Pupil: F200W/CLEAR
    # Generated 2022-01-25T16:16:04.533 utc
    # by verap
    #
      AperName , siaf_index , exponent_x , exponent_y ,                Sci2IdlX ,                Sci2IdlY ,                Idl2SciX ,                Idl2SciY
    NRCB4_FULL ,         00 ,          0 ,          0 ,                     0.0 ,                     0.0 ,                     0.0 ,                     0.0
    NRCB4_FULL ,         10 ,          1 ,          0 ,    0.031281790934487304 ,  0.00014142457551002174 ,      31.967141087158005 ,    -0.14404661727118445
    NRCB4_FULL ,         11 ,          0 ,          1 ,                     0.0 ,    0.031447520345431045 ,   3.469446951953614e-18 ,       31.79851632221204
    NRCB4_FULL ,         20 ,          2 ,          0 ,  -6.709581883542899e-08 ,    6.38422037163669e-08 ,     0.00215373180436267 ,  -0.0020935324940174927
    NRCB4_FULL ,         21 ,          1 ,          1 , -2.1509448922459775e-07 ,  -9.112311025594254e-08 ,     0.00702920876108879 ,   0.0028750871441249734

    Parameters
    ----------
    filename : str
        Name of text file containing the data.

    Returns
    -------
    tab : astropy.table.Table
        Table containing distortion coefficients.
    """
    converters = {'siaf_index': [ascii.convert_numpy(str)]}
    tab = ascii.read(filename,
                     format='csv',
                     header_start=7,
                     data_start=8,
                     converters=converters)

    # Catch if the file format changes
    if 'Sci2IdlX' not in tab.colnames:
        raise ValueError(
            "distortion_coeffs_file was not read correctly. You may need to adjust header and data starting lines."
        )

    return tab
Example #31
def read_waveset(filename, wave_unit=u.AA):
    """Read wavelength table from ASCII file.

    Table must contain a single column without header.
    Comment lines are allowed and will be ignored.
    Column is automatically named ``WAVELENGTH``.

    Example::

        # ACS.DAT -- Comments.
        # More comments.
        1000.
        2000.
        5000.
        9000.

    Parameters
    ----------
    filename : str
        Wavelength table filename. Must be ASCII format.

    wave_unit : str or `~astropy.units.Unit`
        Wavelength unit.

    Returns
    -------
    waveset : `~astropy.units.quantity.Quantity`
        Wavelength set array.

    """
    wave_unit = units.validate_wave_unit(wave_unit)
    data = ascii.read(filename,
                      guess=False,
                      format='no_header',
                      names=('WAVELENGTH', ),
                      converters={'WAVELENGTH': [ascii.convert_numpy(float)]})
    waveset = data['WAVELENGTH'].data

    if not isinstance(waveset, u.Quantity):
        waveset = waveset * u.AA

    return waveset
Example #32
def test_read_fixed_width_format():
    """Test reading with pandas read_fwf()

    """
    tbl = """\
    a   b   c
    1  2.0  a
    2  3.0  b"""
    buf = StringIO()
    buf.write(tbl)

    # Explicitly provide converters to avoid casting 'a' to int32.
    # See https://github.com/astropy/astropy/issues/8682
    t = Table.read(tbl, format='ascii', guess=False,
                   converters={'a': [ascii.convert_numpy(np.int64)]})

    buf.seek(0)
    t2 = Table.read(buf, format='pandas.fwf')

    assert t.colnames == t2.colnames
    assert np.all(t == t2)
Example #33
def test_write_with_mixins():
    """Writing a table with mixins just drops them via to_pandas()

    This also tests passing a kwarg to pandas read and write.
    """
    sc = SkyCoord([1, 2], [3, 4], unit='deg')
    q = [5, 6] * u.m
    qt = QTable([[1, 2], q, sc], names=['i', 'q', 'sc'])

    buf = StringIO()
    qt.write(buf, format='pandas.csv', sep=' ')
    exp = ['i q sc.ra sc.dec', '1 5.0 1.0 3.0', '2 6.0 2.0 4.0']
    assert buf.getvalue().splitlines() == exp

    # Read it back
    buf.seek(0)
    qt2 = Table.read(buf, format='pandas.csv', sep=' ')
    # Explicitly provide converters to avoid casting 'i' to int32.
    # See https://github.com/astropy/astropy/issues/8682
    exp_t = ascii.read(exp, converters={'i': [ascii.convert_numpy(np.int64)]})
    assert qt2.colnames == exp_t.colnames
    assert np.all(qt2 == exp_t)
Example #34
def test_write_with_mixins():
    """Writing a table with mixins just drops them via to_pandas()

    This also tests passing a kwarg to pandas read and write.
    """
    sc = SkyCoord([1, 2], [3, 4], unit='deg')
    q = [5, 6] * u.m
    qt = QTable([[1, 2], q, sc], names=['i', 'q', 'sc'])

    buf = StringIO()
    qt.write(buf, format='pandas.csv', sep=' ')
    exp = ['i q sc.ra sc.dec',
           '1 5.0 1.0 3.0',
           '2 6.0 2.0 4.0']
    assert buf.getvalue().splitlines() == exp

    # Read it back
    buf.seek(0)
    qt2 = Table.read(buf, format='pandas.csv', sep=' ')
    # Explicitly provide converters to avoid casting 'i' to int32.
    # See https://github.com/astropy/astropy/issues/8682
    exp_t = ascii.read(exp, converters={'i': [ascii.convert_numpy(np.int64)]})
    assert qt2.colnames == exp_t.colnames
    assert np.all(qt2 == exp_t)
Example #35
def photRead(final_phot, nanvals, col_IDs):
    """
    Select a file with photometry to read and compare with APASS.
    """
    id_id, x_id, y_id, V_id = col_IDs

    fill_msk = [('', '0')] + [(_, '0') for _ in nanvals]
    # Read IDs as strings, not applying the 'fill_msk'
    phot = ascii.read(final_phot,
                      converters={id_id: [ascii.convert_numpy(str)]})
    # Store IDs
    id_data = phot[id_id]
    # Read rest of the data applying the mask
    phot = ascii.read(final_phot, fill_values=fill_msk)
    # Replace IDs column
    phot[id_id] = id_data

    # # Final calibrated photometry
    # fill_msk = [(_, np.nan) for _ in nanvals]
    # phot = Table.read(
    #     final_phot, fill_values=fill_msk, format="ascii",
    #     converters={id_col: [ascii.convert_numpy(np.str)]})

    # # Mask stars with no valid V magnitude.
    # try:
    #     Vmsk = ~phot['V']._mask
    #     phot = phot[Vmsk]
    # except AttributeError:
    #     pass

    x_p, y_p, Vmag = phot[x_id], phot[y_id], phot[V_id]

    # Remove meta data to avoid https://github.com/astropy/astropy/issues/7357
    phot.meta = {}

    return phot, x_p, y_p, Vmag
Example #36
    "perc": u.percent,
    "pi_E": None,
    "pi_EE": None,
    "pi_EN": None,
    "pi_rel": None,
    "ppm": cds.ppm,
    "seconds": u.s,
    "Solar mass": u.M_sun,
    "solarradius": u.R_sun,
    "Solar Radius": u.R_sun,
    "log10(cm/s**2)": u.dex(u.cm / u.s**2),
    "dex": u.dex(None),
    "sexagesimal": None
}

CONVERTERS = dict(koi_quarters=[ascii.convert_numpy(str)])

# 'ps' and 'pscomppars' are the main tables of detected exoplanets.
# Calls to the old tables ('exoplanets', 'compositepars', 'exomultpars') will
# return errors and urge the user to call the 'ps' or 'pscomppars' tables
OBJECT_TABLES = {
    "ps": "pl_",
    "pscomppars": "pl_",
    "exoplanets": "pl_",
    "compositepars": "fpl_",
    "exomultpars": "mpl_"
}
MAP_TABLEWARNINGS = {
    "exoplanets": "Planetary Systems (PS)",
    "compositepars":
    "Planetary System Composite Parameters table (PSCompPars)",
Example #37
    'D:/mask/MDPL2/NewMDCLUSTER_0001/MDPL2-NewMDCLUSTER_0001*AHF_halos')
#print(namestr[0])
red_shift = [float(namestr[57:-10]) for namestr in namestr]
print(red_shift)
##sort to red_shift and save as h5py file
redshift = np.sort(red_shift)
with h5py.File('redshift_DMO.h5', 'w') as f:
    f['a'] = np.array(redshift)
#print(redshift)
##next step,try to read the analysis catalogue from the data,and save as h5py files
###firstly,read the halo mass and the red_shift
##get the number of main halo
main_value = asc.read(
    'D:/mask/MDPL2/NewMDCLUSTER_0001/MDPL2-NewMDCLUSTER_0001.z0.000.AHF_mtree_idx',
    converters={
        'col1': [asc.convert_numpy(np.int64)],
        'col2': [asc.convert_numpy(np.int64)]
    })
N = len(main_value['col1'])
M = len(redshift)
main_tree = np.zeros((N, M), dtype=np.int64)  #save the link to main progenitor
with h5py.File('main_tree_DMO.h5', 'w') as f:
    f['a'] = np.array(main_tree)
for k in range(0, len(redshift) - 1):
    if redshift[k] <= 9.75:
        id_value = asc.read(
            'D:/mask/MDPL2/NewMDCLUSTER_0001/MDPL2-NewMDCLUSTER_0001.z%.3f.AHF_mtree_idx'
            % redshift[k],
            converters={
                'col1': [asc.convert_numpy(np.int64)],
                'col2': [asc.convert_numpy(np.int64)]
Example #38
    def __getitem__(self, k):
        return [convert_numpy(str)]
Example #39
def plot(args):
    import matplotlib.pyplot as pyplot

    ##-------------------------------------------------------------------------
    ## Set date to tonight if not specified
    ##-------------------------------------------------------------------------
    now = datetime.datetime.now()
    DateString = now.strftime("%Y%m%d")
    if not args.date:
        args.date = DateString


    ##-------------------------------------------------------------------------
    ## Define File Names
    ##-------------------------------------------------------------------------
    LogFile = os.path.join('/', 'var', 'log', 'Kegerator', 'PlotLog_'+args.date+".txt")
    PlotFile = os.path.join('/', 'var', 'log', 'Kegerator', args.date+".png")
    DataFile = os.path.join('/', 'var', 'log', 'Kegerator', args.date+".txt")


    ##-------------------------------------------------------------------------
    ## Create logger object
    ##-------------------------------------------------------------------------
    logger = logging.getLogger('MyLogger')
    logger.setLevel(logging.DEBUG)
    ## Set up console output
    LogConsoleHandler = logging.StreamHandler()
    if args.verbose:
        LogConsoleHandler.setLevel(logging.DEBUG)
    else:
        LogConsoleHandler.setLevel(logging.INFO)
    LogFormat = logging.Formatter('%(asctime)23s %(levelname)8s: %(message)s')
    LogConsoleHandler.setFormatter(LogFormat)
    logger.addHandler(LogConsoleHandler)
    ## Set up file output
    LogFileHandler = logging.FileHandler(LogFile)
    LogFileHandler.setLevel(logging.DEBUG)
    LogFileHandler.setFormatter(LogFormat)
    logger.addHandler(LogFileHandler)

    logger.info("Kegerator.py invoked with --plot option")
    logger.info("  Making plot for day of {}".format(args.date))


    ##-------------------------------------------------------------------------
    ## Read Data
    ##-------------------------------------------------------------------------
    if os.path.exists(DataFile):
        logger.info("  Found data file: {}".format(DataFile))
        data = ascii.read(DataFile, guess=False,
                          header_start=0, data_start=1,
                          Reader=ascii.basic.Basic,
                          converters={
                          'date': [ascii.convert_numpy('S10')],
                          'time': [ascii.convert_numpy('S12')],
                          'AmbTemp': [ascii.convert_numpy('f4')],
                          'KegTemp': [ascii.convert_numpy('f4')],
                          'KegTemp1': [ascii.convert_numpy('f4')],
                          'KegTemp2': [ascii.convert_numpy('f4')],
                          'KegTemp3': [ascii.convert_numpy('f4')],
                          'RH': [ascii.convert_numpy('f4')],
                          'AH': [ascii.convert_numpy('f4')],
                          'status': [ascii.convert_numpy('S11')],
                          })
        datetime_objects = [datetime.datetime.strptime(x['time'], '%H:%M:%S HST') for x in data]
        time_decimal = [(x.hour + x.minute/60. + x.second/3600.) for x in datetime_objects]
        DecimalTime = max(time_decimal)

    ##-------------------------------------------------------------------------
    ## Make Plot
    ##-------------------------------------------------------------------------
        plot_upper_temp = 45
        plot_lower_temp = 29
        pyplot.ioff()
        plotpos = [
                   [0.05, 0.59, 0.65, 0.40], [0.73, 0.59, 0.21, 0.40],\
                   [0.05, 0.52, 0.65, 0.07], [0.73, 0.52, 0.21, 0.07],\
                   [0.05, 0.25, 0.65, 0.24], [0.73, 0.25, 0.21, 0.24],\
                   [0.05, 0.05, 0.65, 0.18], [0.73, 0.05, 0.21, 0.18],\
                  ]
        if len(data) > 1:
            logger.info("  Generating plot {} ... ".format(PlotFile))
            dpi = 100
            pyplot.figure(figsize=(14,8), dpi=dpi)

            ## Plot Temperature for This Day
            logger.debug("  Rendering Temperature Plot.")
            TemperatureAxes = pyplot.axes(plotpos[0], xticklabels=[])
            pyplot.title("Kegerator Temperatures for "+args.date)
            pyplot.plot(time_decimal, data['KegTemp'], 'ko', label="Median Temp.", markersize=3, markeredgewidth=0)
            pyplot.plot(time_decimal, data['KegTemp1'], 'bo', label="Temp. 1", markersize=2, markeredgewidth=0, alpha=0.6)
            pyplot.plot(time_decimal, data['KegTemp2'], 'go', label="Temp. 2", markersize=2, markeredgewidth=0, alpha=0.6)
            pyplot.plot(time_decimal, data['KegTemp3'], 'yo', label="Temp. 3", markersize=2, markeredgewidth=0, alpha=0.6)
            pyplot.plot([DecimalTime, DecimalTime], [-100,100], 'g-', alpha=0.4)
            pyplot.ylabel("Kegerator Temp. (F)")
            pyplot.xlim(0, 24)
            pyplot.xticks(np.arange(0,24,2))
            pyplot.ylim(plot_lower_temp, plot_upper_temp)
            pyplot.grid()
            pyplot.legend(loc='best', prop={'size': 10})
            TemperatureAxes.axhline(32, color='red', lw=4)
            TemperatureAxes.axhline(temp_low, color='blue', lw=4)
            TemperatureAxes.axhline(temp_high, color='blue', lw=4)

            ## Plot Temperature for Last Hour
            logger.debug("  Rendering Recent Temperature Plot.")
            RecentTemperatureAxes = pyplot.axes(plotpos[1], xticklabels=[], yticklabels=[])
            pyplot.title("Last Hour")
            pyplot.plot(time_decimal, data['KegTemp'], 'ko', label="Kegerator Temp", markersize=3, markeredgewidth=0)
            pyplot.plot(time_decimal, data['KegTemp1'], 'bo', label="Kegerator Temp 1", markersize=2, markeredgewidth=0, alpha=0.6)
            pyplot.plot(time_decimal, data['KegTemp2'], 'go', label="Kegerator Temp 2", markersize=2, markeredgewidth=0, alpha=0.6)
            pyplot.plot(time_decimal, data['KegTemp3'], 'yo', label="Kegerator Temp 3", markersize=2, markeredgewidth=0, alpha=0.6)
            pyplot.plot([DecimalTime, DecimalTime], [-100,100], 'g-', alpha=0.4)
            pyplot.xticks(np.arange(0,24,0.25))
            if DecimalTime > 1.0:
                pyplot.xlim(DecimalTime-1.0, DecimalTime+0.1)
            else:
                pyplot.xlim(0,1.1)
            pyplot.ylim(plot_lower_temp, plot_upper_temp)
            pyplot.grid()
            RecentTemperatureAxes.axhline(32, color='red', lw=4)
            RecentTemperatureAxes.axhline(temp_low, color='blue', lw=4)
            RecentTemperatureAxes.axhline(temp_high, color='blue', lw=4)

            ## Plot Relay State
            translator = {'On': 1, 'Off': 0, 'unknown': -0.25}
            relay_state = [translator[val] for val in data['status']]
            logger.debug("  Rendering Relay Status Plot.")
            RelayAxes = pyplot.axes(plotpos[2], yticklabels=[])
            pyplot.plot(time_decimal, relay_state, 'ko-', markersize=3, markeredgewidth=0)
            pyplot.plot([DecimalTime, DecimalTime], [-1,2], 'g-', alpha=0.4)
            pyplot.ylabel("Relay")
            pyplot.xlim(0, 24)
            pyplot.yticks([0,1])
            
            pyplot.ylim(-0.5,1.5)
            pyplot.xticks(np.arange(0,24,2))
            pyplot.grid()

            ## Plot Relay State for Last Hour
            logger.debug("  Rendering Recent Relay State Plot.")
            RecentRelayAxes = pyplot.axes(plotpos[3], yticklabels=[])
            pyplot.plot(time_decimal, relay_state, 'ko-', markersize=3, markeredgewidth=0)
            pyplot.plot([DecimalTime, DecimalTime], [-1,2], 'g-', alpha=0.4)
            pyplot.xticks(np.arange(0,24,0.25))
            if DecimalTime > 1.0:
                pyplot.xlim(DecimalTime-1.0, DecimalTime+0.1)
            else:
                pyplot.xlim(0,1.1)
            pyplot.yticks([0,1])
            pyplot.ylim(-0.5,1.5)
            pyplot.grid()


            ## Plot Humidity for This Day
            HumidityAxes = pyplot.axes(plotpos[4], xticklabels=[])
            logger.debug("  Rendering Humidity Plot.")
            pyplot.plot(time_decimal, data['RH'], 'bo', label="Humidity", markersize=3, markeredgewidth=0)
            pyplot.plot([DecimalTime, DecimalTime], [0,100], 'g-', alpha=0.4)
            pyplot.ylabel("Humidity (%)")
            pyplot.xlabel("Time (Hours HST)")
            pyplot.xlim(0, 24)
            pyplot.ylim(30,100)
            pyplot.xticks(np.arange(0,24,2))
            pyplot.grid()

            ## Plot Humidity for Last 2 Hours
            logger.debug("  Rendering Recent Humidity Plot.")
            RecentHumidityAxes = pyplot.axes(plotpos[5], yticklabels=[], xticklabels=[])
            pyplot.plot(time_decimal, data['RH'], 'bo', label="Humidity", markersize=3, markeredgewidth=0)
            pyplot.plot([DecimalTime, DecimalTime], [0,100], 'g-', alpha=0.4)
            pyplot.xticks(np.arange(0,24,0.25))
            if DecimalTime > 1.0:
                pyplot.xlim(DecimalTime-1.0, DecimalTime+0.1)
            else:
                pyplot.xlim(0,1.1)
            pyplot.ylim(30,100)
            pyplot.grid()

            ## Plot Case Temperature for This Day
            logger.debug("  Rendering Case Temperature Plot.")
            AmbTemperatureAxes = pyplot.axes(plotpos[6])
            pyplot.plot(time_decimal, data['AmbTemp'], 'ro', label="Ambient Temp", markersize=3, markeredgewidth=0)
            pyplot.plot([DecimalTime, DecimalTime], [-100,100], 'g-', alpha=0.4)
            pyplot.ylabel("Case Temp. (F)")
            pyplot.xlim(0, 24)
            pyplot.xticks(np.arange(0,24,2))
            pyplot.yticks(np.arange(60,100,5))
            pyplot.ylim(math.floor(min(data['AmbTemp'])-6), math.ceil(max(data['AmbTemp'])+6))
            pyplot.grid()

            ## Plot Case Temperature for Last Hour
            logger.debug("  Rendering Recent Case Temperature Plot.")
            RecentAmbTemperatureAxes = pyplot.axes(plotpos[7], yticklabels=[])
            pyplot.plot(time_decimal, data['AmbTemp'], 'ro', label="Ambient Temp", markersize=3, markeredgewidth=0)
            pyplot.plot([DecimalTime, DecimalTime], [-100,100], 'g-', alpha=0.4)
            pyplot.xticks(np.arange(0,24,0.25))
            pyplot.yticks(np.arange(60,100,5))
            pyplot.ylim(math.floor(min(data['AmbTemp'])-6), math.ceil(max(data['AmbTemp'])+6))
            if DecimalTime > 1.0:
                pyplot.xlim(DecimalTime-1.0, DecimalTime+0.1)
            else:
                pyplot.xlim(0,1.1)
            pyplot.grid()

            logger.debug("  Saving plot to file: {}".format(PlotFile))
            pyplot.savefig(PlotFile, dpi=dpi, bbox_inches='tight', pad_inches=0.05)
            logger.info("  done.")
    else:
        logger.info("Could not find data file: {}".format(DataFile))

    ##-------------------------------------------------------------------------
    ## Create Daily Symlink if Not Already
    ##-------------------------------------------------------------------------
    LinkFileName = 'latest.png'
    LinkFile = os.path.join('/', 'var', 'log', 'Kegerator', LinkFileName)
    if not os.path.exists(LinkFile):
        logger.info('Making {} symlink to {}'.format(LinkFile, PlotFile))
        os.symlink(PlotFile, LinkFile)
        logger.info("Done")
Example #40
def pca_gal(**kwargs):
    """Wrapper on pca_solve to handle galaxy eigenspectra.

    Parameters
    ----------
    inputfile : :class:`str`, optional
        The list of spectra to use.  If not specified, $IDLSPEC2D_DIR/templates/eigeninput_gal.dat will be used.
    wavemin : :class:`float`, optional
        Minimum wavelength for the template.  If not specified 1850 Å will be used.
    wavemax : :class:`float`, optional
        Maximum wavelength for the template.  If not specified 10000 Å will be used.
    niter : :class:`int`, optional
        Number of iterations.  The default is 10.
    dump : :class:`str`, optional
        If set, save input data in a Python pickle file.
    flux : :class:`bool`, optional
        If set to ``True`` make some additional QA plots of the input spectra.
        The default is ``False``.

    Returns
    -------
    None

    Notes
    -----
    Creates spEigenGal-MJD.fits and some associated QA plots.
    """
    import os
    import os.path
    import pickle
    import matplotlib

    matplotlib.use("Agg")  # Non-interactive back-end
    import pylab
    from astropy.io import ascii, fits
    import numpy as np
    from matplotlib.font_manager import fontManager, FontProperties
    from ...goddard.astro import get_juldate
    from ...pydlutils.image import djs_maskinterp
    from ...pydlutils.math import djs_median
    from . import pca_solve, plot_eig, readspec, skymask, wavevector

    if "inputfile" in kwargs:
        inputfile = kwargs["inputfile"]
    else:
        inputfile = os.path.join(os.getenv("IDLSPEC2D_DIR"), "templates", "eigeninput_gal.dat")
    if "wavemin" in kwargs:
        wavemin = kwargs["wavemin"]
    else:
        wavemin = 1850.0
    if "wavemax" in kwargs:
        wavemax = kwargs["wavemax"]
    else:
        wavemax = 10000.0
    snmax = 100.0
    if "niter" in kwargs:
        niter = kwargs["niter"]
    else:
        niter = 10
    nkeep = 4
    minuse = 10
    #
    # Name the output files.
    #
    jd = get_juldate()
    outfile = "spEigenGal-%d" % int(jd - 2400000.5)
    #
    # Read the input spectra
    #
    converters = {
        "plate": [ascii.convert_numpy(np.int32)],
        "mjd": [ascii.convert_numpy(np.int32)],
        "fiber": [ascii.convert_numpy(np.int32)],
    }
    input_data = ascii.read(inputfile, names=["plate", "mjd", "fiber", "zfit"], converters=converters)
    plate = input_data["plate"].data
    mjd = input_data["mjd"].data
    fiber = input_data["fiber"].data
    zfit = input_data["zfit"].data
    if "dump" in kwargs:
        dumpfile = kwargs["dump"]
    else:
        dumpfile = "this-file-does-not-exist"
    if os.path.exists(dumpfile):
        print("Loading data from {0}.".format(dumpfile))
        f = open(dumpfile, "rb")
        pcaflux = pickle.load(f)
        f.close()
    else:
        spplate = readspec(plate, fiber, mjd=mjd, **kwargs)
        #
        # Insist that all of the requested spectra exist.
        #
        missing = spplate["plugmap"]["FIBERID"] == 0
        if missing.any():
            imissing = missing.nonzero()[0]
            for k in imissing:
                print("Missing plate={0:d} mjd={1:d} fiber={2:d}".format(plate[k], mjd[k], fiber[k]))
            raise ValueError("{0:d} missing object(s).".format(missing.sum()))
        #
        # Do not fit where the spectrum may be dominated by sky-sub residuals.
        #
        objinvvar = skymask(spplate["invvar"], spplate["andmask"], spplate["ormask"])
        ifix = spplate["flux"] ** 2 * objinvvar > snmax ** 2
        if ifix.any():
            objinvvar[ifix.nonzero()] = (snmax / spplate["flux"][ifix.nonzero()]) ** 2
        #
        # Set the new wavelength mapping here.  If the binsz keyword is not set,
        # then bin size is determined from the first spectrum returned by readspec.
        # This is fine in the case where all spectra have the same bin size
        # (though their starting wavelengths may differ).  However, this may not
        # be a safe assumption in the future.
        #
        if "binsz" in kwargs:
            objdloglam = kwargs["binsz"]
        else:
            objdloglam = spplate["loglam"][0, 1] - spplate["loglam"][0, 0]
        newloglam = wavevector(np.log10(wavemin), np.log10(wavemax), binsz=objdloglam)
        #
        # Do PCA solution.
        #
        pcaflux = pca_solve(
            spplate["flux"],
            objinvvar,
            spplate["loglam"],
            zfit,
            niter=niter,
            nkeep=nkeep,
            newloglam=newloglam,
            aesthetics="mean",
        )
        #
        # Fill in bad data with a running median of the good data.
        #
        qgood = pcaflux["usemask"] >= minuse
        medflux = np.zeros(pcaflux["flux"].shape, dtype=pcaflux["flux"].dtype)
        if not qgood.all():
            for i in range(nkeep):
                medflux[i, qgood] = djs_median(pcaflux["flux"][i, qgood], width=51, boundary="nearest")
                medflux[i, :] = djs_maskinterp(medflux[i, :], ~qgood, const=True)
            pcaflux["flux"][:, ~qgood] = medflux[:, ~qgood]
        #
        # Dump input fluxes to a file for debugging purposes.
        #
        if "dump" in kwargs:
            f = open(kwargs["dump"], "w")
            pickle.dump(pcaflux, f)
            f.close()
    #
    # Make plots
    #
    colorvec = ["k", "r", "g", "b", "m", "c"]
    smallfont = FontProperties(size="xx-small")
    nspectra = pcaflux["newflux"].shape[0]
    if "flux" in kwargs:
        nfluxes = 30
        separation = 5.0
        nplots = nspectra // nfluxes  # integer division so range() below gets an int
        if nspectra % nfluxes > 0:
            nplots += 1
        for k in range(nplots):
            istart = k * nfluxes
            iend = min(istart + nfluxes, nspectra) - 1
            fig = pylab.figure(dpi=100)
            ax = fig.add_subplot(111)
            for l in range(istart, iend + 1):
                p = ax.plot(
                    10.0 ** pcaflux["newloglam"],
                    pcaflux["newflux"][l, :] + separation * (l % nfluxes),
                    "%s-" % colorvec[l % len(colorvec)],
                    linewidth=1,
                )
            ax.set_xlabel(r"Wavelength [$\AA$]")
            ax.set_ylabel(r"Flux [$\mathsf{10^{-17} erg\, cm^{-2} s^{-1} \AA^{-1}}$] + Constant")
            ax.set_title("Galaxies: Input Spectra %4d-%4d" % (istart + 1, iend + 1))
            ax.set_ylim(
                pcaflux["newflux"][istart, :].min(), pcaflux["newflux"][iend - 1, :].max() + separation * (nfluxes - 1)
            )
            fig.savefig("%s.flux.%04d-%04d.png" % (outfile, istart + 1, iend + 1))
            pylab.close(fig)
    fig = pylab.figure(dpi=100)
    ax = fig.add_subplot(111)
    p = ax.plot(10.0 ** pcaflux["newloglam"], (pcaflux["newivar"] == 0).sum(0) / float(nspectra), "k-")
    ax.set_xlabel(r"Wavelength [$\AA$]")
    ax.set_ylabel("Fraction of spectra with missing data")
    ax.set_title("Missing Data")
    fig.savefig(outfile + ".missing.png")
    pylab.close(fig)
    aratio10 = pcaflux["acoeff"][:, 1] / pcaflux["acoeff"][:, 0]
    aratio20 = pcaflux["acoeff"][:, 2] / pcaflux["acoeff"][:, 0]
    aratio30 = pcaflux["acoeff"][:, 3] / pcaflux["acoeff"][:, 0]
    fig = pylab.figure(dpi=100)
    ax = fig.add_subplot(111)
    p = ax.plot(aratio10, aratio20, marker="None", linestyle="None")
    for k in range(len(aratio10)):
        t = ax.text(
            aratio10[k],
            aratio20[k],
            "%04d-%04d" % (plate[k], fiber[k]),
            horizontalalignment="center",
            verticalalignment="center",
            color=colorvec[k % len(colorvec)],
            fontproperties=smallfont,
        )
    # ax.set_xlim([aratio10.min(), aratio10.max()])
    # ax.set_ylim([aratio20.min(), aratio20.max()])
    ax.set_xlabel("Eigenvalue Ratio, $a_1/a_0$")
    ax.set_ylabel("Eigenvalue Ratio, $a_2/a_0$")
    ax.set_title("Galaxies: Eigenvalue Ratios")
    fig.savefig(outfile + ".a2_v_a1.png")
    pylab.close(fig)
    fig = pylab.figure(dpi=100)
    ax = fig.add_subplot(111)
    p = ax.plot(aratio20, aratio30, marker="None", linestyle="None")
    for k in range(len(aratio10)):
        t = ax.text(
            aratio20[k],
            aratio30[k],
            "%04d-%04d" % (plate[k], fiber[k]),
            horizontalalignment="center",
            verticalalignment="center",
            color=colorvec[k % len(colorvec)],
            fontproperties=smallfont,
        )
    # ax.set_xlim([aratio20.min(), aratio20.max()])
    # ax.set_ylim([aratio30.min(), aratio30.max()])
    ax.set_xlabel("Eigenvalue Ratio, $a_2/a_0$")
    ax.set_ylabel("Eigenvalue Ratio, $a_3/a_0$")
    ax.set_title("Galaxies: Eigenvalue Ratios")
    fig.savefig(outfile + ".a3_v_a2.png")
    pylab.close(fig)
    #
    # Save output to FITS file.
    #
    if os.path.exists(outfile + ".fits"):
        os.remove(outfile + ".fits")
    hdu0 = fits.PrimaryHDU(pcaflux["flux"])
    # fits.new_table() was removed from astropy; BinTableHDU.from_columns() is the replacement.
    hdu1 = fits.BinTableHDU.from_columns(
        fits.ColDefs(
            [
                fits.Column(name="plate", format="J", array=plate),
                fits.Column(name="mjd", format="J", array=mjd),
                fits.Column(name="fiber", format="J", array=fiber),
                fits.Column(name="redshift", format="D", array=zfit),
            ]
        )
    )
    hdulist = fits.HDUList([hdu0, hdu1])
    hdulist[0].header.update("OBJECT", "GALAXY")
    hdulist[0].header.update("COEFF0", pcaflux["newloglam"][0])
    hdulist[0].header.update("COEFF1", pcaflux["newloglam"][1] - pcaflux["newloglam"][0])
    hdulist[0].header.update("IDLUTILS", "pydlutils", "Version of idlutils")
    hdulist[0].header.update("SPEC2D", "eigenspectra", "Version of idlspec2d")
    hdulist[0].header.update("RUN2D", os.getenv("RUN2D"), "Version of 2d reduction")
    hdulist[0].header.update("RUN1D", os.getenv("RUN1D"), "Version of 1d reduction")
    for i in range(len(pcaflux["eigenval"])):
        hdulist[0].header.update("EIGEN%d" % i, pcaflux["eigenval"][i])
    hdulist[1].header.update("FILENAME", inputfile)
    hdulist.writeto(outfile + ".fits")
    plot_eig(outfile + ".fits")
    return
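
The converters dictionary at the top of this example is the core pattern: mapping each column name to ``[ascii.convert_numpy(dtype)]`` guarantees that plate, mjd and fiber come back as 32-bit integers rather than whatever the guesser picks. A minimal, self-contained sketch of just that step (the file name spectra.txt and its contents are hypothetical):

import numpy as np
from astropy.io import ascii

# Hypothetical four-column whitespace table: plate mjd fiber zfit.
with open("spectra.txt", "w") as fh:
    fh.write("0266 51602 3 0.123\n0266 51602 4 0.045\n")

converters = {
    "plate": [ascii.convert_numpy(np.int32)],
    "mjd": [ascii.convert_numpy(np.int32)],
    "fiber": [ascii.convert_numpy(np.int32)],
}
spectra = ascii.read("spectra.txt", format="no_header",
                     names=["plate", "mjd", "fiber", "zfit"],
                     converters=converters)
print(spectra["plate"].dtype)  # int32
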
Esempio n. 41
0
def set_columns(filename, fileformat=None):
    """
    Meat of the program: takes the columns from the input table and matches
    them to the columns provided by the user in the column form.
    Then, assigns units and column information and does all the proper file
    ingestion work.

    """

    if fileformat is None and 'fileformat' in request.args:
        fileformat = request.args['fileformat']


    # This function needs to know about the filename or have access to the
    # table; how do we arrange that?
    table = Table.read(os.path.join(app.config['UPLOAD_FOLDER'], filename),
                       format=fileformat)

    column_data = \
        {field:{'Name':value} for field,value in request.form.items() if '_units' not in field}
    for field,value in request.form.items():
        if '_units' in field:
            column_data[field[:-6]]['unit'] = value

    units_data = {}
    for key, pair in column_data.items():
        if key not in dimensionless_column_names and pair['Name'] not in dimensionless_column_names:
            units_data[pair['Name']] = pair['unit']

    mapping = {filename: [column_data, units_data]}

    # Parse the table file, step-by-step
    rename_columns(table, {k: v['Name'] for k,v in column_data.items()})
    set_units(table, units_data)
    table = fix_bad_types(table)
    convert_units(table)
    add_name_column(table, column_data.get('Username')['Name'])
    add_filename_column(table, filename)
    timestamp = datetime.now()
    add_timestamp_column(table, timestamp)

    add_generic_ids_if_needed(table)
    if column_data.get('issimulated') is None:
        add_is_sim_if_needed(table, False)
    else:
        add_is_sim_if_needed(table, True)

    if column_data.get('isgalactic') is None:
        add_is_gal_if_needed(table, False)
    else:
        add_is_gal_if_needed(table, True)

    # Detect duplicate IDs in uploaded data and bail out if found
    seen = {}
    for row in table:
        name = row['Names']
        id = row['IDs']
        if id in seen:
            raise InvalidUsage("Duplicate ID detected in table: username = {0}, id = {1}. All IDs must be unique.".format(name, id))
        else:
            seen[id] = name

    # If merged table already exists, then append the new entries.
    # Otherwise, create the table

    merged_table_name = os.path.join(app.config['DATABASE_FOLDER'], 'merged_table.ipac')
    if os.path.isfile(merged_table_name):
        merged_table = Table.read(merged_table_name,
                                  converters={'Names':
                                              [ascii.convert_numpy('S64')],
                                              'IDs':
                                              [ascii.convert_numpy('S64')],
                                              'IsSimulated':
                                              [ascii.convert_numpy('S5')],
                                              'IsGalactic':
                                              [ascii.convert_numpy('S5')]},
                                  format='ascii.ipac')
        if 'IsGalactic' not in merged_table.colnames:
            # Assume that anything we didn't already tag as Galactic is probably Galactic
            add_is_gal_column(merged_table, True)

        if 'Timestamp' not in merged_table.colnames:
            # Create a fake timestamp for the previous entries if they don't already have one
            fake_timestamp = datetime.min
            add_timestamp_column(merged_table, fake_timestamp)
    else:
        # Maximum string length of 64 for username, ID -- larger strings are silently truncated
        # TODO: Adjust these numbers to something more reasonable, once we figure out what that is,
        #       and verify that submitted data obeys these limits
        merged_table = Table(data=None, names=['Names','IDs','SurfaceDensity',
                       'VelocityDispersion','Radius','IsSimulated', 'IsGalactic', 'Timestamp'],
                       dtype=[('str', 64),('str', 64),'float','float','float','bool','bool',('str', 26)])
        set_units(merged_table)

    table = reorder_columns(table, merged_table.colnames)
    append_table(merged_table, table)
    Table.write(merged_table, merged_table_name, format='ascii.ipac')

    username = column_data.get('Username')['Name']
    branch,timestamp = commit_change_to_database(username)
    time.sleep(2)
    pull_request(branch, username, timestamp)

    if not os.path.isdir('static/figures/'):
        os.mkdir('static/figures')
    if not os.path.isdir('static/jstables/'):
        os.mkdir('static/jstables')

    outfilename = os.path.splitext(filename)[0]
    myplot = plotData_Sigma_sigma(timeString(), table,
                                  os.path.join(app.config['MPLD3_FOLDER'],
                                               outfilename))

    tablecss = "table,th,td,tr,tbody {border: 1px solid black; border-collapse: collapse;}"
    write_table_jsviewer(table,
                         'static/jstables/{fn}.html'.format(fn=outfilename),
                         css=tablecss,
                         jskwargs={'use_local_files':False},
                         table_id=outfilename)

    return render_template('show_plot.html', imagename='/'+myplot,
                           tablefile='{fn}.html'.format(fn=outfilename))
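
The merged-table read above uses converters to force the Names, IDs, IsSimulated and IsGalactic columns to fixed-width strings, so that rows appended later are not silently truncated. A minimal, self-contained round trip of that idea (the table contents and the file merged_demo.ipac are hypothetical):

from astropy.io import ascii
from astropy.table import Table

t = Table({'Names': ['alice'], 'IDs': ['obj-1'], 'Radius': [1.5]})
t.write('merged_demo.ipac', format='ascii.ipac', overwrite=True)

conv = {'Names': [ascii.convert_numpy('S64')],
        'IDs': [ascii.convert_numpy('S64')]}
merged = Table.read('merged_demo.ipac', format='ascii.ipac', converters=conv)
print(merged['Names'].dtype)  # fixed-width bytes, e.g. dtype('S64')
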
Esempio n. 42
0
def build_poller_table(input, log_level):
    """Create a poller file from dataset names.

    Parameters
    ----------
    input : str, list
        Filename containing a list of dataset names, or a Python list of dataset names, provided by the user.
    log_level : int
        Desired logging level, passed to ``log.setLevel``.

    Returns
    -------
    poller_table : Table
        Astropy table object with the same columns as a poller file.

    """
    log.setLevel(log_level)

    # Check the input file is not empty
    if not os.path.getsize(input):
        log.error('Input poller manifest file, {}, is empty - processing is exiting.'.format(input))
        sys.exit(0)

    datasets = []
    is_poller_file = False
    # np.str was removed from NumPy; the builtin str works with convert_numpy.
    obs_converters = {'col4': [ascii.convert_numpy(str)]}
    if isinstance(input, str):
        input_table = ascii.read(input, format='no_header', converters=obs_converters)
        if len(input_table.columns) == len(POLLER_COLNAMES):
            # We were provided a poller file
            # Now assign column names to table
            for i, colname in enumerate(POLLER_COLNAMES):
                input_table.columns[i].name = colname

            # Convert to a string column, instead of int64
            input_table['obset_id'] = input_table['obset_id'].astype(str)
            is_poller_file = True

        elif len(input_table.columns) == 1:
            input_table.columns[0].name = 'filename'
            is_poller_file = False

        # Since a poller file was the input, it is assumed all the input
        # data is in the local directory, so just collect the filenames.
        datasets = input_table[input_table.colnames[0]].tolist()
        filenames = list(input_table.columns[0])

    elif isinstance(input, list):
        filenames = input

    else:
        id = '[poller_utils.build_poller_table] '
        log.error("{}: Input {} not supported as input for processing.".format(id, input))
        raise ValueError

    # At this point, we have a poller file or a list of filenames.  If the latter, then any individual
    # filename can be a singleton or an association name.  We need to get the full list of actual
    # filenames from the association name.
    if not is_poller_file:
        for filename in filenames:
            # Look for dataset in local directory.
            if "asn" in filename or not os.path.exists(filename):
                # This retrieval will NOT overwrite any ASN members already on local disk
                # Return value will still be list of all members
                files = aqutils.retrieve_observation([filename[:9]], suffix=['FLC'], clobber=False)
                if len(files) == 0:
                    log.error("Filename {} not found in archive!!".format(filename))
                    log.error("Please provide ASN filename instead!")
                    raise ValueError
            else:
                files = [filename]
            datasets += files

    # Each image, whether from a poller file or from an input list needs to be
    # analyzed to ensure it is viable for drizzle processing.  If the image is not
    # viable, it should not be included in the output "poller" table.
    usable_datasets = analyze.analyze_wrapper(datasets)
    if not usable_datasets:
        log.warning("No usable images in poller file or input list for drizzling. The processing of this data is ending.")
        sys.exit(0)

    cols = OrderedDict()
    for cname in POLLER_COLNAMES:
        cols[cname] = []
    cols['filename'] = usable_datasets

    # If processing a list of files, evaluate each input dataset for the information needed
    # for the poller file
    if not is_poller_file:
        for d in usable_datasets:
            with fits.open(d) as dhdu:
                hdr = dhdu[0].header
                cols['program_id'].append(d[1:4].upper())
                cols['obset_id'].append(str(d[4:6]))
                cols['proposal_id'].append(hdr['proposid'])
                cols['exptime'].append(hdr['exptime'])
                cols['detector'].append(hdr['detector'])
                cols['pathname'].append(os.path.abspath(d))
                # process filter names
                if d[0] == 'j':  # ACS data
                    filters = processing_utils.get_acs_filters(dhdu, all=True)
                elif d[0] == 'i':
                    filters = hdr['filter']
                cols['filters'].append(filters)

        # Build output table
        poller_data = [col for col in cols.values()]
        poller_table = Table(data=poller_data,
                             dtype=('str', 'int', 'str', 'str', 'float', 'object', 'str', 'str'))

        # Now assign column names to obset_table
        for i, colname in enumerate(POLLER_COLNAMES):
            poller_table.columns[i].name = colname
    # The input was a poller file, so just keep the viable data rows for output
    else:
        good_rows = []
        for d in usable_datasets:
            for i, old_row in enumerate(input_table):
                if d == input_table['filename'][i]:
                    good_rows.append(old_row)
        # Build the output table once, after all viable rows have been collected.
        poller_table = Table(rows=good_rows, names=input_table.colnames,
                             dtype=('str', 'int', 'str', 'str', 'float', 'object', 'str', 'str'))

    return poller_table
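
When the input is a plain list of exposures, the example assembles the poller table column by column and passes explicit dtype values so that string columns are not left as generic objects. A minimal sketch of that construction (the column values here are made up):

from astropy.table import Table

cols = {'filename': ['ja1b01abq_flc.fits'],
        'proposal_id': [12345],
        'exptime': [450.0]}
poller = Table(data=list(cols.values()), names=list(cols.keys()),
               dtype=('str', 'int', 'float'))
print(poller)
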
Esempio n. 43
0
def set_columns(filename, fileformat=None, testmode=False):
    """
    Meat of the program: takes the columns from the input table and matches
    them to the columns provided by the user in the column form.
    Then, assigns units and column information and does all the proper file
    ingestion work.

    """
    log.debug("Beginning set_columns.")

    if fileformat is None and 'fileformat' in request.args:
        fileformat = request.args['fileformat']

    if 'testmode' in request.args:
        if request.args['testmode'].lower() == 'skip':
            testmode = 'skip'
        else:
            testmode = request.args['testmode'].lower() == 'true'
    if testmode:
        loglevel = log.level
        log.setLevel(10)

    log.debug("Test mode = {0}.".format(testmode))

    log.debug("Reading table {0}".format(filename))
    try:
        table = Table.read(os.path.join(app.config['UPLOAD_FOLDER'], filename),
                           format=fileformat)
    except Exception as ex:
        return render_template('error.html', error=str(ex),
                               traceback=traceback.format_exc())

    # Have to fix the column reading twice
    fix_bad_colnames(table)

    log.debug("Parsing column data.")
    log.debug("form: {0}".format(request.form))
    column_data = {field: {'Name': value}
                   for field, value in request.form.items()
                   if '_units' not in field}
    log.debug("Looping through form items.")
    for field, value in request.form.items():
        if '_units' in field:
            column_data[field[:-6]]['unit'] = value

    log.debug("Looping through column_data.")
    units_data = {}
    for key, pair in column_data.items():
        if (key not in dimensionless_column_names and
            pair['Name'] not in dimensionless_column_names):

            units_data[pair['Name']] = pair['unit']

    log.debug("Created mapping.")
    mapping = {filename: [column_data, units_data]}  # Not used??

    log.debug("Further table handling.")
    key_rename_mapping = {k: v['Name'] for k, v in column_data.items()}
    log.debug("Mapping: {0}".format(key_rename_mapping))
    # Parse the table file, step-by-step
    rename_columns(table, key_rename_mapping)
    set_units(table, units_data)
    table = fix_bad_types(table)
    try:
        convert_units(table)
    except Exception as ex:
        if testmode:
            raise ex
        else:
            return render_template('error.html', error=str(ex),
                                   traceback=traceback.format_exc())
    add_repeat_column(table, column_data.get('Username')['Name'], 'Names')
    if 'ADS_ID' not in table.colnames:
        add_repeat_column(table, request.form['adsid'], 'ADS_ID')
    if 'Publication_DOI_or_URL' not in table.colnames:
        add_repeat_column(table, request.form['doi'], 'Publication_DOI_or_URL')
    if 'DataURL' not in table.colnames:
        add_repeat_column(table, request.form['dataurl'], 'DataURL')
    if 'synthimURL' not in table.colnames:
        add_repeat_column(table, request.form['synthimurl'], 'synthimURL')
    timestamp = datetime.now()
    add_repeat_column(table, timestamp, 'Timestamp')

    add_generic_ids_if_needed(table)
    if column_data.get('ObsSim')['Name'] == 'IsObserved':
        add_is_sim_if_needed(table, False)
    else:
        add_is_sim_if_needed(table, True)

    if column_data.get('GalExgal')['Name'] == 'IsExtragalactic':
        add_is_gal_if_needed(table, False)
    else:
        add_is_gal_if_needed(table, True)

    # Rename the uploaded file to something unique, and store this name
    # in the table
    extension = os.path.splitext(filename)[-1]
    full_filename_old = os.path.join(app.config['UPLOAD_FOLDER'], filename)
    # hashlib needs bytes, so read the file in binary mode.
    with open(full_filename_old, 'rb') as file:
        unique_filename = \
            hashlib.sha1(file.read()).hexdigest()[0:36 - len(extension)] + \
            extension
    full_filename_new = os.path.join(app.config['UPLOAD_FOLDER'],
                                     unique_filename)
    os.rename(full_filename_old, full_filename_new)
    add_repeat_column(table, unique_filename, 'Filename')
    log.debug("Table column names after add_filename_column: ", table.colnames)

    store_form_data(request, fileformat, unique_filename)

    handle_email(request.form['Email'], unique_filename)

    # Detect duplicate IDs in uploaded data and bail out if found
    seen = {}
    for row in table:
        name = row['Names']
        id = row['IDs']
        if id in seen:
            raise InvalidUsage("Duplicate ID detected in table: username = {0}"
                               ", id = {1}. All IDs must be"
                               " unique.".format(name, id))
        else:
            seen[id] = name

    # If merged table already exists, then append the new entries.
    # Otherwise, create the table

    merged_table_name = os.path.join(app.config['DATABASE_FOLDER'],
                                     'merged_table.ipac')
    if os.path.isfile(merged_table_name):
        merged_table = Table.read(merged_table_name,
                                  converters={'Names':
                                              [ascii.convert_numpy('S64')],
                                              'IDs':
                                              [ascii.convert_numpy('S64')],
                                              'IsSimulated':
                                              [ascii.convert_numpy('S5')],
                                              'IsGalactic':
                                              [ascii.convert_numpy('S5')],
                                              'Filename':
                                              [ascii.convert_numpy('S36')]},
                                  format='ascii.ipac')
        if 'IsGalactic' not in merged_table.colnames:
            # Assume that anything we didn't already tag as Galactic is
            # probably Galactic
            add_repeat_column(merged_table, True, 'IsGalactic')

        if 'Timestamp' not in merged_table.colnames:
            # Create a fake timestamp for the previous entries if they don't
            # already have one
            fake_timestamp = datetime.min
            add_repeat_column(merged_table, fake_timestamp, "Timestamp")

        if 'Filename' not in merged_table.colnames:
            # If we don't know the filename, flag it as unknown
            add_repeat_column(merged_table, 'Unknown' + ' ' * 29, 'Filename')

        if 'ADS_ID' not in merged_table.colnames:
            # If we don't know the ADS ID, flag it as unknown
            add_repeat_column(merged_table, 'Unknown' + ' ' * 13, 'ADS_ID')

        if 'Publication_DOI_or_URL' not in merged_table.colnames:
            # If we don't know the publication DOI or URL, flag it as unknown
            add_repeat_column(merged_table, 'Unknown' + ' ' * 57,
                              'Publication_DOI_or_URL')

        if 'DataURL' not in merged_table.colnames:
            # If we don't know the data URL, flag it as unknown
            add_repeat_column(merged_table, 'Unknown' + ' ' * 57, 'DataURL')

        if 'synthimURL' not in merged_table.colnames:
            # If we don't know the synthetic-image URL, flag it as unknown
            add_repeat_column(merged_table, 'Unknown' + ' ' * 57,
                              'synthimURL')

    else:
        # Maximum string length of 64 for username, ID -- larger strings are
        # silently truncated
        # TODO: Adjust these numbers to something more reasonable, once we
        #       figure out what that is, and verify that submitted data obeys
        #       these limits
        names = ['Names', 'IDs', 'SurfaceDensity',
                 'VelocityDispersion', 'Radius', 'IsSimulated', 'IsGalactic',
                 'Timestamp', 'Filename', 'ADS_ID', 'Publication_DOI_or_URL',
                 'DataURL', 'synthimURL']
        col_dtypes = [('str', 64), ('str', 64), 'float', 'float', 'float',
                      'bool', 'bool', ('str', 26), ('str', 36), ('str', 20),
                      ('str', 64), ('str', 64), ('str', 64)]
        merged_table = Table(data=None, names=names, dtype=col_dtypes)
        # dts = merged_table.dtype
        # Hack to force fixed-width: works only on strings
        # merged_table.add_row(["_"*dts[ind].itemsize if dts[ind].kind=='S'
        #                       else False if dts[ind].kind == 'b'
        #                       else np.nan
        #                       for ind in range(len(dts))])
        set_units(merged_table)

    table = reorder_columns(table, merged_table.colnames)
    print("Table column names after reorder_columns: ", table.colnames)
    print("Merged table column names after reorder_columns: ",
          merged_table.colnames)

    # Detect whether any username, ID pairs match entries already in the
    # merged table
    duplicates = {}
    for row in merged_table:
        name = row['Names']
        id = row['IDs']
        if id in seen:
            if name == seen[id]:
                duplicates[id] = name

    handle_duplicates(table, merged_table, duplicates)

    append_table(merged_table, table)

    username = column_data.get('Username')['Name']

    if testmode != 'skip':
        try:
            link_pull_database, link_pull_uploads = \
                create_pull_request(username=username,
                                    merged_table=merged_table,
                                    merged_table_name=merged_table_name,
                                    table_widths=table_widths,
                                    unique_filename=unique_filename,
                                    testmode=testmode)
        except Exception as ex:
            if testmode:
                raise ex
            else:
                return render_template('error.html', error=str(ex),
                                       traceback=traceback.format_exc())
        if isinstance(link_pull_database, Exception):
            ex = link_pull_database
            return render_template('error.html', error=str(ex),
                                   traceback=traceback.format_exc())
    else:
        link_pull_database, link_pull_uploads = 'placeholder', 'placeholder'

    outfilename = os.path.splitext(filename)[0]
    log.debug("Creating plot {0}.".format(outfilename))
    myplot_html, myplot_png = \
        plotData_Sigma_sigma(timeString(), table, outfilename,
                             html_dir=app.config['MPLD3_FOLDER'],
                             png_dir=app.config['PNG_PLOT_FOLDER'])

    log.debug("Creating table.")
    tablecss = "table,th,td,tr,tbody {border: 1px solid black; border-collapse: collapse;}"
    table_name = os.path.join(TABLE_FOLDER, '{fn}.html'.format(fn=outfilename))
    write_table_jsviewer(table,
                         table_name,
                         css=tablecss,
                         jskwargs={'use_local_files': False},
                         table_id=outfilename)

    if myplot_html is None:
        assert myplot_png is None  # should be both or neither
        imagename = None
        png_imagename = None
    else:
        imagename = '/' + myplot_html
        png_imagename = "/" + myplot_png

    return render_template('show_plot.html',
                           imagename=imagename,
                           png_imagename=png_imagename,
                           tablefile='{fn}.html'.format(fn=outfilename),
                           link_pull_uploads=link_pull_uploads,
                           link_pull_database=link_pull_database)
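
The upload is renamed to a content hash above so that identical files map to the same name and the total filename stays at 36 characters. A minimal sketch of that step on a throwaway file:

import hashlib
import os

filename = 'upload.csv'
with open(filename, 'w') as fh:
    fh.write('a,b\n1,2\n')

extension = os.path.splitext(filename)[-1]
with open(filename, 'rb') as fh:  # hashlib needs bytes, hence binary mode
    unique_filename = (hashlib.sha1(fh.read()).hexdigest()[:36 - len(extension)]
                       + extension)
print(unique_filename)  # sha1 prefix plus '.csv', 36 characters in total
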
Esempio n. 44
0
File: core.py Progetto: mkelley/pds3
def read_ascii_table(label, key, path='.'):
    """Read an ASCII table as described by the label.

    Only fixed length records are supported.

    Parameters
    ----------
    label : dict
      The label, as read by `read_label`.
    key : string
      The label key of the object that describes the table.
    path : string, optional
      Directory path to label/table.

    Returns
    -------
    table : astropy Table

    Raises
    ------
    NotImplementedError
    ValueError

    """

    import numpy as np
    from astropy.io import ascii

    # The table object description.
    desc = label[key]

    if not isinstance(desc['COLUMN'], list):
        # For tables with a single column, desc['COLUMN'] needs to be a list
        desc['COLUMN'] = [desc['COLUMN']]

    # Setup table column formats
    n = desc['COLUMNS']
    col_starts = []
    col_ends = []
    converters = dict()
    def repeat(dtype, col):
        n = col.get('ITEMS', 1)
        return dtype if n == 1 else (dtype,) * n
    
    for i in range(n):
        col = desc['COLUMN'][i]
        col_starts.append(col['START_BYTE'] - 1)
        col_ends.append(col_starts[-1] + col['BYTES'] - 1)

        # np.float and np.int were removed from NumPy; use the Python builtins.
        if col['DATA_TYPE'] == 'ASCII_REAL':
            dtype = repeat(float, col)
        elif col['DATA_TYPE'] == 'ASCII_INTEGER':
            dtype = repeat(int, col)
        elif col['DATA_TYPE'] == 'CHARACTER':
            dtype = repeat('S{}'.format(col['BYTES']), col)
        else:
            raise ValueError("Unknown data type: {}".format(col['DATA_TYPE']))
        converters['col{}'.format(i+1)] = [ascii.convert_numpy(dtype)]

    nrows = desc['ROWS']

    # Open the file object, and skip ahead to the start of the table,
    # if needed.  Read the table.
    if isinstance(label['^' + key], tuple):
        filename, start = label['^' + key]
        start = int(start) - 1
        filename = _find_file(filename, path=path)
        if 'RECORD_BYTES' in label:
            record_bytes = label['RECORD_BYTES']
        else:
            record_bytes = desc['RECORD_BYTES']

        #inf = open(filename, 'r')
        #inf.seek(record_bytes * start)
    else:
        filename = _find_file(label['^' + key], path=path)
        start = 0
        #inf = open(filename, 'r')

    table = ascii.read(filename, format='fixed_width_no_header',
                       data_start=start, data_end=nrows+start,
                       col_starts=col_starts, col_ends=col_ends,
                       converters=converters, guess=False)
    #inf.close()

    # Mask data
    for i in range(n):
        col = desc['COLUMN'][i]
        missing_constant = col.get('MISSING_CONSTANT', None)
        if missing_constant is None:
            continue

        j = table.columns[i] == missing_constant
        if np.any(j):
            table.columns[i].mask = j

    # Save column meta data.
    for i in range(n):
        for j in range(desc.get('ITEMS', 1)):
            col = desc['COLUMN'][i]
            table.columns[i].name = col['NAME']
            if 'DESCRIPTION' in col:
                table.columns[i].description = col['DESCRIPTION']

    # Save table meta data.
    for k, v in desc.items():
        if k != 'COLUMN':
            table.meta[k] = v

    return table
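
The reader above builds col_starts/col_ends from the label's START_BYTE and BYTES values and pairs them with per-column converters. A minimal, self-contained sketch of the same fixed-width call on a throwaway file (the byte layout below is made up):

from astropy.io import ascii

with open('table.dat', 'w') as fh:
    fh.write('   42  3.140\n  137 12.500\n')

converters = {'col1': [ascii.convert_numpy(int)],
              'col2': [ascii.convert_numpy(float)]}
tab = ascii.read('table.dat', format='fixed_width_no_header',
                 col_starts=[0, 6], col_ends=[4, 11],
                 converters=converters, guess=False)
print(tab)
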
Esempio n. 45
0
def get_data(z_value, haloid):
    z = z_value
    tr_halo = haloid  # save the ID of the halo to trace
    ### First, read the data at this redshift and save it for later review.
    halo_list = asc.read(
        'D:/mask/MUSIC/MUSIC_reshift/NewMDCLUSTER_0001/GadgetMUSIC-NewMDCLUSTER_0001.z%.3f.AHF_halos'
        % z,
        converters={
            'col1': [asc.convert_numpy(np.int64)],
            'col2': [asc.convert_numpy(np.int64)]
        })
    cNFW = np.array(halo_list['col43'])
    Mhalo = np.array(halo_list['col4'])
    Mstar = np.array(halo_list['col65'])
    Nstar = np.array(halo_list['col64'])
    #star mass and number
    Mgas = np.array(halo_list['col45'])
    Ngas = np.array(halo_list['col44'])
    #gas mass and number
    xhalo = np.array(halo_list['col6'])
    yhalo = np.array(halo_list['col7'])
    zhalo = np.array(halo_list['col8'])
    #x,y,z: position of halo
    Rvir = np.array(halo_list['col12'])
    #virial radius of halo
    locpeak = np.array(halo_list['col14'])
    # loc_peak refers to the centre of potential
    Npart = np.array(halo_list['col5'])
    # the number of particles in the halo
    Nbins = np.array(halo_list['col37'])
    # used to calculate the mass of a star particle and a gas particle
    ARRAY = np.array([
        cNFW, Mhalo, Mstar, Nstar, Mgas, Ngas, xhalo, yhalo, zhalo, Rvir,
        locpeak, Npart, Nbins
    ])
    # In ARRAY, each row corresponds to one property, e.g. row 1 -> cNFW
    M = ARRAY.shape[0]
    # Read the host-halo and ID values, then compare them to find the haloes belonging to a given ID
    Host = np.array(halo_list['col2'])
    Host = np.int64(Host)
    ID = np.array(halo_list['col1'])
    ID = np.int64(ID)
    ID_save = np.array([ID, Host])
    with h5py.File(
            'D:/python1/pydocument/O_Scatter/MUSIC_reshift/NewMDCLUSTER_0001/halo_ID_%.0f.%.3f.h5'
            % (tr_halo, z), 'w') as f:
        f['a'] = np.array(ID_save)
    with h5py.File(
            'D:/python1/pydocument/O_Scatter/MUSIC_reshift/NewMDCLUSTER_0001/halo_ID_%.0f.%.3f.h5'
            % (tr_halo, z), 'r+') as f:
        for t in range(len(ID_save)):
            f['a'][t, :] = ID_save[t, :]

    #get the rows of the array
    with h5py.File(
            'D:/python1/pydocument/O_Scatter/MUSIC_reshift/NewMDCLUSTER_0001/halo_data_%.0f.%.3f.h5'
            % (tr_halo, z), 'w') as f:
        f['a'] = np.array(ARRAY)
    with h5py.File(
            'D:/python1/pydocument/O_Scatter/MUSIC_reshift/NewMDCLUSTER_0001/halo_data_%.0f.%.3f.h5'
            % (tr_halo, z), 'r+') as f:
        for t in range(M):
            f['a'][t, :] = ARRAY[t, :]
    halo_profile = pd.read_table(
        'D:/mask/MUSIC/MUSIC_reshift/NewMDCLUSTER_0001/GadgetMUSIC-NewMDCLUSTER_0001.z%.3f.AHF_profiles'
        % z,
        dtype=float)
    r = np.array(halo_profile['#r(1)'])
    r = np.abs(r)  #to make sure that r is positive
    n_part = np.array(halo_profile['npart(2)'])
    m_in_r = np.array(halo_profile['M_in_r(3)'])
    m_star = np.array(halo_profile['M_star(26)'])
    m_gas = np.array(halo_profile['M_gas(25)'])
    dens = np.array(halo_profile['dens(5)'])
    ovdens = np.array(halo_profile['ovdens(4)'])
    BRRAY = np.array([r, n_part, m_in_r, m_star, m_gas, dens, ovdens])
    # In BRRAY, each row corresponds to one property, e.g. row 1 -> r
    N = BRRAY.shape[0]
    with h5py.File(
            'D:/python1/pydocument/O_Scatter/MUSIC_reshift/NewMDCLUSTER_0001/profile_data_%.0f.%.3f.h5'
            % (tr_halo, z), 'w') as f:
        f['a'] = np.array(BRRAY)
    with h5py.File(
            'D:/python1/pydocument/O_Scatter/MUSIC_reshift/NewMDCLUSTER_0001/profile_data_%.0f.%.3f.h5'
            % (tr_halo, z), 'r+') as f:
        for t in range(N):
            f['a'][t, :] = BRRAY[t, :]
    return ARRAY, BRRAY
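
The function above stores each property array in HDF5 and then reopens the file to overwrite it row by row; the second open needs read/write mode. A minimal sketch of that round trip (the file name halo_demo.h5 is hypothetical):

import h5py
import numpy as np

ARRAY = np.arange(6.0).reshape(2, 3)
with h5py.File('halo_demo.h5', 'w') as f:
    f['a'] = ARRAY              # create the dataset
with h5py.File('halo_demo.h5', 'r+') as f:
    f['a'][0, :] = ARRAY[0, :]  # update one row in place
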
Esempio n. 46
0
def main(args):
    #     temp_high = 42.0
    #     temp_low = 38.0
    status = 'unknown'

    GPIO.setmode(GPIO.BCM)
    GPIO.setup(23, GPIO.OUT)

    ##-------------------------------------------------------------------------
    ## Create logger object
    ##-------------------------------------------------------------------------
    logger = logging.getLogger('MyLogger')
    logger.setLevel(logging.DEBUG)
    ## Set up console output
    LogConsoleHandler = logging.StreamHandler()
    if args.verbose:
        LogConsoleHandler.setLevel(logging.DEBUG)
    else:
        LogConsoleHandler.setLevel(logging.INFO)
    LogFormat = logging.Formatter('%(asctime)23s %(levelname)8s: %(message)s')
    LogConsoleHandler.setFormatter(LogFormat)
    logger.addHandler(LogConsoleHandler)
    ## Set up file output
    now = datetime.datetime.now()
    DateString = '{}'.format(now.strftime('%Y%m%d'))
    TimeString = '{} HST'.format(now.strftime('%H:%M:%S'))
    LogFileName = os.path.join('/', 'var', 'log', 'Kegerator',
                               'Log_{}.txt'.format(DateString))
    LogFileHandler = logging.FileHandler(LogFileName)
    LogFileHandler.setLevel(logging.DEBUG)
    LogFileHandler.setFormatter(LogFormat)
    logger.addHandler(LogFileHandler)

    ##-------------------------------------------------------------------------
    ## Get Temperature and Humidity Values
    ##-------------------------------------------------------------------------
    logger.info('#### Reading Temperature and Humidity Sensors ####')
    temperatures_F = []

    try:
        logger.debug('Reading DHT22')
        DHT = DHT22.DHT22(pin=18)
        DHT.read()
        logger.debug('  Temperature = {:.3f} F, Humidity = {:.1f} %'.format(
            DHT.temperature_F, DHT.humidity))
        temperatures_F.append(DHT.temperature_F)
        RH = DHT.humidity
        AH = humidity.relative_to_absolute_humidity(DHT.temperature_C,
                                                    DHT.humidity)
        logger.debug('  Absolute Humidity = {:.2f} g/m^3'.format(AH))
    except Exception:
        RH = float('nan')
        AH = float('nan')

    logger.debug('Reading DS18B20')
    sensor = DS18B20.DS18B20()
    sensor.read()
    for temp in sensor.temperatures_C:
        logger.debug('  Temperature = {:.3f} F'.format(temp * 9. / 5. + 32.))
        temperatures_F.append(temp * 9. / 5. + 32.)

    ##-------------------------------------------------------------------------
    ## Record Values to Table
    ##-------------------------------------------------------------------------
    datafile = os.path.join('/', 'var', 'log', 'Kegerator',
                            '{}.txt'.format(DateString))
    logger.debug(
        "Preparing astropy table object for data file {}".format(datafile))
    if not os.path.exists(datafile):
        logger.info("Making new astropy table object")
        SummaryTable = table.Table(names=('date', 'time', 'AmbTemp', 'KegTemp', 'KegTemp1', 'KegTemp2', 'KegTemp3', 'RH', 'AH', 'status'), \
                                   dtype=('S10',  'S12',  'f4',      'f4',      'f4',       'f4',       'f4',       'f4', 'f4', 'S8') )
    else:
        logger.debug(
            "Reading astropy table object from file: {0}".format(datafile))
        try:
            SummaryTable = ascii.read(
                datafile,
                guess=False,
                header_start=0,
                data_start=1,
                Reader=ascii.basic.Basic,
                converters={
                    'date': [ascii.convert_numpy('S10')],
                    'time': [ascii.convert_numpy('S12')],
                    'AmbTemp': [ascii.convert_numpy('f4')],
                    'KegTemp': [ascii.convert_numpy('f4')],
                    'KegTemp1': [ascii.convert_numpy('f4')],
                    'KegTemp2': [ascii.convert_numpy('f4')],
                    'KegTemp3': [ascii.convert_numpy('f4')],
                    'RH': [ascii.convert_numpy('f4')],
                    'AH': [ascii.convert_numpy('f4')],
                    'status': [ascii.convert_numpy('S11')],
                })
        except Exception:
            logger.critical("Failed to read data file: {0} {1} {2}".format(
                sys.exc_info()[0],
                sys.exc_info()[1],
                sys.exc_info()[2]))

    ##-------------------------------------------------------------------------
    ## Turn Kegerator Relay On or Off Based on Temperature
    ##-------------------------------------------------------------------------
    temperatures_F.sort()
    ambient_temperature = temperatures_F.pop()
    assert ambient_temperature > max(temperatures_F)
    logger.info('Ambient Temperature = {:.1f}'.format(ambient_temperature))
    for temp in temperatures_F:
        logger.info('Kegerator Temperatures = {:.1f} F'.format(temp))
    temperature = np.median(temperatures_F)
    logger.info('Median Temperature = {:.1f} F'.format(temperature))
    if temperature > temp_high:
        status = 'On'
        logger.info(
            'Temperature {:.1f} is greater than {:.1f}.  Turning freezer {}.'.
            format(temperature, temp_high, status))
        GPIO.output(23, True)
    elif temperature < temp_low:
        status = 'Off'
        logger.info(
            'Temperature {:.1f} is less than {:.1f}.  Turning freezer {}.'.
            format(temperature, temp_low, status))
        GPIO.output(23, False)
    else:
        if len(SummaryTable) > 0:
            status = SummaryTable['status'][-1]
        else:
            status = 'unknown'
        logger.info(
            'Temperature if {:.1f}.  Taking no action.  Status is {}'.format(
                temperature, status))

    ##-------------------------------------------------------------------------
    ## Add row to data table
    ##-------------------------------------------------------------------------
    logger.debug("Writing new row to data table.")
    while len(temperatures_F) < 4:
        temperatures_F.append(float('nan'))
    SummaryTable.add_row((DateString, TimeString, ambient_temperature, temperature, \
                          temperatures_F[0], temperatures_F[1], temperatures_F[2], \
                          RH, AH, status))
    ## Write Table to File
    logger.debug("  Writing new data file.")
    ascii.write(SummaryTable, datafile, Writer=ascii.basic.Basic)

    ##-------------------------------------------------------------------------
    ## Log to Carriots
    ##-------------------------------------------------------------------------
    logger.info('Sending Data to Carriots')
    logger.debug('  Creating Device object')
    Device = Carriots.Client(device_id="kegerator@joshwalawender")
    logger.debug('  Reading api key')
    Device.read_api_key_from_file(
        file=os.path.join(os.path.expanduser('~joshw'), '.carriots_api'))
    data_dict = {'Temperature': temperature, \
                 'Status': status
                 }
    logger.debug('  Data: {}'.format(data_dict))
    Device.upload(data_dict)

    logger.info('Done')
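
The daily log file is re-read with explicit string and float converters, one row is appended, and the whole table is written back with the Basic writer. A minimal sketch of that append-and-rewrite cycle (column set reduced and values made up):

from astropy import table
from astropy.io import ascii

t = table.Table(names=('date', 'time', 'KegTemp', 'status'),
                dtype=('S10', 'S12', 'f4', 'S8'))
t.add_row(('20240101', '12:00:00 HST', 40.5, 'Off'))
ascii.write(t, 'kegerator_demo.txt', Writer=ascii.basic.Basic, overwrite=True)
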
Esempio n. 47
0
def plot(args):
    import matplotlib.pyplot as pyplot

    ##-------------------------------------------------------------------------
    ## Set date to tonight if not specified
    ##-------------------------------------------------------------------------
    now = datetime.datetime.now()
    DateString = now.strftime("%Y%m%d")
    if not args.date:
        args.date = DateString

    ##-------------------------------------------------------------------------
    ## Define File Names
    ##-------------------------------------------------------------------------
    LogFile = os.path.join('/', 'var', 'log', 'Kegerator',
                           'PlotLog_' + args.date + ".txt")
    PlotFile = os.path.join('/', 'var', 'log', 'Kegerator', args.date + ".png")
    DataFile = os.path.join('/', 'var', 'log', 'Kegerator', args.date + ".txt")

    ##-------------------------------------------------------------------------
    ## Create logger object
    ##-------------------------------------------------------------------------
    logger = logging.getLogger('MyLogger')
    logger.setLevel(logging.DEBUG)
    ## Set up console output
    LogConsoleHandler = logging.StreamHandler()
    if args.verbose:
        LogConsoleHandler.setLevel(logging.DEBUG)
    else:
        LogConsoleHandler.setLevel(logging.INFO)
    LogFormat = logging.Formatter('%(asctime)23s %(levelname)8s: %(message)s')
    LogConsoleHandler.setFormatter(LogFormat)
    logger.addHandler(LogConsoleHandler)
    ## Set up file output
    LogFileHandler = logging.FileHandler(LogFile)
    LogFileHandler.setLevel(logging.DEBUG)
    LogFileHandler.setFormatter(LogFormat)
    logger.addHandler(LogFileHandler)

    logger.info("Kegerator.py invoked with --plot option")
    logger.info("  Making plot for day of {}".format(args.date))

    ##-------------------------------------------------------------------------
    ## Read Data
    ##-------------------------------------------------------------------------
    if os.path.exists(DataFile):
        logger.info("  Found data file: {}".format(DataFile))
        data = ascii.read(DataFile,
                          guess=False,
                          header_start=0,
                          data_start=1,
                          Reader=ascii.basic.Basic,
                          converters={
                              'date': [ascii.convert_numpy('S10')],
                              'time': [ascii.convert_numpy('S12')],
                              'AmbTemp': [ascii.convert_numpy('f4')],
                              'KegTemp': [ascii.convert_numpy('f4')],
                              'KegTemp1': [ascii.convert_numpy('f4')],
                              'KegTemp2': [ascii.convert_numpy('f4')],
                              'KegTemp3': [ascii.convert_numpy('f4')],
                              'RH': [ascii.convert_numpy('f4')],
                              'AH': [ascii.convert_numpy('f4')],
                              'status': [ascii.convert_numpy('S11')],
                          })
        datetime_objects = [
            datetime.datetime.strptime(x['time'], '%H:%M:%S HST') for x in data
        ]
        time_decimal = [(x.hour + x.minute / 60. + x.second / 3600.)
                        for x in datetime_objects]
        DecimalTime = max(time_decimal)

        ##-------------------------------------------------------------------------
        ## Make Plot
        ##-------------------------------------------------------------------------
        plot_upper_temp = 45
        plot_lower_temp = 29
        pyplot.ioff()
        plotpos = [
                   [0.05, 0.59, 0.65, 0.40], [0.73, 0.59, 0.21, 0.40],\
                   [0.05, 0.52, 0.65, 0.07], [0.73, 0.52, 0.21, 0.07],\
                   [0.05, 0.25, 0.65, 0.24], [0.73, 0.25, 0.21, 0.24],\
                   [0.05, 0.05, 0.65, 0.18], [0.73, 0.05, 0.21, 0.18],\
                  ]
        if len(data) > 1:
            logger.info("  Generating plot {} ... ".format(PlotFile))
            dpi = 100
            pyplot.figure(figsize=(14, 8), dpi=dpi)

            ## Plot Temperature for This Day
            logger.debug("  Rendering Temperature Plot.")
            TemperatureAxes = pyplot.axes(plotpos[0], xticklabels=[])
            pyplot.title("Kegerator Temperatures for " + args.date)
            pyplot.plot(time_decimal,
                        data['KegTemp'],
                        'ko',
                        label="Median Temp.",
                        markersize=3,
                        markeredgewidth=0)
            pyplot.plot(time_decimal,
                        data['KegTemp1'],
                        'bo',
                        label="Temp. 1",
                        markersize=2,
                        markeredgewidth=0,
                        alpha=0.6)
            pyplot.plot(time_decimal,
                        data['KegTemp2'],
                        'go',
                        label="Temp. 2",
                        markersize=2,
                        markeredgewidth=0,
                        alpha=0.6)
            pyplot.plot(time_decimal,
                        data['KegTemp3'],
                        'yo',
                        label="Temp. 3",
                        markersize=2,
                        markeredgewidth=0,
                        alpha=0.6)
            pyplot.plot([DecimalTime, DecimalTime], [-100, 100],
                        'g-',
                        alpha=0.4)
            pyplot.ylabel("Kegerator Temp. (F)")
            pyplot.xlim(0, 24)
            pyplot.xticks(np.arange(0, 24, 2))
            pyplot.ylim(plot_lower_temp, plot_upper_temp)
            pyplot.grid()
            pyplot.legend(loc='best', prop={'size': 10})
            TemperatureAxes.axhline(32, color='red', lw=4)
            TemperatureAxes.axhline(temp_low, color='blue', lw=4)
            TemperatureAxes.axhline(temp_high, color='blue', lw=4)

            ## Plot Temperature for Last Hour
            logger.debug("  Rendering Recent Temperature Plot.")
            RecentTemperatureAxes = pyplot.axes(plotpos[1],
                                                xticklabels=[],
                                                yticklabels=[])
            pyplot.title("Last Hour")
            pyplot.plot(time_decimal,
                        data['KegTemp'],
                        'ko',
                        label="Kegerator Temp",
                        markersize=3,
                        markeredgewidth=0)
            pyplot.plot(time_decimal,
                        data['KegTemp1'],
                        'bo',
                        label="Kegerator Temp 1",
                        markersize=2,
                        markeredgewidth=0,
                        alpha=0.6)
            pyplot.plot(time_decimal,
                        data['KegTemp2'],
                        'go',
                        label="Kegerator Temp 2",
                        markersize=2,
                        markeredgewidth=0,
                        alpha=0.6)
            pyplot.plot(time_decimal,
                        data['KegTemp3'],
                        'yo',
                        label="Kegerator Temp 3",
                        markersize=2,
                        markeredgewidth=0,
                        alpha=0.6)
            pyplot.plot([DecimalTime, DecimalTime], [-100, 100],
                        'g-',
                        alpha=0.4)
            pyplot.xticks(np.arange(0, 24, 0.25))
            if DecimalTime > 1.0:
                pyplot.xlim(DecimalTime - 1.0, DecimalTime + 0.1)
            else:
                pyplot.xlim(0, 1.1)
            pyplot.ylim(plot_lower_temp, plot_upper_temp)
            pyplot.grid()
            RecentTemperatureAxes.axhline(32, color='red', lw=4)
            RecentTemperatureAxes.axhline(temp_low, color='blue', lw=4)
            RecentTemperatureAxes.axhline(temp_high, color='blue', lw=4)

            ## Plot Relay State
            translator = {'On': 1, 'Off': 0, 'unknown': -0.25}
            relay_state = [translator[val] for val in data['status']]
            logger.debug("  Rendering Relay Status Plot.")
            RelayAxes = pyplot.axes(plotpos[2], yticklabels=[])
            pyplot.plot(time_decimal,
                        relay_state,
                        'ko-',
                        markersize=3,
                        markeredgewidth=0)
            pyplot.plot([DecimalTime, DecimalTime], [-1, 2], 'g-', alpha=0.4)
            pyplot.ylabel("Relay")
            pyplot.xlim(0, 24)
            pyplot.yticks([0, 1])

            pyplot.ylim(-0.5, 1.5)
            pyplot.xticks(np.arange(0, 24, 2))
            pyplot.grid()

            ## Plot Relay State for Last Hour
            logger.debug("  Rendering Recent Relay State Plot.")
            RecentRelayAxes = pyplot.axes(plotpos[3], yticklabels=[])
            pyplot.plot(time_decimal,
                        relay_state,
                        'ko-',
                        markersize=3,
                        markeredgewidth=0)
            pyplot.plot([DecimalTime, DecimalTime], [-1, 2], 'g-', alpha=0.4)
            pyplot.xticks(np.arange(0, 24, 0.25))
            if DecimalTime > 1.0:
                pyplot.xlim(DecimalTime - 1.0, DecimalTime + 0.1)
            else:
                pyplot.xlim(0, 1.1)
            pyplot.yticks([0, 1])
            pyplot.ylim(-0.5, 1.5)
            pyplot.grid()

            ## Plot Humidity for This Day
            HumidityAxes = pyplot.axes(plotpos[4], xticklabels=[])
            logger.debug("  Rendering Humidity Plot.")
            pyplot.plot(time_decimal,
                        data['RH'],
                        'bo',
                        label="Humidity",
                        markersize=3,
                        markeredgewidth=0)
            pyplot.plot([DecimalTime, DecimalTime], [0, 100], 'g-', alpha=0.4)
            pyplot.ylabel("Humidity (%)")
            pyplot.xlabel("Time (Hours HST)")
            pyplot.xlim(0, 24)
            pyplot.ylim(30, 100)
            pyplot.xticks(np.arange(0, 24, 2))
            pyplot.grid()

            ## Plot Humidity for Last 2 Hours
            logger.debug("  Rendering Recent Humidity Plot.")
            RecentHumidityAxes = pyplot.axes(plotpos[5],
                                             yticklabels=[],
                                             xticklabels=[])
            pyplot.plot(time_decimal,
                        data['RH'],
                        'bo',
                        label="Humidity",
                        markersize=3,
                        markeredgewidth=0)
            pyplot.plot([DecimalTime, DecimalTime], [0, 100], 'g-', alpha=0.4)
            pyplot.xticks(np.arange(0, 24, 0.25))
            if DecimalTime > 1.0:
                pyplot.xlim(DecimalTime - 1.0, DecimalTime + 0.1)
            else:
                pyplot.xlim(0, 1.1)
            pyplot.ylim(30, 100)
            pyplot.grid()

            ## Plot Case Temperature for This Day
            logger.debug("  Rendering Case Temperature Plot.")
            AmbTemperatureAxes = pyplot.axes(plotpos[6])
            pyplot.plot(time_decimal,
                        data['AmbTemp'],
                        'ro',
                        label="Ambient Temp",
                        markersize=3,
                        markeredgewidth=0)
            pyplot.plot([DecimalTime, DecimalTime], [-100, 100],
                        'g-',
                        alpha=0.4)
            pyplot.ylabel("Case Temp. (F)")
            pyplot.xlim(0, 24)
            pyplot.xticks(np.arange(0, 24, 2))
            pyplot.yticks(np.arange(60, 100, 5))
            pyplot.ylim(math.floor(min(data['AmbTemp']) - 6),
                        math.ceil(max(data['AmbTemp']) + 6))
            pyplot.grid()

            ## Plot Case Temperature for Last Hour
            logger.debug("  Rendering Recent Case Temperature Plot.")
            RecentAmbTemperatureAxes = pyplot.axes(plotpos[7], yticklabels=[])
            pyplot.plot(time_decimal,
                        data['AmbTemp'],
                        'ro',
                        label="Ambient Temp",
                        markersize=3,
                        markeredgewidth=0)
            pyplot.plot([DecimalTime, DecimalTime], [-100, 100],
                        'g-',
                        alpha=0.4)
            pyplot.xticks(np.arange(0, 24, 0.25))
            pyplot.yticks(np.arange(60, 100, 5))
            pyplot.ylim(math.floor(min(data['AmbTemp']) - 6),
                        math.ceil(max(data['AmbTemp']) + 6))
            if DecimalTime > 1.0:
                pyplot.xlim(DecimalTime - 1.0, DecimalTime + 0.1)
            else:
                pyplot.xlim(0, 1.1)
            pyplot.grid()

            logger.debug("  Saving plot to file: {}".format(PlotFile))
            pyplot.savefig(PlotFile,
                           dpi=dpi,
                           bbox_inches='tight',
                           pad_inches=0.05)
            logger.info("  done.")
    else:
        logger.info("Could not find data file: {}".format(DataFile))

    ##-------------------------------------------------------------------------
    ## Create Daily Symlink if Not Already
    ##-------------------------------------------------------------------------
    LinkFileName = 'latest.png'
    LinkFile = os.path.join('/', 'var', 'log', 'Kegerator', LinkFileName)
    if not os.path.exists(LinkFile):
        logger.info('Making {} symlink to {}'.format(LinkFile, PlotFile))
        os.symlink(PlotFile, LinkFile)
        logger.info("Done")
Esempio n. 48
0
File: triand.py Progetto: adrn/MDM
import os

import astropy.coordinates as coord
import astropy.units as u
from astropy.io import ascii
from astropy.time import Time
from astropy.table import Table, Column, join
import numpy as np

# Project
from streams.coordinates import sex_to_dec
from streams.observation.time import gmst_to_utc, lmst_to_gmst
from streams.observation.rrlyrae import time_to_phase, phase_to_time
from streams.util import project_root

data_file = os.path.join(project_root, "data", "catalog", "TriAnd_RRLyr.txt")
stars = ascii.read(data_file,
                   converters={'objectID': [ascii.convert_numpy(str)]},
                   header_start=0,
                   data_start=1,
                   delimiter=" ")

# Need to wrap so RA's go 22,23,24,0,1,etc.
ras = np.array(stars['ra'])
ras[ras > 90.] = ras[ras > 90.] - 360.
idx = np.argsort(ras)
stars = stars[idx]

names = ["TriAndRRL{0}".format(ii+1) for ii in range(len(stars))]
stars.add_column(Column(names, name='name'))

# Read in RR Lyrae standards
RRLyr_stds1 = ascii.read("/Users/adrian/Documents/GraduateSchool/Observing/Std RR Lyrae/nemec_RRLyrae.txt")
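
The RA wrap above keeps the target list ordered across the 0h/24h boundary: anything past 90 degrees is shifted down by 360 before sorting, so right ascensions run 22h, 23h, 0h, 1h rather than splitting at zero. A minimal sketch of that trick with made-up coordinates:

import numpy as np
from astropy.table import Table

stars_demo = Table({'ra': [350.0, 10.0, 340.0, 5.0]})
ras = np.array(stars_demo['ra'])
ras[ras > 90.] = ras[ras > 90.] - 360.
stars_demo = stars_demo[np.argsort(ras)]
print(list(stars_demo['ra']))  # [340.0, 350.0, 5.0, 10.0]
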
Esempio n. 49
0
def main(args):
#     temp_high = 42.0
#     temp_low = 38.0
    status = 'unknown'

    GPIO.setmode(GPIO.BCM)
    GPIO.setup(23, GPIO.OUT)

    ##-------------------------------------------------------------------------
    ## Create logger object
    ##-------------------------------------------------------------------------
    logger = logging.getLogger('MyLogger')
    logger.setLevel(logging.DEBUG)
    ## Set up console output
    LogConsoleHandler = logging.StreamHandler()
    if args.verbose:
        LogConsoleHandler.setLevel(logging.DEBUG)
    else:
        LogConsoleHandler.setLevel(logging.INFO)
    LogFormat = logging.Formatter('%(asctime)23s %(levelname)8s: %(message)s')
    LogConsoleHandler.setFormatter(LogFormat)
    logger.addHandler(LogConsoleHandler)
    ## Set up file output
    now = datetime.datetime.now()
    DateString = '{}'.format(now.strftime('%Y%m%d'))
    TimeString = '{} HST'.format(now.strftime('%H:%M:%S'))
    LogFileName = os.path.join('/', 'var', 'log', 'Kegerator', 'Log_{}.txt'.format(DateString))
    LogFileHandler = logging.FileHandler(LogFileName)
    LogFileHandler.setLevel(logging.DEBUG)
    LogFileHandler.setFormatter(LogFormat)
    logger.addHandler(LogFileHandler)

    ##-------------------------------------------------------------------------
    ## Get Temperature and Humidity Values
    ##-------------------------------------------------------------------------
    logger.info('#### Reading Temperature and Humidity Sensors ####')
    temperatures_F = []

    try:
        logger.debug('Reading DHT22')
        DHT = DHT22.DHT22(pin=18)
        DHT.read()
        logger.debug('  Temperature = {:.3f} F, Humidity = {:.1f} %'.format(DHT.temperature_F, DHT.humidity))
        temperatures_F.append(DHT.temperature_F)
        RH = DHT.humidity
        AH = humidity.relative_to_absolute_humidity(DHT.temperature_C, DHT.humidity)
        logger.debug('  Absolute Humidity = {:.2f} g/m^3'.format(AH))
    except:
        RH = float('nan')
        AH = float('nan')


    logger.debug('Reading DS18B20')
    sensor = DS18B20.DS18B20()
    sensor.read()
    for temp in sensor.temperatures_C:
        logger.debug('  Temperature = {:.3f} F'.format(temp*9./5.+32.))
        temperatures_F.append(temp*9./5.+32.)


    ##-------------------------------------------------------------------------
    ## Record Values to Table
    ##-------------------------------------------------------------------------
    datafile = os.path.join('/', 'var', 'log', 'Kegerator', '{}.txt'.format(DateString))
    logger.debug("Preparing astropy table object for data file {}".format(datafile))
    if not os.path.exists(datafile):
        logger.info("Making new astropy table object")
        SummaryTable = table.Table(names=('date', 'time', 'AmbTemp', 'KegTemp', 'KegTemp1', 'KegTemp2', 'KegTemp3', 'RH', 'AH', 'status'), \
                                   dtype=('S10',  'S12',  'f4',      'f4',      'f4',       'f4',       'f4',       'f4', 'f4', 'S8') )
    else:
        logger.debug("Reading astropy table object from file: {0}".format(datafile))
        try:
            SummaryTable = ascii.read(datafile, guess=False,
                                      header_start=0, data_start=1,
                                      Reader=ascii.basic.Basic,
                                      converters={
                                      'date': [ascii.convert_numpy('S10')],
                                      'time': [ascii.convert_numpy('S12')],
                                      'AmbTemp': [ascii.convert_numpy('f4')],
                                      'KegTemp': [ascii.convert_numpy('f4')],
                                      'KegTemp1': [ascii.convert_numpy('f4')],
                                      'KegTemp2': [ascii.convert_numpy('f4')],
                                      'KegTemp3': [ascii.convert_numpy('f4')],
                                      'RH': [ascii.convert_numpy('f4')],
                                      'AH': [ascii.convert_numpy('f4')],
                                      'status': [ascii.convert_numpy('S11')],
                                      })
        except:
            logger.critical("Failed to read data file: {0} {1} {2}".format(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2]))


    ##-------------------------------------------------------------------------
    ## Turn Kegerator Relay On or Off Based on Temperature
    ##-------------------------------------------------------------------------
    temperatures_F.sort()
    ambient_temperature = temperatures_F.pop()
    assert ambient_temperature > max(temperatures_F)
    logger.info('Ambient Temperature = {:.1f}'.format(ambient_temperature))
    for temp in temperatures_F:
        logger.info('Kegerator Temperatures = {:.1f} F'.format(temp))
    temperature = np.median(temperatures_F)
    logger.info('Median Temperature = {:.1f} F'.format(temperature))
    if temperature > temp_high:
        status = 'On'
        logger.info('Temperature {:.1f} is greater than {:.1f}.  Turning freezer {}.'.format(temperature, temp_high, status))
        GPIO.output(23, True)
    elif temperature < temp_low:
        status = 'Off'
        logger.info('Temperature {:.1f} is less than {:.1f}.  Turning freezer {}.'.format(temperature, temp_low, status))
        GPIO.output(23, False)
    else:
        if len(SummaryTable) > 0:
            status = SummaryTable['status'][-1]
        else:
            status = 'unknown'
        logger.info('Temperature is {:.1f} F.  Taking no action.  Status is {}.'.format(temperature, status))


    ##-------------------------------------------------------------------------
    ## Add row to data table
    ##-------------------------------------------------------------------------
    logger.debug("Writing new row to data table.")
    while len(temperatures_F) < 4:
        temperatures_F.append(float('nan'))
    SummaryTable.add_row((DateString, TimeString, ambient_temperature, temperature, \
                          temperatures_F[0], temperatures_F[1], temperatures_F[2], \
                          RH, AH, status))
    ## Write Table to File
    logger.debug("  Writing new data file.")
    ascii.write(SummaryTable, datafile, Writer=ascii.basic.Basic)


    ##-------------------------------------------------------------------------
    ## Log to Carriots
    ##-------------------------------------------------------------------------
    logger.info('Sending Data to Carriots')
    logger.debug('  Creating Device object')
    Device = Carriots.Client(device_id="kegerator@joshwalawender")
    logger.debug('  Reading api key')
    Device.read_api_key_from_file(file=os.path.join(os.path.expanduser('~joshw'), '.carriots_api'))
    data_dict = {'Temperature': temperature, \
                 'Status': status
                 }
    logger.debug('  Data: {}'.format(data_dict))
    Device.upload(data_dict)

    logger.info('Done')
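
# Minimal sketch (not part of the original script) of the on/off hysteresis used
# above, pulled out as a standalone helper; the set points are the illustrative
# defaults from this example, not a deployed configuration.
def freezer_state(median_temp_F, previous_state, temp_high=42.0, temp_low=38.0):
    """Return 'On' above temp_high, 'Off' below temp_low, else keep the last state."""
    if median_temp_F > temp_high:
        return 'On'
    elif median_temp_F < temp_low:
        return 'Off'
    return previous_state

# e.g. freezer_state(43.2, 'Off') -> 'On'; freezer_state(40.0, 'On') -> 'On'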
Esempio n. 50
0
def createTable(outlines, metaDict, colNames, colDefaults):
    """
    Creates an astropy table from inputs.

    Parameters
    ----------
    outlines : list of str
        Input lines
    metaDict : dict
        Input meta data
    colNames : list of str
        Input column names
    colDefaults : list
        Input column default values

    Returns
    -------
    table : astropy.table.Table object

    """
    # Before loading table into an astropy Table object, set lengths of Name,
    # Patch, and Type columns to 100 characters
    log = logging.getLogger('LSMTool.Load')

    converters = {}
    nameCol = 'col{0}'.format(colNames.index('Name')+1)
    converters[nameCol] = [ascii.convert_numpy('{}100'.format(numpy_type))]
    typeCol = 'col{0}'.format(colNames.index('Type')+1)
    converters[typeCol] = [ascii.convert_numpy('{}100'.format(numpy_type))]
    if 'Patch' in colNames:
        patchCol = 'col{0}'.format(colNames.index('Patch')+1)
        converters[patchCol] = [ascii.convert_numpy('{}100'.format(numpy_type))]

    log.debug('Creating table...')
    table = Table.read('\n'.join(outlines), guess=False, format='ascii.no_header', delimiter=',',
        names=colNames, comment='#', data_start=0, converters=converters)

    # Convert spectral index values from strings to arrays.
    if 'SpectralIndex' in table.keys():
        log.debug('Converting spectral indices...')
        specOld = table['SpectralIndex'].data.tolist()
        specVec = []
        maskVec = []
        maxLen = 0
        for l in specOld:
            try:
                if type(l) is float or type(l) is int:
                    maxLen = 1
                else:
                    specEntry = [float(f) for f in l.split(';')]
                    if len(specEntry) > maxLen:
                        maxLen = len(specEntry)
            except:
                pass
        log.debug('Maximum number of spectral-index terms in model: {0}'.format(maxLen))
        for l in specOld:
            try:
                if type(l) is float or type(l) is int:
                    specEntry = [float(l)]
                    specMask = [False]
                else:
                    specEntry = [float(f) for f in l.split(';')]
                    specMask = [False] * len(specEntry)
                while len(specEntry) < maxLen:
                    specEntry.append(0.0)
                    specMask.append(True)
                specVec.append(specEntry)
                maskVec.append(specMask)
            except:
                specVec.append([0.0]*maxLen)
                maskVec.append([True]*maxLen)
        specCol = MaskedColumn(name='SpectralIndex', data=np.array(specVec, dtype=np.float))
        specCol.mask = maskVec
        specIndx = table.keys().index('SpectralIndex')
        table.remove_column('SpectralIndex')
        table.add_column(specCol, index=specIndx)

    # Convert RA and Dec to Angle objects
    log.debug('Converting RA...')
    RARaw = table['Ra'].data.tolist()
    RACol = Column(name='Ra', data=RA2Angle(RARaw))
    def raformat(val):
        return Angle(val, unit='degree').to_string(unit='hourangle', sep=':')
    RACol.format = raformat
    RAIndx = table.keys().index('Ra')
    table.remove_column('Ra')
    table.add_column(RACol, index=RAIndx)

    log.debug('Converting Dec...')
    DecRaw = table['Dec'].data.tolist()
    DecCol = Column(name='Dec', data=Dec2Angle(DecRaw))
    def decformat(val):
        return Angle(val, unit='degree').to_string(unit='degree', sep='.')
    DecCol.format = decformat
    DecIndx = table.keys().index('Dec')
    table.remove_column('Dec')
    table.add_column(DecCol, index=DecIndx)

    def fluxformat(val):
        return '{0:0.3f}'.format(val)
    table.columns['I'].format = fluxformat

    # Set column units and default values
    for i, colName in enumerate(colNames):
        log.debug("Setting units for column '{0}' to {1}".format(
            colName, allowedColumnUnits[colName.lower()]))
        table.columns[colName].unit = allowedColumnUnits[colName.lower()]

        if hasattr(table.columns[colName], 'filled') and colDefaults[i] is not None:
            fillVal = colDefaults[i]
            if colName == 'SpectralIndex':
                while len(fillVal) < maxLen:
                    fillVal.append(0.0)
            log.debug("Setting default value for column '{0}' to {1}".
                format(colName, fillVal))
            table.columns[colName].fill_value = fillVal
    table.meta = metaDict

    return table
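
# Minimal sketch of the SpectralIndex padding performed in createTable above, with
# made-up entries; the imports are repeated here so the fragment stands alone.
import numpy as np
from astropy.table import MaskedColumn

_entries = ['-0.8', '-0.7;0.1']                          # one term vs. two terms
_max_len = max(len(_e.split(';')) for _e in _entries)
_vals, _mask = [], []
for _e in _entries:
    _terms = [float(_f) for _f in _e.split(';')]
    _pad = _max_len - len(_terms)
    _vals.append(_terms + [0.0] * _pad)                  # pad short entries with 0.0
    _mask.append([False] * len(_terms) + [True] * _pad)  # ...and mask the padding
_spec_demo = MaskedColumn(name='SpectralIndex', data=np.array(_vals), mask=_mask)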
Esempio n. 51
0
>>> run -i compare_grating_moves
>>> print kadi_mta_bad
>>> print mta_kadi_bad
"""
import numpy as np
import Ska.Numpy
from astropy.table import Table
from astropy.io import ascii
from kadi import events
from Chandra.Time import DateTime

kadi_moves = events.grating_moves.filter(start='2000:160', stop='2014:008',
                                         grating__contains='ETG').table
mta_moves = Table.read('mta_grating_moves.dat', format='ascii',
                       converters={'START_TIME': [ascii.convert_numpy(np.str)],
                                   'STOP_TIME': [ascii.convert_numpy(np.str)]})
mta_moves.sort('START_TIME')

kadi_starts = kadi_moves['tstart']
mta_starts = DateTime(mta_moves['START_TIME'], format='greta').secs

# Kadi to nearest MTA

indexes = np.arange(len(mta_starts))
i_nearest = Ska.Numpy.interpolate(indexes, mta_starts, kadi_starts,
                                  sorted=True, method='nearest')
mta_nearest = mta_moves[i_nearest]
mta_nearest_starts = mta_starts[i_nearest]
dt = kadi_moves['tstart'] - mta_nearest_starts
kadi_mta = Table([i_nearest, kadi_moves['start'], mta_nearest['START_TIME'], dt],
Esempio n. 52
0
def set_columns(filename, fileformat=None):
    """
    Meat of the program: takes the columns from the input table and matches
    them to the columns provided by the user in the column form.
    Then, assigns units and column information and does all the proper file
    ingestion work.

    """

    if fileformat is None and 'fileformat' in request.args:
        fileformat = request.args['fileformat']


    # This function needs to know about the filename or have access to the
    # table; how do we arrange that?
    table = Table.read(os.path.join(app.config['UPLOAD_FOLDER'], filename),
                       format=fileformat)
    
    column_data = \
        {field:{'Name':value} for field,value in request.form.items() if '_units' not in field}
    for field,value in request.form.items():
        if '_units' in field:
            column_data[field[:-6]]['unit'] = value
    
    units_data = {}
    for key, pair in column_data.items():
        if pair['Name'] != "Ignore" and pair['Name'] != "IsSimulated" and key != "Username":
            units_data[pair['Name']] = pair['unit']

    # Parse the table file, step-by-step
    rename_columns(table, {k: v['Name'] for k,v in column_data.items()})
    set_units(table, units_data)
    table = fix_bad_types(table)
    convert_units(table)
    add_name_column(table, column_data.get('Username')['Name'])
    add_filename_column(table, filename)
    add_generic_ids_if_needed(table)
    if column_data.get('issimulated') is None:
        add_is_sim_if_needed(table, False)
    else:
        add_is_sim_if_needed(table, True)

    # If merged table already exists, then append the new entries.
    # Otherwise, create the table

    merged_table_name = os.path.join(app.config['UPLOAD_FOLDER'], 'merged_table.ipac')
    if os.path.isfile(merged_table_name):
        merged_table = Table.read(merged_table_name,
                                  converters={'Names': [ascii.convert_numpy('S64')],
                                              'IDs': [ascii.convert_numpy('S64')],
                                              'IsSimulated': [ascii.convert_numpy('S5')]},
                                  format='ascii.ipac')
    else:
        # Maximum string length of 64 for username, ID -- larger strings are silently truncated
        # TODO: Adjust these numbers to something more reasonable, once we figure out what that is,
        #       and verify that submitted data obeys these limits
        merged_table = Table(data=None,
                             names=['Names', 'IDs', 'SurfaceDensity',
                                    'VelocityDispersion', 'Radius', 'IsSimulated'],
                             dtype=[('str', 64), ('str', 64), 'float', 'float', 'float', 'bool'])
        set_units(merged_table)

    table = reorder_columns(table, merged_table.colnames)
    append_table(merged_table, table)
    Table.write(merged_table, merged_table_name, format='ascii.ipac')

    if not os.path.isdir('static/figures/'):
        os.mkdir('static/figures')
    if not os.path.isdir('static/jstables/'):
        os.mkdir('static/jstables')

    outfilename = os.path.splitext(filename)[0]
    myplot = plotData_Sigma_sigma(timeString(), table, 'static/figures/'+outfilename)

    tablecss = "table,th,td,tr,tbody {border: 1px solid black; border-collapse: collapse;}"
    write_table_jsviewer(table,
                         'static/jstables/{fn}.html'.format(fn=outfilename),
                         css=tablecss,
                         jskwargs={'use_local_files':False},
                         table_id=outfilename)

    return render_template('show_plot.html', imagename='/'+myplot,
                           tablefile='{fn}.html'.format(fn=outfilename))
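
# Minimal sketch of how the column form is parsed above, with a plain dict standing
# in for request.form (field names and units are made up): each plain field maps to
# the new column name, and the matching '<field>_units' entry supplies its unit.
_form_items = {'col1': 'SurfaceDensity', 'col1_units': 'Msun/pc^2',
               'col2': 'VelocityDispersion', 'col2_units': 'km/s'}
_column_data = {_field: {'Name': _value}
                for _field, _value in _form_items.items() if '_units' not in _field}
for _field, _value in _form_items.items():
    if '_units' in _field:
        _column_data[_field[:-6]]['unit'] = _value
# _column_data == {'col1': {'Name': 'SurfaceDensity', 'unit': 'Msun/pc^2'},
#                  'col2': {'Name': 'VelocityDispersion', 'unit': 'km/s'}}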
Esempio n. 53
0
def build_poller_table(input, log_level, poller_type='svm'):
    """Create a poller file from dataset names.

    Parameters
    ----------
    input : str, list
        Filename with list of dataset names, or just a Python list of dataset names, provided by the user.
    log_level : int
        Logging level to use for the module's log messages (e.g., logging.INFO).
    poller_type : str, optional
        Type of poller file being processed: 'svm' (the default) or 'mvm'.

    Returns
    -------
    poller_table : Table
        Astropy table object with the same columns as a poller file.

    """
    log.setLevel(log_level)

    is_poller_file = False
    # Check the input file is not empty
    if not isinstance(input, list) and not os.path.getsize(input):
        log.error(
            'Input poller manifest file, {}, is empty - processing is exiting.'
            .format(input))
        sys.exit(0)

    if poller_type == 'mvm':
        poller_colnames = MVM_POLLER_COLNAMES
        poller_dtype = MVM_POLLER_DTYPE
    else:
        poller_colnames = POLLER_COLNAMES
        poller_dtype = POLLER_DTYPE

    datasets = []
    obs_converters = {'col4': [ascii.convert_numpy(np.str)]}
    if isinstance(input, str):
        input_table = ascii.read(input,
                                 format='no_header',
                                 converters=obs_converters)
        if len(input_table.columns) == len(poller_colnames):
            # We were provided a poller file
            # Now assign column names to table
            for i, colname in enumerate(poller_colnames):
                input_table.columns[i].name = colname

            # Convert to a string column, instead of int64
            input_table['obset_id'] = input_table['obset_id'].astype(np.str)
            # Convert string column into a Bool column
            # The input poller file reports True if it has been reprocessed.
            # This code interprets that as False since it is NOT new, so the code
            # inverts the meaning from the pipeline poller file.
            if poller_type == 'mvm':
                input_table['skycell_new'] = [
                    int(not BOOL_STR_DICT[str(val).upper()])
                    for val in input_table['skycell_new']
                ]
            is_poller_file = True

        elif len(input_table.columns) == 1:
            input_table.columns[0].name = 'filename'
            is_poller_file = False

        # Since a poller file was the input, it is assumed all the input
        # data is in the local directory so just collect the filenames.
        # datasets = input_table[input_table.colnames[0]].tolist()
        filenames = list(input_table.columns[0])

    elif isinstance(input, list):
        filenames = input

    else:
        id = '[poller_utils.build_poller_table] '
        log.error("{}: Input {} not supported as input for processing.".format(
            id, input))
        raise ValueError

    # At this point, we have a poller file or a list of filenames.  If the latter, then any individual
    # filename can be a singleton or an association name.  We need to get the full list of actual
    # filenames from the association name.
    if not is_poller_file:
        for filename in filenames:
            # Look for dataset in local directory.
            if "asn" in filename or not os.path.exists(filename):
                # This retrieval will NOT overwrite any ASN members already on local disk
                # Return value will still be list of all members
                files = aqutils.retrieve_observation([filename[:9]],
                                                     suffix=['FLC'],
                                                     clobber=False)
                if len(files) == 0:
                    log.error(
                        "Filename {} not found in archive!!".format(filename))
                    log.error("Please provide ASN filename instead!")
                    raise ValueError
            else:
                files = [filename]
            datasets += files
    else:
        datasets = filenames

    # Each image, whether from a poller file or from an input list needs to be
    # analyzed to ensure it is viable for drizzle processing.  If the image is not
    # viable, it should not be included in the output "poller" table.
    usable_datasets = analyze.analyze_wrapper(datasets)
    if not usable_datasets:
        log.warning(
            "No usable images in poller file or input list for drizzling. The processing of this data is ending."
        )
        sys.exit(0)

    cols = OrderedDict()
    for cname in poller_colnames:
        cols[cname] = []
    cols['filename'] = usable_datasets

    if poller_type == 'mvm':
        # determine sky-cell ID for input exposures now...
        scells = cell_utils.get_sky_cells(usable_datasets)
        scell_files = cell_utils.interpret_scells(scells)

    # If processing a list of files, evaluate each input dataset for the information needed
    # for the poller file
    if not is_poller_file:
        for d in usable_datasets:
            with fits.open(d) as dhdu:
                hdr = dhdu[0].header
                cols['program_id'].append(d[1:4].upper())
                cols['obset_id'].append(str(d[4:6]))
                cols['proposal_id'].append(hdr['proposid'])
                cols['exptime'].append(hdr['exptime'])
                cols['detector'].append(hdr['detector'])
                cols['pathname'].append(os.path.abspath(d))
                # process filter names
                if d[0] == 'j':  # ACS data
                    filters = processing_utils.get_acs_filters(dhdu, all=True)
                elif d[0] == 'i':
                    filters = hdr['filter']
                cols['filters'].append(filters)
        if poller_type == 'mvm':
            # interpret_scells returns:
            #  {'filename1':{'<sky cell id1>': SkyCell1,
            #               '<sky cell id2>':SkyCell2,
            #               'id': "<sky cell id1>;<sky cell id2>"},
            #   'filename2': ...}
            # This preserves 1 entry per filename, while providing info on
            # multiple SkyCell's for each filename as appropriate.
            #
            cols['skycell_id'] = [
                scell_files[fname]['id'] for fname in cols['filename']
            ]
            cols['skycell_new'] = [1] * len(cols['filename'])

        #
        # Build output table
        #
        poller_data = [col for col in cols.values()]
        poller_names = [colname for colname in cols]
        poller_table = Table(data=poller_data,
                             names=poller_names,
                             dtype=poller_dtype)

    # The input was a poller file, so just keep the viable data rows for output
    else:
        good_rows = []
        for d in usable_datasets:
            for i, old_row in enumerate(input_table):
                if d == input_table['filename'][i]:
                    good_rows.append(old_row)

        poller_table = Table(rows=good_rows,
                             names=input_table.colnames,
                             dtype=poller_dtype)

    #
    # If 'mvm' poller file, expand any multiple skycell entries into separate rows
    #
    if poller_type == 'mvm':
        # A new row will need to be added for each additional SkyCell that the
        # file overlaps...
        #
        poller_table['skycell_obj'] = [None] * len(poller_table)

        #
        # Make a copy of the original poller_table
        #
        new_poller_table = poller_table[poller_table['filename'] != None]
        for name in scell_files:
            for scell_id in scell_files[name]:
                if scell_id != 'id':
                    scell_obj = scell_files[name][scell_id]
                    for indx, row in enumerate(poller_table):
                        if row['filename'] != name:
                            continue
                        if new_poller_table[indx]['skycell_obj'] is None:
                            new_poller_table[indx]['skycell_obj'] = scell_obj
                            new_poller_table[indx]['skycell_id'] = scell_id
                        else:
                            poller_rows = poller_table[poller_table['filename']
                                                       == name]
                            sobj0 = poller_rows['skycell_obj'][0]
                            # Select only 1 row regardless of how many we have already
                            # added for this filename (in case file overlapped more than
                            # 2 sky cells at once).
                            poller_row = poller_rows[poller_rows['skycell_obj']
                                                     == sobj0]
                            # make copy of row for this filename
                            # assign updated values to skycell columns
                            poller_row['skycell_id'] = scell_id
                            poller_row['skycell_obj'] = scell_obj
                            # append new row to table
                            new_poller_table.add_row(poller_row[0])
        poller_table = new_poller_table

    return poller_table
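
# Minimal illustration (made-up rows, not from a real poller file) of the effect of
# the string converter used above: parsed as integers, a value such as "01" would
# lose its leading zero.
from astropy.io import ascii  # repeated here so the fragment stands alone

_demo_lines = ['file_a.fits 123 01', 'file_b.fits 456 02']
_demo = ascii.read(_demo_lines, format='no_header',
                   converters={'col3': [ascii.convert_numpy(str)]})
# _demo['col3'] -> ['01', '02'] rather than [1, 2]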
Esempio n. 54
0
    def __getitem__(self, k):
        return [convert_numpy(np_str)]
Esempio n. 55
0
def hmf_gal(**kwargs):
    """Wrapper on hmf_solve analogous to pca_gal and pca_solve.

    Parameters
    ----------
    inputfile : :class:`str`, optional
        The list of spectra to use.  If not specified, $IDLSPEC2D_DIR/templates/eigeninput_gal.dat will be used.
    wavemin : :class:`float`, optional
        Minimum wavelength for the template.  If not specified 1900 Å will be used.
    wavemax : :class:`float`, optional
        Maximum wavelength for the template.  If not specified 10000 Å will be used.
    K : :class:`int`, optional
        Number of templates to calculate.  The default is 4.
    nonnegative : :class:`bool`, optional
        If set to ``True`` use nonnegative HMF.  The default is ``False``.
    epsilon : :class:`float`, optional
        Value of regularization parameter to use.  The default is 0.0, which means it is not used.
    flux : :class:`bool`, optional
        If set to ``True`` make some additional QA plots of the input spectra.
        The default is ``False``.

    Returns
    -------
    None

    Notes
    -----
    Creates spEigenGal-MJD.fits and some associated QA plots.  These files
    will be created in the same directory as the inputfile (see above).

    The :envvar:`RUN2D` environment variable must be set.  This routine will search
    for a pickle file of the form ``eigeninput_gal_$RUN2D.dump``.  If this file
    is not found, it will be created.
    """
    import os
    import os.path
    import pickle
    import matplotlib
    matplotlib.use('Agg') # Non-interactive back-end
    import pylab
    from astropy.io import ascii, fits
    import numpy as np
    # from matplotlib.font_manager import fontManager, FontProperties
    from ...goddard.astro import get_juldate
    from ...pydlutils.image import djs_maskinterp
    from ...pydlutils.math import djs_median, find_contiguous
    from astropy.io import ascii
    from . import plot_eig, readspec, skymask, wavevector, hmf_solve
    from ..spec2d import combine1fiber
    if 'inputfile' in kwargs:
        inputfile = kwargs['inputfile']
    else:
        inputfile = os.path.join(os.getenv('IDLSPEC2D_DIR'),
            'templates','eigeninput_gal.dat')
    outdir = os.path.dirname(inputfile)
    if 'wavemin' in kwargs:
        wavemin = kwargs['wavemin']
    else:
        # Almost everything below 1900 is missing
        wavemin = 1900.0
        # wavemin = 1850.0
    if 'wavemax' in kwargs:
        wavemax = kwargs['wavemax']
    else:
        wavemax = 10000.0
    snmax = 100.0
    if 'K' in kwargs:
        K = kwargs['K']
    else:
        K = 4
    if 'nonnegative' in kwargs:
        nonnegative = kwargs['nonnegative']
    else:
        nonnegative = False
    if 'epsilon' in kwargs:
        epsilon = kwargs['epsilon']
    else:
        epsilon = 0.0
    if 'flux' in kwargs:
        plot_flux = kwargs['flux']
    else:
        plot_flux = False
    #
    # Name the output files.
    #
    jd = get_juldate()
    outfile = os.path.join(outdir,"spEigenGal-{0:d}".format(int(jd - 2400000.5)))
    #
    # Read the input spectra
    #
    converters = {'plate': [ascii.convert_numpy(np.int32)],
        'mjd': [ascii.convert_numpy(np.int32)],
        'fiber': [ascii.convert_numpy(np.int32)] }
    input_data = ascii.read(inputfile,names=['plate','mjd','fiber','zfit'],converters=converters)
    plate = input_data['plate'].data
    mjd = input_data['mjd'].data
    fiber = input_data['fiber'].data
    zfit = input_data['zfit'].data
    #
    # Run combine1fiber
    #
    dump = os.path.join(outdir,'eigeninput_gal_{0}.dump'.format(os.getenv('RUN2D')))
    if os.path.exists(dump):
        print("Loading data from {0}.".format(dump))
        f = open(dump, 'rb')
        foo = pickle.load(f)
        newflux = foo['newflux']
        newivar = foo['newivar']
        newloglam = foo['newloglam']
        f.close()
    else:
        spplate = readspec(plate,fiber,mjd=mjd,**kwargs)
        #
        # Insist that all of the requested spectra exist.
        #
        missing = spplate['plugmap']['FIBERID'] == 0
        if missing.any():
            imissing = missing.nonzero()[0]
            for k in imissing:
                print("Missing plate={0:d} mjd={1:d} fiber={2:d}".format(plate[k],mjd[k],fiber[k]))
            raise ValueError("{0:d} missing object(s).".format(missing.sum()))
        #
        # Do not fit where the spectrum may be dominated by sky-sub residuals.
        #
        objinvvar = skymask(spplate['invvar'],spplate['andmask'],spplate['ormask'])
        ifix = spplate['flux']**2 * objinvvar > snmax**2
        if ifix.any():
            objinvvar[ifix.nonzero()] = (snmax/spplate['flux'][ifix.nonzero()])**2
        #
        # Set the new wavelength mapping here.  If the binsz keyword is not set,
        # then bin size is determined from the first spectrum returned by readspec.
        # This is fine in the case where all spectra have the same bin size
        # (though their starting wavelengths may differ).  However, this may not
        # be a safe assumption in the future.
        #
        if 'binsz' in kwargs:
            objdloglam = kwargs['binsz']
        else:
            objdloglam = spplate['loglam'][0,1] - spplate['loglam'][0,0]
        newloglam = wavevector(np.log10(wavemin),np.log10(wavemax),
            binsz=objdloglam)
        nobj, npix = spplate['flux'].shape
        #
        # The redshift of each object in pixels would be logshift/objdloglam.
        #
        logshift = np.log10(1.0 + zfit)
        #
        # Determine the new wavelength mapping.
        #
        fullloglam = newloglam
        dloglam = fullloglam[1] - fullloglam[0]
        nnew = fullloglam.size
        fullflux = np.zeros((nobj,nnew),dtype='d')
        fullivar = np.zeros((nobj,nnew),dtype='d')
        #
        # Shift each spectrum to z = 0 and sample at the output wavelengths
        #
        if spplate['loglam'].ndim == 1:
            indx = spplate['loglam'] > 0
            rowloglam = spplate['loglam'][indx]
        for iobj in range(nobj):
            print("OBJECT {0:5d}".format(iobj))
            if spplate['loglam'].ndim > 1:
                if spplate['loglam'].shape[0] != nobj:
                    raise ValueError('Wrong number of dimensions for loglam.')
                indx = spplate['loglam'][iobj,:] > 0
                rowloglam = spplate['loglam'][iobj,indx]
            flux1,ivar1 = combine1fiber(rowloglam-logshift[iobj],spplate['flux'][iobj,indx],
                objinvvar[iobj,indx],newloglam=fullloglam,binsz=dloglam,aesthetics='mean') # ,verbose=True)
            fullflux[iobj,:] = flux1
            fullivar[iobj,:] = ivar1
        #
        # Find the columns outside of which there is no data at all
        #
        # nzi = fullivar.nonzero()
        # firstcol = nzi[1].min()
        # lastcol = nzi[1].max()
        # newflux = fullflux[:,firstcol:lastcol+1]
        # newivar = fullivar[:,firstcol:lastcol+1]
        # newloglam = fullloglam[firstcol:lastcol+1]
        # nnew = newloglam.size
        newflux = fullflux
        newivar = fullivar
        newloglam = fullloglam
        foo = dict()
        foo['newflux'] = newflux
        foo['newivar'] = newivar
        foo['newloglam'] = newloglam
        f = open(dump, 'wb')
        pickle.dump(foo,f)
        f.close()
    #
    # Find the columns outside of which there is no data at all
    #
    si = newflux*newivar
    zerocol = (newflux.sum(0) == 0) | (newivar.sum(0) == 0) | (si.sum(0) == 0)
    #
    # Find the largest set of contiguous pixels
    #
    goodcol = find_contiguous(~zerocol)
    newflux = newflux[:,goodcol]
    newivar = newivar[:,goodcol]
    # si = si[:,goodcol]
    newloglam = newloglam[goodcol]
    N,M = newflux.shape
    #
    # Run the HMF iteration
    #
    a,g = hmf_solve(newflux,newivar,
        K=K,nonnegative=nonnegative,epsilon=epsilon)
    #
    # Make plots
    #
    colorvec = ['k','r','g','b','m','c']
    # smallfont = FontProperties(size='xx-small');
    nspectra = N
    if plot_flux:
        nfluxes = 30
        separation = 5.0
        nplots = nspectra // nfluxes
        if nspectra % nfluxes > 0:
            nplots += 1
        for k in range(nplots):
            istart = k*nfluxes
            iend = min(istart+nfluxes,nspectra) - 1
            fig = pylab.figure(dpi=100)
            ax = fig.add_subplot(111)
            for l in range(istart,iend+1):
                p = ax.plot(10.0**newloglam,newflux[l,:]+separation*(l%nfluxes),
                    '%s-'%colorvec[l%len(colorvec)],linewidth=1)
            ax.set_xlabel(r'Wavelength [$\AA$]')
            ax.set_ylabel(r'Flux [$\mathsf{10^{-17} erg\, cm^{-2} s^{-1} \AA^{-1}}$] + Constant')
            ax.set_title('Galaxies: Input Spectra %4d-%4d' % (istart+1,iend+1))
            ax.set_ylim(newflux[istart,:].min(),newflux[iend-1,:].max()+separation*(nfluxes-1))
            fig.savefig('%s.flux.%04d-%04d.png'%(outfile,istart+1,iend+1))
            pylab.close(fig)
    fig = pylab.figure(dpi=100)
    ax = fig.add_subplot(111)
    p = ax.plot(10.0**newloglam,(newivar == 0).sum(0)/float(nspectra),'k-')
    ax.set_xlabel(r'Wavelength [$\AA$]')
    ax.set_ylabel('Fraction of spectra with missing data')
    ax.set_title('Missing Data')
    fig.savefig(outfile+'.missing.png')
    pylab.close(fig)
    # aratio10 = pcaflux['acoeff'][:,1]/pcaflux['acoeff'][:,0]
    # aratio20 = pcaflux['acoeff'][:,2]/pcaflux['acoeff'][:,0]
    # aratio30 = pcaflux['acoeff'][:,3]/pcaflux['acoeff'][:,0]
    # fig = pylab.figure(dpi=100)
    # ax = fig.add_subplot(111)
    # p = ax.plot(aratio10,aratio20,marker='None',linestyle='None')
    # for k in range(len(aratio10)):
    #     t = ax.text(aratio10[k],aratio20[k],'%04d-%04d'%(plate[k],fiber[k]),
    #         horizontalalignment='center', verticalalignment='center',
    #         color=colorvec[k%len(colorvec)],
    #         fontproperties=smallfont)
    # ax.set_xlim([aratio10.min(),aratio10.max])
    # ax.set_xlim([aratio20.min(),aratio20.max])
    # ax.set_xlabel('Eigenvalue Ratio, $a_1/a_0$')
    # ax.set_ylabel('Eigenvalue Ratio, $a_2/a_0$')
    # ax.set_title('Galaxies: Eigenvalue Ratios')
    # fig.savefig(outfile+'.a2_v_a1.png')
    # pylab.close(fig)
    # fig = pylab.figure(dpi=100)
    # ax = fig.add_subplot(111)
    # p = ax.plot(aratio20,aratio30,marker='None',linestyle='None')
    # for k in range(len(aratio10)):
    #     t = ax.text(aratio20[k],aratio30[k],'%04d-%04d'%(plate[k],fiber[k]),
    #         horizontalalignment='center', verticalalignment='center',
    #         color=colorvec[k%len(colorvec)],
    #         fontproperties=smallfont)
    # ax.set_xlim([aratio10.min(),aratio10.max])
    # ax.set_xlim([aratio20.min(),aratio20.max])
    # ax.set_xlabel('Eigenvalue Ratio, $a_2/a_0$')
    # ax.set_ylabel('Eigenvalue Ratio, $a_3/a_0$')
    # ax.set_title('Galaxies: Eigenvalue Ratios')
    # fig.savefig(outfile+'.a3_v_a2.png')
    # pylab.close(fig)
    #
    # Save output to FITS file.
    #
    if os.path.exists(outfile+'.fits'):
        os.remove(outfile+'.fits')
    hdu0 = fits.PrimaryHDU(g)
    hdu1 = fits.new_table(fits.ColDefs([
        fits.Column(name='plate',format='J',array=plate),
        fits.Column(name='mjd',format='J',array=mjd),
        fits.Column(name='fiber',format='J',array=fiber),
        fits.Column(name='redshift',format='D',array=zfit)]))
    hdulist = fits.HDUList([hdu0,hdu1])
    hdulist[0].header.update('OBJECT','GALAXY','Type of template')
    hdulist[0].header.update('COEFF0',newloglam[0],'ln(lambda) of the first spectral pixel')
    hdulist[0].header.update('COEFF1',newloglam[1]-newloglam[0],'Delta ln(lambda)')
    hdulist[0].header.update('NONNEG',nonnegative,'Was nonnegative HMF used?')
    hdulist[0].header.update('EPSILON',epsilon,'Regularization parameter used.')
    hdulist[0].header.update('IDLUTILS','pydlutils','Version of idlutils')
    hdulist[0].header.update('SPEC2D','hmf','Version of idlspec2d')
    hdulist[0].header.update('RUN2D',os.getenv('RUN2D'),'Version of 2d reduction')
    hdulist[0].header.update('RUN1D',os.getenv('RUN1D'),'Version of 1d reduction')
    # for i in range(len(pcaflux['eigenval'])):
    #     hdulist[0].header.update("EIGEN%d" % i,pcaflux['eigenval'][i])
    hdulist[1].header.update('FILENAME',inputfile,'Original input file')
    hdulist.writeto(outfile+'.fits')
    plot_eig(outfile+'.fits')
    return
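
# Usage sketch only (the path is a placeholder; RUN2D and IDLSPEC2D_DIR must be set
# in the environment as described in the docstring above):
# hmf_gal(inputfile='/path/to/eigeninput_gal.dat', wavemin=1900.0, wavemax=10000.0,
#         K=4, nonnegative=False, epsilon=0.0, flux=True)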
Esempio n. 56
0
    # read in sinks
    sinks = []
    for i in xrange(nsinks):
        line = f.readline()
        sl = line.split()
        # id, mass, x, y, z, u, v, w
        sinkline = [int(sl[0]),float(sl[1]),float(sl[2]),float(sl[3]),
            float(sl[4]),float(sl[5]),float(sl[6]),float(sl[7])]
        # only take sinks greater than 0.1 Msun
        if sinkline[1] > 0.1:
            sinks.append(sinkline)
    f.close()
    return np.array(sinks) 

sinkcolumnnames = ['ID', 'mass', 'x', 'y', 'z', 'vx', 'vy', 'vz', 'age']   
sinkconverters = {'ID': [ascii.convert_numpy(np.int8)],
    'mass': [ascii.convert_numpy(np.float32)],
    'x': [ascii.convert_numpy(np.float32)],
    'y': [ascii.convert_numpy(np.float32)],
    'z': [ascii.convert_numpy(np.float32)],
    'vx': [ascii.convert_numpy(np.float32)],
    'vy': [ascii.convert_numpy(np.float32)],
    'vz': [ascii.convert_numpy(np.float32)]}
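
# Minimal usage sketch of the converters above (the data line is made up and the
# original read call is not shown in this excerpt; assumes the same
# `from astropy.io import ascii` import used by the converters).
_sink_demo = ascii.read(['1 0.5 0.1 0.2 0.3 1.0 -1.0 0.5 0.02'],
                        format='no_header', names=sinkcolumnnames,
                        converters=sinkconverters)
# _sink_demo['ID'].dtype -> int8, _sink_demo['mass'].dtype -> float32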

def get_time(infofile):
    f = open(infofile)
    for i in xrange(9):
        line = f.readline()
    sl = line.split()
    time = float(sl[2])
    for i in xrange(9):
Esempio n. 57
0
def set_columns(filename, fileformat=None):
    """
    Meat of the program: takes the columns from the input table and matches
    them to the columns provided by the user in the column form.
    Then, assigns units and column information and does all the proper file
    ingestion work.

    """
    log.debug("Beginning set_columns.")

    if fileformat is None and 'fileformat' in request.args:
        fileformat = request.args['fileformat']

    log.debug("Reading table {0}".format(filename))
    try:
        table = Table.read(os.path.join(app.config['UPLOAD_FOLDER'], filename),
                           format=fileformat)
    except Exception as ex:
        return render_template('error.html', error=str(ex),
                               traceback=traceback.format_exc(ex))

    # Have to fix the column reading twice
    fix_bad_colnames(table)

    log.debug("Parsing column data.")
    log.debug("form: {0}".format(request.form))
    column_data = {field:{'Name':value}
                   for field,value in request.form.items()
                   if '_units' not in field}
    log.debug("Looping through form items.")
    for field,value in request.form.items():
        if '_units' in field:
            column_data[field[:-6]]['unit'] = value

    log.debug("Looping through column_data.")
    units_data = {}
    for key, pair in column_data.items():
        if key not in dimensionless_column_names and pair['Name'] not in dimensionless_column_names:
            units_data[pair['Name']] = pair['unit']

    log.debug("Created mapping.")
    mapping = {filename: [column_data, units_data]}

    log.debug("Further table handling.")
    # Parse the table file, step-by-step
    rename_columns(table, {k: v['Name'] for k,v in column_data.items()})
    set_units(table, units_data)
    table = fix_bad_types(table)
    try:
        convert_units(table)
    except Exception as ex:
        return render_template('error.html', error=str(ex), traceback=traceback.format_exc(ex))
    add_name_column(table, column_data.get('Username')['Name'])
    if 'ADS_ID' not in table.colnames:
        table.add_column(table.Column(name='ADS_ID', data=[request.form['adsid']]*len(table)))
    if 'DOI_or_URL' not in table.colnames:
        table.add_column(table.Column(name='DOI_or_URL', data=[request.form['doi']]*len(table)))
    timestamp = datetime.now()
    add_timestamp_column(table, timestamp)

    add_generic_ids_if_needed(table)
    if column_data.get('ObsSim')['Name'] == 'IsObserved':
        add_is_sim_if_needed(table, False)
    else:
        add_is_sim_if_needed(table, True)

    if column_data.get('GalExgal')['Name'] == 'IsExtragalactic':
        add_is_gal_if_needed(table, False)
    else:
        add_is_gal_if_needed(table, True)

    # Rename the uploaded file to something unique, and store this name in the table
    extension = os.path.splitext(filename)[-1]
    full_filename_old = os.path.join(app.config['UPLOAD_FOLDER'], filename)
    with open(full_filename_old, 'rb') as f:
        unique_filename = hashlib.sha1(f.read()).hexdigest()[0:36-len(extension)] + extension
    full_filename_new = os.path.join(app.config['UPLOAD_FOLDER'], unique_filename)
    os.rename(full_filename_old, full_filename_new)
    add_filename_column(table, unique_filename)
    log.debug("Table column names after add_filename_column: ",table.colnames)

    handle_email(request.form['Email'], unique_filename)

    # Detect duplicate IDs in uploaded data and bail out if found
    seen = {}
    for row in table:
        name = row['Names']
        id = row['IDs']
        if id in seen:
            raise InvalidUsage("Duplicate ID detected in table: username = {0}, id = {1}. All IDs must be unique.".format(name, id))
        else:
            seen[id] = name

    # If merged table already exists, then append the new entries.
    # Otherwise, create the table

    merged_table_name = os.path.join(app.config['DATABASE_FOLDER'], 'merged_table.ipac')
    if os.path.isfile(merged_table_name):
        merged_table = Table.read(merged_table_name,
                                  converters={'Names':
                                              [ascii.convert_numpy('S64')],
                                              'IDs':
                                              [ascii.convert_numpy('S64')],
                                              'IsSimulated':
                                              [ascii.convert_numpy('S5')],
                                              'IsGalactic':
                                              [ascii.convert_numpy('S5')],
                                              'Filename':
                                              [ascii.convert_numpy('S36')]},
                                  format='ascii.ipac')
        if 'IsGalactic' not in merged_table.colnames:
            # Assume that anything we didn't already tag as Galactic is probably Galactic
            add_is_gal_column(merged_table, True)

        if 'Timestamp' not in merged_table.colnames:
            # Create a fake timestamp for the previous entries if they don't already have one
            fake_timestamp = datetime.min
            add_timestamp_column(merged_table, fake_timestamp)

        if 'Filename' not in merged_table.colnames:
            # If we don't know the filename, flag it as unknown
            add_filename_column(merged_table, 'Unknown'+' '*29)

        if 'ADS_ID' not in merged_table.colnames:
            # If we don't know the ADS ID, flag it as unknown
            merged_table.add_column(table.Column(name='ADS_ID', data=['Unknown'+' '*13]*len(merged_table)))

        if 'DOI_or_URL' not in merged_table.colnames:
            # If we don't know the DOI or URL, flag it as unknown
            merged_table.add_column(table.Column(name='DOI_or_URL', data=['Unknown'+' '*57]*len(merged_table)))

    else:
        # Maximum string length of 64 for username, ID -- larger strings are silently truncated
        # TODO: Adjust these numbers to something more reasonable, once we figure out what that is,
        #       and verify that submitted data obeys these limits
        merged_table = Table(data=None, names=['Names','IDs','SurfaceDensity',
                       'VelocityDispersion','Radius','IsSimulated', 'IsGalactic', 'Timestamp', 'Filename',
                                              'ADS_ID', 'DOI_or_URL'],
                       dtype=[('str', 64),('str', 64),'float','float','float','bool','bool',
                              ('str', 26),('str', 36), ('str',20), ('str',64)])
        dts = merged_table.dtype
        # Hack to force fixed-width: works only on strings
        # merged_table.add_row(["_"*dts[ind].itemsize if dts[ind].kind=='S'
        #                       else False if dts[ind].kind == 'b'
        #                       else np.nan
        #                       for ind in range(len(dts))])
        set_units(merged_table)

    table = reorder_columns(table, merged_table.colnames)
    print("Table column names after reorder_columns: ",table.colnames)
    print("Merged table column names after reorder_columns: ",merged_table.colnames)

    # Detect whether any username, ID pairs match entries already in the merged table
    duplicates = {}
    for row in merged_table:
        name = row['Names']
        id = row['IDs']
        if id in seen:
            if name == seen[id]:
                duplicates[id] = name

    handle_duplicates(table, merged_table, duplicates)

    append_table(merged_table, table)
    ipac_writer(merged_table, merged_table_name, widths=table_widths)
    
    username = column_data.get('Username')['Name']

    print("Re-fetching the databases.")
    check_authenticate_with_github()

    print("Committing changes")
    # Add merged data to database
    try:
        branch_database,timestamp = commit_change_to_database(username)
        # Adding raw file to uploads
        branch_uploads,timestamp = commit_change_to_database(username, tablename=unique_filename,
                                                     workingdir='uploads/',
                                                     database='uploads',
                                                     branch=branch_database,
                                                     timestamp=timestamp)
    except Exception as ex:
        cleanup_git_directory('uploads/', allow_fail=False)
        cleanup_git_directory('database/', allow_fail=False)
        return render_template('error.html', error=str(ex),
                               traceback=traceback.format_exc(ex))


    try:
        log.debug("Creating pull requests")
        response_database, link_pull_database = pull_request(branch_database,
                                                             username,
                                                             timestamp)
        response_uploads, link_pull_uploads = pull_request(branch_database,
                                                           username,
                                                           timestamp,
                                                           database='uploads')
    except Exception as ex:
        cleanup_git_directory('uploads/', allow_fail=False)
        cleanup_git_directory('database/', allow_fail=False)
        return render_template('error.html', error=str(ex),
                               traceback=traceback.format_exc(ex))

    log.debug("Creating plot.")
    outfilename = os.path.splitext(filename)[0]
    myplot = plotData_Sigma_sigma(timeString(), table,
                                  os.path.join(app.config['MPLD3_FOLDER'],
                                               outfilename))

    log.debug("Creating table.")
    tablecss = "table,th,td,tr,tbody {border: 1px solid black; border-collapse: collapse;}"
    write_table_jsviewer(table,
                         os.path.join(TABLE_FOLDER, '{fn}.html'.format(fn=outfilename)),
                         css=tablecss,
                         jskwargs={'use_local_files':False},
                         table_id=outfilename)

    return render_template('show_plot.html', imagename='/'+myplot,
                           tablefile='{fn}.html'.format(fn=outfilename),
                           link_pull_uploads=link_pull_uploads,
                           link_pull_database=link_pull_database,
                          )