Example No. 1
def UnitValue(caldict_entry):
    '''Turn unit name into floating point unit value'''

    try:
        uname = caldict_entry['UnitName']
        if uname and uname != 'None':
            try:
                if '/' in uname:
                    unames = list(filter(None, uname.split('/')))
                    uvalue1 = getattr(
                        core.G3Units,
                        list(filter(None, unames[0].split(' ')))[0])
                    uvalue2 = getattr(
                        core.G3Units,
                        list(filter(None, unames[1].split(' ')))[0])
                    uvalue = uvalue1 / uvalue2
                else:
                    uvalue = getattr(core.G3Units, uname)
            except AttributeError:
                uvalue = 1.
                core.log_warn('No entry in G3Units for ' + uname +
                              '. Setting UnitValue to 1.0\n')
        else:
            uvalue = 1.
    except KeyError:
        uvalue = 1.

    return uvalue
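A minimal usage sketch (assuming the spt3g package layout; the entry dict mirrors what readCalFile in Example No. 11 below produces):

from spt3g import core

entry = {'Offset': 0.0, 'ReciprocalFactor': 1.0, 'UnitName': 'deg'}
UnitValue(entry)                  # -> core.G3Units.deg
UnitValue({'UnitName': 'deg/s'})  # compound units are split on '/'
UnitValue({})                     # no 'UnitName' key -> 1.0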
Example No. 2
def _concatenate_timestreams(cls,
                             ts_lst,
                             ts_rounding_error=0.6,
                             ts_interp_threshold=0):
    """
    Concatenate G3Timestream objects together.

    Arguments
    ---------
    ts_lst : list
        list of G3Timestream objects.
    ts_rounding_error : float
        allowed error in timestream separation such that timestreams are
        contiguous, as a fraction of the sample rate. Ideally this would
        be 0, but it defaults to 0.5 to allow for downsampler shifting,
        bumped to 0.6 to absorb floating-point error in what 0.5 is.
    ts_interp_threshold : float
        allowed timestream separation below which gaps between timestreams are
        interpolated to be made continuous

    Returns
    -------
    ts : G3Timestream instance
        The concatenation of the input list of G3Timestream objects
    """
    # check for contiguous timestreams
    for i in range(1, len(ts_lst)):
        ts_sep = (ts_lst[i].start.time -
                  ts_lst[i - 1].stop.time) * ts_lst[i].sample_rate
        if numpy.abs(ts_sep - 1) > ts_rounding_error:
            if (ts_interp_threshold > 0) and (
                (ts_sep - 1) < ts_interp_threshold):
                log_warn(
                    "Timestreams are not contiguous: timestreams %d and %d "
                    "separated by %f samples (%s).  Interpolating." %
                    (i, i - 1, ts_sep - 1, str(ts_lst[i].start)))
                # numpy.linspace requires an integer sample count
                v = numpy.linspace(ts_lst[i - 1][-1], ts_lst[i][0],
                                   int(round(ts_sep)) + 1)[1:-1]
                ts_interp = cls(v)
                ts_interp.units = ts_lst[0].units
                ts_interp.start = ts_lst[i -
                                         1].stop + 1. / ts_lst[i].sample_rate
                ts_interp.stop = ts_lst[i].start - 1. / ts_lst[i].sample_rate
                ts_lst = ts_lst[:i] + [ts_interp] + ts_lst[i:]
            else:
                log_fatal(
                    "Timestreams are not contiguous: timestreams %d and %d "
                    "separated by %f samples (%s)" %
                    (i, i - 1, ts_sep - 1, str(ts_lst[i].start)))
        if (ts_lst[i].units != ts_lst[0].units):
            log_fatal("Timestreams are not the same units")
    out_ts = cls(numpy.concatenate(ts_lst))
    out_ts.units = ts_lst[0].units
    out_ts.start = ts_lst[0].start
    out_ts.stop = ts_lst[-1].stop
    return out_ts
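The cls argument shows this helper is written to be bound as a classmethod; a hedged sketch of that binding (the attachment point is an assumption, not confirmed by this listing):

from spt3g import core

# Assumed binding onto G3Timestream; ts_a and ts_b below are hypothetical
# contiguous G3Timestream objects.
core.G3Timestream.concatenate = classmethod(_concatenate_timestreams)
# combined = core.G3Timestream.concatenate([ts_a, ts_b])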
Example No. 3
    def __init__(self,
                 input='CalTimestreams',
                 output_root=None,
                 bands=None,
                 bpm='BolometerProperties'):
        core.log_warn(
            "SplitTimestreamsByBand is deprecated, use SplitByBand instead")
        super(SplitTimestreamsByBand, self).__init__(input=input,
                                                     output_root=output_root,
                                                     bands=bands,
                                                     bpm=bpm)
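Per the warning above, new code would use SplitByBand directly; a hedged sketch assuming it accepts the same keywords this subclass forwards (pipe is an existing core.G3Pipeline):

pipe.Add(SplitByBand,
         input='CalTimestreams',
         output_root=None,
         bands=None,
         bpm='BolometerProperties')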
Example No. 4
def BolometerPropertiesInjector(frame):
    if frame.type == core.G3FrameType.Wiring:
        nchan = len(frame['WiringMap'].keys())
        core.log_notice("Collecting data from %d mapped channels." % nchan,
                        unit='Data Acquisition')
        bpm = os.path.join(os.path.dirname(args.hardware_map),
                           'nominal_online_cal.g3')
        try:
            if not os.path.exists(bpm):
                raise IOError('Missing file %s' % bpm)
            fr = list(core.G3File(bpm))[0]
            return [frame, fr]
        except Exception as e:
            core.log_warn('Error loading BolometerPropertiesMap: %s' % str(e),
                          unit='Data Acquisition')
Example No. 5
def check_iers(g3_time):
    """
    Check whether IERS calculations will work, and load an IERS database file
    from backup if all else fails.

    Arguments
    ---------
    g3_time : G3Time instance
        Most recent time for which an IERS calculation must be computed.
    """
    t = astropy.time.Time(g3_time.mjd, format="mjd")

    # check if accessing the IERS table outright works.
    try:
        t.ut1
        return
    except Exception:
        pass

    # if that fails, allow extrapolation
    iers.conf.auto_max_age = None
    t = astropy.time.Time(g3_time.mjd, format="mjd")
    try:
        t.ut1
        core.log_warn("IERS auto update failed, allowing extrapolation",
                      unit="IERS")
        return
    except Exception:
        pass

    # and if that fails, use a locally cached file that is hopefully set up correctly.
    fname = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                         "finals2000A.all")
    iers.conf.auto_download = False
    iers.IERS.iers_table = iers.IERS_A.open(fname)
    t = astropy.time.Time(g3_time.mjd, format="mjd")
    t.ut1
    core.log_warn("Using IERS table from local cache {}".format(fname),
                  unit="IERS")
Example No. 6
def ValidateMaps(frame, ignore_missing_weights=False):
    """
    Validate that the input map frame has all the necessary keys.

    If ignore_missing_weights is False (default), a warning is issued when the
    frame contains weighted Stokes maps without a weights map.  Set this option
    to True when feeding single bolometer map frames with common weights through
    a pipeline.
    """

    if isinstance(frame, core.G3Frame) and frame.type != core.G3FrameType.Map:
        return

    map_id = frame.get("Id", None)

    if "T" not in frame:
        core.log_fatal("Map frame %s: Missing T map" % map_id, unit="ValidateMaps")
    if ("Q" in frame and not "U" in frame) or ("U" in frame and not "Q" in frame):
        core.log_fatal("Map frame %s: Missing Q or U map" % map_id, unit="ValidateMaps")
    if "Wpol" in frame and "Wunpol" in frame:
        core.log_fatal(
            "Map frame %s: Found both polarized and unpolarized weights" % map_id,
            unit="ValidateMaps",
        )

    stub = frame["T"].clone(False)
    for k in ["T", "Q", "U", "Wpol", "Wunpol"]:
        if k not in frame:
            continue
        if not frame[k].compatible(stub):
            core.log_fatal(
                "Map frame %s: Map %s not compatible with T map" % (map_id, k),
                unit="ValidateMaps",
            )
        if k in "TQU":
            if k == "U" and frame[k].pol_conv is maps.MapPolConv.none:
                core.log_warn(
                    "Map frame %s: U map polarization convention not set" % map_id,
                    unit="ValidateMaps",
                )
            if frame[k].weighted and not ignore_missing_weights:
                if "Wpol" not in frame and "Wunpol" not in frame:
                    core.log_warn(
                        "Map frame %s: Missing weights" % map_id, unit="ValidateMaps"
                    )
                if k == "T" and "Q" not in frame and "Wunpol" not in frame:
                    core.log_warn(
                        "Map frame %s: Missing unpolarized weights" % map_id,
                        unit="ValidateMaps",
                    )
                if k in "QU" and "Wpol" not in frame:
                    core.log_warn(
                        "Map frame %s: Missing polarized weights" % map_id,
                        unit="ValidateMaps",
                    )
        else:
            if frame[k].polarized and ("Q" not in frame or "U" not in frame):
                core.log_fatal(
                    "Map frame %s: Found unpolarized maps with polarized weights"
                    % map_id,
                    unit="ValidateMaps",
                )
            elif not frame[k].polarized and ("Q" in frame or "U" in frame):
                core.log_fatal(
                    "Map frame %s: Found polarized maps with unpolarized weights"
                    % map_id,
                    unit="ValidateMaps",
                )
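ValidateMaps has the (frame, **kwargs) shape of a G3 pipeline module, so it can be added to a pipeline directly; a hedged sketch (the input file name is hypothetical):

pipe = core.G3Pipeline()
pipe.Add(core.G3Reader, filename='maps.g3')
pipe.Add(ValidateMaps, ignore_missing_weights=True)
pipe.Run()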
Example No. 7
def load_skymap_fits(filename,
                     hdu=None,
                     keys=None,
                     memmap=False,
                     apply_units=False):
    """
    Load a fits file containing a sky map.

    Arguments
    ---------
    filename : str
        Path to fits file
    hdu : int, optional
        If supplied, map data are extracted only from the HDU with this index.
    keys : list of strings, optional
        If supplied, return only these keys in the output dictionary.
        Options are: T, Q, U, W.
    memmap : bool, optional
        Argument passed to astropy.io.fits.open. If True, the map is not read
        into memory, but only the required pixels are read when needed. Default:
        False.
    apply_units : bool, optional
        If True, and input maps have known units, multiply by the appropriate
        conversion factor to return maps in G3Units.

    Returns
    -------
    a dictionary of maps keyed with e.g. 'T', 'Q', 'U' and 'W'.
    """

    import astropy.io.fits
    assert (os.path.exists(filename))

    map_type = None
    pol = None
    map_opts = {}
    output = {}

    # defaults for missing header entries
    pol_conv = None
    units = 'Tcmb'
    coord_ref = 'Equatorial'
    proj = 'Proj5'
    weighted = False
    alpha_center = None
    delta_center = None
    res = None
    xres = None

    unit_dict = {
        'k_cmb': ('Tcmb', core.G3Units.K),
        'kcmb': ('Tcmb', core.G3Units.K),
        'kcmb^2': ('Tcmb', core.G3Units.K**2),
    }

    if keys is None:
        keys = ['T', 'Q', 'U', 'W']

    with astropy.io.fits.open(filename, memmap=memmap) as hdulist:
        for hidx, H in enumerate(hdulist):

            hdr = H.header
            mtype = hdr.get('MAPTYPE', None)

            if mtype == 'FLAT' or 'PROJ' in hdr or 'WCSAXES' in hdr or 'CTYPE1' in hdr:
                # flat map
                if not map_type:
                    map_type = 'flat'
                elif map_type != 'flat':
                    raise ValueError(
                        "Expected a {} sky map in HDU {}, found a flat map".
                        format(map_type, hidx))
                # expect that flat sky maps on disk are probably in IAU
                if not pol_conv:
                    pol_conv = 'IAU'
            elif mtype == 'HEALPIX' or hdr.get(
                    'PIXTYPE', None) == 'HEALPIX' or 'NSIDE' in hdr:
                # healpix map
                if not map_type:
                    map_type = 'healpix'
                elif map_type != 'healpix':
                    raise ValueError(
                        "Expected a {} sky map in HDU {}, found a healpix map".
                        format(map_type, hidx))
                # expect that healpix maps on disk are probably in COSMO
                if not pol_conv:
                    pol_conv = 'COSMO'

            if 'POLAR' in hdr:
                pdict = {'T': True, 'F': False}
                pol = pdict.get(hdr['POLAR'], hdr['POLAR'])
            if pol and map_type is not None:
                if hdr.get('POLCCONV', '').upper() not in ['IAU', 'COSMO']:
                    core.log_warn(
                        'Polarization convention not set, assuming %s' %
                        pol_conv,
                        unit='load_skymap_fits')
                else:
                    pol_conv = hdr['POLCCONV'].upper()
            uconv = None
            if 'TUNIT' in hdr:
                u = hdr['TUNIT'].lower()
                if u in unit_dict:
                    units, uconv = unit_dict[u]
            else:
                units = hdr.get('UNITS', units)
            if 'COORDSYS' in hdr:
                cdict = {'C': 'Equatorial', 'G': 'Galactic', 'L': 'Local'}
                coord_ref = cdict.get(hdr['COORDSYS'], coord_ref)
            else:
                coord_ref = hdr.get('COORDREF', coord_ref)
            overflow = hdr.get('OVERFLOW', 0)

            map_opts.update(
                units=getattr(core.G3TimestreamUnits, units),
                coord_ref=getattr(MapCoordReference, coord_ref),
                pol_conv=getattr(MapPolConv, pol_conv) if pol_conv else None,
            )

            if map_type == 'flat':
                map_opts.update(flat_pol=hdr.get('FLATPOL', False))
                map_opts.update(parse_wcs_header(hdr))

            elif map_type == 'healpix':
                nside = hdr['NSIDE']
                nested = hdr['ORDERING'].strip().lower() in ['nest', 'nested']
                map_opts.update(nested=nested)

            # primary HDU
            if H.data is None:
                continue

            # extracting a particular HDU
            if map_type is not None and hdu is not None and hidx != hdu:
                continue

            if map_type == 'flat':
                if hdr.get('ISWEIGHT', None):
                    if 'W' not in keys:
                        continue
                else:
                    if hdr.get('POLTYPE', 'T') not in keys:
                        continue
                data = np.array(H.data, dtype=float)
                if map_opts.pop('transpose', False):
                    data = np.array(data.T)
                if uconv is not None:
                    data *= uconv

            # map type must be known if the HDU contains map data
            if map_type not in ['flat', 'healpix']:
                raise ValueError("Unknown map type in HDU {}".format(hidx))

            if map_type == 'flat' and hdr.get('ISWEIGHT', None):
                # flat map weights
                assert ('T' in output)
                if pol is None:
                    pol = 'Q' in output and 'U' in output
                weight_map = output.setdefault(
                    'W', G3SkyMapWeights(output['T'], polarized=pol))
                fm = FlatSkyMap(data, **map_opts)
                fm.overflow = overflow
                setattr(weight_map, hdr['WTYPE'], fm)
                del data

            elif map_type == 'flat' and not hdr.get('ISWEIGHT', None):
                # flat map data
                ptype = hdr.get('POLTYPE', 'T')
                pol_type = getattr(MapPolType, ptype, None)
                fm = FlatSkyMap(data, pol_type=pol_type, **map_opts)
                fm.overflow = overflow
                output[ptype] = fm
                del data

            elif map_type == 'healpix':
                # healpix map data

                col_dict = {
                    'TEMPERATURE': 'T',
                    'Q_POLARISATION': 'Q',
                    'U_POLARISATION': 'U',
                    'Q-POLARISATION': 'Q',
                    'U-POLARISATION': 'U',
                    'I_STOKES': 'T',
                    'Q_STOKES': 'Q',
                    'U_STOKES': 'U',
                    'I': 'T',
                    'II': 'TT',
                    'IQ': 'TQ',
                    'IU': 'TU',
                    'II_COV': 'TT',
                    'IQ_COV': 'TQ',
                    'IU_COV': 'TU',
                    'QQ_COV': 'QQ',
                    'QU_COV': 'QU',
                    'UU_COV': 'UU',
                }

                pix = None

                partial = hdr.get('INDXSCHM') == 'EXPLICIT' or hdr.get(
                    'OBJECT') == 'PARTIAL'

                for cidx, hcol in enumerate(H.data.names):
                    col = col_dict.get(hcol, hcol)
                    if col in 'TQU' and col not in keys:
                        continue
                    elif col in ['TT', 'TQ', 'TU', 'QQ', 'QU', 'UU'
                                 ] and 'W' not in keys:
                        continue

                    data = np.array(H.data[hcol], dtype=float).ravel()

                    if col == 'PIXEL' or (partial and cidx == 0):
                        pix = np.array(data, dtype=int).ravel()
                        del data
                        continue

                    uconv = None
                    u = 'TUNIT{:d}'.format(cidx + 1)
                    if u in hdr:
                        u = hdr[u].lower()
                        if u in unit_dict:
                            units, uconv = unit_dict[u]
                            if uconv is not None:
                                data *= uconv
                    overflow = hdr.get('TOFLW{:d}'.format(cidx + 1), overflow)

                    weighted = hdr.get(
                        'TISWGT{:d}'.format(cidx + 1),
                        col in ['TT', 'TQ', 'TU', 'QQ', 'QU', 'UU'],
                    )
                    if weighted:
                        assert ('T' in output)
                        if pol is None:
                            pol = 'Q' in output and 'U' in output
                        weight_map = output.setdefault(
                            'W', G3SkyMapWeights(output['T'], polarized=pol))

                        mdata = (pix, data, nside) if pix is not None else data
                        hm = HealpixSkyMap(mdata, **map_opts)
                        hm.overflow = overflow

                        setattr(weight_map, col, hm)

                    else:
                        pol_type = getattr(MapPolType, col, None)
                        mdata = (pix, data, nside) if pix is not None else data

                        hm = HealpixSkyMap(mdata,
                                           pol_type=pol_type,
                                           **map_opts)
                        output[col] = hm

                    del mdata, data

                del pix
                break

            del H.data

    for k, m in output.items():
        m.pol_conv = map_opts['pol_conv']
        if k == 'W':
            continue
        m.weighted = 'W' in output

    return output
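A minimal usage sketch (the file name is hypothetical):

out = load_skymap_fits('skymap.fits', apply_units=True)
t_map = out['T']        # FlatSkyMap or HealpixSkyMap, per the file contents
weights = out.get('W')  # G3SkyMapWeights, present only if weight data were found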
Example No. 8
    def Process(self, f, index_info=None):
        """Processes a frame.  Only Housekeeping frames will be examined;
        other frames will simply be counted.  All frames are passed
        through unmodified.  The index_info will be stored along with
        a description of the frame's data; see the .process_file
        function.

        """
        self.counter += 1
        if index_info is None:
            index_info = {'counter': self.counter}

        f = self.translator(f)
        assert(len(f) == 1)
        f = f[0]

        if f.type == core.G3FrameType.EndProcessing:
            return [f]

        if f.type != core.G3FrameType.Housekeeping:
            return [f]

        vers = f.get('hkagg_version', 0)
        assert(vers == 2)

        if f['hkagg_type'] == so3g.HKFrameType.session:
            session_id = f['session_id']
            if self.session_id is not None:
                if self.session_id != session_id:
                    self.flush()  # Note that this sets self.session_id=None.
            if self.session_id is None:
                core.log_info('New HK Session id = %i, timestamp = %i' %
                              (session_id, f['start_time']), unit='HKScanner')
                self.session_id = session_id

        elif f['hkagg_type'] == so3g.HKFrameType.status:
            # If a provider has disappeared, flush its information into a
            # FieldGroup.
            prov_cands = [_HKProvider.from_g3(p) for p in f['providers']]
            to_flush = list(self.providers.keys())  # prov_ids...
            for p in prov_cands:
                if p.prov_id in to_flush:
                    to_flush.remove(p.prov_id) # no, don't.
                else:
                    self.providers[p.prov_id] = p
            for prov_id in to_flush:
                self.flush([prov_id])

        elif f['hkagg_type'] == so3g.HKFrameType.data:
            # Data frame -- merge info for this provider.
            prov = self.providers[f['prov_id']]

            for bidx, (bname, b) in enumerate(zip(f['block_names'], f['blocks'])):
                assert(isinstance(b, core.G3TimesampleMap))
                if bname not in prov.blocks:
                    prov.blocks[bname] = {'fields': list(b.keys()),
                                          'start': b.times[0].time / core.G3Units.seconds,
                                          'index_info': []}
                # To ensure that the last sample is actually included
                # in the semi-open intervals we use to track frames,
                # the "end" time has to be after the final sample.
                prov.blocks[bname]['end'] = b.times[-1].time / core.G3Units.seconds + SPAN_BUFFER_SECONDS
                ii = {'block_index': bidx,
                      'timestamp': b.times[0].time,
                      'count': len(b.times)}
                ii.update(index_info)
                prov.blocks[bname]['index_info'].append(ii)

        else:
            core.log_warn('Weird hkagg_type: %i' % f['hkagg_type'],
                          unit='HKScanner')
        return [f]
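A hedged sketch of driving Process() by hand, following the index_info hint in the docstring (the reader class and info keys follow so3g conventions but are assumptions here):

reader = so3g.G3IndexedReader('hk.g3')
while True:
    info = {'filename': 'hk.g3', 'byte_offset': reader.Tell()}
    frames = reader.Process(None)
    if not frames:
        break
    scanner.Process(frames[0], info)  # scanner: instance owning this Process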
Example No. 9
File: scanner.py Project: jit9/so3g
    def __call__(self, f):
        """Processes a frame.  Only Housekeeping frames will be examined;
        other frames will simply be counted.  All frames are passed
        through unmodified.

        """
        if f.type == core.G3FrameType.EndProcessing:
            self.report_and_reset()
            return [f]

        if f.type != core.G3FrameType.Housekeeping:
            self.stats['n_other'] += 1
            return f

        self.stats['n_hk'] += 1

        if f['hkagg_type'] == so3g.HKFrameType.session:
            session_id = f['session_id']
            if self.session_id is not None:
                if self.session_id != session_id:
                    # note this clears self.session_id.
                    self.report_and_reset()
            if self.session_id is None:
                core.log_info('New HK Session id = %i, timestamp = %i' %
                              (session_id, f['start_time']),
                              unit='HKScanner')
                self.session_id = session_id
                self.stats['n_session'] += 1

        elif f['hkagg_type'] == so3g.HKFrameType.status:
            # Have any providers disappeared?
            now_prov_id = [p['prov_id'].value for p in f['providers']]
            for p, info in self.providers.items():
                if p not in now_prov_id:
                    info['active'] = False

            # New providers?
            for p in now_prov_id:
                info = self.providers.get(p)
                if info is not None:
                    if not info['active']:
                        core.log_warn('prov_id %i came back to life.' % p,
                                      unit='HKScanner')
                        self.stats['concerns']['n_warning'] += 1
                        info['n_active'] += 1
                        info['active'] = True
                else:
                    self.providers[p] = {
                        'active': True,       # Currently active (during processing).
                        'n_active': 1,        # Times this provider id became active.
                        'n_frames': 0,        # Number of data frames.
                        'timestamp_init': f['timestamp'],  # Timestamp of provider appearance.
                        'timestamp_data': None,  # Timestamp of most recent data frame.
                        'ticks': 0,           # Total number of timestamps in all blocks.
                        'span': None,         # (earliest_time, latest_time)
                    }

        elif f['hkagg_type'] == so3g.HKFrameType.data:
            info = self.providers[f['prov_id']]
            info['n_frames'] += 1
            t_this = f['timestamp']
            if info['timestamp_data'] is None:
                t_ref = info['timestamp_init']
                if t_this < t_ref:
                    core.log_warn('data timestamp (%.1f) precedes provider '
                                  'timestamp by %f seconds.' %
                                  (t_this, t_this - t_ref),
                                  unit='HKScanner')
                    self.stats['concerns']['n_warning'] += 1
            elif t_this <= info['timestamp_data']:
                core.log_warn(
                    'data frame timestamps are not strictly ordered.',
                    unit='HKScanner')
                self.stats['concerns']['n_warning'] += 1
            info['timestamp_data'] = t_this  # update

            t_check = []
            for b in f['blocks']:
                if len(b.t):
                    if info['span'] is None:
                        info['span'] = b.t[0], b.t[-1]
                    else:
                        t0, t1 = info['span']
                        info['span'] = min(b.t[0], t0), max(b.t[-1], t1)
                    t_check.append(b.t[0])
                info['ticks'] += len(b.t)
                for k, v in b.data.items():
                    if len(v) != len(b.t):
                        core.log_error(
                            'Field "%s" has %i samples but .t has %i samples.'
                            % (k, len(v), len(b.t)))
                        self.stats['concerns']['n_error'] += 1
            if len(t_check) and abs(min(t_check) - t_this) > 60:
                core.log_warn(
                    'data frame timestamp (%.1f) does not correspond to '
                    'data timestamp vectors (%s) .' % (t_this, t_check),
                    unit='HKScanner')
                self.stats['concerns']['n_warning'] += 1

        else:
            core.log_warn('Weird hkagg_type: %i' % f['hkagg_type'],
                          unit='HKScanner')
            self.stats['concerns']['n_warning'] += 1

        return [f]
Example No. 10
def WriteDB(fr, client, fields=None):
    '''
    Write points to the database for each field

    Arguments
    ---------
    fr : G3Frame
        Frame to process; only GcpSlow frames are handled.
    client :
        InfluxDB client
    fields :
        Which gcp fields to add to database. See parse_field for options. If
        None, add all.
    '''
    from influxdb.exceptions import InfluxDBClientError
    from influxdb.exceptions import InfluxDBServerError

    if fr.type != core.G3FrameType.GcpSlow:
        return
    all_fields = build_field_list(fr)
    if fields is None:
        fields = all_fields.keys()
    dict_list = []
    for f in fields:
        field_dat = all_fields[f]
        if len(field_dat) == 4:
            stat, attr, ind, unit = field_dat
            try:
                dat = getattr(fr[stat], attr)[ind]
                time = getattr(fr[stat], 'time')
            except AttributeError:
                # OnlinePointingModel
                dat = fr[stat][attr][ind]
                time = fr[stat]['time']
        elif len(field_dat) == 3:
            stat, attr, unit = field_dat
            if stat not in fr:
                # Field only exists in live data stream
                continue
            try:
                dat = getattr(fr[stat], attr)
            except AttributeError:
                try:
                    dat = fr[stat][attr]
                except KeyError:  # Field only exists in live data stream
                    continue
            if 'Bench' in stat:  # funny time field for bench positions
                time = fr['BenchSampleTime']
            elif 'Mux' in stat:
                time = fr['MuxTime']
            elif stat in ['CryoStatus', 'Weather', 'PTStatus']:
                time = fr['{}Time'.format(stat)]
            else:
                try:
                    time = getattr(fr[stat], 'time')
                except AttributeError:
                    time = fr[stat]['time']
        elif len(field_dat) == 2:
            stat, unit = field_dat
            try:
                dat = fr[stat]
            except KeyError:  # e.g., no obsid
                core.log_warn('No key {}'.format(stat), unit='InfluxDB')
                continue
            try:
                time = getattr(fr[stat], 'time')
            except AttributeError:
                time = list(fr['antenna0']['tracker']['utc'][0])

        # InfluxDB wants time in nanoseconds since the UNIX epoch in UTC
        try:
            time = [x.time / U.nanosecond for x in np.atleast_1d(time)]
        except AttributeError:
            time = [
                core.G3Time(t0).time / U.nanosecond
                for t0 in np.atleast_1d(time)
            ]
        if dat is None:
            core.log_warn('{} dat is None'.format(f), unit='InfluxDB')
            continue
        dat = np.atleast_1d(dat)
        try:
            dlen = len(dat)
        except TypeError:
            # sometimes source_name is a weird non-none value
            continue
        if unit is not None:
            if unit == 'C':
                zeropt_K = 273.15
                cal_dat = dat / U.K - zeropt_K
            else:
                cal_dat = dat / unit
        else:
            cal_dat = dat
        try:
            if np.any(np.isnan(cal_dat)):
                continue
        except TypeError:
            pass
        if 'heat' not in f:
            tag = f
        else:
            tag = f.replace('heat_', '')

        # for fields that have az/el components
        az_el_names = [
            'az', 'el', 'ra', 'dec', 'x', 'y', 'hr_angle', 'sin', 'cos', 'lat'
        ]
        tag2 = f
        for name in az_el_names:
            # require name_ at beginning or _name at end
            match1 = re.findall('^{}_'.format(name), f)
            match2 = re.findall('_{}$'.format(name), f)
            if len(match1):
                tag2 = f.replace(match1[0], '')
            if len(match2):
                tag2 = f.replace(match2[0], '')
        # also group source names
        if 'source' in f:
            tag2 = 'source'
            stat = 'TrackerPointing'
        if stat == 'PTStatus':
            groups = ['now', 'min', 'max']
            for g in groups:
                match = re.findall('_{}$'.format(g), f)
                if len(match):
                    tag2 = f.replace(match[0], '')
        # group bench positions
        # require bench_ at beginning
        match = re.findall('^bench', f)
        if len(match):
            tag2 = attr  # y1, y2, etc
            stat = 'Bench'

        # group Mux properties
        if 'Mux' in stat:
            stat = 'muxHousekeeping'
            tag2 = 'ib' + f.split('ib')[-1]

        dict_list += make_lines(
            measurement=stat,
            field=f,
            time=time,
            dat=cal_dat,
            tags={
                'label': tag,
                'label2': tag2
            },
        )

    try:
        now = core.G3Time.Now()
        delay = float(now.time / U.nanosecond - time[-1]) / 1e9
        if delay > 5:
            core.log_info('{} Delay: {} s'.format(now.isoformat(), delay),
                          unit='InfluxDB')
    except RuntimeError:  # sometimes timestamp gets screwed up
        pass

    try:
        client.write_points(dict_list,
                            batch_size=len(dict_list),
                            protocol='line')
    except (InfluxDBClientError, InfluxDBServerError) as v:
        core.log_error('Error writing to database. {}'.format(v),
                       unit='InfluxDB')
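A hedged sketch of wiring WriteDB into a pipeline (host, port, and database are placeholders; InfluxDBClient is the standard influxdb-python client):

from influxdb import InfluxDBClient

client = InfluxDBClient(host='localhost', port=8086, database='gcp')
pipe = core.G3Pipeline()
pipe.Add(core.G3Reader, filename='arc.g3')
pipe.Add(WriteDB, client=client, fields=None)  # fields=None -> write everything
pipe.Run()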
Example No. 11
    def readCalFile(self, calfile=None):
        cal_dict = {}
        if calfile is None:
            self.calibration_file = self.default_calibration_file
        else:
            self.calibration_file = calfile
        try:
            f = open(self.calibration_file, 'r')
        except IOError:
            core.log_warn('3G calibration file ' + self.calibration_file +
                          ' not found.\n')
            return cal_dict
        ninvalid = 0
        for line in f:
            if line[0] != '#' and line[0] != '\n' and len(line) > 0:
                try:
                    line = line.replace('\n', '')
                    fline = list(filter(None,
                                        line.split(' ')))  #list() for py3
                    name = fline[0]
                    linedict = {}
                    # np.float was removed from recent numpy; use the builtin
                    linedict['Offset'] = float(fline[1])
                    linedict['ReciprocalFactor'] = float(fline[2])
                    try:
                        linedict['UnitName'] = fline[3]
                    except IndexError:
                        linedict['UnitName'] = 'None'
                    cal_dict[name] = linedict
                except (ValueError, IndexError):
                    ninvalid += 1
        if ninvalid > 0:
            if ninvalid == 1:
                core.log_warn(
                    'One invalid line found and ignored in calibration file.\n'
                )
            else:
                core.log_warn(
                    str(ninvalid) +
                    ' invalid lines found and ignored in calibration file.\n')
        f.close()

        # now parse into dictionary structure NW uses in ARCExtractor
        cal_dict_orig = cal_dict.copy()
        cal_dict = {}
        origkeys = cal_dict_orig.keys()
        origkeys = sorted(origkeys)
        names = [key for key in origkeys]
        regmaps = [(list(filter(None, (tname).split('.'))))[0]
                   for tname in names]
        uregmaps = np.unique(np.asarray(regmaps))
        for urm in uregmaps:
            cal_dict[urm] = {}
        regblocks = [(list(filter(None, (tname).split('.'))))[1]
                     for tname in names]
        uregblocks, argurb = np.unique(np.asarray(regblocks),
                                       return_index=True)
        for j in np.arange(len(uregblocks)):
            cal_dict[regmaps[argurb[j]]][uregblocks[j]] = {}
        regs = [(list(filter(None, (tname).split('.'))))[2] for tname in names]
        for j in np.arange(len(regs)):
            reg = regs[j]
            if '[' in reg:
                reg_plus_indices = reg.split('[')
                realreg = reg_plus_indices[0]
                indices = ((reg_plus_indices[1]).split(']'))[0]
                if '-' in indices:
                    cal_dict[regmaps[j]][regblocks[j]][realreg] = {}
                    siindices = indices.split('-')
                    iindices = (np.array(siindices)).astype(int)
                    for i in np.arange(np.max(iindices)-np.min(iindices)+1) + \
                            np.min(iindices):
                        cal_dict[regmaps[j]][regblocks[j]]\
                            [realreg][i] = cal_dict_orig[names[j]]
                else:
                    if indices == '0':
                        cal_dict[regmaps[j]][regblocks[j]][realreg] = {}
                    cal_dict[regmaps[j]][regblocks[j]][realreg]\
                        [int(indices)] = cal_dict_orig[names[j]]
            else:
                cal_dict[regmaps[j]][regblocks[j]][reg] = \
                    cal_dict_orig[names[j]]

        return cal_dict
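A hedged sketch of the line format readCalFile expects and the nested structure it returns (the register name is hypothetical):

#   antenna0.tracker.actual[0-1]   0.0000   1000.0   deg
#
# parses into:
#
#   cal_dict['antenna0']['tracker']['actual'][0] == \
#       {'Offset': 0.0, 'ReciprocalFactor': 1000.0, 'UnitName': 'deg'}
#   cal_dict['antenna0']['tracker']['actual'][1]  # same entry, from the 0-1 range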
Example No. 12
def create_g3_cal_file(path,
                       read_from_gcp=True,
                       extra_dict=None,
                       use_extra_info=True,
                       gcp_cal_file=None):
    '''
    Create a G3 register calibration file. Usually reads in
    calibration and units from a GCP cal file then adds extra
    information that isn't in the GCP one. This extra information can
    be handed to the routine as a dict (note format below); otherwise,
    the hard-coded extra info will be used. (Feel free to edit this
    hard-coded info.)
    '''

    cal_dict = {}
    status = 0

    if read_from_gcp:
        try:
            default_calibration_file = \
                os.environ['GCP']+'/control/conf/spt/cal'
        except KeyError:
            default_calibration_file = \
                '/home/sptdat/gcproot/control/conf/spt/cal'
        if gcp_cal_file is None:
            calibration_file = default_calibration_file
        else:
            calibration_file = gcp_cal_file
        try:
            f = open(calibration_file)
        except IOError:
            core.log_warn('GCP calibration file ' + calibration_file +
                          ' not found.\n')
            f = []
        ninvalid = 0
        for line in f:
            if line[0] != '#' and line[0] != '\n' and len(line) > 0:
                line = line.replace('\n', '')
                linedict = {}
                # cal file has spaces AND tabs (and comment characters), whee!
                info_and_comment = list(filter(None, line.split('#')))
                fline = list(filter(None, (info_and_comment[0]).split(' ')))
                if len(fline) == 1 and '\t' in fline[0]:
                    fline = list(
                        filter(None, (info_and_comment[0]).split('\t')))
                name = (fline[0]).replace('*', '0')
                try:
                    linedict['Offset'] = float(fline[1])
                    if '/' in fline[2]:
                        gainfacs = (fline[2]).split('/')
                        gainfac = float(gainfacs[1]) / float(gainfacs[0])
                    else:
                        gainfac = 1. / float(fline[2])
                    linedict['ReciprocalFactor'] = gainfac
                    linedict['UnitName'] = ''
                    # try to figure out units. currently cal file
                    # only has 2 forms, but this is not robust.
                    if len(info_and_comment) > 1:
                        comment = info_and_comment[1]
                        if '->' in comment:
                            comments = list(filter(None,
                                                   (comment).split('->')))
                            unitname = \
                                list(filter(None,(comments[1]).split(' ')))[0]
                            unitname = list(filter(None,
                                                   (unitname).split('/')))[0]
                        else:
                            comments = \
                                list(filter(None,(comment).split('display in ')))
                            unitname = \
                                list(filter(None,(comments[0]).split(' ')))[0]
                            unitname = unitname.replace('\n', '')
                        linedict['UnitName'] = unitname
                        # another huge kludge
                        if 'rate' in name:
                            if '/' not in unitname:
                                unitname = unitname + '/s'
                                linedict['UnitName'] = unitname
                    cal_dict[name] = linedict
                except (ValueError, IndexError):
                    ninvalid += 1
        if ninvalid > 0:
            if ninvalid == 1:
                core.log_warn(
                    'One invalid line found and ignored in calibration file.\n'
                )
            else:
                core.log_warn(
                    str(ninvalid) +
                    ' invalid lines found and ignored in calibration file.\n')
        if hasattr(f, 'close'):
            f.close()

    # now add extra info
    if use_extra_info:
        if extra_dict is None:
            cal_dict['antenna0.acu.az_pos'] = \
                {'ReciprocalFactor':1.,'Offset':0,'UnitName':'deg'}
            cal_dict['antenna0.acu.el_pos'] = \
                {'ReciprocalFactor':1.,'Offset':0,'UnitName':'deg'}
            cal_dict['antenna0.acu.az_err'] = \
                {'ReciprocalFactor':1.,'Offset':0,'UnitName':'deg'}
            cal_dict['antenna0.acu.el_err'] = \
                {'ReciprocalFactor':1.,'Offset':0,'UnitName':'deg'}

            cal_dict['antenna0.scu.temp'] = \
                {'ReciprocalFactor':1.,'Offset':-273.15,'UnitName':'K'}
            cal_dict['array.weather.airTemperature'] = \
                {'ReciprocalFactor':1.,'Offset':-273.15,'UnitName':'K'}
        else:
            try:
                for key in extra_dict:
                    cal_dict[key] = {}
                    cal_dict[key]['Offset'] = extra_dict[key]['Offset']
                    cal_dict[key]['ReciprocalFactor'] = \
                        extra_dict[key]['ReciprocalFactor']
                    cal_dict[key]['UnitName'] = extra_dict[key]['UnitName']
            except (KeyError, TypeError):
                core.log_warn(
                    'extra_dict input format not recognized, ignoring')
                del cal_dict[key]

    # write cal file
    fcal = open(path, 'w')
    fcal.write(
        '#-----------------------------------------------------------------------\n'
    )
    fcal.write('# This is an SPT-3G register calibration file. \n')
    fcal.write('#\n')
    fcal.write('# It was created on ' + time.ctime() + ' by user ' +
               os.getlogin() + '\n')
    fcal.write(
        '#  with the spt3g_software function gcp.CalFile.create_g3_cal_file. \n'
    )
    fcal.write('#\n')
    fcal.write('# Each line has the following format:\n')
    fcal.write('#\n')
    fcal.write('#   register offset 1/factor units\n')
    fcal.write('#\n')
    fcal.write(
        '# (Note that factors are stored as reciprocals, because they are most \n'
    )
    fcal.write('#  often one over a whole number.) \n')
    fcal.write('#\n')
    fcal.write('# The calibrated value of a register is given by:\n')
    fcal.write('#\n')
    fcal.write(
        '#   reg_cal = factor * (offset + register_value) * core.G3Units.$UNIT$\n'
    )
    fcal.write('#\n')
    fcal.write(
        '#-----------------------------------------------------------------------\n'
    )
    fcal.write('# Register      Offset       1/Factor      Units \n')
    fcal.write(
        '#-----------------------------------------------------------------------\n'
    )
    fcal.write('\n')
    ktemp = sorted(cal_dict.keys())
    for key in ktemp:
        kstr = (str(key)).ljust(42)
        gstr = '%e' % cal_dict[key]['ReciprocalFactor']
        ostr = '%10.4f' % cal_dict[key]['Offset']
        fcal.write(kstr + '  ' + ostr + '  ' + gstr + '  ' +
                   cal_dict[key]['UnitName'] + '\n')
    fcal.close()

    return status
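A minimal usage sketch (both paths are hypothetical):

create_g3_cal_file('/tmp/g3_cal.txt',
                   read_from_gcp=True,
                   gcp_cal_file='/path/to/gcp/cal')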
Example No. 13
    def __call__(self, f):
        """Processes a frame.  Only Housekeeping frames will be examined;
        other frames will simply be counted.  All frames are passed
        through unmodified.

        """
        if f.type == core.G3FrameType.EndProcessing:
            self.report_and_reset()
            return [f]

        if f.type != core.G3FrameType.Housekeeping:
            self.stats['n_other'] += 1
            return f

        self.stats['n_hk'] += 1
        vers = f.get('hkagg_version', 0)
        self.stats['versions'][vers] = self.stats['versions'].get(vers, 0) + 1

        if f['hkagg_type'] == so3g.HKFrameType.session:
            session_id = f['session_id']
            if self.session_id is not None:
                if self.session_id != session_id:
                    # note this clears self.session_id.
                    self.report_and_reset()
            if self.session_id is None:
                core.log_info('New HK Session id = %i, timestamp = %i' %
                              (session_id, f['start_time']),
                              unit='HKScanner')
                self.session_id = session_id
                self.stats['n_session'] += 1

        elif f['hkagg_type'] == so3g.HKFrameType.status:
            # Have any providers disappeared?
            now_prov_id = [p['prov_id'].value for p in f['providers']]
            for p, info in self.providers.items():
                if p not in now_prov_id:
                    info['active'] = False

            # New providers?
            for p in now_prov_id:
                info = self.providers.get(p)
                if info is not None:
                    if not info['active']:
                        core.log_warn('prov_id %i came back to life.' % p,
                                      unit='HKScanner')
                        self.stats['concerns']['n_warning'] += 1
                        info['n_active'] += 1
                        info['active'] = True
                else:
                    self.providers[p] = {
                        'active': True,       # Currently active (during processing).
                        'n_active': 1,        # Times this provider id became active.
                        'n_frames': 0,        # Number of data frames.
                        'timestamp_init': f['timestamp'],  # Timestamp of provider appearance.
                        'timestamp_data': None,  # Timestamp of most recent data frame.
                        'ticks': 0,           # Total number of timestamps in all blocks.
                        'span': None,         # (earliest_time, latest_time)
                        'block_streams_map': {},  # Map from field name to block name.
                    }

        elif f['hkagg_type'] == so3g.HKFrameType.data:
            info = self.providers[f['prov_id']]
            vers = f.get('hkagg_version', 0)

            info['n_frames'] += 1
            t_this = f['timestamp']
            if info['timestamp_data'] is None:
                t_ref = info['timestamp_init']
                if t_this < t_ref:
                    core.log_warn('data timestamp (%.1f) precedes provider '
                                  'timestamp by %f seconds.' %
                                  (t_this, t_this - t_ref),
                                  unit='HKScanner')
                    self.stats['concerns']['n_warning'] += 1
            elif t_this <= info['timestamp_data']:
                core.log_warn(
                    'data frame timestamps are not strictly ordered.',
                    unit='HKScanner')
                self.stats['concerns']['n_warning'] += 1
            info['timestamp_data'] = t_this  # update

            t_check = []

            blocks = f['blocks']
            if vers == 0:
                block_timef = lambda block: block.t
                block_itemf = lambda block: [(k, block.data[k])
                                             for k in block.data.keys()]
            elif vers >= 1:
                block_timef = lambda block: np.array(
                    [t.time / core.G3Units.seconds for t in block.times])
                block_itemf = lambda block: [(k, block[k])
                                             for k in block.keys()]

            if vers in [0]:
                block_name = lambda block_idx: list(
                    sorted(blocks[block_idx].data.keys()))[0]
            elif vers in [1]:
                block_name = lambda block_idx: list(
                    sorted(blocks[block_idx].keys()))[0]
            elif vers >= 2:
                block_names = f.get('block_names', [])
                if len(block_names) != len(blocks):
                    # This is a schema error in its own right.
                    core.log_error(
                        'Frame does not have "block_names" entry, '
                        'or it is not the same length as "blocks".',
                        unit='HKScanner')
                    self.stats['concerns']['n_error'] += 1
                    # Fall back on v1 strategy.
                    block_name = lambda block_idx: list(
                        sorted(blocks[block_idx].keys()))[0]
                else:
                    block_name = lambda block_idx: f['block_names'][block_idx]

            for block_idx, b in enumerate(blocks):
                times = block_timef(b)
                if len(times):
                    if info['span'] is None:
                        info['span'] = times[0], times[-1]
                    else:
                        t0, t1 = info['span']
                        info['span'] = min(times[0], t0), max(times[-1], t1)
                    t_check.append(times[0])
                info['ticks'] += len(times)
                bname = block_name(block_idx)
                for k, v in block_itemf(b):
                    if len(v) != len(times):
                        core.log_error(
                            'Field "%s" has %i samples but .t has %i samples.'
                            % (k, len(v), len(times)))
                        self.stats['concerns']['n_error'] += 1
                    # Make sure field has a block_stream registered.
                    if k not in info['block_streams_map']:
                        info['block_streams_map'][k] = bname
                    if info['block_streams_map'][k] != bname:
                        core.log_error(
                            'Field "%s" appeared in block_name %s '
                            'and later in block_name %s.' %
                            (k, info['block_streams_map'][k], bname))
                        self.stats['concerns']['n_error'] += 1
            if len(t_check) and abs(min(t_check) - t_this) > 60:
                core.log_warn(
                    'data frame timestamp (%.1f) does not correspond to '
                    'data timestamp vectors (%s) .' % (t_this, t_check),
                    unit='HKScanner')
                self.stats['concerns']['n_warning'] += 1

        else:
            core.log_warn('Weird hkagg_type: %i' % f['hkagg_type'],
                          unit='HKScanner')
            self.stats['concerns']['n_warning'] += 1

        return [f]
Example No. 14
    def Process(self, f, index_info=None):
        """Processes a frame.  Only Housekeeping frames will be examined;
        other frames will simply be counted.  All frames are passed
        through unmodified.  The index_info will be stored along with
        a description of the frame's data; see the .process_file
        function.

        """
        self.counter += 1
        if index_info is None:
            index_info = {'counter': self.counter}

        if f.type == core.G3FrameType.EndProcessing:
            return [f]

        if f.type != core.G3FrameType.Housekeeping:
            return f

        if f['hkagg_type'] == so3g.HKFrameType.session:
            session_id = f['session_id']
            if self.session_id is not None:
                if self.session_id != session_id:
                    self.flush()  # Note that this sets self.session_id=None.
            if self.session_id is None:
                core.log_info('New HK Session id = %i, timestamp = %i' %
                              (session_id, f['start_time']),
                              unit='HKScanner')
                self.session_id = session_id

        elif f['hkagg_type'] == so3g.HKFrameType.status:
            # If a provider has disappeared, flush its information into a
            # FieldGroup.
            prov_cands = [_HKProvider.from_g3(p) for p in f['providers']]
            to_flush = list(self.providers.keys())  # prov_ids...
            for p in prov_cands:
                if p.prov_id in to_flush:
                    to_flush.remove(p.prov_id)  # no, don't.
                else:
                    self.providers[p.prov_id] = p
            for prov_id in to_flush:
                self.flush([prov_id])

        elif f['hkagg_type'] == so3g.HKFrameType.data:
            # Data frame -- merge info for this provider.
            prov = self.providers[f['prov_id']]
            representatives = [block['fields'][0] for block in prov.blocks]
            for b in f['blocks']:
                fields = b.data.keys()
                if len(b.t) == 0 or len(fields) == 0:
                    continue
                for block_index, rep in enumerate(representatives):
                    if rep in fields:
                        break
                else:
                    block_index = len(prov.blocks)
                    prov.blocks.append({
                        'fields': fields,
                        'start': b.t[0],
                        'index_info': []
                    })
                # To ensure that the last sample is actually included
                # in the semi-open intervals we use to track frames,
                # the "end" time has to be after the final sample.
                prov.blocks[block_index]['end'] = b.t[-1] + SPAN_BUFFER_SECONDS
                prov.blocks[block_index]['index_info'].append(index_info)

        else:
            core.log_warn('Weird hkagg_type: %i' % f['hkagg_type'],
                          unit='HKScanner')
        return [f]
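A hedged sketch covering the scanner examples above: each is a G3 pipeline module, so one pass over a housekeeping file might look like this (class and file names are assumptions based on the HKScanner log unit):

pipe = core.G3Pipeline()
pipe.Add(core.G3Reader, filename='hk.g3')
pipe.Add(HKScanner())
pipe.Run()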