Example #1
0
 def ping(self):
     """Send a watchdog ping message to the GCP pager process.

     Invoked by `run` at regular intervals whenever `data_valid`
     returns True.  In simulation mode (`self.sim`) no network traffic
     is generated, but the ping timestamp is still refreshed.
     """
     try:
         if not self.sim:
             conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
             conn.settimeout(self.timeout)
             conn.connect((self.host, self.port))
             conn.send('watchdog {}'.format(self.name).encode())
             reply = conn.recv(4096)
             if reply:
                 core.log_debug(
                     'Sent watchdog ping, got response {}'.format(reply.decode()),
                     unit=self.unit,
                 )
             conn.close()
     except Exception as err:
         core.log_error('Error sending watchdog ping: {}'.format(err), unit=self.unit)
         # try again in ten seconds
         self.last_ping = time.time() - self.interval + 10
     else:
         # Success (or simulated): record the time of this ping.
         core.log_info('Sent watchdog ping', unit=self.unit)
         self.last_ping = time.time()
    def __init__(self, port=50029, chunk_secs=1):
        """Set up a non-blocking listening socket for slowDAQ requests.

        Arguments
        ---------
        port : int
            TCP port to listen on for slowdaq connections.
        chunk_secs : int
            Length, in seconds, of each data chunk accumulated before
            sending (truncated to an integer).
        """
        # Store passed attributes
        self._port = port
        self._chunk_sec = int(chunk_secs)

        # Definitions for reading data strings
        self._endian = '<'
        self._uint_str = 'Q'
        self._double_str = 'd'
        # String to describe data length for 'struct' module
        self._len_str = self._endian + 'i'
        # String for packing values from G3DoubleObjects
        # as long long ints
        self._pack_str = self._endian + 'q'

        # Establish socket connection to slowDAQ publisher
        self.socket = socket.socket()
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # Bind via self._port for consistency with the log message below.
        self.socket.bind(('', self._port))
        self.socket.listen(5)
        self.socket.setblocking(False)
        # Fixed: the message used to contain a stray trailing apostrophe
        # ("port %d'").
        core.log_info(
            "Listening for requests from slowdaq on port %d" % (self._port),
            unit='CHWPSlowDAQTee')

        # Store and send data as a dictionary
        self.data = {}
        self.time_chunk = None
Example #3
0
    def __call__(self, f):
        """Processes a frame.  Only Housekeeping frames will be manipulated;
        others will be passed through untouched.

        """
        if f.type == core.G3FrameType.EndProcessing:
            return self.flush() + [f]

        if f.type != core.G3FrameType.Housekeeping:
            return f

        output = []

        if f['hkagg_type'] == so3g.HKFrameType.session:
            session_id = f['session_id']
            if self.session_id is not None:
                if self.session_id != session_id:
                    # Session changed: flush the old one.  flush() clears
                    # self.session_id, so the block below registers and
                    # emits the new session frame exactly once.  (The
                    # frame was previously also appended here, causing it
                    # to be emitted twice.)
                    output += self.flush()
                else:
                    pass  # Don't re-emit an on-going session frame.
            if self.session_id is None:
                core.log_info('New HK Session id = %i, timestamp = %i' %
                              (session_id, f['start_time']),
                              unit='HKReframer')
                self.session_id = session_id
                output.append(f)

        elif f['hkagg_type'] == so3g.HKFrameType.status:
            # Only issue status if something has changed.
            changes = False
            # Flush any providers that are now expired.
            now_prov_id = [p['prov_id'].value for p in f['providers']]
            for p in list(self.providers.keys()):
                if p not in now_prov_id:
                    output += self.flush(p)
                    changes = True
            # Create bundlers for any new providers.
            for p in now_prov_id:
                if p not in self.providers:
                    t0 = f['timestamp']
                    self.providers[p] = _HKProvBundle(t0, self.target)
                    changes = True
            if changes:
                output.append(f)

        elif f['hkagg_type'] == so3g.HKFrameType.data:
            # Buffer the data frame; emit rebundled frames once the
            # provider's bundle has accumulated enough samples.
            fb = self.providers[f['prov_id']]
            fb.add(f)
            if fb.ready():
                output += fb.rebundle()

        else:
            raise ValueError('Invalid hkagg_type')

        return output
Example #4
0
    def __init__(self, port=50010, verbose=False):
        """Open a non-blocking listening socket serving GCP housekeeping data."""

        # Make some noise at startup.
        core.log_info("Initialize gcp.GCPHousekeepingTee on port %d" % port,
                      unit='GCPHousekeepingTee')

        # Pre-pack an empty housekeeping map so a blob is always available.
        self.hkblob = self.PackHKToGCP(dfmux.DfMuxHousekeepingMap())

        # Non-blocking listener with address reuse enabled.
        listener = socket.socket()
        listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        listener.bind(('', port))
        listener.listen(25)
        listener.setblocking(False)
        self.socket = listener

        # Flag for printing debugging statements
        self.verbose = verbose
Example #5
0
    def __init__(self, port=50020, verbose=False, bolometers=None):
        '''
        Send data from the given list of bolometer logical IDs to the GCP.

        Arguments
        ---------
        port : int
            TCP port to listen on for GCP connections.
        verbose : bool
            Verbosity flag (not referenced in this constructor).
        bolometers : list of str or None
            Bolometer logical IDs to serve.  Defaults to an empty list.
        '''
        # Fixed: the default used to be a mutable list literal (shared
        # across instances).  Copy the caller's sequence so later external
        # mutation cannot affect this instance.
        self.bololist = list(bolometers) if bolometers is not None else []

        core.log_info('Listening for requests from GCP on port %d' % port, unit='GCPBoloDataTee')
        core.log_info('Selected bolometers: %s' % self.bololist, unit='GCPBoloDataTee')

        self.socket = socket.socket()
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)

        self.socket.bind(('', port))
        self.socket.listen(5)
        self.socket.setblocking(False)
        self.data = {}
Example #6
0
    def flush(self, prov_id=None):
        """Flush all buffers; empty the provider list; reset session_id.
        Returns a list of flushed output frames.

        If prov_id is specified, then only that provider is flushed
        and popped from provider list.

        """
        # Targeted flush: pop just this provider and rebundle it.
        if prov_id is not None:
            return self.providers.pop(prov_id).rebundle(True)

        # Full flush: rebundle everything, then reset all session state.
        core.log_info('Flushing session id %i' % (self.session_id),
                      unit='HKReframer')
        frames = []
        for bundle in self.providers.values():
            frames.extend(bundle.rebundle(True))
        self.providers = {}
        self.session_id = None
        return frames
Example #7
0
 def ping(self):
     """Send a 'watchdog daq' command to the pager server port.

     On success, record the time of this ping; on failure, log the
     error and schedule a retry ten seconds from now.
     """
     try:
         conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
         conn.settimeout(self.timeout)
         conn.connect((self.host, self.port))
         conn.send('watchdog daq'.encode())
         reply = conn.recv(4096)
         if reply:
             core.log_debug("Sent DAQ watchdog ping, got response {}".format(reply.decode()),
                            unit='GCPWatchdog')
         conn.close()
     except Exception as err:
         core.log_error("Error sending watchdog ping: {}".format(err), unit='GCPWatchdog')
         # try again in ten seconds
         self.last_ping = time.time() - self.interval + 10
     else:
         core.log_info('Sent DAQ watchdog ping', unit='GCPWatchdog')
         self.last_ping = time.time()
Example #8
0
    def Process(self, f, index_info=None):
        """Processes a frame.  Only Housekeeping frames will be examined;
        other frames will simply be counted.  All frames are passed
        through unmodified.  The index_info will be stored along with
        a description of the frame's data; see the .process_file
        function.

        """
        # Count every frame; the counter doubles as the default index_info.
        self.counter += 1
        if index_info is None:
            index_info = {'counter': self.counter}

        # Translate to the target schema (v2) before inspection.
        f = self.translator(f)
        assert(len(f) == 1)
        f = f[0]

        if f.type == core.G3FrameType.EndProcessing:
            return [f]

        if f.type != core.G3FrameType.Housekeeping:
            return [f]

        vers = f.get('hkagg_version', 0)
        assert(vers == 2)

        if f['hkagg_type'] == so3g.HKFrameType.session:
            session_id = f['session_id']
            if self.session_id is not None:
                if self.session_id != session_id:
                    self.flush()  # Note that this sets self.session_id=None.
            if self.session_id is None:
                core.log_info('New HK Session id = %i, timestamp = %i' %
                              (session_id, f['start_time']), unit='HKScanner')
                self.session_id = session_id

        elif f['hkagg_type'] == so3g.HKFrameType.status:
            # If a provider has disappeared, flush its information into a
            # FieldGroup.
            prov_cands = [_HKProvider.from_g3(p) for p in f['providers']]
            to_flush = list(self.providers.keys())  # prov_ids...
            for p in prov_cands:
                if p.prov_id in to_flush:
                    to_flush.remove(p.prov_id) # no, don't.
                else:
                    self.providers[p.prov_id] = p
            for prov_id in to_flush:
                self.flush([prov_id])

        elif f['hkagg_type'] == so3g.HKFrameType.data:
            # Data frame -- merge info for this provider.
            # (Removed an unused 'representatives = prov.blocks.keys()'
            # left over from the v0 variant of this method.)
            prov = self.providers[f['prov_id']]

            for bidx, (bname, b) in enumerate(zip(f['block_names'], f['blocks'])):
                assert(isinstance(b, core.G3TimesampleMap))
                if bname not in prov.blocks:
                    prov.blocks[bname] = {'fields': list(b.keys()),
                                          'start': b.times[0].time / core.G3Units.seconds,
                                          'index_info': []}
                # To ensure that the last sample is actually included
                # in the semi-open intervals we use to track frames,
                # the "end" time has to be after the final sample.
                prov.blocks[bname]['end'] = b.times[-1].time / core.G3Units.seconds + SPAN_BUFFER_SECONDS
                ii = {'block_index': bidx,
                      'timestamp': b.times[0].time,
                      'count': len(b.times)}
                ii.update(index_info)
                prov.blocks[bname]['index_info'].append(ii)

        else:
            core.log_warn('Weird hkagg_type: %i' % f['hkagg_type'],
                          unit='HKScanner')
        return [f]
Example #9
0
    def __call__(self, f):
        """Processes a frame.  Only Housekeeping frames will be examined;
        other frames will simply be counted.  All frames are passed
        through unmodified.

        """
        # End of stream: emit the accumulated report, then pass the frame on.
        if f.type == core.G3FrameType.EndProcessing:
            self.report_and_reset()
            return [f]

        if f.type != core.G3FrameType.Housekeeping:
            self.stats['n_other'] += 1
            return f

        self.stats['n_hk'] += 1

        if f['hkagg_type'] == so3g.HKFrameType.session:
            session_id = f['session_id']
            if self.session_id is not None:
                if self.session_id != session_id:
                    # A different session supersedes the old one; report on
                    # the old session before tracking the new one.
                    self.report_and_reset(
                    )  # note this does clear self.session_id.
            if self.session_id is None:
                core.log_info('New HK Session id = %i, timestamp = %i' %
                              (session_id, f['start_time']),
                              unit='HKScanner')
                self.session_id = session_id
                self.stats['n_session'] += 1

        elif f['hkagg_type'] == so3g.HKFrameType.status:
            # Have any providers disappeared?
            now_prov_id = [p['prov_id'].value for p in f['providers']]
            for p, info in self.providers.items():
                if p not in now_prov_id:
                    info['active'] = False

            # New providers?
            for p in now_prov_id:
                info = self.providers.get(p)
                if info is not None:
                    if not info['active']:
                        # A previously dropped provider id re-registered.
                        core.log_warn('prov_id %i came back to life.' % p,
                                      unit='HKScanner')
                        self.stats['concerns']['n_warning'] += 1
                        info['n_active'] += 1
                        info['active'] = True
                else:
                    self.providers[p] = {
                        'active':
                        True,  # Currently active (during processing).
                        'n_active':
                        1,  # Number of times this provider id became active.
                        'n_frames': 0,  # Number of data frames.
                        'timestamp_init':
                        f['timestamp'],  # Timestamp of provider appearance
                        'timestamp_data':
                        None,  # Timestamp of most recent data frame.
                        'ticks':
                        0,  # Total number of timestamps in all blocks.
                        'span': None,  # (earliest_time, latest_time)
                    }

        elif f['hkagg_type'] == so3g.HKFrameType.data:
            info = self.providers[f['prov_id']]
            info['n_frames'] += 1
            t_this = f['timestamp']
            if info['timestamp_data'] is None:
                # First data frame from this provider: sanity-check against
                # the provider's registration timestamp.
                t_ref = info['timestamp_init']
                if t_this < t_ref:
                    core.log_warn('data timestamp (%.1f) precedes provider '
                                  'timestamp by %f seconds.' %
                                  (t_this, t_this - t_ref),
                                  unit='HKScanner')
                    self.stats['concerns']['n_warning'] += 1
            elif t_this <= info['timestamp_data']:
                core.log_warn(
                    'data frame timestamps are not strictly ordered.',
                    unit='HKScanner')
                self.stats['concerns']['n_warning'] += 1
            info['timestamp_data'] = t_this  # update

            # Collect each block's first timestamp so we can check it
            # roughly agrees with the frame-level timestamp below.
            # NOTE(review): this assumes the v0 block schema (.t / .data
            # attributes) -- confirm the upstream frames match.
            t_check = []
            for b in f['blocks']:
                if len(b.t):
                    if info['span'] is None:
                        info['span'] = b.t[0], b.t[-1]
                    else:
                        t0, t1 = info['span']
                        info['span'] = min(b.t[0], t0), max(b.t[-1], t1)
                    t_check.append(b.t[0])
                info['ticks'] += len(b.t)
                for k, v in b.data.items():
                    # Every field vector must align with the block's times.
                    if len(v) != len(b.t):
                        core.log_error(
                            'Field "%s" has %i samples but .t has %i samples.'
                            % (k, len(v), len(b.t)))
                        self.stats['concerns']['n_error'] += 1
            if len(t_check) and abs(min(t_check) - t_this) > 60:
                core.log_warn(
                    'data frame timestamp (%.1f) does not correspond to '
                    'data timestamp vectors (%s) .' % (t_this, t_check),
                    unit='HKScanner')
                self.stats['concerns']['n_warning'] += 1

        else:
            core.log_warn('Weird hkagg_type: %i' % f['hkagg_type'],
                          unit='HKScanner')
            self.stats['concerns']['n_warning'] += 1

        return [f]
Example #10
0
 def report_and_reset(self):
     """Log a summary of the current session's stats and providers,
     then clear the session id so a new session can be tracked."""
     summary = ('Report for session_id %i:\n' % self.session_id
                + str(self.stats) + '\n' + str(self.providers)
                + '\nEnd report.')
     core.log_info(summary, unit='HKScanner')
     self.session_id = None
Example #11
0
def WriteDB(fr, client, fields=None):
    '''
    Write points to the database for each field

    Arguments
    ---------
    fr :
        G3 frame; only GcpSlow frames are processed, all others are
        silently ignored.
    client :
        InfluxDB client
    fields :
        Which gcp fields to add to database. See parse_field for options. If
        None, add all.
    '''
    from influxdb.exceptions import InfluxDBClientError
    from influxdb.exceptions import InfluxDBServerError

    if fr.type != core.G3FrameType.GcpSlow:
        return
    all_fields = build_field_list(fr)
    if fields is None:
        fields = all_fields.keys()
    dict_list = []
    # NOTE(review): 'dat' and 'time' are only bound when field_dat has 2,
    # 3 or 4 elements -- any other length would hit a NameError below.
    for f in fields:
        field_dat = all_fields[f]
        if len(field_dat) == 4:
            # (status register, attribute, index, unit)
            stat, attr, ind, unit = field_dat
            try:
                dat = getattr(fr[stat], attr)[ind]
                time = getattr(fr[stat], 'time')
            except AttributeError:
                # OnlinePointingModel
                dat = fr[stat][attr][ind]
                time = fr[stat]['time']
        elif len(field_dat) == 3:
            # (status register, attribute, unit)
            stat, attr, unit = field_dat
            if stat not in fr:
                # Field only exists in live data stream
                continue
            try:
                dat = getattr(fr[stat], attr)
            except AttributeError:
                try:
                    dat = fr[stat][attr]
                except KeyError:  # Field only exists in live data stream
                    continue
            if 'Bench' in stat:  # funny time field for bench positions
                time = fr['BenchSampleTime']
            elif 'Mux' in stat:
                time = fr['MuxTime']
            elif stat in ['CryoStatus', 'Weather', 'PTStatus']:
                time = fr['{}Time'.format(stat)]
            else:
                try:
                    time = getattr(fr[stat], 'time')
                except AttributeError:
                    time = fr[stat]['time']
        elif len(field_dat) == 2:
            # (status register, unit)
            stat, unit = field_dat
            try:
                dat = fr[stat]
            except KeyError:  #eg, no obsid
                core.log_warn('No key {}'.format(stat), unit='InfluxDB')
                continue
            try:
                time = getattr(fr[stat], 'time')
            except AttributeError as err:
                # Fall back to the tracker's utc timestamps.
                time = [tm for tm in fr['antenna0']['tracker']['utc'][0]]

        # InfluxDB wants time in nanoseconds since the UNIX epoch in UTC
        try:
            time = [x.time / U.nanosecond for x in np.atleast_1d(time)]
        except AttributeError:
            # Raw numeric timestamps; normalize through G3Time.
            time = [
                core.G3Time(t0).time / U.nanosecond
                for t0 in np.atleast_1d(time)
            ]
        if dat is None:
            core.log_warn('{} dat is None'.format(f), unit='InfluxDB')
            continue
        dat = np.atleast_1d(dat)
        try:
            # Probe that dat is a sized sequence; skip odd scalar values.
            dlen = len(dat)
        except TypeError:
            # sometimes source_name is a weird non-none value
            continue
        if unit is not None:
            if unit == 'C':
                # Celsius: convert from Kelvin by shifting the zero point.
                zeropt_K = 273.15
                cal_dat = dat / U.K - zeropt_K
            else:
                cal_dat = dat / unit
        else:
            cal_dat = dat
        try:
            # Drop fields containing NaNs entirely.
            if np.any(np.isnan(cal_dat)):
                continue
        except TypeError:
            pass
        if 'heat' not in f:
            tag = f
        else:
            tag = f.replace('heat_', '')

        # for fields that have az/el components
        az_el_names = [
            'az', 'el', 'az', 'el', 'ra', 'dec', 'x', 'y', 'hr_angle', 'sin',
            'cos', 'lat'
        ]
        tag2 = f
        for name in az_el_names:
            # require name_ at beginning or _name at end
            match1 = re.findall('^{}_'.format(name), f)
            match2 = re.findall('_{}$'.format(name), f)
            if len(match1):
                tag2 = f.replace(match1[0], '')
            if len(match2):
                tag2 = f.replace(match2[0], '')
        # also group source names
        if 'source' in f:
            tag2 = 'source'
            stat = 'TrackerPointing'
        if stat == 'PTStatus':
            groups = ['now', 'min', 'max']
            for g in groups:
                match = re.findall('_{}$'.format(g), f)
                if len(match):
                    tag2 = f.replace(match[0], '')
        # group bench positions
        # require bench_ at beginning
        match = re.findall('^bench', f)
        if len(match):
            tag2 = attr  # y1, y2, etc
            stat = 'Bench'

        # group Mux properties
        if 'Mux' in stat:
            stat = 'muxHousekeeping'
            tag2 = 'ib' + f.split('ib')[-1]

        dict_list += make_lines(
            measurement=stat,
            field=f,
            time=time,
            dat=cal_dat,
            tags={
                'label': tag,
                'label2': tag2
            },
        )

    # Log when the newest sample lags real time by more than 5 seconds.
    try:
        now = core.G3Time.Now()
        delay = float(now.time / U.nanosecond - time[-1]) / 1e9
        if delay > 5:
            core.log_info('{} Delay: {} s'.format(now.isoformat(), delay),
                          unit='InfluxDB')
    except RuntimeError:  # sometimes timestamp gets screwed up
        pass

    try:
        client.write_points(dict_list,
                            batch_size=len(dict_list),
                            protocol='line')
    except (InfluxDBClientError, InfluxDBServerError) as v:
        core.log_error('Error writing to database. {}'.format(v),
                       unit='InfluxDB')
Example #12
0
    def Process(self, f):
        """Translates one frame to the target schema.  Irrelevant frames are
        passed through unmodified.

        Args:
          f: a G3Frame

        Returns:
          A list containing only the translated frame.  G3Pipeline
          compatibility would permit us to return a single frame here,
          instead of a length-1 list.  But we also sometimes call
          Process outside of a G3Pipeline, where a consistent output
          type is desirable.  Returning lists is most
          future-compatible; consumers that want to assume length-1
          should assert it to be true.

        """
        if f.type == core.G3FrameType.EndProcessing:
            core.log_info(str(self.stats))
            return [f]

        if f.type != core.G3FrameType.Housekeeping:
            self.stats['n_other'] += 1
            return [f]

        # It is an HK frame.
        orig_version = f.get('hkagg_version', 0)

        self.stats['n_hk'] += 1
        self.stats['versions'][orig_version] = self.stats['versions'].get(orig_version, 0) + 1

        if orig_version > self.target_version and not self.future_tolerant:
            # Fixed: the message previously read self.TARGET_VERSION, which
            # is inconsistent with the self.target_version attribute used
            # everywhere else in this method.
            raise ValueError(
                ('Translator to v%i encountered v%i, but future_tolerant=False.')
                % (self.target_version, orig_version))

        if orig_version >= self.target_version:
            return [f]

        # Always update the version, even if that's our only change...
        if 'hkagg_version' in f:
            if 'hkagg_version_orig' not in f:
                f['hkagg_version_orig'] = orig_version
            del f['hkagg_version']
        f['hkagg_version'] = self.target_version

        # No difference in Session/Status for v0, v1, v2.
        if f.get('hkagg_type') != so3g.HKFrameType.data:
            return [f]

        if self.target_version == 0:
            return [f]

        if orig_version == 0:
            # Pop the data blocks out of the frame.
            orig_blocks = f.pop('blocks')
            f['blocks'] = core.G3VectorFrameObject()

            # Now process the data blocks: v0 blocks carry .t and .data,
            # v1+ uses G3TimesampleMap with G3Time entries.
            for block in orig_blocks:
                new_block = core.G3TimesampleMap()
                new_block.times = so3g.hk.util.get_g3_time(block.t)
                for k in block.data.keys():
                    v = block.data[k]
                    new_block[k] = core.G3VectorDouble(v)
                f['blocks'].append(new_block)

        if self.target_version == 1:
            return [f]

        if orig_version <= 1:
            # Add 'block_names'.  Since we don't want to start
            # caching Block Stream information, just compute a good
            # block name based on the alphabetically first field in
            # the block.
            # (Removed an unused 'orig_block_names = []' assignment.)
            block_names = []
            for block in f['blocks']:
                field_names = list(sorted(block.keys()))
                block_names.append('block_for_%s' % field_names[0])
                assert(len(block_names[-1]) < 256)  # What have you done.
            f['block_names'] = core.G3VectorString(block_names)

        return [f]
Example #13
0
    def __call__(self, f):
        """Processes a frame.  Only Housekeeping frames will be examined;
        other frames will simply be counted.  All frames are passed
        through unmodified.

        """
        if f.type == core.G3FrameType.EndProcessing:
            self.report_and_reset()
            return [f]

        if f.type != core.G3FrameType.Housekeeping:
            self.stats['n_other'] += 1
            return f

        self.stats['n_hk'] += 1
        vers = f.get('hkagg_version', 0)
        self.stats['versions'][vers] = self.stats['versions'].get(vers, 0) + 1

        if f['hkagg_type'] == so3g.HKFrameType.session:
            session_id = f['session_id']
            if self.session_id is not None:
                if self.session_id != session_id:
                    self.report_and_reset(
                    )  # note this does clear self.session_id.
            if self.session_id is None:
                core.log_info('New HK Session id = %i, timestamp = %i' %
                              (session_id, f['start_time']),
                              unit='HKScanner')
                self.session_id = session_id
                self.stats['n_session'] += 1

        elif f['hkagg_type'] == so3g.HKFrameType.status:
            # Have any providers disappeared?
            now_prov_id = [p['prov_id'].value for p in f['providers']]
            for p, info in self.providers.items():
                if p not in now_prov_id:
                    info['active'] = False

            # New providers?
            for p in now_prov_id:
                info = self.providers.get(p)
                if info is not None:
                    if not info['active']:
                        core.log_warn('prov_id %i came back to life.' % p,
                                      unit='HKScanner')
                        self.stats['concerns']['n_warning'] += 1
                        info['n_active'] += 1
                        info['active'] = True
                else:
                    self.providers[p] = {
                        'active':
                        True,  # Currently active (during processing).
                        'n_active':
                        1,  # Number of times this provider id became active.
                        'n_frames': 0,  # Number of data frames.
                        'timestamp_init':
                        f['timestamp'],  # Timestamp of provider appearance
                        'timestamp_data':
                        None,  # Timestamp of most recent data frame.
                        'ticks':
                        0,  # Total number of timestamps in all blocks.
                        'span': None,  # (earliest_time, latest_time)
                        'block_streams_map':
                        {},  # Map from field name to block name.
                    }

        elif f['hkagg_type'] == so3g.HKFrameType.data:
            info = self.providers[f['prov_id']]
            vers = f.get('hkagg_version', 0)

            info['n_frames'] += 1
            t_this = f['timestamp']
            if info['timestamp_data'] is None:
                t_ref = info['timestamp_init']
                if t_this < t_ref:
                    core.log_warn('data timestamp (%.1f) precedes provider '
                                  'timestamp by %f seconds.' %
                                  (t_this, t_this - t_ref),
                                  unit='HKScanner')
                    self.stats['concerns']['n_warning'] += 1
            elif t_this <= info['timestamp_data']:
                core.log_warn(
                    'data frame timestamps are not strictly ordered.',
                    unit='HKScanner')
                self.stats['concerns']['n_warning'] += 1
            info['timestamp_data'] = t_this  # update

            t_check = []

            # Schema-dependent accessors: v0 blocks carry .t / .data,
            # v1+ blocks are G3TimesampleMap-like (times + mapping).
            blocks = f['blocks']
            if vers == 0:
                block_timef = lambda block: block.t
                block_itemf = lambda block: [(k, block.data[k])
                                             for k in block.data.keys()]
            elif vers >= 1:
                # Bug fix: the comprehension previously iterated over the
                # enclosing loop variable 'b' (late-binding closure)
                # instead of the lambda's own 'block' argument.
                block_timef = lambda block: np.array(
                    [t.time / core.G3Units.seconds for t in block.times])
                block_itemf = lambda block: [(k, block[k])
                                             for k in block.keys()]

            if vers in [0]:
                block_name = lambda block_idx: list(
                    sorted(blocks[block_idx].data.keys()))[0]
            if vers in [1]:
                block_name = lambda block_idx: list(
                    sorted(blocks[block_idx].keys()))[0]
            elif vers >= 2:
                block_names = f.get('block_names', [])
                if len(block_names) != len(blocks):
                    # This is a schema error in its own right.
                    core.log_error(
                        'Frame does not have "block_names" entry, '
                        'or it is not the same length as "blocks".',
                        unit='HKScanner')
                    self.stats['concerns']['n_error'] += 1
                    # Fall back on v1 strategy.
                    block_name = lambda block_idx: list(
                        sorted(blocks[block_idx].keys()))[0]
                else:
                    block_name = lambda block_idx: f['block_names'][block_idx]

            for block_idx, b in enumerate(blocks):
                times = block_timef(b)
                if len(times):
                    if info['span'] is None:
                        info['span'] = times[0], times[-1]
                    else:
                        t0, t1 = info['span']
                        info['span'] = min(times[0], t0), max(times[-1], t1)
                    t_check.append(times[0])
                info['ticks'] += len(times)
                bname = block_name(block_idx)
                for k, v in block_itemf(b):
                    if len(v) != len(times):
                        core.log_error(
                            'Field "%s" has %i samples but .t has %i samples.'
                            % (k, len(v), len(times)))
                        self.stats['concerns']['n_error'] += 1
                    # Make sure field has a block_stream registered.
                    if k not in info['block_streams_map']:
                        info['block_streams_map'][k] = bname
                    if info['block_streams_map'][k] != bname:
                        core.log_error(
                            'Field "%s" appeared in block_name %s '
                            'and later in block_name %s.' %
                            (k, info['block_streams_map'][k], bname))
                        self.stats['concerns']['n_error'] += 1
            if len(t_check) and abs(min(t_check) - t_this) > 60:
                core.log_warn(
                    'data frame timestamp (%.1f) does not correspond to '
                    'data timestamp vectors (%s) .' % (t_this, t_check),
                    unit='HKScanner')
                self.stats['concerns']['n_warning'] += 1

        else:
            core.log_warn('Weird hkagg_type: %i' % f['hkagg_type'],
                          unit='HKScanner')
            self.stats['concerns']['n_warning'] += 1

        return [f]
Example #14
0
    def Process(self, f, index_info=None):
        """Processes a frame.  Only Housekeeping frames will be examined;
        other frames will simply be counted.  All frames are passed
        through unmodified.  The index_info will be stored along with
        a description of the frame's data; see the .process_file
        function.

        """
        # Count every frame; the counter doubles as the default index_info.
        self.counter += 1
        if index_info is None:
            index_info = {'counter': self.counter}

        if f.type == core.G3FrameType.EndProcessing:
            return [f]

        if f.type != core.G3FrameType.Housekeeping:
            return f

        if f['hkagg_type'] == so3g.HKFrameType.session:
            session_id = f['session_id']
            if self.session_id is not None:
                if self.session_id != session_id:
                    self.flush()  # Note that this sets self.session_id=None.
            if self.session_id is None:
                core.log_info('New HK Session id = %i, timestamp = %i' %
                              (session_id, f['start_time']),
                              unit='HKScanner')
                self.session_id = session_id

        elif f['hkagg_type'] == so3g.HKFrameType.status:
            # If a provider has disappeared, flush its information into a
            # FieldGroup.
            prov_cands = [_HKProvider.from_g3(p) for p in f['providers']]
            to_flush = list(self.providers.keys())  # prov_ids...
            for p in prov_cands:
                if p.prov_id in to_flush:
                    to_flush.remove(p.prov_id)  # no, don't.
                else:
                    self.providers[p.prov_id] = p
            for prov_id in to_flush:
                self.flush([prov_id])

        elif f['hkagg_type'] == so3g.HKFrameType.data:
            # Data frame -- merge info for this provider.
            # One representative field per known block stream, used to
            # match each incoming block to an existing stream.
            prov = self.providers[f['prov_id']]
            representatives = [block['fields'][0] for block in prov.blocks]
            for b in f['blocks']:
                # NOTE(review): assumes the v0 block schema (.data / .t);
                # also note 'fields' is stored as a dict_keys view, not a
                # list -- confirm downstream consumers accept that.
                fields = b.data.keys()
                if len(b.t) == 0 or len(fields) == 0:
                    continue
                for block_index, rep in enumerate(representatives):
                    if rep in fields:
                        break
                else:
                    # No existing stream matched; register a new block.
                    block_index = len(prov.blocks)
                    prov.blocks.append({
                        'fields': fields,
                        'start': b.t[0],
                        'index_info': []
                    })
                # To ensure that the last sample is actually included
                # in the semi-open intervals we use to track frames,
                # the "end" time has to be after the final sample.
                prov.blocks[block_index]['end'] = b.t[-1] + SPAN_BUFFER_SECONDS
                prov.blocks[block_index]['index_info'].append(index_info)

        else:
            core.log_warn('Weird hkagg_type: %i' % f['hkagg_type'],
                          unit='HKScanner')
        return [f]