Example #1
    def __call__(self, f):
        if f.type == FT.Calibration and f['cal_type'] == 'focal_plane':
            self.focal_plane = f

        if f.type != FT.Scan:
            return [f]

        # As long as we have a focal_plane, we can create signal vectors.
        if self.focal_plane is None:
            return [f]
        f['signal'] = core.G3TimestreamMap()

        # Determine time samples we will be covering.
        if self.start_time is None:
            first = f['vertex_enc_raw'].t[0] * core.G3Units.sec
            self.start_time = core.G3Time(
                np.ceil(first / self.tick_step) * self.tick_step)
        # ... and we will end at the last tick boundary covered by the encoder data.
        last = core.G3Time(f['vertex_enc_raw'].t[-1] * core.G3Units.sec)
        n = int((last.time - self.start_time.time) / self.tick_step)
        end_time = core.G3Time(self.start_time.time + n * self.tick_step)

        z = np.zeros(n)
        for k in self.focal_plane['signal_names']:
            f['signal'][k] = core.G3Timestream(z)

        # You can't broadcast-set the start and end time unless the
        # elements are already populated.
        f['signal'].start = self.start_time
        f['signal'].stop = end_time

        self.start_time = end_time
        return [f]
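The __call__ above appears to be the processing hook of a stateful pipeline module. Below is a minimal sketch of the surrounding class it assumes; the class name, the tick_rate default, and the FT alias are assumptions, not taken from the source.

import numpy as np
from spt3g import core

FT = core.G3FrameType  # assumed alias used by the __call__ above

class SignalInjector:
    """Hypothetical wrapper class for the __call__ above."""
    def __init__(self, tick_rate=200.):
        self.focal_plane = None
        self.start_time = None
        # Assumed: spacing between signal samples, in G3 time units.
        self.tick_step = core.G3Units.sec / tick_rate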
Example #2
    def stream_fake_data(self, session, params=None):
        """stream_fake_data()

        **Process** - Process for streaming fake data. This will queue up
        G3Frames full of fake data to be sent to lyrebird.
        """
        self._run_fake_stream = True
        ndets = self.fp.num_dets
        chans = np.arange(ndets)
        frame_start = time.time()
        while self._run_fake_stream:
            time.sleep(2)
            frame_stop = time.time()
            ts = np.arange(frame_start, frame_stop, 1. / self.target_rate)
            frame_start = frame_stop
            nframes = len(ts)

            data_out = np.random.normal(0, 1, (nframes, ndets))
            data_out += np.sin(2 * np.pi * ts[:, None] + .2 * chans[None, :])

            for t, d in zip(ts, data_out):
                fr = core.G3Frame(core.G3FrameType.Scan)
                fr['idx'] = 0
                fr['data'] = core.G3VectorDouble(d)
                fr['timestamp'] = core.G3Time(t * core.G3Units.s)
                self.out_queue.put(fr)

                fr = core.G3Frame(core.G3FrameType.Scan)
                fr['idx'] = 1
                fr['data'] = core.G3VectorDouble(np.sin(d))
                fr['timestamp'] = core.G3Time(t * core.G3Units.s)
                self.out_queue.put(fr)

        return True, "Stopped fake stream process"
Example #3
 def test_from_numpy_array(self):
     t0 = core.G3Time('2019-01-01T12:30:00')
     timestamps = np.linspace(t0.time, t0.time + 1e7 * SEC, 3000)
     vectime = core.G3VectorTime(timestamps)
     assert (vectime[0] == t0)
     assert (vectime[-1] == core.G3Time(timestamps[-1]))
     assert (len(vectime) == len(timestamps))
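This test assumes a module-level SEC constant; presumably it is defined as below (an assumption, consistent with G3Time.time counting in G3 time units):

from spt3g import core

SEC = core.G3Units.seconds  # assumed; G3Time.time is an integer count in these units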
Example #4
def get_g3_time(unix_time):
    """Convert a double or numpy array of floats to G3Time or
    G3VectorTime."""
    src = None
    if isinstance(unix_time, core.G3VectorDouble):
        src = (np.array(unix_time) * core.G3Units.seconds).astype('int')
    elif isinstance(unix_time, np.ndarray) and unix_time.ndim == 1:
        src = (unix_time * core.G3Units.seconds).astype('int')
    if src is not None:
        return core.G3VectorTime([core.G3Time(t) for t in src])
    return core.G3Time(int(unix_time * core.G3Units.seconds))
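A quick sketch of how get_g3_time dispatches on input type; the timestamps are arbitrary placeholders:

import numpy as np

t = get_g3_time(1577836800.0)                  # float -> G3Time
tv = get_g3_time(np.arange(3) + 1577836800.0)  # 1-D array -> G3VectorTime
print(t, len(tv))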
Example #5
    def start_stream(self, session, params=None):

        if params is None:
            params = {}

        delay = params.get('delay', 1)
        ts_len = params.get('ts_len', 100)

        # Write the status frame
        f = core.G3Frame(core.G3FrameType.Housekeeping)
        f['session_id'] = 0
        f['start_time'] = time.time()
        self.writer.Process(f)

        self.is_streaming = True
        frame_num = 0
        while self.is_streaming:

            f = core.G3Frame(core.G3FrameType.Scan)

            t1 = time.time()
            t0 = t1 - delay

            ts = np.arange(t0, t1, 1 / self.freq)

            f['session_id'] = 0
            f['frame_num'] = frame_num
            f['data'] = core.G3TimestreamMap()

            for k, c in self.channels.items():

                fparams = copy.copy(c)
                bg = np.random.normal(0, fparams.get('stdev', 0), len(ts))
                if fparams['type'] == 'const':
                    xs = bg + fparams['val']
                elif fparams['type'] in ['lin', 'linear']:
                    xs = bg + ts * fparams['slope'] + fparams.get('offset', 0)
                else:
                    raise ValueError("Unknown channel type: %s" % fparams['type'])
                # Wrap into [-pi, pi)
                xs = np.mod(xs + np.pi, 2 * np.pi) - np.pi

                f['data'][k] = core.G3Timestream(xs)
                f['data'][k].start = core.G3Time(t0 * core.G3Units.sec)
                f['data'][k].stop = core.G3Time(t1 * core.G3Units.sec)

            self.log.info("Writing G3 Frame")
            self.writer.Process(f)
            frame_num += 1
            time.sleep(delay)
        print("Writing EndProcessingFrame")
        f = core.G3Frame(core.G3FrameType.EndProcessing)
        self.writer.Process(f)
        return True, "Finished streaming"
Example #6
    def start_stream(self, session, params=None):
        """
        Task to stream fake detector data as G3Frames

        Args:
            frame_rate (float, optional):
                Frequency [Hz] at which G3Frames are sent over the network.
                Defaults to 1 frame per second.
            sample_rate (float, optional):
                Sample rate [Hz] for each channel.
                Defaults to 10 Hz.
        """
        if params is None:
            params = {}

        frame_rate = params.get('frame_rate', 1.)
        sample_rate = params.get('sample_rate', 10.)

        f = core.G3Frame(core.G3FrameType.Observation)
        f['session_id'] = 0
        f['start_time'] = time.time()
        self.writer.Process(f)

        frame_num = 0
        self.is_streaming = True
        while self.is_streaming:

            frame_start = time.time()
            time.sleep(1. / frame_rate)
            frame_stop = time.time()
            times = np.arange(frame_start, frame_stop, 1. / sample_rate)

            f = core.G3Frame(core.G3FrameType.Scan)
            f['session_id'] = 0
            f['frame_num'] = frame_num
            f['data'] = core.G3TimestreamMap()

            for i, chan in enumerate(self.channels):
                ts = core.G3Timestream([chan.read(t) for t in times])
                ts.start = core.G3Time(frame_start * core.G3Units.sec)
                ts.stop = core.G3Time(frame_stop * core.G3Units.sec)
                f['data'][str(i)] = ts

            self.writer.Process(f)
            self.log.info("Writing frame...")
            frame_num += 1

        return True, "Finished streaming"
Example #7
def addinfo(fr):
    global n
    if fr.type == core.G3FrameType.EndProcessing:
        return
    fr['time'] = core.G3Time(int(time.time() * core.G3Units.s))
    fr['count'] = n
    n += 1
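A module function like addinfo is normally attached to a G3Pipeline. A hedged usage sketch; the file paths are placeholders, and the global counter must be initialized first:

from spt3g import core

n = 0  # global counter consumed by addinfo

pipe = core.G3Pipeline()
pipe.Add(core.G3Reader, filename='input.g3')   # placeholder input path
pipe.Add(addinfo)
pipe.Add(core.G3Writer, filename='output.g3')  # placeholder output path
pipe.Run()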
Example #8
def noise_scan_frames(n_frames=3,
                      n_dets=20,
                      input='signal',
                      n_samps=200,
                      samp_rate=0.005 * core.G3Units.second,
                      t_start=core.G3Time('2020-1-1T00:00:00')):
    """
    Generate a list of frames filled with noise data and nothing else. 
    
    Args:
        n_frames (int): number of frames to make
        n_dets (int): number of detectors per frame
        input (str): name of G3TimestreamMap for detectors, should be some form of 'signal'
        n_samps (int): number of samples per detector timestream
        samp_rate (float): spacing between samples, in G3 time units
            (an interval, despite the name)
        t_start (G3Time): start time of the set of frames
    """
    frame_list = []
    for n in range(n_frames):
        f = core.G3Frame()
        f.type = core.G3FrameType.Scan
        tsm = core.G3TimestreamMap()
        z = np.zeros((n_samps, ))
        for d in enumerate_det_id(n_dets):
            tsm[d] = core.G3Timestream(z)
        tsm.start = t_start
        tsm.stop = t_start + n_samps * samp_rate
        tsm = MakeNoiseData().apply(tsm)
        f[input] = tsm
        t_start += n_samps * samp_rate
        frame_list.append(f)
    return frame_list
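noise_scan_frames relies on two helpers that are not shown here. The stand-ins below are plausible sketches consistent only with how the helpers are called above; the real implementations may differ:

import numpy as np
from spt3g import core

def enumerate_det_id(n_dets):
    # Assumed naming scheme: yield one detector id per detector.
    for i in range(n_dets):
        yield 'det%04i' % i

class MakeNoiseData:
    """Stand-in noise filler: replaces each timestream with white noise."""
    def apply(self, tsm):
        out = core.G3TimestreamMap()
        for k in tsm.keys():
            out[k] = core.G3Timestream(np.random.normal(size=len(tsm[k])))
        # Start/stop can only be set once the map is populated.
        out.start = tsm.start
        out.stop = tsm.stop
        return out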
Example #9
def addinfo(fr):
    global n
    if fr.type != core.G3FrameType.Timepoint:
        return
    fr['time'] = core.G3Time(int(time.time() * core.G3Units.s))
    fr['count'] = n
    n += 1
Example #10
 def test_to_numpy_array(self):
     t0 = core.G3Time('2019-01-01T12:30:00')
     timestamps = np.linspace(t0.time,
                              t0.time + 1e7 * SEC,
                              3000,
                              dtype='int64')
     vectime = core.G3VectorTime(timestamps)
     assert ((np.asarray(vectime) == timestamps).all())
Example #11
 def tag_frame(self, fr):
     fr['frame_num'] = self.frame_num
     fr['session_id'] = self.session_id
     fr['sostream_id'] = self.stream_id
     fr['sostream_version'] = SOSTREAM_VERSION
     fr['time'] = core.G3Time(time.time() * core.G3Units.s)
     self.frame_num += 1
     return fr
Example #12
def g3_cast(data, time=False):
    """
    Casts a generic datatype into a corresponding G3 type. With:
        int   -> G3Int
        str   -> G3String
        float -> G3Double

    and lists of type X will go to G3VectorX. If ``time`` is set to True, will
    convert to G3Time or G3VectorTime with the assumption that ``data`` consists
    of unix timestamps.

    Args:
        data (int, str, float, or list):
            Generic data to be converted to a corresponding G3Type.
        time (bool, optional):
            If True, will assume data contains unix timestamps and try to cast
            to G3Time or G3VectorTime.

    Returns:
        g3_data:
            Corresponding G3 datatype.
    """
    is_list = isinstance(data, list)
    if is_list:
        dtype = type(data[0])
        if not all(isinstance(d, dtype) for d in data):
            raise TypeError("Data list contains varying types!")
    else:
        dtype = type(data)
    if dtype not in _g3_casts.keys():
        raise TypeError("g3_cast does not support type {}. Type must "
                        "be one of {}".format(dtype, _g3_casts.keys()))
    if is_list:
        if time:
            return core.G3VectorTime(
                list(map(lambda t: core.G3Time(t * core.G3Units.s), data)))
        else:
            cast = _g3_list_casts[type(data[0])]
            return cast(data)
    else:
        if time:
            return core.G3Time(data * core.G3Units.s)
        else:
            cast = _g3_casts[type(data)]
            return cast(data)
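g3_cast assumes module-level cast tables. Below is a definition consistent with the mapping stated in the docstring; the exact tables in the source may differ:

from spt3g import core

_g3_casts = {
    int: core.G3Int,
    str: core.G3String,
    float: core.G3Double,
}
_g3_list_casts = {
    int: core.G3VectorInt,
    str: core.G3VectorString,
    float: core.G3VectorDouble,
}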
Example #13
def addinfo(fr):
    if fr.type == core.G3FrameType.EndProcessing:
        return
    global n
    fr['time'] = core.G3Time(int(time.time() * core.G3Units.s))
    fr['count'] = n
    fr2 = core.G3Frame(core.G3FrameType.Housekeeping)
    fr2['count'] = n
    n += 1
    return [fr, fr2]
Example #14
def get_v2_stream():
    """Generate some example HK data, in schema version 2.

    Returns a list of frames constituting a valid version 2 HK stream.

    """
    # Create something to help us track the aggregator session.
    hksess = so3g.hk.HKSessionHelper(session_id=1234,
                                     hkagg_version=2,
                                     description="Test HK data.")

    # Register a data provider.
    prov_id = hksess.add_provider(
        description='Fake data for the real world.')

    # Start the stream -- write the initial session and status frames.
    frames = [
        hksess.session_frame(),
        hksess.status_frame(),
    ]

    # Now make a data frame.
    f = hksess.data_frame(prov_id=prov_id)

    # Add some data blocks.
    hk = core.G3TimesampleMap()
    hk.times = core.G3VectorTime([core.G3Time(i*core.G3Units.seconds) for i in [0, 1, 2, 3, 4]])
    hk['speed'] = core.G3VectorDouble([1.2, 1.2, 1.2, 1.2, 1.2])
    f['blocks'].append(hk)
    f['block_names'].append('group0')

    hk = core.G3TimesampleMap()
    hk.times = core.G3VectorTime([core.G3Time(i*core.G3Units.seconds) for i in [0, 1, 2, 3, 4]])
    hk['position'] = core.G3VectorInt([1, 2, 3, 4, 5])
    hk['mode'] = core.G3VectorString(['going', 'going', 'going', 'going', 'gone/'])
    f['blocks'].append(hk)
    f['block_names'].append('group1')

    frames.append(f)
    return frames
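The returned frames form a complete version 2 HK stream, so they can be written straight to disk. A short usage sketch; the output path is a placeholder:

from spt3g import core

writer = core.G3Writer(filename='hk_v2_example.g3')  # placeholder path
for frame in get_v2_stream():
    writer.Process(frame)
writer.Process(core.G3Frame(core.G3FrameType.EndProcessing))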
Example #15
def get_test_block(length, keys=['a', 'b', 'c', 'd'], offset=0, ordered=True):
    type_cycle = [(core.G3VectorDouble, float), (core.G3VectorInt, int),
                  (core.G3VectorString, str), (core.G3VectorBool, bool)]
    t0 = core.G3Time('2019-01-01T12:30:00') + offset * SEC
    m = core.G3TimesampleMap()
    times = np.arange(length)
    if not ordered:
        np.random.shuffle(times)
    m.times = core.G3VectorTime(t0 + times * SEC)
    for i, k in enumerate(keys):
        y = (np.random.uniform(size=length) * 100).astype(int)
        constructor, cast_func = type_cycle[i % len(type_cycle)]
        vect = constructor(list(map(cast_func, y)))
        m[k] = vect
    return m
Example #16
def g3_to_array(g3file, verbose=False):
    """
    Takes a G3 file output from the SMuRF archiver and reads to a numpy array.

    Parameters
    ----------
    g3file : full path to the G3 file
    verbose : OPTIONAL choice for verbose output (-v, --verbosity)

    Returns
    -------
    times : array of G3Time objects
    data : array of arrays, where each internal array is a SMuRF channel
    """
    frames = [fr for fr in core.G3File(g3file)]

    data = []

    frametimes = []
    for frame in frames:
        if frame.type == core.G3FrameType.Scan:
            frametime = frame['data'].times()
            frametimes.append(frametime)

    if not frametimes:
        warnings.warn('No Scan frames found in file')

    strtimes = np.hstack(frametimes)

    times = []
    for strt in strtimes:
        t = core.G3Time(strt).time / core.G3Units.s
        times.append(t)
    times = np.asarray(times)

    channums = []

    i = 0
    while i < len(frames):
        if verbose:
            print('Trying frame %i' % i)
        frametype = frames[i].type
        if frametype == core.G3FrameType.Scan:
            for chan in frames[i]['data'].keys():
                channums.append(int(chan.strip('r')))
            break
        else:
            i += 1
    if verbose:
        print('Channel numbers obtained')

    channums.sort()
    for ch in channums:
        if verbose:
            print('Adding channel %s' % ch)
        chdata = []
        for frame in frames:
            if frame.type == core.G3FrameType.Scan:
                framedata = frame['data']['r' + format(ch, "04")]
                chdata.append(framedata)
        chdata_all = np.hstack(chdata)
        data.append(chdata_all)

    biases = []
    biasnums = []
    for num in frames[i]['tes_biases'].keys():
        biasnums.append(int(num))
    biasnums.sort()
    for b in biasnums:
        if verbose:
            print('Adding bias number %i' % b)
        bias = []
        for frame in frames:
            if frame.type == core.G3FrameType.Scan:
                biasdata = frame['tes_biases'][str(b)]
                bias.append(biasdata)
        bias_all = np.hstack(bias)
        biases.append(bias_all)
    biases = np.asarray(biases)
    data = np.asarray(data)
    return times, data, biases
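Typical usage of g3_to_array; the path is a placeholder:

times, data, biases = g3_to_array('/path/to/smurf_archive.g3', verbose=True)
print(times.shape, data.shape, biases.shape)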
Example #17
    def HousekeepingFromJSON(cls, dat):
        '''
        Build HKBoardInfo object from a JSON blob returned by the
        _dump_housekeeping call
        '''
        # Board-global quantities
        boardhk = HkBoardInfo()
        if 'is128x' in dat:
            boardhk.is128x = dat['is128x']
        else:
            boardhk.is128x = False
        year = dat['timestamp']['y']
        if year == 0:
            # It probably isn't 1900
            systime = time.gmtime()
            # Check for New Year's, assuming no more than 24 hours clock slew
            if dat['timestamp']['d'] == 1 and systime.tm_yday >= 365:
                year = systime.tm_year + 1
            elif dat['timestamp']['d'] >= 365 and systime.tm_yday == 1:
                year = systime.tm_year - 1
            else:
                year = systime.tm_year
            year -= 2000
        boardhk.timestamp = core.G3Time(y=year,
                                        d=dat['timestamp']['d'],
                                        h=dat['timestamp']['h'],
                                        m=dat['timestamp']['m'],
                                        s=dat['timestamp']['s'],
                                        ss=dat['timestamp']['ss'])

        boardhk.timestamp_port = str(dat['timestamp_port'])
        boardhk.serial = str(dat['serial'])
        boardhk.fir_stage = dat['fir_stage']
        for i in dat['currents'].items():
            boardhk.currents[str(i[0])] = i[1]
        for i in dat['voltages'].items():
            boardhk.voltages[str(i[0])] = i[1]
        for i in dat['temperatures'].items():
            boardhk.temperatures[str(i[0])] = i[1]

        # Mezzanines
        for n, mezz in enumerate(dat['mezzanines']):
            mezzhk = HkMezzanineInfo()
            mezzhk.present = mezz['present']
            mezzhk.power = mezz['power']
            if mezzhk.present:
                mezzhk.serial = str(mezz['ipmi']['product']['serial_number'])
                mezzhk.part_number = str(mezz['ipmi']['product']['part_number'])
                mezzhk.revision = str(mezz['ipmi']['product']['version_number'])
                for i in mezz['currents'].items():
                    mezzhk.currents[str(i[0])] = i[1]
                for i in mezz['voltages'].items():
                    mezzhk.voltages[str(i[0])] = i[1]

            if mezzhk.present and mezzhk.power:
                mezzhk.temperature = mezz['temperature']
                # these parameters are not in the 64x housekeeping tuber
                mezzhk.squid_heater = mezz.get('squid_heater', 0.0)
                mezzhk.squid_controller_power = mezz.get('squid_controller_power', False)
                mezzhk.squid_controller_temperature = mezz.get('squid_controller_temperature', 0.0)

            # Modules
            for m, mod in enumerate(mezz['modules']):
                modhk = HkModuleInfo()
                modhk.routing = str(mod['routing'][0])
                modhk.module_number = m+1
                if mezzhk.present and mezzhk.power:
                    if 'gains' in mod:
                        modhk.carrier_gain = mod['gains']['carrier']
                        modhk.nuller_gain = mod['gains']['nuller']
                    if 'overload' in mod:
                        modhk.carrier_railed = mod['overload']['carrier']
                        modhk.nuller_railed = mod['overload']['nuller']
                        modhk.demod_railed = mod['overload']['demod']
                    if 'squid_current_bias' in mod:
                        modhk.squid_current_bias = mod['squid_current_bias']
                    if 'squid_flux_bias' in mod:
                        modhk.squid_flux_bias = mod['squid_flux_bias']
                    if 'squid_feedback' in mod:
                        modhk.squid_feedback = str(mod['squid_feedback'])

                if 'squid_tuning' in mod and mod['squid_tuning'] is not None:
                    modhk.squid_state = str(mod['squid_tuning']['state'])
                    modhk.squid_transimpedance = (
                        mod['squid_tuning']['transimpedance']
                        if mod['squid_tuning']['transimpedance'] is not None
                        else numpy.nan)
                    modhk.squid_p2p = (
                        mod['squid_tuning']['p2p']
                        if mod['squid_tuning']['p2p'] is not None
                        else numpy.nan)

                for k, chan in enumerate(mod['channels']):
                    chanhk = HkChannelInfo()
                    chanhk.channel_number = k+1
                    chanhk.carrier_amplitude = chan['carrier_amplitude']
                    chanhk.nuller_amplitude = chan['nuller_amplitude']
                    chanhk.dan_gain = chan['dan_gain']
                    chanhk.dan_streaming_enable = chan['dan_streaming_enable']
                    if boardhk.is128x:
                        chanhk.carrier_frequency = chan['frequency']*core.G3Units.Hz
                        chanhk.demod_frequency = chan['frequency']*core.G3Units.Hz
                    else:
                        chanhk.carrier_frequency = chan['carrier_frequency']*core.G3Units.Hz
                        chanhk.demod_frequency = chan['demod_frequency']*core.G3Units.Hz
                        chanhk.dan_accumulator_enable = chan['dan_accumulator_enable']
                        chanhk.dan_feedback_enable = chan['dan_feedback_enable']
                    if 'dan_railed' in chan:
                        chanhk.dan_railed = chan['dan_railed']
                    if 'tuning' in chan and chan['tuning'] is not None:
                        chanhk.state = str(chan['tuning']['state'])
                        if ('rlatched' in chan['tuning'] and 
                            chan['tuning']['rlatched'] is not None):
                            chanhk.rlatched = chan['tuning']['rlatched']
                        if ('rnormal' in chan['tuning'] and 
                            chan['tuning']['rnormal'] is not None):
                            chanhk.rnormal = chan['tuning']['rnormal']
                        if ('rfrac_achieved' in chan['tuning'] and 
                            chan['tuning']['rfrac_achieved'] is not None):
                            chanhk.rfrac_achieved = chan['tuning']['rfrac_achieved']
                        if ('loopgain' in chan['tuning'] and
                            chan['tuning']['loopgain'] is not None):
                            chanhk.loopgain = chan['tuning']['loopgain']

                    modhk.channels[k+1] = chanhk
                mezzhk.modules[m+1] = modhk
            boardhk.mezz[n+1] = mezzhk
    
        return boardhk
Example #18
def addinfo(fr):
    global n
    fr['time'] = core.G3Time(int(time.time() * core.G3Units.s))
    fr['count'] = n
    n += 1
Example #19
    def HousekeepingFromJSON(cls, dat):
        '''
        Build HKBoardInfo object from the union of the JSON blobs
        returned by the classic_housekeeping and class_dfmux_dump
        Tuber calls
        '''
        # Board-global quantities
        boardhk = HkBoardInfo()

        if dat['ts']['port'] == 'IRIG test':
            boardhk.timestamp = core.G3Time(dat['ts']['s'] * core.G3Units.s)
        else:
            year = dat['ts']['y']
            if year == 0:
                # It probably isn't 1900
                systime = time.gmtime()
                # Check for New Year's, assuming no more than 24 hours clock slew
                if dat['ts']['d'] == 1 and systime.tm_yday >= 365:
                    year = systime.tm_year + 1
                elif dat['ts']['d'] >= 365 and systime.tm_yday == 1:
                    year = systime.tm_year - 1
                else:
                    year = systime.tm_year
                year -= 2000
            boardhk.timestamp = core.G3Time(y=year,
                                            d=dat['ts']['d'],
                                            h=dat['ts']['h'],
                                            m=dat['ts']['m'],
                                            s=dat['ts']['s'],
                                            ss=dat['ts']['ss'])

        boardhk.timestamp_port = str(dat['ts']['port'])
        boardhk.serial = 'N/A'
        boardhk.fir_stage = dat['fir_stage']
        for i in dat['voltages']['mb'].items():
            boardhk.voltages[str(i[0])] = i[1]
        for i in dat['temperatures']['mb'].items():
            boardhk.temperatures[str(i[0])] = i[1]

        # Mezzanines
        for mezz in [1, 2]:
            mezzhk = HkMezzanineInfo()
            mezzhk.power = dat['voltages']['mezz%d' % mezz]['v3p'] > 0
            mezzhk.present = True
            mezzhk.serial = 'N/A'
            mezzhk.part_number = 'SPTpol Mezz'
            mezzhk.revision = 'N/A'
            for i in dat['voltages']['mezz%d' % mezz].items():
                if isinstance(i[1], float):
                    mezzhk.voltages[str(i[0])] = i[1]

            # Modules
            for mod in [1, 2]:
                wire = (mezz - 1) * 2 + mod

                # XXX: SQUID data (bias point, FLL) not in HK call!
                # XXX: SQUID tuning state
                modhk = HkModuleInfo()
                modhk.carrier_gain = dat['mezz_gains']['wire%d' %
                                                       wire]['carrier']
                modhk.nuller_gain = dat['mezz_gains']['wire%d' %
                                                      wire]['nuller']
                modhk.demod_gain = dat['mezz_gains']['wire%d' % wire]['demod']

                modhk.carrier_railed = dat['voltages']['mezz%d' % mezz][
                    'overload_dmfs_car_%s' % (['a', 'b'][mod - 1])]
                modhk.nuller_railed = dat['voltages']['mezz%d' % mezz][
                    'overload_dmfs_nul_%s' % (['a', 'b'][mod - 1])]
                modhk.demod_railed = dat['voltages']['mezz%d' %
                                                     mezz]['overload_dmfd_%s' %
                                                           (['a', 'b'
                                                             ][mod - 1])]

                modhk.module_number = mod

                for chan in range(1,
                                  dat['config']['dmfd_channels_per_wire'] + 1):
                    chanhk = HkChannelInfo()
                    chanhk.channel_number = chan
                    chanhk.dan_gain = dat['dan']['gain'][wire - 1][chan - 1]
                    chanhk.dan_accumulator_enable = dat['dan'][
                        'accumulator_enable'][wire - 1][chan - 1]
                    chanhk.dan_feedback_enable = dat['dan']['feedback_enable'][
                        wire - 1][chan - 1]
                    chanhk.dan_streaming_enable = dat['dan'][
                        'streaming_enable'][wire - 1][chan - 1]
                    chanhk.dan_railed = dat['dan']['accumulator_railed'][
                        wire - 1][chan - 1]

                    # Channel params: (freq, phase, amp)
                    carrier = dat['dmfs']['carrier'][wire - 1][chan - 1]
                    nuller = dat['dmfs']['nuller'][wire - 1][chan - 1]
                    # Demod params: (freq, phase)
                    demod = dat['dmfd']['demod'][wire - 1][chan - 1]
                    chanhk.carrier_amplitude = carrier[2] / (2.**23 - 1.)
                    chanhk.nuller_amplitude = nuller[2] / (2.**23 - 1.)
                    chanhk.carrier_frequency = carrier[0] * 25.e6 / (
                        2.**32 - 1) * core.G3Units.Hz
                    chanhk.demod_frequency = demod[0] * 25.e6 / (
                        2.**32 - 1) * core.G3Units.Hz

                    modhk.channels[chan] = chanhk
                mezzhk.modules[mod] = modhk
            boardhk.mezz[mezz] = mezzhk

        return boardhk
Example #20
for i in range(10):
    # Number of samples
    n = int(halfscan / v_az / dt)
    # Vector of unix timestamps
    t = frame_time + dt * np.arange(n)
    # Vector of az and el
    az = v_az * dt * np.arange(n)
    if i % 2:
        az = -az
    el = az * 0 + 50.

    # Construct a "block", which is a named G3TimesampleMap.
    block = core.G3TimesampleMap()
    block.times = core.G3VectorTime(
        [core.G3Time(_t * core.G3Units.s) for _t in t])
    block['az'] = core.G3VectorDouble(az)
    block['el'] = core.G3VectorDouble(el)

    # Create an output data frame template associated with this
    # provider.
    frame = session.data_frame(prov_id)

    # Add the block and block name to the frame, and write it.
    frame['block_names'].append('pointing')
    frame['blocks'].append(block)
    writer.Process(frame)

    # For next iteration.
    frame_time += n * dt
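The loop above assumes scan parameters and an open HK session. A hedged sketch of the setup it needs; every value below is a placeholder:

import time
import so3g
from spt3g import core

halfscan = 10.            # deg swept per half-scan (placeholder)
v_az = 1.                 # az scan speed, deg/s (placeholder)
dt = 0.005                # sample spacing, s (placeholder)
frame_time = time.time()  # unix time of the first sample

session = so3g.hk.HKSessionHelper(session_id=1234,
                                  hkagg_version=2,
                                  description='pointing demo')
prov_id = session.add_provider(description='pointing')
writer = core.G3Writer(filename='pointing.g3')  # placeholder path
writer.Process(session.session_frame())
writer.Process(session.status_frame())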
Example #21
def cache_to_frames(tod,
                    start_frame,
                    n_frames,
                    frame_offsets,
                    frame_sizes,
                    common=None,
                    detector_fields=None,
                    flag_fields=None,
                    detector_map="detectors",
                    flag_map="flags",
                    units=None):
    """Gather all data from the distributed cache for a single frame.

    Args:
        tod (toast.TOD): instance of a TOD class.
        start_frame (int): the first frame index.
        n_frames (int): the number of frames.
        frame_offsets (list): list of the first samples of all frames.
        frame_sizes (list): list of the number of samples in each frame.
        common (tuple): (cache name, G3 type, frame name) of each common
            field.
        detector_fields (tuple): (cache name, frame name) of each detector
            field.
        flag_fields (tuple): (cache name, frame name) of each flag field.
        detector_map (str): the name of the frame timestream map.
        flag_map (str): the name of the frame flag map.
        units: G3 units of the detector data.

    """
    # Local sample range
    local_first = tod.local_samples[0]
    nlocal = tod.local_samples[1]

    # The process grid
    detranks, sampranks = tod.grid_size
    rankdet, ranksamp = tod.grid_ranks

    # Helper function:
    # For a given timestream, the gather is done across the
    # process row which contains the specific detector, or across
    # the first process row for common telescope data.
    def gather_field(prow, fld, indx, cacheoff, ncache):
        gproc = 0
        gdata = None

        # We are going to allreduce this later, so that every process
        # knows the dimensions of the field.
        allnnz = 0

        if rankdet == prow:
            #print("  proc {} doing gather of {}".format(tod.mpicomm.rank, fld), flush=True)
            # This process is in the process row that has this field,
            # participate in the gather operation.
            pdata = None
            # Find the data type and shape from the cache object
            mtype = None
            ref = tod.cache.reference(fld)
            nnz = 1
            if (len(ref.shape) > 1) and (ref.shape[1] > 0):
                nnz = ref.shape[1]
            if ref.dtype == np.dtype(np.float64):
                mtype = MPI.DOUBLE
            elif ref.dtype == np.dtype(np.int64):
                mtype = MPI.INT64_T
            elif ref.dtype == np.dtype(np.int32):
                mtype = MPI.INT32_T
            elif ref.dtype == np.dtype(np.uint8):
                mtype = MPI.UINT8_T
            else:
                msg = "Cannot gather cache field {} of type {}"\
                    .format(fld, ref.dtype)
                raise RuntimeError(msg)
            #print("field {}:  proc {} has nnz = {}".format(fld, tod.mpicomm.rank, nnz), flush=True)
            pz = 0
            if cacheoff is not None:
                pdata = ref.flatten()[nnz * cacheoff:nnz * (cacheoff + ncache)]
                pz = nnz * ncache
            else:
                pdata = np.zeros(0, dtype=ref.dtype)

            psizes = tod.grid_comm_row.gather(pz, root=0)
            disp = None
            totsize = None
            if ranksamp == 0:
                #print("Gathering field {} with type {}".format(fld, mtype), flush=True)
                # We are the process collecting the gathered data.
                gproc = tod.mpicomm.rank
                allnnz = nnz
                # Compute the displacements into the receive buffer.
                disp = [0]
                for ps in psizes[:-1]:
                    last = disp[-1]
                    disp.append(last + ps)
                totsize = np.sum(psizes)
                # allocate receive buffer
                gdata = np.zeros(totsize, dtype=ref.dtype)
                #print("Gatherv psizes = {}, disp = {}".format(psizes, disp), flush=True)

            #print("field {}:  proc {} start Gatherv".format(fld, tod.mpicomm.rank), flush=True)
            tod.grid_comm_row.Gatherv(pdata, [gdata, psizes, disp, mtype],
                                      root=0)
            #print("field {}:  proc {} finish Gatherv".format(fld, tod.mpicomm.rank), flush=True)

            del disp
            del psizes
            del pdata
            del ref

        # Now send this data to the root process of the whole communicator.
        # Only one process (the first one in process row "prow") has data
        # to send.

        # Create a unique message tag
        mtag = 10 * indx

        #print("  proc {} hit allreduce of gproc".format(tod.mpicomm.rank), flush=True)
        # All processes find out which one did the gather
        gproc = tod.mpicomm.allreduce(gproc, MPI.SUM)
        # All processes find out the field dimensions
        allnnz = tod.mpicomm.allreduce(allnnz, MPI.SUM)
        #print("  proc {} for field {}, gproc = {}".format(tod.mpicomm.rank, fld, gproc), flush=True)

        #print("field {}:  proc {}, gatherproc = {}, allnnz = {}".format(fld, tod.mpicomm.rank, gproc, allnnz), flush=True)

        rdata = None
        if gproc == 0:
            if gdata is not None:
                if allnnz == 1:
                    rdata = gdata
                else:
                    rdata = gdata.reshape((-1, allnnz))
        else:
            # Data not yet on rank 0
            if tod.mpicomm.rank == 0:
                # Receive data from the first process in this row
                #print("  proc {} for field {}, recv type".format(tod.mpicomm.rank, fld), flush=True)
                rtype = tod.mpicomm.recv(source=gproc, tag=(mtag + 1))

                #print("  proc {} for field {}, recv size".format(tod.mpicomm.rank, fld), flush=True)
                rsize = tod.mpicomm.recv(source=gproc, tag=(mtag + 2))

                #print("  proc {} for field {}, recv data".format(tod.mpicomm.rank, fld), flush=True)
                rdata = np.zeros(rsize, dtype=np.dtype(rtype))
                tod.mpicomm.Recv(rdata, source=gproc, tag=mtag)

                # Reshape if needed
                if allnnz > 1:
                    rdata = rdata.reshape((-1, allnnz))

            elif (tod.mpicomm.rank == gproc):
                # Send our data
                #print("  proc {} for field {}, send {} samples of {}".format(tod.mpicomm.rank, fld, len(gdata), gdata.dtype.char), flush=True)

                #print("  proc {} for field {}, send type with tag = {}".format(tod.mpicomm.rank, fld, mtag+1), flush=True)
                tod.mpicomm.send(gdata.dtype.char, dest=0, tag=(mtag + 1))

                #print("  proc {} for field {}, send size with tag = {}".format(tod.mpicomm.rank, fld, mtag+2), flush=True)
                tod.mpicomm.send(len(gdata), dest=0, tag=(mtag + 2))

                #print("  proc {} for field {}, send data with tag {}".format(tod.mpicomm.rank, fld, mtag), flush=True)
                tod.mpicomm.Send(gdata, 0, tag=mtag)
        return rdata

    # For efficiency, we are going to gather the data for all frames at once.
    # Then we will split those up when doing the write.

    # Frame offsets relative to the memory buffers we are gathering
    fdataoff = [0]
    for f in frame_sizes[:-1]:
        last = fdataoff[-1]
        fdataoff.append(last + f)

    # The list of frames; only on the root process.
    fdata = None
    if tod.mpicomm.rank == 0:
        fdata = [c3g.G3Frame(c3g.G3FrameType.Scan) for f in range(n_frames)]
    else:
        fdata = [None for f in range(n_frames)]

    # Compute the overlap of all frames with the local process.  We want
    # to find the full sample range over which this process overlaps the
    # total set of frames.

    cacheoff = None
    ncache = 0

    for f in range(n_frames):
        # Compute overlap of the frame with the local samples.
        fcacheoff, froff, nfr = local_frame_indices(local_first, nlocal,
                                                    frame_offsets[f],
                                                    frame_sizes[f])
        #print("proc {}:  frame {} has cache off {}, fr off {}, nfr {}".format(tod.mpicomm.rank, f, fcacheoff, froff, nfr), flush=True)
        if fcacheoff is not None:
            if cacheoff is None:
                cacheoff = fcacheoff
                ncache = nfr
            else:
                ncache += nfr
            #print("proc {}:    cache off now {}, ncache now {}".format(tod.mpicomm.rank, cacheoff, ncache), flush=True)

    # Now gather the full sample data one field at a time.  The root process
    # splits up the results into frames.

    # First gather common fields from the first row of the process grid.

    for findx, (cachefield, g3t, framefield) in enumerate(common):
        #print("proc {} entering gather_field(0, {}, {}, {}, {})".format(tod.mpicomm.rank, cachefield, findx, cacheoff, ncache), flush=True)
        data = gather_field(0, cachefield, findx, cacheoff, ncache)
        if tod.mpicomm.rank == 0:
            #print("Casting field {} to type {}".format(field, g3t), flush=True)
            if g3t == c3g.G3VectorTime:
                # Special case for time values stored as int64_t, but
                # wrapped in a class.
                for f in range(n_frames):
                    dataoff = fdataoff[f]
                    ndata = frame_sizes[f]
                    g3times = list()
                    for t in range(ndata):
                        g3times.append(c3g.G3Time(data[dataoff + t]))
                    fdata[f][framefield] = c3g.G3VectorTime(g3times)
                    del g3times
            else:
                # The bindings of G3Vector seem to only work with
                # lists.  This is probably horribly inefficient.
                for f in range(n_frames):
                    dataoff = fdataoff[f]
                    ndata = frame_sizes[f]
                    if len(data.shape) == 1:
                        fdata[f][framefield] = \
                            g3t(data[dataoff:dataoff+ndata].tolist())
                    else:
                        # We have a 2D quantity
                        fdata[f][framefield] = \
                            g3t(data[dataoff:dataoff+ndata,:].flatten().tolist())
        del data

    # Wait for everyone to catch up...
    tod.mpicomm.barrier()

    # For each detector field, processes which have the detector
    # in their local_dets should be in the same process row.
    # We do the gather over just this process row.

    if (detector_fields is not None) or (flag_fields is not None):
        dpats = {d: re.compile(".*{}.*".format(d)) for d in tod.local_dets}

        detmaps = None
        if detector_fields is not None:
            if tod.mpicomm.rank == 0:
                detmaps = [c3g.G3TimestreamMap() for f in range(n_frames)]

            for dindx, (cachefield, framefield) in enumerate(detector_fields):
                pc = -1
                for det, pat in dpats.items():
                    if pat.match(cachefield) is not None:
                        #print("proc {} has field {}".format(tod.mpicomm.rank, field), flush=True)
                        pc = rankdet
                        break
                # As a sanity check, verify that every process which
                # has this field is in the same process row.
                rowcheck = tod.mpicomm.gather(pc, root=0)
                prow = 0
                if tod.mpicomm.rank == 0:
                    rc = np.array([x for x in rowcheck if (x >= 0)],
                                  dtype=np.int32)
                    #print(field, rc, flush=True)
                    prow = np.max(rc)
                    if np.min(rc) != prow:
                        msg = "Processes with field {} are not in the "\
                            "same row\n".format(cachefield)
                        sys.stderr.write(msg)
                        tod.mpicomm.abort()

                # Every process finds out which process row is participating.
                prow = tod.mpicomm.bcast(prow, root=0)
                #print("proc {} got prow = {}".format(tod.mpicomm.rank, prow), flush=True)

                # Get the data on rank 0
                data = gather_field(prow, cachefield, dindx, cacheoff, ncache)

                if tod.mpicomm.rank == 0:
                    if units is None:
                        # We do this conditional, since we can't use
                        # G3TimestreamUnits.None in python ("None" is
                        # interpreted as python None).
                        for f in range(n_frames):
                            dataoff = fdataoff[f]
                            ndata = frame_sizes[f]
                            detmaps[f][framefield] = \
                                c3g.G3Timestream(data[dataoff:dataoff+ndata])
                    else:
                        for f in range(n_frames):
                            dataoff = fdataoff[f]
                            ndata = frame_sizes[f]
                            detmaps[f][framefield] = \
                                c3g.G3Timestream(data[dataoff:dataoff+ndata],
                                                 units)

            if tod.mpicomm.rank == 0:
                for f in range(n_frames):
                    fdata[f][detector_map] = detmaps[f]

        flagmaps = None
        if flag_fields is not None:
            if tod.mpicomm.rank == 0:
                flagmaps = [c3g.G3MapVectorInt() for f in range(n_frames)]
            for dindx, (cachefield, framefield) in enumerate(flag_fields):
                pc = -1
                for det, pat in dpats.items():
                    if pat.match(cachefield) is not None:
                        pc = rankdet
                        break
                # As a sanity check, verify that every process which
                # has this field is in the same process row.
                rowcheck = tod.mpicomm.gather(pc, root=0)
                prow = 0
                if tod.mpicomm.rank == 0:
                    rc = np.array([x for x in rowcheck if (x >= 0)],
                                  dtype=np.int32)
                    prow = np.max(rc)
                    if np.min(rc) != prow:
                        msg = "Processes with field {} are not in the "\
                            "same row\n".format(cachefield)
                        sys.stderr.write(msg)
                        tod.mpicomm.abort()

                # Every process finds out which process row is participating.
                prow = tod.mpicomm.bcast(prow, root=0)

                # Get the data on rank 0
                data = gather_field(prow, cachefield, dindx, cacheoff, ncache)

                if tod.mpicomm.rank == 0:
                    # The bindings of G3Vector seem to only work with
                    # lists...  Also there is no vectormap for unsigned
                    # char, so we have to use int...
                    for f in range(n_frames):
                        dataoff = fdataoff[f]
                        ndata = frame_sizes[f]
                        flagmaps[f][framefield] = \
                            c3g.G3VectorInt(\
                                data[dataoff:dataoff+ndata].astype(np.int32)\
                                .tolist())

            if tod.mpicomm.rank == 0:
                for f in range(n_frames):
                    fdata[f][flag_map] = flagmaps[f]

    return fdata
Example #22
iv1 = so3g.IntervalsDouble()\
          .add_interval(0., 1.)\
          .add_interval(2., 3.)\
          .add_interval(4., 5.)

iv2 = so3g.IntervalsDouble()\
          .add_interval(1., 2.5)

assert (len((iv1 + iv2).array()) == 2)
assert (len((iv1 * iv2).array()) == 1)
assert (len((iv1 - iv2).array()) == 2)
assert (len((iv2 - iv1).array()) == 4)

print('Sanity check on G3Time')
ti = so3g.IntervalsTime()\
    .add_interval(core.G3Time('2018-1-1T00:00:00'),
                  core.G3Time('2018-1-2T00:00:00'))
print('    ', ti)
print('    ', ti.array())
print('    ', (-ti).array())

print()
print('Interval <-> mask testing')
mask = np.zeros(20, 'uint16')
n_bit, target_bit = 16, 12
for ikill, nint in [(None, 0), (19, 1), (0, 2), (10, 3), (11, 3)]:
    if ikill is not None:
        mask[ikill] = (1 << target_bit)
    iv = so3g.IntervalsInt.from_mask(mask, n_bit)
    assert (len(iv) == n_bit)
    for i in range(n_bit):
        # Assumed completion: only the target bit should contain any intervals.
        expect = nint if i == target_bit else 0
        assert (len(iv[i].array()) == expect)
Example #23
def WriteDB(fr, client, fields=None):
    '''
    Write points to the database for each field.

    Arguments
    ---------
    fr :
        G3Frame to process; only GcpSlow frames are written.
    client :
        InfluxDB client
    fields :
        Which GCP fields to add to the database. See parse_field for options.
        If None, add all.
    '''
    from influxdb.exceptions import InfluxDBClientError
    from influxdb.exceptions import InfluxDBServerError

    if fr.type != core.G3FrameType.GcpSlow:
        return
    all_fields = build_field_list(fr)
    if fields is None:
        fields = all_fields.keys()
    dict_list = []
    for f in fields:
        field_dat = all_fields[f]
        if len(field_dat) == 4:
            stat, attr, ind, unit = field_dat
            try:
                dat = getattr(fr[stat], attr)[ind]
                time = getattr(fr[stat], 'time')
            except AttributeError:
                # OnlinePointingModel
                dat = fr[stat][attr][ind]
                time = fr[stat]['time']
        elif len(field_dat) == 3:
            stat, attr, unit = field_dat
            if stat not in fr:
                # Field only exists in live data stream
                continue
            try:
                dat = getattr(fr[stat], attr)
            except AttributeError:
                try:
                    dat = fr[stat][attr]
                except KeyError:  # Field only exists in live data stream
                    continue
            if 'Bench' in stat:  # funny time field for bench positions
                time = fr['BenchSampleTime']
            elif 'Mux' in stat:
                time = fr['MuxTime']
            elif stat in ['CryoStatus', 'Weather', 'PTStatus']:
                time = fr['{}Time'.format(stat)]
            else:
                try:
                    time = getattr(fr[stat], 'time')
                except AttributeError:
                    time = fr[stat]['time']
        elif len(field_dat) == 2:
            stat, unit = field_dat
            try:
                dat = fr[stat]
            except KeyError:  #eg, no obsid
                core.log_warn('No key {}'.format(stat), unit='InfluxDB')
                continue
            try:
                time = getattr(fr[stat], 'time')
            except AttributeError as err:
                time = [tm for tm in fr['antenna0']['tracker']['utc'][0]]

        # InfluxDB wants time in nanoseconds since the UNIX epoch in UTC
        try:
            time = [x.time / U.nanosecond for x in np.atleast_1d(time)]
        except AttributeError:
            time = [
                core.G3Time(t0).time / U.nanosecond
                for t0 in np.atleast_1d(time)
            ]
        if dat is None:
            core.log_warn('{} dat is None'.format(f), unit='InfluxDB')
            continue
        dat = np.atleast_1d(dat)
        try:
            dlen = len(dat)
        except TypeError:
            # sometimes source_name is a weird non-none value
            continue
        if unit is not None:
            if unit == 'C':
                zeropt_K = 273.15
                cal_dat = dat / U.K - zeropt_K
            else:
                cal_dat = dat / unit
        else:
            cal_dat = dat
        try:
            if np.any(np.isnan(cal_dat)):
                continue
        except TypeError:
            pass
        if 'heat' not in f:
            tag = f
        else:
            tag = f.replace('heat_', '')

        # for fields that have az/el components
        az_el_names = [
            'az', 'el', 'ra', 'dec', 'x', 'y', 'hr_angle', 'sin',
            'cos', 'lat'
        ]
        tag2 = f
        for name in az_el_names:
            # require name_ at beginning or _name at end
            match1 = re.findall('^{}_'.format(name), f)
            match2 = re.findall('_{}$'.format(name), f)
            if len(match1):
                tag2 = f.replace(match1[0], '')
            if len(match2):
                tag2 = f.replace(match2[0], '')
        # also group source names
        if 'source' in f:
            tag2 = 'source'
            stat = 'TrackerPointing'
        if stat == 'PTStatus':
            groups = ['now', 'min', 'max']
            for g in groups:
                match = re.findall('_{}$'.format(g), f)
                if len(match):
                    tag2 = f.replace(match[0], '')
        # group bench positions
        # require bench_ at beginning
        match = re.findall('^bench', f)
        if len(match):
            tag2 = attr  # y1, y2, etc
            stat = 'Bench'

        # group Mux properties
        if 'Mux' in stat:
            stat = 'muxHousekeeping'
            tag2 = 'ib' + f.split('ib')[-1]

        dict_list += make_lines(
            measurement=stat,
            field=f,
            time=time,
            dat=cal_dat,
            tags={
                'label': tag,
                'label2': tag2
            },
        )

    try:
        now = core.G3Time.Now()
        delay = float(now.time / U.nanosecond - time[-1]) / 1e9
        if delay > 5:
            core.log_info('{} Delay: {} s'.format(now.isoformat(), delay),
                          unit='InfluxDB')
    except RuntimeError:  # sometimes timestamp gets screwed up
        pass

    try:
        client.write_points(dict_list,
                            batch_size=len(dict_list),
                            protocol='line')
    except (InfluxDBClientError, InfluxDBServerError) as v:
        core.log_error('Error writing to database. {}'.format(v),
                       unit='InfluxDB')
Example #24
#!/usr/bin/env python
from spt3g import core, coordinateutils
import numpy as np

np.random.seed(42)

n_samps = int(1e3)

az_0 = core.G3Timestream(np.random.rand(n_samps) * 2.0 * np.pi - np.pi)
pole_avoidance = 0.7
el_0 = core.G3Timestream(
    np.random.rand(n_samps) * np.pi * pole_avoidance -
    np.pi / 2.0 * pole_avoidance)

az_0.start = core.G3Time('20170329_000001')
el_0.start = core.G3Time('20170329_000001')

az_0.stop = core.G3Time('20170329_100001')
el_0.stop = core.G3Time('20170329_100001')

az_1 = az_0 + 10 * core.G3Units.arcmin
el_1 = el_0 + 10 * core.G3Units.arcmin

ra_0, dec_0 = coordinateutils.azel.convert_azel_to_radec(az_0, el_0)
ra_1, dec_1 = coordinateutils.azel.convert_azel_to_radec(az_1, el_1)

o_az_0 = az_0 + (np.random.rand() - 0.5) * 2 * core.G3Units.deg
o_el_0 = el_0 + (np.random.rand() - 0.5) * 2 * core.G3Units.deg
o_ra_0, o_dec_0 = coordinateutils.azel.convert_azel_to_radec(o_az_0, o_el_0)

import astropy.coordinates
Example #25
    def _process_irig_packet(self, parse_index):
        # Unpack the IRIG data
        start_ind = parse_index
        end_ind = start_ind + self._irig_packet_size
        unpacked_data = np.array(
            struct.unpack(self._irig_unpack_str,
                          self._data[start_ind:end_ind]))
        # Unpack the IRIG header
        ind1 = 0
        ind2 = ind1 + self._irig_header_units
        header = unpacked_data[ind1:ind2][0]
        if header != self._irig_header:
            raise RuntimeError("%s: IRIG header error: 0x%x" %
                               (self._error_msg, header))
        # Unpack the IRIG clock
        ind1 = ind2
        ind2 = ind1 + self._irig_clock_units
        clock = unpacked_data[ind1:ind2][0]
        # Unpack the IRIG clock overflows
        ind1 = ind2
        ind2 = ind1 + self._irig_overflow_units
        overflow = unpacked_data[ind1:ind2][0]
        # Adjust the IRIG clock for overflows
        clock_adjusted = clock + (overflow << self._num_overflow_bits)
        # Unpack the IRIG info
        ind1 = ind2
        ind2 = ind1 + self._irig_data_length
        info = unpacked_data[ind1:ind2]
        # Unpack the IRIG synch pulses
        ind1 = ind2
        ind2 = ind1 + self._irig_data_length
        synch = unpacked_data[ind1:ind2]
        # Unpack the IRIG synch pulse overflows
        ind1 = ind2
        ind2 = ind1 + self._irig_data_length
        synch_overflow = unpacked_data[ind1:ind2]
        synch_adjusted = (synch + (synch_overflow << self._num_overflow_bits))

        # Convert raw IRIG bits to a meaningful time
        year, yday, hour, mins, secs = self._irig_time_conversion(info)
        # Obtain the time using the G3 object
        irig_time = core.G3Time(y=int(year),
                                d=int(yday),
                                h=int(hour),
                                m=int(mins),
                                s=int(secs),
                                ss=0)  # no subseconds

        # Store the time in seconds as a G3Double
        time_s = core.G3Double(
            float(irig_time.time) / float(core.G3Units.seconds))
        # time_s = core.G3UInt(irig_time.time)
        # Store clock value as a G3UInt
        clock_adjusted = core.G3UInt(int(clock_adjusted))
        # Store the synchronization pulse clock values as a G3VectorUInt
        # synch_adjusted = core.G3VectorUInt(np.array(synch_adjusted))

        # Create and return a G3 timepoint frame with clock and irig data
        irig_frame = core.G3Frame(core.G3FrameType.Timepoint)
        irig_frame['chwp_irig_time'] = time_s
        irig_frame['chwp_irig_clock'] = clock_adjusted
        # irig_frame['chwp_irig_synch'] = synch_adjusted
        return irig_frame
Example #26
 def split_field(data,
                 g3t,
                 framefield,
                 mapfield=None,
                 g3units=units,
                 times=None):
     """Split a gathered data buffer into frames- only on root process.
     """
     if data is None:
         return
     if g3t == core3g.G3VectorTime:
         # Special case for time values stored as int64_t, but
         # wrapped in a class.
         for f in range(n_frames):
             dataoff = fdataoff[f]
             ndata = frame_sizes[f]
             g3times = list()
             for t in range(ndata):
                 g3times.append(core3g.G3Time(data[dataoff + t]))
             if mapfield is None:
                 fdata[f][framefield] = core3g.G3VectorTime(g3times)
             else:
                 fdata[f][framefield][mapfield] = \
                     core3g.G3VectorTime(g3times)
             del g3times
     elif g3t == so3g.IntervalsInt:
         # Flag vector is written as a simple boolean.
         for f in range(n_frames):
             dataoff = fdataoff[f]
             ndata = frame_sizes[f]
             # Extract flag vector (0 or 1) for this frame
             frame_flags = (data[dataoff:dataoff + ndata] != 0).astype(int)
             # Convert bit 0 to an IntervalsInt.
             ival = so3g.IntervalsInt.from_mask(frame_flags, 1)[0]
             if mapfield is None:
                 fdata[f][framefield] = ival
             else:
                 fdata[f][framefield][mapfield] = ival
     elif g3t == core3g.G3Timestream:
         if times is None:
             raise RuntimeError(
                 "You must provide the time stamp vector with a "
                 "Timestream object")
         for f in range(n_frames):
             dataoff = fdataoff[f]
             ndata = frame_sizes[f]
             timeslice = times[cacheoff + dataoff:cacheoff + dataoff +
                               ndata]
             tstart = timeslice[0] * 1e8
             tstop = timeslice[-1] * 1e8
             if mapfield is None:
                 if g3units is None:
                     fdata[f][framefield] = \
                         g3t(data[dataoff : dataoff + ndata])
                 else:
                     fdata[f][framefield] = \
                         g3t(data[dataoff : dataoff + ndata], g3units)
                 fdata[f][framefield].start = core3g.G3Time(tstart)
                 fdata[f][framefield].stop = core3g.G3Time(tstop)
             else:
                 # Individual detector data.  The only fields that
                 # we (optionally) compress.
                 if g3units is None:
                     tstream = g3t(data[dataoff:dataoff + ndata])
                 else:
                     tstream = g3t(data[dataoff:dataoff + ndata], g3units)
                 if compress and "compressor_gain_" + framefield in fdata[f]:
                     (tstream, gain,
                      offset) = recode_timestream(tstream, compress)
                     fdata[f]["compressor_gain_" +
                              framefield][mapfield] = gain
                     fdata[f]["compressor_offset_" +
                              framefield][mapfield] = offset
                 fdata[f][framefield][mapfield] = tstream
                 fdata[f][framefield][mapfield].start = core3g.G3Time(
                     tstart)
                 fdata[f][framefield][mapfield].stop = core3g.G3Time(tstop)
     else:
         # The bindings of G3Vector seem to only work with
         # lists.  This is probably horribly inefficient.
         for f in range(n_frames):
             dataoff = fdataoff[f]
             ndata = frame_sizes[f]
             if len(data.shape) == 1:
                 fdata[f][framefield] = \
                     g3t(data[dataoff : dataoff + ndata].tolist())
             else:
                 # We have a 2D quantity
                 fdata[f][framefield] = \
                     g3t(data[dataoff : dataoff + ndata, :].flatten()
                         .tolist())
     return
Example #27
    def start_background_streamer(self, session, params=None):
        """start_background_streamer(params=None)

        Process to run the streaming process. A data stream is started
        automatically. It can be stopped and restarted by the stop and start
        tasks; either way, keep-alive flow control frames are sent throughout.

        Parameters
        ----------
        frame_rate : float, optional
            Frequency [Hz] at which G3Frames are sent over the network.
            Defaults to 1 frame per second.
        sample_rate : float, optional
            Sample rate [Hz] for each channel. Defaults to 10 Hz.

        """
        if params is None:
            params = {}

        self.writer = core.G3NetworkSender(hostname=self.target_host,
                                           port=self.port)

        frame_rate = params.get('frame_rate', 1.)
        sample_rate = params.get('sample_rate', 10.)

        frame_num = 0
        self.running_in_background = True

        # FIFO queue of control flags, to keep the Writer single-threaded
        self.flags = deque([FlowControl.START])

        while self.running_in_background:
            # Send START frame
            if next(iter(self.flags), None) is FlowControl.START:
                self._set_stream_on()  # sends start flowcontrol
                self.is_streaming = True
                self.flags.popleft()

            print("stream running in background")
            self.log.debug("control flags: {f}", f=self.flags)
            # Send keep alive flow control frame
            f = core.G3Frame(core.G3FrameType.none)
            f['sostream_flowcontrol'] = FlowControl.ALIVE.value
            self.writer.Process(f)

            if self.is_streaming:
                frame_start = time.time()
                time.sleep(1. / frame_rate)
                frame_stop = time.time()
                times = np.arange(frame_start, frame_stop, 1. / sample_rate)

                f = core.G3Frame(core.G3FrameType.Scan)
                f['session_id'] = 0
                f['frame_num'] = frame_num
                f['sostream_id'] = self.stream_id
                f['data'] = core.G3TimestreamMap()

                for i, chan in enumerate(self.channels):
                    ts = core.G3Timestream([chan.read() for t in times])
                    ts.start = core.G3Time(frame_start * core.G3Units.sec)
                    ts.stop = core.G3Time(frame_stop * core.G3Units.sec)
                    f['data'][f"r{i:04}"] = ts

                self.writer.Process(f)
                self.log.info("Writing frame...")
                frame_num += 1

                # Send END frame
                if next(iter(self.flags), None) is FlowControl.END:
                    self._send_end_flowcontrol_frame()
                    self._send_cleanse_flowcontrol_frame()
                    self.is_streaming = False
                    self.flags.popleft()

            else:
                # Don't send keep alive frames too quickly
                time.sleep(1)

            # Shutdown streamer
            if next(iter(self.flags), None) is SHUTDOWN:
                self.running_in_background = False
                self.flags.popleft()

        # Teardown writer
        self.writer.Close()
        self.writer = None

        return True, "Finished streaming"
Example #28
 def test_from_list(self):
     t0 = core.G3Time('2019-01-01T12:30:00')
     vectime = core.G3VectorTime([t0, t0 + 10 * SEC])
     assert (vectime[0].time == t0.time)
     assert (vectime[1].time == t0.time + 10 * SEC)
Example #29
#!/usr/bin/env python

import numpy
from spt3g import core

ts = core.G3Timestream(numpy.zeros(50))
ts.units = core.G3TimestreamUnits.Power
ts.start = core.G3Time(0)
ts.stop = core.G3Time(10 * core.G3Units.s)

assert (ts[5] == 0)  # Check scalar getitem

ts[12] = 13.4  # Check scalar setitem
assert (ts[12] == 13.4)

ts[:] = numpy.arange(50)  # Test vector setitem
assert (ts[12]) == 12.0

# Test units preserved by slicing
assert (ts[:5].units == ts.units)

# Test length with slicing
assert (len(ts[::2]) == len(ts) / 2)
assert (len(ts[5:]) == len(ts) - 5)

# Test consistency with numpy slicing with steps that do and do not divide evenly into array length
assert ((numpy.asarray(ts[::2]) == numpy.asarray(ts)[::2]).all())
assert ((numpy.asarray(ts[::3]) == numpy.asarray(ts)[::3]).all())
assert ((numpy.asarray(ts[5::2]) == numpy.asarray(ts)[5::2]).all())
assert ((numpy.asarray(ts[5::3]) == numpy.asarray(ts)[5::3]).all())
assert ((numpy.asarray(ts[5:-4:2]) == numpy.asarray(ts)[5:-4:2]).all())
Example #30
from adama_utils import get_obsids_from_times
from spt3g import core
import pandas as pd

obsids = get_obsids_from_times(core.G3Time("20190801_000000"),
                               core.G3Time("20190901_000000"),
                               sources=['ra0hdec-44.75', 'ra0hdec-52.25',
                                        'ra0hdec-59.75', 'ra0hdec-67.25'])

obsids_refactored = {'source':[], 'obsid':[]}
for source in obsids:
    for obsid in obsids[source]:
        obsids_refactored['source'].append(source)
        obsids_refactored['obsid'].append(obsid)

df = pd.DataFrame(obsids_refactored)
df.to_csv('obsids_to_process.txt', sep='\t', index=False)