Code Example #1
    def tune_dets(self, session, params):
        """tune_dets()

        **Task** - Emulates files that might come from a general tune_dets
        function. These are some of the files found on simons1, registered
        when running the following ops with a single band:

             1. find_freq
             2. setup_notches
             3. tracking_setup
             4. short g3 stream

        Args
        ----
        test_mode : bool
            If True, skip any wait times associated with writing
            g3 files.
        """
        # Find Freq
        action_time = time.time()
        files = [
            'amp_sweep_freq.txt', 'amp_sweep_resonance.txt',
            'amp_sweep_resp.txt'
        ]
        for f in files:
            self._write_smurf_file(f, 'find_freq', action_time=action_time)

        # Setup Notches
        action_time = time.time()
        files = ['channel_assignment_b0.txt', 'tune.npy']
        for f in files:
            self._write_smurf_file(f, 'setup_notches', action_time=action_time)

        # tracking setup
        action_time = time.time()
        fname = f"{int(time.time())}.dat"
        self._write_smurf_file(fname, 'tracking_setup', prepend_ctime=False)

        # Short g3 stream
        frame_gen = G3FrameGenerator(self.stream_id, self.sample_rate,
                                     self.nchans)
        fname = self._get_g3_filename(frame_gen.session_id, 0, makedirs=True)
        os.makedirs(os.path.dirname(fname), exist_ok=True)
        session.data['noise_file'] = fname
        writer = core.G3Writer(fname)
        writer(frame_gen.get_obs_frame())
        writer(frame_gen.get_status_frame())
        start = time.time()
        stop = start + 30
        frame_starts = np.arange(start, stop, self.frame_len)
        frame_stops = frame_starts + self.frame_len
        for t0, t1 in zip(frame_starts, frame_stops):
            if not params['test_mode']:
                now = time.time()
                if now < t1:
                    time.sleep(t1 - now)
            writer(frame_gen.get_data_frame(t0, t1))
        writer(core.G3Frame(core.G3FrameType.EndProcessing))

        return True, "Wrote tune files"
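
For reference, the g3 file written above can be inspected by iterating over it; a minimal readback sketch (the path is a placeholder for whatever was stored in session.data['noise_file']):

from spt3g import core

# 'noise_file.g3' stands in for the path saved in session.data['noise_file'].
for frame in core.G3File('noise_file.g3'):
    print(frame.type)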
Code Example #2
File: aggregator.py Project: simonsobs/ocs
    def Process(self, frames):
        """
        Writes frame to current file. If file has not been started
        or time_per_file has elapsed, file is closed and a new file is created
        by `filename` function passed to constructor
        """
        for frame in frames:
            ftype = frame['hkagg_type']

            if ftype == so3g.HKFrameType.session:
                self.last_session = frame
            elif ftype == so3g.HKFrameType.status:
                self.last_status = frame

            if self.writer is None:
                self.current_file = self.filename()
                self.log.info("Creating file: {}".format(self.current_file))
                self.writer = core.G3Writer(self.current_file)
                self.file_start_time = time.time()

                if ftype in [so3g.HKFrameType.data, so3g.HKFrameType.status]:
                    if self.last_session is not None:
                        self.writer(self.last_session)

                if ftype == so3g.HKFrameType.data:
                    if self.last_status is not None:
                        self.writer(self.last_status)

            self.writer(frame)

        if (time.time() - self.file_start_time) > self.time_per_file:
            self.close_file()

        return frames
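
For context, a module like this is normally added to a G3Pipeline between a reader and any downstream consumers. A minimal wiring sketch, where HKArchiver is a hypothetical stand-in for the class that defines the Process method above:

from spt3g import core

p = core.G3Pipeline()
p.Add(core.G3Reader('hk_in.g3'))  # hypothetical input file
p.Add(HKArchiver())               # stand-in for the aggregator class above
p.Run()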
Code Example #3
    def create_new_file(self):
        """Create a new file if needed.

        This is only done if an existing file has been closed out (i.e. the
        writer is None). The filename and path are based on the time the file
        is created. Acquisitions grouped together by file duration share the
        basename of the acquisition start time and have their suffix
        incremented.

        Writes the last metadata frame to the start of the new file if there
        is one.

        Example:
            # Three 10 minute observations
            file_1 = 2019-08-07-01-30-00_000.g3
            file_2 = 2019-08-07-01-30-00_001.g3
            file_3 = 2019-08-07-01-30-00_002.g3

            # Acquisition stopped and new one started on the hour
            new_file = 2019-08-07-02-00-00_000.g3

        """
        if self.writer is None:
            # Used for tracking when to split acquisitions
            self.start_time = time.time()

            # Avoid duplicate filenames if new file started within 1 sec
            if self._last_file_timestamp is None:
                pass
            elif int(self.start_time) == self._last_file_timestamp:
                self.log.debug(
                    "New file started within 1 second of previous " +
                    "file, incrementing filename suffix.")
                self.filename_suffix += 1

            # Only create new dir and basename if we've finished an acquisition
            if self.filename_suffix == 0:
                stream_id = self.stream_id
                for f in self.frames:
                    if f.get('sostream_id') is not None:
                        stream_id = f['sostream_id']
                        break

                self.dirname = _create_dirname(self.start_time, self.data_dir,
                                               stream_id)
                self.basename = int(self.start_time)

            suffix = "_{:03d}".format(self.filename_suffix)
            filepath = _create_file_path(self.dirname, self.basename, suffix)
            self.log.info("Writing to file {}".format(filepath))
            self.writer = core.G3Writer(filename=filepath)
            self._last_file_timestamp = int(self.start_time)

            # Write the last metadata frame to the start of the new file
            if self.last_meta is not None:
                self.writer(self.last_meta)

            return filepath
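
The helpers _create_dirname and _create_file_path are defined elsewhere in the module. A plausible sketch, assuming the {data_dir}/{first five ctime digits}/{stream_id} layout used by the other examples here:

import os

def _create_dirname(start_time, data_dir, stream_id):
    # Hypothetical sketch: group files by the first five digits of the
    # ctime (roughly one day per directory), then by stream_id.
    dirname = os.path.join(data_dir, str(int(start_time))[:5], stream_id)
    os.makedirs(dirname, exist_ok=True)
    return dirname

def _create_file_path(dirname, basename, suffix=''):
    # Hypothetical sketch: yields e.g. <dirname>/1565141400_000.g3
    return os.path.join(dirname, '{}{}.g3'.format(basename, suffix))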
Code Example #4
def write_example_file(filename='hk_out.g3'):
    """Generate an example g3 file with Wiring Frame.

    This is based on other test file writing functions, but is unique in that
    it contains a wiring frame for use in testing Seek/Tell.

    Structure of frames in this file should be:
        - Housekeeping
        - Housekeeping
        - Wiring
        - Housekeeping
        - Housekeeping

    Args:
        filename (str): filename to write data to

    """
    test_file = filename

    # Write a stream of HK frames.
    w = core.G3Writer(test_file)

    # Create something to help us track the aggregator session.
    hksess = so3g.hk.HKSessionHelper(session_id=1234,
                                     description="Test HK data.")

    # Register a data provider.
    prov_id = hksess.add_provider(description='Fake data for the real world.')

    # Start the stream -- write the initial session and status frames.
    f = hksess.session_frame()
    w.Process(f)
    f = hksess.status_frame()
    w.Process(f)

    # Write dummy wiring frame
    f = core.G3Frame()
    f.type = core.G3FrameType.Wiring
    w.Process(f)

    # Now make a data frame.
    f = hksess.data_frame(prov_id=prov_id)

    # Add a data block.
    hk = so3g.IrregBlockDouble()
    hk.prefix = 'hwp_'
    hk.data['position'] = [1, 2, 3, 4, 5]
    hk.data['speed'] = [1.2, 1.2, 1.2, 1.2, 1.2]
    hk.t = [0, 1, 2, 3, 4]
    f['blocks'].append(hk)

    # Write the data frame twice, giving two more Housekeeping frames.
    w.Process(f)
    w.Process(f)

    del w
Code Example #5
File: timesample.py Project: tskisner/spt3g_software
    def test_30_serialization(self):
        m0 = get_test_block(100)
        m1 = get_test_block(200, offset=100)
        m2 = m0.concatenate(m1)
        m0.check()
        m1.check()
        m2.check()
        f = core.G3Frame()
        f['irreg0'] = m0
        f['irreg1'] = m1
        core.G3Writer('test.g3').Process(f)
        f = core.G3Reader('test.g3').Process(None)[0]
        f['irreg0'].check()
        f['irreg1'].check()
        f['irreg0'].concatenate(f['irreg1'])['b']
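
get_test_block is defined earlier in timesample.py and not shown. A plausible sketch returning an n-sample core.G3TimesampleMap (the field names are guesses based on the ['b'] access above):

import numpy as np
from spt3g import core

def get_test_block(n, offset=0):
    # Hypothetical sketch: n samples starting at `offset` seconds.
    m = core.G3TimesampleMap()
    m.times = core.G3VectorTime(
        [core.G3Time((offset + i) * core.G3Units.second) for i in range(n)])
    m['a'] = core.G3VectorDouble(np.arange(n, dtype=float))
    m['b'] = core.G3VectorInt([0] * n)
    return m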
Code Example #6
    def test_50_compression(self):
        test_file = 'test_g3super.g3'

        # Entropy?
        sigma_bits = 8
        sigma = 2**sigma_bits
        _get_ts = lambda dtype: self._get_ts(
            100, 10000, sigma=sigma, dtype=dtype, seed=12345)

        w = core.G3Writer(test_file)

        sizes = {d: [] for d in ALL_DTYPES}
        for dtype in ALL_DTYPES:
            # No compression
            f = core.G3Frame()
            ts = _get_ts(dtype)
            sizes[dtype].append(ts.data.nbytes)
            ts.options(enable=0)
            f['ts_%s' % dtype] = ts
            w.Process(f)

            # Yes compression
            f = core.G3Frame()
            ts = _get_ts(dtype)
            f['ts_%s' % dtype] = ts
            w.Process(f)
        del w

        # Readback
        r = so3g.G3IndexedReader(test_file)
        last = 0
        for dtype in ALL_DTYPES:
            for i in range(2):
                r.Process(None)[0]
                here = r.Tell()
                sizes[dtype].append(here - last)
                last = here

        # Process the results...
        for dtype in ALL_DTYPES:
            err_msg = f'Failed for dtype={dtype}'
            n, s_uncomp, s_comp = sizes[dtype]
            comp_ratio = 1. - (s_uncomp - s_comp) / n
            # But really what matters is the bits-per-word, compressed.
            bits_per_word = comp_ratio * 8 * np.dtype(dtype).itemsize
            #print(dtype, bits_per_word / sigma_bits)
            # I think the theoretical limit is 1.3 or so...
            self.assertLess(bits_per_word, sigma_bits * 1.4, err_msg)
Code Example #7
File: vecint.py Project: tskisner/spt3g_software
    def test_compression(self):
        """Confirm that minimum necessary int size is used for serialization."""
        count = 10000
        overhead = 200
        for isize, val in bit_sizes:
            w = core.G3Writer(test_filename)
            f = core.G3Frame()
            f['v'] = core.G3VectorInt([val] * count)
            w(f)
            del w
            on_disk = os.path.getsize(test_filename)
            self.assertTrue(
                abs(on_disk - count * isize / 8) <= overhead,
                "Storage for val %i took %.2f bytes/item, "
                "too far from %.2f bytes/item" %
                (val, on_disk / count, isize / 8))
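
Both this test and the next iterate over a module-level bit_sizes list that sits outside the snippet. A plausible definition, pairing each serialized integer width with a value that needs that many bits (the exact values are illustrative):

# (isize, val): val should require isize bits of storage per item.
bit_sizes = [
    (8, 2**7 - 1),
    (16, 2**15 - 1),
    (32, 2**31 - 1),
    (64, 2**63 - 1),
]
test_filename = 'test_vectorint.g3'  # hypothetical scratch file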
Code Example #8
File: vecint.py Project: tskisner/spt3g_software
    def test_serialize(self):
        """Confirm full ranges can be saved and loaded."""

        w = core.G3Writer(test_filename)
        for isize, val in bit_sizes:
            f = core.G3Frame()
            f['v'] = core.G3VectorInt([val] * 10)
            w(f)
        del w

        r = core.G3Reader(test_filename)
        for isize, val in bit_sizes:
            f = r(None)[0]
            v_in = list(f['v'])
            self.assertTrue(all([_v == val for _v in v_in]),
                            "Failed to save/load value %i" % val)
        del r
Code Example #9
    def start_stream(self, session, params=None):
        if params is None:
            params = {}

        time_per_file = params.get("time_per_file", 60*60) # [sec]
        data_dir = params.get("data_dir", "data/")

        self.log.info("Writing data to {}".format(data_dir))
        self.log.info("New file every {} seconds".format(time_per_file))

        reader = core.G3Reader("tcp://localhost:{}".format(self.port))
        writer = None

        last_meta = None
        self.is_streaming = True

        while self.is_streaming:
            if writer is None:
                start_time = datetime.utcnow()
                ts = start_time.timestamp()
                subdir = os.path.join(data_dir, "{:.5}".format(str(ts)))

                if not os.path.exists(subdir):
                    os.makedirs(subdir)

                filename = start_time.strftime("%Y-%m-%d-%H-%M-%S.g3")
                filepath = os.path.join(subdir, filename)
                writer = core.G3Writer(filename=filepath)
                if last_meta is not None:
                    writer(last_meta)

            frames = reader.Process(None)
            for f in frames:
                if f.type == core.G3FrameType.Housekeeping:
                    last_meta = f
                writer(f)

            if (datetime.utcnow().timestamp() - ts) > time_per_file:
                writer(core.G3Frame(core.G3FrameType.EndProcessing))
                writer = None

        if writer is not None:
            writer(core.G3Frame(core.G3FrameType.EndProcessing))

        return True, "Finished Streaming"
Code Example #10
File: test_hk_getdata.py Project: jit9/so3g
def write_example_file(filename='hk_out.g3'):
    """Generate some example HK data and write to file.

    Args:
        filename (str): filename to write data to

    """
    test_file = filename

    # Write a stream of HK frames.
    # (Inspect the output with 'spt3g-dump hk_out.g3 so3g'.)
    w = core.G3Writer(test_file)

    # Create something to help us track the aggregator session.
    hksess = so3g.hk.HKSessionHelper(session_id=1234,
                                     description="Test HK data.")

    # Register a data provider.
    prov_id = hksess.add_provider(description='Fake data for the real world.')

    # Start the stream -- write the initial session and status frames.
    f = hksess.session_frame()
    w.Process(f)
    f = hksess.status_frame()
    w.Process(f)

    # Now make a data frame.
    f = hksess.data_frame(prov_id=prov_id)

    # Add a data block.
    hk = so3g.IrregBlockDouble()
    hk.prefix = 'hwp_'
    hk.data['position'] = [1, 2, 3, 4, 5]
    hk.data['speed'] = [1.2, 1.2, 1.2, 1.2, 1.2]
    hk.t = [0, 1, 2, 3, 4]
    f['blocks'].append(hk)

    w.Process(f)

    del w
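
A file written this way can be read back with so3g's HK archive tools. A minimal readback sketch, assuming so3g.hk.HKArchiveScanner and guessing the combined field name 'hwp_position' from the prefix and field above:

import so3g

scanner = so3g.hk.HKArchiveScanner()
scanner.process_file('hk_out.g3')
arc = scanner.finalize()
# get_data returns (data, timelines) dicts keyed by field name.
fields, timelines = arc.get_data(['hwp_position'])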
Code Example #11
    def _readback_compare(self,
                          ts,
                          filename='readback_test.g3',
                          cleanup=True,
                          err_msg='(no detail)'):
        """Cache the data from ts, write ts to a file, read it back from file,
        compare to cached data.

        """
        # Cache
        fake_ts = (collections.namedtuple(
            'pseudo_ts', ['times', 'names', 'data']))(np.array(ts.times),
                                                      np.array(ts.names),
                                                      ts.data.copy())
        # Write
        f = core.G3Frame()
        f['item'] = ts
        core.G3Writer(filename).Process(f)
        # Read
        ts1 = core.G3Reader(filename).Process(None)[0]['item']
        self._check_equal(fake_ts, ts1, err_msg=err_msg)
        if cleanup:
            os.remove(filename)
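
_check_equal is another helper on the test class, outside this snippet. A plausible sketch (assuming numpy is imported as np, as in the caching step above) comparing the cached copy against the read-back object:

    def _check_equal(self, ts0, ts1, err_msg='(no detail)'):
        # Hypothetical sketch: compare times, channel names, and data.
        np.testing.assert_array_equal(np.array(ts0.times),
                                      np.array(ts1.times), err_msg=err_msg)
        assert list(ts0.names) == list(ts1.names), err_msg
        np.testing.assert_allclose(ts0.data, ts1.data, err_msg=err_msg)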
Code Example #12
def write_example_file(filename='hk_out.g3', hkagg_version=2):
    """Generate some example HK data and write to file.

    Args:
        filename (str): filename to write data to
        hkagg_version (int): which HK version to write to file

    """
    test_file = filename

    # Write a stream of HK frames.
    # (Inspect the output with 'spt3g-dump hk_out.g3 so3g'.)
    seeder = Seeder()
    w = core.G3Pipeline()
    w.Add(seeder)
    w.Add(HKTranslator(target_version=hkagg_version))
    w.Add(core.G3Writer(test_file))

    if hkagg_version <= 1:
        seeder.extend(get_v0_stream())
    else:
        seeder.extend(get_v2_stream())
    w.Run()
    del w
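
Seeder, get_v0_stream, and get_v2_stream are test helpers defined elsewhere in the file. A minimal Seeder sketch, assuming it acts as a G3Pipeline source that emits its queued frames once and then ends the stream:

class Seeder(list):
    """Hypothetical sketch: frames queued via .extend() are emitted on
    the end-of-stream call (frame is None); the next call returns []
    and the pipeline stops."""

    def Process(self, frame):
        if frame is not None:
            return [frame]
        output = [f for f in self]
        self[:] = []
        return output

    def __call__(self, *args, **kwargs):
        return self.Process(*args, **kwargs)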
Code Example #13
    def test_40_encoding_serialized(self):
        test_file = 'test_g3super.g3'
        offsets = {
            'int32': [0, 2**25, 2**26 / 3., -1.78 * 2**27],
            'int64': [0, 2**25, 2**26 / 3., -1.78 * 2**27],
            'float32': [0],
            'float64': [0, 2**25, 2**26 / 3., -1.78 * 2**27, 1.8 * 2**35],
        }
        decimals = 2
        precision = 10**-decimals

        w = core.G3Writer(test_file)
        records = []
        for dtype in ALL_DTYPES:
            for offset in offsets[dtype]:
                f = core.G3Frame()
                ts = self._get_ts(4, 100, sigma=100, dtype=dtype)
                ts.data += int(offset)
                if dtype in FLOAT_DTYPES:
                    ts.data[:] = np.round(ts.data)
                    ts.calibrate([.01] * ts.data.shape[0])
                records.append(ts.data.copy())
                f['a'] = ts
                w.Process(f)
        del w
        # readback
        r = core.G3Reader(test_file)
        for dtype in ALL_DTYPES:
            for offset in offsets[dtype]:
                err_msg = f'Failed for dtype={dtype}, offset={offset}'
                ts2 = r.Process(None)[0]['a']
                record = records.pop(0)
                np.testing.assert_allclose(record,
                                           ts2.data,
                                           atol=precision * 1e-3,
                                           err_msg=err_msg)
Code Example #14
File: write_hk.py Project: tskisner/so3g
# data in the "SO HK" format.  When expanding it, check the SO HK
# format description to make sure your frame stream is compliant.

import time
import numpy as np

from spt3g import core
from so3g import hk

# Start a "Session" to help generate template frames.
session = hk.HKSessionHelper(hkagg_version=2)

# Create an output file and write the initial "session" frame.  If
# you break the data into multiple files, you write the session frame
# at the start of each file.
writer = core.G3Writer('hk_example.g3')
writer.Process(session.session_frame())

# Create a new data "provider".  This represents a single data
# source, sending data for some fixed list of fields.
prov_id = session.add_provider('platform')

# Whenever there is a change in the active "providers", write a
# "status" frame.
writer.Process(session.status_frame())

# Write, like, 10 half-scans.
frame_time = time.time()
v_az = 1.5  # degrees/second
dt = 0.001  # seconds
halfscan = 10  # degrees
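
The example is cut off before the scan loop. A hedged sketch of how the half-scans might be written, reusing the session and writer above with the v2 block pattern from the HK tests (the block and field names are illustrative):

az, t = 0.0, frame_time
for i in range(10):
    n = int(halfscan / (v_az * dt))      # samples in one half-scan
    sign = 1.0 if i % 2 == 0 else -1.0   # sweep direction alternates
    f = session.data_frame(prov_id=prov_id)
    block = core.G3TimesampleMap()
    block.times = core.G3VectorTime(
        [core.G3Time((t + j * dt) * core.G3Units.second) for j in range(n)])
    block['az'] = core.G3VectorDouble(
        [az + sign * v_az * j * dt for j in range(n)])
    f['blocks'].append(block)
    f['block_names'].append('pointing')  # hypothetical block name
    writer.Process(f)
    t += n * dt
    az += sign * v_az * n * dt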
Code Example #15
File: test_frame_types.py Project: tskisner/so3g
import so3g as ss
from spt3g import core

ss.TestClass().runme()
ss.greet()

w = core.G3Writer('out.g3')
f = core.G3Frame()
f.type = core.G3FrameType.Scan
f['testv'] = core.G3VectorDouble([
    1.,
    2.,
    3.,
])
w.Process(f)
Code Example #16
File: test_intervals.py Project: tskisner/so3g
# Type failing works?  Can't create mask from non-integer Intervals.
try:
    mask3 = so3g.IntervalsDouble.mask([], 8)
except ValueError:
    mask3 = 'failed'
assert (mask3 == 'failed')

print()
print('Map test')
tmap = so3g.MapIntervalsTime()
tmap['a'] = ti
print('    ', tmap)
print('    ', tmap['a'])

# Can we read and write them?
print()
test_filename = 'test_intervals.g3'
print('Writing to %s' % test_filename)
w = core.G3Writer(test_filename)
f = core.G3Frame()
f['map'] = tmap
f['iv'] = iv2
w.Process(f)
del w

print()
print('Reading from %s' % test_filename)
for f in core.G3File(test_filename):
    print('   ', f)
Code Example #17
File: spt3g.py Project: simrannerval/toast
    def _export_frames(self,
                       path=None,
                       prefix=None,
                       cache_name=None,
                       cache_common=None,
                       cache_flag_name=None):
        """Export cache data to frames.

        This will either export the cache fields that correspond to the "real"
        data (those manipulated by the read / write methods) or will export
        alternate cache fields specified by the arguments.

        Args:
            path (str):  Override any path specified at construction time.
            prefix (str):  Override any prefix specified at construction time.
            cache_name (str):  When exporting data, the name of the cache
                object (<name>_<detector>) to use for the detector timestream.
                If None, use the TOD read* methods.
            cache_common (str):  When exporting data, the name of the cache
                object to use for common flags.  If None, use the TOD read*
                methods.
            cache_flag_name (str):  When exporting data, the name of the
                cache object (<name>_<detector>) to use for the detector
                flags.  If None, use the TOD read* methods.

        """
        self._cache_init()

        # Create the frame schema we are using when exporting data

        common_fields = [(STR_TIME, c3g.G3VectorTime, STR_TIME),
                         (STR_BORE, c3g.G3VectorDouble, STR_BORE),
                         (STR_BOREAZEL, c3g.G3VectorDouble, STR_BOREAZEL),
                         (STR_POS, c3g.G3VectorDouble, STR_POS),
                         (STR_VEL, c3g.G3VectorDouble, STR_VEL)]
        if cache_common is None:
            cname = "{}_{}".format(STR_FLAG, STR_COMMON)
            common_fields.append((cname, c3g.G3VectorUnsignedChar, cname))
        else:
            common_fields.append(
                (cache_common, c3g.G3VectorUnsignedChar, cache_common))

        det_fields = None
        if cache_name is None:
            det_fields = [ ("{}_{}".format(STR_DET, d), d) \
                           for d in self.detectors ]
        else:
            det_fields = [ ("{}_{}".format(cache_name, d), d) \
                           for d in self.detectors ]

        flag_fields = None
        if cache_flag_name is None:
            flag_fields = [ ("{}_{}".format(STR_FLAG, d), d) \
                           for d in self.detectors ]
        else:
            flag_fields = [ ("{}_{}".format(cache_flag_name, d), d) \
                           for d in self.detectors ]

        ex_path = self._path
        if path is not None:
            ex_path = path

        ex_prefix = self._prefix
        if prefix is not None:
            ex_prefix = prefix

        if (ex_path is None) or (ex_prefix is None):
            raise RuntimeError("You must specify the TOD path and prefix, "
                               "either at construction or export")

        ex_files = [ os.path.join(ex_path,
                    "{}_{:08d}.g3".format(ex_prefix, x)) \
                    for x in self._file_sample_offs ]

        for ifile, (ffile,
                    foff) in enumerate(zip(ex_files, self._file_frame_offs)):
            nframes = None
            #print("  ifile = {}, ffile = {}, foff = {}".format(ifile, ffile, foff), flush=True)
            if ifile == len(ex_files) - 1:
                # we are at the last file
                nframes = len(self._frame_sizes) - foff
            else:
                # get number of frames in this file
                nframes = self._file_frame_offs[ifile + 1] - foff

            writer = None
            if self.mpicomm.rank == 0:
                writer = c3g.G3Writer(ffile)
                props = self.meta()
                props["units"] = self._units
                props["have_azel"] = self._have_azel
                #print("Writing props:")
                #print(props, flush=True)
                write_spt3g_obs(writer, props, self._detquats,
                                self.total_samples)

            # Collect data for all frames in the file in one go.

            frm_offsets = [ self._frame_sample_offs[foff+f] \
                           for f in range(nframes) ]
            frm_sizes = [ self._frame_sizes[foff+f] \
                           for f in range(nframes) ]

            # if self.mpicomm.rank == 0:
            #     print("  {} file {}".format(self._path, ifile), flush=True)
            #     print("    start frame = {}, nframes = {}".format(foff, nframes), flush=True)
            #     print("    frame offs = ",frm_offsets, flush=True)
            #     print("    frame sizes = ",frm_sizes, flush=True)

            fdata = s3utils.cache_to_frames(self,
                                            foff,
                                            nframes,
                                            frm_offsets,
                                            frm_sizes,
                                            common=common_fields,
                                            detector_fields=det_fields,
                                            flag_fields=flag_fields,
                                            detector_map=STR_DET,
                                            flag_map=STR_FLAG,
                                            units=self._units)

            if self.mpicomm.rank == 0:
                for fdt in fdata:
                    writer(fdt)
                del writer
            del fdata

        return
Code Example #18
    def stream(self, session, params):
        """stream(duration=None)

        **Process** - Generates example fake files organized in the same way
        as they would be in a regular smurf-stream. For end-to-end testing, we
        want an example of a pysmurf-ancillary file, and then regular g3 files
        that rotate at fixed intervals. The content of the files here doesn't
        match what actual G3 or pysmurf files look like, but the directory
        structure is the same.

        Parameters:
            duration (float, optional):
                If set, will stop stream after specified amount of time (sec).
        """
        session.set_status('starting')

        # Write initial smurf metadata
        action_time = time.time()
        files = ['freq.txt', 'mask.txt']
        for f in files:
            self._write_smurf_file(f,
                                   'take_g3_stream',
                                   action_time=action_time)

        end_time = None
        if params.get('duration') is not None:
            end_time = time.time() + params['duration']

        session.set_status('running')
        frame_gen = G3FrameGenerator(self.stream_id, self.sample_rate,
                                     self.nchans)
        session.data['session_id'] = frame_gen.session_id
        session.data['g3_files'] = []

        seq = 0
        fname = self._get_g3_filename(frame_gen.session_id, seq, makedirs=True)
        os.makedirs(os.path.dirname(fname), exist_ok=True)
        session.data['g3_files'].append(fname)
        writer = core.G3Writer(fname)
        file_start = time.time()

        writer(frame_gen.get_obs_frame())
        writer(frame_gen.get_status_frame())
        self.streaming = True
        while self.streaming:
            start = time.time()
            time.sleep(self.frame_len)
            stop = time.time()
            writer(frame_gen.get_data_frame(start, stop))

            if end_time is not None:
                if stop > end_time:
                    break

            if time.time() - file_start > self.file_duration:
                writer(core.G3Frame(core.G3FrameType.EndProcessing))
                seq += 1
                fname = self._get_g3_filename(frame_gen.session_id,
                                              seq,
                                              makedirs=True)
                os.makedirs(os.path.dirname(fname), exist_ok=True)
                session.data['g3_files'].append(fname)
                writer = core.G3Writer(fname)
                file_start = time.time()

        writer(core.G3Frame(core.G3FrameType.EndProcessing))

        return True, "Finished stream"
Code Example #19
# Test data. Exercise some complicated things (STL bits) that we don't
# necessarily have control over, mapping a few primitive types.
f = core.G3Frame()
f['Five'] = 5
v = core.G3VectorDouble([2.6, 7.2])
f['Vec'] = v
v = core.G3VectorInt([17, 42, 87])
f['VecInt'] = v
m = core.G3MapDouble()
m['Six'] = 6
m['GoingOnSixteen'] = 15.9
f['Map'] = m

if len(sys.argv) > 1:
    core.G3Writer(sys.argv[1])(f)
    sys.exit(0)

# For now, we test files from big-endian (PPC64) and little-endian (amd64)
# 64-bit systems. Should include some 32-bit ones.

for test in ['test-be.g3', 'test-le.g3']:
    print(test)
    testdata = core.G3Reader(os.path.join(testpath, test))(None)[0]

    assert (testdata['Five'] == f['Five'])
    assert (len(testdata['Vec']) == len(f['Vec']))
    for i in range(len(testdata['Vec'])):
        assert (testdata['Vec'][i] == f['Vec'][i])
    assert (len(testdata['VecInt']) == len(f['VecInt']))
    for i in range(len(testdata['VecInt'])):
        assert (testdata['VecInt'][i] == f['VecInt'][i])
Code Example #20
        return [f]

    def __call__(self, *args, **kwargs):
        return self.Process(*args, **kwargs)


if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(
        usage='This program can be used to convert SO HK Frames to the '
        'latest schema version.')
    parser.add_argument('--output-file', '-o', default='out.g3')
    parser.add_argument('--target-version', type=int)
    parser.add_argument('files', nargs='+', help=
                        "SO Housekeeping files to convert.")
    args = parser.parse_args()

    # Run me on a G3File containing a Housekeeping stream.
    core.set_log_level(core.G3LogLevel.LOG_INFO)

    translator_args = {}
    if args.target_version is not None:
        translator_args['target_version'] = args.target_version

    print(f'Streaming to {args.output_file}')
    p = core.G3Pipeline()
    p.Add(core.G3Reader(args.files))
    p.Add(HKTranslator(**translator_args))
    p.Add(core.G3Writer(args.output_file))
    p.Run()
Code Example #21
from spt3g import core, calibration, dfmux, mapmaker
import numpy as np

fsingle = core.G3File('/home/adama/SPT/spt_analysis/20190125_singleprecision/' + \
                      'singleprecision/64502043_tod_v13.g3')
fdouble = core.G3File('/home/adama/SPT/spt_analysis/20190125_singleprecision/' + \
                      'doubleprecision/64502043_tod_v13.g3')
write_mix = core.G3Writer('mixedprecision/64502043_v13_single_pixelpointing.g3')

fr_single = fsingle.next()
fr_double = fdouble.next()
while True:
    if fr_single.type == core.G3FrameType.Scan and \
       fr_double.type == core.G3FrameType.Scan:
        new_frame = core.G3Frame(core.G3FrameType.Scan)
        new_frame['DeflaggedTimestreams'] = fr_double['DeflaggedTimestreams']
        new_frame['OnlineRaDecRotation'] = fr_double['OnlineRaDecRotation']
        new_frame['PixelPointing'] = fr_single['PixelPointing']
        new_frame['TodWeights'] = fr_double['TodWeights']

        bolos_double = np.array(list(fr_double['DeflaggedTimestreams'].keys()))
        bolos_pointing = np.array(list(fr_single['PixelPointing'].keys()))
        bolos_toremove = np.setdiff1d(bolos_double, bolos_pointing)

        for bolo in bolos_toremove:
            if bolo in new_frame['DeflaggedTimestreams']:
                new_frame['DeflaggedTimestreams'].pop(bolo)

        print(new_frame)
        write_mix.Process(new_frame)
    else:
Code Example #22
    def test_00_basic(self):
        """Write a stream of HK frames and scan it for errors."""

        # Write a stream of HK frames.
        # (Inspect the output with 'spt3g-dump hk_out.g3 so3g'.)
        print('Streaming to %s' % test_file)
        w = core.G3Writer(test_file)

        # Create something to help us track the aggregator session.
        hksess = so3g.hk.HKSessionHelper(session_id=None,
                                         hkagg_version=2,
                                         description="Test HK data.")

        # Register a data provider.
        prov_id = hksess.add_provider(
            description='Fake data for the real world.')

        # Start the stream -- write the initial session and status frames.
        w.Process(hksess.session_frame())
        w.Process(hksess.status_frame())

        # Add a bunch of data frames
        t_next = time.time()
        for i in range(10):
            f = hksess.data_frame(prov_id=prov_id, timestamp=t_next)
            hk = core.G3TimesampleMap()
            speed = [1.2, 1.2, 1.3, 1.2, 1.3]
            hk.times = [
                core.G3Time(_t * core.G3Units.second)
                for _t in t_next + np.arange(len(speed))
            ]
            hk['position'] = core.G3VectorDouble(np.arange(len(speed)))
            hk['speed'] = core.G3VectorDouble(speed)
            hk['error_bits'] = core.G3VectorInt([10] * len(speed))
            hk['mode_str'] = core.G3VectorString(['ok'] * len(speed))
            t_next += len(speed)
            f['blocks'].append(hk)
            f['block_names'].append('main_block')
            w.Process(f)

        w.Flush()
        del w

        print('Stream closed.\n\n')

        # Now play them back...
        print('Reading back:')
        for f in core.G3File(test_file):
            ht = f.get('hkagg_type')
            if ht == so3g.HKFrameType.session:
                print('Session: %i' % f['session_id'])
            elif ht == so3g.HKFrameType.status:
                print('  Status update: %i providers' % (len(f['providers'])))
            elif ht == so3g.HKFrameType.data:
                print('  Data: %i blocks' % len(f['blocks']))
                for i, block in enumerate(f['blocks']):
                    print('    Block %i' % i)
                    for k, v in block.items():
                        print('    %s' % k, v)

        # Scan and validate.
        print()
        print('Running HKScanner on the test data...')
        scanner = so3g.hk.HKScanner()
        pipe = core.G3Pipeline()
        pipe.Add(core.G3Reader(test_file))
        pipe.Add(scanner)
        pipe.Run()

        print('Stats: ', scanner.stats)
        print('Providers: ', scanner.providers)

        self.assertEqual(scanner.stats['concerns']['n_error'], 0)
        self.assertEqual(scanner.stats['concerns']['n_warning'], 0)
Code Example #23
File: test_hkagg.py Project: jit9/so3g
    def test_00_basic(self):
        """Write a stream of HK frames and scan it for errors."""

        # Write a stream of HK frames.
        # (Inspect the output with 'spt3g-dump hk_out.g3 so3g'.)
        print('Streaming to %s' % test_file)
        w = core.G3Writer(test_file)

        # Create something to help us track the aggregator session.
        hksess = so3g.hk.HKSessionHelper(session_id=None,
                                         description="Test HK data.")

        # Register a data provider.
        prov_id = hksess.add_provider(
            description='Fake data for the real world.')

        # Start the stream -- write the initial session and status frames.
        w.Process(hksess.session_frame())
        w.Process(hksess.status_frame())

        # Add a bunch of data frames
        t_next = time.time()
        for i in range(10):
            f = hksess.data_frame(prov_id=prov_id, timestamp=t_next)
            hk = so3g.IrregBlockDouble()
            hk.prefix = 'hwp_'
            hk.data['position'] = [1, 2, 3, 4, 5]
            hk.data['speed'] = [1.2, 1.2, 1.3, 1.2, 1.3]
            hk.t = t_next + np.arange(len(hk.data['speed']))
            t_next += len(hk.data['speed'])
            f['blocks'].append(hk)
            w.Process(f)

        w.Flush()
        del w

        print('Stream closed.\n\n')

        # Now play them back...
        print('Reading back:')
        for f in core.G3File(test_file):
            ht = f.get('hkagg_type')
            if ht == so3g.HKFrameType.session:
                print('Session: %i' % f['session_id'])
            elif ht == so3g.HKFrameType.status:
                print('  Status update: %i providers' % (len(f['providers'])))
            elif ht == so3g.HKFrameType.data:
                print('  Data: %i blocks' % len(f['blocks']))
                for block in f['blocks']:
                    for k, v in block.data.items():
                        print('    %s%s' % (block.prefix, k), v)

        # Scan and validate.
        print()
        print('Running HKScanner on the test data...')
        scanner = so3g.hk.HKScanner()
        pipe = core.G3Pipeline()
        pipe.Add(core.G3Reader(test_file))
        pipe.Add(scanner)
        pipe.Run()

        print('Stats: ', scanner.stats)
        print('Providers: ', scanner.providers)

        self.assertEqual(scanner.stats['concerns']['n_error'], 0)
        self.assertEqual(scanner.stats['concerns']['n_warning'], 0)
Code Example #24
            for spectrum in ['TT', 'EE', 'BB']:
                ell_range_nobias = (Cls[0.0]['cls']['ell'] > 500) & \
                                   (Cls[0.0]['cls']['ell'] < 2000)
                ell_range_bias   = (Cls[bias_mag]['cls']['ell'] > 500) & \
                                   (Cls[bias_mag]['cls']['ell'] < 2000)
                cal_factor = np.mean(Cls[0.0]['cls'][spectrum][ell_range_nobias]) / \
                             np.mean(Cls[bias_mag]['cls'][spectrum][ell_range_bias])
                Cls[bias_mag]['cal'][spectrum] = cal_factor
                Cls[bias_mag]['cls_normalized'][
                    spectrum] = Cls[bias_mag]['cls'][spectrum] * cal_factor

        # save the maps to a G3 file
        if args.save_maps:
            fname_stub = 'linearbias_{:.1f}percentPerDeg'.format(bias_mag)
            w = core.G3Writer('{}_{}.g3'.format(
                fname_stub,
                os.path.splitext(os.path.basename(args.simskies[jsky]))[0]))
            w.Process(map_fr)
            w.Process(core.G3Frame(core.G3FrameType.EndProcessing))

        if args.fit_cosmology:
            if args.norm_to_unbiased and bias_mag != 0:
                res = minimize(neg2LogL, [67.87, 0.022277, 0.11843],
                               args=(Cls[bias_mag]['cls_normalized']),
                               method='powell',
                               options={
                                   'xtol': 1e-6,
                                   'disp': True
                               })
            else:
                res = minimize(neg2LogL, [67.87, 0.022277, 0.11843],
Code Example #25
    newframe = core.G3Frame(core.G3FrameType.Calibration)
    for field in old_boloprops:
        if field != 'BolometerProperties':
            newframe[field] = old_boloprops[field]
    newframe['BolometerProperties'] = calibration.BolometerPropertiesMap()

    print(len(old_boloprops['BolometerProperties']))

    for bolo in old_boloprops['BolometerProperties'].keys():
        if '/' in bolo:
            newbolo = bolo.split('/')[1]
        else:
            newbolo = bolo

        if newbolo in nominal_boloprops['NominalBolometerProperties'].keys():
            newframe['BolometerProperties'][bolo] = old_boloprops[
                'BolometerProperties'][bolo]
            newframe['BolometerProperties'][
                bolo].pol_angle = nominal_boloprops[
                    'NominalBolometerProperties'][newbolo].pol_angle
            #old_boloprops['BolometerProperties'][bolo].pol_angle = nominal_boloprops['NominalBolometerProperties'][newbolo].pol_angle
        else:
            old_boloprops['BolometerProperties'].pop(bolo)
            print('{} not in new nominal bolometer properties!'.format(bolo))
    print(len(old_boloprops['BolometerProperties']))

    writer = core.G3Writer(fnames_dict['new'])
    writer.Process(newframe)
    writer.Process(core.G3Frame(core.G3FrameType.EndProcessing))
Code Example #26
    def _export_observation(self, obs, cgroup,
                            detgroup=None, detectors=None, keep_offsets=False):
        """ Export observation in one or more frame files
        """

        grouprank = 0
        if cgroup is not None:
            grouprank = cgroup.rank

        # Observation information.  Anything here that is a simple data
        # type will get written to the observation frame.
        props = dict()
        for k, v in obs.items():
            if isinstance(v, (int, str, bool, float)):
                props[k] = v

        # Every observation must have a name...
        obsname = obs["name"]

        # The TOD
        tod = obs["tod"]
        nsamp = tod.total_samples
        if detectors is None:
            detquat = tod.detoffset()
            detindx = tod.detindx
            detnames = tod.detectors
        else:
            detquat_temp = tod.detoffset()
            detindx_temp = tod.detindx
            detquat = {}
            detindx = {}
            detnames = []
            toddets = set(tod.detectors)
            for det in detectors:
                if det not in toddets:
                    continue
                detnames.append(det)
                detquat[det] = detquat_temp[det]
                detindx[det] = detindx_temp[det]
        ndets = len(detquat)

        # Get any other metadata from the TOD
        props.update(tod.meta)

        # First process in the group makes the output directory
        obsdir = os.path.join(self._outdir, obsname)
        if cgroup.rank == 0:
            if not os.path.isdir(obsdir):
                os.makedirs(obsdir)
        cgroup.barrier()

        detranks, sampranks = tod.grid_size

        framesizes = self._get_framesizes(tod, obs, nsamp, keep_offsets)

        (flavors, flavor_type, flavor_maptype, copy_flavors
        ) = self._get_flavors(tod, detnames, grouprank)

        (frame_sample_offs, file_sample_offs, file_frame_offs
        ) = self._get_offsets(cgroup, grouprank, keep_offsets, len(detquat),
                              len(copy_flavors), framesizes)

        if detgroup is None:
            prefix = self._prefix
        else:
            prefix = "{}_{}".format(self._prefix, detgroup)
        ex_files = [os.path.join(obsdir,
                    "{}_{:08d}.g3".format(prefix, x))
                    for x in file_sample_offs]

        # Loop over each frame file.  Write the header frames and then
        # gather the data from all processes before writing the scan
        # frames.

        for ifile, (ffile, foff) in enumerate(zip(ex_files,
                                              file_frame_offs)):
            nframes = None
            # print("  ifile = {}, ffile = {}, foff = {}"
            #       .format(ifile, ffile, foff), flush=True)
            if ifile == len(ex_files) - 1:
                # we are at the last file
                nframes = len(framesizes) - foff
            else:
                # get number of frames in this file
                nframes = file_frame_offs[ifile + 1] - foff

            writer = None
            if grouprank == 0:
                writer = core3g.G3Writer(ffile)
                self._write_obs(writer, props, detindx)
                if "noise" in obs:
                    self._write_precal(writer, detquat, obs["noise"])
                else:
                    self._write_precal(writer, detquat, None)

            # Collect data for all frames in the file in one go.

            frm_offsets = [frame_sample_offs[foff + f]
                           for f in range(nframes)]
            frm_sizes = [framesizes[foff + f] for f in range(nframes)]

            if grouprank == 0 and self._verbose:
                print("  {} file {} detector group {}".format(
                    obsdir, ifile, detgroup), flush=True)
                print("    start frame = {}, nframes = {}"
                      .format(foff, nframes), flush=True)
                print("    frame offs = ", frm_offsets, flush=True)
                print("    frame sizes = ", frm_sizes, flush=True)

            fdata = tod_to_frames(
                tod, foff, nframes, frm_offsets, frm_sizes,
                cache_signal=self._cache_name,
                cache_flags=self._cache_flag_name,
                cache_common_flags=self._cache_common,
                copy_common=None,
                copy_detector=copy_flavors,
                units=self._units,
                dets=detnames,
                mask_flag_common=self._mask_flag_common,
                mask_flag=self._mask_flag,
                compress=self._compress,
            )

            if grouprank == 0:
                for fdt in fdata:
                    writer(fdt)
                del writer
            del fdata

        return
Code Example #27
File: toast.py Project: giuspugl/sotodlib
    def exec(self, data):
        """Export data to a directory tree of so3g frames.

        For errors that prevent the export, this function will directly call
        MPI Abort() rather than raise exceptions.  This could be changed in
        the future if additional logic is implemented to ensure that all
        processes raise an exception when one process encounters an error.

        Args:
            data (toast.Data): The distributed data.

        """
        # the two-level toast communicator
        comm = data.comm
        # the global communicator
        cworld = comm.comm_world
        # the communicator within the group
        cgroup = comm.comm_group
        # the communicator with all processes with
        # the same rank within their group
        crank = comm.comm_rank

        # One process checks the path
        if cworld.rank == 0:
            if not os.path.isdir(self._outdir):
                os.makedirs(self._outdir)
        cworld.barrier()

        for obs in data.obs:
            # Observation information.  Anything here that is a simple data
            # type will get written to the observation frame.
            props = dict()
            for k, v in obs.items():
                if isinstance(v, (int, str, bool, float)):
                    props[k] = v

            # Every observation must have a name...
            obsname = obs["name"]

            # The TOD
            tod = obs["tod"]
            nsamp = tod.total_samples
            detquat = tod.detoffset()
            detindx = tod.detindx
            ndets = len(detquat)
            detnames = tod.detectors

            # Get any other metadata from the TOD
            props.update(tod.meta())

            # First process in the group makes the output directory
            obsdir = os.path.join(self._outdir, obsname)
            if cgroup.rank == 0:
                if not os.path.isdir(obsdir):
                    os.makedirs(obsdir)
            cgroup.barrier()

            detranks, sampranks = tod.grid_size

            # Determine frame sizes based on the data distribution
            framesizes = None
            if self._usechunks:
                framesizes = tod.total_chunks
            elif self._useintervals:
                if "intervals" not in obs:
                    raise RuntimeError(
                        "Observation does not contain intervals, cannot \
                        distribute using them")
                framesizes = intervals_to_chunklist(obs["intervals"], nsamp)
            if framesizes is None:
                framesizes = [nsamp]

            # Examine all the cache objects and find the set of prefixes
            flavors = set()
            flavor_type = dict()
            flavor_maptype = dict()
            pat = re.compile(r"^(.*?)_(.*)")
            for nm in list(tod.cache.keys()):
                mat = pat.match(nm)
                if mat is not None:
                    pref = mat.group(1)
                    md = mat.group(2)
                    if md in detnames:
                        # This cache field has the form <prefix>_<det>
                        if pref not in flavor_type:
                            ref = tod.cache.reference(nm)
                            if ref.dtype == np.dtype(np.float64):
                                flavors.add(pref)
                                flavor_type[pref] = core3g.G3Timestream
                                flavor_maptype[pref] = core3g.G3TimestreamMap
                            elif ref.dtype == np.dtype(np.int32):
                                flavors.add(pref)
                                flavor_type[pref] = core3g.G3VectorInt
                                flavor_maptype[pref] = core3g.G3MapVectorInt
                            elif ref.dtype == np.dtype(np.uint8):
                                flavors.add(pref)
                                flavor_type[pref] = so3g.IntervalsInt
                                flavor_maptype[pref] = so3g.MapIntervalsInt
                            else:
                                msg = "Cache prefix {} has unsupported \
                                    data type.  Skipping export"

                                raise RuntimeError(msg)
            flavors.discard(self._cache_name)
            flavors.discard(self._cache_flag_name)
            copy_flavors = [(x, flavor_type[x], flavor_maptype[x],
                             "signal_{}".format(x)) for x in flavors]

            print("found cache flavors ", flavors, flush=True)

            # Given the dimensions of this observation, compute the frame
            # file sizes and all relevant offsets.

            frame_sample_offs = None
            file_sample_offs = None
            file_frame_offs = None
            if cgroup.rank == 0:
                # Compute the frame file breaks.  We ignore the observation
                # and calibration frames since they are small.
                sampbytes = self._bytes_per_sample(len(detquat), len(flavors))

                file_sample_offs, file_frame_offs, frame_sample_offs = \
                    s3utils.compute_file_frames(
                        sampbytes, framesizes,
                        file_size=self._target_framefile)

            file_sample_offs = cgroup.bcast(file_sample_offs, root=0)
            file_frame_offs = cgroup.bcast(file_frame_offs, root=0)
            frame_sample_offs = cgroup.bcast(frame_sample_offs, root=0)

            ex_files = [
                os.path.join(obsdir, "{}_{:08d}.g3".format(self._prefix, x))
                for x in file_sample_offs
            ]

            # Loop over each frame file.  Write the header frames and then
            # gather the data from all processes before writing the scan
            # frames.

            for ifile, (ffile,
                        foff) in enumerate(zip(ex_files, file_frame_offs)):
                nframes = None
                print("  ifile = {}, ffile = {}, foff = {}".format(
                    ifile, ffile, foff),
                      flush=True)
                if ifile == len(ex_files) - 1:
                    # we are at the last file
                    nframes = len(framesizes) - foff
                else:
                    # get number of frames in this file
                    nframes = file_frame_offs[ifile + 1] - foff

                writer = None
                if cgroup.rank == 0:
                    writer = core3g.G3Writer(ffile)
                    self._write_obs(writer, props, detindx)
                    if "noise" in obs:
                        self._write_precal(writer, detquat, obs["noise"])
                    else:
                        self._write_precal(writer, detquat, None)

                # Collect data for all frames in the file in one go.

                frm_offsets = [
                    frame_sample_offs[foff + f] for f in range(nframes)
                ]
                frm_sizes = [framesizes[foff + f] for f in range(nframes)]

                if cgroup.rank == 0:
                    print("  {} file {}".format(obsdir, ifile), flush=True)
                    print("    start frame = {}, nframes = {}".format(
                        foff, nframes),
                          flush=True)
                    print("    frame offs = ", frm_offsets, flush=True)
                    print("    frame sizes = ", frm_sizes, flush=True)

                fdata = tod_to_frames(tod,
                                      foff,
                                      nframes,
                                      frm_offsets,
                                      frm_sizes,
                                      cache_signal=self._cache_name,
                                      cache_flags=self._cache_flag_name,
                                      cache_common_flags=self._cache_common,
                                      copy_common=None,
                                      copy_detector=copy_flavors,
                                      units=self._units)

                if cgroup.rank == 0:
                    for fdt in fdata:
                        writer(fdt)
                    del writer
                del fdata

        return