Example #1
 def test_reinit_from_numpy_array(self):
     t0 = core.G3Time('2019-01-01T12:30:00')
     timestamps = np.linspace(t0.time,
                              t0.time + 1e7 * SEC,
                              3000,
                              dtype='int64')
     vectime = core.G3VectorTime(timestamps)
     revectime = core.G3VectorTime(np.asarray(vectime))
     assert ((np.asarray(revectime) == timestamps).all())
Example #2
def _concat_hk_stream(blocks_in):
    """Concatenates an ordered list of compatible HK blocks into a single
    frame.  Each block should be a valid G3TimesampleMap with the same
    keys.

    Returns a single G3TimesampleMap with all fields concatenated.

    """
    blk = core.G3TimesampleMap()
    blk.times = core.G3VectorTime(blocks_in[0].times)
    fields = list(blocks_in[0].keys())
    for f in fields:
        f_short = f.split('.')[-1]
        blk[f] = blocks_in[0][f_short]
    for b in blocks_in[1:]:
        blk.times.extend(b.times)
    for f in fields:
        f_short = f.split('.')[-1]
        for _type in _SCHEMA_V1_BLOCK_TYPES:
            if isinstance(blocks_in[0][f_short], _type):
                break
        else:
            raise RuntimeError('Field "%s" is of unsupported type %s.' %
                               (f_short, type(blocks_in[0][f_short])))
        for b in blocks_in[1:]:
            blk[f].extend(b[f_short])
    return blk
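A minimal usage sketch for the function above, assuming `core` is
`spt3g.core` and `_concat_hk_stream` is importable from its module; the
field name and the `make_block` helper are illustrative, not part of the
original code:

# Build two compatible single-field blocks and concatenate them.
from spt3g import core

SEC = core.G3Units.seconds

def make_block(t_start, values):
    # Hypothetical helper: one field, one sample per second.
    b = core.G3TimesampleMap()
    b.times = core.G3VectorTime(
        [core.G3Time(t_start + i * SEC) for i in range(len(values))])
    b['temp'] = core.G3VectorDouble(values)
    return b

b0 = make_block(0, [1.0, 2.0, 3.0])
b1 = make_block(3 * SEC, [4.0, 5.0])
combined = _concat_hk_stream([b0, b1])
assert len(combined.times) == 5
assert len(combined['temp']) == 5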
Example #3
 def test_from_numpy_array(self):
     t0 = core.G3Time('2019-01-01T12:30:00')
     timestamps = np.linspace(t0.time, t0.time + 1e7 * SEC, 3000)
     vectime = core.G3VectorTime(timestamps)
     assert (vectime[0] == t0)
     assert (vectime[-1] == core.G3Time(timestamps[-1]))
     assert (len(vectime) == len(timestamps))
Example #4
    def test_11_fallback(self):
        """Test that the code does not fail to serialize short segments or
        highly random data.

        """
        # Short segments
        for nsamp in range(1, 20):
            ts = self._get_ts(1, nsamp, sigma=0, dtype='int32')
            ts.encode()
            self._readback_compare(ts)

        # Random time vector.
        n = 200
        ts = self._get_ts(1, n, sigma=0, dtype='int32')
        ts.times = core.G3VectorTime((np.random.uniform(size=n * 8) *
                                      256).astype('uint8').view(dtype='int64'))
        self._readback_compare(ts)

        # Random data array.
        n = 200
        ts = self._get_ts(1, n, sigma=0, dtype='int64')
        ts.data = (np.random.uniform(size=n * 8) *
                   256).astype('uint8').view(dtype='int64').reshape(1, -1)
        self._readback_compare(ts)

        # Small n_det (note: 1-10 weren't causing a problem but 11+ were...)
        for n_det in range(1, 20):
            ts = self._get_ts(n_det, 1, sigma=0, dtype='int64')
            ts.data = (np.random.uniform(size=n_det*8) * 256).astype('uint8') \
                .view(dtype='int64').reshape(-1, 1)
            ts.encode()
            self._readback_compare(ts)
Example #5
def get_g3_time(unix_time):
    """Convert a double or numpy array of floats to G3Time or
    G3VectorTime."""
    src = None
    if isinstance(unix_time, core.G3VectorDouble):
        src = (np.array(unix_time) * core.G3Units.seconds).astype('int')
    elif isinstance(unix_time, np.ndarray) and unix_time.ndim == 1:
        src = (unix_time * core.G3Units.seconds).astype('int')
    if src is not None:
        return core.G3VectorTime([core.G3Time(t) for t in src])
    return core.G3Time(int(unix_time * core.G3Units.seconds))
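A quick usage sketch (timestamp values illustrative):

import numpy as np
from spt3g import core

t = get_g3_time(1577881800.0)                # float -> G3Time
tv = get_g3_time(np.array([0.0, 1.0, 2.0]))  # 1-d array -> G3VectorTime
assert isinstance(t, core.G3Time)
assert isinstance(tv, core.G3VectorTime) and len(tv) == 3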
Example #6
def get_v2_stream():
    """Generate some example HK data, in schema version 2.

    Returns a list of frames constituting a valid version 2 HK stream.

    """
    # Create something to help us track the aggregator session.
    hksess = so3g.hk.HKSessionHelper(session_id=1234,
                                     hkagg_version=2,
                                     description="Test HK data.")

    # Register a data provider.
    prov_id = hksess.add_provider(
        description='Fake data for the real world.')

    # Start the stream -- write the initial session and status frames.
    frames = [
        hksess.session_frame(),
        hksess.status_frame(),
    ]

    # Now make a data frame.
    f = hksess.data_frame(prov_id=prov_id)

    # Add some data blocks.
    hk = core.G3TimesampleMap()
    hk.times = core.G3VectorTime([core.G3Time(i*core.G3Units.seconds) for i in [0, 1, 2, 3, 4]])
    hk['speed'] = core.G3VectorDouble([1.2, 1.2, 1.2, 1.2, 1.2])
    f['blocks'].append(hk)
    f['block_names'].append('group0')

    hk = core.G3TimesampleMap()
    hk.times = core.G3VectorTime([core.G3Time(i*core.G3Units.seconds) for i in [0, 1, 2, 3, 4]])
    hk['position'] = core.G3VectorInt([1, 2, 3, 4, 5])
    hk['mode'] = core.G3VectorString(['going', 'going', 'going', 'going', 'gone/'])
    f['blocks'].append(hk)
    f['block_names'].append('group1')

    frames.append(f)
    return frames
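The frames returned above can be written to disk with spt3g's standard
G3Writer; a sketch (the output filename is illustrative):

from spt3g import core

writer = core.G3Writer('hk_v2_example.g3')
for frame in get_v2_stream():
    writer.Process(frame)
del writer  # destroying the writer flushes and closes the file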
Example #7
def get_test_block(length, keys=['a', 'b', 'c', 'd'], offset=0, ordered=True):
    type_cycle = [(core.G3VectorDouble, float), (core.G3VectorInt, int),
                  (core.G3VectorString, str), (core.G3VectorBool, bool)]
    t0 = core.G3Time('2019-01-01T12:30:00') + offset * SEC
    m = core.G3TimesampleMap()
    times = np.arange(length)
    if not ordered:
        np.random.shuffle(times)
    m.times = core.G3VectorTime(t0 + times * SEC)
    for i, k in enumerate(keys):
        y = (np.random.uniform(size=length) * 100).astype(int)
        constructor, cast_func = type_cycle[i % len(type_cycle)]
        vect = constructor(list(map(cast_func, y)))
        m[k] = vect
    return m
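For instance, two consecutive blocks can be generated by offsetting the
second one (a sketch; samples here are 1 s apart and `offset` is in
seconds, so offset=100 starts right where a 100-sample block ends):

m0 = get_test_block(100)
m1 = get_test_block(100, offset=100)
assert m1.times[0].time - m0.times[0].time == 100 * SEC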
Example #8
def g3_cast(data, time=False):
    """
    Casts a generic Python value into the corresponding G3 type:
        int   -> G3Int
        str   -> G3String
        float -> G3Double

    and a list of type X goes to G3VectorX. If ``time`` is set to True, the
    value is instead converted to G3Time or G3VectorTime, on the assumption
    that ``data`` consists of unix timestamps.

    Args:
        data (int, str, float, or list):
            Generic data to be converted to a corresponding G3Type.
        time (bool, optional):
            If True, will assume data contains unix timestamps and try to cast
            to G3Time or G3VectorTime.

    Returns:
        g3_data:
            Corresponding G3 datatype.
    """
    is_list = isinstance(data, list)
    if is_list:
        dtype = type(data[0])
        if not all(isinstance(d, dtype) for d in data):
            raise TypeError("Data list contains varying types!")
    else:
        dtype = type(data)
    if dtype not in _g3_casts.keys():
        raise TypeError("g3_cast does not support type {}. Type must"
                        "be one of {}".format(dtype, _g3_casts.keys()))
    if is_list:
        if time:
            return core.G3VectorTime(
                list(map(lambda t: core.G3Time(t * core.G3Units.s), data)))
        else:
            cast = _g3_list_casts[type(data[0])]
            return cast(data)
    else:
        if time:
            return core.G3Time(data * core.G3Units.s)
        else:
            cast = _g3_casts[type(data)]
            return cast(data)
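Usage sketch (assumes the module-level ``_g3_casts`` and ``_g3_list_casts``
tables map int/float/str to their G3 counterparts, as the code implies;
values illustrative):

x = g3_cast(5)                        # -> core.G3Int
y = g3_cast([1.0, 2.5, 3.0])          # -> core.G3VectorDouble
t = g3_cast(1577881800.0, time=True)  # -> core.G3Time
tv = g3_cast([0.0, 1.0], time=True)   # -> core.G3VectorTime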
Example #9
    def get_data_frame(self, start, stop):

        times = np.arange(start, stop, 1. / self.sample_rate)
        nsamps = len(times)
        chans = np.arange(self.nchans)
        names = [f'r{ch:0>4}' for ch in chans]

        count_per_phi0 = 2**16
        data = np.zeros((self.nchans, nsamps), dtype=np.int32)
        data += count_per_phi0 * chans[:, None]
        data += (count_per_phi0 * 0.2 *
                 np.sin(2 * np.pi * 8 * times)).astype(int)
        data += (count_per_phi0 *
                 np.random.normal(0, 0.03, (self.nchans, nsamps))).astype(int)

        fr = core.G3Frame(core.G3FrameType.Scan)

        g3times = core.G3VectorTime(times * core.G3Units.s)
        fr['data'] = so3g.G3SuperTimestream(names, g3times, data)

        primary_names = [
            'UnixTime', 'FluxRampIncrement', 'FluxRampOffset', 'Counter0',
            'Counter1', 'Counter2', 'AveragingResetBits', 'FrameCounter',
            'TESRelaySetting'
        ]
        primary_data = np.zeros((len(primary_names), nsamps), dtype=np.int64)
        primary_data[0, :] = (times * 1e9).astype(int)
        fr['primary'] = so3g.G3SuperTimestream(primary_names, g3times,
                                               primary_data)

        tes_bias_names = [f'bias{bg:0>2}' for bg in range(NBIASLINES)]
        bias_data = np.zeros((NBIASLINES, nsamps), dtype=np.int32)
        fr['tes_biases'] = so3g.G3SuperTimestream(tes_bias_names, g3times,
                                                  bias_data)

        fr['timing_paradigm'] = 'Low Precision'
        fr['num_samples'] = nsamps

        self.tag_frame(fr)
        return fr
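The G3SuperTimestream construction used above can be exercised standalone;
a minimal sketch (names and shapes illustrative; integer data needs no
``quanta``, while float data would, per the _get_ts example below):

import numpy as np
import so3g
from spt3g import core

names = ['r0000', 'r0001']
g3times = core.G3VectorTime(np.arange(5) * core.G3Units.s)
data = np.zeros((len(names), 5), dtype=np.int32)
ts = so3g.G3SuperTimestream(names, g3times, data)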
Example #10
 def _get_ts(self,
             nchans,
             ntimes,
             sigma=256,
             dtype='int32',
             raw=False,
             seed=None):
     if seed is not None:
         np.random.seed(seed)
     names = ['x%i' % i for i in range(nchans)]
     times = core.G3VectorTime(
         (1680000000 + np.arange(ntimes) * .005) * core.G3Units.s)
     data = (np.random.normal(size=(len(names), len(times))) *
             sigma).astype(dtype)
     if raw:
         return names, times, data
     ts = so3g.G3SuperTimestream()
     ts.names = names
     ts.times = times
     if dtype in FLOAT_DTYPES:
         ts.quanta = np.ones(len(names))
     ts.data = data
     return ts
Example #11
 def test_copy_constructor(self):
     t0 = core.G3VectorTime(np.array([100000000, 200000000]))
     t1 = core.G3VectorTime(t0)
     assert ((np.asarray(t0) == np.asarray(t1)).all())
Example #12
 def test_from_list(self):
     t0 = core.G3Time('2019-01-01T12:30:00')
     vectime = core.G3VectorTime([t0, t0 + 10 * SEC])
     assert (vectime[0].time == t0.time)
     assert (vectime[1].time == t0.time + 10 * SEC)
Example #13
halfscan = 10  # degrees

for i in range(10):
    # Number of samples
    n = int(halfscan / v_az / dt)
    # Vector of unix timestamps
    t = frame_time + dt * np.arange(n)
    # Vector of az and el
    az = v_az * dt * np.arange(n)
    if i % 2:
        az = -az
    el = az * 0 + 50.

    # Construct a "block", which is a named G3TimesampleMap.
    block = core.G3TimesampleMap()
    block.times = core.G3VectorTime(
        [core.G3Time(_t * core.G3Units.s) for _t in t])
    block['az'] = core.G3VectorDouble(az)
    block['el'] = core.G3VectorDouble(el)

    # Create an output data frame template associated with this
    # provider.
    frame = session.data_frame(prov_id)

    # Add the block and block name to the frame, and write it.
    frame['block_names'].append('pointing')
    frame['blocks'].append(block)
    writer.Process(frame)

    # For next iteration.
    frame_time += n * dt
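For reference, the loop above assumes a preamble roughly like the
following (all names and values are illustrative, mirroring the
HKSessionHelper usage in Example #6):

import numpy as np
import so3g
from spt3g import core

v_az = 1.5                  # az scan speed, deg/s
dt = 0.005                  # sample spacing, s
frame_time = 1577881800.0   # unix time of first sample

session = so3g.hk.HKSessionHelper(session_id=1234,
                                  hkagg_version=2,
                                  description="Pointing sim.")
prov_id = session.add_provider(description='pointing')
writer = core.G3Writer('pointing_example.g3')
writer.Process(session.session_frame())
writer.Process(session.status_frame())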
Example #14
def cache_to_frames(tod,
                    start_frame,
                    n_frames,
                    frame_offsets,
                    frame_sizes,
                    common=None,
                    detector_fields=None,
                    flag_fields=None,
                    detector_map="detectors",
                    flag_map="flags",
                    units=None):
    """Gather all data from the distributed cache for a single frame.

    Args:
        tod (toast.TOD): instance of a TOD class.
        start_frame (int): the first frame index.
        n_frames (int): the number of frames.
        frame_offsets (list): list of the first samples of all frames.
        frame_sizes (list): list of the number of samples in each frame.
        common (tuple): (cache name, G3 type, frame name) of each common
            field.
        detector_fields (tuple): (cache name, frame name) of each detector
            field.
        flag_fields (tuple): (cache name, frame name) of each flag field.
        detector_map (str): the name of the frame timestream map.
        flag_map (str): the name of the frame flag map.
        units: G3 units of the detector data.

    """
    # Local sample range
    local_first = tod.local_samples[0]
    nlocal = tod.local_samples[1]

    # The process grid
    detranks, sampranks = tod.grid_size
    rankdet, ranksamp = tod.grid_ranks

    # Helper function:
    # For a given timestream, the gather is done across the
    # process row which contains the specific detector, or across
    # the first process row for common telescope data.
    def gather_field(prow, fld, indx, cacheoff, ncache):
        gproc = 0
        gdata = None

        # We are going to allreduce this later, so that every process
        # knows the dimensions of the field.
        allnnz = 0

        if rankdet == prow:
            #print("  proc {} doing gather of {}".format(tod.mpicomm.rank, fld), flush=True)
            # This process is in the process row that has this field,
            # participate in the gather operation.
            pdata = None
            # Find the data type and shape from the cache object
            mtype = None
            ref = tod.cache.reference(fld)
            nnz = 1
            if (len(ref.shape) > 1) and (ref.shape[1] > 0):
                nnz = ref.shape[1]
            if ref.dtype == np.dtype(np.float64):
                mtype = MPI.DOUBLE
            elif ref.dtype == np.dtype(np.int64):
                mtype = MPI.INT64_T
            elif ref.dtype == np.dtype(np.int32):
                mtype = MPI.INT32_T
            elif ref.dtype == np.dtype(np.uint8):
                mtype = MPI.UINT8_T
            else:
                msg = "Cannot gather cache field {} of type {}"\
                    .format(fld, ref.dtype)
                raise RuntimeError(msg)
            #print("field {}:  proc {} has nnz = {}".format(fld, tod.mpicomm.rank, nnz), flush=True)
            pz = 0
            if cacheoff is not None:
                pdata = ref.flatten()[nnz * cacheoff:nnz * (cacheoff + ncache)]
                pz = nnz * ncache
            else:
                pdata = np.zeros(0, dtype=ref.dtype)

            psizes = tod.grid_comm_row.gather(pz, root=0)
            disp = None
            totsize = None
            if ranksamp == 0:
                #print("Gathering field {} with type {}".format(fld, mtype), flush=True)
                # We are the process collecting the gathered data.
                gproc = tod.mpicomm.rank
                allnnz = nnz
                # Compute the displacements into the receive buffer.
                disp = [0]
                for ps in psizes[:-1]:
                    last = disp[-1]
                    disp.append(last + ps)
                totsize = np.sum(psizes)
                # allocate receive buffer
                gdata = np.zeros(totsize, dtype=ref.dtype)
                #print("Gatherv psizes = {}, disp = {}".format(psizes, disp), flush=True)

            #print("field {}:  proc {} start Gatherv".format(fld, tod.mpicomm.rank), flush=True)
            tod.grid_comm_row.Gatherv(pdata, [gdata, psizes, disp, mtype],
                                      root=0)
            #print("field {}:  proc {} finish Gatherv".format(fld, tod.mpicomm.rank), flush=True)

            del disp
            del psizes
            del pdata
            del ref

        # Now send this data to the root process of the whole communicator.
        # Only one process (the first one in process row "prow") has data
        # to send.

        # Create a unique message tag
        mtag = 10 * indx

        #print("  proc {} hit allreduce of gproc".format(tod.mpicomm.rank), flush=True)
        # All processes find out which one did the gather
        gproc = tod.mpicomm.allreduce(gproc, MPI.SUM)
        # All processes find out the field dimensions
        allnnz = tod.mpicomm.allreduce(allnnz, MPI.SUM)
        #print("  proc {} for field {}, gproc = {}".format(tod.mpicomm.rank, fld, gproc), flush=True)

        #print("field {}:  proc {}, gatherproc = {}, allnnz = {}".format(fld, tod.mpicomm.rank, gproc, allnnz), flush=True)

        rdata = None
        if gproc == 0:
            if gdata is not None:
                if allnnz == 1:
                    rdata = gdata
                else:
                    rdata = gdata.reshape((-1, allnnz))
        else:
            # Data not yet on rank 0
            if tod.mpicomm.rank == 0:
                # Receive data from the first process in this row
                #print("  proc {} for field {}, recv type".format(tod.mpicomm.rank, fld), flush=True)
                rtype = tod.mpicomm.recv(source=gproc, tag=(mtag + 1))

                #print("  proc {} for field {}, recv size".format(tod.mpicomm.rank, fld), flush=True)
                rsize = tod.mpicomm.recv(source=gproc, tag=(mtag + 2))

                #print("  proc {} for field {}, recv data".format(tod.mpicomm.rank, fld), flush=True)
                rdata = np.zeros(rsize, dtype=np.dtype(rtype))
                tod.mpicomm.Recv(rdata, source=gproc, tag=mtag)

                # Reshape if needed
                if allnnz > 1:
                    rdata = rdata.reshape((-1, allnnz))

            elif (tod.mpicomm.rank == gproc):
                # Send our data
                #print("  proc {} for field {}, send {} samples of {}".format(tod.mpicomm.rank, fld, len(gdata), gdata.dtype.char), flush=True)

                #print("  proc {} for field {}, send type with tag = {}".format(tod.mpicomm.rank, fld, mtag+1), flush=True)
                tod.mpicomm.send(gdata.dtype.char, dest=0, tag=(mtag + 1))

                #print("  proc {} for field {}, send size with tag = {}".format(tod.mpicomm.rank, fld, mtag+2), flush=True)
                tod.mpicomm.send(len(gdata), dest=0, tag=(mtag + 2))

                #print("  proc {} for field {}, send data with tag {}".format(tod.mpicomm.rank, fld, mtag), flush=True)
                tod.mpicomm.Send(gdata, 0, tag=mtag)
        return rdata

    # For efficiency, we are going to gather the data for all frames at once.
    # Then we will split those up when doing the write.

    # Frame offsets relative to the memory buffers we are gathering
    fdataoff = [0]
    for f in frame_sizes[:-1]:
        last = fdataoff[-1]
        fdataoff.append(last + f)

    # The list of frames (only populated on the root process).
    fdata = None
    if tod.mpicomm.rank == 0:
        fdata = [c3g.G3Frame(c3g.G3FrameType.Scan) for f in range(n_frames)]
    else:
        fdata = [None for f in range(n_frames)]

    # Compute the overlap of all frames with the local process.  We want
    # to find the full sample range over which this process overlaps the
    # total set of frames.

    cacheoff = None
    ncache = 0

    for f in range(n_frames):
        # Compute overlap of the frame with the local samples.
        fcacheoff, froff, nfr = local_frame_indices(local_first, nlocal,
                                                    frame_offsets[f],
                                                    frame_sizes[f])
        #print("proc {}:  frame {} has cache off {}, fr off {}, nfr {}".format(tod.mpicomm.rank, f, fcacheoff, froff, nfr), flush=True)
        if fcacheoff is not None:
            if cacheoff is None:
                cacheoff = fcacheoff
                ncache = nfr
            else:
                ncache += nfr
            #print("proc {}:    cache off now {}, ncache now {}".format(tod.mpicomm.rank, cacheoff, ncache), flush=True)

    # Now gather the full sample data one field at a time.  The root process
    # splits up the results into frames.

    # First gather common fields from the first row of the process grid.

    for findx, (cachefield, g3t, framefield) in enumerate(common):
        #print("proc {} entering gather_field(0, {}, {}, {}, {})".format(tod.mpicomm.rank, cachefield, findx, cacheoff, ncache), flush=True)
        data = gather_field(0, cachefield, findx, cacheoff, ncache)
        if tod.mpicomm.rank == 0:
            #print("Casting field {} to type {}".format(field, g3t), flush=True)
            if g3t == c3g.G3VectorTime:
                # Special case for time values stored as int64_t, but
                # wrapped in a class.
                for f in range(n_frames):
                    dataoff = fdataoff[f]
                    ndata = frame_sizes[f]
                    g3times = list()
                    for t in range(ndata):
                        g3times.append(c3g.G3Time(data[dataoff + t]))
                    fdata[f][framefield] = c3g.G3VectorTime(g3times)
                    del g3times
            else:
                # The bindings of G3Vector seem to only work with
                # lists.  This is probably horribly inefficient.
                for f in range(n_frames):
                    dataoff = fdataoff[f]
                    ndata = frame_sizes[f]
                    if len(data.shape) == 1:
                        fdata[f][framefield] = \
                            g3t(data[dataoff:dataoff+ndata].tolist())
                    else:
                        # We have a 2D quantity
                        fdata[f][framefield] = \
                            g3t(data[dataoff:dataoff+ndata,:].flatten().tolist())
        del data

    # Wait for everyone to catch up...
    tod.mpicomm.barrier()

    # For each detector field, processes which have the detector
    # in their local_dets should be in the same process row.
    # We do the gather over just this process row.

    if (detector_fields is not None) or (flag_fields is not None):
        dpats = {d: re.compile(".*{}.*".format(d)) for d in tod.local_dets}

        detmaps = None
        if detector_fields is not None:
            if tod.mpicomm.rank == 0:
                detmaps = [c3g.G3TimestreamMap() for f in range(n_frames)]

            for dindx, (cachefield, framefield) in enumerate(detector_fields):
                pc = -1
                for det, pat in dpats.items():
                    if pat.match(cachefield) is not None:
                        #print("proc {} has field {}".format(tod.mpicomm.rank, field), flush=True)
                        pc = rankdet
                        break
                # As a sanity check, verify that every process which
                # has this field is in the same process row.
                rowcheck = tod.mpicomm.gather(pc, root=0)
                prow = 0
                if tod.mpicomm.rank == 0:
                    rc = np.array([x for x in rowcheck if (x >= 0)],
                                  dtype=np.int32)
                    #print(field, rc, flush=True)
                    prow = np.max(rc)
                    if np.min(rc) != prow:
                        msg = "Processes with field {} are not in the "\
                            "same row\n".format(cachefield)
                        sys.stderr.write(msg)
                        tod.mpicomm.abort()

                # Every process finds out which process row is participating.
                prow = tod.mpicomm.bcast(prow, root=0)
                #print("proc {} got prow = {}".format(tod.mpicomm.rank, prow), flush=True)

                # Get the data on rank 0
                data = gather_field(prow, cachefield, dindx, cacheoff, ncache)

                if tod.mpicomm.rank == 0:
                    if units is None:
                        # We do this conditional, since we can't use
                        # G3TimestreamUnits.None in python ("None" is
                        # interpreted as python None).
                        for f in range(n_frames):
                            dataoff = fdataoff[f]
                            ndata = frame_sizes[f]
                            detmaps[f][framefield] = \
                                c3g.G3Timestream(data[dataoff:dataoff+ndata])
                    else:
                        for f in range(n_frames):
                            dataoff = fdataoff[f]
                            ndata = frame_sizes[f]
                            detmaps[f][framefield] = \
                                c3g.G3Timestream(data[dataoff:dataoff+ndata],
                                                 units)

            if tod.mpicomm.rank == 0:
                for f in range(n_frames):
                    fdata[f][detector_map] = detmaps[f]

        flagmaps = None
        if flag_fields is not None:
            if tod.mpicomm.rank == 0:
                flagmaps = [c3g.G3MapVectorInt() for f in range(n_frames)]
            for dindx, (cachefield, framefield) in enumerate(flag_fields):
                pc = -1
                for det, pat in dpats.items():
                    if pat.match(cachefield) is not None:
                        pc = rankdet
                        break
                # As a sanity check, verify that every process which
                # has this field is in the same process row.
                rowcheck = tod.mpicomm.gather(pc, root=0)
                prow = 0
                if tod.mpicomm.rank == 0:
                    rc = np.array([x for x in rowcheck if (x >= 0)],
                                  dtype=np.int32)
                    prow = np.max(rc)
                    if np.min(rc) != prow:
                        msg = "Processes with field {} are not in the "\
                            "same row\n".format(cachefield)
                        sys.stderr.write(msg)
                        tod.mpicomm.abort()

                # Every process finds out which process row is participating.
                prow = tod.mpicomm.bcast(prow, root=0)

                # Get the data on rank 0
                data = gather_field(prow, cachefield, dindx, cacheoff, ncache)

                if tod.mpicomm.rank == 0:
                    # The bindings of G3Vector seem to only work with
                    # lists...  Also there is no vectormap for unsigned
                    # char, so we have to use int...
                    for f in range(n_frames):
                        dataoff = fdataoff[f]
                        ndata = frame_sizes[f]
                        flagmaps[f][framefield] = \
                            c3g.G3VectorInt(\
                                data[dataoff:dataoff+ndata].astype(np.int32)\
                                .tolist())

            if tod.mpicomm.rank == 0:
                for f in range(n_frames):
                    fdata[f][flag_map] = flagmaps[f]

    return fdata
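Note that other examples in this collection construct G3 vectors directly
from numpy arrays (e.g. core.G3VectorDouble(az) in Example #13, or
G3VectorTime from an int64 array in Example #1), so the .tolist()
round-trips flagged as inefficient in the code above may be avoidable; a
sketch (whether this is actually faster is untested here):

import numpy as np
from spt3g import core as c3g

data = np.arange(10, dtype=np.float64)
v = c3g.G3VectorDouble(data[2:8])  # built directly from an ndarray slice
tv = c3g.G3VectorTime((data[2:8] * 1e8).astype('int64'))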
Example #15
 def split_field(data,
                 g3t,
                 framefield,
                 mapfield=None,
                 g3units=units,
                 times=None):
     """Split a gathered data buffer into frames- only on root process.
     """
     if data is None:
         return
     if g3t == core3g.G3VectorTime:
         # Special case for time values stored as int64_t, but
         # wrapped in a class.
         for f in range(n_frames):
             dataoff = fdataoff[f]
             ndata = frame_sizes[f]
             g3times = list()
             for t in range(ndata):
                 g3times.append(core3g.G3Time(data[dataoff + t]))
             if mapfield is None:
                 fdata[f][framefield] = core3g.G3VectorTime(g3times)
             else:
                 fdata[f][framefield][mapfield] = \
                     core3g.G3VectorTime(g3times)
             del g3times
     elif g3t == so3g.IntervalsInt:
         # Flag vector is written as a simple boolean.
         for f in range(n_frames):
             dataoff = fdataoff[f]
             ndata = frame_sizes[f]
             # Extract flag vector (0 or 1) for this frame
             frame_flags = (data[dataoff:dataoff + ndata] != 0).astype(int)
             # Convert bit 0 to an IntervalsInt.
             ival = so3g.IntervalsInt.from_mask(frame_flags, 1)[0]
             if mapfield is None:
                 fdata[f][framefield] = ival
             else:
                 fdata[f][framefield][mapfield] = ival
     elif g3t == core3g.G3Timestream:
         if times is None:
             raise RuntimeError(
                 "You must provide the time stamp vector with a "
                 "Timestream object")
         for f in range(n_frames):
             dataoff = fdataoff[f]
             ndata = frame_sizes[f]
             timeslice = times[cacheoff + dataoff:cacheoff + dataoff +
                               ndata]
             tstart = timeslice[0] * 1e8
             tstop = timeslice[-1] * 1e8
             if mapfield is None:
                 if g3units is None:
                     fdata[f][framefield] = \
                         g3t(data[dataoff : dataoff + ndata])
                 else:
                     fdata[f][framefield] = \
                         g3t(data[dataoff : dataoff + ndata], g3units)
                 fdata[f][framefield].start = core3g.G3Time(tstart)
                 fdata[f][framefield].stop = core3g.G3Time(tstop)
             else:
                 # Individual detector data.  The only fields that
                 # we (optionally) compress.
                 if g3units is None:
                     tstream = g3t(data[dataoff:dataoff + ndata])
                 else:
                     tstream = g3t(data[dataoff:dataoff + ndata], g3units)
                 if compress and "compressor_gain_" + framefield in fdata[f]:
                     (tstream, gain,
                      offset) = recode_timestream(tstream, compress)
                     fdata[f]["compressor_gain_" +
                              framefield][mapfield] = gain
                     fdata[f]["compressor_offset_" +
                              framefield][mapfield] = offset
                 fdata[f][framefield][mapfield] = tstream
                 fdata[f][framefield][mapfield].start = core3g.G3Time(
                     tstart)
                 fdata[f][framefield][mapfield].stop = core3g.G3Time(tstop)
     else:
         # The bindings of G3Vector seem to only work with
         # lists.  This is probably horribly inefficient.
         for f in range(n_frames):
             dataoff = fdataoff[f]
             ndata = frame_sizes[f]
             if len(data.shape) == 1:
                 fdata[f][framefield] = \
                     g3t(data[dataoff : dataoff + ndata].tolist())
             else:
                 # We have a 2D quantity
                 fdata[f][framefield] = \
                     g3t(data[dataoff : dataoff + ndata, :].flatten()
                         .tolist())
     return
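The IntervalsInt.from_mask call above pulls per-bit intervals out of an
integer mask; a standalone sketch of just that step (illustrative data;
exact endpoint conventions per the so3g documentation):

import numpy as np
import so3g

flags = np.array([0, 1, 1, 0, 1], dtype=int)
ival = so3g.IntervalsInt.from_mask(flags, 1)[0]  # intervals for bit 0
print(ival.array())  # Nx2 array of interval endpoints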
Example #16
 def split_field(data, g3t, framefield, mapfield=None, g3units=units):
     """Split a gathered data buffer into frames.
     """
     if tod.mpicomm.rank == 0:
         if g3t == core3g.G3VectorTime:
             # Special case for time values stored as int64_t, but
             # wrapped in a class.
             for f in range(n_frames):
                 dataoff = fdataoff[f]
                 ndata = frame_sizes[f]
                 g3times = list()
                 for t in range(ndata):
                     g3times.append(core3g.G3Time(data[dataoff + t]))
                 if mapfield is None:
                     fdata[f][framefield] = core3g.G3VectorTime(g3times)
                 else:
                     fdata[f][framefield][mapfield] = \
                         core3g.G3VectorTime(g3times)
                 del g3times
         elif g3t == so3g.IntervalsInt:
             # This means that the data is actually flags
             # and we should convert it into a list of intervals.
             fint = flags_to_intervals(data)
             for f in range(n_frames):
                 dataoff = fdataoff[f]
                 ndata = frame_sizes[f]
                 datalast = dataoff + ndata
                 chunks = list()
                 idomain = (0, ndata - 1)
                 for intr in fint:
                     # Interval sample ranges are defined relative to the
                     # frame itself.
                     cfirst = None
                     clast = None
                     if (intr[0] < datalast) and (intr[1] >= dataoff):
                         # there is some overlap...
                         if intr[0] < dataoff:
                             cfirst = 0
                         else:
                             cfirst = intr[0] - dataoff
                         if intr[1] >= datalast:
                             clast = ndata - 1
                         else:
                             clast = intr[1] - dataoff
                         chunks.append([cfirst, clast])
                 if mapfield is None:
                     if len(chunks) == 0:
                         fdata[f][framefield] = \
                             so3g.IntervalsInt()
                     else:
                         fdata[f][framefield] = \
                             so3g.IntervalsInt.from_array(
                                 np.array(chunks, dtype=np.int64))
                     fdata[f][framefield].domain = idomain
                 else:
                     if len(chunks) == 0:
                         fdata[f][framefield][mapfield] = \
                             so3g.IntervalsInt()
                     else:
                         fdata[f][framefield][mapfield] = \
                             so3g.IntervalsInt.from_array(
                                 np.array(chunks, dtype=np.int64))
                     fdata[f][framefield][mapfield].domain = idomain
             del fint
         elif g3t == core3g.G3Timestream:
             for f in range(n_frames):
                 dataoff = fdataoff[f]
                 ndata = frame_sizes[f]
                 if mapfield is None:
                     if g3units is None:
                         fdata[f][framefield] = \
                             g3t(data[dataoff:dataoff+ndata])
                     else:
                         fdata[f][framefield] = \
                             g3t(data[dataoff:dataoff+ndata], g3units)
                 else:
                     if g3units is None:
                         fdata[f][framefield][mapfield] = \
                             g3t(data[dataoff:dataoff+ndata])
                     else:
                         fdata[f][framefield][mapfield] = \
                             g3t(data[dataoff:dataoff+ndata], g3units)
         else:
             # The bindings of G3Vector seem to only work with
             # lists.  This is probably horribly inefficient.
             for f in range(n_frames):
                 dataoff = fdataoff[f]
                 ndata = frame_sizes[f]
                 if len(data.shape) == 1:
                     fdata[f][framefield] = \
                         g3t(data[dataoff:dataoff+ndata].tolist())
                 else:
                     # We have a 2D quantity
                     fdata[f][framefield] = \
                         g3t(data[dataoff:dataoff+ndata, :].flatten()
                             .tolist())
     return
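The interval-clipping logic in the IntervalsInt branch above intersects
each global interval with a frame's sample range; the same arithmetic as a
standalone sketch (hypothetical helper, inclusive endpoint pairs as in the
code above):

def clip_interval(intr, dataoff, ndata):
    # Return the interval clipped to the frame's sample range, expressed
    # relative to the frame start, or None if there is no overlap.
    datalast = dataoff + ndata
    if intr[0] >= datalast or intr[1] < dataoff:
        return None
    cfirst = 0 if intr[0] < dataoff else intr[0] - dataoff
    clast = ndata - 1 if intr[1] >= datalast else intr[1] - dataoff
    return [cfirst, clast]

assert clip_interval([5, 30], dataoff=10, ndata=10) == [0, 9]
assert clip_interval([12, 14], dataoff=10, ndata=10) == [2, 4]
assert clip_interval([25, 30], dataoff=10, ndata=10) is None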