Example 1
import os

import numpy as np
from mayavi import mlab


def anim(s, d, *args, **kwargs):
    """Animate graph s with data d[nt, nx, ny].

    Optional positional argument s1 -> additional bundled graph.
    Optional keyword argument save=False -> save png files for creating a movie.
    """
    if len(args) == 1:
        s1 = args[0]
    else:
        s1 = None

    # Look up the optional flag directly rather than via a bare except
    save = kwargs.get('save', False)

    nt = d.shape[0]

    print('animating for', nt, 'timesteps')
    if save:
        print('Saving pics in folder Movie')
        if not os.path.exists('Movie'):
            os.makedirs('Movie')

    for i in range(nt):
        s.mlab_source.scalars = d[i, :, :]
        if s1 is not None:
            s1.mlab_source.scalars = d[i, :, :]
        # NB: np.string0 subclasses str on Python 2; on Python 3 it aliases
        # np.bytes_, where str(i) should be used instead
        title = "t=" + np.string0(i)
        mlab.title(title, height=1.1, size=0.26)
        if save:
            mlab.savefig('Movie/anim%d.png' % i)
        yield
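This function is a generator (it yields once per frame), so it is typically driven by mayavi's `mlab.animate` decorator. A minimal usage sketch, assuming a working mayavi install (the `data` array and `delay` value here are illustrative assumptions, not part of the example above):

import numpy as np
from mayavi import mlab

data = np.random.rand(10, 32, 32)   # hypothetical (nt, nx, ny) time series
surf = mlab.surf(data[0, :, :])     # draw the initial frame

@mlab.animate(delay=100)            # re-enter the generator every 100 ms
def run():
    for _ in anim(surf, data):
        yield

animator = run()
mlab.show()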
Example 2
from numbers import Integral, Number

import numpy as np
from six import string_types  # assumption: `string_types` comes from six


def set_explicit_dtype(x):
    """Force `x` to have a numpy type if it doesn't already have one.

    Parameters
    ----------
    x : numpy-typed object, bool, integer, float
        If not numpy-typed, type is attempted to be inferred. Currently only
        bool, int, and float are supported, where bool is converted to
        np.bool8, integer is converted to np.int64, and float is converted to
        np.float64. This ensures that full precision for all but the most
        extreme cases is maintained for inferred types.

    Returns
    -------
    x : numpy-typed object

    Raises
    ------
    TypeError
        In case the type of `x` is not already set or is not a valid inferred
        type. As type inference can yield different results for different
        inputs, rather than deal with everything, explicitly failing helps to
        avoid inferring the different instances of the same object differently
        (which will cause a failure later on when trying to concatenate the
        types in a larger array).

    """
    if hasattr(x, "dtype"):
        return x

    # "value" attribute is found in basic icecube.{dataclasses,icetray} dtypes
    # such as I3Bool, I3Double, I3Int, and I3String
    if hasattr(x, "value"):
        x = x.value

    # bools are numbers.Integral, so test for bool first
    if isinstance(x, bool):
        return np.bool8(x)

    if isinstance(x, Integral):
        x_new = np.int64(x)
        assert x_new == x
        return x_new

    if isinstance(x, Number):
        x_new = np.float64(x)
        assert x_new == x
        return x_new

    if isinstance(x, string_types):
        x_new = np.string0(x)
        assert x_new == x
        return x_new

    raise TypeError("Type of argument ({}) is invalid: {}".format(x, type(x)))
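An illustrative check of the inference rules above (a sketch, assuming `set_explicit_dtype` and its imports are in scope; not from the original source):

import numpy as np

print(set_explicit_dtype(True).dtype)   # bool  (np.bool8)
print(set_explicit_dtype(42).dtype)     # int64
print(set_explicit_dtype(3.14).dtype)   # float64

x = np.float32(1.5)
assert set_explicit_dtype(x) is x       # already numpy-typed: returned unchanged
# NB: the string branch (np.string0) assumes Python 2 semantics; on Python 3
# the `assert x_new == x` comparison of bytes against str would fail.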
Example 3
def anim(s, d, *args, **kwargs):
    """Animate graph with mayavi

    Parameters
    ----------
    s : mayavi axis object
        Axis to animate data on
    d : array_like
        3-D array to animate
    s1 : mayavi axis object, optional
        Additional bundled graph (first item in *args)
    save : bool, optional
        Save png files for creating movie (default: False)

    """

    if len(args) == 1:
        s1 = args[0]
    else:
        s1 = None

    save = kwargs.get('save', False)

    nt = d.shape[0]

    print('animating for', nt, 'timesteps')
    if save:
        print('Saving pics in folder Movie')
        if not os.path.exists('Movie'):
            os.makedirs('Movie')

    for i in range(nt):
        s.mlab_source.scalars = d[i, :, :]
        if s1 is not None:
            s1.mlab_source.scalars = d[i, :, :]
        title = "t=" + np.string0(i)  # Python 2 semantics; see the note in Example 1
        mlab.title(title, height=1.1, size=0.26)
        if save:
            mlab.savefig('Movie/anim%d.png' % i)
        yield
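The *args/**kwargs plumbing above exists only to emulate optional parameters. A sketch of an equivalent, more explicit signature (illustrative, not from the original repository; uses str(i) so it also runs on Python 3):

def anim(s, d, s1=None, save=False):
    """Same behavior with explicit optional parameters instead of *args/**kwargs."""
    if save and not os.path.exists('Movie'):
        os.makedirs('Movie')
    for i in range(d.shape[0]):
        s.mlab_source.scalars = d[i, :, :]
        if s1 is not None:
            s1.mlab_source.scalars = d[i, :, :]
        mlab.title("t=" + str(i), height=1.1, size=0.26)
        if save:
            mlab.savefig('Movie/anim%d.png' % i)
        yield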
Example 4
def load_clsim_table_minimal(fpath, mmap=False, include_overflow=False):
    """Load a CLSim table from disk (optionally compressed with zstd).

    Similar to the `load_clsim_table` function but the full table, including
    under/overflow bins, is kept and no normalization or further processing is
    performed on the table data besides populating the output OrderedDict.

    Parameters
    ----------
    fpath : string
        Path to the file to be loaded. If the file has extension 'zst', 'zstd',
        or 'zstandard', it is decompressed using the `python-zstandard` Python
        library before being passed to `fits` for interpretation.

    mmap : bool, optional
        Whether to memory map the table

    include_overflow : bool, optional
        By default, overflow bins (if present) are removed

    Returns
    -------
    table : OrderedDict

    """
    t0 = time()

    table = OrderedDict()
    fpath = expand(fpath)

    if DEBUG:
        wstderr('Loading table from {} ...\n'.format(fpath))

    if isdir(fpath):
        indir = fpath
        if mmap:
            mmap_mode = 'r'
        else:
            mmap_mode = None

        for rel_fpath in listdir(indir):
            key, ext = splitext(rel_fpath)
            abs_fpath = join(indir, rel_fpath)

            if not (isfile(abs_fpath) and ext == '.npy'):
                continue

            if DEBUG:
                wstderr('    loading {} from "{}" ...'.format(key, abs_fpath))

            t1 = time()
            val = np.load(abs_fpath, mmap_mode=mmap_mode)

            # Pull "small" things (less than 10 MiB) into memory so we don't
            # have too many file handles open due to memory mapping
            if mmap and val.nbytes < 10 * 1024**2:
                val = np.copy(val)

            table[key] = val

            if DEBUG:
                wstderr(' ({} ms)\n'.format(np.round((time() - t1) * 1e3, 3)))

    elif isfile(fpath):
        from astropy.io import fits
        fobj = get_decompressd_fobj(fpath)
        pf_table = None
        try:
            pf_table = fits.open(fobj, mode='readonly', memmap=mmap)

            header = pf_table[0].header  # pylint: disable=no-member
            table['table_shape'] = np.array(pf_table[0].data.shape, dtype=int)  # pylint: disable=no-member
            table['group_refractive_index'] = set_explicit_dtype(
                force_little_endian(header['_i3_n_group']))
            table['phase_refractive_index'] = set_explicit_dtype(
                force_little_endian(header['_i3_n_phase']))

            n_dims = len(table['table_shape'])

            new_style = False
            axnames = [None] * n_dims
            binning = [None] * n_dims
            for key in header.keys():
                if not key.startswith('_i3_ax_'):
                    continue
                new_style = True
                axnum = header[key]
                axname = key[len('_i3_ax_'):]
                be0 = header['_i3_{}_min'.format(axname)]
                be1 = header['_i3_{}_max'.format(axname)]
                n_bins = header['_i3_{}_n_bins'.format(axname)]
                power = header.get('_i3_{}_power'.format(axname), 1)
                bin_edges = force_little_endian(pf_table[axnum + 1].data)  # pylint: disable=no-member
                assert np.isclose(bin_edges[0],
                                  be0), '%f .. %f' % (be0, bin_edges[0])
                assert np.isclose(bin_edges[-1],
                                  be1), '%f .. %f' % (be1, bin_edges[-1])
                assert len(bin_edges) == n_bins + 1, '%d vs. %d' % (
                    len(bin_edges), n_bins + 1)
                assert np.allclose(
                    bin_edges,
                    powerspace(start=be0,
                               stop=be1,
                               num=n_bins + 1,
                               power=power),
                )
                axnames[axnum] = axname
                binning[axnum] = bin_edges

            if not new_style:
                if n_dims == 5:
                    axnames = [
                        'r', 'costheta', 't', 'costhetadir', 'deltaphidir'
                    ]
                elif n_dims == 6:
                    axnames = [
                        'r', 'costheta', 'phi', 't', 'costhetadir',
                        'deltaphidir'
                    ]
                else:
                    raise NotImplementedError(
                        '{}-dimensional table not handled for old-style CLSim'
                        ' tables'.format(n_dims))
                binning = [
                    force_little_endian(pf_table[i + 1].data).flat
                    for i in range(len(axnames))
                ]  # pylint: disable=no-member

            for axnum, (axname, bin_edges) in enumerate(zip(axnames, binning)):
                assert axname is not None, 'missing axis %d name' % axnum
                assert bin_edges is not None, 'missing axis %d binning' % axnum

            dtype = np.dtype([(axname, np.float64, dim.size)
                              for axname, dim in zip(axnames, binning)])
            table['binning'] = np.array(tuple(binning), dtype=dtype)

            for keyroot in GENERIC_KEYS:
                keyname = '_i3_' + keyroot
                if keyname in header:
                    val = force_little_endian(header[keyname])
                    if keyroot in (
                            't_is_residual_time',
                            'disable_tilt',
                            'disable_anisotropy',
                    ):
                        val = np.bool8(val)
                    else:
                        val = set_explicit_dtype(val)
                    table[keyroot] = val

            # Get string values from keys where the value is embedded in the
            # key name itself, following a known prefix (I3 software had
            # issues saving strings as values in the header "dict", so the
            # workaround was to store the string value this way)
            for infix in INFIX_KEYS:
                keyroot = '_i3_' + infix + '_'
                for keyname in header.keys():
                    if not keyname.startswith(keyroot):
                        continue
                    val = keyname[len(keyroot):]
                    table[infix] = np.string0(val)

            if include_overflow:
                slicer = (slice(None), ) * n_dims
            else:
                slicer = (slice(1, -1), ) * n_dims
            table['table'] = force_little_endian(pf_table[0].data[slicer])  # pylint: disable=no-member

            wstderr('    (load took {} s)\n'.format(np.round(time() - t0, 3)))

        except:
            wstderr('ERROR: Failed to load "{}"\n'.format(fpath))
            raise

        finally:
            del pf_table
            if hasattr(fobj, 'close'):
                fobj.close()
            del fobj

    else:  # fpath is neither dir nor file
        raise ValueError('Table does not exist at path "{}"'.format(fpath))

    if 'step_length' not in table:
        table['step_length'] = 1

    if 't_is_residual_time' not in table:
        table['t_is_residual_time'] = True

    if DEBUG:
        wstderr('  Total time to load: {} s\n'.format(np.round(time() - t0,
                                                               3)))

    return table
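A hypothetical usage sketch (the path is illustrative, and the project-level helpers the function relies on, e.g. `expand`, `wstderr`, `force_little_endian`, and `powerspace`, must be importable):

table = load_clsim_table_minimal('tables/ckv_table.fits.zst', mmap=True)
print(list(table.keys()))      # e.g. 'table_shape', 'binning', 'table', ...
print(table['table'].shape)    # table data, under/overflow bins stripped by default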
Example 5
def _tobuffer(self, object_):
    if not isinstance(object_, basestring):  # Python 2 `basestring`; use `str` on Python 3
        raise TypeError("object is not a string: %r" % (object_,))
    return numpy.string0(object_)
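`numpy.string0` is just an alias for `numpy.bytes_` (`numpy.string_`, a `str` subclass, on Python 2), so `_tobuffer` simply coerces the string into a numpy bytes scalar. A quick check, assuming a NumPy 1.x version that still ships the alias (it was removed in NumPy 2.0):

import numpy

assert numpy.string0 is numpy.bytes_
buf = numpy.string0("hello")
print(type(buf), buf.dtype)  # <class 'numpy.bytes_'> |S5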
Example 6
def extract_i3_detector_status(frame):
    """Extract (most, maybe?) items found in a I3Detector frame.

    Parameters
    ----------
    frame : icecube.icetray.I3Frame
        Must contain key "I3DetectorStatus", whose value is an
        ``icecube.dataclasses.I3DetectorStatus`` object

    Returns
    -------
    detector_status : OrderedDict
        Roughly equivalent Numpy representation of I3DetectorStatus frame object

    Notes
    -----
    Frame keys seen that aren't extracted: IceTopBadTanks and
    I3FlasherSubrunMap; there might be an infinite number of others missed.
    Caveat emptor.

    """
    # Get `start_time` & `end_time` using standard functions
    detector_status = get_frame_item(frame,
                                     key="I3DetectorStatus",
                                     specs=START_END_TIME_SPEC,
                                     allow_missing=False)

    # TODO: roll the following few items into a single spec incl.
    #       START_END_TIME_SPEC

    key = "GRLSnapshotId"
    if key in frame:
        detector_status[key] = np.float64(frame[key].value)

    key = "OfflineProductionVersion"
    if key in frame:
        detector_status[key] = np.float64(frame[key].value)

    for key in ["GoodRunStartTime", "GoodRunEndTime"]:
        if key not in frame:
            continue
        detector_status[key] = np.array(
            [tuple(getattr(frame[key], name) for name in I3TIME_T.names)],
            dtype=I3TIME_T,
        )

    # DETECTOR_STATUS_T = np.dtype(
    #    [
    #        ('start_time', I3Time(2011,116382900000000000L)),
    #        ('end_time', I3Time(2011,116672520000000000L)),
    #        ('daq_configuration_name', 'sps-IC86-mitigatedHVs-V175'),
    #        ('dom_status', <icecube.dataclasses.Map_OMKey_I3DOMStatus>),
    #        ('trigger_status', <icecube.dataclasses.Map_TriggerKey_I3TriggerStatus>),
    #    ]
    # )

    i3_detector_status_frame_obj = frame["I3DetectorStatus"]

    dom_status_frame_obj = i3_detector_status_frame_obj.dom_status
    dom_status = np.empty(shape=len(dom_status_frame_obj), dtype=I3DOMSTATUS_T)
    for i, omkey in enumerate(sorted(dom_status_frame_obj.keys())):
        this_dom_status = dom_status_frame_obj[omkey]
        dom_status[i]["omkey"]["string"] = omkey.string
        dom_status[i]["omkey"]["om"] = omkey.om
        dom_status[i]["omkey"]["pmt"] = omkey.pmt
        dom_status[i]["cable_type"] = CableType(this_dom_status.cable_type)
        dom_status[i]["dac_fadc_ref"] = this_dom_status.dac_fadc_ref
        dom_status[i]["dac_trigger_bias_0"] = this_dom_status.dac_trigger_bias_0
        dom_status[i]["dac_trigger_bias_1"] = this_dom_status.dac_trigger_bias_1
        dom_status[i]["delta_compress"] = OnOff(this_dom_status.delta_compress)
        dom_status[i]["dom_gain_type"] = DOMGain(this_dom_status.dom_gain_type)
        dom_status[i]["fe_pedestal"] = this_dom_status.fe_pedestal
        dom_status[i]["lc_mode"] = LCMode(this_dom_status.lc_mode)
        dom_status[i]["lc_span"] = this_dom_status.lc_span
        dom_status[i]["lc_window_post"] = this_dom_status.lc_window_post
        dom_status[i]["lc_window_pre"] = this_dom_status.lc_window_pre
        dom_status[i]["mpe_threshold"] = this_dom_status.mpe_threshold
        dom_status[i]["n_bins_atwd_0"] = this_dom_status.n_bins_atwd_0
        dom_status[i]["n_bins_atwd_1"] = this_dom_status.n_bins_atwd_1
        dom_status[i]["n_bins_atwd_2"] = this_dom_status.n_bins_atwd_2
        dom_status[i]["n_bins_atwd_3"] = this_dom_status.n_bins_atwd_3
        dom_status[i]["n_bins_fadc"] = this_dom_status.n_bins_fadc
        dom_status[i]["pmt_hv"] = this_dom_status.pmt_hv
        dom_status[i]["slc_active"] = this_dom_status.slc_active
        dom_status[i]["spe_threshold"] = this_dom_status.spe_threshold
        dom_status[i]["status_atwd_a"] = OnOff(this_dom_status.status_atwd_a)
        dom_status[i]["status_atwd_b"] = OnOff(this_dom_status.status_atwd_b)
        dom_status[i]["status_fadc"] = OnOff(this_dom_status.status_fadc)
        dom_status[i]["trig_mode"] = TrigMode(this_dom_status.trig_mode)
        dom_status[i]["tx_mode"] = LCMode(this_dom_status.tx_mode)

    detector_status["dom_status"] = dom_status

    # Trigger status does not have uniform sub-fields across all types, so
    # build up a dict keyed by the trigger-key tuple
    trigger_status_frame_obj = i3_detector_status_frame_obj.trigger_status
    trigger_status = OrderedDict()
    for trigger_key_fobj, trigger_status_fobj in trigger_status_frame_obj.items():
        this_trigger_config = OrderedDict()

        trigger_key = np.empty(shape=1, dtype=TRIGGERKEY_T)
        trigger_key["source"] = TriggerSourceID(trigger_key_fobj.source)
        trigger_key["type"] = TriggerTypeID(trigger_key_fobj.type)
        trigger_key["subtype"] = TriggerSubtypeID(trigger_key_fobj.subtype)
        # TODO: some config ID's aren't defined in the TriggerConfigID enum, no
        # idea where they come from and whether or not it's a bug. For now,
        # simply accept all ID's.
        # trigger_key["config_id"] = TriggerConfigID(trigger_key_fobj.config_id)
        if (hasattr(trigger_key_fobj, "config_id")
                and trigger_key_fobj.config_id is not None):
            config_id = trigger_key_fobj.config_id
        else:
            config_id = TriggerConfigID.NONE  # custom code; see `retro_types`
        trigger_key["config_id"] = config_id

        this_trigger_config["trigger_key"] = trigger_key

        readout_settings = OrderedDict()
        for subdet, settings in trigger_status_fobj.readout_settings.items():
            trigger_readout_config = np.empty(shape=1, dtype=I3TRIGGERREADOUTCONFIG_T)
            trigger_readout_config[0]["readout_time_minus"] = settings.readout_time_minus
            trigger_readout_config[0]["readout_time_plus"] = settings.readout_time_plus
            trigger_readout_config[0]["readout_time_offset"] = settings.readout_time_offset
            readout_settings[str(subdet)] = trigger_readout_config

        this_trigger_config["readout_settings"] = dict2struct(readout_settings)

        trigger_status[tuple(trigger_key[0])] = this_trigger_config

    detector_status["trigger_status"] = trigger_status
    detector_status["daq_configuration_name"] = np.string0(
        i3_detector_status_frame_obj.daq_configuration_name)

    return detector_status
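A sketch of how this extractor might be driven, assuming the IceCube software environment is available (the file path is hypothetical):

from icecube import dataio, icetray

i3file = dataio.I3File('Level2_IC86.i3.zst')
while i3file.more():
    frame = i3file.pop_frame()
    if frame.Stop == icetray.I3Frame.DetectorStatus:
        detector_status = extract_i3_detector_status(frame)
        print(detector_status['daq_configuration_name'])
        break
i3file.close()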
Example 7
            if end_date_s.date().day < 10:
                end_day_adj = "0"
            else:
                end_day_adj = ""
            if end_date_s.date().month < 10:
                end_month_adj = "0"
            else:
                end_month_adj = ""
            """
            Important: The issue of leap years
            - for a cy start of 2016-02-01 and a 4 week window, the cy end will be 2016-02-29
            - for a cy start of 2016-02-29 and a 4 week window, the cy end will be 2016-03-28
            - Both of these windows contain the 2016 leap year February date 02-29.
            - The py date equivalent of the 2016 leap year Feb date 02-29 does not exist on the calendar
            - if 02/29 is found to be a date in start_date_s or end_date_s, then day should be set to 28
            """
            start_md = np.string0(np.string0(start_date_s.date().month) + "-" +
                                  np.string0(start_date_s.date().day))
            end_md = np.string0(np.string0(end_date_s.date().month) + "-" +
                                np.string0(end_date_s.date().day))
            if end_md == '2-29':
                # leap year detected: clamp to Feb 28, since the prior year
                # has no Feb 29
                end_ts = pd.to_datetime("0228" + str(yr1_end - 1)[2:])
            else:
                end_ts = pd.to_datetime(end_month_adj + str(end_date_s.date().month) +
                                        end_day_adj + str(end_date_s.date().day) +
                                        str(yr1_end - 1)[2:])

            if start_md == '2-29':
                # leap year detected: clamp to Feb 28 of the prior year
                start_ts = pd.to_datetime("0228" + str(yr1_start - 1)[2:])
            else:
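The snippet above is truncated, but the leap-day rule it implements can be stated compactly. A self-contained sketch of the same clamping logic (function name and inputs are illustrative, not from the original):

import pandas as pd

def prior_year(ts):
    """Shift a timestamp back one year, clamping Feb 29 to Feb 28."""
    if ts.month == 2 and ts.day == 29:
        return pd.Timestamp(year=ts.year - 1, month=2, day=28)
    return ts.replace(year=ts.year - 1)

print(prior_year(pd.Timestamp('2016-02-29')))  # 2015-02-28
print(prior_year(pd.Timestamp('2016-03-15')))  # 2015-03-15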