def _convert_to_numpy_bytes(s):
    if isinstance(s, np.bytes_):
        return s
    elif isinstance(s, bytes):
        return np.bytes_(s)
    else:
        return np.bytes_(s.encode())
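A minimal usage sketch of the helper above (assuming numpy is imported as np, as in the source module): np.bytes_ passes through unchanged, plain bytes is wrapped, and str is encoded first.

import numpy as np

# Hypothetical demonstration of the three branches:
assert _convert_to_numpy_bytes(np.bytes_(b"x")) == np.bytes_(b"x")  # already np.bytes_
assert _convert_to_numpy_bytes(b"x") == np.bytes_(b"x")             # plain bytes
assert _convert_to_numpy_bytes("x") == np.bytes_(b"x")              # str, encoded first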
Example #2
    def _set_attrs(self, config_name, config_number):
        """
        Sets attributes for the control group and its sub-members
        """
        self[config_name].attrs.update({
            "GPIB address":
            np.uint32(0),
            "Generator type":
            np.bytes_("Agilent 33220A - LAN"),
            "IP address":
            np.bytes_(f"192.168.1.{config_number}"),
            "Initial state":
            np.bytes_("*RST;"
                      ":FUNC:SQU:DCYC +5.0000000000000E+01;"
                      ":FUNC SQU;:FUNC:USER EXP_RISE;"
                      ':DISP:TEXT "";'
                      ":DISP 1;"
                      "*ESE +0;"
                      "*PSC 1;"
                      "*SRE +0\n"),
            "Waveform command list":
            np.bytes_(
                "FREQ 40000.000000 \nFREQ 80000.000000 \nFREQ 120000.000000 \n"
            ),
        })

        # add command list to _configs
        cl = self[config_name].attrs["Waveform command list"]
        cl = _bytes_to_str(cl).splitlines()
        cl = [command.strip() for command in cl]
        self._configs[config_name]["command list"] = cl
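A standalone sketch of the attribute round-trip performed above: the command list is stored as a single np.bytes_ blob, then decoded and split back into individual commands.

import numpy as np

cl_attr = np.bytes_("FREQ 40000.000000 \nFREQ 80000.000000 \n")
commands = [c.strip() for c in cl_attr.decode("utf-8").splitlines()]
assert commands == ["FREQ 40000.000000", "FREQ 80000.000000"]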
Example #3
def check_shaderError(shader, flag, isProgram, errorMessage):
    success = bgl.Buffer(bgl.GL_INT, 1)
    slen = 1024
    if isProgram:
        bgl.glGetProgramiv(shader, flag, success)
        check_error("glGetProgramiv")

    else:
        bgl.glGetShaderiv(shader, flag, success)
        check_error("glGetShaderiv")

    import numpy as np
    from .bgl_ext import VoidBufValue

    offset = VoidBufValue(None)
    error = bgl.Buffer(bgl.GL_BYTE, slen)

    if isProgram:
        bgl.glGetProgramInfoLog(shader, slen, offset.buf, error)
        check_error("glGetProgramInfoLog")
    else:
        bgl.glGetShaderInfoLog(shader, slen, offset.buf, error)
        check_error("glGetShaderInfoLog")

    print(np.bytes_(error).decode("utf-8"))

    del offset
    if success[0] != bgl.GL_TRUE:
        print(errorMessage, np.bytes_(error).decode("utf-8"))
        raise RuntimeError(errorMessage, error)
Example #4
def splitLVIS(l,nPer,outRoot):
  nBlocks=int(l.nWaves/nPer+1)
  # loop over blocks
  for i in range(0,nBlocks):
    sInd=i*nPer
    eInd=sInd+nPer
    if(sInd >= l.nWaves):
      break
    if(eInd>l.nWaves):
      eInd=l.nWaves
    # open file
    print("Splitting %d" % i)
    outName=outRoot+"."+str(i)+".h5"
    f=h5py.File(outName,'w')
    # create datasets
    for field in FIELDS:
      f.create_dataset(field,data=l.data[field][sInd:eInd])

    # Create ancillary_data
    f.create_group("ancillary_data")
    ancillary_data = f["ancillary_data"]
    ancillary_data.create_dataset('HDF5 Version', shape=(1,), data=np.bytes_(h5py.version.hdf5_version))
    ancillary_data.create_dataset('Maximum Latitude', shape=(1,), data=np.bytes_(str(np.max(f["LAT1023"]))))
    ancillary_data.create_dataset('Maximum Longitude', shape=(1,), data=np.bytes_(str(np.max(f["LON1023"]))))
    ancillary_data.create_dataset('Minimum Latitude', shape=(1,), data=np.bytes_(str(np.min(f["LAT1023"]))))  # was LON1023: copy-paste bug
    ancillary_data.create_dataset('Minimum Longitude', shape=(1,), data=np.bytes_(str(np.min(f["LON1023"]))))
    ancillary_data.create_dataset('ancillary_text', shape=(1,), data=l.ancillary_text)
    ancillary_data.create_dataset('reference_frame', shape=(1,), data=l.reference_frame)
    f.close()
    print("Written to",outName)
  
  return
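A minimal sketch of the ancillary-data pattern used above (the file name is hypothetical): scalar strings are written as shape-(1,) byte-string datasets.

import h5py
import numpy as np

with h5py.File("demo_ancillary.h5", "w") as f:
    anc = f.create_group("ancillary_data")
    # h5py.version.hdf5_version is the linked HDF5 library version string
    anc.create_dataset("HDF5 Version", shape=(1,),
                       data=np.bytes_(h5py.version.hdf5_version))
    print(anc["HDF5 Version"][0])  # e.g. b'1.14.2'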
Example #5
def test_check_type_compat():
    """ Test check_type_compat[ibility] """

    # Positive tests
    assert check_type_compat(1, 2)
    assert check_type_compat(1.1, 2.1)
    assert check_type_compat(1.1+1j*3, 2.1+1j*8)
    assert check_type_compat('Test', 'Test2')
    assert check_type_compat(b'Test', b'Test2')
    assert check_type_compat(True, False)

    assert check_type_compat(1, np.int32(2))
    assert check_type_compat(1.1, np.float32(2.1))
    assert check_type_compat(1.1+1j*3, np.complex64(2.1+1j*8))
    assert check_type_compat('Test', np.str_('Test2'))  # pylint: disable=E1101
    assert check_type_compat(b'Test', np.bytes_('Test2'))  # pylint: disable=E1101
    assert check_type_compat(True, np.bool_(False))

    # Negative checks
    assert not check_type_compat(1, 2.1)
    assert not check_type_compat(1.1, 2)
    assert not check_type_compat(1.1+1j*3, 2.1)
    assert not check_type_compat('Test', 1)
    assert not check_type_compat('Test', b'Test2')
    assert not check_type_compat(True, 1)

    assert not check_type_compat(1.1, np.int32(2))
    assert not check_type_compat(1, np.float32(2.1))
    assert not check_type_compat(1, np.complex64(2.1+1j*8))
    assert not check_type_compat(1, np.str_('Test2'))  # pylint: disable=E1101
    assert not check_type_compat('Test', np.bytes_('Test2'))  # pylint: disable=E1101
    assert not check_type_compat(1, np.bool_(False))
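The bytes/np.bytes_ pairs above are type-compatible because np.bytes_ subclasses the built-in bytes type (and np.str_ subclasses str); a quick check:

import numpy as np

assert issubclass(np.bytes_, bytes)
assert isinstance(np.bytes_("Test"), bytes)
assert issubclass(np.str_, str)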
Example #6
    def _set_attrs(self, config_name, config_number):
        """
        Sets attributes for the control group and its sub-members
        """
        self[config_name].attrs.update({
            'GPIB address':
            np.uint32(0),
            'Generator type':
            np.bytes_('Agilent 33220A - LAN'),
            'IP address':
            np.bytes_('192.168.1.{}'.format(config_number)),
            'Initial state':
            np.bytes_('*RST;'
                      ':FUNC:SQU:DCYC +5.0000000000000E+01;'
                      ':FUNC SQU;:FUNC:USER EXP_RISE;'
                      ':DISP:TEXT "";'
                      ':DISP 1;'
                      '*ESE +0;'
                      '*PSC 1;'
                      '*SRE +0\n'),
            'Waveform command list':
            np.bytes_('FREQ 40000.000000 \n'
                      'FREQ 80000.000000 \n'
                      'FREQ 120000.000000 \n')
        })

        # add command list to _configs
        cl = self[config_name].attrs['Waveform command list']
        cl = cl.decode('utf-8').splitlines()
        cl = [command.strip() for command in cl]
        self._configs[config_name]['command list'] = cl
Example #7
    def _build_config_sis3305_subgroup(self, config_name: str, slot: int,
                                       index: int):
        """
        Create and set attributes for a SIS 3305 configuration group.
        """
        # create group
        gname = 'SIS crate 3305 configurations[{}]'.format(index)
        gpath = config_name + '/' + gname
        self.create_group(gpath)

        # get channel array
        brd = self.slot_info[slot][0]
        sis_arr = self._active_brdch['SIS 3305'][brd - 1]

        # populate attributes
        self[gpath].attrs.update({
            'Bandwidth': np.uint32(1),
            'Channel mode': np.uint32(self._sis3305_mode),
            'Clock rate': np.uint32(0),
            'Samples': np.uint32(self.knobs.nt),
            'Shot averaging (software)': np.int32(1),
        })
        for ii in range(1, 9):
            # setup
            if 1 <= ii <= 4:
                fpga_str = 'FPGA 1 '
                ch = ii
            else:
                fpga_str = 'FPGA 2 '
                ch = ii - 4

            # 'FPGA # Avail #' fields
            if self._sis3305_mode == 2 and ch != 1:
                mode = 'FALSE'
            elif self._sis3305_mode == 1 and ch not in (1, 3):
                mode = 'FALSE'
            else:
                mode = 'TRUE'
            field = fpga_str + 'Avail {}'.format(ch)
            self[gpath].attrs[field] = np.bytes_(mode)

            # 'FPGA # Ch #' fields
            field = fpga_str + 'Ch {}'.format(ch)
            self[gpath].attrs[field] = np.int32(ii)

            # 'FPGA # Comment #' fields
            field = fpga_str + 'Comment {}'.format(ch)
            self[gpath].attrs[field] = np.bytes_('')

            # 'FPGA # Data type #' fields
            field = fpga_str + 'Data type {}'.format(ch)
            self[gpath].attrs[field] = \
                np.bytes_('probe name {}'.format(ii))

            # 'FPGA # Enabled #' fields
            field = fpga_str + 'Enabled {}'.format(ch)
            self[gpath].attrs[field] = \
                np.bytes_('TRUE' if sis_arr[ii - 1] else 'FALSE')
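These LaPD-style headers store boolean flags as the byte strings b'TRUE'/b'FALSE' rather than as HDF5 booleans; a minimal sketch of that convention:

import numpy as np

enabled = True
flag = np.bytes_("TRUE" if enabled else "FALSE")
assert flag == b"TRUE"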
Example #8
    def _add_probe_groups(self):
        """Adds all probe groups"""
        # - define probe names
        # - define receptacle number
        # - define configuration name
        # - create probe groups and sub-groups
        # - define probe group attributes
        for i in range(self._n_configs):
            # define probe name
            pname = 'probe{:02}'.format(i + 1)
            self._probe_names.append(pname)

            # define receptacle number
            if self._n_configs == 1:
                receptacle = random.randint(1, self._MAX_CONFIGS)
            else:
                receptacle = i + 1

            # create probe group
            probe_gname = 'Probe: XY[{}]: '.format(receptacle) + pname
            self.create_group(probe_gname)
            self.create_group(probe_gname + '/Axes[0]')
            self.create_group(probe_gname + '/Axes[1]')

            # set probe group attributes
            self[probe_gname].attrs.update({
                'Calibration': np.bytes_(
                    '2004-06-04 0.375 inch calibration'),
                'Level sy (cm)': np.float64(70.46),
                'Port': np.uint8(27),
                'Probe': np.bytes_(pname),
                'Probe channels': np.bytes_(''),
                'Probe type': np.bytes_('LaPD probe'),
                'Receptacle': np.int8(receptacle),
                'Unnamed': np.bytes_('lower East'),
                'sx at end (cm)': np.float64(112.01),
                'z': np.float64(830.699999)
            })

            # add attributes to Axes[0] group
            self[probe_gname + '/Axes[0]'].attrs.update({
                '6K #': np.uint8(1),
                'Axis': np.uint8((2 * (receptacle - 1)) + 1),
                'Id': np.uint8(receptacle)
            })

            # add attributes to Axes[1] group
            self[probe_gname + '/Axes[1]'].attrs.update({
                '6K #': np.uint8(1),
                'Axis': np.uint8((2 * (receptacle - 1)) + 2),
                'Id': np.uint8(receptacle)
            })

            # fill configs dict
            self._configs[receptacle] = {'probe name': pname,
                                         'receptacle': receptacle,
                                         'motion lists': []}
Example #9
    def _build_config_sis3305_subgroup(self, config_name: str, slot: int,
                                       index: int):
        """
        Create and set attributes for a SIS 3305 configuration group.
        """
        # create group
        gname = f"SIS crate 3305 configurations[{index}]"
        gpath = f"{config_name}/{gname}"
        self.create_group(gpath)

        # get channel array
        brd = self.slot_info[slot][0]
        sis_arr = self._active_brdch["SIS 3305"][brd - 1]

        # populate attributes
        self[gpath].attrs.update({
            "Bandwidth": np.uint32(1),
            "Channel mode": np.uint32(self._sis3305_mode),
            "Clock rate": np.uint32(0),
            "Samples": np.uint32(self.knobs.nt),
            "Shot averaging (software)": np.int32(1),
        })
        for ii in range(1, 9):
            # setup
            if 1 <= ii <= 4:
                fpga_str = "FPGA 1 "
                ch = ii
            else:
                fpga_str = "FPGA 2 "
                ch = ii - 4

            # 'FPGA # Avail #' fields
            if self._sis3305_mode == 2 and ch != 1:
                mode = "FALSE"
            elif self._sis3305_mode == 1 and ch not in (1, 3):
                mode = "FALSE"
            else:
                mode = "TRUE"
            field = f"{fpga_str}Avail {ch}"
            self[gpath].attrs[field] = np.bytes_(mode)

            # 'FPGA # Ch #' fields
            field = f"{fpga_str}Ch {ch}"
            self[gpath].attrs[field] = np.int32(ii)

            # 'FPGA # Comment #' fields
            field = f"{fpga_str}Comment {ch}"
            self[gpath].attrs[field] = np.bytes_("")

            # 'FPGA # Data type #' fields
            field = f"{fpga_str}Data type {ch}"
            self[gpath].attrs[field] = np.bytes_(f"probe name {ii}")

            # 'FPGA # Enabled #' fields
            field = f"{fpga_str}Enabled {ch}"
            self[gpath].attrs[field] = np.bytes_(
                "TRUE" if sis_arr[ii - 1] else "FALSE")
Example #10
def get_ctab_and_names(coords, labels, use_pretty_colors=True):
    """
    Generate a label table from an existing label map.

    Parameters:
    - - - - -
    coords : array
        vertex coordinates
    labels : array
        cluster map
    use_pretty_colors : bool
        generate pretty colors
    """

    # only include vertices that have non-zero label-values
    full_map = np.zeros(labels.shape)

    idx = (labels > 0)
    coords = coords[idx, :]
    labels = labels[idx]

    # get unique label values
    unique_ids = np.unique(labels)
    n_clusters = len(unique_ids)

    # Create color table (first element = [0, 0, 0] for 'unknown' region)
    n_colors = n_clusters + 1
    ctab = np.hstack((get_equally_spaced_colors(n_colors), [[0]] * n_colors))

    # Create name list for new regions
    names = [np.bytes_('unknown')] + \
        [np.bytes_('parc_%i' % (i)) for i in unique_ids]

    # Reorder table and names according to their distance to the sphere "bottom"
    # Set keys globally so the return doesn't break if use_pretty_colors=False
    keys = "No color remapping done"
    if use_pretty_colors:

        # Compute mean height (on z-axis) per label to resort color table
        label_centers = np.array(
            [np.mean(coords[labels == l], axis=0) for l in np.unique(labels)])

        # Find new order of labels
        sphere_bottom = [0, 0, -100]
        keys = np.argsort(np.linalg.norm(label_centers - sphere_bottom,
                                         axis=1)) + 1

        # Relabel the labels accordingly
        labels = np.array([np.where(keys == l)[0][0] + 1 for l in labels])

    full_map[idx] = labels
    labels = np.int32(full_map)

    return [keys, ctab, names, labels]
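A short sketch of the name-table convention above: region names are built as np.bytes_ so that downstream annotation writers receive byte strings.

import numpy as np

unique_ids = [3, 7]  # hypothetical label values
names = [np.bytes_('unknown')] + [np.bytes_('parc_%i' % i) for i in unique_ids]
assert names == [b'unknown', b'parc_3', b'parc_7']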
Example #11
def _h5set(store, grp, key, value, path=None):
    """Set a key in an h5py container.

    This method recursively converts Mappings to h5py groups and transparently
    handles None values.
    """
    import h5py
    import numpy    # h5py depends on numpy, so this is safe.
    path = path + '/' + key if path else key

    # Guard against assigning a group to itself, e.g., `h5s[key] = h5s[key]`,
    # where h5s[key] is a mapping. This is necessary, because the original
    # mapping would be deleted prior to assignment.
    if key in grp:
        if isinstance(value, H5Group):
            if grp[key] == value._group:
                return  # Groups are identical, do nothing.
        elif isinstance(value, h5py._hl.dataset.Dataset):
            if grp == value.parent:
                return  # Dataset is identical, do nothing.

        # Delete any existing data
        del grp[key]

    # Mapping-types
    if isinstance(value, Mapping):
        subgrp = grp.create_group(key)
        for k, v in value.items():
            _h5set(store, subgrp, k, v, path)

    # Regular built-in types:
    elif value is None:
        grp.create_dataset(key, data=None, shape=None, dtype='f')
    elif isinstance(value, (int, float, str, bool, array.array)):
        grp[key] = value
    elif isinstance(value, bytes):
        grp[key] = numpy.bytes_(value)

    # NumPy types
    elif type(value).__module__ == numpy.__name__:
        grp[key] = value

    # h5py native types
    elif isinstance(value, h5py._hl.dataset.Dataset):
        grp[key] = value  # Creates hard-link!

    # Other types
    else:
        _load_pandas()   # might be a pandas type
        if _is_pandas_type(value):
            _requires_tables()
            store.close()
            with _pandas.HDFStore(store._filename, mode='a') as store_:
                store_[path] = value
            store.open()
        else:
            grp[key] = value
            warnings.warn(
                "Storage for object of type '{}' appears to have succeeded, but this "
                "type is not officially supported!".format(type(value)))
Example #12
def set_attributes_all(target, attributes, discard_others=True):
    """ Set Attributes in bulk and optionally discard others.

    Sets each Attribute in turn (modifying it in place if possible if it
    is already present) and optionally discarding all other Attributes
    not explicitly set. This function yields much greater performance
    than the required individual calls to ``set_attribute``,
    ``set_attribute_string``, ``set_attribute_string_array`` and
    ``del_attribute`` put together.

    .. versionadded:: 0.2

    Parameters
    ----------
    target : Dataset or Group
        Dataset or Group to set the Attributes of.
    attributes : dict
        The Attributes to set. The keys (``str``) are the names. The
        values are ``tuple`` of the Attribute kind and the value to
        set. Valid kinds are ``'string_array'``, ``'string'``, and
        ``'value'``. The values must correspond to what
        ``set_attribute_string_array``, ``set_attribute_string`` and
        ``set_attribute`` would take respectively.
    discard_others : bool, optional
        Whether to discard all other Attributes not explicitly set
        (default) or not.

    See Also
    --------
    set_attribute
    set_attribute_string
    set_attribute_string_array

    """
    attrs = target.attrs
    existing = dict(attrs.items())
    # Generate special dtype for string arrays.
    str_arr_dtype = h5py.special_dtype(vlen=str)
    # Go through each attribute. If it is already present, modify it if
    # possible and create it otherwise (deletes old value.)
    for k, (kind, value) in attributes.items():
        if kind == 'string_array':
            attrs.create(k, [convert_to_str(s) for s in value],
                         dtype=str_arr_dtype)
        else:
            if kind == 'string':
                value = np.bytes_(value)
            if k not in existing:
                attrs.create(k, value)
            else:
                try:
                    if value.dtype == existing[k].dtype \
                            and value.shape == existing[k].shape:
                        attrs.modify(k, value)
                except:
                    attrs.create(k, value)
    # Discard all other attributes.
    if discard_others:
        for k in set(existing) - set(attributes):
            del attrs[k]
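A hypothetical usage sketch for set_attributes_all (in-memory file; it relies on the surrounding module's helpers such as convert_to_str): each value is a (kind, value) pair, and 'string' kinds are routed through np.bytes_.

import h5py
import numpy as np

with h5py.File("demo_attrs.h5", "w", driver="core", backing_store=False) as f:
    dset = f.create_dataset("d", data=np.arange(3))
    set_attributes_all(dset, {
        "title": ("string", "experiment 42"),
        "labels": ("string_array", ["a", "b"]),
        "count": ("value", np.uint32(7)),
    })
    print(dict(dset.attrs))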
Example #13
def set_attribute_string(target, name, value):
    """ Sets an attribute to a string on a Dataset or Group.

    If the attribute `name` doesn't exist yet, it is created. If it
    already exists, it is overwritten if it differs from `value`.

    Notes
    -----
    ``set_attributes_all`` is the fastest way to set and delete
    Attributes in bulk.

    Parameters
    ----------
    target : Dataset or Group
        Dataset or Group to set the string attribute of.
    name : str
        Name of the attribute to set.
    value : string
        Value to set the attribute to. Can be any sort of string type
        that will convert to a ``numpy.bytes_``

    See Also
    --------
    set_attributes_all

    """
    set_attribute(target, name, np.bytes_(value))
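A usage sketch equivalent to the helper above (hypothetical file and attribute names): the value lands in HDF5 as a fixed-length byte-string attribute.

import h5py
import numpy as np

with h5py.File("demo_attr.h5", "w", driver="core", backing_store=False) as f:
    f.attrs["title"] = np.bytes_("an ASCII title")
    print(f.attrs["title"])  # b'an ASCII title'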
Example #14
    def test_misc(self):
        """Test Miscellaneous features."""
        # make a default/clean 'Waveform' module
        self.mod.knobs.reset()

        # no RE match so _default_state_values_dict() is used for
        # 'command list'
        #
        config_name = self.mod.config_names[0]
        cl = np.bytes_("AMP 10.0 \nAMP 15.0 \nAMP 20.0 \n")
        self.mod[config_name].attrs["N5700 power supply command list"] = cl
        self.assertControlMapBasics(self.map, self.dgroup)
        self.mod.knobs.reset()

        # check warning if a general item is missing from group
        # - a warning is thrown, but mapping continues
        # - remove attribute 'IP address'
        config_name = self.mod.config_names[0]
        del self.mod[config_name].attrs["IP address"]
        with self.assertWarns(UserWarning):
            _map = self.map
        self.mod.knobs.reset()

        # '_construct_state_values_dict' throws KeyError when executing
        # '_build_configs'
        # - default dict is used for state values
        #
        with mock.patch.object(
            self.MAP_CLASS, "_construct_state_values_dict", side_effect=KeyError
        ):
            _map = self.map
            for cname, config in _map.configs.items():
                self.assertEqual(
                    config["state values"], _map._default_state_values_dict(cname)
                )
Example #15
 def prepare_data(self, sed, exclude_nu_band=[], mask_catalog=['DEBL']):
     if len(sed['f4']) == 0:
         return None
     cat_nu_mask = np.array([True] * len(sed['f4']))
     for i in range(len(sed['f4'])):
         cat_bool = np.any(
             [sed['f4'][i] == np.bytes_(j) for j in mask_catalog])
         nu_bool = np.any([((sed['f0'][i] > j[0]) & (sed['f0'][i] < j[1]))
                           for j in exclude_nu_band])
         if np.any([cat_bool, nu_bool]):
             cat_nu_mask[i] = False
     sed = sed[(sed['f2'] != sed['f3']) & (sed['f3'] > 0) & cat_nu_mask]
     frequency = sed['f0']
     inds = np.sum(nu_bins <= frequency[:, np.newaxis], axis=1) - 1
     array = np.full((len(nu_bins) - 1) * 2, 0.)
     for i in range(len(nu_bins) - 1):
         tarray = sed['f1'][inds == i]
         if len(tarray) == 0:
             continue
         ret = np.log10(np.median(tarray))
         if not np.isfinite(ret):
             continue
         array[i] = ret / 10.
         array[len(nu_bins) - 1 + i] = np.var(np.log10(tarray)) / 10.
         array[np.isnan(array)] = 0
     return np.atleast_2d(array)
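The binning step above assumes a module-level nu_bins array of bin edges; a standalone sketch of how each frequency is mapped to its bin index:

import numpy as np

nu_bins = np.array([1e9, 1e10, 1e11, 1e12])  # hypothetical bin edges
frequency = np.array([5e9, 3e11])
inds = np.sum(nu_bins <= frequency[:, np.newaxis], axis=1) - 1
print(inds)  # [0 2]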
Example #16
    def hdf_dataset(self):
        """ Setups and tears down a sample HDF5 file """
        filename = 'temp_test_ui.h5'
        fid = h5py.File(filename, 'w')
        data_m, data_n, data_p = [20, 22, 24]
        data = np.random.randn(data_m, data_n, data_p)

        fid.create_dataset('base', data=data)

        grp1 = fid.create_group('Group1')
        grp3 = fid.create_group('Group2/Group3')
        grp6 = fid.create_group('Group4/Group5/Group6')

        grp1.create_dataset('ingroup1_1', data=data)
        grp1.create_dataset('ingroup1_2', data=data)
        fid.create_dataset('Group2/ingroup2', data=data)
        grp3.create_dataset('ingroup3', data=data)

        grp6.create_dataset('ingroup6', data=data)

        fid['base'].attrs['Attribute_str'] = 'Test'
        fid['base'].attrs['Attribute_bytes'] = b'Test'
        fid['base'].attrs['Attribute_np_bytes'] = np.bytes_('Test') # pylint: disable=no-member
        fid['base'].attrs.create('Attribute_int', 1)
        fid['base'].attrs.create('Attribute_float', 1.1)
        fid['base'].attrs.create('Attribute_np_1d', np.array([1, 2, 3]))
        fid['base'].attrs.create('Attribute_np_2d', np.array([[1, 2, 3], [4, 5, 6]]))

        app = QApplication(sys.argv)  # pylint: disable=C0103, W0612
        yield filename

        # Tear-down
        if hdf_is_open(fid):
            fid.close()
        os.remove(filename)
Example #17
def test_return_family_type():
    """ Test return_family_type """
    assert return_family_type(1) is int
    assert return_family_type(1.1) is float
    assert return_family_type(1 + 1j*3) is complex
    assert return_family_type('Test') is str
    assert return_family_type(b'Test') is bytes
    assert return_family_type(True) is bool

    # np.int, np.float, np.complex, np.str, and np.bool were plain aliases of
    # the builtins and were removed in NumPy 1.24; only the real numpy scalar
    # types are exercised here.
    assert return_family_type(np.int32(1)) is int
    assert return_family_type(np.float32(1.1)) is float
    assert return_family_type(np.complex64(1 + 1j*3)) is complex
    assert return_family_type(np.str_('Test')) is str  # pylint: disable=E1101
    assert return_family_type(np.bytes_('Test')) is bytes  # pylint: disable=E1101
    assert return_family_type(np.bool_(True)) is bool

    with pytest.raises(TypeError):
        return_family_type([1, 2, 3])

    with pytest.raises(TypeError):
        return_family_type((1, 2, 3))

    with pytest.raises(TypeError):
        return_family_type({'a':1})
Example #18
 def prepare_data(self, sed, exclude_nu_band=[], mask_catalog=['DEBL']):
     if len(sed['f4']) == 0:
         return None
     cat_nu_mask = np.array([True] * len(sed['f4']))
     for i in range(len(sed['f4'])):
         cat_bool = np.any(
             [sed['f4'][i] == np.bytes_(j) for j in mask_catalog])
         nu_bool = np.any([((sed['f0'][i] > j[0]) & (sed['f0'][i] < j[1]))
                           for j in exclude_nu_band])
         if np.any([cat_bool, nu_bool]):
             cat_nu_mask[i] = False
     sed = sed[(sed['f2'] != sed['f3']) & (sed['f3'] > 0) &
               (sed['f4'] != mask_catalog)]
     frequency = sed['f0']
     inds = np.sum(nu_bins <= frequency[:, np.newaxis], axis=1) - 1
     array = np.full((2, len(nu_bins) - 1), 0.)
     for i in range(len(nu_bins) - 1):
         tarray = sed['f1'][inds == i]
         tarray = tarray[tarray > 0]
         if (len(tarray) == 0):
             continue
         ret = np.log10(np.median(tarray))
         array[0, i] = ret / 10. + 2
         if len(tarray) < 2:
             array[1, i] = median_variances[i]
         else:
             if np.std(tarray) == 0:
                 array[1, i] = median_variances[i]
             else:
                 array[1, i] = np.log10(np.std(tarray)) / 10. + 2
         array[np.isnan(array)] = 0
     return np.atleast_2d(array)[np.newaxis, :, :, np.newaxis]
Example #19
def test_HDF5_io(tempdir, order, element):
    pytest.importorskip("pygmsh")
    h5py = pytest.importorskip("h5py")

    from pygmsh.opencascade import Geometry
    from pygmsh import generate_mesh

    # Generate a sphere with gmsh with tetrahedral elements
    geo = Geometry()
    geo.add_raw_code("Mesh.Algorithm = 2;")
    geo.add_raw_code("Mesh.Algorithm3D = 10;")
    geo.add_raw_code("Mesh.ElementOrder = {0:d};".format(order))
    geo.add_ball([0, 0, 0], 1, char_length=0.3)
    geo.add_raw_code("Physical Volume (1) = {1};")

    msh = generate_mesh(geo, verbose=False, dim=3)

    # Write gmsh to HDF5
    filename = os.path.join(tempdir, "mesh_order{0:d}.h5".format(order))
    f = h5py.File(filename, "w", driver='mpio', comm=MPI4PY.COMM_WORLD)
    grp = f.create_group("my_mesh")
    grp.create_dataset("cell_indices", data=range(msh.cells[element].shape[0]))
    grp.create_dataset("coordinates", data=msh.points)
    top = grp.create_dataset("topology", data=msh.cells[element])
    top.attrs["celltype"] = np.bytes_('tetrahedron')
    f.close()

    # Read mesh from HDF5
    mesh_file = HDF5File(MPI.comm_world, filename, "r")
    mesh = mesh_file.read_mesh("/my_mesh", False, cpp.mesh.GhostMode.none)
    mesh_file.close()

    # Save mesh with VTK
    outfile = os.path.join(tempdir, "mesh{0:d}.pvd".format(order))
    VTKFile(outfile).write(mesh)
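A minimal sketch of the celltype attribute written above (in-memory file): the mesh reader looks for a fixed-length byte-string attribute on the topology dataset.

import h5py
import numpy as np

with h5py.File("demo_mesh.h5", "w", driver="core", backing_store=False) as f:
    top = f.create_dataset("my_mesh/topology", data=np.zeros((1, 4), dtype="i8"))
    top.attrs["celltype"] = np.bytes_("tetrahedron")
    print(top.attrs["celltype"])  # b'tetrahedron'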
Example #20
def getAttr( dataset, ky ):
  try:
    attrib = dataset.attrs[ky]
    return np.bytes_(dataset.attrs[ky]).decode().rstrip().split("\x00")[0]
  except KeyError:
    print( "HDF5 key not found: '"+ky+"'. Available keys:\n")
    print( list(dataset.attrs.keys()) )
    raise
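A standalone sketch of the clean-up getAttr performs: decode a null-padded byte string and keep only the text before the first NUL.

import numpy as np

raw = np.bytes_(b"CALIBRATED\x00\x00")  # hypothetical attribute value
print(np.bytes_(raw).decode().rstrip().split("\x00")[0])  # CALIBRATED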
Example #21
def save_aer(ds, data):
    ''' send aer data dict to dataset buffer '''
    row = [
        np.fromstring(np.bytes_(data['dvs_timestamp']), dtype=np.uint8),
        np.fromstring(data['dvs_header'], dtype=np.uint8),
        np.fromstring(data['dvs_data'], dtype=np.uint8),
    ]
    ds.save({'dvs_timestamp': data['dvs_timestamp'], 'dvs_data': row})
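np.fromstring on byte data is deprecated; a sketch of the same unpacking with np.frombuffer, which views a byte string as a uint8 array (the header bytes here are hypothetical):

import numpy as np

row = np.frombuffer(np.bytes_("#!AER-DAT2.0\r\n"), dtype=np.uint8)
print(row[:4])  # [35 33 65 69]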
Example #23
 def molpath(self, molfile):
     if "~" in molfile:
         molfile = os.path.expanduser(molfile)
     if PYVERSION == 3:
         self.radex.impex.molfile[:] = np.bytes_([""]*len(self.radex.impex.molfile))
     else:
         self.radex.impex.molfile[:] = ""
     utils.verify_collisionratefile(molfile)
     self.radex.impex.molfile[:len(molfile)] = molfile
Example #24
 def datapath(self, radat):
     # self.radex data path not needed if molecule given as full path
     if PYVERSION == 3:
         self.radex.setup.radat[:] = np.bytes_([""] * len(self.radex.setup.radat))
     else:
         self.radex.setup.radat[:] = ""
     # there is dangerous magic here: radat needs to be interpreted as an array,
     # but you can't make it an array of characters easily...
     self.radex.setup.radat[:len(radat)] = radat
Example #25
    def setUp(self):
        self.b_lit = b'bytes literal'
        self.s_lit = 'literal literal'
        self.u_lit = u'unicode literal'

        self.np_b_lit = np.bytes_('numpy bytes literal')
        self.np_s_lit = np.str_('numpy unicode literal')
        self.np_u_lit = np.unicode_('numpy unicode literal')
Example #27
 def test_isscalar_numpy_array_scalars(self):
     self.assertTrue(lib.isscalar(np.int64(1)))
     self.assertTrue(lib.isscalar(np.float64(1.)))
     self.assertTrue(lib.isscalar(np.int32(1)))
     self.assertTrue(lib.isscalar(np.object_('foobar')))
     self.assertTrue(lib.isscalar(np.str_('foobar')))
     self.assertTrue(lib.isscalar(np.unicode_(u('foobar'))))
     self.assertTrue(lib.isscalar(np.bytes_(b'foobar')))
     self.assertTrue(lib.isscalar(np.datetime64('2014-01-01')))
     self.assertTrue(lib.isscalar(np.timedelta64(1, 'h')))
Example #28
 def test_is_scalar_numpy_array_scalars(self):
     assert is_scalar(np.int64(1))
     assert is_scalar(np.float64(1.0))
     assert is_scalar(np.int32(1))
     assert is_scalar(np.object_("foobar"))
     assert is_scalar(np.str_("foobar"))
     assert is_scalar(np.unicode_("foobar"))
     assert is_scalar(np.bytes_(b"foobar"))
     assert is_scalar(np.datetime64("2014-01-01"))
     assert is_scalar(np.timedelta64(1, "h"))
Example #29
    def _build_config_sis3302_subgroup(self, config_name: str, slot: int,
                                       index: int):
        """
        Create and set attributes for a SIS 3302 configuration group.
        """
        # create group
        gname = 'SIS crate 3302 configurations[{}]'.format(index)
        gpath = config_name + '/' + gname
        self.create_group(gpath)

        # get channel array
        brd = self.slot_info[slot][0]
        sis_arr = self._active_brdch['SIS 3302'][brd - 1]

        # populate attributes
        self[gpath].attrs.update({
            'Clock rate': np.uint32(7),
            'Sample averaging (hardware)': np.uint32(0),
            'Samples': np.uint32(self.knobs.nt),
            'Shot averaging (software)': np.int32(1),
        })
        for ii in range(1, 9):
            # 'Ch #' fields
            field = 'Ch {}'.format(ii)
            self[gpath].attrs[field] = np.int32(ii)

            # 'Comment #' fields
            field = 'Comment {}'.format(ii)
            self[gpath].attrs[field] = np.bytes_('')

            # 'DC offset #' fields
            field = 'DC offset {}'.format(ii)
            self[gpath].attrs[field] = np.float64(0.0)

            # 'Data type #' fields
            field = 'Data type {}'.format(ii)
            self[gpath].attrs[field] = \
                np.bytes_('probe name {}'.format(ii))

            # 'Enabled #' fields
            field = 'Enabled {}'.format(ii)
            self[gpath].attrs[field] = \
                np.bytes_('TRUE' if sis_arr[ii - 1] else 'FALSE')
Example #30
 def _set_attrs(self):
     """Set the 'Discharge' group attributes"""
     # assign attributes
     self.attrs.update({
         'Calibration tag': np.bytes_(''),
         'Current conversion factor': np.float32(0.0),
         'Start time': np.float32(-0.0249856),
         'Timestep': np.float32(4.88E-5),
         'Voltage conversion factor': np.float32(0.0)
     })
Example #31
 def _set_attrs(self):
     """Set the 'Discharge' group attributes"""
     # assign attributes
     self.attrs.update({
         "Calibration tag": np.bytes_(""),
         "Current conversion factor": np.float32(0.0),
         "Start time": np.float32(-0.0249856),
         "Timestep": np.float32(4.88e-5),
         "Voltage conversion factor": np.float32(0.0),
     })
Example #32
 def test_isscalar_numpy_array_scalars(self):
     self.assertTrue(is_scalar(np.int64(1)))
     self.assertTrue(is_scalar(np.float64(1.)))
     self.assertTrue(is_scalar(np.int32(1)))
     self.assertTrue(is_scalar(np.object_('foobar')))
     self.assertTrue(is_scalar(np.str_('foobar')))
     self.assertTrue(is_scalar(np.unicode_(u('foobar'))))
     self.assertTrue(is_scalar(np.bytes_(b'foobar')))
     self.assertTrue(is_scalar(np.datetime64('2014-01-01')))
     self.assertTrue(is_scalar(np.timedelta64(1, 'h')))
Example #33
 def test_isscalar_numpy_array_scalars(self):
     self.assertTrue(lib.isscalar(np.int64(1)))
     self.assertTrue(lib.isscalar(np.float64(1.0)))
     self.assertTrue(lib.isscalar(np.int32(1)))
     self.assertTrue(lib.isscalar(np.object_("foobar")))
     self.assertTrue(lib.isscalar(np.str_("foobar")))
     self.assertTrue(lib.isscalar(np.unicode_(u("foobar"))))
     self.assertTrue(lib.isscalar(np.bytes_(b"foobar")))
     self.assertTrue(lib.isscalar(np.datetime64("2014-01-01")))
     self.assertTrue(lib.isscalar(np.timedelta64(1, "h")))
Example #34
def random_numpy(shape, dtype, allow_nan=True,
                 allow_unicode=False):
    # Makes a random numpy array of the specified shape and dtype
    # string. The method is slightly different depending on the
    # type. For 'bytes', 'str', and 'object'; an array of the
    # specified size is made and then each element is set to either
    # a numpy.bytes_, numpy.str_, or some other object of any type
    # (here, it is a randomly typed random numpy array). If it is
    # any other type, then it is just a matter of constructing the
    # right sized ndarray from a random sequence of bytes (all must
    # be forced to 0 and 1 for bool). Optionally include unicode
    # characters.
    if dtype == 'S':
        length = random.randint(1, max_string_length)
        data = np.zeros(shape=shape, dtype='S' + str(length))
        for x in np.nditer(data, op_flags=['readwrite']):
            if allow_unicode:
                chars = random_bytes_fullrange(length)
            else:
                chars = random_bytes(length)
            x[...] = np.bytes_(chars)
        return data
    elif dtype == 'U':
        length = random.randint(1, max_string_length)
        data = np.zeros(shape=shape, dtype='U' + str(length))
        for x in np.nditer(data, op_flags=['readwrite']):
            if allow_unicode:
                chars = _random_str_some_unicode(length)
            else:
                chars = random_str_ascii(length)
            x[...] = np.unicode_(chars)
        return data
    elif dtype == 'object':
        data = np.zeros(shape=shape, dtype='object')
        for index, x in np.ndenumerate(data):
            data[index] = random_numpy( \
                shape=random_numpy_shape( \
                object_subarray_dimensions, \
                max_object_subarray_axis_length), \
                dtype=random.choice(dtypes))
        return data
    else:
        nbytes = np.ndarray(shape=(1,), dtype=dtype).nbytes
        bts = np.random.bytes(nbytes * np.prod(shape))
        if dtype == 'bool':
            bts = b''.join([{True: b'\x01', False: b'\x00'}[ \
                ch > 127] for ch in bts])
        data = np.ndarray(shape=shape, dtype=dtype, buffer=bts)
        # If it is a floating point type and we are supposed to
        # remove NaN's, then turn them to zeros.
        if not allow_nan and data.dtype.kind in ('f', 'c') \
            and np.any(np.isnan(data)):
            data = data.copy()
            data[np.isnan(data)] = 0.0
        return data
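A minimal sketch of the 'S' branch above: np.bytes_ values are written elementwise into a fixed-width byte-string array via nditer.

import numpy as np

data = np.zeros(shape=(2,), dtype="S5")
for x in np.nditer(data, op_flags=["readwrite"]):
    x[...] = np.bytes_(b"abc")
print(data)  # [b'abc' b'abc']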
Example #35
    def locate(self, column, value, test="eq"):
        """

        Locate indices where `column` compares to `value` under the given test.
        :param column: DESCRIPTION
        :type column: TYPE
        :param value: DESCRIPTION
        :type value: TYPE
        :param test: type of test to try
        * 'eq': equals
        * 'lt': less than
        * 'le': less than or equal to
        * 'gt': greater than
        * 'ge': greater than or equal to.
        * 'be': between or equal to
        * 'bt': between

        For 'be' or 'bt', pass `value` as a list of two values.

        :return: DESCRIPTION
        :rtype: TYPE

        """
        if isinstance(value, str):
            value = np.bytes_(value)

        # use numpy datetime for testing against time.
        if column in ["start", "end", "start_date", "end_date"]:
            test_array = self.array[column].astype(np.datetime64)
            value = np.datetime64(value)
        else:
            test_array = self.array[column]

        if test == "eq":
            index_values = np.where(test_array == value)[0]
        elif test == "lt":
            index_values = np.where(test_array < value)[0]
        elif test == "le":
            index_values = np.where(test_array <= value)[0]
        elif test == "gt":
            index_values = np.where(test_array > value)[0]
        elif test == "ge":
            index_values = np.where(test_array >= value)[0]
        elif test == "be":
            if not isinstance(value, (list, tuple, np.ndarray)):
                msg = "If testing for between value must be an iterable of length 2."
                self.logger.error(msg)
                raise ValueError(msg)

            index_values = np.where((test_array > value[0])
                                    & (test_array < value[1]))[0]
        else:
            raise ValueError("Test {0} not understood".format(test))

        return index_values
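A hypothetical usage sketch for locate(): string columns in the underlying structured array hold byte strings, which is why a str input is first converted with np.bytes_.

import numpy as np

array = np.array([(b"mt01",), (b"mt02",)], dtype=[("station", "S4")])
value = np.bytes_("mt02")  # what locate() does to a str value
print(np.where(array["station"] == value)[0])  # [1]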
Example #36
def scalar_instances(times=True, extended_precision=True, user_dtype=True):
    # Hard-coded list of scalar instances.
    # Floats:
    yield param(np.sqrt(np.float16(5)), id="float16")
    yield param(np.sqrt(np.float32(5)), id="float32")
    yield param(np.sqrt(np.float64(5)), id="float64")
    if extended_precision:
        yield param(np.sqrt(np.longdouble(5)), id="longdouble")

    # Complex:
    yield param(np.sqrt(np.complex64(2 + 3j)), id="complex64")
    yield param(np.sqrt(np.complex128(2 + 3j)), id="complex128")
    if extended_precision:
        yield param(np.sqrt(np.longcomplex(2 + 3j)), id="clongdouble")

    # Bool:
    # XFAIL: Bool should be added, but has some bad properties when it
    # comes to strings, see also gh-9875
    # yield param(np.bool_(0), id="bool")

    # Integers:
    yield param(np.int8(2), id="int8")
    yield param(np.int16(2), id="int16")
    yield param(np.int32(2), id="int32")
    yield param(np.int64(2), id="int64")

    yield param(np.uint8(2), id="uint8")
    yield param(np.uint16(2), id="uint16")
    yield param(np.uint32(2), id="uint32")
    yield param(np.uint64(2), id="uint64")

    # Rational:
    if user_dtype:
        yield param(rational(1, 2), id="rational")

    # Cannot create a structured void scalar directly:
    structured = np.array([(1, 3)], "i,i")[0]
    assert isinstance(structured, np.void)
    assert structured.dtype == np.dtype("i,i")
    yield param(structured, id="structured")

    if times:
        # Datetimes and timedelta
        yield param(np.timedelta64(2), id="timedelta64[generic]")
        yield param(np.timedelta64(23, "s"), id="timedelta64[s]")
        yield param(np.timedelta64("NaT", "s"), id="timedelta64[s](NaT)")

        yield param(np.datetime64("NaT"), id="datetime64[generic](NaT)")
        yield param(np.datetime64("2020-06-07 12:43", "ms"),
                    id="datetime64[ms]")

    # Strings and unstructured void:
    yield param(np.bytes_(b"1234"), id="bytes")
    yield param(np.unicode_("2345"), id="unicode")
    yield param(np.void(b"4321"), id="unstructured_void")
Example #37
 def outfile(self, outfile):
     if PYVERSION == 3:
         try:
             self.radex.impex.outfile[:] = np.bytes_([""]*len(self.radex.impex.outfile))
         except TypeError as ex:
             self.radex.impex.outfile = " " * self.radex.impex.outfile.dtype.itemsize
     else:
         self.radex.impex.outfile[:] = ""
     try:
         self.radex.impex.outfile[:len(outfile)] = outfile
     except IndexError:
         self.radex.impex.outfile = outfile + " " * (self.radex.impex.outfile.dtype.itemsize - len(outfile))
Example #38
 def logfile(self, logfile):
     if PYVERSION == 3:
         try:
             self.radex.setup.logfile[:] = np.bytes_([""]*len(self.radex.setup.logfile))
         except TypeError as ex:
             self.radex.setup.logfile = " " * self.radex.setup.logfile.dtype.itemsize
     else:
         self.radex.setup.logfile[:] = ""
     try:
         self.radex.setup.logfile[:len(logfile)] = logfile
     except IndexError:
         self.radex.setup.logfile = logfile + " " * (self.radex.setup.logfile.dtype.itemsize - len(logfile))
Example #39
def search_for_string(h5_str, value):
    match = False
    if h5_str is not None:
        if isinstance(h5_str, (str, np.string_)):
            if h5_str == value:
                match = True
        elif isinstance(h5_str, (list, np.ndarray)):
            match = False
            for i in range(len(h5_str)):
                if h5_str[i] == value or h5_str[i] == np.bytes_(value):
                    match = True
                    break
    return match
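A short sketch of the list branch above: HDF5 string arrays often hold byte strings, so a str value matches only after np.bytes_ conversion.

import numpy as np

h5_str = [np.bytes_("alpha"), np.bytes_("beta")]  # hypothetical attribute
value = "beta"
print(any(s == value or s == np.bytes_(value) for s in h5_str))  # True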
Example #40
def random_numpy_scalar(dtype):
    # How a random scalar is made depends on the type. For most, it
    # is just a single number. But for the string types, it is a
    # string of any length.
    if dtype == 'S':
        return np.bytes_(random_bytes(random.randint(1,
                         max_string_length)))
    elif dtype == 'U':
        return np.unicode_(random_str_ascii(
                           random.randint(1,
                           max_string_length)))
    else:
        return random_numpy(tuple(), dtype)[()]
Example #41
def check_shaderError(shader, flag, isProgram, errorMessage):
    success = bgl.Buffer(bgl.GL_INT, 1)

    if isProgram:
        bgl.glGetProgramiv(shader, flag, success)
    else:
        bgl.glGetShaderiv(shader, flag, success)

    if success[0] == bgl.GL_FALSE:
        import numpy as np
        import ctypes

        offset = bgl.Buffer(bgl.GL_INT, 1, (ctypes.c_int32 * 1).from_address(0))
        error = bgl.Buffer(bgl.GL_BYTE, 1024)
        if isProgram:
            bgl.glGetProgramInfoLog(shader, 1024, offset, error)
            print(errorMessage, np.bytes_(error).decode("utf-8"))
        else:
            bgl.glGetShaderInfoLog(shader, 1024, offset, error)
            print(errorMessage, np.bytes_(error).decode("utf-8"))

        del offset
        raise RuntimeError(errorMessage)
Example #42
 def molpath(self, molfile):
     if "~" in molfile:
         molfile = os.path.expanduser(molfile)
     if PYVERSION == 3:
         try:
             self.radex.impex.molfile[:] = np.bytes_([""]*len(self.radex.impex.molfile))
         except TypeError as ex:
             self.radex.impex.molfile = " " * self.radex.impex.molfile.dtype.itemsize
     else:
         self.radex.impex.molfile[:] = ""
     utils.verify_collisionratefile(molfile)
     try:
         self.radex.impex.molfile[:len(molfile)] = molfile
     except IndexError:
         self.radex.impex.molfile = molfile + " " * (self.radex.impex.molfile.dtype.itemsize - len(molfile))
Example #43
def set_attribute_string(target, name, value):
    """ Sets an attribute to a string on a Dataset or Group.

    If the attribute `name` doesn't exist yet, it is created. If it
    already exists, it is overwritten if it differs from `value`.

    Parameters
    ----------
    target : Dataset or Group
        Dataset or Group to set the string attribute of.
    name : str
        Name of the attribute to set.
    value : string
        Value to set the attribute to. Can be any sort of string type
        that will convert to a ``numpy.bytes_``

    """
    set_attribute(target, name, np.bytes_(value))
Example #44
    def check_dict_like_other_type_key(self, tp, other_tp):
        data = random_dict(tp)

        key_gen = random_str_some_unicode(max_dict_key_length)
        if other_tp == 'numpy.bytes_':
            key = np.bytes_(key_gen.encode('UTF-8'))
        elif other_tp == 'numpy.unicode_':
            key = np.unicode_(key_gen)
        elif other_tp == 'bytes':
            key = key_gen.encode('UTF-8')
        elif other_tp == 'int':
            key = random_int()
        elif other_tp == 'float':
            key = random_float()

        data[key] = random_int()
        out = self.write_readback(data, random_name(),
                                  self.options)
        self.assert_equal(out, data)
Example #45
def check_string_type_non_str_key(tp, other_tp, option_keywords):
    options = hdf5storage.Options(**option_keywords)
    key_value_names = (options.dict_like_keys_name,
                       options.dict_like_values_name)

    data = random_dict(tp)
    for k in key_value_names:
        if k in data:
            del data[k]
    keys = list(data.keys())

    key_gen = random_str_some_unicode(max_dict_key_length)
    if other_tp == 'numpy.bytes_':
        key = np.bytes_(key_gen.encode('UTF-8'))
    elif other_tp == 'numpy.unicode_':
        key = np.unicode_(key_gen)
    elif other_tp == 'bytes':
        key = key_gen.encode('UTF-8')
    data[key] = random_int()
    keys.append(key_gen)

    # Make a random name.
    name = random_name()

    # Write the data to the proper file with the given name with the
    # provided options. The file needs to be deleted after to keep junk
    # from building up.
    fld = None
    try:
        fld = tempfile.mkstemp()
        os.close(fld[0])
        filename = fld[1]
        hdf5storage.write(data, path=name, filename=filename,
                          options=options)

        with h5py.File(filename) as f:
            assert_equal_nose(set(keys), set(f[name].keys()))

    except:
        raise
    finally:
        if fld is not None:
            os.remove(fld[1])
Example #46
 def datapath(self, radat):
     # self.radex data path not needed if molecule given as full path
     if PYVERSION == 3:
         try:
             self.radex.setup.radat[:] = np.bytes_([""] * len(self.radex.setup.radat))
         except TypeError as ex:
             # now radat gets treated as a single S120 instead of an array of S1s
             self.radex.setup.radat = " " * self.radex.setup.radat.dtype.itemsize
     else:
         self.radex.setup.radat[:] = ""
     # there is dangerous magic here: radat needs to be interpreted as an array,
     # but you can't make it an array of characters easily...
     try:
         self.radex.setup.radat[:len(radat)] = radat
     except IndexError:
         # in python3, this might just work, where the above doesn't?
         # (this works if RADAT is an S120)
         # the added space is because the right and left side must have *exactly* the same size
         self.radex.setup.radat = radat + " " * (self.radex.setup.radat.dtype.itemsize - len(radat))
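A minimal sketch of the buffer-blanking idea used in these setters: a fixed-size S-dtype entry must be overwritten with a string padded to exactly its itemsize.

import numpy as np

radat = np.zeros((), dtype="S120")  # stand-in for the Fortran character buffer
text = "mydata.dat"
radat[()] = text + " " * (radat.dtype.itemsize - len(text))
print(radat[()][:12])  # b'mydata.dat  '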
Example #47
def convert_to_numpy_bytes(data, length=None):
    """ Decodes data to Numpy UTF-8 econded string (bytes_).

    Decodes `data` to a Numpy UTF-8 encoded string, which is
    ``numpy.bytes_``, or an array of them in which case it will be ASCII
    encoded instead. If it can't be decoded, it is returned as
    is. Unsigned integers, Python string types (``str``, ``bytes``), and
    ``numpy.str_`` (UTF-32) are supported.

    For an array of unsigned integers, it may be desirable to make an
    array with strings of some specified length as opposed to an array
    of the same size with each element being a one element string. This
    naturally arises when converting strings to unsigned integer types
    in the first place, so it needs to be reversible.  The `length`
    parameter specifies how many to group together into a string
    (desired string length). For 1d arrays, this is along its only
    dimension. For higher dimensional arrays, it is done along each row
    (across columns). So, for a 3x10x5 input array of uints and a
    `length` of 5, the output array would be a 3x2x5 of 5 element
    strings.

    Parameters
    ----------
    data : some type
        Data to decode into a Numpy UTF-8 encoded string or strings.
    length : int or None, optional
        The number of consecutive elements (in the case of unsigned
        integer `data`) to compose each string in the output array from.
        ``None`` indicates the full amount for a 1d array or the number
        of columns (full length of row) for a higher dimension array.

    Returns
    -------
    numpy.bytes_ or numpy.ndarray of numpy.bytes_ or data
        If `data` can be decoded into a ``numpy.bytes_`` or a
        ``numpy.ndarray`` of them, the decoded version is returned.
        Otherwise, `data` is returned unchanged.

    See Also
    --------
    convert_to_str
    convert_to_numpy_str
    numpy.bytes_

    """
    # The method of conversion depends on its type.
    if isinstance(data, np.bytes_) or (isinstance(data, np.ndarray) \
            and data.dtype.char == 'S'):
        # It is already an np.bytes_ or array of them, so nothing needs
        # to be done.
        return data
    elif isinstance(data, (bytes, bytearray)):
        # Easily converted through constructor.
        return np.bytes_(data)
    elif (sys.hexversion >= 0x03000000 and isinstance(data, str)) \
            or (sys.hexversion < 0x03000000 \
            and isinstance(data, unicode)):
        return np.bytes_(data.encode('UTF-8'))
    elif isinstance(data, (np.uint16, np.uint32)):
        # They are single UTF-16 or UTF-32 scalars, and are easily
        # converted to a UTF-8 string and then passed through the
        # constructor.
        return np.bytes_(convert_to_str(data).encode('UTF-8'))
    elif isinstance(data, np.uint8):
        # It is just the uint8 version of the character, so it just
        # needs to be have the dtype essentially changed by having its
        # bytes read into ndarray.
        return np.ndarray(shape=tuple(), dtype='S1',
                          buffer=data.flatten().tobytes())[()]
    elif isinstance(data, np.ndarray) and data.dtype.char == 'U':
        # We just need to convert it elementwise.
        new_data = np.zeros(shape=data.shape,
                            dtype='S' + str(data.dtype.itemsize))
        for index, x in np.ndenumerate(data):
            new_data[index] = np.bytes_(x.encode('UTF-8'))
        return new_data
    elif isinstance(data, np.ndarray) \
            and data.dtype.name in ('uint8', 'uint16', 'uint32'):
        # It is an ndarray of some uint type. How it is converted
        # depends on its shape. If its shape is just (), then it is just
        # a scalar wrapped in an array, which can be converted by
        # recursing the scalar value back into this function.
        shape = list(data.shape)
        if len(shape) == 0:
            return convert_to_numpy_bytes(data[()])

        # As there are more than one element, it gets a bit more
        # complicated. We need to take the subarrays of the specified
        # length along columns (1D arrays will be treated as row arrays
        # here), each of those converted to an str_ scalar (normal
        # string) and stuffed into a new array.
        #
        # If the length was not given, it needs to be set to full. Then
        # the shape of the new array needs to be calculated (divide the
        # appropriate dimension, which depends on the number of
        # dimensions).
        if len(shape) == 1:
            if length is None:
                length2 = shape[0]
                new_shape = (shape[0],)
            else:
                length2 = length
                new_shape = (shape[0]//length2,)
        else:
            if length is None:
                length2 = shape[-1]
            else:
                length2 = length
            new_shape = copy.deepcopy(shape)
            new_shape[-1] //= length2

        # The new array can be made as all zeros (nulls) with enough
        # padding to hold everything (dtype='UL' where 'L' is the
        # length). It will start out as a 1d array and be reshaped into
        # the proper shape later (makes indexing easier).
        new_data = np.zeros(shape=(np.prod(new_shape),),
                            dtype='S'+str(length2))

        # With data flattened into a 1d array, we just need to take
        # length sized chunks, convert them (if they are uint8 or 16,
        # then decode to str first, if they are uint32, put them as an
        # input buffer for an ndarray of type 'U').
        data = data.flatten()
        for i in range(0, new_data.shape[0]):
            chunk = data[(i*length2):((i+1)*length2)]
            if data.dtype.name == 'uint8':
                new_data[i] = np.ndarray(shape=tuple(),
                                         dtype=new_data.dtype,
                                         buffer=chunk.tobytes())[()]
            else:
                new_data[i] = np.bytes_( \
                    convert_to_str(chunk).encode('UTF-8'))

        # Only thing is left is to reshape it.
        return new_data.reshape(tuple(new_shape))
    else:
        # Couldn't figure out what it is, so nothing can be done but
        # return it as is.
        return data
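A standalone sketch of the uint8 branch above: an unsigned-integer array's raw bytes are reinterpreted as fixed-length byte strings of the requested length.

import numpy as np

u = np.frombuffer(b"abcdef", dtype=np.uint8)
s = np.ndarray(shape=(2,), dtype="S3", buffer=u.tobytes())
print(s)  # [b'abc' b'def']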
Example #48
def random_numpy(shape, dtype, allow_nan=True,
                 allow_unicode=False,
                 object_element_dtypes=None):
    # Makes a random numpy array of the specified shape and dtype
    # string. The method is slightly different depending on the
    # type. For 'bytes', 'str', and 'object'; an array of the
    # specified size is made and then each element is set to either
    # a numpy.bytes_, numpy.str_, or some other object of any type
    # (here, it is a randomly typed random numpy array). If it is
    # any other type, then it is just a matter of constructing the
    # right sized ndarray from a random sequence of bytes (all must
    # be forced to 0 and 1 for bool). Optionally include unicode
    # characters. Optionally, for object dtypes, the allowed dtypes for
    # their elements can be given.
    if dtype == 'S':
        length = random.randint(1, max_string_length)
        data = np.zeros(shape=shape, dtype='S' + str(length))
        for index, x in np.ndenumerate(data):
            if allow_unicode:
                chars = random_bytes_fullrange(length)
            else:
                chars = random_bytes(length)
            data[index] = np.bytes_(chars)
        return data
    elif dtype == 'U':
        length = random.randint(1, max_string_length)
        data = np.zeros(shape=shape, dtype='U' + str(length))
        for index, x in np.ndenumerate(data):
            if allow_unicode:
                chars = random_str_some_unicode(length)
            else:
                chars = random_str_ascii(length)
            data[index] = np.unicode_(chars)
        return data
    elif dtype == 'object':
        if object_element_dtypes is None:
            object_element_dtypes = dtypes
        data = np.zeros(shape=shape, dtype='object')
        for index, x in np.ndenumerate(data):
            data[index] = random_numpy( \
                shape=random_numpy_shape( \
                object_subarray_dimensions, \
                max_object_subarray_axis_length), \
                dtype=random.choice(object_element_dtypes))
        return data
    else:
        nbytes = np.ndarray(shape=(1,), dtype=dtype).nbytes
        bts = np.random.bytes(nbytes * np.prod(shape))
        if dtype == 'bool':
            bts = b''.join([{True: b'\x01', False: b'\x00'}[ \
                ch > 127] for ch in bts])
        data = np.ndarray(shape=shape, dtype=dtype, buffer=bts)
        # If it is a floating point type and we are supposed to
        # remove NaN's, then turn them to zeros. Numpy will throw
        # RuntimeWarnings for some NaN values, so those warnings need to
        # be caught and ignored.
        if not allow_nan and data.dtype.kind in ('f', 'c'):
            data = data.copy()
            with warnings.catch_warnings():
                warnings.simplefilter('ignore', RuntimeWarning)
                if data.dtype.kind == 'f':
                    data[np.isnan(data)] = 0.0
                else:
                    data.real[np.isnan(data.real)] = 0.0
                    data.imag[np.isnan(data.imag)] = 0.0
        return data
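A hypothetical usage sketch, assuming random_numpy and the module globals it references (max_string_length, dtypes, random_numpy_shape, object_subarray_dimensions, max_object_subarray_axis_length) are all in scope:

import numpy as np

a = random_numpy(shape=(3, 2), dtype='float64', allow_nan=False)
assert a.shape == (3, 2) and a.dtype == np.dtype('float64')
assert not np.any(np.isnan(a))          # NaNs were zeroed out

b = random_numpy(shape=(4,), dtype='S')
assert b.dtype.char == 'S'              # fixed-width random bytes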
Example #49
def assert_equal_none_format(a, b, options=None):
    # Compares a and b for equality. b is always the original. If they
    # are dictionaries, a must be a structured ndarray and they must
    # have the same set of keys, after which their values must all be
    # compared. If they are a collection type (list, tuple, set,
    # frozenset, or deque), then the comparison must be made with b
    # converted to an object array. If the original is not a numpy type
    # (isn't or doesn't inherit from np.generic or np.ndarray), then it
    # is a matter of converting it to the appropriate numpy
    # type. Otherwise, both are supposed to be numpy types. For object
    # arrays, each element must be iterated over to be compared. Then,
    # if it isn't a string type, then they must have the same dtype,
    # shape, and all elements. If it is an empty string, then it would
    # have been stored as just a null byte (recurse to do that
    # comparison). If it is a bytes_ type, the dtype, shape, and
    # elements must all be the same. If it is a str_ type, we must
    # convert to uint32 and then everything can be compared. Big longs
    # and ints get written as numpy.bytes_.
    if type(b) == dict or (sys.hexversion >= 0x2070000
                           and type(b) == collections.OrderedDict):
        assert type(a) == np.ndarray
        assert a.dtype.names is not None

        # Determine if any of the keys could not be stored as str. If
        # they all can be, then the dtype field names should be the
        # keys. Otherwise, they should be 'keys' and 'values'.
        all_str_keys = True
        if sys.hexversion >= 0x03000000:
            tp_str = str
            tp_bytes = bytes
            converters = {tp_str: lambda x: x,
                          tp_bytes: lambda x: x.decode('UTF-8'),
                          np.bytes_:
                          lambda x: bytes(x).decode('UTF-8'),
                          np.unicode_: lambda x: str(x)}
            tp_conv = lambda x: converters[type(x)](x)
            tp_conv_str = lambda x: tp_conv(x)
        else:
            tp_str = unicode
            tp_bytes = str
            converters = {tp_str: lambda x: x,
                          tp_bytes: lambda x: x.decode('UTF-8'),
                          np.bytes_:
                          lambda x: bytes(x).decode('UTF-8'),
                          np.unicode_: lambda x: unicode(x)}
            tp_conv = lambda x: converters[type(x)](x)
            tp_conv_str = lambda x: tp_conv(x).encode('UTF-8')
        tps = tuple(converters.keys())
        for k in b.keys():
            if type(k) not in tps:
                all_str_keys = False
                break
            try:
                k_str = tp_conv(k)
            except:
                all_str_keys = False
                break
        if all_str_keys:
            assert set(a.dtype.names) == set([tp_conv_str(k)
                                              for k in b.keys()])
            for k in b:
                assert_equal_none_format(a[tp_conv_str(k)][0],
                                         b[k], options)
        else:
            names = (options.dict_like_keys_name,
                     options.dict_like_values_name)
            assert set(a.dtype.names) == set(names)
            keys = a[names[0]]
            values = a[names[1]]
            assert_equal_none_format(keys, tuple(b.keys()), options)
            assert_equal_none_format(values, tuple(b.values()), options)
    elif type(b) in (list, tuple, set, frozenset, collections.deque):
        assert_equal_none_format(a, np.object_(list(b)), options)
    elif not isinstance(b, (np.generic, np.ndarray)):
        if b is None:
            # It should be np.float64([])
            assert type(a) == np.ndarray
            assert a.dtype == np.float64([]).dtype
            assert a.shape == (0, )
        elif isinstance(b, (bytes, bytearray)):
            assert a == np.bytes_(b)
        elif (sys.hexversion >= 0x03000000 \
                and isinstance(b, str)) \
                or (sys.hexversion < 0x03000000 \
                and isinstance(b, unicode)):
            assert_equal_none_format(a, np.unicode_(b), options)
        elif (sys.hexversion >= 0x03000000 \
                and type(b) == int) \
                or (sys.hexversion < 0x03000000 \
                and type(b) == long):
            if b > 2**63 or b < -(2**63 - 1):
                assert_equal_none_format(a, np.bytes_(b), options)
            else:
                assert_equal_none_format(a, np.int64(b), options)
        else:
            assert_equal_none_format(a, np.array(b)[()], options)
    else:
        if b.dtype.name != 'object':
            if b.dtype.char in ('U', 'S'):
                if b.dtype.char == 'S' and b.shape == tuple() \
                        and len(b) == 0:
                    assert_equal(a, \
                        np.zeros(shape=tuple(), dtype=b.dtype.char), \
                        options)
                elif b.dtype.char == 'U':
                    if b.shape == tuple() and len(b) == 0:
                        c = np.uint32(())
                    else:
                        c = np.atleast_1d(b).view(np.uint32)
                    assert a.dtype == c.dtype
                    assert a.shape == c.shape
                    npt.assert_equal(a, c)
                else:
                    assert a.dtype == b.dtype
                    assert a.shape == b.shape
                    npt.assert_equal(a, b)
            else:
                # Now, if b.shape is just all ones, then a.shape will
                # just be (1,). Otherwise, we need to compare the shapes
                # directly. Also, dimensions need to be squeezed before
                # comparison in this case.
                assert np.prod(a.shape) == np.prod(b.shape)
                assert a.shape == b.shape \
                    or (np.prod(b.shape) == 1 and a.shape == (1,))
                if np.prod(a.shape) == 1:
                    a = np.squeeze(a)
                    b = np.squeeze(b)
                # If there was a null in the dtype, then it was written
                # as a Group so the field order could have changed.
                if '\\x00' in str(b.dtype):
                    assert set(a.dtype.descr) == set(b.dtype.descr)
                    # Reorder the fields of a.
                    c = np.empty(shape=b.shape, dtype=b.dtype)
                    for n in b.dtype.names:
                        c[n] = a[n]
                else:
                    c = a
                assert c.dtype == b.dtype
                with warnings.catch_warnings():
                    warnings.simplefilter('ignore', RuntimeWarning)
                    npt.assert_equal(c, b)
        else:
            assert a.dtype == b.dtype
            assert a.shape == b.shape
            for index, x in np.ndenumerate(a):
                assert_equal_none_format(a[index], b[index], options)
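A minimal sketch of the unicode comparison branch above: a 'U' array viewed as np.uint32 exposes one UTF-32 code unit per character, which is the form the stored data takes in this format.

import numpy as np

b = np.unicode_('abc')
c = np.atleast_1d(b).view(np.uint32)
assert list(c) == [ord('a'), ord('b'), ord('c')]   # one uint32 per char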
Example #50
def assert_equal_matlab_format(a, b, options=None):
    # Compares a and b for equality. b is always the original. If they
    # are dictionaries, a must be a structured ndarray and they must
    # have the same set of keys, after which their values must all be
    # compared. If they are a collection type (list, tuple, set,
    # frozenset, or deque), then the comparison must be made with b
    # converted to an object array. If the original is not a numpy type
    # (isn't or doesn't inherit from np.generic or np.ndarray), then it
    # is a matter of converting it to the appropriate numpy
    # type. Otherwise, both are supposed to be numpy types. For object
    # arrays, each element must be iterated over to be compared. Then,
    # if it isn't a string type, then they must have the same dtype,
    # shape, and all elements. All strings are converted to numpy.str_
    # on read unless they were stored as a numpy.bytes_ due to having
    # non-ASCII characters. If it is empty, it has shape (1, 0). A
    # numpy.str_ has all of its strings per row compacted together. A
    # numpy.bytes_ string has to have the same thing done, but then it
    # needs to be converted up to UTF-32 and to numpy.str_ through
    # uint32. Big longs and ints get written as UTF-16 uint16s and
    # read back as UTF-32 numpy.unicode_.
    #
    # In all cases, we expect things to be at least two dimensional
    # arrays.
    if type(b) == dict or (sys.hexversion >= 0x2070000
                           and type(b) == collections.OrderedDict):
        assert type(a) == np.ndarray
        assert a.dtype.names is not None

        # Determine if any of the keys could not be stored as str. If
        # they all can be, then the dtype field names should be the
        # keys. Otherwise, they should be 'keys' and 'values'.
        all_str_keys = True
        if sys.hexversion >= 0x03000000:
            tp_str = str
            tp_bytes = bytes
            converters = {tp_str: lambda x: x,
                          tp_bytes: lambda x: x.decode('UTF-8'),
                          np.bytes_:
                          lambda x: bytes(x).decode('UTF-8'),
                          np.unicode_: lambda x: str(x)}
            tp_conv = lambda x: converters[type(x)](x)
            tp_conv_str = lambda x: tp_conv(x)
        else:
            tp_str = unicode
            tp_bytes = str
            converters = {tp_str: lambda x: x,
                          tp_bytes: lambda x: x.decode('UTF-8'),
                          np.bytes_:
                          lambda x: bytes(x).decode('UTF-8'),
                          np.unicode_: lambda x: unicode(x)}
            tp_conv = lambda x: converters[type(x)](x)
            tp_conv_str = lambda x: tp_conv(x).encode('UTF-8')
        tps = tuple(converters.keys())
        for k in b.keys():
            if type(k) not in tps:
                all_str_keys = False
                break
            try:
                k_str = tp_conv(k)
            except:
                all_str_keys = False
                break
        if all_str_keys:
            assert set(a.dtype.names) == set([tp_conv_str(k)
                                              for k in b.keys()])
            for k in b:
                assert_equal_matlab_format(a[tp_conv_str(k)][0],
                                           b[k], options)
        else:
            names = (options.dict_like_keys_name,
                     options.dict_like_values_name)
            assert set(a.dtype.names) == set(names)
            keys = a[names[0]][0]
            values = a[names[1]][0]
            assert_equal_matlab_format(keys, tuple(b.keys()), options)
            assert_equal_matlab_format(values, tuple(b.values()),
                                       options)
    elif type(b) in (list, tuple, set, frozenset, collections.deque):
        assert_equal_matlab_format(a, np.object_(list(b)), options)
    elif not isinstance(b, (np.generic, np.ndarray)):
        if b is None:
            # It should be np.zeros(shape=(0, 1), dtype='float64'))
            assert type(a) == np.ndarray
            assert a.dtype == np.dtype('float64')
            assert a.shape == (1, 0)
        elif (sys.hexversion >= 0x03000000 \
                and isinstance(b, (bytes, str, bytearray))) \
                or (sys.hexversion < 0x03000000 \
                and isinstance(b, (bytes, unicode, bytearray))):
            if len(b) == 0:
                assert_equal(a, np.zeros(shape=(1, 0), dtype='U'),
                             options)
            elif isinstance(b, (bytes, bytearray)):
                try:
                    c = np.unicode_(b.decode('ASCII'))
                except:
                    c = np.bytes_(b)
                assert_equal(a, np.atleast_2d(c), options)
            else:
                assert_equal(a, np.atleast_2d(np.unicode_(b)), options)
        elif (sys.hexversion >= 0x03000000 \
                and type(b) == int) \
                or (sys.hexversion < 0x03000000 \
                and type(b) == long):
            if b > 2**63 or b < -(2**63 - 1):
                assert_equal(a, np.atleast_2d(np.unicode_(b)), options)
            else:
                assert_equal(a, np.atleast_2d(np.int64(b)), options)
        else:
            assert_equal(a, np.atleast_2d(np.array(b)), options)
    else:
        if b.dtype.name != 'object':
            if b.dtype.char in ('U', 'S'):
                if len(b) == 0 and (b.shape == tuple() \
                        or b.shape == (0, )):
                    assert_equal(a, np.zeros(shape=(1, 0),
                                             dtype='U'), options)
                elif b.dtype.char == 'U':
                    c = np.atleast_1d(b)
                    c = np.atleast_2d(c.view(np.dtype('U' \
                        + str(c.shape[-1]*c.dtype.itemsize//4))))
                    assert a.dtype == c.dtype
                    assert a.shape == c.shape
                    npt.assert_equal(a, c)
                elif b.dtype.char == 'S':
                    c = np.atleast_1d(b).view(np.ndarray)
                    if np.all(c.view(np.uint8) < 128):
                        c = c.view(np.dtype('S' \
                            + str(c.shape[-1]*c.dtype.itemsize)))
                        c = np.uint32(c.view(np.dtype('uint8')))
                        c = c.view(np.dtype('U' + str(c.shape[-1])))
                    c = np.atleast_2d(c)
                    assert a.dtype == c.dtype
                    assert a.shape == c.shape
                    npt.assert_equal(a, c)
                else:
                    c = np.atleast_2d(b)
                    assert a.dtype == c.dtype
                    assert a.shape == c.shape
                    with warnings.catch_warnings():
                        warnings.simplefilter('ignore', RuntimeWarning)
                        npt.assert_equal(a, c)
            else:
                c = np.atleast_2d(b)
                # An empty complex number gets turned into a real
                # number when it is stored.
                if np.prod(c.shape) == 0 \
                        and b.dtype.name.startswith('complex'):
                    c = np.real(c)
                # If it is structured, check that the field names are
                # the same, in the same order, and then go through them
                # one by one. Otherwise, make sure the dtypes and shapes
                # are the same before comparing all values.
                if b.dtype.names is None and a.dtype.names is None:
                    assert a.dtype == c.dtype
                    assert a.shape == c.shape
                    with warnings.catch_warnings():
                        warnings.simplefilter('ignore', RuntimeWarning)
                        npt.assert_equal(a, c)
                else:
                    assert a.dtype.names is not None
                    assert b.dtype.names is not None
                    assert set(a.dtype.names) == set(b.dtype.names)
                    # The ordering of fields must be preserved if the
                    # MATLAB_fields attribute could be used, which can
                    # only be done if there are no non-ASCII characters
                    # in any of the field names.
                    if sys.hexversion >= 0x03000000:
                        allfields = ''.join(b.dtype.names)
                    else:
                        allfields = unicode('').join( \
                            [nm.decode('UTF-8') \
                            for nm in b.dtype.names])
                    if np.all(np.array([ord(ch) < 128 \
                            for ch in allfields])):
                        assert a.dtype.names == b.dtype.names
                    a = a.flatten()
                    b = b.flatten()
                    for k in b.dtype.names:
                        for index, x in np.ndenumerate(a):
                            assert_equal_from_matlab(a[k][index],
                                                     b[k][index],
                                                     options)
        else:
            c = np.atleast_2d(b)
            assert a.dtype == c.dtype
            assert a.shape == c.shape
            for index, x in np.ndenumerate(a):
                assert_equal_matlab_format(a[index], c[index], options)
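A minimal sketch of the all-ASCII bytes_ branch above: the 'S' data is viewed as uint8, widened to uint32 (UTF-32 code units), and then viewed as a 'U' string of the same length.

import numpy as np

b = np.atleast_1d(np.bytes_('abc'))                 # dtype 'S3'
u32 = np.uint32(b.view(np.uint8))                   # widen each byte
c = u32.view(np.dtype('U' + str(u32.shape[-1])))    # reinterpret as UTF-32
assert c[0] == 'abc'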
Example #51
def test_field(fname, name, subdir):
    val = ut.verify_present(fname, "general/extracellular_ephys/"+subdir+"/", name.lower())
    if val != name and val != np.bytes_(name):
        ut.error("Checking metadata", "field value incorrect")
Example #52
        
        if require_nsubs:
            rl = rl[nsubs>=require_nsubs]
            std = std[nsubs>=require_nsubs]
            se = se[nsubs>=require_nsubs]
            pct = pct[nsubs>=require_nsubs]
        
        pl.figure()
        pl.errorbar(rl, pct, yerr=std, color='gray', alpha=0.3, fmt='none')
        pl.errorbar(rl, pct, yerr=se, fmt='ko')
        pl.title('By subject, %i mice, mean %i trials each'%(len(subs),np.mean([sum(i) for i in n_trials])))
        draw_fit(rl,pct)

if plot:
    sub = data['b12']
    sesh = np.bytes_(list(sub['sessions'].keys())[-1])
    #sesh = b'20151014_172808'

    trials = sub['trials']
    trials = trials[trials['session']==sesh]
    cors = trials['ns'].argmax(axis=1)
    iters = sub['iterations']
    iters = iters[iters['session']==sesh]
    light = sub['light']
    light = light[light['session']==sesh]
    speaker = sub['speaker']
    speaker = speaker[speaker['session']==sesh]
    spout = sub['spout']
    spout = spout[spout['session']==sesh]
    licks = sub['licks']
    licks = licks[licks['session']==sesh]
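A minimal sketch of the session filtering above, with hypothetical data: string columns read back from HDF5 are fixed-width bytes, so the session key has to be bytes (numpy.bytes_) for the equality mask to match anything.

import numpy as np

trials = np.array([(b'20151014_172808', 3), (b'20151015_090000', 1)],
                  dtype=[('session', 'S15'), ('ns', 'i4')])
sesh = np.bytes_('20151014_172808')
assert trials[trials['session'] == sesh].shape == (1,)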
Example #53
 def outfile(self, outfile):
     if PYVERSION == 3:
         self.radex.impex.outfile[:] = np.bytes_([""]*len(self.radex.impex.outfile))
     else:
         self.radex.impex.outfile[:] = ""
     self.radex.impex.outfile[:len(outfile)] = outfile
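A minimal sketch of the clear-then-copy pattern above, using a plain writable numpy 'S1' buffer in place of the Fortran-backed radex attribute (the real array comes from the f2py wrapper, so its exact padding semantics may differ):

import numpy as np

buf = np.frombuffer(b'previous.out!!', dtype='S1').copy()
buf[:] = b''                                 # clear the whole buffer
name = b'radex.out'
buf[:len(name)] = np.frombuffer(name, dtype='S1')
assert buf.tobytes().rstrip(b'\x00') == b'radex.out'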
Example #54
 def test_bytes(self):
     s = readsav(path.join(DATA_PATH, 'scalar_string.sav'), verbose=False)
     assert_identical(s.s, np.bytes_("The quick brown fox jumps over the lazy python"))
Example #55
def test_field(fname, name):
    val = ut.verify_present(fname, "general/", name.lower())
    if val != name and val != np.bytes_(name):
        ut.error("Checking metadata", "field value incorrect")
Example #56
 def test_numpy_bytes_(self):
     s = "this is a test"
     arr = numpy.bytes_(s)
     self.assertEqual(s, punx.utils.decode_byte_string(arr), 'numpy.bytes_ string')
Example #57
 def logfile(self, logfile):
     if PYVERSION == 3:
         self.radex.setup.logfile[:] = np.bytes_([""]*len(self.radex.setup.logfile))
     else:
         self.radex.setup.logfile[:] = ""
     self.radex.setup.logfile[:len(logfile)] = logfile
Example #58
numpy_dtype_to_field_mapping = {
    np.float64().dtype.num    : 'double',
    np.float32().dtype.num    : 'float',
    np.bool_().dtype.num      : 'bit',
    np.uint8().dtype.num      : 'unsignedByte',
    np.int16().dtype.num      : 'short',
    np.int32().dtype.num      : 'int',
    np.int64().dtype.num      : 'long',
    np.complex64().dtype.num  : 'floatComplex',
    np.complex128().dtype.num : 'doubleComplex',
    np.unicode_().dtype.num   : 'unicodeChar'
}


numpy_dtype_to_field_mapping[np.bytes_().dtype.num] = 'char'
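A hypothetical lookup using the mapping above, picking the field type string for a numpy column:

import numpy as np

col = np.array([1.0, 2.0], dtype=np.float32)
assert numpy_dtype_to_field_mapping[col.dtype.num] == 'float'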


def _all_bytes(column):
    for x in column:
        if not isinstance(x, bytes):
            return False
    return True


def _all_unicode(column):
    for x in column:
        if not isinstance(x, six.text_type):
            return False
    return True