Example #1
1
    def test_init(self):
        import numpy as np
        import math
        import sys

        assert np.intp() == np.intp(0)
        assert np.intp("123") == np.intp(123)
        raises(TypeError, np.intp, None)
        assert np.float64() == np.float64(0)
        assert math.isnan(np.float64(None))
        assert np.bool_() == np.bool_(False)
        assert np.bool_("abc") == np.bool_(True)
        assert np.bool_(None) == np.bool_(False)
        assert np.complex_() == np.complex_(0)
        # raises(TypeError, np.complex_, '1+2j')
        assert math.isnan(np.complex_(None))
        for c in ["i", "I", "l", "L", "q", "Q"]:
            assert np.dtype(c).type().dtype.char == c
        for c in ["l", "q"]:
            assert np.dtype(c).type(sys.maxint) == sys.maxint
        for c in ["L", "Q"]:
            assert np.dtype(c).type(sys.maxint + 42) == sys.maxint + 42
        assert np.float32(np.array([True, False])).dtype == np.float32
        assert type(np.float32(np.array([True]))) is np.ndarray
        assert type(np.float32(1.0)) is np.float32
        a = np.array([True, False])
        assert np.bool_(a) is a
Example #2
0
 def __init__(self, filename, precision='single', verbose=False):        
     self.filename = filename
     self.precision = precision
     self.verbose = verbose
     self.file = open(self.filename, 'rb')
     self.nrow = 0
     self.ncol = 0
     self.nlay = 0
     self.times = []
     self.kstpkper = []
     self.textlist = []
     self.nrecords = 0
     self.header_dtype = np.dtype([('kstp','i4'),('kper','i4'),
                                   ('text','a16'),('ncol','i4'),
                                   ('nrow','i4'),('nlay','i4')])
     self.header2_dtype = np.dtype([('imeth','i4'),('delt','f4'),
                                   ('pertim','f4'),('totim','f4')])
                                   
     if precision == 'single':
         self.realtype = np.float32
     elif precision == 'double':
         self.realtype = np.float64
     else:
         raise Exception('Unknown precision specified: ' + precision)
     
     #read through the file and build the pointer index
     self._build_index()
     
     #allocate the value array
     self.value = np.empty( (self.nlay, self.nrow, self.ncol), 
                      dtype=self.realtype)
     return
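A minimal, self-contained sketch of how a structured header dtype like the one above is typically consumed while building the index; the file name and the unpacking are illustrative assumptions, not taken from _build_index:

    import numpy as np

    header_dtype = np.dtype([('kstp', 'i4'), ('kper', 'i4'), ('text', 'a16'),
                             ('ncol', 'i4'), ('nrow', 'i4'), ('nlay', 'i4')])
    with open('budget.bin', 'rb') as f:  # hypothetical budget file
        # read a single binary record directly into the structured dtype
        header = np.fromfile(f, dtype=header_dtype, count=1)
        if header.size:  # an empty array means end of file
            nlay, nrow, ncol = header['nlay'][0], header['nrow'][0], header['ncol'][0]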
Example #3
0
def dtyping(*args):
    """Find least common denominator dtype"""
    args = list(args)

    for i in xrange(len(args)):

        if isinstance(args[i], np.ndarray):
            args[i] = args[i].dtype
        elif isinstance(args[i], (float, f.frac, int, long)):
            args[i] = type(args[i])

    if Poly in args: return Poly

    if float in args: return float
    if np.dtype(float) in args: return float

    if object in args: return object
    if f.frac in args: return f.frac

    if long in args: return long
    if int in args: return int
    if np.dtype(int) in args: return int

    if list in args: return list
    if tuple in args: return tuple

    raise ValueError, "dtypes not recognised " + str(args)
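For plain numeric inputs, NumPy's own promotion rules give a similar least-common-denominator answer. The check below uses np.result_type purely as a built-in analogue; it is not called by the function above:

    import numpy as np

    # an int array combined with a Python float promotes to float64,
    # mirroring the "float outranks int" ordering in dtyping()
    assert np.result_type(np.arange(3), 1.5) == np.dtype('float64')
    assert np.result_type(np.int32, np.int64) == np.dtype('int64')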
Example #4
0
 def test_dtype(self):
     dt = np.intc
     p = ndpointer(dtype=dt)
     self.assertTrue(p.from_param(np.array([1], dt)))
     dt = '<i4'
     p = ndpointer(dtype=dt)
     self.assertTrue(p.from_param(np.array([1], dt)))
     dt = np.dtype('>i4')
     p = ndpointer(dtype=dt)
     p.from_param(np.array([1], dt))
     self.assertRaises(TypeError, p.from_param,
                       np.array([1], dt.newbyteorder('swap')))
     dtnames = ['x', 'y']
     dtformats = [np.intc, np.float64]
     dtdescr = {'names' : dtnames, 'formats' : dtformats}
     dt = np.dtype(dtdescr)
     p = ndpointer(dtype=dt)
     self.assertTrue(p.from_param(np.zeros((10,), dt)))
     samedt = np.dtype(dtdescr)
     p = ndpointer(dtype=samedt)
     self.assertTrue(p.from_param(np.zeros((10,), dt)))
     dt2 = np.dtype(dtdescr, align=True)
     if dt.itemsize != dt2.itemsize:
         self.assertRaises(TypeError, p.from_param, np.zeros((10,), dt2))
     else:
         self.assertTrue(p.from_param(np.zeros((10,), dt2)))
Example #5
0
def test_working_type():
    # Which type do input types with slope and inter cast to in numpy?
    # Wrapper function because we need to use the dtype str for comparison.  We
    # need this because of the very confusing np.int32 != np.intp (on 32 bit).
    def wt(*args, **kwargs):
        return np.dtype(working_type(*args, **kwargs)).str
    d1 = np.atleast_1d
    for in_type in NUMERIC_TYPES:
        in_ts = np.dtype(in_type).str
        assert_equal(wt(in_type), in_ts)
        assert_equal(wt(in_type, 1, 0), in_ts)
        assert_equal(wt(in_type, 1.0, 0.0), in_ts)
        in_val = d1(in_type(0))
        for slope_type in NUMERIC_TYPES:
            sl_val = slope_type(1) # no scaling, regardless of type
            assert_equal(wt(in_type, sl_val, 0.0), in_ts)
            sl_val = slope_type(2) # actual scaling
            out_val = in_val / d1(sl_val)
            assert_equal(wt(in_type, sl_val), out_val.dtype.str)
            for inter_type in NUMERIC_TYPES:
                i_val = inter_type(0) # no scaling, regardless of type
                assert_equal(wt(in_type, 1, i_val), in_ts)
                i_val = inter_type(1) # actual scaling
                out_val = in_val - d1(i_val)
                assert_equal(wt(in_type, 1, i_val), out_val.dtype.str)
                # Combine scaling and intercept
                out_val = (in_val - d1(i_val)) / d1(sl_val)
                assert_equal(wt(in_type, sl_val, i_val), out_val.dtype.str)
    # Confirm that type codes and dtypes work as well
    f32s = np.dtype(np.float32).str
    assert_equal(wt('f4', 1, 0), f32s)
    assert_equal(wt(np.dtype('f4'), 1, 0), f32s)
Example #6
0
def test_dtype(seed=1234):
    np.random.seed(seed)

    dtype = [
        ("coords", np.float64, (4, )),
        ("log_prior", np.float64),
        ("log_likelihood", np.float64),
        ("accepted", bool)
    ]

    coords = np.random.randn(4)
    state = State(coords)
    assert state.dtype == np.dtype(dtype)

    state = State(coords, face=10.0, blah=6, _hidden=None)
    dtype += [
        ("blah", int),
        ("face", float),
    ]
    assert state.dtype == np.dtype(dtype)

    state = State(coords, face=10.0, blah=6, _hidden=None,
                  matrix=np.ones((3, 1)))
    dtype += [
        ("matrix", float, (3, 1)),
    ]
    assert state.dtype == np.dtype(dtype)

    state = State(coords, face=10.0, blah=6, _hidden=None,
                  matrix=np.ones((3, 1)), vector=np.zeros(3))
    dtype += [
        ("vector", float, (3,)),
    ]
    assert state.dtype == np.dtype(dtype)
Example #7
0
    def Read(self):

        #return numpy.ones((256, 819)).astype('float32'), numpy.ones(256).astype('int32')

        with open(self.featureFile,"rb") as f:

            dt = numpy.dtype([('numSamples', (numpy.int32, 1)),
                              ('sampPeriod', (numpy.int32, 1)),
                              ('sampSize', (numpy.int16, 1)),
                              ('sampKind', (numpy.int16, 1))])
            header = numpy.fromfile(
                f, dt.newbyteorder('>' if self.byteOrder == ByteOrder.BigEndian else '<'),
                count=1)

            numSamples = header[0]['numSamples']
            sampPeriod = header[0]['sampPeriod']
            sampSize   = header[0]['sampSize']
            sampKind   = header[0]['sampKind']

            # print 'Num samples = {}'.format(numSamples)
            # print 'Sample period = {}'.format(sampPeriod)
            # print 'Sample size = {}'.format(sampSize)
            # print 'Sample kind = {}'.format(sampKind)
            dt = numpy.dtype([('sample', (numpy.float32, sampSize // 4))])
            samples = numpy.fromfile(
                f, dt.newbyteorder('>' if self.byteOrder == ByteOrder.BigEndian else '<'),
                count=numSamples)

        self._markDone()

        if self.labelFile is None:
            labels = None
        else:
            labels = ReadLabel(self.labelFile)

        return samples[:]['sample'], labels
Example #8
0
def test_issue321():
    """L-PICOLA outputs single-precision with no mass block, which causes problems
    with testing kd-trees"""

    f = pynbody.load("testdata/lpicola/lpicola_z0p000.0")
    assert f['pos'].dtype==np.dtype('float32')
    assert f['mass'].dtype==np.dtype('float32')
Example #9
0
    def mean(self, axis=None):
        """Average the matrix over the given axis.  If the axis is None,
        average over both rows and columns, returning a scalar.
        """
        # Mimic numpy's casting.  The int32/int64 check works around numpy
        # 1.5.x behavior of np.issubdtype, see gh-2677.
        if (np.issubdtype(self.dtype, np.float_) or
            np.issubdtype(self.dtype, np.int_) or
            self.dtype in [np.dtype('int32'), np.dtype('int64')] or
            np.issubdtype(self.dtype, np.bool_)):
                res_dtype = np.float_
        elif np.issubdtype(self.dtype, np.complex_):
            res_dtype = np.complex_
        else:
            res_dtype = self.dtype

        if axis is None:
            return self.sum(None) * 1.0 / (self.shape[0]*self.shape[1])

        if axis < 0:
            axis += 2
        if axis == 0:
            mean = self.astype(res_dtype).sum(0)
            mean *= 1.0 / self.shape[0]
            return mean
        elif axis == 1:
            mean = self.astype(res_dtype).sum(1)
            mean *= 1.0 / self.shape[1]
            return mean
        else:
            raise ValueError("axis out of bounds")
Example #10
    def dtype(self):
        # Image data types (Image Object chapter on DM help)#
        # key = DM data type code
        # value = numpy data type
        if self.imdict.ImageData.DataType == 4:
            raise NotImplementedError(
                "Reading data of this type is not implemented.")

        imdtype_dict = {
            0: 'not_implemented',  # null
            1: 'int16',
            2: 'float32',
            3: 'complex64',
            5: 'float32',  # not numpy: 8-Byte packed complex (FFT data)
            6: 'uint8',
            7: 'int32',
            8: np.dtype({'names': ['B', 'G', 'R', 'A'],
                         'formats': ['u1', 'u1', 'u1', 'u1']}),
            9: 'int8',
            10: 'uint16',
            11: 'uint32',
            12: 'float64',
            13: 'complex128',
            14: 'bool',
            23: np.dtype({'names': ['B', 'G', 'R', 'A'],
                          'formats': ['u1', 'u1', 'u1', 'u1']}),
            27: 'complex64',  # not numpy: 8-Byte packed complex (FFT data)
            28: 'complex128',  # not numpy: 16-Byte packed complex (FFT data)
        }
        return imdtype_dict[self.imdict.ImageData.DataType]
Example #11
0
    def test_repeatability(self):
        import hashlib
        # We use an md5 hash of generated sequences of 1000 samples
        # in the range [0, 6) for all but np.bool, where the range
        # is [0, 2). Hashes are for little-endian numbers.
        tgt = {'bool': '7dd3170d7aa461d201a65f8bcf3944b0',
               'int16': '1b7741b80964bb190c50d541dca1cac1',
               'int32': '4dc9fcc2b395577ebb51793e58ed1a05',
               'int64': '17db902806f448331b5a758d7d2ee672',
               'int8': '27dd30c4e08a797063dffac2490b0be6',
               'uint16': '1b7741b80964bb190c50d541dca1cac1',
               'uint32': '4dc9fcc2b395577ebb51793e58ed1a05',
               'uint64': '17db902806f448331b5a758d7d2ee672',
               'uint8': '27dd30c4e08a797063dffac2490b0be6'}

        for dt in self.itype[1:]:
            np.random.seed(1234)

            # view as little endian for hash
            if sys.byteorder == 'little':
                val = self.rfunc(0, 6, size=1000, dtype=dt)
            else:
                val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap()

            res = hashlib.md5(val.view(np.int8)).hexdigest()
            assert_(tgt[np.dtype(dt).name] == res)

        # bools do not depend on endianness
        np.random.seed(1234)
        val = self.rfunc(0, 2, size=1000, dtype=np.bool).view(np.int8)
        res = hashlib.md5(val).hexdigest()
        assert_(tgt[np.dtype(np.bool).name] == res)
Example #12
0
def test_rasterize_supported_dtype(basic_geometry):
    """ Supported data types should return valid results """

    with Env():
        supported_types = (
            ('int16', -32768),
            ('int32', -2147483648),
            ('uint8', 255),
            ('uint16', 65535),
            ('uint32', 4294967295),
            ('float32', 1.434532),
            ('float64', -98332.133422114)
        )

        for dtype, default_value in supported_types:
            truth = np.zeros(DEFAULT_SHAPE, dtype=dtype)
            truth[2:4, 2:4] = default_value

            result = rasterize(
                [basic_geometry],
                out_shape=DEFAULT_SHAPE,
                default_value=default_value,
                dtype=dtype
            )
            assert np.array_equal(result, truth)
            assert np.dtype(result.dtype) == np.dtype(truth.dtype)

            result = rasterize(
                [(basic_geometry, default_value)],
                out_shape=DEFAULT_SHAPE
            )
            if np.dtype(dtype).kind == 'f':
                assert np.allclose(result, truth)
            else:
                assert np.array_equal(result, truth)
Example #13
0
    def test_frame_add_datetime64_col_other_units(self):
        n = 100

        units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y']

        ns_dtype = np.dtype('M8[ns]')

        for unit in units:
            dtype = np.dtype('M8[%s]' % unit)
            vals = np.arange(n, dtype=np.int64).view(dtype)

            df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
            df[unit] = vals

            ex_vals = to_datetime(vals.astype('O')).values

            self.assertEqual(df[unit].dtype, ns_dtype)
            self.assertTrue((df[unit].values == ex_vals).all())

        # Test insertion into existing datetime64 column
        df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
        df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype)

        for unit in units:
            dtype = np.dtype('M8[%s]' % unit)
            vals = np.arange(n, dtype=np.int64).view(dtype)

            tmp = df.copy()

            tmp['dates'] = vals
            ex_vals = to_datetime(vals.astype('O')).values

            self.assertTrue((tmp['dates'].values == ex_vals).all())
Example #14
0
def CfxCentreLineSnapshot(filename):
    """Factory function wrapping a CFX snapshot.
    
    Load the data with:
    >>> snap = CfxSnapshot(filename)

    Fields are constructed from the header line.
    
        
    """
    (__raw_row, fieldUnits) = parseHeader(filename, AllData=True)
    __raw_row = [('id', int),] + __raw_row
    fieldUnits['id'] = 1
    
                 # ('position', float, (3,)),
                 # ('strain_rate', float),
                 # ('speed', float),
                 # ('velocity', float, (3,)),
                 # ('wall_shear', float, (4,))]

    __readable_row = np.dtype(__raw_row[1:])
    row = np.dtype(__raw_row)
    
    noindex = np.genfromtxt(filename, skip_header=findStart(filename, AllData=True)+2,
                           delimiter=',',
                           dtype=__readable_row).view(np.recarray)
    index = np.recarray(shape=noindex.shape, dtype=row)
    index.id = np.arange(len(noindex))
    for el in __raw_row[1:]:
        key = el[0]
        index.__setattr__(key, U.convert(noindex.__getattribute__(key), fieldUnits[key], hlbUnits[key]))
        continue
    
    return index
Example #15
0
def get_trace(f, num_points, big):
    """
    Get a trace from an open RNMRTK file.

    Parameters
    -----------
    f : file object
        Open file object to read from.
    num_points : int
        Number of points in trace (R+I)
    big : bool
        True for data that is big-endian, False for little-endian.

    Returns
    -------
    trace : ndarray
        Raw trace of NMR data.

    """
    if big:
        bsize = num_points * np.dtype('>f4').itemsize
        return np.frombuffer(f.read(bsize), dtype='>f4')
    else:
        bsize = num_points * np.dtype('<f4').itemsize
        return np.frombuffer(f.read(bsize), dtype='<f4')
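A short usage sketch for the reader above; the file name and point count are hypothetical, and the file is assumed to already contain at least that many big-endian float32 values:

    import numpy as np

    with open('spectrum.sec', 'rb') as f:  # hypothetical RNMRTK data file
        trace = get_trace(f, num_points=2048, big=True)
    assert trace.dtype == np.dtype('>f4')
    assert trace.size == 2048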
Example #16
0
def main(fname):

    basename = os.path.basename(fname)[0:-4]
    abspath = os.path.dirname(os.path.abspath(fname))
    output_fname = os.path.join(abspath, basename+'.hdf5')

    if 'mdr1_fofp_' in basename:
        print("\n...Reading particle data...\n")
        dt = np.dtype([('rowid', 'i8'),
                       ('x', 'f8'), ('y', 'f8'), ('z', 'f8'),
                       ('vx', 'f8'), ('vy', 'f8'), ('vz', 'f8'),
                       ('haloid', 'i8')])
    else:
        print("\n...Reading halo data...\n")
        dt = np.dtype([('rowid', 'i8'), ('haloid', 'i8'),
                       ('x', 'f8'), ('y', 'f8'), ('z', 'f8'),
                       ('vx', 'f8'), ('vy', 'f8'), ('vz', 'f8'),
                       ('mass', 'f4'), ('size', 'f4')])

    start = time()
    data = read_csv(fname, dt)
    end = time()
    runtime = end-start
    print("Total time to read csv = %.1f seconds\n" % runtime)

    with h5py.File(output_fname,'w') as f:
        f['data'] = data
Example #17
0
    def test_single_subarray(self):
        a = np.dtype((np.int, (2)))
        b = np.dtype((np.int, (2,)))
        assert_dtype_equal(a, b)

        assert_equal(type(a.subdtype[1]), tuple)
        assert_equal(type(b.subdtype[1]), tuple)
Example #18
0
    def ReadFilePart(self):
        ## Setup for reading file by phase, to (hopefully) save
        # memory. 

        ## Save a file handle somewhere
        self._file_handle = open(self._filename, "rb")
        header = self._file_handle.read(16)
        count, headerSize = struct.unpack_from('QQ',header,0)
        print "Count: " + str(count) + ",Header Size:" + str(headerSize)
        dt = np.dtype([("TotalTime",float,1),
                     ("MallocTime",float,1),
                     ("MallocSize",int,1),
                     ("GPUTime",float,1),
                     ("GPUAvg", float, 1),
                     ("MemcpyTime",float,1),
                     ("Memcpysize",int,1),
                     ("TotalMem",int,1),
                     ("TotalMemRead",int,1),
                     ("TotalMemWrite",int,1)])
        self._kdt = np.dtype([("TotalTime",float,1),
                     ("TBytesRead",int,1),
                     ("TBytesWritten",int,1),
                     ("CBytesRead",int,1),
                     ("CBytesWritten", int, 1)])
        self._phases = np.zeros((count), dtype=dt)
        self._kernels = []
        self._phase_offsets = []
        self._residentPhase = -1
        self.ReadHeader(count,headerSize)
Example #19
0
    def test_expect_dtypes(self):

        @expect_dtypes(a=dtype(float), b=dtype('datetime64[ns]'))
        def foo(a, b, c):
            return a, b, c

        good_a = arange(3, dtype=float)
        good_b = arange(3).astype('datetime64[ns]')
        good_c = object()

        a_ret, b_ret, c_ret = foo(good_a, good_b, good_c)
        self.assertIs(a_ret, good_a)
        self.assertIs(b_ret, good_b)
        self.assertIs(c_ret, good_c)

        with self.assertRaises(TypeError) as e:
            foo(good_a, arange(3, dtype='int64'), good_c)

        expected_message = (
            "{qualname}() expected a value with dtype 'datetime64[ns]'"
            " for argument 'b', but got 'int64' instead."
        ).format(qualname=qualname(foo))
        self.assertEqual(e.exception.args[0], expected_message)

        with self.assertRaises(TypeError) as e:
            foo(arange(3, dtype='uint32'), good_c, good_c)

        expected_message = (
            "{qualname}() expected a value with dtype 'float64'"
            " for argument 'a', but got 'uint32' instead."
        ).format(qualname=qualname(foo))
        self.assertEqual(e.exception.args[0], expected_message)
Example #20
0
def test_jamsframe_from_df():

    df = pd.DataFrame(data=[[0.0, 1.0, 'a', 0.0],
                            [1.0, 2.0, 'b', 0.0]],
                      columns=['time', 'duration', 'value', 'confidence'])

    jf = jams.JamsFrame.from_dataframe(df)

    # 1. type check
    assert isinstance(jf, jams.JamsFrame)

    # 2. check field order
    eq_(list(jf.keys().values),
        jams.JamsFrame.fields())

    # 3. check field types
    assert jf['time'].dtype == np.dtype('<m8[ns]')
    assert jf['duration'].dtype == np.dtype('<m8[ns]')

    # 4. Check the values
    eq_(list(jf['time']),
        list(pd.to_timedelta([0.0, 1.0], unit='s')))
    eq_(list(jf['duration']), 
        list(pd.to_timedelta([1.0, 2.0], unit='s')))
    eq_(list(jf['value']), ['a', 'b'])
    eq_(list(jf['confidence']), [0.0, 0.0])
Example #21
0
 def test_ldexp_overflow(self):
     # silence warning emitted on overflow
     with np.errstate(over="ignore"):
         imax = np.iinfo(np.dtype('l')).max
         imin = np.iinfo(np.dtype('l')).min
         assert_equal(ncu.ldexp(2., imax), np.inf)
         assert_equal(ncu.ldexp(2., imin), 0)
Example #22
0
 def test_union_struct(self):
     # Should be able to create union dtypes
     dt = np.dtype({"names": ["f0", "f1", "f2"], "formats": ["<u4", "<u2", "<u2"], "offsets": [0, 0, 2]}, align=True)
     assert_equal(dt.itemsize, 4)
     a = np.array([3], dtype="<u4").view(dt)
     a["f1"] = 10
     a["f2"] = 36
     assert_equal(a["f0"], 10 + 36 * 256 * 256)
     # Should be able to specify fields out of order
     dt = np.dtype({"names": ["f0", "f1", "f2"], "formats": ["<u4", "<u2", "<u2"], "offsets": [4, 0, 2]}, align=True)
     assert_equal(dt.itemsize, 8)
     dt2 = np.dtype(
         {"names": ["f2", "f0", "f1"], "formats": ["<u2", "<u4", "<u2"], "offsets": [2, 4, 0]}, align=True
     )
     vals = [(0, 1, 2), (3, -1, 4)]
     vals2 = [(2, 0, 1), (4, 3, -1)]
     a = np.array(vals, dt)
     b = np.array(vals2, dt2)
     assert_equal(a.astype(dt2), b)
     assert_equal(b.astype(dt), a)
     assert_equal(a.view(dt2), b)
     assert_equal(b.view(dt), a)
     # Should not be able to overlap objects with other types
     assert_raises(TypeError, np.dtype, {"names": ["f0", "f1"], "formats": ["O", "i1"], "offsets": [0, 2]})
     assert_raises(TypeError, np.dtype, {"names": ["f0", "f1"], "formats": ["i4", "O"], "offsets": [0, 3]})
     assert_raises(TypeError, np.dtype, {"names": ["f0", "f1"], "formats": [[("a", "O")], "i1"], "offsets": [0, 2]})
     assert_raises(TypeError, np.dtype, {"names": ["f0", "f1"], "formats": ["i4", [("a", "O")]], "offsets": [0, 3]})
     # Out of order should still be ok, however
     dt = np.dtype({"names": ["f0", "f1"], "formats": ["i1", "O"], "offsets": [np.dtype("intp").itemsize, 0]})
Example #23
0
    def test_view(self):
        import numpy as np
        import sys

        s = np.dtype("int64").type(12)
        exc = raises(ValueError, s.view, "int8")
        assert exc.value[0] == "new type not compatible with array."
        t = s.view("double")
        assert type(t) is np.double
        assert t < 7e-323
        t = s.view("complex64")
        assert type(t) is np.complex64
        assert 0 < t.real < 1
        assert t.imag == 0
        exc = raises(TypeError, s.view, "string")
        assert exc.value[0] == "data-type must not be 0-sized"
        t = s.view("S8")
        assert type(t) is np.string_
        assert t == "\x0c"
        s = np.dtype("string").type("abc1")
        assert s.view("S4") == "abc1"
        if "__pypy__" in sys.builtin_module_names:
            raises(NotImplementedError, s.view, [("a", "i2"), ("b", "i2")])
        else:
            b = s.view([("a", "i2"), ("b", "i2")])
            assert b.shape == ()
            assert b[0] == 25185
            assert b[1] == 12643
        if "__pypy__" in sys.builtin_module_names:
            raises(TypeError, "np.dtype([('a', 'int64'), ('b', 'int64')]).type('a' * 16)")
        else:
            s = np.dtype([("a", "int64"), ("b", "int64")]).type("a" * 16)
            assert s.view("S16") == "a" * 16
  def testMakeTableExceptions(self):
    # Verify that contents is being type-checked and shape-checked.
    with self.assertRaises(ValueError):
      text_plugin.make_table([])

    with self.assertRaises(ValueError):
      text_plugin.make_table('foo')

    with self.assertRaises(ValueError):
      invalid_shape = np.full((3, 3, 3), 'nope', dtype=np.dtype('S3'))
      text_plugin.make_table(invalid_shape)

    # Test headers exceptions in 2d array case.
    test_array = np.full((3, 3), 'foo', dtype=np.dtype('S3'))
    with self.assertRaises(ValueError):
      # Headers is wrong type.
      text_plugin.make_table(test_array, headers='foo')
    with self.assertRaises(ValueError):
      # Too many headers.
      text_plugin.make_table(test_array, headers=['foo', 'bar', 'zod', 'zoink'])
    with self.assertRaises(ValueError):
      # headers is 2d
      text_plugin.make_table(test_array, headers=test_array)

    # Also make sure the column counting logic works in the 1d array case.
    test_array = np.array(['foo', 'bar', 'zod'])
    with self.assertRaises(ValueError):
      # Too many headers.
      text_plugin.make_table(test_array, headers=test_array)
Example #25
0
    def test_pass_dtype(self):
        data = """\
one,two
1,a
2,b
3,c
4,d"""

        def _make_reader(**kwds):
            return TextReader(StringIO(data), delimiter=',', **kwds)

        reader = _make_reader(dtype={'one': 'u1', 1: 'S1'})
        result = reader.read()
        self.assertEqual(result[0].dtype, 'u1')
        self.assertEqual(result[1].dtype, 'S1')

        reader = _make_reader(dtype={'one': np.uint8, 1: object})
        result = reader.read()
        self.assertEqual(result[0].dtype, 'u1')
        self.assertEqual(result[1].dtype, 'O')

        reader = _make_reader(dtype={'one': np.dtype('u1'),
                                     1: np.dtype('O')})
        result = reader.read()
        self.assertEqual(result[0].dtype, 'u1')
        self.assertEqual(result[1].dtype, 'O')
Example #26
0
    def get_default_dtype(structured=True):
        if structured:
            dtype = np.dtype([("k", np.int), ("i", np.int), ("j", np.int),
                              ("segment", np.int), ("reach", np.int),
                              ("flow", np.float32), ("stage", np.float32),
                              ("cond", np.float32), ("sbot", np.float32),
                              ("stop", np.float32),
                              ("width", np.float32), ("slope", np.float32),
                              ("rough", np.float32)])
        else:
            dtype = np.dtype([("node", np.int),
                              ("segment", np.int), ("reach", np.int),
                              ("flow", np.float32), ("stage", np.float32),
                              ("cond", np.float32), ("sbot", np.float32),
                              ("stop", np.float32),
                              ("width", np.float32), ("slope", np.float32),
                              ("rough", np.float32)])

        dtype2 = np.dtype([("itrib01", np.int), ("itrib02", np.int),
                           ("itrib03", np.int), ("itrib04", np.int),
                           ("itrib05", np.int), ("itrib06", np.int),
                           ("itrib07", np.int), ("itrib08", np.int),
                           ("itrib09", np.int), ("itrib10", np.int),
                           ("iupseg", np.int)])
        return dtype, dtype2
Example #27
0
    def __mul__(self, b):
        if type(b) is not np.ndarray:
            raise TypeError('Can only multiply by a numpy array.')

        if len(b.shape) == 1 or b.shape[1] == 1:
            b = b.flatten()
            # Just one RHS

            if b.dtype is np.dtype('O'):
                b = b.astype(type(b[0]))

            if factorize:
                X = self.solver.solve(b, **self.kwargs)
            else:
                X = fun(self.A, b, **self.kwargs)
        else: # Multiple RHSs
            if b.dtype is np.dtype('O'):
                b = b.astype(type(b[0,0]))

            X = np.empty_like(b)

            for i in range(b.shape[1]):
                if factorize:
                    X[:,i] = self.solver.solve(b[:,i])
                else:
                    X[:,i] = fun(self.A, b[:,i], **self.kwargs)

        if self.checkAccuracy:
            _checkAccuracy(self.A, b, X, self.accuracyTol)
        return X
Example #28
0
    def test_fromArrays(self):
        ary = arange(8, dtype=dtype('int16')).reshape((2, 4))

        series = SeriesLoader(self.sc).fromArrays(ary)

        seriesvals = series.collect()
        seriesary = series.pack()

        # check ordering of keys
        assert_equals((0, 0), seriesvals[0][0])  # first key
        assert_equals((1, 0), seriesvals[1][0])  # second key
        assert_equals((2, 0), seriesvals[2][0])
        assert_equals((3, 0), seriesvals[3][0])
        assert_equals((0, 1), seriesvals[4][0])
        assert_equals((1, 1), seriesvals[5][0])
        assert_equals((2, 1), seriesvals[6][0])
        assert_equals((3, 1), seriesvals[7][0])

        # check dimensions tuple is reversed from numpy shape
        assert_equals(ary.shape[::-1], series.dims.count)

        # check that values are in original order
        collectedvals = array([kv[1] for kv in seriesvals], dtype=dtype('int16')).ravel()
        assert_true(array_equal(ary.ravel(), collectedvals))

        # check that packing returns transpose of original array
        assert_true(array_equal(ary.T, seriesary))
Example #29
0
    def test_fromMultipleArrays(self):
        ary = arange(8, dtype=dtype('int16')).reshape((2, 4))
        ary2 = arange(8, 16, dtype=dtype('int16')).reshape((2, 4))

        series = SeriesLoader(self.sc).fromArrays([ary, ary2])

        seriesvals = series.collect()
        seriesary = series.pack()

        # check ordering of keys
        assert_equals((0, 0), seriesvals[0][0])  # first key
        assert_equals((1, 0), seriesvals[1][0])  # second key
        assert_equals((3, 0), seriesvals[3][0])
        assert_equals((0, 1), seriesvals[4][0])
        assert_equals((3, 1), seriesvals[7][0])

        # check dimensions tuple is reversed from numpy shape
        assert_equals(ary.shape[::-1], series.dims.count)

        # check that values are in original order, with subsequent point concatenated in values
        collectedvals = array([kv[1] for kv in seriesvals], dtype=dtype('int16'))
        assert_true(array_equal(ary.ravel(), collectedvals[:, 0]))
        assert_true(array_equal(ary2.ravel(), collectedvals[:, 1]))

        # check that packing returns concatenation of input arrays, with time as first dimension
        assert_true(array_equal(ary.T, seriesary[0]))
        assert_true(array_equal(ary2.T, seriesary[1]))
Example #30
0
 def test_masked_all(self):
     # Tests masked_all
     # Standard dtype
     test = masked_all((2,), dtype=float)
     control = array([1, 1], mask=[1, 1], dtype=float)
     assert_equal(test, control)
     # Flexible dtype
     dt = np.dtype({'names': ['a', 'b'], 'formats': ['f', 'f']})
     test = masked_all((2,), dtype=dt)
     control = array([(0, 0), (0, 0)], mask=[(1, 1), (1, 1)], dtype=dt)
     assert_equal(test, control)
     test = masked_all((2, 2), dtype=dt)
     control = array([[(0, 0), (0, 0)], [(0, 0), (0, 0)]],
                     mask=[[(1, 1), (1, 1)], [(1, 1), (1, 1)]],
                     dtype=dt)
     assert_equal(test, control)
     # Nested dtype
     dt = np.dtype([('a', 'f'), ('b', [('ba', 'f'), ('bb', 'f')])])
     test = masked_all((2,), dtype=dt)
     control = array([(1, (1, 1)), (1, (1, 1))],
                     mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt)
     assert_equal(test, control)
     test = masked_all((2,), dtype=dt)
     control = array([(1, (1, 1)), (1, (1, 1))],
                     mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt)
     assert_equal(test, control)
     test = masked_all((1, 1), dtype=dt)
     control = array([[(1, (1, 1))]], mask=[[(1, (1, 1))]], dtype=dt)
     assert_equal(test, control)
Example #31
0
 def test_run(self, t):
     """Only test hash runs at all."""
     dt = np.dtype(t)
     hash(dt)
Example #32
0
import copy
from abc import ABCMeta, abstractmethod
import numpy as np
import lalsimulation as sim
import h5py
from pycbc import waveform
from pycbc.waveform import get_td_waveform, utils as wfutils
from pycbc.waveform import ringdown_td_approximants
from pycbc.types import float64, float32, TimeSeries
from pycbc.detector import Detector
from pycbc.conversions import tau0_from_mass1_mass2
import pycbc.io

from six import add_metaclass

injection_func_map = {
    np.dtype(float32): sim.SimAddInjectionREAL4TimeSeries,
    np.dtype(float64): sim.SimAddInjectionREAL8TimeSeries
}

#
# Remove everything between the dashed lines once we get rid of xml
# -----------------------------------------------------------------------------
#
from glue.ligolw import utils as ligolw_utils
from glue.ligolw import ligolw, table, lsctables


# dummy class needed for loading LIGOLW files
class LIGOLWContentHandler(ligolw.LIGOLWContentHandler):
    pass
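injection_func_map above selects the LAL injection routine by the precision of the strain data. The pattern is an ordinary dictionary lookup keyed on np.dtype objects; a self-contained illustration with plain strings standing in for the LAL functions (nothing below comes from pycbc):

    import numpy as np

    handlers = {np.dtype(np.float32): 'REAL4 routine',
                np.dtype(np.float64): 'REAL8 routine'}
    strain = np.zeros(16, dtype=np.float32)
    # dtype objects hash consistently, so the array's own dtype is a valid key
    assert handlers[strain.dtype] == 'REAL4 routine'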
Example #33
0
 def make_dtype(off):
     return np.dtype({
         'names': ['A'],
         'formats': ['i4'],
         'offsets': [off]
     })
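A quick check of what the helper produces for two different offsets, assuming make_dtype and numpy (as np) are in scope; the assertions are mine, not part of the surrounding test:

    dt0 = make_dtype(0)
    dt4 = make_dtype(4)
    assert dt0.fields['A'][1] == 0   # the field offset comes back unchanged
    assert dt4.fields['A'][1] == 4
    assert dt4.itemsize == 8         # itemsize grows to cover the offset field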
Example #34
0
 def test_bool_commastring(self):
     d = np.dtype('?,?,?')  # raises?
     assert_equal(len(d.names), 3)
     for n in d.names:
         assert_equal(d.fields[n][0], np.dtype('?'))
Example #35
0
 def test_from_dictproxy(self):
     # Tests for PR #5920
     dt = np.dtype({'names': ['a', 'b'], 'formats': ['i4', 'f4']})
     assert_dtype_equal(dt, np.dtype(dt.fields))
     dt2 = np.dtype((np.void, dt.fields))
     assert_equal(dt2.fields, dt.fields)
Example #36
0
 def test_comma_datetime(self):
     dt = np.dtype('M8[D],datetime64[Y],i8')
     assert_equal(
         dt,
         np.dtype([('f0', 'M8[D]'), ('f1', 'datetime64[Y]'), ('f2', 'i8')]))
Example #37
0
 def test_union_struct(self):
     # Should be able to create union dtypes
     dt = np.dtype(
         {
             'names': ['f0', 'f1', 'f2'],
             'formats': ['<u4', '<u2', '<u2'],
             'offsets': [0, 0, 2]
         },
         align=True)
     assert_equal(dt.itemsize, 4)
     a = np.array([3], dtype='<u4').view(dt)
     a['f1'] = 10
     a['f2'] = 36
     assert_equal(a['f0'], 10 + 36 * 256 * 256)
     # Should be able to specify fields out of order
     dt = np.dtype(
         {
             'names': ['f0', 'f1', 'f2'],
             'formats': ['<u4', '<u2', '<u2'],
             'offsets': [4, 0, 2]
         },
         align=True)
     assert_equal(dt.itemsize, 8)
     # field name should not matter: assignment is by position
     dt2 = np.dtype(
         {
             'names': ['f2', 'f0', 'f1'],
             'formats': ['<u4', '<u2', '<u2'],
             'offsets': [4, 0, 2]
         },
         align=True)
     vals = [(0, 1, 2), (3, -1, 4)]
     vals2 = [(0, 1, 2), (3, -1, 4)]
     a = np.array(vals, dt)
     b = np.array(vals2, dt2)
     assert_equal(a.astype(dt2), b)
     assert_equal(b.astype(dt), a)
     assert_equal(a.view(dt2), b)
     assert_equal(b.view(dt), a)
     # Should not be able to overlap objects with other types
     assert_raises(TypeError, np.dtype, {
         'names': ['f0', 'f1'],
         'formats': ['O', 'i1'],
         'offsets': [0, 2]
     })
     assert_raises(TypeError, np.dtype, {
         'names': ['f0', 'f1'],
         'formats': ['i4', 'O'],
         'offsets': [0, 3]
     })
     assert_raises(
         TypeError, np.dtype, {
             'names': ['f0', 'f1'],
             'formats': [[('a', 'O')], 'i1'],
             'offsets': [0, 2]
         })
     assert_raises(
         TypeError, np.dtype, {
             'names': ['f0', 'f1'],
             'formats': ['i4', [('a', 'O')]],
             'offsets': [0, 3]
         })
     # Out of order should still be ok, however
     dt = np.dtype({
         'names': ['f0', 'f1'],
         'formats': ['i1', 'O'],
         'offsets': [np.dtype('intp').itemsize, 0]
     })
Example #38
0
 def test_different_names(self):
     # In theory, they may hash the same (collision) ?
     a = np.dtype([('yo', int)])
     b = np.dtype([('ye', int)])
     assert_dtype_not_equal(a, b)
Example #39
0
 def test_simple_endian_types(self):
     self.check(ctypes.c_uint16.__ctype_le__, np.dtype('<u2'))
     self.check(ctypes.c_uint16.__ctype_be__, np.dtype('>u2'))
     self.check(ctypes.c_uint8.__ctype_le__, np.dtype('u1'))
     self.check(ctypes.c_uint8.__ctype_be__, np.dtype('u1'))
Example #40
0
 def test_aligned_size(self):
     # Check that structured dtypes get padded to an aligned size
     dt = np.dtype('i4, i1', align=True)
     assert_equal(dt.itemsize, 8)
     dt = np.dtype([('f0', 'i4'), ('f1', 'i1')], align=True)
     assert_equal(dt.itemsize, 8)
     dt = np.dtype(
         {
             'names': ['f0', 'f1'],
             'formats': ['i4', 'u1'],
             'offsets': [0, 4]
         },
         align=True)
     assert_equal(dt.itemsize, 8)
     dt = np.dtype({'f0': ('i4', 0), 'f1': ('u1', 4)}, align=True)
     assert_equal(dt.itemsize, 8)
     # Nesting should preserve that alignment
     dt1 = np.dtype([('f0', 'i4'),
                     ('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]),
                     ('f2', 'i1')],
                    align=True)
     assert_equal(dt1.itemsize, 20)
     dt2 = np.dtype(
         {
             'names': ['f0', 'f1', 'f2'],
             'formats':
             ['i4', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 'i1'],
             'offsets': [0, 4, 16]
         },
         align=True)
     assert_equal(dt2.itemsize, 20)
     dt3 = np.dtype(
         {
             'f0': ('i4', 0),
             'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4),
             'f2': ('i1', 16)
         },
         align=True)
     assert_equal(dt3.itemsize, 20)
     assert_equal(dt1, dt2)
     assert_equal(dt2, dt3)
     # Nesting should preserve packing
     dt1 = np.dtype([('f0', 'i4'),
                     ('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]),
                     ('f2', 'i1')],
                    align=False)
     assert_equal(dt1.itemsize, 11)
     dt2 = np.dtype(
         {
             'names': ['f0', 'f1', 'f2'],
             'formats':
             ['i4', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 'i1'],
             'offsets': [0, 4, 10]
         },
         align=False)
     assert_equal(dt2.itemsize, 11)
     dt3 = np.dtype(
         {
             'f0': ('i4', 0),
             'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4),
             'f2': ('i1', 10)
         },
         align=False)
     assert_equal(dt3.itemsize, 11)
     assert_equal(dt1, dt2)
     assert_equal(dt2, dt3)
     # Array of subtype should preserve alignment
     dt1 = np.dtype([('a', '|i1'), ('b', [('f0', '<i2'),
                                          ('f1', '<f4')], 2)],
                    align=True)
     assert_equal(dt1.descr, [('a', '|i1'), ('', '|V3'),
                              ('b', [('f0', '<i2'), ('', '|V2'),
                                     ('f1', '<f4')], (2, ))])
Example #41
0
    def test_padded_structure(self):
        class PaddedStruct(ctypes.Structure):
            _fields_ = [('a', ctypes.c_uint8), ('b', ctypes.c_uint16)]

        expected = np.dtype([('a', np.uint8), ('b', np.uint16)], align=True)
        self.check(PaddedStruct, expected)
Example #42
0
 def test_equivalent_record(self):
     """Test whether equivalent record dtypes hash the same."""
     a = np.dtype([('yo', int)])
     b = np.dtype([('yo', int)])
     assert_dtype_equal(a, b)
Example #43
0
 class dt(np.void):
     # This code path was previously completely untested, so it is unclear
     # what it should be useful for. Note that if np.void is used,
     # numpy will think we are deallocating a base type [1.17, 2019-02].
     dtype = np.dtype("f,f")
     pass
Example #44
0
    def test_big_endian_structure(self):
        class PaddedStruct(ctypes.BigEndianStructure):
            _fields_ = [('a', ctypes.c_uint8), ('b', ctypes.c_uint16)]

        expected = np.dtype([('a', '>B'), ('b', '>H')], align=True)
        self.check(PaddedStruct, expected)
Example #45
0
def test_dtypes_are_true():
    # test for gh-6294
    assert bool(np.dtype('f8'))
    assert bool(np.dtype('i8'))
    assert bool(np.dtype([('a', 'i8'), ('b', 'f4')]))
Example #46
0
 def check(ctype, dtype):
     dtype = np.dtype(dtype)
     assert_equal(np.dtype(ctype), dtype)
     assert_equal(np.dtype(ctype()), dtype)
Example #47
0
 def test_datetime(self, base, unit):
     dt = np.dtype('%s[%s]' % (base, unit) if unit else base)
     self.check_pickling(dt)
     if unit:
         dt = np.dtype('%s[7%s]' % (base, unit))
         self.check_pickling(dt)
Example #48
0
    def test_simple(self):
        class dt:
            dtype = "f8"

        assert np.dtype(dt) == np.float64
        assert np.dtype(dt()) == np.float64
Example #49
0
 def test_structured(self):
     dt = np.dtype(([('a', '>f4', (2, 1)), ('b', '<f8', (1, 3))], (2, 2)))
     self.check_pickling(dt)
Example #50
0
 def test_metadata(self):
     dt = np.dtype(int, metadata={'datum': 1})
     self.check_pickling(dt)
Example #51
0
    def test_name_dtype_subclass(self):
        # Ticket #4357
        class user_def_subcls(np.void):
            pass

        assert_equal(np.dtype(user_def_subcls).name, 'user_def_subcls')
Example #52
0
 def test_structured_unaligned(self):
     dt = np.dtype('i4, i1', align=False)
     self.check_pickling(dt)
Example #53
0
 def test_dtype_bytes_str_equivalence(self, value):
     bytes_value = value.encode('ascii')
     from_bytes = np.dtype(bytes_value)
     from_str = np.dtype(value)
     assert_dtype_equal(from_bytes, from_str)
Example #54
0
 def test_builtin(self, t):
     self.check_pickling(np.dtype(t))
Example #55
0
 def test_void_subclass_fields(self):
     dt = np.dtype((np.record, [('a', '<u2')]))
     assert_equal(repr(dt), "dtype((numpy.record, [('a', '<u2')]))")
     assert_equal(str(dt), "(numpy.record, [('a', '<u2')])")
     assert_equal(dt.name, 'record16')
Example #56
0
 def test_name_builtin(self, t):
     name = t.__name__
     if name.endswith('_'):
         name = name[:-1]
     assert_equal(np.dtype(t).name, name)
Example #57
def dt_to_timestamp(dt):
    dt_in_us = np.dtype(np.int64).type(
        (dt - timeinfo["date_start"]).total_seconds() * timeinfo['s'])
    return dt_in_us
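dt_to_timestamp returns the elapsed time since timeinfo["date_start"] as an int64, scaled by timeinfo['s']. A self-contained sketch under the assumption that the scale is microseconds per second; both the epoch and the scale below are guesses, not values from the source:

    import numpy as np
    from datetime import datetime

    timeinfo = {'date_start': datetime(2000, 1, 1), 's': 1000000}  # assumed values
    stamp = dt_to_timestamp(datetime(2000, 1, 2))
    assert stamp == np.int64(86400) * 1000000  # one day, in microseconds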
Example #58
0
 def test_dtype_writable_attributes_deletion(self):
     dt = np.dtype(np.double)
     attr = ["names"]
     for s in attr:
         assert_raises(AttributeError, delattr, dt, s)
Example #59
0
 def test_namedtype(self):
     """ Named type repr() with unicode """
     self.f['type'] = np.dtype('f')
     typ = self.f['type']
     self._check_type(typ)
Example #60
0
 def test_void_subclass_sized(self):
     dt = np.dtype((np.record, 2))
     assert_equal(repr(dt), "dtype('V2')")
     assert_equal(str(dt), '|V2')
     assert_equal(dt.name, 'record16')