Example #1
File: utils.py  Project: WilfR/datashader
def dynd_to_np_mask(x):
    if is_option(x.dtype):
        arr = nd.as_numpy(x.view_scalars(x.dtype.value_type))
        missing = is_missing(arr)
    else:
        arr = nd.as_numpy(x)
        missing = np.full_like(arr, False, dtype='bool')
    return arr, missing
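A minimal usage sketch for the helper above, assuming only the dynd-python `nd` module; the array contents are made up. A plain (non-option) array takes the `else` branch and comes back with an all-False mask:

from dynd import nd
import numpy as np

x = nd.array([1.0, 2.0, 3.0])        # hypothetical input, no option/missing dtype involved
arr, missing = dynd_to_np_mask(x)
assert isinstance(arr, np.ndarray)
assert not missing.any()             # mask is all False for a plain array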
Example #2
def test_scalar_agg_bool(op):
    np_c = nd.as_numpy(c)
    np_d = nd.as_numpy(d)
    assert_dynd_eq(op(s_c, s_d)._data, op(np_c, np_d), False)
    assert_dynd_eq(op(s_c, True)._data, op(np_c, True), False)
    assert_dynd_eq(op(s_d, True)._data, op(np_d, True), False)
    assert_dynd_eq(op(s_c, True)._data, op(np_c, True), False)
    assert_dynd_eq(op(s_d, True)._data, op(np_d, True), False)
    assert_dynd_eq(op(True, s_c)._data, op(True, np_c), False)
    assert_dynd_eq(op(True, s_d)._data, op(True, np_d), False)
    assert_dynd_eq(op(True, s_c)._data, op(True, np_c), False)
    assert_dynd_eq(op(True, s_d)._data, op(True, np_d), False)
Example #3
 def test_string_as_numpy(self):
     a = nd.array(["this", "is", "a", "test of varlen strings"])
     b = nd.as_numpy(a, allow_copy=True)
     self.assertEqual(b.dtype, np.dtype('O'))
     assert_equal(b, np.array(["this", "is", "a", "test of varlen strings"],
                              dtype='O'))
     # Also in a struct
     a = nd.array([(1, "testing", 1.5), (10, "abc", 2)],
                  type="strided * {x: int, y: string, z: real}")
     b = nd.as_numpy(a, allow_copy=True)
     self.assertEqual(b.dtype, np.dtype([('x', 'int32'),
                                         ('y', 'O'),
                                         ('z', 'float64')], align=True))
     self.assertEqual(b.tolist(), [(1, "testing", 1.5), (10, "abc", 2)])
Example #4
 def test_string_as_numpy(self):
     a = nd.array(["this", "is", "a", "test of varlen strings"])
     b = nd.as_numpy(a, allow_copy=True)
     self.assertEqual(b.dtype, np.dtype('O'))
     assert_equal(b, np.array(["this", "is", "a", "test of varlen strings"],
                              dtype='O'))
     # Also in a struct
     a = nd.array([(1, "testing", 1.5), (10, "abc", 2)],
                  type="Fixed * {x: int, y: string, z: real}")
     b = nd.as_numpy(a, allow_copy=True)
     self.assertEqual(b.dtype, np.dtype([('x', 'int32'),
                                         ('y', 'O'),
                                         ('z', 'float64')], align=True))
     self.assertEqual(b.tolist(), [(1, "testing", 1.5), (10, "abc", 2)])
Example #5
 def create_hdf5(self, name):
     import tables as tb
     a1 = nd.array([[1, 2, 3], [4, 5, 6]], dtype="int32")
     a2 = nd.array([[1, 2, 3], [3, 2, 1]], dtype="int32")
     fname = os.path.join(self.arrdir, '%s.h5' % name)
     with tb.open_file(fname, 'w') as f:
         f.create_array(f.root, "a1", nd.as_numpy(a1))
         mg = f.create_group(f.root, "mygroup")
         f.create_array(mg, "a2", nd.as_numpy(a2))
     with open(os.path.join(self.arrdir, '%s.array' % name), 'w') as f:
         f.write('type: hdf5\n')
         f.write('import: {\n')
         f.write('    datapath: /mygroup/a2\n')
         f.write('    }\n')
Example #6
 def create_hdf5(self, name):
     import tables as tb
     a1 = nd.array([[1, 2, 3], [4, 5, 6]], dtype="int32")
     a2 = nd.array([[1, 2, 3], [3, 2, 1]], dtype="int32")
     fname = os.path.join(self.arrdir, '%s.h5' % name)
     with tb.open_file(fname, 'w') as f:
         f.create_array(f.root, "a1", nd.as_numpy(a1))
         mg = f.create_group(f.root, "mygroup")
         f.create_array(mg, "a2", nd.as_numpy(a2))
     with open(os.path.join(self.arrdir, '%s.array' % name), 'w') as f:
         f.write('type: hdf5\n')
         f.write('import: {\n')
         f.write('    datapath: /mygroup/a2\n')
         f.write('    }\n')
Example #7
 def test_cstruct_as_numpy(self):
     # Aligned cstruct
     a = nd.array([[1, 2], [3, 4]], dtype='c{x : int32, y: int64}')
     b = nd.as_numpy(a)
     self.assertEqual(b.dtype,
                 np.dtype([('x', np.int32), ('y', np.int64)], align=True))
     self.assertEqual(nd.as_py(a.x), b['x'].tolist())
     self.assertEqual(nd.as_py(a.y), b['y'].tolist())
     # Unaligned cstruct
     a = nd.array([[1, 2], [3, 4]],
                 dtype='c{x : unaligned[int32], y: unaligned[int64]}')
     b = nd.as_numpy(a)
     self.assertEqual(b.dtype, np.dtype([('x', np.int32), ('y', np.int64)]))
     self.assertEqual(nd.as_py(a.x), b['x'].tolist())
     self.assertEqual(nd.as_py(a.y), b['y'].tolist())
Example #8
 def test_struct_as_numpy(self):
     # Aligned struct
     a = nd.array([[1, 2], [3, 4]], type='2 * {x : int32, y: int64}')
     b = nd.as_numpy(a)
     self.assertEqual(b.dtype,
                 np.dtype([('x', np.int32), ('y', np.int64)], align=True))
     self.assertEqual(nd.as_py(a.x), b['x'].tolist())
     self.assertEqual(nd.as_py(a.y), b['y'].tolist())
     # Unaligned struct
     a = nd.array([[1, 2], [3, 4]],
                 type='2 * {x : unaligned[int32], y: unaligned[int64]}')
     b = nd.as_numpy(a)
     self.assertEqual(b.dtype, np.dtype([('x', np.int32), ('y', np.int64)]))
     self.assertEqual(nd.as_py(a.x), b['x'].tolist())
     self.assertEqual(nd.as_py(a.y), b['y'].tolist())
Example #9
 def test_datetime_as_numpy(self):
     a = nd.array(['2000-12-13T12:30',
                   '1995-05-02T2:15:33'],
                  dtype='datetime[tz="UTC"]')
     b = nd.as_numpy(a, allow_copy=True)
     assert_equal(b, np.array(['2000-12-13T12:30Z', '1995-05-02T02:15:33Z'],
                              dtype='M8[us]'))
Example #10
 def test__type_from_h5py_special(self):
     # h5py 2.3 style "special dtype"
     dt = np.dtype(object, metadata={'vlen' : str})
     self.assertEqual(ndt.type(dt), ndt.string)
     if sys.version_info < (3, 0):
         dt = np.dtype(object, metadata={'vlen' : unicode})
         self.assertEqual(ndt.type(dt), ndt.string)
     # h5py 2.2 style "special dtype"
     dt = np.dtype(('O', [( ({'type': str},'vlen'), 'O' )] ))
     self.assertEqual(ndt.type(dt), ndt.string)
     if sys.version_info < (3, 0):
         dt = np.dtype(('O', [( ({'type': unicode},'vlen'), 'O' )] ))
         self.assertEqual(ndt.type(dt), ndt.string)
     # Should be able to roundtrip dynd -> numpy -> dynd
     x = nd.array(['testing', 'one', 'two'])
     self.assertEqual(nd.type_of(x), ndt.type('3 * string'))
     y = nd.as_numpy(x, allow_copy=True)
     self.assertEqual(y.shape, (3,))
     self.assertEqual(y[0], 'testing')
     self.assertEqual(y[1], 'one')
     self.assertEqual(y[2], 'two')
     self.assertEqual(y.dtype.kind, 'O')
     if sys.version_info < (3, 0):
         self.assertEqual(y.dtype.metadata, {'vlen' : unicode})
     else:
         self.assertEqual(y.dtype.metadata, {'vlen' : str})
     z = nd.array(y)
     self.assertEqual(nd.type_of(z), nd.type_of(x))
     self.assertEqual(nd.as_py(z), nd.as_py(x))
Example #11
 def test_ndt_type_from_h5py_special(self):
     # h5py 2.3 style "special dtype"
     dt = np.dtype(object, metadata={'vlen' : str})
     self.assertEqual(ndt.type(dt), ndt.string)
     if sys.version_info < (3, 0):
         dt = np.dtype(object, metadata={'vlen' : unicode})
         self.assertEqual(ndt.type(dt), ndt.string)
     # h5py 2.2 style "special dtype"
     dt = np.dtype(('O', [( ({'type': str},'vlen'), 'O' )] ))
     self.assertEqual(ndt.type(dt), ndt.string)
     if sys.version_info < (3, 0):
         dt = np.dtype(('O', [( ({'type': unicode},'vlen'), 'O' )] ))
         self.assertEqual(ndt.type(dt), ndt.string)
     # Should be able to roundtrip dynd -> numpy -> dynd
     x = nd.array(['testing', 'one', 'two'])
     self.assertEqual(nd.type_of(x), ndt.type('3 * string'))
     y = nd.as_numpy(x, allow_copy=True)
     self.assertEqual(y.dtype.kind, 'O')
     if sys.version_info < (3, 0):
         self.assertEqual(y.dtype.metadata, {'vlen' : unicode})
     else:
         self.assertEqual(y.dtype.metadata, {'vlen' : str})
     z = nd.array(y)
     self.assertEqual(nd.type_of(z), nd.type_of(x))
     self.assertEqual(nd.as_py(z), nd.as_py(x))
Example #12
 def sort(self, json_cmd):
     import numpy as np
     print('sorting')
     cmd = json.loads(json_cmd)
     array_url = cmd.get('input', self.base_url + self.array_name)
     if not array_url.startswith(self.base_url):
         raise RuntimeError('Input array must start with the base url')
     array_name = array_url[len(self.base_url):]
     field = cmd['field']
     arr = self.get_session_array(array_name)
     nparr = as_numpy(arr)
     idxs = np.argsort(nparr[field])
     res = nd.ndobject(nparr[idxs])
     defarr = self.array_provider.create_deferred_array_filename(
         self.session_name, 'sort_', res)
     dshape = nd.dshape_of(res)
     defarr[0].write(
         json.dumps({
             'dshape': dshape,
             'command': 'sort',
             'params': {
                 'field': field,
             }
         }))
     defarr[0].close()
     content_type = 'application/json; charset=utf-8'
     body = json.dumps({
         'session': self.base_url + self.session_name,
         'output': self.base_url + defarr[1],
         'dshape': dshape
     })
     return (content_type, body)
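The handler above only reads two keys from the JSON command: 'input' (defaulting to the session's own array URL) and 'field'. A hedged sketch of a request body; the URL and field name are invented for illustration:

import json

json_cmd = json.dumps({
    'input': 'http://localhost:8080/remote/kiva_tiny',  # hypothetical; must start with the server's base_url
    'field': 'loan_amount',                             # hypothetical record field to sort on
})
# content_type, body = handler.sort(json_cmd)           # `handler` would be an instance of the class above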
Example #13
def ones(dshape, ddesc=None):
    """Create an array and fill it with ones.

    Parameters
    ----------
    dshape : datashape
        The datashape for the resulting array.

    ddesc : data descriptor instance
        This comes with the necessary info for storing the data.  If
        None, a DyND_DDesc will be used.

    Returns
    -------
    out: a concrete blaze array.

    """
    dshape = _normalize_dshape(dshape)

    if ddesc is None:
        ddesc = DyND_DDesc(nd.ones(str(dshape), access='rw'))
        return Array(ddesc)
    if isinstance(ddesc, BLZ_DDesc):
        shape, dt = to_numpy(dshape)
        ddesc.blzarr = blz.ones(
            shape, dt, rootdir=ddesc.path, mode=ddesc.mode, **ddesc.kwargs)
    elif isinstance(ddesc, HDF5_DDesc):
        obj = nd.as_numpy(nd.empty(str(dshape)))
        with tb.open_file(ddesc.path, mode=ddesc.mode) as f:
            where, name = split_path(ddesc.datapath)
            f.create_earray(where, name, filters=ddesc.filters, obj=obj)
        ddesc.mode = 'a'  # change into 'a'ppend mode for further operations
    return Array(ddesc)
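A hedged call sketch for the default branch documented above (no ddesc, so the result is an in-memory, DyND-backed blaze array); the dshape string is arbitrary:

a = ones('10 * float64')    # DyND_DDesc built from nd.ones('10 * float64', access='rw')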
Example #14
 def sort(self, json_cmd):
     import numpy as np
     print ('sorting')
     cmd = json.loads(json_cmd)
     array_url = cmd.get('input', self.base_url + self.array_name)
     if not array_url.startswith(self.base_url):
         raise RuntimeError('Input array must start with the base url')
     array_name = array_url[len(self.base_url):]
     field = cmd['field']
     arr = self.get_session_array(array_name)
     nparr = as_numpy(arr)
     idxs = np.argsort(nparr[field])
     res = nd.ndobject(nparr[idxs])
     defarr = self.array_provider.create_deferred_array_filename(
                     self.session_name, 'sort_', res)
     dshape = nd.dshape_of(res)
     defarr[0].write(json.dumps({
             'dshape': dshape,
             'command': 'sort',
             'params': {
                 'field': field,
             }
         }))
     defarr[0].close()
     content_type = 'application/json; charset=utf-8'
     body = json.dumps({
             'session': self.base_url + self.session_name,
             'output': self.base_url + defarr[1],
             'dshape': dshape
         })
     return (content_type, body)
Example #15
 def finalize(bases):
     shape = bases[0].shape[:2]
     out = nd.empty(shape, dshape)
     for path, finalizer, inds in zip(paths, finalizers, indices):
         arr = reduce(getattr, path, out)
         np_arr = nd.as_numpy(arr.view_scalars(arr.dtype.value_type))
         np_arr[:] = finalizer(*get(inds, bases))
     return out
Example #16
 def finalize(bases):
     shape = bases[0].shape[:2]
     out = nd.empty(shape, dshape)
     for path, finalizer, inds in zip(paths, finalizers, indices):
         arr = reduce(getattr, path, out)
         np_arr = nd.as_numpy(arr.view_scalars(arr.dtype.value_type))
         np_arr[:] = finalizer(*get(inds, bases))
     return out
Example #17
 def test_datetime_as_numpy(self):
     a = nd.array(['2000-12-13T12:30', '1995-05-02T2:15:33'],
                  dtype='datetime[tz="UTC"]')
     b = nd.as_numpy(a, allow_copy=True)
     assert_equal(
         b,
         np.array(['2000-12-13T12:30Z', '1995-05-02T02:15:33Z'],
                  dtype='M8[us]'))
Example #18
 def test_expr_struct_conversion(self):
     a = nd.array([date(2000, 12, 13), date(1995, 5, 2)]).to_struct
     b = nd.as_numpy(a, allow_copy=True)
     self.assertTrue(isinstance(b, np.ndarray))
     # Use the NumPy assertions which support arrays
     assert_equal(b['year'], [2000, 1995])
     assert_equal(b['month'], [12, 5])
     assert_equal(b['day'], [13, 2])
Example #19
 def test_expr_struct_conversion(self):
     a = nd.array([date(2000, 12, 13), date(1995, 5, 2)]).to_struct()
     b = nd.as_numpy(a, allow_copy=True)
     self.assertTrue(isinstance(b, np.ndarray))
     # Use the NumPy assertions which support arrays
     assert_equal(b['year'], [2000, 1995])
     assert_equal(b['month'], [12, 5])
     assert_equal(b['day'], [13, 2])
Example #20
def test_count_cat():
    agg = c.points(df, 'x', 'y', ds.count_cat('cat'))
    sol = np.array([[[5, 0, 0, 0],
                     [0, 0, 5, 0]],
                    [[0, 5, 0, 0],
                     [0, 0, 0, 5]]])
    assert (nd.as_numpy(agg._data) == sol).all()
    assert agg._cats == ('a', 'b', 'c', 'd')
Example #21
 def test_var_dim_conversion(self):
     # A simple instantiated var_dim array should be
     # viewable with numpy without changes
     a = nd.array([1, 2, 3, 4, 5], type='var * int32')
     b = nd.as_numpy(a)
     self.assertTrue(isinstance(b, np.ndarray))
     self.assertEqual(b.dtype, np.dtype('int32'))
     # Use the NumPy assertions which support arrays
     assert_equal(b, [1, 2, 3, 4, 5])
Example #22
 def test_var_dim_conversion(self):
     # A simple instantiated var_dim array should be
     # viewable with numpy without changes
     a = nd.array([1, 2, 3, 4, 5], type='var * int32')
     b = nd.as_numpy(a)
     self.assertTrue(isinstance(b, np.ndarray))
     self.assertEqual(b.dtype, np.dtype('int32'))
     # Use the NumPy assertions which support arrays
     assert_equal(b, [1, 2, 3, 4, 5])
Example #23
File: hdf5.py  Project: vitan/blaze
 def _extend_chunks(self, chunks):
     with h5py.File(self.path, mode='a') as f:
         dset = f[self.datapath]
         dtype = dset.dtype
         shape = dset.shape
         for chunk in chunks:
             arr = nd.as_numpy(chunk, allow_copy=True)
             shape = list(dset.shape)
             shape[0] += len(arr)
             dset.resize(shape)
             dset[-len(arr):] = arr
Example #24
 def __getitem__(self, key):
     try:
         if isinstance(key, list):
             # List of categories
             inds = [self._cats.index(k) for k in key]
             dtype = self._data.dtype
             if is_option(dtype):
                 out = nd.as_numpy(self._data.view_scalars(
                                   dtype.value_type))
             else:
                 out = nd.as_numpy(self._data)
             out = nd.asarray(out[:, :, inds]).view_scalars(dtype)
             return CategoricalAggregate(out, key, self.x_axis, self.y_axis)
         else:
             # Single category
             i = self._cats.index(key)
             return ScalarAggregate(self._data[:, :, i],
                                    self.x_axis, self.y_axis)
     except ValueError:
         raise KeyError("'{0}'".format(key))
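Grounded in the two branches above: a list of labels narrows the CategoricalAggregate, a single label drops down to a ScalarAggregate, and an unknown label surfaces as KeyError. A sketch with a hypothetical aggregate whose categories are ('a', 'b', 'c', 'd'), e.g. the count_cat result shown earlier:

one = agg['b']           # ScalarAggregate for the single category 'b'
two = agg[['a', 'c']]    # CategoricalAggregate restricted to 'a' and 'c'
# agg['z']               # would raise KeyError("'z'")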
Example #25
File: hdf5.py  Project: B-Rich/blaze
 def _extend_chunks(self, chunks):
     with h5py.File(self.path, mode='a') as f:
         dset = f[self.datapath]
         dtype = dset.dtype
         shape = dset.shape
         for chunk in chunks:
             arr = nd.as_numpy(chunk, allow_copy=True)
             shape = list(dset.shape)
             shape[0] += len(arr)
             dset.resize(shape)
             dset[-len(arr):] = arr
Example #26
File: hdf5.py  Project: aterrel/blaze
    def _extend_chunks(self, chunks):
        if 'w' not in self.mode and 'a' not in self.mode:
            raise ValueError('Read only')

        with h5py.File(self.path, mode=self.mode) as f:
            dset = f[self.datapath]
            dtype = dset.dtype
            shape = dset.shape
            for chunk in chunks:
                arr = nd.as_numpy(chunk, allow_copy=True)
                shape = list(dset.shape)
                shape[0] += len(arr)
                dset.resize(shape)
                dset[-len(arr):] = arr
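The resize-then-assign idiom used above is the usual way to append rows to an extendable HDF5 dataset. A self-contained sketch of just that pattern with h5py; the file name, dataset name, and data are hypothetical:

import h5py
import numpy as np

arr = np.array([[7, 8, 9]], dtype='int32')
with h5py.File('demo.h5', 'w') as f:
    dset = f.create_dataset('data', shape=(0, 3), maxshape=(None, 3), dtype='int32')
    dset.resize(dset.shape[0] + len(arr), axis=0)   # grow along the first axis
    dset[-len(arr):] = arr                          # write the new rows into the freed slots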
Example #27
 def create_hdf5(self, name):
     import tables as tb
     a1 = nd.array([[1, 2, 3], [4, 5, 6]], dtype="int32")
     a2 = nd.array([[1, 2, 3], [3, 2, 1]], dtype="int32")
     a3 = nd.array([[1, 3, 2], [2, 1, 3]], dtype="int32")
     fname = os.path.join(self.arrdir, '%s_arr.h5' % name)
     with tb.open_file(fname, 'w') as f:
         f.create_array(f.root, "a1", nd.as_numpy(a1))
         mg = f.create_group(f.root, "mygroup")
         f.create_array(mg, "a2", nd.as_numpy(a2))
         f.create_array(mg, "a3", nd.as_numpy(a3))
         mg2 = f.create_group(mg, "mygroup2")
     # Create a .array file for locating the dataset inside the file
     with open(os.path.join(self.arrdir, '%s_arr.array' % name), 'w') as f:
         f.write('type: hdf5\n')
         f.write('import: {\n')
         f.write('    datapath: /mygroup/a2\n')
         f.write('    }\n')
     # Create a .dir file for listing datasets inside the file
     with open(os.path.join(self.arrdir, '%s_dir.dir' % name), 'w') as f:
         f.write('type: hdf5\n')
         f.write('import: {\n')
         f.write('    filename: "%s"\n' % fname.replace('\\', '\\\\'))
         f.write('    }\n')
Example #28
def colorize(agg, color_key, how="log", min_alpha=20):
    """Color a CategoricalAggregate by field.

    Parameters
    ----------
    agg : CategoricalAggregate
    color_key : dict or iterable
        A mapping of fields to colors. Can be either a ``dict`` mapping from
        field name to colors, or an iterable of colors in the same order as the
        record fields.
    how : string or callable
        The interpolation method to use. Valid strings are 'log' [default],
        'cbrt', and 'linear'. Callables take a 2-dimensional array of
        magnitudes at each pixel, and should return a numeric array of the same
        shape.
    min_alpha : float, optional
        The minimum alpha value to use for non-empty pixels, in [0, 255].
    """
    if not isinstance(agg, CategoricalAggregate):
        raise TypeError("agg must be instance of CategoricalAggregate")
    if not isinstance(color_key, dict):
        color_key = dict(zip(agg.cats, color_key))
    if len(color_key) != len(agg.cats):
        raise ValueError("Number of colors doesn't match number of fields")
    if not (0 <= min_alpha <= 255):
        raise ValueError("min_alpha must be between 0 and 255")
    colors = [rgb(color_key[c]) for c in agg.cats]
    rs, gs, bs = map(np.array, zip(*colors))
    data = nd.as_numpy(agg._data).astype("f8")
    total = data.sum(axis=2)
    r = (data.dot(rs) / total).astype(np.uint8)
    g = (data.dot(gs) / total).astype(np.uint8)
    b = (data.dot(bs) / total).astype(np.uint8)
    a = _normalize_interpolate_how(how)(total)
    a = ((255 - min_alpha) * a / a.max() + min_alpha).astype(np.uint8)
    white = total == 0
    r[white] = g[white] = b[white] = 255
    a[white] = 0
    return Image(np.dstack([r, g, b, a]).view(np.uint32).reshape(a.shape), agg.x_axis, agg.y_axis)
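A hedged call sketch; `agg` stands in for a CategoricalAggregate with categories ('a', 'b', 'c', 'd') (as in the count_cat test earlier), and the colour choices are arbitrary:

img = colorize(agg,
               {'a': 'red', 'b': 'green', 'c': 'blue', 'd': 'orange'},
               how='cbrt', min_alpha=40)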
Example #29
def ones(dshape, ddesc=None):
    """Create an array and fill it with ones.

    Parameters
    ----------
    dshape : datashape
        The datashape for the resulting array.

    ddesc : data descriptor instance
        This comes with the necessary info for storing the data.  If
        None, a DyND_DDesc will be used.

    Returns
    -------
    out: a concrete blaze array.

    """
    dshape = _normalize_dshape(dshape)

    if ddesc is None:
        ddesc = DyND_DDesc(nd.ones(str(dshape), access='rw'))
        return Array(ddesc)
    if isinstance(ddesc, BLZ_DDesc):
        shape, dt = to_numpy(dshape)
        ddesc.blzarr = blz.ones(shape,
                                dt,
                                rootdir=ddesc.path,
                                mode=ddesc.mode,
                                **ddesc.kwargs)
    elif isinstance(ddesc, HDF5_DDesc):
        obj = nd.as_numpy(nd.empty(str(dshape)))
        with tb.open_file(ddesc.path, mode=ddesc.mode) as f:
            where, name = split_path(ddesc.datapath)
            f.create_earray(where, name, filters=ddesc.filters, obj=obj)
        ddesc.mode = 'a'  # change into 'a'ppend mode for further operations
    return Array(ddesc)
Example #30
File: into.py  Project: holdenk/blaze
def into(a, b):
    return nd.as_numpy(b, allow_copy=True)
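In blaze's `into` dispatch the first argument only selects the NumPy target; this undecorated snippet simply ignores it. A hedged one-liner, assuming `from dynd import nd` and `import numpy as np`:

result = into(np.ndarray, nd.array([1, 2, 3]))   # returns np.array([1, 2, 3])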
Example #31
 def test_date_as_numpy(self):
     a = nd.array([date(2000, 12, 13), date(1995, 5, 2)])
     b = nd.as_numpy(a, allow_copy=True)
     assert_equal(b, np.array(['2000-12-13', '1995-05-02'], dtype='M8[D]'))
Example #32
def into(a, b, **kwargs):
    return nd.as_numpy(b, allow_copy=True)
Example #33
def eq(agg, b):
    a = nd.as_numpy(agg.view_scalars(agg.dtype.value_type))
    assert np.allclose(a, b)
    assert a.dtype == b.dtype
Example #34
def eq(agg, b):
    agg = agg._data
    a = nd.as_numpy(agg.view_scalars(getattr(agg.dtype, 'value_type', agg.dtype)))
    assert np.allclose(a, b)
    assert a.dtype == b.dtype
Example #35
File: dynd.py  Project: CaptainAL/Spyder
def dynd_to_numpy(x, **kwargs):
    return nd.as_numpy(x, allow_copy=True)
Example #36
 def test_fixed_dim(self):
     a = nd.array([1, 3, 5], type='3 * int32')
     b = nd.as_numpy(a)
     self.assertEqual(b.dtype, np.dtype('int32'))
     self.assertEqual(b.tolist(), [1, 3, 5])
Example #37
 def test_date_as_numpy(self):
     a = nd.array([date(2000, 12, 13), date(1995, 5, 2)])
     b = nd.as_numpy(a, allow_copy=True)
     assert_equal(b, np.array(['2000-12-13', '1995-05-02'], dtype='M8[D]'))
Example #38
File: dynd.py  Project: debugger22/into
def dynd_to_numpy(x, **kwargs):
    return nd.as_numpy(x, allow_copy=True)
Example #39
 def __array__(self):
     return nd.as_numpy(self.dynd_arr())
Example #40
 def __array__(self):
     return nd.as_numpy(self.as_dynd())
Example #41
 def __array__(self):
     return nd.as_numpy(self.dynd_arr())
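The __array__ hook above is what lets NumPy consume the wrapper directly; roughly, with a hypothetical blaze array `a` and `import numpy as np`:

np_view = np.asarray(a)    # dispatches to __array__, i.e. nd.as_numpy(a.dynd_arr())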
Example #42
def array(obj, dshape=None, ddesc=None):
    """Create a Blaze array.

    Parameters
    ----------
    obj : array_like
        Initial contents for the array.

    dshape : datashape
        The datashape for the resulting array. By default the
        datashape will be inferred from data. If an explicit dshape is
        provided, the input data will be coerced into the provided
        dshape.

    ddesc : data descriptor instance
        This comes with the necessary info for storing the data.  If
        None, a DyND_DDesc will be used.

    Returns
    -------
    out : a concrete blaze array.

    """
    dshape = _normalize_dshape(dshape)

    if ((obj is not None) and (not inspect.isgenerator(obj))
            and (dshape is not None)):
        dt = ndt.type(str(dshape))
        if dt.ndim > 0:
            obj = nd.array(obj, type=dt, access='rw')
        else:
            obj = nd.array(obj, dtype=dt, access='rw')

    if obj is None and ddesc is None:
        raise ValueError('you need to specify at least `obj` or `ddesc`')

    if isinstance(obj, Array):
        return obj
    elif isinstance(obj, DDesc):
        if ddesc is None:
            ddesc = obj
            return Array(ddesc)
        else:
            raise ValueError(('you cannot specify `ddesc` when `obj` '
                              'is already a DDesc instance'))

    if ddesc is None:
        # Use a dynd ddesc by default
        try:
            array = nd.asarray(obj, access='rw')
        except:
            raise ValueError(('failed to construct a dynd array from '
                              'object %r') % obj)
        ddesc = DyND_DDesc(array)
        return Array(ddesc)

    # The DDesc has been specified
    if isinstance(ddesc, DyND_DDesc):
        if obj is not None:
            raise ValueError(('you cannot specify simultaneously '
                              '`obj` and a DyND `ddesc`'))
        return Array(ddesc)
    elif isinstance(ddesc, BLZ_DDesc):
        if inspect.isgenerator(obj):
            dt = None if dshape is None else to_numpy_dtype(dshape)
            # TODO: Generator logic could go inside barray
            ddesc.blzarr = blz.fromiter(obj,
                                        dtype=dt,
                                        count=-1,
                                        rootdir=ddesc.path,
                                        mode=ddesc.mode,
                                        **ddesc.kwargs)
        else:
            if isinstance(obj, nd.array):
                obj = nd.as_numpy(obj)
            if dshape and isinstance(dshape.measure, datashape.Record):
                ddesc.blzarr = blz.btable(obj,
                                          rootdir=ddesc.path,
                                          mode=ddesc.mode,
                                          **ddesc.kwargs)
            else:
                ddesc.blzarr = blz.barray(obj,
                                          rootdir=ddesc.path,
                                          mode=ddesc.mode,
                                          **ddesc.kwargs)
    elif isinstance(ddesc, HDF5_DDesc):
        if isinstance(obj, nd.array):
            obj = nd.as_numpy(obj)
        with tb.open_file(ddesc.path, mode=ddesc.mode) as f:
            where, name = split_path(ddesc.datapath)
            if dshape and isinstance(dshape.measure, datashape.Record):
                # Convert the structured array to unaligned dtype
                # We need that because PyTables only accepts unaligned types,
                # which are the default in NumPy
                obj = np.array(obj, datashape.to_numpy_dtype(dshape.measure))
                f.create_table(where, name, filters=ddesc.filters, obj=obj)
            else:
                f.create_earray(where, name, filters=ddesc.filters, obj=obj)
        ddesc.mode = 'a'  # change into 'a'ppend mode for further operations

    return Array(ddesc)
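A hedged sketch of the default path documented above (no ddesc given, so the data ends up in an in-memory DyND_DDesc); the values and dshape are illustrative only:

a = array([[1, 2], [3, 4]], dshape='2 * 2 * int32')   # coerced via nd.array(..., type=..., access='rw')
b = array([1.5, 2.5, 3.5])                            # dshape inferred, stored via nd.asarray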
Example #43
def eq(agg, b):
    a = nd.as_numpy(agg.view_scalars(agg.dtype.value_type))
    assert np.allclose(a, b)
    assert a.dtype == b.dtype
Example #44
File: into.py  Project: dalejung/blaze
def into(a, b, **kwargs):
    return nd.as_numpy(b, allow_copy=True)
Example #45
def array(obj, dshape=None, ddesc=None):
    """Create a Blaze array.

    Parameters
    ----------
    obj : array_like
        Initial contents for the array.

    dshape : datashape
        The datashape for the resulting array. By default the
        datashape will be inferred from data. If an explicit dshape is
        provided, the input data will be coerced into the provided
        dshape.

    ddesc : data descriptor instance
        This comes with the necessary info for storing the data.  If
        None, a DyND_DDesc will be used.

    Returns
    -------
    out : a concrete blaze array.

    """
    dshape = _normalize_dshape(dshape)

    if ((obj is not None) and
        (not inspect.isgenerator(obj)) and
        (dshape is not None)):
        dt = ndt.type(str(dshape))
        if dt.ndim > 0:
            obj = nd.array(obj, type=dt, access='rw')
        else:
            obj = nd.array(obj, dtype=dt, access='rw')

    if obj is None and ddesc is None:
        raise ValueError('you need to specify at least `obj` or `ddesc`')

    if isinstance(obj, Array):
        return obj
    elif isinstance(obj, DDesc):
        if ddesc is None:
            ddesc = obj
            return Array(ddesc)
        else:
            raise ValueError(('you cannot specify `ddesc` when `obj` '
                              'is already a DDesc instance'))

    if ddesc is None:
        # Use a dynd ddesc by default
        try:
            array = nd.asarray(obj, access='rw')
        except:
            raise ValueError(('failed to construct a dynd array from '
                              'object %r') % obj)
        ddesc = DyND_DDesc(array)
        return Array(ddesc)

    # The DDesc has been specified
    if isinstance(ddesc, DyND_DDesc):
        if obj is not None:
            raise ValueError(('you cannot specify simultaneously '
                              '`obj` and a DyND `ddesc`'))
        return Array(ddesc)
    elif isinstance(ddesc, BLZ_DDesc):
        if inspect.isgenerator(obj):
            dt = None if dshape is None else to_numpy_dtype(dshape)
            # TODO: Generator logic could go inside barray
            ddesc.blzarr = blz.fromiter(obj, dtype=dt, count=-1,
                                        rootdir=ddesc.path, mode=ddesc.mode,
                                        **ddesc.kwargs)
        else:
            if isinstance(obj, nd.array):
                obj = nd.as_numpy(obj)
            if dshape and isinstance(dshape.measure, datashape.Record):
                ddesc.blzarr = blz.btable(
                    obj, rootdir=ddesc.path, mode=ddesc.mode, **ddesc.kwargs)
            else:
                ddesc.blzarr = blz.barray(
                    obj, rootdir=ddesc.path, mode=ddesc.mode, **ddesc.kwargs)
    elif isinstance(ddesc, HDF5_DDesc):
        if isinstance(obj, nd.array):
            obj = nd.as_numpy(obj)
        with tb.open_file(ddesc.path, mode=ddesc.mode) as f:
            where, name = split_path(ddesc.datapath)
            if dshape and isinstance(dshape.measure, datashape.Record):
                # Convert the structured array to unaligned dtype
                # We need that because PyTables only accepts unaligned types,
                # which are the default in NumPy
                obj = np.array(obj, datashape.to_numpy_dtype(dshape.measure))
                f.create_table(where, name, filters=ddesc.filters, obj=obj)
            else:
                f.create_earray(where, name, filters=ddesc.filters, obj=obj)
        ddesc.mode = 'a'  # change into 'a'ppend mode for further operations

    return Array(ddesc)
Example #46
File: core.py  Project: holdenk/blaze
 def __array__(self):
     return nd.as_numpy(self.as_dynd())
Example #47
 def test_fixed_dim(self):
     a = nd.array([1, 3, 5], type='3 * int32')
     b = nd.as_numpy(a)
     self.assertEqual(b.dtype, np.dtype('int32'))
     self.assertEqual(b.tolist(), [1, 3, 5])