import weakref as _wr

import enoki
import enoki as _ek  # some routines below use the '_ek' alias


def array_from_dlpack(t, capsule):
    descr = enoki.detail.from_dlpack(capsule)

    device_type = descr['device_type']
    data = descr['data']
    dtype = descr['dtype']
    shape = descr['shape']
    ndim = len(shape)
    strides = descr['strides']

    if strides is None:
        # No stride information given -- assume a C-contiguous layout
        tmp = 1
        strides = [0] * ndim
        for i in reversed(range(ndim)):
            strides[i] = tmp
            tmp *= shape[i]

    if t.IsCUDA and device_type != 2:
        raise enoki.Exception("Cannot create an Enoki GPU array from a "
                              "DLPack CPU tensor!")
    elif not t.IsCUDA and device_type != 1:
        raise enoki.Exception("Cannot create an Enoki CPU array from a "
                              "DLPack GPU tensor!")

    if dtype != t.Type:
        raise enoki.Exception("Incompatible type!")

    shape_target = list(reversed(enoki.shape(t())))
    if len(shape_target) != ndim:
        raise enoki.Exception("Incompatible dimension!")

    for i in range(ndim):
        if shape_target[i] != shape[i] and shape_target[i] != 0:
            raise enoki.Exception("Incompatible shape!")

    # Find the leaf (dynamic) array type underlying 't'
    value = t
    while issubclass(value.Value, enoki.ArrayBase):
        value = value.Value

    # Take ownership of the capsule and map the foreign memory region
    descr['consume'](capsule)
    data = value.map_(data, enoki.hprod(shape), descr['release'])

    def load(t, i, offset):
        """Recursively reassemble the (potentially strided) tensor"""
        size = shape[-1 - i]
        stride = strides[-1 - i]

        if i == ndim - 1:
            if type(offset) is int and stride == 1:
                # Fast path: contiguous innermost dimension
                return data
            else:
                i = enoki.arange(enoki.int32_array_t(t), size)
                return t.gather_(data, offset + stride * i, True, False)
        else:
            result = t()
            for j in range(size):
                result[j] = load(t.Value, i + 1, offset + stride * j)
            return result

    return load(t, 0, 0)
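# A minimal usage sketch (hypothetical, not part of the module): Enoki array
# constructors route DLPack capsules through array_from_dlpack(), but it can
# also be invoked directly. Assumes PyTorch is installed and that the LLVM
# (CPU) backend is available; any dynamic CPU array type would work.
def _example_array_from_dlpack():
    import torch
    from torch.utils.dlpack import to_dlpack
    from enoki.llvm import Float  # assumption: LLVM backend is compiled in

    t = torch.arange(8, dtype=torch.float32)  # CPU tensor, shape (8,)
    capsule = to_dlpack(t)                    # zero-copy DLPack capsule
    a = array_from_dlpack(Float, capsule)
    return a  # shares memory with 't' until the release callback fires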
def array_init(self, args):
    """
    This generic initialization routine initializes an arbitrary Enoki array
    from a variable-length argument list (which could be a scalar broadcast,
    a component list, or a NumPy/PyTorch/TensorFlow/JAX array...)
    """
    n = len(args)
    if n == 0:
        return

    size = self.Size
    value_type = self.Value
    dynamic = size == enoki.Dynamic
    err = None

    try:
        if n == 1:
            o = args[0]
            t = type(o)
            mod = t.__module__
            name = t.__name__
            is_array = issubclass(t, enoki.ArrayBase)
            is_static_array = is_array and not o.Size == enoki.Dynamic
            is_sequence = issubclass(t, list) or issubclass(t, tuple)

            # Matrix initialization from a nested list (sub_len() is a helper
            # defined elsewhere in this module that returns the common length
            # of the nested entries)
            if is_sequence and self.IsMatrix and \
               len(o) == size and sub_len(o) == size:
                for x in range(size):
                    for y in range(size):
                        self[x, y] = value_type.Value(o[x][y])
            elif is_array or is_sequence:
                os = len(o)
                if dynamic:
                    size = os
                    self.init_(size)

                if size == 0:
                    pass
                elif size != os or value_type is t:
                    # Size mismatch!
                    if self.IsMatrix and getattr(t, 'IsMatrix', False):
                        # If both are matrices, copy the top-left block
                        for x in range(size):
                            for y in range(size):
                                if x < o.Size and y < o.Size:
                                    self[x, y] = value_type.Value(o[x, y])
                                else:
                                    self[x, y] = value_type.Value(
                                        1 if x == y else 0)
                    elif self.IsMatrix and value_type is t:
                        for x in range(size):
                            self[x] = o
                    else:
                        # Otherwise, try to broadcast to all entries
                        self.broadcast_(
                            value_type(o)
                            if not issubclass(t, value_type) and
                            not self.IsMatrix else o)
                else:
                    # Size matches, copy element by element
                    if self.IsJIT and getattr(t, 'IsJIT', False) and \
                       self.Depth == 1 and t.Depth == 1:
                        raise enoki.Exception(
                            'Refusing to do an extremely inefficient '
                            'element-by-element array conversion from type %s '
                            'to %s. Did you forget a cast or detach operation?'
                            % (str(type(o)), str(type(self))))

                    if isinstance(o[0], value_type) or self.IsMatrix:
                        for i in range(size):
                            self.set_entry_(i, o[i])
                    else:
                        for i in range(size):
                            self.set_entry_(i, value_type(o[i]))
            elif issubclass(t, (int, float)):
                if dynamic:
                    size = 1
                    self.init_(size)
                self.broadcast_(o)
            elif issubclass(t, complex) and self.IsComplex:
                self.set_entry_(0, o.real)
                self.set_entry_(1, o.imag)
            elif mod == 'numpy':
                import numpy as np

                s1 = tuple(reversed(enoki.shape(self)))
                s2 = o.shape

                # Remove an unnecessary outer dimension if possible
                if s2[0] == 1:
                    o = o[0, ...]
                    s2 = o.shape

                # Reinterpret complex arrays as an extra dimension of reals
                if o.dtype == np.complex64:
                    s2 = (*s2, 2)
                    o = o.view(np.float32).reshape(s2)
                elif o.dtype == np.complex128:
                    s2 = (*s2, 2)
                    o = o.view(np.float64).reshape(s2)

                if o.dtype != self.Type.NumPy:
                    o = o.astype(self.Type.NumPy)

                dim1 = len(s1)
                dim2 = len(s2)

                # A NumPy array may have one dimension fewer when
                # initializing a dynamic array
                if not dim1 == dim2 and \
                   not (dim1 == dim2 + 1 and self.IsDynamic):
                    raise enoki.Exception("Incompatible dimension!")

                for i in reversed(range(dim2)):
                    if s1[i] != s2[i] and s1[i] != 0:
                        raise enoki.Exception("Incompatible shape!")

                if dim1 == 0:
                    pass
                elif dim1 == 1 and self.IsDynamic:
                    # Fast path: load a contiguous 1D array in a single call
                    o = np.ascontiguousarray(o)
                    holder = (o, o.__array_interface__['data'][0])
                    self.assign(self.load_(holder[1], s2[0]))
                else:
                    for i in range(s1[-1]):
                        if dim2 == 1 and self.IsDynamic:
                            self.set_entry_(i, value_type.Value(o[i]))
                        else:
                            self.set_entry_(i, value_type(o[..., i]))
            elif mod == 'builtins' and name == 'PyCapsule':
                self.assign(array_from_dlpack(type(self), o))
            elif mod == 'torch':
                from torch.utils.dlpack import to_dlpack
                self.assign(array_from_dlpack(type(self), to_dlpack(o)))
            elif mod.startswith('tensorflow.'):
                from tensorflow.experimental.dlpack import to_dlpack
                self.assign(array_from_dlpack(type(self), to_dlpack(o)))
            elif mod.startswith('jax.') or mod.startswith('jaxlib.'):
                from jax.dlpack import to_dlpack
                self.assign(array_from_dlpack(type(self), to_dlpack(o)))
            else:
                raise enoki.Exception(
                    'Don\'t know how to create an Enoki array '
                    'from type "%s.%s"!' % (mod, name))
        elif n == size or dynamic:
            if dynamic:
                size = n
                self.init_(size)
            for i in range(size):
                self.set_entry_(i, value_type(args[i]))
        elif self.IsMatrix and n == self.Size * self.Size:
            # Matrix initialization from Size*Size individual entries
            tbl = [[args[i * self.Size + j] for i in range(self.Size)]
                   for j in range(self.Size)]
            array_init(self, tbl)
        else:
            raise enoki.Exception('Invalid size!')
    except Exception as e:
        err = e

    if err is not None:
        if dynamic:
            raise TypeError(
                "%s constructor expects: arbitrarily many values of type "
                "'%s', a matching list/tuple, or a NumPy/PyTorch/TF/JAX "
                "array." % (type(self).__name__,
                            value_type.__name__)) from err
        else:
            raise TypeError(
                "%s constructor expects: %s%i values of type '%s', a "
                "matching list/tuple, or a NumPy/PyTorch/TF/JAX array."
                % (type(self).__name__, "" if size == 1 else "1 or ",
                   size, value_type.__name__)) from err
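# A minimal usage sketch (hypothetical, not part of the module): every
# constructor form below funnels into array_init(). 'Array3f' and 'Matrix3f'
# are the static types from enoki.scalar; any backend would work the same way.
def _example_array_init():
    import numpy as np
    from enoki.scalar import Array3f, Matrix3f

    a = Array3f(1.0)                     # scalar broadcast to all entries
    b = Array3f(1.0, 2.0, 3.0)           # n == Size: one value per entry
    c = Array3f([1.0, 2.0, 3.0])         # matching list/tuple
    d = Array3f(np.float32([1, 2, 3]))   # NumPy path
    m = Matrix3f([[1, 0, 0],             # nested-list matrix initialization
                  [0, 1, 0],
                  [0, 0, 1]])
    return a, b, c, d, m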
def export_(a, migrate_to_host, version, owner_supported=True):
    shape = _ek.shape(a)
    ndim = len(shape)
    shape = tuple(reversed(shape))

    if not a.IsJIT:
        # F-style strides
        temp, strides = a.Type.Size, [0] * ndim

        # Enoki represents 3D arrays as 4D to leverage SIMD instructions
        padding = 1 if a.IsScalar and a.IsMatrix and shape[0] == 3 else 0

        for i in range(ndim):
            strides[i] = temp
            temp *= shape[i] + padding

        # Array is already contiguous in memory -- document its structure
        return {
            'shape': shape,
            'strides': tuple(strides),
            'typestr': '<' + a.Type.NumPy,
            'data': (a.data_(), False),
            'version': version,
            'device': -1,
            'owner': a
        }
    else:
        # C-style strides
        temp, strides = a.Type.Size, [0] * ndim

        # First dimension is the dynamic one, the rest should be
        # in reversed order
        for i in reversed(range(1, ndim)):
            strides[ndim - i] = temp
            temp *= shape[i]
        strides[0] = temp

        # JIT array -- requires extra transformations
        b = _ek.ravel(_ek.detach(a) if a.IsDiff else a)
        _ek.eval(b)

        if b.IsCUDA and migrate_to_host:
            if b is a:
                b = type(a)(b)
            b = b.migrate_(_ek.AllocType.Host)
            _ek.sync_thread()
        elif b.IsLLVM:
            _ek.sync_thread()

        if not owner_supported and a is not b:
            # If the caller cannot deal with the 'owner' field, use
            # a weak reference to keep 'b' alive while 'a' exists
            _wr.finalize(a, lambda arg: None, b)

        record = {
            'shape': shape,
            'strides': tuple(strides),
            'typestr': '<' + a.Type.NumPy,
            'data': (b.data_(), False),
            'version': version,
            'device': _ek.device(b),
            'owner': b
        }
        return record
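# A minimal usage sketch (hypothetical, not part of the module): the record
# produced by export_() follows the __array_interface__ (CPU) and
# __cuda_array_interface__ (GPU) conventions, so NumPy can wrap the buffer
# without copying. The '_Wrapper' class is purely illustrative; version 3 is
# the current __array_interface__ protocol revision.
def _example_export(a):
    import numpy as np

    class _Wrapper:
        pass

    w = _Wrapper()
    w.__array_interface__ = export_(a, migrate_to_host=True, version=3)
    return np.asarray(w)  # zero-copy view; keep 'a' alive while using it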