def array_from_dlpack(t, capsule):
    """
    Construct an Enoki array of type ``t`` from the tensor wrapped in the
    DLPack ``capsule``.

    The tensor memory is mapped zero-copy via ``map_``; non-trivial strides
    are then resolved with a gather pass in the recursive ``load`` helper.
    Raises ``enoki.Exception`` on device, type, dimension or shape mismatch.
    """
    descr = enoki.detail.from_dlpack(capsule)

    device_type = descr['device_type']
    data = descr['data']        # base address of the tensor storage
    dtype = descr['dtype']
    shape = descr['shape']
    ndim = len(shape)
    strides = descr['strides']

    # DLPack permits omitting strides; synthesize C-contiguous (row-major)
    # strides in that case
    if strides is None:
        tmp = 1
        strides = [0] * ndim
        for i in reversed(range(ndim)):
            strides[i] = tmp
            tmp *= shape[i]

    # DLPack device types: 1 == CPU, 2 == CUDA (cf. the messages below)
    if t.IsCUDA and device_type != 2:
        raise enoki.Exception("Cannot create an Enoki GPU array from a "
                              "DLPack CPU tensor!")
    elif not t.IsCUDA and device_type != 1:
        raise enoki.Exception("Cannot create an Enoki CPU array from a "
                              "DLPack GPU tensor!")

    if dtype != t.Type:
        raise enoki.Exception("Incompatible type!")

    # Enoki shapes are reversed relative to the DLPack convention;
    # a dimension of 0 in the target marks a dynamic (any-size) axis
    shape_target = list(reversed(enoki.shape(t())))
    if len(shape_target) != ndim:
        raise enoki.Exception("Incompatible dimension!")

    for i in range(ndim):
        if shape_target[i] != shape[i] and shape_target[i] != 0:
            raise enoki.Exception("Incompatible shape!")

    # Descend to the innermost (leaf) array type of 't'
    value = t
    while issubclass(value.Value, enoki.ArrayBase):
        value = value.Value

    # Take ownership of the capsule, then map the flat buffer; 'release'
    # is invoked by Enoki when the mapping is no longer needed
    descr['consume'](capsule)
    data = value.map_(data, enoki.hprod(shape), descr['release'])

    def load(t, i, offset):
        # Recursively rebuild the nested array structure; 'i' counts axes
        # starting from the *last* DLPack axis (= outermost Enoki axis)
        size = shape[-1 - i]
        stride = strides[-1 - i]

        if i == ndim - 1:
            if type(offset) is int and stride == 1:
                # Contiguous leaf: reuse the mapped buffer directly.
                # NOTE(review): a nonzero integer 'offset' also takes this
                # branch and returns the *full* mapped array — presumably
                # unreachable for C-contiguous inputs (leaf stride != 1
                # when ndim > 1); confirm for Fortran-ordered tensors.
                return data
            else:
                # NOTE(review): 'i' is deliberately rebound here to an
                # index array; the axis counter is dead in this branch
                i = enoki.arange(enoki.int32_array_t(t), size)
                return t.gather_(data, offset + stride * i, True, False)
        else:
            # Static outer dimension: fill each component recursively
            result = t()
            for j in range(size):
                result[j] = load(t.Value, i + 1, offset + stride * j)
            return result

    return load(t, 0, 0)
def write_indices(indices, *args):
    """
    Write JIT/AD variable indices from the queue ``indices`` back into the
    supplied loop variables.

    Recurses through nested arrays, tuples/lists (evaluating lambda-wrapped
    entries), and custom Enoki structs; consumes one ``(jit, ad)`` pair from
    the front of ``indices`` per leaf variable.
    """
    for arg in args:
        if enoki.is_array_v(arg):
            if arg.Depth > 1:
                # Nested array: descend into each component
                for slot in range(len(arg)):
                    write_indices(indices, arg.entry_ref_(slot))
            elif arg.IsDiff:
                # Recorded loops cannot be differentiated as a whole
                if enoki.grad_enabled(arg) and enoki.flag(
                        enoki.JitFlag.LoopRecord):
                    raise enoki.Exception(
                        "write_indices(): one of the supplied loop "
                        "variables is attached to the AD graph (i.e. "
                        "grad_enabled(..) is true). However, recorded "
                        "loops cannot be differentiated in their entirety. "
                        "You have two options: either disable loop "
                        "recording via set_flag(JitFlag.LoopRecord, "
                        "False). Alternatively, you could implement the "
                        "adjoint of the loop using ek::CustomOp.")
                jit_index, ad_index = indices.pop(0)
                arg.set_index_(jit_index)
                arg.set_index_ad_(ad_index)
            elif arg.IsJIT:
                jit_index, ad_index = indices.pop(0)
                arg.set_index_(jit_index)
                # Plain JIT arrays must not carry an AD index
                assert ad_index == 0
        elif isinstance(arg, (tuple, list)):
            for item in arg:
                # Lambda entries are called to obtain the actual variable
                if getattr(item, '__name__', None) == '<lambda>':
                    write_indices(indices, item())
                else:
                    write_indices(indices, item)
        elif enoki.is_enoki_struct_v(arg):
            for field in type(arg).ENOKI_STRUCT:
                write_indices(indices, getattr(arg, field))
def read_indices(*args):
    """
    Collect the ``(jit_index, ad_index)`` pairs of every JIT/diff variable
    reachable from ``args``.

    Recurses through nested arrays, tuples/lists (evaluating lambda-wrapped
    entries), and custom Enoki structs; plain JIT variables report an AD
    index of 0.
    """
    found = []
    for arg in args:
        if enoki.is_array_v(arg):
            if arg.Depth > 1:
                # Nested array: descend into each component
                for slot in range(len(arg)):
                    found += read_indices(arg.entry_ref_(slot))
            elif arg.IsDiff:
                # Recorded loops cannot be differentiated as a whole
                if enoki.grad_enabled(arg) and enoki.flag(
                        enoki.JitFlag.LoopRecord):
                    raise enoki.Exception(
                        "read_indices(): one of the supplied loop "
                        "variables is attached to the AD graph (i.e. "
                        "grad_enabled(..) is true). However, recorded "
                        "loops cannot be differentiated in their entirety. "
                        "You have two options: either disable loop "
                        "recording via set_flag(JitFlag.LoopRecord, "
                        "False). Alternatively, you could implement the "
                        "adjoint of the loop using ek::CustomOp.")
                found.append((arg.index(), arg.index_ad()))
            elif arg.IsJIT:
                found.append((arg.index(), 0))
        elif isinstance(arg, (tuple, list)):
            for item in arg:
                # Lambda entries are called to obtain the actual variable
                if getattr(item, '__name__', None) == '<lambda>':
                    found += read_indices(item())
                else:
                    found += read_indices(item)
        elif enoki.is_enoki_struct_v(arg):
            for field in type(arg).ENOKI_STRUCT:
                found += read_indices(getattr(arg, field))
    return found
def write_indices(indices, *args):
    """
    Write JIT variable indices from the queue ``indices`` back into the
    supplied loop variables (simple variant: one plain index per leaf).

    Recurses through nested arrays, tuples/lists, and custom Enoki structs;
    differentiable arrays are handled via their detached counterpart and
    must not have gradients enabled.
    """
    for arg in args:
        if enoki.is_array_v(arg):
            if arg.Depth > 1:
                # Nested array: descend into each component
                for slot in range(len(arg)):
                    write_indices(indices, arg.entry_ref_(slot))
            elif arg.IsDiff:
                if enoki.grad_enabled(arg):
                    raise enoki.Exception(
                        'Symbolic loop encountered a differentiable array '
                        'with enabled gradients! This is not supported.')
                # Operate on the detached (plain JIT) view
                write_indices(indices, arg.detach_())
            elif arg.IsJIT:
                arg.set_index_(indices.pop(0))
        elif isinstance(arg, (tuple, list)):
            for item in arg:
                write_indices(indices, item)
        elif enoki.is_enoki_struct_v(arg):
            for field in type(arg).ENOKI_STRUCT:
                write_indices(indices, getattr(arg, field))
        else:
            # Debug aid for unsupported loop-variable types
            print(" do not know what to do with %s\n" % str(arg))
def read_indices(*args):
    """
    Collect the plain JIT indices of every variable reachable from ``args``
    (simple variant: one index per leaf).

    Recurses through nested arrays, tuples/lists, and custom Enoki structs;
    differentiable arrays are read via their detached counterpart and must
    not have gradients enabled.
    """
    found = []
    for arg in args:
        if enoki.is_array_v(arg):
            if arg.Depth > 1:
                # Nested array: descend into each component
                for slot in range(len(arg)):
                    found += read_indices(arg.entry_ref_(slot))
            elif arg.IsDiff:
                if enoki.grad_enabled(arg):
                    raise enoki.Exception(
                        'Symbolic loop encountered a differentiable array '
                        'with enabled gradients! This is not supported.')
                # Operate on the detached (plain JIT) view
                found += read_indices(arg.detach_())
            elif arg.IsJIT:
                found.append(arg.index())
        elif isinstance(arg, (tuple, list)):
            for item in arg:
                found += read_indices(item)
        elif enoki.is_enoki_struct_v(arg):
            for field in type(arg).ENOKI_STRUCT:
                found += read_indices(getattr(arg, field))
        else:
            # Debug aid for unsupported loop-variable types
            print(" do not know what to do with %s\n" % str(arg))
    return found
def array_init(self, args):
    """
    This generic initialization routine initializes an arbitrary Enoki array
    from a variable-length argument list (which could be a scalar broadcast,
    a component list, or a NumPy/PyTorch/Tensorflow array..)

    Any failure during initialization is converted into a uniform TypeError
    (chained to the original exception) describing the accepted inputs.
    """
    n = len(args)
    if n == 0:
        return

    size = self.Size
    value_type = self.Value
    dynamic = size == enoki.Dynamic
    err = None

    try:
        if n == 1:
            o = args[0]
            t = type(o)
            mod = t.__module__
            name = t.__name__
            is_array = issubclass(t, enoki.ArrayBase)
            # NOTE(review): 'is_static_array' is computed but never used below
            is_static_array = is_array and not o.Size == enoki.Dynamic
            is_sequence = issubclass(t, list) or issubclass(t, tuple)

            # Matrix initialization from nested list
            if is_sequence and self.IsMatrix and \
               len(o) == size and sub_len(o) == size:
                for x in range(size):
                    for y in range(size):
                        self[x, y] = value_type.Value(o[x][y])
            elif is_array or is_sequence:
                os = len(o)
                if dynamic:
                    size = os
                    self.init_(size)
                if size == 0:
                    pass
                elif size != os or value_type is t:
                    # Size mismatch!
                    if self.IsMatrix and getattr(t, 'IsMatrix', False):
                        # If both are matrices, copy the top-left block
                        # and pad with the identity matrix
                        for x in range(size):
                            for y in range(size):
                                if x < o.Size and y < o.Size:
                                    self[x, y] = value_type.Value(o[x, y])
                                else:
                                    self[x, y] = value_type.Value(
                                        1 if x == y else 0)
                    elif self.IsMatrix and value_type is t:
                        # Replicate a column-typed value into every column
                        for x in range(size):
                            self[x] = o
                    else:
                        # Otherwise, try to broadcast to all entries
                        self.broadcast_(
                            value_type(o)
                            if not issubclass(t, value_type)
                            and not self.IsMatrix else o)
                else:
                    # Size matches, copy element by element
                    if self.IsJIT and getattr(t, 'IsJIT', False) and \
                       self.Depth == 1 and t.Depth == 1:
                        raise enoki.Exception(
                            'Refusing to do an extremely inefficient '
                            'element-by-element array conversion from type %s '
                            'to %s. Did you forget a cast or detach operation?'
                            % (str(type(o)), str(type(self))))
                    if isinstance(o[0], value_type) or self.IsMatrix:
                        for i in range(size):
                            self.set_entry_(i, o[i])
                    else:
                        for i in range(size):
                            self.set_entry_(i, value_type(o[i]))
            elif issubclass(t, (int, float)):
                # Scalar broadcast (dynamic arrays collapse to one entry)
                if dynamic:
                    size = 1
                    self.init_(size)
                self.broadcast_(o)
            elif issubclass(t, complex) and self.IsComplex:
                # Python complex -> (real, imag) component pair
                self.set_entry_(0, o.real)
                self.set_entry_(1, o.imag)
            elif mod == 'numpy':
                import numpy as np
                # Enoki shapes are reversed relative to NumPy's convention
                s1 = tuple(reversed(enoki.shape(self)))
                s2 = o.shape

                # Remove unnecessary outer dimension if possible
                if s2[0] == 1:
                    o = o[0, ...]
                    s2 = o.shape

                # Reinterpret complex inputs as an extra trailing axis of
                # two real components
                if o.dtype == np.complex64:
                    s2 = (*s2, 2)
                    o = o.view(np.float32).reshape(s2)
                elif o.dtype == np.complex128:
                    s2 = (*s2, 2)
                    o = o.view(np.float64).reshape(s2)

                if o.dtype != self.Type.NumPy:
                    o = o.astype(self.Type.NumPy)

                dim1 = len(s1)
                dim2 = len(s2)

                # Numpy array might have one dimension less when
                # initializing dynamic arrays
                if not dim1 == dim2 and not (dim1 == dim2 + 1
                                             and self.IsDynamic):
                    raise enoki.Exception("Incompatible dimension!")
                # A zero in the Enoki shape marks a dynamic (any-size) axis
                for i in reversed(range(dim2)):
                    if s1[i] != s2[i] and s1[i] != 0:
                        raise enoki.Exception("Incompatible shape!")

                if dim1 == 0:
                    pass
                elif dim1 == 1 and self.IsDynamic:
                    # Fast path: bulk-load from the contiguous buffer.
                    # NOTE(review): 'holder' pairs 'o' with its data
                    # address but goes out of scope on return — presumably
                    # load_ copies the data immediately; confirm
                    o = np.ascontiguousarray(o)
                    holder = (o, o.__array_interface__['data'][0])
                    self.assign(self.load_(holder[1], s2[0]))
                else:
                    # Fall back to per-component initialization along the
                    # outermost Enoki axis
                    for i in range(s1[-1]):
                        if dim2 == 1 and self.IsDynamic:
                            self.set_entry_(i, value_type.Value(o[i]))
                        else:
                            self.set_entry_(i, value_type(o[..., i]))
            elif mod == 'builtins' and name == 'PyCapsule':
                # Raw DLPack capsule
                self.assign(array_from_dlpack(type(self), o))
            elif mod == 'torch':
                # PyTorch tensor via the DLPack protocol
                from torch.utils.dlpack import to_dlpack
                self.assign(array_from_dlpack(type(self), to_dlpack(o)))
            elif mod.startswith('tensorflow.'):
                # TensorFlow tensor via the DLPack protocol
                from tensorflow.experimental.dlpack import to_dlpack
                self.assign(array_from_dlpack(type(self), to_dlpack(o)))
            elif mod.startswith('jax.') or mod.startswith('jaxlib.'):
                # JAX array via the DLPack protocol
                from jax.dlpack import to_dlpack
                self.assign(array_from_dlpack(type(self), to_dlpack(o)))
            else:
                raise enoki.Exception(
                    'Don\'t know how to create an Enoki array '
                    'from type \"%s.%s\"!' % (mod, name))
        elif n == size or dynamic:
            # One argument per entry
            if dynamic:
                size = n
                self.init_(size)
            for i in range(size):
                self.set_entry_(i, value_type(args[i]))
        elif self.IsMatrix and n == self.Size * self.Size:
            # Flat list of matrix coefficients: regroup into nested rows
            # (note the transposed indexing) and recurse
            tbl = [[args[i * self.Size + j] for i in range(self.Size)]
                   for j in range(self.Size)]
            array_init(self, tbl)
        else:
            raise enoki.Exception('Invalid size!')
    except Exception as e:
        # Defer: re-raised below as a uniform TypeError with context
        err = e

    if err is not None:
        if dynamic:
            raise TypeError(
                "%s constructor expects: arbitrarily many values "
                "of type '%s', a matching list/tuple, or a NumPy/"
                "PyTorch/TF/Jax array." % (type(self).__name__,
                                           value_type.__name__)) from err
        else:
            raise TypeError(
                "%s constructor expects: %s%i values "
                "of type '%s', a matching list/tuple, or a NumPy/"
                "PyTorch/TF/Jax array." % (type(self).__name__,
                                           "" if size == 1 else "1 or ",
                                           size,
                                           value_type.__name__)) from err