def make_node(self, x): x = as_tensor_variable(x) assert x.ndim == 2, "The input of svd function should be a matrix." s = aesara.tensor.vector(dtype=x.dtype) if self.compute_uv: u = aesara.tensor.matrix(dtype=x.dtype) vt = aesara.tensor.matrix(dtype=x.dtype) return Apply(self, [x], [u, s, vt]) else: return Apply(self, [x], [s])
def make_node(self, condition, *monitored_vars):

    # Ensure that condition is an Aesara tensor
    if not isinstance(condition, aesara.Variable):
        condition = tt.as_tensor_variable(condition)

    # Validate that the condition is a scalar (else it is not obvious
    # how it should be evaluated)
    assert condition.ndim == 0

    # Because the user might be tempted to instantiate PdbBreakpoint only
    # once and apply it many times on different numbers of inputs, we must
    # create a new instance of the op here, define the instance attributes
    # (view_map and var_types) in that instance and then apply it on the
    # inputs.
    new_op = PdbBreakpoint(name=self.name)
    new_op.view_map = {}
    new_op.inp_types = []
    for i in range(len(monitored_vars)):
        # Every output i is a view of the input i+1 because of the input
        # condition.
        new_op.view_map[i] = [i + 1]
        new_op.inp_types.append(monitored_vars[i].type)

    # Build the Apply node
    inputs = [condition] + list(monitored_vars)
    outputs = [inp.type() for inp in monitored_vars]
    return Apply(op=new_op, inputs=inputs, outputs=outputs)
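# A hedged sketch of why make_node above rebuilds the op: the same
# PdbBreakpoint instance may be applied to different numbers of
# monitored variables, so each application needs its own view_map.
def _pdb_breakpoint_usage_sketch():
    import aesara.tensor as tt
    from aesara.breakpoint import PdbBreakpoint  # path is an assumption

    inp = tt.fvector("inp")
    target = tt.fvector("target")
    mse = ((inp - target) ** 2).sum()
    op = PdbBreakpoint(name="MSE too high")
    # One application with two monitored variables; another call could
    # monitor three, and make_node would build a fresh view_map for it.
    monitored_mse, monitored_inp = op(tt.gt(mse, 100), mse, inp)
    return monitored_mse, monitored_inp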
def make_node(self, x, index):
    assert isinstance(x.type, TypedListType)
    if not isinstance(index, Variable):
        if isinstance(index, slice):
            index = Constant(SliceType(), index)
            return Apply(self, [x, index], [x.type()])
        else:
            index = tt.constant(index, ndim=0, dtype="int64")
            return Apply(self, [x, index], [x.ttype()])
    if isinstance(index.type, SliceType):
        return Apply(self, [x, index], [x.type()])
    elif isinstance(index, tt.TensorVariable) and index.ndim == 0:
        assert index.dtype == "int64"
        return Apply(self, [x, index], [x.ttype()])
    else:
        raise TypeError("Expected scalar or slice as index.")
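# A hedged sketch of the dispatch above: a scalar index extracts one
# element, so the output has the list's element type (x.ttype), while a
# slice extracts a sub-list, so the output keeps the list type (x.type).
# The import paths and the `GetItem` name are assumptions.
def _getitem_usage_sketch():
    import aesara.tensor as tt
    from aesara.typed_list.basic import GetItem
    from aesara.typed_list.type import TypedListType

    lst = TypedListType(tt.TensorType("float64", (False,)))()
    elem = GetItem()(lst, 1)  # a float64 vector (element type)
    sub = GetItem()(lst, slice(0, 2))  # still a typed list
    return elem, sub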
def make_node(self, x):
    if self.axis.keys() and (x.ndim <= max(self.axis.keys())):
        raise ValueError("Trying to rebroadcast non-existent dimension")
    t = x.type.clone(
        broadcastable=[
            self.axis.get(i, b) for i, b in enumerate(x.type.broadcastable)
        ]
    )
    return Apply(self, [x], [t()])
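# A hedged sketch: `self.axis` is assumed to be a dict mapping a
# dimension index to its new broadcastable flag, as in the Rebroadcast
# op (name, path, and constructor form are assumptions).
def _rebroadcast_usage_sketch():
    import aesara
    from aesara.tensor.basic import Rebroadcast  # path is an assumption

    x = aesara.tensor.matrix("x")  # broadcastable == (False, False)
    y = Rebroadcast((0, True))(x)  # broadcastable == (True, False)
    return y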
def make_node(self, x, index, toInsert):
    assert isinstance(x.type, TypedListType)
    assert x.ttype == toInsert.type
    if not isinstance(index, Variable):
        index = tt.constant(index, ndim=0, dtype="int64")
    else:
        assert index.dtype == "int64"
        assert isinstance(index, tt.TensorVariable) and index.ndim == 0
    return Apply(self, [x, index, toInsert], [x.type()])
def make_node(self, x, shape):
    if not isinstance(x, Variable):
        x = aesara.tensor.as_tensor_variable(x)
    shape = aesara.tensor.as_tensor_variable(shape)
    assert shape.ndim == 1
    assert shape.dtype in aesara.tensor.integer_dtypes
    if isinstance(shape, aesara.tensor.TensorConstant):
        assert shape.data.size == x.ndim
    return Apply(self, [x, shape], [x.type()])
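# A hedged sketch: this make_node attaches a static shape assertion to
# the graph, SpecifyShape-style (name and path assumed); with a constant
# shape the length check happens right here, otherwise at execution time.
def _specify_shape_usage_sketch():
    import aesara
    from aesara.tensor.basic import SpecifyShape  # path is an assumption

    x = aesara.tensor.matrix("x")
    y = SpecifyShape()(x, (2, 3))  # the tuple becomes a constant vector
    return y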
def make_node(self, M):
    M = basic.as_tensor_variable(M)
    if M.ndim != 0:
        raise TypeError(
            "%s only works on scalar input" % self.__class__.__name__
        )
    elif M.dtype not in aesara.tensor.integer_dtypes:
        # dtype is an Aesara attribute here
        raise TypeError(
            "%s only works on integer input" % self.__class__.__name__
        )
    return Apply(self, [M], [basic.dvector()])
def make_node(self, x): x = as_tensor_variable(x) assert x.ndim == 2, "The input of qr function should be a matrix." q = aesara.tensor.matrix(dtype=x.dtype) if self.mode != "raw": r = aesara.tensor.matrix(dtype=x.dtype) else: r = aesara.tensor.vector(dtype=x.dtype) return Apply(self, [x], [q, r])
def make_node(self, x, w, v, gw, gv):
    x, w, v, gw, gv = map(as_tensor_variable, (x, w, v, gw, gv))
    assert x.ndim == 2
    assert w.ndim == 1
    assert v.ndim == 2
    assert gw.ndim == 1
    assert gv.ndim == 2
    out_dtype = aesara.scalar.upcast(
        x.dtype, w.dtype, v.dtype, gw.dtype, gv.dtype
    )
    out = aesara.tensor.matrix(dtype=out_dtype)
    return Apply(self, [x, w, v, gw, gv], [out])
def make_node(self, _x):
    warnings.warn(
        "DeprecationWarning: aesara.tensor.nlinalg.AllocDiag "
        "is deprecated, please use aesara.tensor.AllocDiag "
        "instead.",
        category=DeprecationWarning,
    )
    x = as_tensor_variable(_x)
    if x.type.ndim != 1:
        raise TypeError("AllocDiag only works on vectors", _x)
    return Apply(self, [x], [aesara.tensor.matrix(dtype=x.type.dtype)])
def make_node(self, slc, stop=None, step=None):
    # make_node must accept the same inputs as the resulting node's
    # inputs, so that an optimization can rebuild the op elsewhere in
    # the graph from an existing node.
    if isinstance(slc, slice):
        assert stop is None
        assert step is None
        inp = [slc.start, slc.stop, slc.step]
    else:
        inp = [slc, stop, step]
    return Apply(self, list(map(as_int_none_variable, inp)), [slicetype()])
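# A hedged sketch of the two accepted call forms (the MakeSlice name and
# path are assumptions): a ready-made Python slice, or start/stop/step
# passed separately.  Both build the same node, which is what lets a
# rewrite reconstruct the op from a node's own inputs.
def _make_slice_usage_sketch():
    from aesara.tensor.type_other import MakeSlice  # path is an assumption

    s1 = MakeSlice()(slice(0, 10, 2))
    s2 = MakeSlice()(0, 10, 2)
    return s1, s2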
def make_node(self, x):
    x = as_tensor_variable(x)
    assert x.ndim == 2
    # Numpy's linalg.eigh may return either double or single
    # precision eigenvalues depending on the installed version of
    # LAPACK.  Rather than trying to reproduce the (rather
    # involved) logic, we just probe linalg.eigh with a trivial
    # input.
    w_dtype = self._numop([[np.dtype(x.dtype).type()]])[0].dtype.name
    w = aesara.tensor.vector(dtype=w_dtype)
    v = aesara.tensor.matrix(dtype=x.dtype)
    return Apply(self, [x], [w, v])
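# The dtype probe above boils down to this minimal, runnable sketch: ask
# numpy.linalg.eigh (the assumed `self._numop`) for the eigenvalues of a
# trivial 1x1 matrix and read off the dtype it picked.
def _eigh_dtype_probe_sketch():
    import numpy as np

    w, _v = np.linalg.eigh([[np.float32(0)]])
    return w.dtype  # float32 or float64, depending on the installed LAPACK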
def make_node(self, a):
    assert isinstance(a, (tuple, list))
    a2 = []
    for elem in a:
        if not isinstance(elem, aesara.gof.Variable):
            elem = tt.as_tensor_variable(elem)
        a2.append(elem)
    if not all(a2[0].type == elem.type for elem in a2):
        raise TypeError(
            "MakeList requires all input variables to be of the same type."
        )
    tl = aesara.typed_list.TypedListType(a2[0].type)()
    return Apply(self, a2, [tl])
def make_node(self, X):
    context_name = infer_context_name(X)
    # We keep the original broadcastable flags for dimensions on which
    # we do not perform the max / argmax.
    all_axes = set(self.axis)
    broadcastable = [
        b for i, b in enumerate(X.type.broadcastable) if i not in all_axes
    ]
    inputs = [as_gpuarray_variable(X, context_name)]
    outputs = [
        GpuArrayType(X.type.dtype, broadcastable, context_name=context_name)(),
        GpuArrayType(
            self.argmax_dtype, broadcastable, context_name=context_name
        )(),
    ]
    return Apply(self, inputs, outputs)
def make_node(self, a, val):
    a = basic.as_tensor_variable(a)
    val = basic.as_tensor_variable(val)
    if a.ndim < 2:
        raise TypeError(
            "%s: first parameter must have at least"
            " two dimensions" % self.__class__.__name__
        )
    elif val.ndim != 0:
        raise TypeError(
            "%s: second parameter must be a scalar" % self.__class__.__name__
        )
    val = basic.cast(val, dtype=upcast(a.dtype, val.dtype))
    if val.dtype != a.dtype:
        raise TypeError(
            "%s: type of second parameter must be the same as"
            " the first's" % self.__class__.__name__
        )
    return Apply(self, [a, val], [a.type()])
def make_node(self, frames, n, axis):
    """
    Compute an n-point fft of frames along given axis.

    """
    _frames = tensor.as_tensor(frames, ndim=2)
    _n = tensor.as_tensor(n, ndim=0)
    _axis = tensor.as_tensor(axis, ndim=0)
    if self.half and _frames.type.dtype.startswith("complex"):
        raise TypeError("Argument to HalfFFT must not be complex", frames)
    spectrogram = tensor.zmatrix()
    buf = generic()
    # The `buf` output is present for future work
    # when we call FFTW directly and re-use the 'plan' that FFTW creates.
    # In that case, buf would store a CObject encapsulating the plan.
    rval = Apply(self, [_frames, _n, _axis], [spectrogram, buf])
    return rval
def make_node(self, indices, dims):
    indices = basic.as_tensor_variable(indices)
    dims = basic.as_tensor_variable(dims)

    if indices.dtype not in basic.int_dtypes:
        raise TypeError(
            "'%s' object cannot be interpreted as an index" % str(indices.dtype)
        )
    if dims.dtype not in basic.int_dtypes:
        raise TypeError(
            "'%s' object cannot be interpreted as an index" % str(dims.dtype)
        )
    if dims.ndim != 1:
        raise TypeError("dims must be a 1D array")

    return Apply(
        self,
        [indices, dims],
        [
            basic.TensorType(dtype="int64", broadcastable=(False,) * indices.ndim)()
            for i in range(basic.get_vector_length(dims))
        ],
    )
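# A hedged usage sketch mirroring np.unravel_index, which this op is
# assumed to wrap: a dims vector of static length k yields k int64
# outputs, one coordinate tensor per dimension.  Names and the import
# path are assumptions.
def _unravel_index_usage_sketch():
    import aesara.tensor.basic as basic
    from aesara.tensor.extra_ops import UnravelIndex  # path is an assumption

    indices = basic.as_tensor_variable([3, 7])
    dims = basic.as_tensor_variable([4, 4])
    rows, cols = UnravelIndex()(indices, dims)  # two outputs for two dims
    return rows, cols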
def make_node(self, c, *args):
    assert (
        len(args) == 2 * self.n_outs
    ), "Wrong number of arguments to make_node: expected %d, got %d" % (
        2 * self.n_outs,
        len(args),
    )
    c = aesara.tensor.as_tensor_variable(c)
    if not self.gpu:
        # When gpu is true, we are given only gpuarrays, and we want
        # to keep them as gpuarrays
        nw_args = []
        for x in args:
            if hasattr(x, "_as_TensorVariable"):
                nw_args.append(x._as_TensorVariable())
            elif isinstance(x, aesara.Variable):
                nw_args.append(x)
            else:
                nw_args.append(aesara.tensor.as_tensor_variable(x))
        args = nw_args
    ts = args[: self.n_outs]
    fs = args[self.n_outs :]

    for t, f in zip(ts, fs):
        if t.type != f.type:
            raise TypeError(
                (
                    "IfElse requires same types for true and "
                    "false return values"
                ),
                t,
                f,
                t.type,
                f.type,
            )
    if c.ndim > 0:
        raise TypeError(
            "Condition given to the op has to be a scalar "
            "with 0 standing for False, anything else for True"
        )
    return Apply(self, [c] + list(args), [t.type() for t in ts])
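# A hedged sketch for n_outs == 2: the variadic args are laid out as all
# the "true" branch values followed by all the "false" branch values,
# which is why args is split into ts and fs at self.n_outs.  The import
# path is an assumption.
def _ifelse_usage_sketch():
    import aesara.tensor as tt
    from aesara.ifelse import IfElse  # path is an assumption

    cond = tt.iscalar("cond")
    t1, t2 = tt.vector("t1"), tt.vector("t2")
    f1, f2 = tt.vector("f1"), tt.vector("f2")
    out1, out2 = IfElse(n_outs=2)(cond, t1, t2, f1, f2)
    return out1, out2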
def make_node(self, a, val, offset):
    a = basic.as_tensor_variable(a)
    val = basic.as_tensor_variable(val)
    offset = basic.as_tensor_variable(offset)
    if a.ndim != 2:
        raise TypeError(
            "%s: first parameter must have exactly"
            " two dimensions" % self.__class__.__name__
        )
    elif val.ndim != 0:
        raise TypeError(
            "%s: second parameter must be a scalar" % self.__class__.__name__
        )
    elif offset.ndim != 0:
        raise TypeError(
            "%s: third parameter must be a scalar" % self.__class__.__name__
        )
    val = basic.cast(val, dtype=upcast(a.dtype, val.dtype))
    if val.dtype != a.dtype:
        raise TypeError(
            "%s: type of second parameter must be the same"
            " as the first's" % self.__class__.__name__
        )
    elif offset.dtype not in aesara.tensor.integer_dtypes:
        raise TypeError(
            "%s: type of third parameter must be an integer;"
            " use aesara.tensor.cast(input, 'int32'/'int64')"
            % self.__class__.__name__
        )
    return Apply(self, [a, val, offset], [a.type()])
def make_node(self, *inp):
    multi_index = [basic.as_tensor_variable(i) for i in inp[:-1]]
    dims = basic.as_tensor_variable(inp[-1])

    for i in multi_index:
        if i.dtype not in basic.int_dtypes:
            raise TypeError(
                "'%s' object cannot be interpreted as an index" % str(i.dtype)
            )
    if dims.dtype not in basic.int_dtypes:
        raise TypeError(
            "'%s' object cannot be interpreted as an index" % str(dims.dtype)
        )
    if dims.ndim != 1:
        raise TypeError("dims must be a 1D array")

    return Apply(
        self,
        multi_index + [dims],
        [
            basic.TensorType(
                dtype="int64", broadcastable=(False,) * multi_index[0].ndim
            )()
        ],
    )
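# A hedged sketch, the inverse of the UnravelIndex op above (the
# RavelMultiIndex name and path are assumptions): every input but the
# last is a coordinate tensor for one dimension, the last input is the
# dims vector, and a single int64 tensor of flat indices comes out.
def _ravel_multi_index_usage_sketch():
    import aesara.tensor.basic as basic
    from aesara.tensor.extra_ops import RavelMultiIndex  # path assumed

    rows = basic.as_tensor_variable([0, 1])
    cols = basic.as_tensor_variable([3, 3])
    dims = basic.as_tensor_variable([4, 4])
    return RavelMultiIndex()(rows, cols, dims)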
def make_node(self, x):
    x = tensor.as_tensor_variable(x)
    return Apply(self, [x], [x.type()])
def make_node(self, x, i0, i1, amt):
    _i0 = tensor.as_tensor_variable(i0)
    _i1 = tensor.as_tensor_variable(i1)
    return Apply(self, [x, _i0, _i1, amt], [x.type()])
def make_node(self, x):
    return Apply(self, [x], [x.type()])
def make_node(self, xin):
    xout = xin.type()
    return Apply(op=self, inputs=[xin], outputs=[xout])
def make_node(self, input):
    return Apply(self, [input], [input.type()])
def make_node(self, input):
    input = scalar.as_scalar(input)
    output = input.type()
    return Apply(self, [input], [output])
def make_node(self, a, b):
    a = as_tensor_variable(a)
    b = as_tensor_variable(b)
    out_dtype = aesara.scalar.upcast(a.dtype, b.dtype)
    x = aesara.tensor.matrix(dtype=out_dtype)
    return Apply(self, [a, b], [x])
def make_node(self, c1, t1, c2, t2, c3, t3, f3):
    # Taken together, these asserts force all four branch values
    # (t1, t2, t3, f3) to share a single type.
    assert t1.type == f3.type
    assert t2.type == t3.type
    assert t3.type == f3.type
    return Apply(self, [c1, t1, c2, t2, c3, t3, f3], [t1.type()])
def make_node(self, x):
    x = as_tensor_variable(x)
    assert x.ndim == 2
    return Apply(self, [x], [x.type()])
def make_node(self, x):
    # Must work for all types that have a shape attribute.
    # Anything else will fail at execution time.
    if not isinstance(x, aesara.Variable):
        x = aesara.tensor.as_tensor_variable(x)
    return Apply(self, [x], [aesara.tensor.lvector()])