# Closure note: `cls`, `alpha_in`, `beta_in`, and `maker` are free variables
# supplied by the enclosing wrapper that builds this rewrite.
def opt(fgraph, node):
    if (
        isinstance(node.op, GpuElemwise)
        and node.op.scalar_op == aes.mul
        and node.nin == 2
    ):
        targ = find_node(fgraph, node.inputs[0], cls)
        if targ is None:
            targ = find_node(fgraph, node.inputs[1], cls)
            if targ is None:
                return
            lr = grab_cpu_scalar(node.inputs[0], nd=targ.outputs[0].ndim)
        else:
            lr = grab_cpu_scalar(node.inputs[1], nd=targ.outputs[0].ndim)
        if lr is None or lr.dtype != targ.outputs[0].dtype:
            return None
        inputs = list(targ.inputs)
        try:
            c = get_scalar_constant_value(lr)
            if c == 0:
                inputs[alpha_in] = lr
                inputs[beta_in] = lr
            elif c == 1:
                inputs[alpha_in] = targ.inputs[alpha_in]
                inputs[beta_in] = targ.inputs[beta_in]
            else:
                inputs[alpha_in] = lr * targ.inputs[alpha_in]
                inputs[beta_in] = lr * targ.inputs[beta_in]
        except NotScalarConstantError:
            # Non-constant scalar: fold it into both coefficients.
            inputs[alpha_in] = lr * targ.inputs[alpha_in]
            inputs[beta_in] = lr * targ.inputs[beta_in]
        with inherit_stack_trace(node.outputs):
            return maker(targ, *inputs)
def make_node(self, x, *shape):
    from aesara.tensor.basic import get_scalar_constant_value

    x = at.as_tensor_variable(x)
    shape = tuple(
        NoneConst
        if (s is None or NoneConst.equals(s))
        else at.as_tensor_variable(s, ndim=0)
        for s in shape
    )

    if any(
        s.dtype not in aesara.tensor.type.integer_dtypes
        for s in shape
        if hasattr(s, "dtype")
    ):
        raise TypeError("Shape values must be integer types")

    if len(shape) != x.type.ndim:
        raise ValueError(
            f"Input `x` is {x.type.ndim}-dimensional and will never match a "
            f"shape of length {len(shape)}."
        )

    type_shape = [None] * x.ndim
    for i, (xts, s) in enumerate(zip(x.type.shape, shape)):
        if xts is not None:
            type_shape[i] = xts
        else:
            try:
                type_s = get_scalar_constant_value(s)
                if type_s is not None:
                    type_shape[i] = int(type_s)
            except NotScalarConstantError:
                pass

    out_var = x.type.clone(shape=type_shape)()

    return Apply(self, [x, *shape], [out_var])
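# Usage sketch for the static-shape folding above (assumes the public
# `aesara.tensor` API, imported as `at`, and a version where `TensorType`
# carries a static `shape`; illustrative only):
#
#     >>> x = at.matrix("x")                  # static shape (None, None)
#     >>> y = at.specify_shape(x, (None, 3))  # the constant 3 is folded in
#     >>> y.type.shape
#     (None, 3)
#
# A symbolic shape entry such as `at.iscalar()` raises
# `NotScalarConstantError` inside `get_scalar_constant_value`, so the
# corresponding static entry stays `None`.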
def compute_bcast(self, dist_params, size):
    """Compute the broadcast array for this distribution's `TensorType`.

    Parameters
    ----------
    dist_params: list
        Distribution parameters.
    size: int or Sequence (optional)
        NumPy-like size of the output (i.e. replications).

    """
    shape = self._infer_shape(size, dist_params)

    # Let's try to do a better job than `_infer_ndim_bcast` when
    # dimension sizes are symbolic.
    bcast = []
    for s in shape:
        s_owner = getattr(s, "owner", None)

        # Get rid of the `Assert`s added by `broadcast_shape`
        if s_owner and isinstance(s_owner.op, Assert):
            s = s_owner.inputs[0]

        try:
            s_val = get_scalar_constant_value(s)
        except NotScalarConstantError:
            s_val = False

        bcast += [s_val == 1]

    return bcast
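# Behavior sketch: a dimension is marked broadcastable exactly when its size
# folds to the constant 1. For example, with `size=(1, 3)` the inferred shape
# folds to constants `1` and `3`, yielding the pattern `[True, False]`; a
# symbolic dimension can never be proven equal to 1, so it yields `False`.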
def infer_shape(self, fgraph, node, shapes):
    xshape, sshape = shapes
    new_shape = []
    for dim in range(node.inputs[0].ndim):
        try:
            s = aet.get_scalar_constant_value(node.inputs[1][dim])
            s = aet.as_tensor_variable(s)
            new_shape.append(s)
        except NotScalarConstantError:
            new_shape.append(node.inputs[1][dim])

    assert len(new_shape) == len(xshape)
    return [new_shape]
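# Contract note for `infer_shape` (this and the variants below): it receives
# one symbolic shape per input and must return one shape per output. Folding
# entries to constants with `get_scalar_constant_value` lets downstream shape
# rewrites simplify the graph; entries that stay symbolic are passed through
# unchanged.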
def infer_shape(self, fgraph, node, input_shapes):
    _, size, _, *dist_params = node.inputs
    _, size_shape, _, *param_shapes = input_shapes

    try:
        size_len = get_vector_length(size)
    except ValueError:
        # `size` has no static length; fall back to the (constant) length
        # of its shape.
        size_len = get_scalar_constant_value(size_shape[0])

    size = tuple(size[n] for n in range(size_len))

    shape = self._infer_shape(size, dist_params, param_shapes=param_shapes)

    return [None, list(shape)]
def is_positive(v):
    if hints(v).get("positive", False):
        return True
    # TODO: how to handle this - a registry? infer_hints on Ops?
    logger.debug(f"is_positive: {v}")
    if v.owner and v.owner.op == aet_pow:
        try:
            exponent = aet.get_scalar_constant_value(v.owner.inputs[1])
        except NotScalarConstantError:
            return False
        if exponent % 2 == 0:
            return True
    return False
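# Example (illustrative; assumes `aet` is `aesara.tensor`):
#
#     >>> x = aet.scalar("x")
#     >>> is_positive(x ** 2)   # even constant exponent, so x**2 >= 0
#     True
#     >>> is_positive(x ** 3)   # odd exponent: no sign guarantee
#     False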
def _is_1(expr):
    """Check whether `expr` folds to a constant close to 1.

    Returns
    -------
    bool
        True iff `expr` is a constant close to 1.

    """
    try:
        v = get_scalar_constant_value(expr)
        return np.allclose(v, 1)
    except NotScalarConstantError:
        return False
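# Example (assumes `import aesara.tensor as at`):
#
#     >>> _is_1(at.constant(1.0))
#     True
#     >>> _is_1(at.constant(2.0))
#     False
#     >>> _is_1(at.scalar())   # symbolic: NotScalarConstantError is caught
#     False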
def local_gpua_multinomial(op, context_name, inputs, outputs):
    # TODO: need a description for this function
    if len(inputs) == 2:
        p, u = inputs
        n_samples = 1
    else:
        p, u, n_samples = inputs
    try:
        # Only the single-sample case is handled by this GPU rewrite.
        if get_scalar_constant_value(n_samples) != 1:
            return None
    except NotScalarConstantError:
        return None
    (m,) = outputs
    gpu_op = GPUAMultinomialFromUniform(op.odtype)
    return GpuDimShuffle([False, False], [1, 0])(gpu_op(p, u))
def local_1msigmoid(fgraph, node):
    """
    1-sigm(x) -> sigm(-x)

    """
    if node.op == sub:
        sub_l, sub_r = node.inputs
        if len(fgraph.clients[sub_r]) > 1:
            return  # graph is using both sigm and 1-sigm
        if sub_r.owner and sub_r.owner.op == sigmoid:
            try:
                val_l = get_scalar_constant_value(sub_l)
            except NotScalarConstantError:
                return
            if np.allclose(np.sum(val_l), 1):
                out = sigmoid(-sub_r.owner.inputs[0])
                copy_stack_trace([sub_r, node.outputs[0]], out)
                return [out]
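# Why this rewrite helps (sketch): sigmoid(-x) == 1 - sigmoid(x) exactly,
# but computing `1 - sigmoid(x)` loses precision when sigmoid(x) is close
# to 1, whereas `sigmoid(-x)` evaluates the stable expression
# 1 / (1 + exp(x)) directly.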
def infer_shape(self, fgraph, node, shapes):
    xshape, *_ = shapes
    shape = node.inputs[1:]
    new_shape = []
    for dim in range(node.inputs[0].type.ndim):
        s = shape[dim]
        try:
            s = at.get_scalar_constant_value(s)
            # We assume that `None` shapes are always retrieved by
            # `get_scalar_constant_value`, and only in that case do we
            # default to the shape of the input variable
            if s is None:
                s = xshape[dim]
        except NotScalarConstantError:
            pass
        new_shape.append(at.as_tensor_variable(s))

    assert len(new_shape) == len(xshape)
    return [new_shape]
def is_equal(var, val):
    """
    Return True if `var` is always equal to `val`.

    This only returns True when the variable is guaranteed to equal the
    value; if it might differ in some cases, it returns False.

    Parameters
    ----------
    var
        Variable to compare.
    val
        Python value.

    """
    try:
        v = get_scalar_constant_value(var)
        return v == val
    except NotScalarConstantError:
        return False
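# Example (assumes `import aesara.tensor as at`):
#
#     >>> is_equal(at.constant(0.0), 0)
#     True
#     >>> is_equal(at.scalar(), 0)   # value unknown until runtime
#     False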
def make_node(self, x, repeats):
    x = aet.as_tensor_variable(x)
    repeats = aet.as_tensor_variable(repeats)

    if repeats.dtype not in integer_dtypes:
        raise TypeError("repeats.dtype must be an integer.")

    # Some dtypes are not supported by numpy's implementation of repeat.
    # Until another one is available, we should fail at graph construction
    # time, not wait for execution.
    ptr_bitwidth = LOCAL_BITWIDTH
    if ptr_bitwidth == 64:
        numpy_unsupported_dtypes = ("uint64",)
    elif ptr_bitwidth == 32:
        numpy_unsupported_dtypes = ("uint32", "int64", "uint64")

    if repeats.dtype in numpy_unsupported_dtypes:
        raise TypeError(
            (
                "dtypes %s are not supported by numpy.repeat "
                "for the 'repeats' parameter, " % str(numpy_unsupported_dtypes)
            ),
            repeats.dtype,
        )

    if self.axis is None:
        broadcastable = [False]
    else:
        try:
            const_reps = aet.get_scalar_constant_value(repeats)
        except NotScalarConstantError:
            const_reps = None
        if const_reps == 1:
            # Repeating once preserves the broadcast pattern.
            broadcastable = x.broadcastable
        else:
            broadcastable = list(x.broadcastable)
            broadcastable[self.axis] = False

    out_type = TensorType(x.dtype, broadcastable)
    return Apply(self, [x, repeats], [out_type()])
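# Broadcast-pattern sketch (assumes this method lives on Aesara's `Repeat`
# op; illustrative only):
#
#     >>> node = Repeat(axis=0).make_node(at.row("x"), at.constant(1))
#     >>> node.outputs[0].broadcastable   # repeats == 1: pattern preserved
#     (True, False)
#
# With symbolic `repeats`, `const_reps` is None and the repeated axis is
# conservatively marked non-broadcastable.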
def is_neg(var):
    """
    Match a variable with the `-x` pattern.

    Parameters
    ----------
    var
        The Variable to analyze.

    Returns
    -------
    object
        `x` if `var` is of the form `-x`, or None otherwise.

    """
    var_node = var.owner
    if not var_node:
        return None

    # First match against `neg`.
    if var_node.op == neg:
        return var_node.inputs[0]

    # Then match against a multiplication by -1.
    if var_node.op == mul and len(var_node.inputs) >= 2:
        for idx, mul_input in enumerate(var_node.inputs):
            try:
                constant = get_scalar_constant_value(mul_input)
                is_minus_1 = np.allclose(constant, -1)
            except NotScalarConstantError:
                is_minus_1 = False
            if is_minus_1:
                # Found a multiplication by -1.
                if len(var_node.inputs) == 2:
                    # Only return the other input.
                    return var_node.inputs[1 - idx]
                else:
                    # Return the multiplication of all other inputs.
                    return mul(*(var_node.inputs[0:idx] + var_node.inputs[idx + 1 :]))

    # No match.
    return None
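# Example (assumes `import aesara.tensor as at`):
#
#     >>> x = at.scalar("x")
#     >>> is_neg(-x) is x        # matched against `neg`
#     True
#     >>> is_neg(-1 * x) is x    # matched against multiplication by -1
#     True
#     >>> is_neg(x) is None      # no match
#     True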
def isNaN_or_Inf_or_None(x):
    isNone = x is None
    try:
        isNaN = np.isnan(x)
        isInf = np.isinf(x)
        isStr = isinstance(x, str)
    except Exception:
        isNaN = False
        isInf = False
        isStr = False
    if not isNaN and not isInf:
        # `x` may be a symbolic constant; try to fold it to a value.
        try:
            val = get_scalar_constant_value(x)
            isInf = np.isinf(val)
            isNaN = np.isnan(val)
        except Exception:
            isNaN = False
            isInf = False
    # Note: this final check overwrites the `isStr` computed above.
    if isinstance(x, gof.Constant) and isinstance(x.data, str):
        isStr = True
    else:
        isStr = False
    return isNone or isNaN or isInf or isStr
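# Example (assumes `import numpy as np` and `import aesara.tensor as at`):
#
#     >>> isNaN_or_Inf_or_None(None)
#     True
#     >>> isNaN_or_Inf_or_None(np.nan)
#     True
#     >>> isNaN_or_Inf_or_None(at.constant(np.inf))   # folded to a constant
#     True
#     >>> isNaN_or_Inf_or_None(1.0)
#     False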
def _get_vector_length_SpecifyShape(op, var):
    try:
        return at.get_scalar_constant_value(var.owner.inputs[1])
    except NotScalarConstantError:
        raise ValueError(f"Length of {var} cannot be determined")
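# Usage sketch (assumes this function is registered as the
# `_get_vector_length` dispatch for `SpecifyShape`; illustrative only):
#
#     >>> v = at.vector("v")
#     >>> at.get_vector_length(at.specify_shape(v, (3,)))
#     3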