def test_get_vector_length():
    # A fully determined static shape lets the length be read from the type.
    x = TensorVariable(TensorType("int64", (4,)))
    res = get_vector_length(x)
    assert res == 4

    # An unknown static dimension means the length cannot be determined.
    x = TensorVariable(TensorType("int64", (None,)))
    with pytest.raises(ValueError):
        get_vector_length(x)
def extract_obs_data(x: TensorVariable) -> np.ndarray:
    """Extract data from observed symbolic variables.

    Raises
    ------
    TypeError
        If the data cannot be extracted from `x`.

    """
    if isinstance(x, Constant):
        return x.data
    if isinstance(x, SharedVariable):
        return x.get_value()
    if x.owner and isinstance(x.owner.op, Elemwise) and isinstance(x.owner.op.scalar_op, Cast):
        array_data = extract_obs_data(x.owner.inputs[0])
        return array_data.astype(x.type.dtype)
    if x.owner and isinstance(x.owner.op, (AdvancedIncSubtensor, AdvancedIncSubtensor1)):
        array_data = extract_obs_data(x.owner.inputs[0])

        # The indices written to by the inc/set-subtensor mark the missing entries.
        mask_idx = tuple(extract_obs_data(i) for i in x.owner.inputs[2:])
        mask = np.zeros_like(array_data)
        mask[mask_idx] = 1
        return np.ma.MaskedArray(array_data, mask)

    raise TypeError(f"Data cannot be extracted from {x}")
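# A minimal usage sketch for `extract_obs_data` (assumes `aesara` and `numpy`
# are importable and the function above is in scope; the variable names below
# are illustrative only, not part of any API).
import numpy as np
import aesara.tensor as at

c = at.as_tensor(np.array([1.0, 2.0, 3.0]))
extract_obs_data(c)                    # constants unwrap directly to their data
extract_obs_data(c.astype("float32"))  # casts are traversed and re-applied

# Values written via an advanced set-subtensor come back as a masked array,
# with the written indices flagged as missing.
miss = at.set_subtensor(c[np.array([1])], 99.0)
extract_obs_data(miss)                 # -> MaskedArray, masked at index 1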
def test_tensorvariable(self):
    # Get the current counter value
    autoname_id = next(Variable.__count__)
    Variable.__count__ = count(autoname_id)
    r1 = TensorType(dtype="int32", shape=())("myvar")
    r2 = TensorVariable(TensorType(dtype="int32", shape=()))
    r3 = shared(np.random.standard_normal((3, 4)))
    assert r1.auto_name == "auto_" + str(autoname_id)
    assert r2.auto_name == "auto_" + str(autoname_id + 1)
    assert r3.auto_name == "auto_" + str(autoname_id + 2)
def test_infer_shape(self):
    rng = np.random.default_rng(3453)
    adtens4 = dtensor4()
    aivec = TensorVariable(TensorType("int64", (4,)))
    aivec_val = [3, 4, 2, 5]
    adtens4_val = rng.random(aivec_val)
    self._compile_and_check(
        [adtens4, aivec],
        [specify_shape(adtens4, aivec)],
        [adtens4_val, aivec_val],
        SpecifyShape,
    )
def test_fixed_shape_variable_basic():
    x = TensorVariable(TensorType("int64", (4,)))
    assert isinstance(x.shape, Constant)
    assert np.array_equal(x.shape.data, (4,))

    x = TensorConstant(TensorType("int64", (False, False)), np.array([[1, 2], [2, 3]]))
    assert x.type.shape == (2, 2)

    # A broadcastable first dimension conflicts with data of length two.
    with pytest.raises(ValueError):
        TensorConstant(TensorType("int64", (True, False)), np.array([[1, 2], [2, 3]]))
def change_rv_size(
    rv_var: TensorVariable,
    new_size: PotentialShapeType,
    expand: Optional[bool] = False,
) -> TensorVariable:
    """Change or expand the size of a `RandomVariable`.

    Parameters
    ----------
    rv_var
        The `RandomVariable` output.
    new_size
        The new size.
    expand
        Whether to expand the existing size by `new_size`.

    """
    # Check the dimensionality of the `new_size` kwarg
    new_size_ndim = np.ndim(new_size)
    if new_size_ndim > 1:
        raise ShapeError("The `new_size` must be ≤1-dimensional.", actual=new_size_ndim)
    elif new_size_ndim == 0:
        new_size = (new_size,)

    # Extract the RV node that is to be resized, together with its inputs, name, and tag
    if isinstance(rv_var.owner.op, SpecifyShape):
        rv_var = rv_var.owner.inputs[0]
    rv_node = rv_var.owner
    rng, size, dtype, *dist_params = rv_node.inputs
    name = rv_var.name
    tag = rv_var.tag

    if expand:
        old_shape = tuple(rv_node.op._infer_shape(size, dist_params))
        old_size = old_shape[: len(old_shape) - rv_node.op.ndim_supp]
        new_size = tuple(new_size) + tuple(old_size)

    # Make sure the new size is a tensor. This dtype-aware conversion helps
    # to not unnecessarily pick up a `Cast` in some cases (see #4652).
    new_size = at.as_tensor(new_size, ndim=1, dtype="int64")

    new_rv_node = rv_node.op.make_node(rng, new_size, dtype, *dist_params)
    rv_var = new_rv_node.outputs[-1]
    rv_var.name = name
    for k, v in tag.__dict__.items():
        rv_var.tag.__dict__.setdefault(k, v)

    if config.compute_test_value != "off":
        compute_test_value(new_rv_node)

    return rv_var
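# A minimal usage sketch for `change_rv_size` (assumes `aesara` is importable
# and the function above is in scope; `normal` is aesara's random-variable
# constructor, used here purely for illustration).
from aesara.tensor.random.basic import normal

rv = normal(0, 1, size=(3,))

# Replace the size outright: the resized graph draws a (5,)-shaped sample.
resized_rv = change_rv_size(rv, 5)

# Expand instead: (2,) is prepended to the old size (3,), giving (2, 3) draws.
expanded_rv = change_rv_size(rv, 2, expand=True)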