Example #1
def test_convert_variable():
    t1 = MyType(1)
    v1 = Variable(MyType(1), None, None)
    v2 = Variable(MyType(2), None, None)
    v3 = Variable(MyType2(0), None, None)

    assert t1.convert_variable(v1) is v1
    assert t1.convert_variable(v2) is None

    with pytest.raises(NotImplementedError):
        t1.convert_variable(v3)
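
The test above presupposes two toy types whose definitions are not shown. Below is a minimal, hypothetical sketch of what they could look like, written against Aesara's `Type` API (`convert_variable` returns a compatible variable, `None` when no conversion exists, and raises for unrelated types); the actual definitions live in Aesara's test suite and may differ:

from aesara.graph.type import Type


class MyType(Type):
    # Hypothetical type, for illustration only.
    def __init__(self, thingy):
        self.thingy = thingy

    def filter(self, data, strict=False, allow_downcast=None):
        raise NotImplementedError()

    def __eq__(self, other):
        return type(other) is MyType and other.thingy == self.thingy

    def __hash__(self):
        return hash((type(self), self.thingy))

    def convert_variable(self, var):
        if var.type == self:
            # Exactly the same type: the variable can be used as-is.
            return var
        if isinstance(var.type, MyType):
            # Related but unequal type: no conversion is available.
            return None
        # Unrelated type: signal that conversion is not implemented.
        raise NotImplementedError()


class MyType2(Type):
    # A second, unrelated hypothetical type.
    def __init__(self, thingy):
        self.thingy = thingy

    def filter(self, data, strict=False, allow_downcast=None):
        raise NotImplementedError()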
Example #2
    def make_node(self, value: Variable, *conds: Variable):
        """Create an `Apply` node for this `Op`.

        Parameters
        ----------
        value
            The value to return if `conds` all evaluate to ``True``;
            otherwise, `self.exc_type` is raised.
        conds
            The conditions to evaluate.
        """
        import aesara.tensor as at

        if not isinstance(value, Variable):
            value = at.as_tensor_variable(value)

        conds = [at.as_tensor_variable(c) for c in conds]

        assert all(c.type.ndim == 0 for c in conds)

        return Apply(
            self,
            [value] + conds,
            [value.type()],
        )
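
This `make_node` follows Aesara's `CheckAndRaise` pattern (the `Assert` op is a concrete subclass that raises `AssertionError`). A short usage sketch, assuming `Assert` from `aesara.raise_op`; the values are illustrative:

import aesara
import aesara.tensor as at
from aesara.raise_op import Assert

x = at.vector("x")

# The op returns its first input unchanged, provided every condition
# (a 0-d boolean, matching the `ndim == 0` assertion above) is True.
assert_positive = Assert("x must be strictly positive")
y = assert_positive(x, at.all(x > 0))

f = aesara.function([x], y * 2)
f([1.0, 2.0])    # passes the check
# f([-1.0, 2.0]) would raise AssertionError at runtime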
Example #3
def test_shape_basic():
    s = shape([])
    assert s.type.broadcastable == (True,)

    s = shape([10])
    assert s.type.broadcastable == (True,)

    s = shape(lscalar())
    assert s.type.broadcastable == (False,)

    class MyType(Type):
        def filter(self, *args, **kwargs):
            raise NotImplementedError()

        def __eq__(self, other):
            return isinstance(other, MyType) and other.thingy == self.thingy

    s = shape(Variable(MyType()))
    assert s.type.broadcastable == (False,)

    s = shape(np.array(1))
    assert np.array_equal(eval_outputs([s]), [])

    s = shape(np.ones((5, 3)))
    assert np.array_equal(eval_outputs([s]), [5, 3])

    s = shape(np.ones(2))
    assert np.array_equal(eval_outputs([s]), [2])

    s = shape(np.ones((5, 3, 10)))
    assert np.array_equal(eval_outputs([s]), [5, 3, 10])
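
`eval_outputs` is a helper from Aesara's test suite. A minimal stand-in, assuming it only needs to compile the outputs and evaluate them (the real helper does a bit more):

import aesara

def eval_outputs(outputs):
    # Compile a function with no inputs and return the first output.
    f = aesara.function([], outputs)
    return f()[0]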
Example #4
def test_invalid_modes(self):
    # Modes 'r+', 'r', and 'w+' cannot work with Aesara, because
    # the output array may be modified in place, and that should not
    # modify the original file.
    path = Variable(Generic())
    for mmap_mode in ("r+", "r", "w+", "toto"):
        with pytest.raises(ValueError):
            load(path, "int32", (False,), mmap_mode)
Example #5
def test_basic(self):
    path = Variable(Generic())
    # Not specifying mmap_mode defaults to None, and the data is
    # copied into main memory.
    x = load(path, "int32", (False,))
    y = x * 2
    fn = function([path], y)
    assert (fn(self.filename) == (self.data * 2)).all()
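
`self.filename` and `self.data` are fixtures prepared by the test class. A hypothetical setup consistent with the calls above (an int32 array saved with NumPy, which is what `load` reads back):

import numpy as np

data = np.arange(5, dtype="int32")   # illustrative contents
filename = "data.npy"                # illustrative path
np.save(filename, data)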
Example #6
def make_node(self):
    return Apply(
        self,
        [],
        [
            Variable(Generic()),
            tensor(self.dtype, shape=self.broadcastable),
        ],
    )
Example #7
def test1(self):
    path = Variable(Generic())
    # 'c' means "copy-on-write", which allows the array to be overwritten
    # by an inplace Op in the graph, without modifying the underlying
    # file.
    x = load(path, "int32", (False,), "c")
    # x ** 2 has been chosen because it will work inplace.
    y = (x ** 2).sum()
    fn = function([path], y)
    # Call fn() twice, to check that inplace ops do not cause trouble.
    assert (fn(self.filename) == (self.data ** 2).sum()).all()
    assert (fn(self.filename) == (self.data ** 2).sum()).all()
Example #8
def get_test_value(v: Variable) -> Any:
    """Get the test value for `v`.

    If input `v` is not already a variable, it is turned into one by calling
    `as_tensor_variable(v)`.

    Raises
    ------
    AttributeError
        If no test value is set.

    """
    if not isinstance(v, Variable):
        v = aesara.tensor.as_tensor_variable(v)

    return v.get_test_value()
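
A short usage sketch; the variable names are illustrative. The function simply reads the `tag.test_value` attribute (wrapping raw data in a variable first):

import numpy as np
import aesara.tensor as at

x = at.vector("x")
x.tag.test_value = np.array([1.0, 2.0])

# Returns the value attached to `x.tag.test_value`.
assert np.array_equal(get_test_value(x), [1.0, 2.0])

y = at.vector("y")
# get_test_value(y) would raise here, since no test value is attached.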
Example #9
def double(name):
    return Variable(tdouble, None, None, name=name)
Example #10
def safe_new(x: Variable,
             tag: str = "",
             dtype: Optional[Union[str, np.dtype]] = None) -> Variable:
    """Clone variables.

    Internal function that constructs a new variable from `x` with the same
    type, but with a different name (old name + tag). This function is used
    by `gradient` and the R-op to construct new variables for the inputs of
    the inner graph, so that there is no interference between the original
    graph and the newly constructed one.

    """
    if hasattr(x, "name") and x.name is not None:
        nw_name = x.name + tag
    else:
        nw_name = None

    if isinstance(x, Constant):
        # TODO: Do something better about this
        assert isinstance(x.type, HasDataType)

        if dtype and x.type.dtype != dtype:
            casted_x = cast(x, dtype)
            nwx = type(x)(casted_x.type, x.data, x.name)
            nwx.tag = copy.copy(x.tag)
            return nwx
        else:
            return x
    # Note, `as_tensor_variable` will convert the `ScalarType` into a
    # `TensorScalar` that will require a `ScalarFromTensor` `Op`, making the
    # push-out optimization fail
    elif isinstance(x, aes.ScalarVariable):
        if dtype:
            nw_x = aes.get_scalar_type(dtype=dtype)()
        else:
            nw_x = x.type()
        nw_x.name = nw_name
        if config.compute_test_value != "off":
            # Copy test value, cast it if necessary
            try:
                x_test_value = get_test_value(x)
            except TestValueError:
                pass
            else:
                # This clause is executed if no exception was raised
                nw_x.tag.test_value = nw_x.type.filter(x_test_value)
        return nw_x
    else:
        try:
            x = at.as_tensor_variable(x)
        except TypeError:
            # This could happen for example for random states
            pass

    # Cast `x` if needed. If `x` has a test value, this will also cast it.
    if dtype:
        # TODO: Do something better about this
        assert isinstance(x.type, HasDataType)

        if x.type.dtype != dtype:
            x = cast(x, dtype)

    nw_x = x.type()
    nw_x.name = nw_name
    # Preserve test values so that the `compute_test_value` option can be used.
    # The test value is deep-copied to ensure there can be no interactions
    # between test values, due to inplace operations for instance. This may
    # not be the most efficient memory-wise, though.
    if config.compute_test_value != "off":
        try:
            nw_x.tag.test_value = copy.deepcopy(get_test_value(x))
        except TestValueError:
            pass

    return nw_x
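
A brief usage sketch of the non-`Constant`, non-`ScalarVariable` path; the names are illustrative:

import aesara.tensor as at

x = at.matrix("x")
x_clone = safe_new(x, tag="_copy")

assert x_clone is not x          # a fresh variable, detached from `x`'s graph
assert x_clone.type == x.type    # same type ...
assert x_clone.name == "x_copy"  # ... but a tagged name

# Passing a dtype additionally casts the clone:
x64 = safe_new(x, tag="_f64", dtype="float64")
assert x64.type.dtype == "float64"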
Example #11
def MyVariable2(name):
    return Variable(MyType2(), None, None, name=name)
Example #12
def make_node(self, request, data):
    return Apply(self, [request, data], [Variable(Generic())])
Example #13
def make_node(self, data):
    return Apply(self, [data], [Variable(Generic()), data.type()])
Example #14
def MyVariable(thingy):
    return Variable(MyType(thingy), None, None)
Example #15
def test_memmap(self):
    path = Variable(Generic())
    x = load(path, "int32", (False,), mmap_mode="c")
    fn = function([path], x)
    assert type(fn(self.filename)) == np.memmap
Example #16
def MyVariable(name):
    return Variable(MyType(name), None, None)