Example #1
    # This method belongs to an `Op` subclass; it assumes
    # `from aesara.graph.basic import Apply, Variable` at module level.
    def make_node(self, value: Variable, *conds: Variable):
        """Construct an `Apply` node from a value and its conditions.

        Parameters
        ----------
        value
            The value to return if `conds` all evaluate to ``True``; otherwise,
            `self.exc_type` is raised.
        conds
            The conditions to evaluate.
        """
        import aesara.tensor as at

        if not isinstance(value, Variable):
            value = at.as_tensor_variable(value)

        conds = [at.as_tensor_variable(c) for c in conds]

        # Each condition must be a scalar (i.e. have `ndim == 0`).
        assert all(c.type.ndim == 0 for c in conds)

        # The node outputs a new variable of the same type as `value`.
        return Apply(
            self,
            [value] + conds,
            [value.type()],
        )
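
This `make_node` matches the shape of Aesara's assertion-style `Op`s. A minimal usage sketch, assuming the `Op` in question is `Assert` from `aesara.raise_op` (calling the op instance is what triggers `make_node`):

```python
import aesara
import aesara.tensor as at
from aesara.raise_op import Assert  # assumed home of the Op

x = at.vector("x")

# Calling the `Op` instance invokes `make_node`: `x` becomes `value`,
# and the 0-d condition fills `conds`.
assert_op = Assert("x must be non-negative")
y = assert_op(x, at.all(x >= 0))

f = aesara.function([x], y)
print(f([1.0, 2.0]))   # [1. 2.]
# f([-1.0, 2.0]) raises AssertionError: x must be non-negative
```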
Example #2
# Imports as in `aesara/scan/utils.py`, where this helper is defined.
import copy
from typing import Optional, Union

import numpy as np

import aesara.scalar as aes
import aesara.tensor as at
from aesara.configdefaults import config
from aesara.graph.basic import Constant, Variable
from aesara.graph.op import get_test_value
from aesara.graph.type import HasDataType
from aesara.graph.utils import TestValueError
from aesara.tensor.basic import cast


def safe_new(x: Variable,
             tag: str = "",
             dtype: Optional[Union[str, np.dtype]] = None) -> Variable:
    """Clone a variable.

    Internal function that constructs a new variable from `x` with the same
    type, but with a different name (the old name plus `tag`). This function
    is used by `gradient` and the R-op to construct new variables for the
    inputs of the inner graph, so that there is no interference between the
    original graph and the newly constructed one.

    """
    if hasattr(x, "name") and x.name is not None:
        nw_name = x.name + tag
    else:
        nw_name = None

    if isinstance(x, Constant):
        # TODO: Do something better about this
        assert isinstance(x.type, HasDataType)

        if dtype and x.type.dtype != dtype:
            casted_x = cast(x, dtype)
            nwx = type(x)(casted_x.type, x.data, x.name)
            nwx.tag = copy.copy(x.tag)
            return nwx
        else:
            return x
    # Note, `as_tensor_variable` will convert the `ScalarType` into a
    # `TensorScalar` that will require a `ScalarFromTensor` `Op`, making the
    # push-out optimization fail
    elif isinstance(x, aes.ScalarVariable):
        if dtype:
            nw_x = aes.get_scalar_type(dtype=dtype)()
        else:
            nw_x = x.type()
        nw_x.name = nw_name
        if config.compute_test_value != "off":
            # Copy test value, cast it if necessary
            try:
                x_test_value = get_test_value(x)
            except TestValueError:
                pass
            else:
                # This clause is executed if no exception was raised
                nw_x.tag.test_value = nw_x.type.filter(x_test_value)
        return nw_x
    else:
        try:
            x = at.as_tensor_variable(x)
        except TypeError:
            # This can happen, e.g., for random state variables.
            pass

    # Cast `x` if needed. If `x` has a test value, this will also cast it.
    if dtype:
        # TODO: Do something better about this
        assert isinstance(x.type, HasDataType)

        if x.type.dtype != dtype:
            x = cast(x, dtype)

    # Calling the `Type` instance creates a fresh, ownerless variable.
    nw_x = x.type()
    nw_x.name = nw_name
    # Preserve test values so that the `compute_test_value` option can be used.
    # The test value is deep-copied to ensure there can be no interactions
    # between test values, due to inplace operations for instance. This may
    # not be the most efficient memory-wise, though.
    if config.compute_test_value != "off":
        try:
            nw_x.tag.test_value = copy.deepcopy(get_test_value(x))
        except TestValueError:
            pass

    return nw_x
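
A minimal usage sketch, assuming `safe_new` is importable from `aesara.scan.utils`:

```python
import aesara.tensor as at
from aesara.scan.utils import safe_new  # assumed import path

x = at.vector("x")

# Same type, fresh (ownerless) variable, suffixed name.
x_new = safe_new(x, tag="_copy")
print(x_new.name)            # x_copy
print(x_new.type == x.type)  # True
print(x_new.owner is None)   # True

# With `dtype`, the clone is based on the casted variable.
x64 = safe_new(x, tag="_f64", dtype="float64")
print(x64.type.dtype)        # float64
```

Note that the clone never shares an owner with `x`: even when a cast is applied, only the casted variable's *type* is used to build the new variable, which is what keeps the inner graph isolated from the original.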