Example #1
 def backward(self, y_):
     y = y_.T
     y = aet.concatenate([y, -aet.sum(y, 0, keepdims=True)])
     # "softmax" with vector support and no deprication warning:
     e_y = aet.exp(y - aet.max(y, 0, keepdims=True))
     x = e_y / aet.sum(e_y, 0, keepdims=True)
     return floatX(x.T)
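
A rough NumPy rendering of this backward transform, with a hypothetical input, shows how concatenating the negative column sum before the softmax yields rows that sum to one:

import numpy as np

y = np.array([[0.3, -1.2]])                       # hypothetical free (K-1)-vector, K = 3
y_full = np.concatenate([y.T, -y.T.sum(axis=0, keepdims=True)])
e_y = np.exp(y_full - y_full.max(axis=0, keepdims=True))
x = (e_y / e_y.sum(axis=0, keepdims=True)).T      # shape (1, 3)
assert np.isclose(x.sum(axis=-1), 1.0).all()      # each row sums to one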
Example #2
    def test_graph(self):
        # define common values first
        groups = 3
        bottom = np.random.rand(3, 6, 5, 5).astype(aesara.config.floatX)
        kern = np.random.rand(9, 2, 3, 3).astype(aesara.config.floatX)
        bottom_sym = tensor4("bottom")
        kern_sym = tensor4("kern")

        # grouped convolution graph
        conv_group = self.conv(num_groups=groups)(bottom_sym, kern_sym)
        gconv_func = aesara.function([bottom_sym, kern_sym],
                                     conv_group,
                                     mode=self.mode)

        # Graph for the normal hard way
        kern_offset = kern_sym.shape[0] // groups
        bottom_offset = bottom_sym.shape[1] // groups
        split_conv_output = [
            self.conv()(
                bottom_sym[:, i * bottom_offset:(i + 1) * bottom_offset, :, :],
                kern_sym[i * kern_offset:(i + 1) * kern_offset, :, :, :],
            ) for i in range(groups)
        ]
        concatenated_output = at.concatenate(split_conv_output, axis=1)
        conv_func = aesara.function([bottom_sym, kern_sym],
                                    concatenated_output,
                                    mode=self.mode)

        # calculate outputs for each graph
        gconv_output = gconv_func(bottom, kern)
        conv_output = conv_func(bottom, kern)

        # compare values
        utt.assert_allclose(gconv_output, conv_output)
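
For reference, a small NumPy sketch of the shape bookkeeping behind the "normal hard way" above (dummy arrays, no actual convolution performed):

import numpy as np

groups = 3
bottom = np.zeros((3, 6, 5, 5))                   # 6 input channels -> 3 groups of 2
kern = np.zeros((9, 2, 3, 3))                     # 9 filters -> 3 groups of 3
bottom_groups = np.split(bottom, groups, axis=1)  # each slice: (3, 2, 5, 5)
kern_groups = np.split(kern, groups, axis=0)      # each slice: (3, 2, 3, 3)
# convolving each pair gives (3, 3, 3, 3) outputs, which concatenate along
# axis=1 back to 9 output channels, matching the grouped convolution result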
Example #3
def prior_dlogp(vars, model, flat_view):
    """Returns the gradient of the prior on the parameters as a vector of size D x 1"""
    terms = at.concatenate(
        [aesara.grad(var.logpt, var).flatten() for var in vars], axis=0)
    dlogp = aesara.clone_replace(terms, flat_view.replacements, strict=False)

    return dlogp
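
A minimal standalone sketch of the same pattern (a toy cost instead of the model's log-prior), assuming only Aesara is available:

import aesara
import aesara.tensor as at

a = at.vector("a")
b = at.matrix("b")
cost = (a ** 2).sum() + (b ** 2).sum()
flat_grad = at.concatenate([aesara.grad(cost, v).flatten() for v in (a, b)], axis=0)
f = aesara.function([a, b], flat_grad)
# f([1.0, 2.0], [[3.0]]) -> array([2., 4., 6.])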
Example #4
    def dlogp(self):
        grad = at.grad(self.logp_norm.sum(), self.approx_symbolic_matrices)

        def flatten2(tensor):
            return tensor.flatten(2)

        return at.concatenate(list(map(flatten2, grad)), -1)
Example #5
    def rv_op(cls, rhos, sigma, init_dist, steps, ar_order, constant_term, size=None):
        # Init dist should have shape (*size, ar_order)
        if size is not None:
            batch_size = size
        else:
            # In this case the size of the init_dist depends on the parameters shape
            # The last dimension of rho and init_dist does not matter
            batch_size = at.broadcast_shape(sigma, rhos[..., 0], init_dist[..., 0])
        if init_dist.owner.op.ndim_supp == 0:
            init_dist_size = (*batch_size, ar_order)
        else:
            # In this case the support dimension must cover for ar_order
            init_dist_size = batch_size
        init_dist = change_rv_size(init_dist, init_dist_size)

        # Create an OpFromGraph representing random draws from the AR process
        # Variables with underscore suffix are dummy inputs into the OpFromGraph
        init_ = init_dist.type()
        rhos_ = rhos.type()
        sigma_ = sigma.type()
        steps_ = steps.type()

        rhos_bcast_shape_ = init_.shape
        if constant_term:
            # In this case init shape is one unit smaller than rhos in the last dimension
            rhos_bcast_shape_ = (*rhos_bcast_shape_[:-1], rhos_bcast_shape_[-1] + 1)
        rhos_bcast_ = at.broadcast_to(rhos_, rhos_bcast_shape_)

        noise_rng = aesara.shared(np.random.default_rng())

        def step(*args):
            *prev_xs, reversed_rhos, sigma, rng = args
            if constant_term:
                mu = reversed_rhos[-1] + at.sum(prev_xs * reversed_rhos[:-1], axis=0)
            else:
                mu = at.sum(prev_xs * reversed_rhos, axis=0)
            next_rng, new_x = Normal.dist(mu=mu, sigma=sigma, rng=rng).owner.outputs
            return new_x, {rng: next_rng}

        # We transpose the inputs because scan iterates over the first dimension
        innov_, innov_updates_ = aesara.scan(
            fn=step,
            outputs_info=[{"initial": init_.T, "taps": range(-ar_order, 0)}],
            non_sequences=[rhos_bcast_.T[::-1], sigma_.T, noise_rng],
            n_steps=steps_,
            strict=True,
        )
        (noise_next_rng,) = tuple(innov_updates_.values())
        ar_ = at.concatenate([init_, innov_.T], axis=-1)

        ar_op = AutoRegressiveRV(
            inputs=[rhos_, sigma_, init_, steps_],
            outputs=[noise_next_rng, ar_],
            ar_order=ar_order,
            constant_term=constant_term,
            inline=True,
        )

        ar = ar_op(rhos, sigma, init_dist, steps)
        return ar
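
The final concatenate prepends the initial values to the scan output. A minimal sketch of that pattern with a deterministic AR(1) recursion (assumed toy example, no noise and no OpFromGraph):

import aesara
import aesara.tensor as at

init = at.vector("init")                      # single initial value, shape (1,)
rho = at.scalar("rho")
xs, _ = aesara.scan(
    lambda prev, rho: rho * prev,
    outputs_info=[init[-1]],
    non_sequences=[rho],
    n_steps=5,
)
series = at.concatenate([init, xs], axis=-1)  # initial value followed by the recursion
f = aesara.function([init, rho], series)
# f([1.0], 0.5) -> [1.0, 0.5, 0.25, 0.125, 0.0625, 0.03125]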
Example #6
 def insert_bigger_b_add(fgraph, node):
     if node.op == add:
         inputs = list(node.inputs)
         if inputs[-1].owner is None:
             inputs[-1] = aet.concatenate((inputs[-1], inputs[-1]))
             return [node.op(*inputs)]
     return False
Example #7
def gradient(f, vars=None):
    if vars is None:
        vars = cont_inputs(f)

    if vars:
        return at.concatenate([gradient1(f, v) for v in vars], axis=0)
    else:
        return empty_gradient
Example #8
def jacobian(f, vars=None):
    if vars is None:
        vars = cont_inputs(f)

    if vars:
        return at.concatenate([jacobian1(f, v) for v in vars], axis=1)
    else:
        return empty_gradient
Example #9
def hessian_diag(f, vars=None):
    if vars is None:
        vars = cont_inputs(f)

    if vars:
        return -at.concatenate([hessian_diag1(f, v) for v in vars], axis=0)
    else:
        return empty_gradient
Example #10
 def jacobian_det(self, y_):
     y = y_.T
     Km1 = y.shape[0] + 1
     sy = aet.sum(y, 0, keepdims=True)
     r = aet.concatenate([y + sy, aet.zeros(sy.shape)])
     sr = logsumexp(r, 0, keepdims=True)
     d = aet.log(Km1) + (Km1 * sy) - (Km1 * sr)
     return aet.sum(d, 0).T
Example #11
    def logp(self, states):
        r"""Create a Theano graph that computes the log-likelihood for a discrete Markov chain.

        This is the log-likelihood for the joint distribution of states, :math:`S_t`, conditional
        on state samples, :math:`s_t`, given by the following:

        .. math::

            \int_{S_0} P(S_1 = s_1 \mid S_0) dP(S_0) \prod^{T}_{t=2} P(S_t = s_t \mid S_{t-1} = s_{t-1})

        The first term (i.e. the integral) simply computes the marginal :math:`P(S_1 = s_1)`, so
        another way to express this result is as follows:

        .. math::

            P(S_1 = s_1) \prod^{T}_{t=2} P(S_t = s_t \mid S_{t-1} = s_{t-1})

        """  # noqa: E501

        states_tt = at.as_tensor(states)

        if states.ndim > 1 or self.Gammas.ndim > 3 or self.gamma_0.ndim > 1:
            raise NotImplementedError("Broadcasting not supported.")

        Gammas_tt = at_broadcast_to(self.Gammas, (states.shape[0], ) +
                                    tuple(self.Gammas.shape)[-2:])
        gamma_0_tt = self.gamma_0

        Gamma_1_tt = Gammas_tt[0]
        P_S_1_tt = at.dot(gamma_0_tt, Gamma_1_tt)[states_tt[0]]

        # def S_logp_fn(S_tm1, S_t, Gamma):
        #     return at.log(Gamma[..., S_tm1, S_t])
        #
        # P_S_2T_tt, _ = aesara.scan(
        #     S_logp_fn,
        #     sequences=[
        #         {
        #             "input": states_tt,
        #             "taps": [-1, 0],
        #         },
        #         Gammas_tt,
        #     ],
        # )
        P_S_2T_tt = Gammas_tt[at.arange(1, states.shape[0]), states[:-1],
                              states[1:]]

        log_P_S_1T_tt = at.concatenate(
            [at.shape_padright(at.log(P_S_1_tt)),
             at.log(P_S_2T_tt)])

        res = log_P_S_1T_tt.sum()
        res.name = "states_logp"

        return res
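
A quick NumPy check of the fancy-indexing step that replaces the commented-out scan (hypothetical transition matrices and states):

import numpy as np

T, K = 4, 2
Gammas = np.tile(np.array([[0.9, 0.1], [0.2, 0.8]]), (T, 1, 1))   # (T, K, K)
states = np.array([0, 0, 1, 1])
p_2T = Gammas[np.arange(1, T), states[:-1], states[1:]]
# -> [0.9, 0.1, 0.8]: P(S_t = s_t | S_{t-1} = s_{t-1}) for t = 2..T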
Example #12
def join_nonshared_inputs(
    point: Dict[str, np.ndarray],
    xs: List[TensorVariable],
    vars: List[TensorVariable],
    shared,
    make_shared: bool = False,
):
    """
    Takes a list of Aesara variables and joins their non-shared inputs into a single input.

    Parameters
    ----------
    point: a sample point
    xs: list of Aesara tensors
    vars: list of variables to join

    Returns
    -------
    tensors, inarray
    tensors: list of same tensors but with inarray as input
    inarray: vector of inputs
    """
    if not vars:
        raise ValueError("Empty list of variables.")

    joined = at.concatenate([var.ravel() for var in vars])

    if not make_shared:
        tensor_type = joined.type
        inarray = tensor_type("inarray")
    else:
        if point is None:
            raise ValueError("A point is required when `make_shared` is True")
        joined_values = np.concatenate(
            [point[var.name].ravel() for var in vars])
        inarray = aesara.shared(joined_values, "inarray")

    if aesara.config.compute_test_value != "off":
        inarray.tag.test_value = joined.tag.test_value

    replace = {}
    last_idx = 0
    for var in vars:
        shape = point[var.name].shape
        arr_len = np.prod(shape, dtype=int)
        replace[var] = reshape_t(inarray[last_idx:last_idx + arr_len],
                                 shape).astype(var.dtype)
        last_idx += arr_len

    replace.update(shared)

    xs_special = [
        aesara.clone_replace(x, replace, rebuild_strict=False) for x in xs
    ]
    return xs_special, inarray
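
A minimal standalone sketch of the replacement step (toy variables, not the PyMC helper itself): the original inputs are swapped for slices of a single flat vector via clone_replace.

import aesara
import aesara.tensor as at

a = at.vector("a")                    # length 2
b = at.scalar("b")
cost = (a ** 2).sum() + b ** 2

inarray = at.vector("inarray")        # all inputs flattened into one vector
flat_cost = aesara.clone_replace(cost, {a: inarray[0:2], b: inarray[2]})
f = aesara.function([inarray], flat_cost)
# f([1.0, 2.0, 3.0]) -> array(14.0)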
Example #13
    def backward(self, rv_var, rv_value):
        if rv_var.broadcastable[-1]:
            # If this variable is just a bunch of scalars/degenerate
            # Dirichlets, we can't transform it
            return rv_value

        y = rv_value.T
        y = at.concatenate([y, -at.sum(y, 0, keepdims=True)])
        # "softmax" with vector support and no deprication warning:
        e_y = at.exp(y - at.max(y, 0, keepdims=True))
        x = e_y / at.sum(e_y, 0, keepdims=True)
        return floatX(x.T)
Example #14
    def jacobian_det(self, rv_var, rv_value):
        if rv_var.broadcastable[-1]:
            # If this variable is just a bunch of scalars/degenerate
            # Dirichlets, we can't transform it
            return at.ones_like(rv_value)

        y = rv_value.T
        Km1 = y.shape[0] + 1
        sy = at.sum(y, 0, keepdims=True)
        r = at.concatenate([y + sy, at.zeros(sy.shape)])
        sr = logsumexp(r, 0, keepdims=True)
        d = at.log(Km1) + (Km1 * sy) - (Km1 * sr)
        return at.sum(d, 0).T
Example #15
    def get_volatility(self, x):
        x = x[:-1]

        def volatility_update(x, vol, w, a, b):
            return at.sqrt(w + a * at.square(x) + b * at.square(vol))

        vol, _ = scan(
            fn=volatility_update,
            sequences=[x],
            outputs_info=[self.initial_vol],
            non_sequences=[self.omega, self.alpha_1, self.beta_1],
        )
        return at.concatenate([[self.initial_vol], vol])
Example #16
    def change_size(cls, rv, new_size, expand=False):

        if expand:
            old_size = rv.shape[:-1]
            new_size = at.concatenate([new_size, old_size])

        op = rv.owner.op
        return cls.rv_op(
            *rv.owner.inputs,
            ar_order=op.ar_order,
            constant_term=op.constant_term,
            size=new_size,
        )
Example #17
 def __init__(
     self,
     x,
     y,
     intercept=True,
     labels=None,
     priors=None,
     vars=None,
     name="",
     model=None,
     offset=0.0,
 ):
     super().__init__(name, model)
     if len(y.shape) > 1:
         err_msg = ("Only one-dimensional observed variable objects (i.e."
                    " of shape `(n, )`) are supported")
         raise TypeError(err_msg)
     if priors is None:
         priors = {}
     if vars is None:
         vars = {}
     x, labels = any_to_tensor_and_labels(x, labels)
     # now we have x, shape and labels
     if intercept:
         x = at.concatenate([at.ones((x.shape[0], 1), x.dtype), x], axis=1)
         labels = ["Intercept"] + labels
     coeffs = list()
     for name in labels:
         if name == "Intercept":
             if name in vars:
                 v = Deterministic(name, vars[name])
             else:
                 v = self.Var(name=name,
                              dist=priors.get(name,
                                              self.default_intercept_prior))
             coeffs.append(v)
         else:
             if name in vars:
                 v = Deterministic(name, vars[name])
             else:
                 v = self.Var(
                     name=name,
                     dist=priors.get(
                         name,
                         priors.get("Regressor",
                                    self.default_regressor_prior)),
                 )
             coeffs.append(v)
     self.coeffs = at.stack(coeffs, axis=0)
     self.y_est = x.dot(self.coeffs) + offset
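
The intercept handling above boils down to prepending a column of ones to the design matrix; in plain NumPy terms:

import numpy as np

x = np.arange(6.0).reshape(3, 2)
x_with_intercept = np.concatenate([np.ones((x.shape[0], 1), x.dtype), x], axis=1)
# shape (3, 3); the leading column of ones makes the first coefficient the intercept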
Example #18
    def change_size(cls, rv, new_size, expand=False):

        if expand:
            old_size = rv.shape[:-1]
            new_size = at.concatenate([new_size, old_size])

        init_dist_rng = rv.owner.inputs[2].owner.inputs[0]
        noise_rng = rv.owner.inputs[-1]

        op = rv.owner.op
        return cls.rv_op(
            *rv.owner.inputs,
            ar_order=op.ar_order,
            constant_term=op.constant_term,
            size=new_size,
            rngs=(init_dist_rng, noise_rng),
        )
Example #19
 def infer_shape(self, node, in_shapes):
     shape_a = in_shapes[0]
     n = node.inputs[1]
     axis = node.inputs[2]
     if len(shape_a) == 1:
         return [(n, )]
     elif isinstance(axis, tensor.TensorConstant):
         out_shape = (list(shape_a[0:axis.data.item()]) + [n] +
                      list(shape_a[axis.data + 1:]))
     else:
         l = len(shape_a)
         shape_a = tensor.stack(shape_a)
         out_shape = tensor.concatenate(
             (shape_a[0:axis], [n], shape_a[axis + 1:]))
         n_splits = [1] * l
         out_shape = tensor.split(out_shape, n_splits, l)
         out_shape = [a[0] for a in out_shape]
     return [out_shape]
Example #20
def elemwise_dlogL(vars, model, flat_view):
    """
    Returns Jacobian of the log likelihood for each training datum wrt vars
    as a matrix of size N x D
    """
    # select one observed random variable
    obs_var = model.observed_RVs[0]
    # tensor of shape (batch_size,)
    logL = obs_var.logp_elemwiset.sum(axis=tuple(range(1, obs_var.logp_elemwiset.ndim)))
    # calculate fisher information
    terms = []
    for var in vars:
        output, _ = aesara.scan(
            lambda i, logX=logL, v=var: aesara.grad(logX[i], v).flatten(),
            sequences=[at.arange(logL.shape[0])],
        )
        terms.append(output)
    dlogL = aesara.clone_replace(
        at.concatenate(terms, axis=1), flat_view.replacements, strict=False
    )
    return dlogL
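
A minimal standalone sketch of the per-datum gradient pattern (a toy quadratic log-likelihood instead of the model's observed RV):

import aesara
import aesara.tensor as at

v = at.vector("v")                                       # parameters, length D
x = at.vector("x")                                       # observations, length N
logL = -((x.dimshuffle(0, "x") - v) ** 2).sum(axis=1)    # one term per datum
rows, _ = aesara.scan(
    lambda i: aesara.grad(logL[i], v).flatten(),
    sequences=[at.arange(logL.shape[0])],
)
jacobian_fn = aesara.function([x, v], rows)              # evaluates to an (N, D) matrix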
Example #21
def join_nonshared_inputs(xs, vars, shared, make_shared=False):
    """
    Takes a list of Aesara variables and joins their non-shared inputs into a single input.

    Parameters
    ----------
    xs: list of aesara tensors
    vars: list of variables to join

    Returns
    -------
    tensors, inarray
    tensors: list of same tensors but with inarray as input
    inarray: vector of inputs
    """
    if not vars:
        raise ValueError("Empty list of variables.")

    joined = at.concatenate([var.ravel() for var in vars])

    if not make_shared:
        tensor_type = joined.type
        inarray = tensor_type("inarray")
    else:
        inarray = aesara.shared(joined.tag.test_value, "inarray")

    ordering = ArrayOrdering(vars)
    inarray.tag.test_value = joined.tag.test_value

    get_var = {var.name: var for var in vars}
    replace = {
        get_var[var]: reshape_t(inarray[slc], shp).astype(dtyp)
        for var, slc, shp, dtyp in ordering.vmap
    }

    replace.update(shared)

    xs_special = [aesara.clone_replace(x, replace, strict=False) for x in xs]
    return xs_special, inarray
Example #22
    def rv_op(cls, weights, *components, size=None):
        # Create new rng for the mix_indexes internal RV
        mix_indexes_rng = aesara.shared(np.random.default_rng())

        single_component = len(components) == 1
        ndim_supp = components[0].owner.op.ndim_supp

        if size is not None:
            components = cls._resize_components(size, *components)
        elif not single_component:
            # We might need to broadcast components when size is not specified
            shape = tuple(at.broadcast_shape(*components))
            size = shape[:len(shape) - ndim_supp]
            components = cls._resize_components(size, *components)

        # Extract replication ndims from components and weights
        ndim_batch = components[0].ndim - ndim_supp
        if single_component:
            # One dimension is taken by the mixture axis in the single component case
            ndim_batch -= 1

        # The weights may imply extra batch dimensions that go beyond what is already
        # implied by the component dimensions (ndim_batch)
        weights_ndim_batch = max(0, weights.ndim - ndim_batch - 1)

        # If weights are large enough that they would broadcast the component distributions
        # we try to resize them. This is necessary to avoid duplicated values in the
        # random method and for equivalence with the logp method
        if weights_ndim_batch:
            new_size = at.concatenate([
                weights.shape[:weights_ndim_batch],
                components[0].shape[:ndim_batch],
            ])
            components = cls._resize_components(new_size, *components)

            # Extract support and batch ndims from components and weights
            ndim_batch = components[0].ndim - ndim_supp
            if single_component:
                ndim_batch -= 1
            weights_ndim_batch = max(0, weights.ndim - ndim_batch - 1)

        assert weights_ndim_batch == 0

        # Component RV terms are accounted for by the Mixture logprob, so they can be
        # safely ignored by Aeppl
        components = [ignore_logprob(component) for component in components]

        # Create an OpFromGraph that encapsulates the random generating process
        # Create dummy input variables with the same type as the ones provided
        weights_ = weights.type()
        components_ = [component.type() for component in components]
        mix_indexes_rng_ = mix_indexes_rng.type()

        mix_axis = -ndim_supp - 1

        # Stack components across mixture axis
        if single_component:
            # If single component, we consider it as being already "stacked"
            stacked_components_ = components_[0]
        else:
            stacked_components_ = at.stack(components_, axis=mix_axis)

        # Broadcast weights to (*batched dimensions, stack dimension), ignoring support dimensions
        weights_broadcast_shape_ = stacked_components_.shape[:ndim_batch + 1]
        weights_broadcasted_ = at.broadcast_to(weights_,
                                               weights_broadcast_shape_)

        # Draw mixture indexes and append (stack + ndim_supp) broadcastable dimensions to the right
        mix_indexes_ = at.random.categorical(weights_broadcasted_,
                                             rng=mix_indexes_rng_)
        mix_indexes_padded_ = at.shape_padright(mix_indexes_, ndim_supp + 1)

        # Index components and squeeze mixture dimension
        mix_out_ = at.take_along_axis(stacked_components_,
                                      mix_indexes_padded_,
                                      axis=mix_axis)
        mix_out_ = at.squeeze(mix_out_, axis=mix_axis)

        # Output mix_indexes rng update so that it can be updated in place
        mix_indexes_rng_next_ = mix_indexes_.owner.outputs[0]

        mix_op = MarginalMixtureRV(
            inputs=[mix_indexes_rng_, weights_, *components_],
            outputs=[mix_indexes_rng_next_, mix_out_],
        )

        # Create the actual MarginalMixture variable
        mix_out = mix_op(mix_indexes_rng, weights, *components)

        # Reference nodes to facilitate identification in other classmethods
        mix_out.tag.weights = weights
        mix_out.tag.components = components
        mix_out.tag.choices_rng = mix_indexes_rng

        return mix_out
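
The component-selection step (take_along_axis followed by a squeeze) in plain NumPy, with assumed shapes:

import numpy as np

stacked = np.stack([np.zeros(4), np.ones(4)], axis=-1)   # (4, 2): two scalar components
idx = np.array([0, 1, 1, 0])                             # one component choice per draw
mix = np.take_along_axis(stacked, idx[:, None], axis=-1).squeeze(-1)
# -> [0., 1., 1., 0.]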
Example #23
 def backward(self, value, *inputs):
     remaining = 1 - at.sum(value[..., :], axis=-1, keepdims=True)
     return at.concatenate([value[..., :], remaining], axis=-1)
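
In plain NumPy, this backward transform simply appends the remaining probability mass so the last axis sums to one:

import numpy as np

value = np.array([0.2, 0.3])
remaining = 1 - value.sum(axis=-1, keepdims=True)
full = np.concatenate([value, remaining], axis=-1)   # -> [0.2, 0.3, 0.5]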
Example #24
def solve_ivp(
    t0: float,
    y0: np.ndarray,
    params: Dict[str, Any],
    tvals: np.ndarray,
    rhs: Callable[[sym.Symbol, np.ndarray, np.ndarray], Dict[str, Any]],
    derivatives: str = 'adjoint',
    coords: Optional[Dict[str, pd.Index]] = None,
    make_solver=None,
    derivative_subset=None,
    solver_kwargs=None,
    simplify=None,
) -> Any:
    dtype = basic.data_dtype
    if solver_kwargs is None:
        solver_kwargs = {}

    if derivatives == "forward":
        params = params.copy()
        params["__initial_values"] = y0

    def read_dict(vals, name=None):
        if isinstance(vals, dict):
            return {name: read_dict(item, name) for name, item in vals.items()}
        else:
            if isinstance(vals, tuple):
                tensor, dim_names = vals
            else:
                try:
                    tensor, dim_names = vals, aet.as_tensor_variable(
                        vals).shape.eval()
                except MissingInputError as e:
                    raise ValueError('Shapes of tensors need to be statically '
                                     'known or given explicitly.') from e
            if isinstance(dim_names, (str, int)):
                dim_names = (dim_names, )
            tensor = aet.as_tensor_variable(tensor)
            if tensor.ndim != len(dim_names):
                raise ValueError(
                    f"Dimension mismatch for {name}: Value has rank {tensor.ndim}, "
                    f"but {len(dim_names)} was specified.")
            assert np.dtype(tensor.dtype) == dtype, tensor
            tensor_dtype = np.dtype(tensor.dtype)
            if tensor_dtype != dtype:
                raise ValueError(
                    f"Dtype mismatch for {name}: Got {tensor_dtype} but expected {dtype}."
                )
            return dim_names

    y0_dims = read_dict(y0)
    params_dims = read_dict(params)

    if derivative_subset is None:
        derivative_subset = []
        for path, val in as_flattened(params).items():
            if isinstance(val, tuple):
                tensor, _ = val
            else:
                tensor = val
            if isinstance(tensor, Variable):
                if not isinstance(tensor, Constant):
                    derivative_subset.append(path)

    problem = symode.problem.SympyProblem(params_dims,
                                          y0_dims,
                                          rhs,
                                          derivative_subset,
                                          coords=coords,
                                          simplify=simplify)

    flat_tensors = as_flattened(params)
    vars = []
    for path in problem.params_subset.subset_paths:
        tensor = flat_tensors[path]
        if isinstance(tensor, tuple):
            tensor, _ = tensor
        vars.append(aet.as_tensor_variable(tensor).reshape((-1, )))
    if vars:
        params_subs_flat = aet.concatenate(vars)
    else:
        params_subs_flat = aet.as_tensor_variable(np.zeros(0))

    vars = []
    for path in problem.params_subset.remainder.subset_paths:
        tensor = flat_tensors[path]
        if isinstance(tensor, tuple):
            tensor, _ = tensor
        vars.append(aet.as_tensor_variable(tensor).reshape((-1, )))
    if vars:
        params_remaining_flat = aet.concatenate(vars)
    else:
        params_remaining_flat = aet.as_tensor_variable(np.zeros(0))

    flat_tensors = as_flattened(y0)
    vars = []
    for path in problem.state_subset.paths:
        tensor = flat_tensors[path]
        if isinstance(tensor, tuple):
            tensor, _ = tensor
        vars.append(aet.as_tensor_variable(tensor).reshape((-1, )))
    y0_flat = aet.concatenate(vars)

    if derivatives == 'adjoint':
        sol = solver.AdjointSolver(problem, **solver_kwargs)
        wrapper = SolveODEAdjoint(sol, t0, tvals)
        flat_solution = wrapper(y0_flat, params_subs_flat,
                                params_remaining_flat)
        solution = problem.flat_solution_as_dict(flat_solution)
        return solution, flat_solution, problem, sol, y0_flat, params_subs_flat
    elif derivatives == 'forward':
        if not "sens_mode" in solver_kwargs:
            raise ValueError(
                "When `derivatives=True`, the `solver_kwargs` must contain one of `sens_mode={\"simultaneous\" | \"staggered\"}`."
            )
        sol = solver.Solver(problem, **solver_kwargs)
        wrapper = SolveODE(sol, t0, tvals)
        flat_solution, flat_sens = wrapper(y0_flat, params_subs_flat,
                                           params_remaining_flat)
        solution = problem.flat_solution_as_dict(flat_solution)
        return solution, flat_solution, problem, sol, y0_flat, params_subs_flat, flat_sens, wrapper
    elif derivatives in [None, False]:
        sol = solver.Solver(problem, sens_mode=False)
        assert False
Example #25
def neibs2images(neibs, neib_shape, original_shape, mode="valid"):
    """
    Function :func:`neibs2images <aesara.sandbox.neighbours.neibs2images>`
    performs the inverse operation of
    :func:`images2neibs <aesara.sandbox.neighbours.images2neibs>`. It takes
    the output of :func:`images2neibs <aesara.sandbox.neighbours.images2neibs>`
    and reconstructs its input.

    Parameters
    ----------
    neibs : 2d tensor
        Like the one obtained by
        :func:`images2neibs <aesara.sandbox.neighbours.images2neibs>`.
    neib_shape
        `neib_shape` that was used in
        :func:`images2neibs <aesara.sandbox.neighbours.images2neibs>`.
    original_shape
        Original shape of the 4d tensor given to
        :func:`images2neibs <aesara.sandbox.neighbours.images2neibs>`.

    Returns
    -------
    object
        Reconstructs the input of
        :func:`images2neibs <aesara.sandbox.neighbours.images2neibs>`,
        a 4d tensor of shape `original_shape`.

    Notes
    -----
    Currently, the function doesn't support tensors created with a
    `neib_step` different from the default value. This means that it may be
    impossible to compute the gradient of a variable obtained from
    :func:`images2neibs <aesara.sandbox.neighbours.images2neibs>` w.r.t.
    its inputs in this case, because it uses
    :func:`images2neibs <aesara.sandbox.neighbours.images2neibs>` for
    gradient computation.

    Examples
    --------
    This example uses a tensor obtained in the example for
    :func:`images2neibs <aesara.sandbox.neighbours.images2neibs>`:

    .. code-block:: python

        im_new = neibs2images(neibs, (5, 5), im_val.shape)
        # Aesara function definition
        inv_window = aesara.function([neibs], im_new)
        # Function application
        im_new_val = inv_window(neibs_val)

    .. note:: The code will output the initial image array.

    """
    neibs = tt.as_tensor_variable(neibs)
    neib_shape = tt.as_tensor_variable(neib_shape)
    original_shape = tt.as_tensor_variable(original_shape)

    new_neib_shape = tt.stack(
        [original_shape[-1] // neib_shape[1], neib_shape[1]])
    output_2d = images2neibs(neibs.dimshuffle("x", "x", 0, 1),
                             new_neib_shape,
                             mode=mode)

    if mode == "ignore_borders":
        # We use set_subtensor to accept an original_shape whose values we
        # can't infer, while still raising an error when it doesn't have the
        # right shape.
        valid_shape = original_shape
        valid_shape = tt.set_subtensor(
            valid_shape[2], (valid_shape[2] // neib_shape[0]) * neib_shape[0])
        valid_shape = tt.set_subtensor(
            valid_shape[3], (valid_shape[3] // neib_shape[1]) * neib_shape[1])
        output_4d = output_2d.reshape(valid_shape, ndim=4)
        # padding the borders with zeros
        for d in [2, 3]:
            pad_shape = list(output_4d.shape)
            pad_shape[d] = original_shape[d] - valid_shape[d]
            output_4d = tt.concatenate(
                [output_4d, tt.zeros(pad_shape)], axis=d)
    elif mode == "valid":
        # TODO: not all modes are implemented by this code.
        # Add a check for the supported cases.
        output_4d = output_2d.reshape(original_shape, ndim=4)
    else:
        raise NotImplementedError("neibs2images do not support mode=%s" % mode)

    return output_4d
Example #26
def flatten_list(tensors):
    return at.concatenate([var.ravel() for var in tensors])
Example #27
 def backward(self, rv_var, rv_value):
     remaining = 1 - at.sum(rv_value[..., :], axis=-1, keepdims=True)
     return at.concatenate([rv_value[..., :], remaining], axis=-1)
Example #28
 def backward(self, y):
     remaining = 1 - aet.sum(y[..., :], axis=-1, keepdims=True)
     return aet.concatenate([y[..., :], remaining], axis=-1)
Example #29
        def ode_func_4_t(y, t, p):
            # Make sure that ds and di are vectors by slicing
            ds = -p[0:1] * y[0:1] * y[1:]
            di = p[0:1] * y[0:1] * y[1:] - p[1:] * y[1:]

            return at.concatenate([ds, di], axis=0)
Example #30
 def get_moment(cls, rv, *sim_inputs):
     # Take the mean of 10 draws
     multiple_sim = rv.owner.op(*sim_inputs,
                                size=at.concatenate([[10], rv.shape]))
     return at.mean(multiple_sim, axis=0)
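
The size argument prepends a new leading dimension of 10 to the variable's shape before averaging it away; a NumPy analogue of the shape arithmetic:

import numpy as np

shape = (3, 2)
new_shape = tuple(np.concatenate([[10], shape]))   # (10, 3, 2)
draws = np.zeros(new_shape)                        # stands in for the 10 simulated draws
moment = draws.mean(axis=0)                        # back to shape (3, 2)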