Example #1
def _calc_elbo(vars, model, n_mcsamples, random_seed):
    """Calculate the approximate ELBO."""
    theano.config.compute_test_value = 'ignore'
    shared = pm.make_shared_replacements(vars, model)

    # Elementwise log-probability factors of all basic RVs, plus any potentials.
    factors = [var.logp_elemwiset
               for var in model.basic_RVs] + model.potentials

    # Keep the factors elementwise: stack all but the last and append the last,
    # flattened, so logpt is a single vector of log-probability terms.
    wfactors = tt.stack(factors[:-1])
    logpt = tt.concatenate([wfactors, factors[-1].reshape([factors[-1].size])])

    # Replace the free variables with a single flattened input vector.
    [logp], inarray = pm.join_nonshared_inputs([logpt], vars, shared)

    # Variational parameters: means and log standard deviations, concatenated.
    uw = tt.dvector('uw')
    uw.tag.test_value = np.concatenate(
        [inarray.tag.test_value, inarray.tag.test_value])

    # _elbo_t (defined elsewhere in this module) builds the Monte Carlo ELBO graph.
    elbo = _elbo_t(logp, uw, inarray, n_mcsamples, random_seed)

    return elbo, shared
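For context, here is a minimal, self-contained sketch of the kind of Monte Carlo ELBO graph that _elbo_t is assumed to build, written in plain Theano with a standard-normal stand-in for the model log-probability. The names x, eps, and elbo_fn are illustrative, not PyMC3 internals.

import numpy as np
import theano
import theano.tensor as tt

# Stand-in for the flattened free variables (inarray above): a 2-D point.
x = tt.dvector('x')
logp = -0.5 * tt.sum(x ** 2)              # stand-in log p(x): standard normal

# Variational parameters packed like uw: means first, log standard deviations second.
uw = tt.dvector('uw')
l = uw.size // 2
u, w = uw[:l], uw[l:]

# One-sample Monte Carlo ELBO, with the standard-normal noise supplied explicitly.
eps = tt.dvector('eps')
sample = u + tt.exp(w) * eps              # reparameterized draw from q
logp_q = theano.clone(logp, {x: sample}, strict=False)
entropy = tt.sum(w) + 0.5 * l * np.log(2 * np.pi * np.e)
elbo = logp_q + entropy

elbo_fn = theano.function([uw, eps], elbo)
print(elbo_fn(np.zeros(4), np.random.randn(2)))   # ELBO estimate at u = 0, log sigma = 0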
Example #2
def delta_logp(logp, vars, shared):
    # Flatten the free variables of the log-probability graph into one input vector.
    [logp0], inarray0 = pm.join_nonshared_inputs([logp], vars, shared)

    # A second input of the same tensor type, representing the proposed point.
    tensor_type = inarray0.type
    inarray1 = tensor_type("inarray1")

    # Re-apply the log-probability graph to the proposed point.
    logp1 = pm.CallableTensor(logp0)(inarray1)

    # Compile logp(proposed) - logp(current), i.e. the log acceptance ratio.
    f = aesara.function([inarray1, inarray0], logp1 - logp0)
    f.trust_input = True
    return f
Example #3
def delta_logp(logp, vars, shared):
    # Identical to Example #2, but compiled with Theano rather than Aesara
    # (an older PyMC3 release).
    [logp0], inarray0 = pm.join_nonshared_inputs([logp], vars, shared)

    tensor_type = inarray0.type
    inarray1 = tensor_type('inarray1')

    logp1 = pm.CallableTensor(logp0)(inarray1)

    f = theano.function([inarray1, inarray0], logp1 - logp0)
    f.trust_input = True
    return f
Example #4
def delta_logp(point, logp, vars, shared):
    # As in Examples #2-#3, but against a newer PyMC3 API: join_nonshared_inputs
    # takes a point (a dict of variable values) and the function is compiled
    # with PyMC3's compile_rv_inplace helper.
    [logp0], inarray0 = pm.join_nonshared_inputs(point, [logp], vars, shared)

    tensor_type = inarray0.type
    inarray1 = tensor_type("inarray1")

    logp1 = pm.CallableTensor(logp0)(inarray1)

    f = compile_rv_inplace([inarray1, inarray0], logp1 - logp0)
    f.trust_input = True
    return f
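Across Examples #2-#4 the pattern is the same: clone the flattened log-probability graph onto a second input vector and compile the difference. Below is a minimal sketch of that pattern in plain Theano, using a toy log-probability in place of a PyMC3 model; the clone step is roughly what pm.CallableTensor does when it is called.

import numpy as np
import theano
import theano.tensor as tt

# A toy log-probability graph over a single flattened input vector,
# standing in for logp0/inarray0 above.
inarray0 = tt.dvector('inarray0')
logp0 = -0.5 * tt.sum(inarray0 ** 2)

# A second input of the same tensor type for the proposed point, and a clone
# of the graph applied to it.
inarray1 = inarray0.type('inarray1')
logp1 = theano.clone(logp0, {inarray0: inarray1}, strict=False)

# Compile logp(proposed) - logp(current): the log acceptance ratio used by
# Metropolis-style samplers.
delta = theano.function([inarray1, inarray0], logp1 - logp0)
delta.trust_input = True

print(delta(np.ones(2), np.zeros(2)))     # -> -1.0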
Example #5
def _calc_elbo(vars, model, n_mcsamples, random_seed):
    """Calculate the approximate ELBO."""
    theano.config.compute_test_value = 'ignore'
    shared = pm.make_shared_replacements(vars, model)

    # Unlike Example #1, sum all factors into a single scalar log-probability.
    factors = [var.logpt for var in model.basic_RVs] + model.potentials
    logpt = tt.add(*map(tt.sum, factors))

    [logp], inarray = pm.join_nonshared_inputs([logpt], vars, shared)

    # Variational parameters (means and log standard deviations), cast to floatX.
    uw = tt.vector('uw')
    uw.tag.test_value = np.concatenate(
        [inarray.tag.test_value, inarray.tag.test_value]
    ).astype(theano.config.floatX)

    elbo = _elbo_t(logp, uw, inarray, n_mcsamples, random_seed)

    return elbo, shared
Example #6
def _make_vectorized_logp_grad(vars, model, X):
    theano.config.compute_test_value = 'ignore'
    shared = pm.make_shared_replacements(vars, model)

    # For some reason model.basic_RVs can't be used here, as its ordering
    # does not match that of vars.
    factors = [var.logpt for var in vars + model.observed_RVs] + model.potentials
    logpt_grad = pm.theanof.gradient(tt.add(*map(tt.sum, factors)))

    # Flatten the free variables into a single input vector.
    [logp_grad], inarray = pm.join_nonshared_inputs([logpt_grad], vars, shared)

    # Callable tensor: re-apply the gradient graph to an arbitrary input.
    def logp_grad_(input):
        return theano.clone(logp_grad, {inarray: input}, strict=False)

    # Map the gradient over the rows of X.
    logp_grad_vec = theano.map(logp_grad_, X)[0]

    return logp_grad_vec
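The value returned by _make_vectorized_logp_grad is a symbolic map of the gradient over the rows of X. A self-contained sketch of the same pattern with a toy log-probability (all names here are illustrative):

import numpy as np
import theano
import theano.tensor as tt

# A toy log-probability over a flattened parameter vector, and its gradient.
inarray = tt.dvector('inarray')
logp = -0.5 * tt.sum(inarray ** 2)
logp_grad = theano.grad(logp, inarray)

# Callable tensor: re-apply the gradient graph to an arbitrary input row.
def logp_grad_(row):
    return theano.clone(logp_grad, {inarray: row}, strict=False)

# Map the gradient over the rows of a matrix of points.
X = tt.dmatrix('X')
logp_grad_vec = theano.map(logp_grad_, X)[0]

f = theano.function([X], logp_grad_vec)
print(f(np.array([[1.0, 2.0], [3.0, 4.0]])))   # row-wise gradients, equal to -X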