def condition(rvs, observations):
    """Return clones of `rvs` conditioned on the given observations.

    Parameters
    ----------
    rvs : sequence of random variables (currently only length 1 is supported)
    observations : dict mapping observed variables to their observed values

    Returns
    -------
    list of cloned output variables in which every observed ancestor has
    been replaced by its observed value.

    Raises
    ------
    NotImplementedError
        If more than one rv is given, or if any rv is an ancestor of an
        observation (which would require a sampler-driven model).
    """
    if len(rvs) > 1:
        raise NotImplementedError()
    observations = typed_items(observations)
    # If none of the rvs appear among the ancestors of any observation,
    # conditioning reduces to plain graph substitution ("easy" conditioning).
    obs_ancestors = ancestors(observations.keys(), blockers=rvs)
    if any(rv in obs_ancestors for rv in rvs):
        # not-so-easy conditioning:
        # we would need to produce a sampler-driven model
        raise NotImplementedError()
    # easy conditioning: clone the subgraph feeding each rv, replacing
    # every observed ancestor with its observed value.
    rvs_anc = ancestors(rvs, blockers=observations.keys())
    frontier = [r for r in rvs_anc
                if r.owner is None or r in observations]
    _, cloned_outputs = clone_keep_replacements(
        frontier, rvs, replacements=observations)
    return cloned_outputs
def full_log_likelihood(assignment):
    """Return the joint log-density of a full assignment of random variables.

    assignment: dict of {rv0: val0, rv1: val1, ...}

    Each of val0, val1, ... is supposed to represent an identical number of
    draws from a distribution. This function returns the real-valued density
    for each one of those draws.

    The output from this function may be a random variable, if not all
    sources of randomness are removed by the assignment.

    Raises
    ------
    ValueError
        If any key of `assignment` is not a random variable.
    """
    for rv in assignment.keys():
        if not is_rv(rv):
            raise ValueError('non-random var in assignment key', rv)
    # Work on a copy so the caller's dict is not mutated when identity
    # entries for unassigned RVs are added below (the original code
    # modified the argument in place).
    assignment = dict(assignment)
    # All random variables that are not assigned should stay as the same
    # object so they can later be replaced; if this is not done this way,
    # they get cloned.
    raw_rvs = [v for v in ancestors(assignment.keys()) if is_raw_rv(v)]
    for rv in raw_rvs:
        if rv not in assignment:
            assignment[rv] = rv
    # Cast assignment values to the right kind of thing.
    assignment = typed_items(assignment)
    pdfs = [lpdf(rv, sample) for rv, sample in assignment.items()]
    # Sum each per-variable log-density, then add them into one scalar.
    lik = tensor.add(*[tensor.sum(p) for p in pdfs])
    # Clone the likelihood graph, substituting assigned values at the
    # frontier (inputs and assigned variables).
    dfs_variables = ancestors([lik], blockers=assignment.keys())
    frontier = [r for r in dfs_variables
                if r.owner is None or r in assignment]
    _, cloned_outputs = clone_keep_replacements(
        frontier, [lik], replacements=assignment)
    cloned_lik, = cloned_outputs
    return cloned_lik