Example #1
    def propose(self, trace, scaffold):
        pnodes = scaffold.getPrincipalNodes()
        currentValues = getCurrentValues(trace, pnodes)

        # So the initial detach will get the gradient right
        registerDeterministicLKernels(trace, scaffold, pnodes, currentValues)
        rhoWeight = self.prepare(trace, scaffold,
                                 True)  # Gradient is in self.rhoDB

        momenta = self.sampleMomenta(currentValues, trace.np_rng)
        start_K = self.kinetic(momenta)

        grad = GradientOfRegen(trace, scaffold, pnodes)

        def grad_potential(values):
            # The potential we want is the negative log density, so
            # negate the gradient of the regen weight.
            return [-dx for dx in grad(values)]

        # Might as well save a gradient computation, since the initial
        # detach does it
        start_grad_pot = [-self.rhoDB.getPartial(pnode) for pnode in pnodes]

        # Smashes the trace but leaves it a torus
        (proposed_values, end_K) = self.evolve(grad_potential, currentValues,
                                               start_grad_pot, momenta)

        xiWeight = grad.regen(proposed_values)  # Mutates the trace
        # The weight arithmetic follows from the Hamiltonian being
        # H = -weight + kinetic(momenta)
        return (trace, xiWeight - rhoWeight + start_K - end_K)
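
The evolve step above is not shown in these examples. Below is a minimal
leapfrog sketch over plain Python floats, with hypothetical epsilon and
num_steps parameters; the real operator works over Venture value objects
and mutates the trace through each call to grad_potential.

def leapfrog_evolve(grad_potential, start_values, start_grad_pot, momenta,
                    epsilon=0.01, num_steps=10):
    # Half step for the momenta using the gradient saved from the
    # initial detach, then alternate full position and momentum steps.
    p = [pi - 0.5 * epsilon * gi
         for (pi, gi) in zip(momenta, start_grad_pot)]
    q = list(start_values)
    for step in range(num_steps):
        q = [qi + epsilon * pi for (qi, pi) in zip(q, p)]
        g = grad_potential(q)
        # Full momentum step, except a half step to close the trajectory.
        scale = epsilon if step < num_steps - 1 else 0.5 * epsilon
        p = [pi - scale * gi for (pi, gi) in zip(p, g)]
    end_K = sum(pi * pi for pi in p) / 2.0
    return (q, end_K)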
Example #2
    def __call__(self, values):
        """Returns the gradient of the weight of regenerating along
        an (implicit) scaffold starting with the given values.  Smashes
        the trace, but leaves it a torus.  Assumes there are no delta
        kernels around."""
        # TODO Assert that no delta kernels are requested?
        self.fixed_regen(values)
        new_scaffold = constructScaffold(self.trace, [OrderedSet(self.pnodes)])
        registerDeterministicLKernels(self.trace, new_scaffold, self.pnodes,
                                      values)
        (_, rhoDB) = detachAndExtract(self.trace, new_scaffold, True)
        self.scaffold = new_scaffold
        return [rhoDB.getPartial(pnode) for pnode in self.pnodes]
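
For context, a plausible skeleton of the class this __call__ belongs to.
Only __call__ and regen appear in these examples, so the shape of
__init__ and fixed_regen is an assumption, sketched so that repeated
gradient evaluations along one trajectory replay the same randomness.

class GradientOfRegen(object):
    # Assumed skeleton; only __call__ and regen are shown above.
    def __init__(self, trace, scaffold, pnodes):
        self.trace = trace
        self.scaffold = scaffold
        self.pnodes = pnodes
        # Snapshot the trace's numpy random state so fixed_regen can
        # replay the same randomness on every gradient evaluation.
        self.rng_state = trace.np_rng.get_state()

    def fixed_regen(self, values):
        # Assumed behavior: rewind the random state before regenerating,
        # so any brush is resampled identically each time.
        self.trace.np_rng.set_state(self.rng_state)
        return self.regen(values)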
Example #3
def log_joint_at(trace, args):
  (scaffolders, transitions, _) = dispatch_arguments(trace, ("bogon",) + args)
  if transitions > 0:
    # Don't want to think about the interaction between 'each' and the
    # value this is supposed to return.
    assert len(scaffolders) == 1, "log_joint_at doesn't support 'each'"
    scaffold = scaffolders[0].sampleIndex(trace)
    pnodes = scaffold.getPrincipalNodes()
    currentValues = getCurrentValues(trace, pnodes)
    registerDeterministicLKernels(trace, scaffold, pnodes, currentValues,
      unconditional=True)
    (_rhoWeight, rhoDB) = detachAndExtract(trace, scaffold)
    xiWeight = regenAndAttach(trace, scaffold, True, rhoDB, OrderedDict())
    # Old state restored, don't need to do anything else
    return xiWeight
  else:
    return 0.0
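
The detach-then-restore pattern in the middle of log_joint_at stands on
its own. A hypothetical helper distilling it: detachAndExtract records
the current state in a rhoDB, and regenAndAttach with shouldRestore=True
replays that state out of the rhoDB, so the trace ends exactly where it
started while the regeneration weight is measured.

def weight_in_place(trace, scaffold):
    # Hypothetical helper: measure the regen weight of the trace's
    # current state without changing it.  Uses the same detachAndExtract
    # and regenAndAttach as the examples above.
    (_rhoWeight, rhoDB) = detachAndExtract(trace, scaffold)
    # shouldRestore=True replays the detached state out of the rhoDB.
    return regenAndAttach(trace, scaffold, True, rhoDB, OrderedDict())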
Example #4
    def propose(self, trace, scaffold):
        pnodes = scaffold.getPrincipalNodes()
        currentValues = getCurrentValues(trace, pnodes)

        # So the initial detach will get the gradient right
        registerDeterministicLKernels(trace, scaffold, pnodes, currentValues)
        _rhoWeight = self.prepare(trace, scaffold,
                                  True)  # Gradient is in self.rhoDB

        grad = GradientOfRegen(trace, scaffold, pnodes)

        # Might as well save a gradient computation, since the initial
        # detach does it
        start_grad = [self.rhoDB.getPartial(pnode) for pnode in pnodes]

        # Smashes the trace but leaves it a torus
        proposed_values = self.evolve(grad, currentValues, start_grad)

        _xiWeight = grad.regen(proposed_values)  # Mutates the trace

        return (trace, 1000)  # It's MAP -- try to force acceptance
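
Here evolve takes only the gradient and a starting point, with no
momenta: this operator climbs toward the MAP by gradient ascent rather
than simulating Hamiltonian dynamics. A minimal sketch over plain Python
floats, with hypothetical step_size and steps parameters:

def gradient_ascent_evolve(grad, start_values, start_grad,
                           step_size=0.001, steps=100):
    # Move the principal values uphill along the gradient of the regen
    # weight (the log density).  Each call to grad re-evaluates the
    # gradient at the trial values and smashes the trace to a torus,
    # as in GradientOfRegen.__call__ above.
    values = list(start_values)
    g = list(start_grad)
    for _ in range(steps):
        values = [v + step_size * dv for (v, dv) in zip(values, g)]
        g = grad(values)
    return values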
Example #5
    def compute_particles(self, trace, scaffold):
        assertTrace(trace, scaffold)

        pnodes = scaffold.getPrincipalNodes()
        currentValues = getCurrentValues(trace, pnodes)

        registerDeterministicLKernels(trace, scaffold, pnodes, currentValues)

        rhoWeight, self.rhoDB = detachAndExtract(trace, scaffold)
        xiWeights = []
        xiParticles = []

        allSetsOfValues = getCartesianProductOfEnumeratedValues(trace, pnodes)

        for newValues in allSetsOfValues:
            if newValues == currentValues:
                # If there are random choices downstream, keep their current values.
                # This follows the auxiliary variable method in Neal 2000,
                # "Markov Chain Sampling Methods for Dirichlet Process Models"
                # (Algorithm 8 with m = 1).
                # Otherwise, we may target the wrong stationary distribution.
                # See testEnumerativeGibbsBrushRandomness in
                # test/inference_language/test_enumerative_gibbs.py for an
                # example.
                shouldRestore = True
                omegaDB = self.rhoDB
            else:
                shouldRestore = False
                omegaDB = OmegaDB()
            xiParticle = self.copy_trace(trace)
            assertTorus(scaffold)
            registerDeterministicLKernels(trace, scaffold, pnodes, newValues)
            xiParticles.append(xiParticle)
            xiWeights.append(
                regenAndAttach(xiParticle, scaffold, shouldRestore, omegaDB,
                               OrderedDict()))
            # if shouldRestore:
            #   assert_almost_equal(xiWeights[-1], rhoWeight)
        return (xiParticles, xiWeights)
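
compute_particles only builds the candidates; an enumerative Gibbs
proposal then selects one in proportion to exp(weight). A hypothetical
selection step, exponentiating stably by subtracting the max log weight:

import math

def select_particle(xiParticles, xiWeights, py_rng):
    # Sample a particle with probability proportional to exp(weight).
    m = max(xiWeights)
    probs = [math.exp(w - m) for w in xiWeights]
    total = sum(probs)
    r = py_rng.random() * total
    for (particle, p) in zip(xiParticles, probs):
        r -= p
        if r <= 0.0:
            return particle
    return xiParticles[-1]  # Guard against floating-point slop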
Example #6
    def regen(self, values):
        # Commits the given values: regenerates along the stored scaffold
        # with a fresh OmegaDB and returns the regeneration weight.
        registerDeterministicLKernels(self.trace, self.scaffold, self.pnodes,
                                      values)
        return regenAndAttach(self.trace, self.scaffold, False, OmegaDB(),
                              OrderedDict())
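
Putting __call__ and regen together, a usage round trip as in the HMC
proposal of Example #1. This is illustrative only: trace, scaffold,
pnodes, currentValues, and new_values are assumed to be in scope, as
they are inside propose above.

grad = GradientOfRegen(trace, scaffold, pnodes)
gradient = grad(currentValues)     # Evaluates the gradient; trace is a torus
xiWeight = grad.regen(new_values)  # Commits new_values; trace is whole again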