Example #1
    def astep(self, q0: RaveledVars, logp) -> Tuple[RaveledVars, List[Dict[str, Any]]]:

        logp_q0 = logp(q0)
        point_map_info = q0.point_map_info
        q0 = q0.data

        # Convert adaptive_scale_factor to a jump probability
        p_jump = 1.0 - 0.5 ** self.scaling

        rand_array = nr.random(q0.shape)
        q = np.copy(q0)
        # Locations where switches occur, according to p_jump
        switch_locs = rand_array < p_jump
        q[switch_locs] = True - q[switch_locs]
        logp_q = logp(RaveledVars(q, point_map_info))

        accept = logp_q - logp_q0
        q_new, accepted = metrop_select(accept, q, q0)
        self.accepted += accepted

        stats = {
            "tune": self.tune,
            "accept": np.exp(accept),
            "p_jump": p_jump,
        }

        q_new = RaveledVars(q_new, point_map_info)

        return q_new, [stats]
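
This example flips each binary entry independently with probability p_jump = 1 - 0.5 ** scaling and then accepts or rejects the whole proposal in one Metropolis step. A minimal, self-contained sketch of the same idea, using plain NumPy and a toy log-probability (all names below are illustrative, not taken from the snippet above):

import numpy as np

rng = np.random.default_rng(123)

def toy_logp(q):
    # toy target: independent Bernoulli(0.7) bits
    p = 0.7
    return np.sum(q * np.log(p) + (1 - q) * np.log(1 - p))

def binary_metropolis_step(q0, logp, scaling=1.0):
    p_jump = 1.0 - 0.5 ** scaling             # jump probability per bit, as above
    q = q0.copy()
    switch_locs = rng.random(q0.shape) < p_jump
    q[switch_locs] = 1 - q[switch_locs]       # flip the selected bits
    log_ratio = logp(q) - logp(q0)            # symmetric proposal, so no correction term
    if np.log(rng.random()) < log_ratio:
        return q, True
    return q0, False

q = np.zeros(10, dtype=np.int64)
for _ in range(5):
    q, accepted = binary_metropolis_step(q, toy_logp)
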
Example #2
    def astep(self, q0):
        if not self.steps_until_tune and self.tune:
            if self.tune == "scaling":
                self.scaling = tune(self.scaling, self.accepted / float(self.tune_interval))
            elif self.tune == "lambda":
                self.lamb = tune(self.lamb, self.accepted / float(self.tune_interval))
            # Reset counter
            self.steps_until_tune = self.tune_interval
            self.accepted = 0

        epsilon = self.proposal_dist() * self.scaling

        # differential evolution proposal
        # select two other chains
        ir1, ir2 = np.random.choice(self.other_chains, 2, replace=False)
        r1 = self.bij.map(self.population[ir1])
        r2 = self.bij.map(self.population[ir2])
        # propose a jump
        q = floatX(q0 + self.lamb * (r1 - r2) + epsilon)

        accept = self.delta_logp(q, q0)
        q_new, accepted = metrop_select(accept, q, q0)
        self.accepted += accepted

        self.steps_until_tune -= 1

        stats = {
            "tune": self.tune,
            "scaling": self.scaling,
            "lambda": self.lamb,
            "accept": np.exp(accept),
            "accepted": accepted,
        }

        return q_new, [stats]
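
The proposal in this example is the classic DE-MC move: the jump direction is the difference between the current states of two randomly chosen other chains, scaled by lambda, plus a small noise term epsilon. A rough standalone sketch of just that proposal; the population container, the other_chains index list, and the Gaussian noise used as a stand-in for proposal_dist() are assumptions for illustration:

import numpy as np

rng = np.random.default_rng(0)

def de_proposal(q0, population, other_chains, lamb, scaling):
    # population: one 1-D state vector per chain; other_chains: indices of the other chains
    ir1, ir2 = rng.choice(other_chains, 2, replace=False)
    epsilon = rng.normal(scale=scaling, size=q0.shape)    # stand-in for proposal_dist()
    return q0 + lamb * (population[ir1] - population[ir2]) + epsilon

ndim = 3
population = [rng.normal(size=ndim) for _ in range(6)]
# 2.38 / sqrt(2 d) is the usual DE-MC default choice for lambda
q = de_proposal(population[0], population, [1, 2, 3, 4, 5],
                lamb=2.38 / np.sqrt(2 * ndim), scaling=1e-3)
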
Example #3
    def astep(self, q0):
        if not self.steps_until_tune and self.tune:
            # Tune scaling parameter
            self.scaling = tune(self.scaling, self.accepted / float(self.tune_interval))
            # Reset counter
            self.steps_until_tune = self.tune_interval
            self.accepted = 0

        delta = self.proposal_dist() * self.scaling

        if self.any_discrete:
            if self.all_discrete:
                delta = np.round(delta, 0).astype("int64")
                q0 = q0.astype("int64")
                q = (q0 + delta).astype("int64")
            else:
                delta[self.discrete] = np.round(delta[self.discrete], 0)
                q = q0 + delta
        else:
            q = floatX(q0 + delta)

        accept = self.delta_logp(q, q0)
        q_new, accepted = metrop_select(accept, q, q0)
        self.accepted += accepted

        self.steps_until_tune -= 1

        stats = {
            "tune": self.tune,
            "scaling": self.scaling,
            "accept": np.exp(accept),
            "accepted": accepted,
        }

        return q_new, [stats]
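
Between tuning intervals the step rescales the proposal width from the fraction of accepted steps; the exact rule lives in the `tune` helper. A common acceptance-rate feedback heuristic of this kind looks like the following (shown purely as an illustration of the idea, not as the implementation used above):

def tune_scaling(scale, acc_rate):
    # shrink the proposal when almost nothing is accepted, widen it when almost
    # everything is; the thresholds here are typical random-walk choices
    if acc_rate < 0.001:
        return scale * 0.1
    elif acc_rate < 0.05:
        return scale * 0.5
    elif acc_rate < 0.2:
        return scale * 0.9
    elif acc_rate > 0.95:
        return scale * 10.0
    elif acc_rate > 0.75:
        return scale * 2.0
    elif acc_rate > 0.5:
        return scale * 1.1
    return scale
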
Example #4
    def astep(self, q0):
        if not self.steps_until_tune and self.tune:
            # Tune scaling parameter
            self.scaling = tune(self.scaling,
                                self.accepted / float(self.tune_interval))
            # Reset counter
            self.steps_until_tune = self.tune_interval
            self.accepted = 0

        delta = self.proposal_dist() * self.scaling
        if self.any_discrete:
            # indices (within the discrete subset) that are currently 1 or 0
            oneLocations = np.where(q0[self.discrete] == 1)[0]
            zeroLocations = np.where(q0[self.discrete] == 0)[0]
            # delta[oneLocations - len(self.discrete)] *= self.sigmaFactor
            # (would increase the variance for entries whose indicator is 1; the
            # oneLocations are the betas corresponding to gamma = 1, under the
            # assumption that the variables are ordered beta, gamma, ...)
        if self.any_discrete:
            if self.all_discrete:
                delta = np.round(delta, 0).astype('int64')
                q0 = q0.astype('int64')
                q = (q0 + delta).astype('int64')
            else:
                loc = nr.choice(self.discrete)  # pick a discrete location to flip
                q = q0 + delta  # perturb every entry by delta
                # keep the discrete entries at their current values, i.e. disregard
                # the delta change there
                q[self.discrete] = q0[self.discrete]
                r = nr.rand()
                # difference between the current number of active entries and the
                # number expected under the inclusion proportion varProp
                dif = len(oneLocations) - int(self.varProp * len(self.discrete))
                # http://www-stat.wharton.upenn.edu/~edgeorge/Research_papers/ims.pdf
                if loc in zeroLocations and dif > 0:
                    # adding another significant feature while there are already more
                    # than expected: with probability 1 - exp(-dif), also flip an
                    # existing active feature
                    if r > np.exp(-dif):
                        loc2 = nr.choice(oneLocations)
                        q[loc2] += 1
                if loc in oneLocations and dif < 0:
                    # removing a feature while there are already too few: with
                    # probability 1 - exp(dif), also flip an inactive feature
                    if r > np.exp(dif):
                        loc2 = nr.choice(zeroLocations)
                        q[loc2] += 1
                q[loc] += 1  # flip the chosen bit; mod 2 keeps values in {0, 1}
                q[self.discrete] = q[self.discrete] % 2

        else:
            q = q0 + delta

        q_new = metrop_select(self.delta_logp(q, q0), q, q0)

        if q_new is q:
            self.accepted += 1

        self.steps_until_tune -= 1

        return q_new
Example #5
    def astep(self, q0: RaveledVars) -> Tuple[RaveledVars, List[Dict[str, Any]]]:

        point_map_info = q0.point_map_info
        q0 = q0.data

        # same tuning scheme as DEMetropolis
        if not self.steps_until_tune and self.tune:
            if self.tune_target == "scaling":
                self.scaling = tune(self.scaling, self.accepted / float(self.tune_interval))
            elif self.tune_target == "lambda":
                self.lamb = tune(self.lamb, self.accepted / float(self.tune_interval))
            # Reset counter
            self.steps_until_tune = self.tune_interval
            self.accepted = 0

        epsilon = self.proposal_dist() * self.scaling

        it = len(self._history)
        # use the DE-MCMC-Z proposal scheme as soon as the history has 2 entries
        if it > 1:
            # differential evolution proposal
            # select two distinct past samples from the chain's history
            iz1 = np.random.randint(it)
            iz2 = np.random.randint(it)
            while iz2 == iz1:
                iz2 = np.random.randint(it)

            z1 = self._history[iz1]
            z2 = self._history[iz2]
            # propose a jump
            q = floatX(q0 + self.lamb * (z1 - z2) + epsilon)
        else:
            # propose just with noise in the first 2 iterations
            q = floatX(q0 + epsilon)

        accept = self.delta_logp(q, q0)
        q_new, accepted = metrop_select(accept, q, q0)
        self.accepted += accepted
        self._history.append(q_new)

        self.steps_until_tune -= 1

        stats = {
            "tune": self.tune,
            "scaling": self.scaling,
            "lambda": self.lamb,
            "accept": np.exp(accept),
            "accepted": accepted,
        }

        q_new = RaveledVars(q_new, point_map_info)

        return q_new, [stats]
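
Unlike Example #2, which differences the current states of two other chains, this variant (DE-MCMC-Z) differences two distinct samples drawn from the chain's own history, falling back to a plain random-walk move until the history has at least two entries. A compact sketch of that proposal logic, with the history kept as a plain list (names and the Gaussian stand-in for proposal_dist() are illustrative):

import numpy as np

rng = np.random.default_rng(1)

def demcmc_z_proposal(q0, history, lamb, scaling):
    epsilon = rng.normal(scale=scaling, size=q0.shape)        # stand-in for proposal_dist()
    if len(history) > 1:
        iz1, iz2 = rng.choice(len(history), 2, replace=False)  # two distinct past samples
        return q0 + lamb * (history[iz1] - history[iz2]) + epsilon
    return q0 + epsilon                                        # not enough history yet

history = [rng.normal(size=2) for _ in range(5)]
q = demcmc_z_proposal(history[-1], history, lamb=0.9, scaling=1e-3)
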
Example #6
def metrop_kernel(
    q_old,
    old_tempered_logp,
    old_prior,
    old_likelihood,
    draw,
    proposal,
    scalings,
    any_discrete,
    all_discrete,
    discrete,
    n_steps,
    prior_logp,
    likelihood_logp,
    beta,
):
    """
    Metropolis kernel
    """
    deltas = np.squeeze(proposal(n_steps) * scalings[draw])

    accepted = 0
    for n_step in range(n_steps):
        delta = deltas[n_step]

        if any_discrete:
            if all_discrete:
                delta = np.round(delta, 0).astype("int64")
                q_old = q_old.astype("int64")
                q_new = (q_old + delta).astype("int64")
            else:
                delta[discrete] = np.round(delta[discrete], 0)
                q_new = floatX(q_old + delta)
        else:
            q_new = floatX(q_old + delta)

        ll = likelihood_logp(q_new)
        pl = prior_logp(q_new)

        new_tempered_logp = pl + ll * beta

        q_old, accept = metrop_select(new_tempered_logp - old_tempered_logp, q_new, q_old)

        if accept:
            accepted += 1
            old_prior = pl
            old_likelihood = ll
            old_tempered_logp = new_tempered_logp

    return q_old, accepted / n_steps, old_prior, old_likelihood
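
The kernel targets a tempered posterior: the acceptance ratio is computed from log p_beta(q) = prior_logp(q) + beta * likelihood_logp(q), so beta = 0 corresponds to sampling the prior and beta = 1 to the full posterior. A tiny numeric illustration with made-up log-density values:

def tempered_logp(prior_logp, likelihood_logp, beta):
    return prior_logp + beta * likelihood_logp

pl, ll = -1.2, -3.4          # made-up prior and likelihood log-densities
for beta in (0.0, 0.5, 1.0):
    print(beta, tempered_logp(pl, ll, beta))   # -1.2, -2.9, -4.6
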
Example #7
    def astep_unif(self, q0, logp):
        dimcats = self.dimcats
        if self.shuffle_dims:
            nr.shuffle(dimcats)

        q = np.copy(q0)
        logp_curr = logp(q)

        for dim, k in dimcats:
            curr_val, q[dim] = q[dim], sample_except(k, q[dim])
            logp_prop = logp(q)
            q[dim], accepted = metrop_select(logp_prop - logp_curr, q[dim], curr_val)
            if accepted:
                logp_curr = logp_prop
        return q
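
Each dimension is updated in turn: a new category is proposed uniformly among the k - 1 values other than the current one and the move is accepted with a Metropolis step (the uniform proposal is symmetric, so no correction term is needed). One way a `sample_except` helper with that behavior can be written (an assumption about its contract, not a copy of the library source):

import numpy.random as nr

def sample_except(limit, excluded):
    # draw uniformly from {0, ..., limit - 1} excluding `excluded`
    candidate = nr.choice(limit - 1)
    if candidate >= excluded:
        candidate += 1
    return candidate

draws = [sample_except(4, 2) for _ in range(10)]   # values in {0, 1, 3}
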
Example #8
    def astep(self, q0, logp):
        order = self.order
        if self.shuffle_dims:
            nr.shuffle(order)

        q = np.copy(q0)
        logp_curr = logp(q)

        for idx in order:
            # No need to do a Metropolis update if the same value is proposed,
            # since the result is the same whether it is accepted or rejected
            if nr.rand() < self.transit_p:
                curr_val, q[idx] = q[idx], True - q[idx]
                logp_prop = logp(q)
                q[idx], accepted = metrop_select(logp_prop - logp_curr, q[idx], curr_val)
                if accepted:
                    logp_curr = logp_prop

        return q
Example #9
    def astep(self, q0: RaveledVars) -> Tuple[RaveledVars, List[Dict[str, Any]]]:

        point_map_info = q0.point_map_info
        q0 = q0.data

        if not self.steps_until_tune and self.tune:
            # Tune scaling parameter
            self.scaling = tune(self.scaling, self.accepted / float(self.tune_interval))
            # Reset counter
            self.steps_until_tune = self.tune_interval
            self.accepted = 0

        delta = self.proposal_dist() * self.scaling

        if self.any_discrete:
            if self.all_discrete:
                delta = np.round(delta, 0).astype("int64")
                q0 = q0.astype("int64")
                q = (q0 + delta).astype("int64")
            else:
                delta[self.discrete] = np.round(delta[self.discrete], 0)
                q = q0 + delta
        else:
            q = floatX(q0 + delta)

        accept = self.delta_logp(q, q0)
        q_new, accepted = metrop_select(accept, q, q0)

        self.accepted += accepted

        self.steps_until_tune -= 1

        stats = {
            "tune": self.tune,
            "scaling": self.scaling,
            "accept": np.exp(accept),
            "accepted": accepted,
        }

        q_new = RaveledVars(q_new, point_map_info)

        return q_new, [stats]
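
All of these examples delegate the accept/reject decision to metrop_select, which receives the log acceptance ratio together with the proposed and current points and returns the selected point plus an accepted flag. Its contract, as relied on above, amounts to the following sketch (an approximation of the expected behavior, not the library source):

import numpy as np
import numpy.random as nr

def metrop_select_sketch(log_ratio, q, q0):
    # accept with probability min(1, exp(log_ratio)); a non-finite ratio is rejected
    if np.isfinite(log_ratio) and np.log(nr.uniform()) < log_ratio:
        return q, True
    return q0, False
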
Example #10
    def astep_unif(self, q0: RaveledVars, logp) -> RaveledVars:

        point_map_info = q0.point_map_info
        q0 = q0.data

        dimcats = self.dimcats
        if self.shuffle_dims:
            nr.shuffle(dimcats)

        q = RaveledVars(np.copy(q0), point_map_info)
        logp_curr = logp(q)

        for dim, k in dimcats:
            curr_val, q.data[dim] = q.data[dim], sample_except(k, q.data[dim])
            logp_prop = logp(q)
            q.data[dim], accepted = metrop_select(logp_prop - logp_curr, q.data[dim], curr_val)
            if accepted:
                logp_curr = logp_prop

        return q
Example #11
    def astep(self, q0: RaveledVars) -> Tuple[RaveledVars, List[Dict[str, Any]]]:

        point_map_info = q0.point_map_info
        q0 = q0.data

        if not self.steps_until_tune and self.tune:
            if self.tune == "scaling":
                self.scaling = tune(self.scaling, self.accepted / float(self.tune_interval))
            elif self.tune == "lambda":
                self.lamb = tune(self.lamb, self.accepted / float(self.tune_interval))
            # Reset counter
            self.steps_until_tune = self.tune_interval
            self.accepted = 0

        epsilon = self.proposal_dist() * self.scaling

        # differential evolution proposal
        # select two other chains
        ir1, ir2 = np.random.choice(self.other_chains, 2, replace=False)
        r1 = DictToArrayBijection.map(self.population[ir1])
        r2 = DictToArrayBijection.map(self.population[ir2])
        # propose a jump
        q = floatX(q0 + self.lamb * (r1.data - r2.data) + epsilon)

        accept = self.delta_logp(q, q0)
        q_new, accepted = metrop_select(accept, q, q0)
        self.accepted += accepted

        self.steps_until_tune -= 1

        stats = {
            "tune": self.tune,
            "scaling": self.scaling,
            "lambda": self.lamb,
            "accept": np.exp(accept),
            "accepted": accepted,
        }

        q_new = RaveledVars(q_new, point_map_info)

        return q_new, [stats]
Example #12
    def astep(self, q0: RaveledVars, logp: Callable[[RaveledVars], np.ndarray]) -> RaveledVars:

        order = self.order
        if self.shuffle_dims:
            nr.shuffle(order)

        q = RaveledVars(np.copy(q0.data), q0.point_map_info)

        logp_curr = logp(q)

        for idx in order:
            # No need to do a Metropolis update if the same value is proposed,
            # since the result is the same whether it is accepted or rejected
            if nr.rand() < self.transit_p:
                curr_val, q.data[idx] = q.data[idx], True - q.data[idx]
                logp_prop = logp(q)
                q.data[idx], accepted = metrop_select(logp_prop - logp_curr, q.data[idx], curr_val)
                if accepted:
                    logp_curr = logp_prop

        return q
Example #13
    def astep(self, q0, logp):

        # Convert adaptive_scale_factor to a jump probability
        p_jump = 1.0 - 0.5**self.scaling

        rand_array = nr.random(q0.shape)
        q = np.copy(q0)
        # Locations where switches occur, according to p_jump
        switch_locs = rand_array < p_jump
        q[switch_locs] = True - q[switch_locs]

        accept = logp(q) - logp(q0)
        q_new, accepted = metrop_select(accept, q, q0)
        self.accepted += accepted

        stats = {
            "tune": self.tune,
            "accept": np.exp(accept),
            "p_jump": p_jump,
        }

        return q_new, [stats]
Example #14
    def astep(self, q0):
        """One MLDA step, given current sample q0"""
        # Check if the tuning flag has been changed and if yes,
        # change the proposal's tuning flag and reset self.accepted
        # This is triggered by _iter_sample while the highest-level MLDA step
        # method is running. It then propagates to all levels.
        if self.proposal_dist.tune != self.tune:
            self.proposal_dist.tune = self.tune
            # set tune in sub-methods of compound stepper explicitly because
            # it is not set within sample.py (only the CompoundStep's tune flag is)
            if isinstance(self.step_method_below, CompoundStep):
                for method in self.step_method_below.methods:
                    method.tune = self.tune
            self.accepted = 0

        # Convert current sample from numpy array ->
        # dict before feeding to proposal
        q0_dict = DictToArrayBijection.rmap(q0)

        # Set subchain_selection (which sample from the coarse chain
        # is passed as a proposal to the fine chain). If variance
        # reduction is used, a random sample is selected as proposal.
        # If variance reduction is not used, the last sample is
        # selected as proposal.
        if self.variance_reduction:
            self.subchain_selection = np.random.randint(0, self.subsampling_rate)
        else:
            self.subchain_selection = self.subsampling_rate - 1
        self.proposal_dist.subchain_selection = self.subchain_selection

        # Call the recursive DA proposal to get proposed sample
        # and convert dict -> numpy array
        pre_q = self.proposal_dist(q0_dict)
        q = DictToArrayBijection.map(pre_q)

        # Evaluate MLDA acceptance log-ratio
        # If proposed sample from lower levels is the same as current one,
        # do not calculate likelihood, just set accept to 0.0
        if (q.data == q0.data).all():
            accept = np.float64(0.0)
            skipped_logp = True
        else:
            accept = self.delta_logp(q.data, q0.data) + self.delta_logp_below(q0.data, q.data)
            skipped_logp = False

        # Accept/reject sample - next sample is stored in q_new
        q_new, accepted = metrop_select(accept, q, q0)
        if skipped_logp:
            accepted = False

        # if sample is accepted, update self.Q_last with the sample's Q value
        # runs only for VR or when store_Q_fine is True
        if self.variance_reduction or self.store_Q_fine:
            if accepted and not skipped_logp:
                self.Q_last = self.model.Q.get_value()

        # Variance reduction
        if self.variance_reduction:
            self.update_vr_variables(accepted, skipped_logp)

        # Adaptive error model - runs only during tuning.
        if self.tune and self.adaptive_error_model:
            self.update_error_estimate(accepted, skipped_logp)

        # Update acceptance counter
        self.accepted += accepted

        stats = {"tune": self.tune, "accept": np.exp(accept), "accepted": accepted}

        # Save the VR statistics to the stats dictionary (only happens in the
        # top MLDA level)
        if (self.variance_reduction or self.store_Q_fine) and not self.is_child:
            q_stats = {}
            if self.variance_reduction:
                m = self
                for level in range(self.num_levels - 1, 0, -1):
                    # save the Q differences for this level and iteration
                    q_stats[f"Q_{level}_{level - 1}"] = np.array(m.Q_diff)
                    # this makes sure Q_diff is reset for
                    # the next iteration
                    m.Q_diff = []
                    if level == 1:
                        break
                    m = m.step_method_below
                q_stats["Q_0"] = np.array(m.Q_base_full)
                m.Q_base_full = []
            if self.store_Q_fine:
                q_stats["Q_" + str(self.num_levels - 1)] = np.array(self.Q_last)
            stats = {**stats, **q_stats}

        # Capture the base tuning stats from the level below.
        self.base_tuning_stats = []

        if isinstance(self.step_method_below, MLDA):
            self.base_tuning_stats = self.step_method_below.base_tuning_stats
        elif isinstance(self.step_method_below, MetropolisMLDA):
            self.base_tuning_stats.append({"base_scaling": self.step_method_below.scaling[0]})
        elif isinstance(self.step_method_below, DEMetropolisZMLDA):
            self.base_tuning_stats.append(
                {
                    "base_scaling": self.step_method_below.scaling[0],
                    "base_lambda": self.step_method_below.lamb,
                }
            )
        elif isinstance(self.step_method_below, CompoundStep):
            # Below method is CompoundStep
            for method in self.step_method_below.methods:
                if isinstance(method, MetropolisMLDA):
                    self.base_tuning_stats.append({"base_scaling": method.scaling[0]})
                elif isinstance(method, DEMetropolisZMLDA):
                    self.base_tuning_stats.append(
                        {"base_scaling": method.scaling[0], "base_lambda": method.lamb}
                    )

        return q_new, [stats] + self.base_tuning_stats
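
The MLDA acceptance log-ratio combines the fine-level ratio for the proposed point with the coarse-level ratio in the reverse direction, log alpha = [logp_fine(q) - logp_fine(q0)] + [logp_coarse(q0) - logp_coarse(q)], which appears to be how the delta_logp and delta_logp_below terms are combined above. A small sketch with stand-in log-density functions (all names here are illustrative):

import numpy as np

def mlda_log_accept(q, q0, logp_fine, logp_coarse):
    # delta_logp(q, q0)       ~ logp_fine(q)    - logp_fine(q0)
    # delta_logp_below(q0, q) ~ logp_coarse(q0) - logp_coarse(q)
    return (logp_fine(q) - logp_fine(q0)) + (logp_coarse(q0) - logp_coarse(q))

logp_fine = lambda x: -0.5 * np.sum(x ** 2)              # toy fine-level target
logp_coarse = lambda x: -0.5 * np.sum((x / 1.1) ** 2)    # toy coarse approximation
log_a = mlda_log_accept(np.array([0.2]), np.array([0.5]), logp_fine, logp_coarse)
accept_prob = np.exp(min(0.0, log_a))
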