Example 1
    def sampling_iteration(self):
        """perform a single sampling step
        """
        # states
        pre_state = self.ladder_states.copy()
        f_state = self.ladder_states.copy().F()
        fl_state = self.ladder_states.copy().FL()

        # rates
        fl_rates = self.acceptance_rate(self.ladder_states, fl_state)
        f_rates = self.f_rate * np.ones(self.nbatch)

        # draws from exponential distribution
        fl_draws = self.draw_from(fl_rates)
        f_draws = self.draw_from(f_rates)

        # select minimum waiting times
        fl_idx, f_idx = min_idx([fl_draws, f_draws])
        waiting_times = np.minimum(f_draws, fl_draws)[0]

        self.n += 1

        # update the sampled distribution with the waiting times
        if self.n > self.burn_in_steps:
            self.update_distr(waiting_times)

        self.ladder_states.update(fl_idx, fl_state)
        self.ladder_states.update(f_idx, f_state)

        if self.n > self.burn_in_steps:
            self.update_empirical_transition_matrix(pre_state)
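Each iteration races independent exponential clocks: one waiting time is drawn per candidate transition and, for every chain in the batch, the transition whose clock fires first is applied, with the minimum draw recorded as the dwell time. `draw_from` is not part of this listing, so the sketch below only illustrates the behaviour the calls above appear to assume; the name, the (1, nbatch) return shape, and the use of NumPy's exponential sampler are inferences, not the project's implementation.

import numpy as np

def draw_from(rates):
    """Draw one exponential waiting time per entry of `rates`.

    Assumes strictly positive, finite rates; the mean waiting time for
    rate r is 1 / r.  Returns shape (1, n) to match the arrays the
    samplers above index with [0].
    """
    rates = np.asarray(rates, dtype=float)
    return np.random.exponential(scale=1.0 / rates).reshape(1, -1)

# toy race between an FL clock and an F clock for a batch of 4 chains
fl_draws = draw_from(np.array([0.5, 2.0, 1.0, 3.0]))
f_draws = draw_from(np.ones(4))

waiting_times = np.minimum(fl_draws, f_draws)[0]                 # per-chain dwell time
winner = np.argmin(np.concatenate((fl_draws, f_draws)), axis=0)  # 0 = FL fired first, 1 = F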
Example 2
    def sampling_iteration(self):
        """Perform a single sampling step
        """
        # states
        pre_state = self.ladder_states.copy()
        f_state = self.ladder_states.copy().F()
        l_state = self.ladder_states.copy().F().FL()
        # aka l^-1 state
        flf_state = self.ladder_states.copy().FL().F()

        # rates
        l_rates = self.acceptance_rate(self.ladder_states, l_state)
        flf_rates = self.acceptance_rate(self.ladder_states, flf_state)
        f_rates = flf_rates - np.min((flf_rates, l_rates), axis=0)

        # draws from exponential distribution
        l_draws = self.draw_from(l_rates)
        f_draws = self.draw_from(f_rates)

        # select minimum waiting times
        l_idx, f_idx = min_idx([l_draws, f_draws])
        waiting_times = np.minimum(f_draws, l_draws)[0]

        self.n += 1

        # update the sampled distribution with the waiting times
        if self.n > self.burn_in_steps:
            self.update_distr(waiting_times)

        self.ladder_states.update(l_idx, l_state)
        self.ladder_states.update(f_idx, f_state)

        if self.n > self.burn_in_steps:
            self.update_empirical_transition_matrix(pre_state)
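Note that the F rate above is the part of the FLF rate that exceeds the L rate, so it is non-negative and zero wherever the reverse move already dominates. A small numeric check of that identity follows; the rate values are made up for illustration and are not output of acceptance_rate.

import numpy as np

# illustrative per-chain rates (not values produced by acceptance_rate)
flf_rates = np.array([0.2, 1.5, 0.7, 3.0])
l_rates = np.array([0.5, 1.0, 0.7, 0.1])

f_rates = flf_rates - np.min((flf_rates, l_rates), axis=0)

# equivalent to clipping the difference at zero
assert np.allclose(f_rates, np.maximum(flf_rates - l_rates, 0.0))
print(f_rates)  # 0.0, 0.5, 0.0, 2.9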
Example 3
    def test_three_case(self):
        """
        tests the three list case
        """
        list_1 = np.random.randn(list_length)
        list_2 = np.random.randn(list_length)
        list_3 = np.random.randn(list_length)
        mi_1_test, mi_2_test, mi_3_test = min_idx([
            list_1.reshape(1, list_length),
            list_2.reshape(1, list_length),
            list_3.reshape(1, list_length)
        ])

        mi_12_control = set(np.arange(list_length)[list_1 < list_2])
        mi_13_control = set(np.arange(list_length)[list_1 < list_3])
        mi_23_control = set(np.arange(list_length)[list_2 < list_3])
        mi_21_control = set(np.arange(list_length)[list_2 < list_1])
        mi_31_control = set(np.arange(list_length)[list_3 < list_1])
        mi_32_control = set(np.arange(list_length)[list_3 < list_2])
        # sets are unordered, so sort before the element-wise comparison below
        mi_1_control = np.array(sorted(mi_12_control & mi_13_control))
        mi_2_control = np.array(sorted(mi_21_control & mi_23_control))
        mi_3_control = np.array(sorted(mi_31_control & mi_32_control))
        self.assertTrue((mi_1_test == mi_1_control).all(),
                        "idx minimums do not match")
        self.assertTrue((mi_2_test == mi_2_control).all(),
                        "idx minimums do not match")
        self.assertTrue((mi_3_test == mi_3_control).all(),
                        "idx minimums do not match")
Example 4
    def test_two_case(self):
        """
        tests the very important two list case
        """
        list_1 = np.random.randn(list_length)
        list_2 = np.random.randn(list_length)
        mi_1_test, mi_2_test = min_idx(
            [list_1.reshape(1, list_length),
             list_2.reshape(1, list_length)])

        mi_1_control = np.arange(list_length)[list_1 < list_2]
        mi_2_control = np.arange(list_length)[list_1 >= list_2]
        self.assertTrue((mi_1_test == mi_1_control).all(),
                        "idx minimums do not match")
        self.assertTrue((mi_2_test == mi_2_control).all(),
                        "idx minimums do not match")
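The two tests above pin down what min_idx is expected to do: given a list of (1, n) arrays, return one index array per input holding the positions where that input is the element-wise minimum across all inputs. Below is a minimal reference sketch consistent with those expectations, not the module's actual implementation; ties have probability zero for the Gaussian draws used in the tests, so tie-breaking is left unspecified.

import numpy as np

def min_idx(arrays):
    """For each (1, n) array in `arrays`, return the column indices
    where it is the element-wise minimum of all the inputs."""
    stacked = np.concatenate(arrays, axis=0)   # shape (len(arrays), n)
    winners = np.argmin(stacked, axis=0)       # index of the smallest input per column
    return [np.where(winners == i)[0] for i in range(len(arrays))]

# mirrors test_two_case on a tiny input
a = np.array([[0.3, 2.0, -1.0]])
b = np.array([[1.0, 0.5, 0.0]])
mi_a, mi_b = min_idx([a, b])
assert (mi_a == np.array([0, 2])).all()
assert (mi_b == np.array([1])).all()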
Example 5
    def sampling_iteration(self):
        """Perform a single sampling step
        """
        # states
        f_state = self.state.copy().F()
        l_state = self.state.copy().L()
        # aka L^-1 state
        flf_state = self.state.copy().FLF()
        r_state = self.state.copy().R()

        # rates
        l_rates = self.transition_rates(self.state, l_state)
        flf_rates = self.transition_rates(self.state, flf_state)
        f_rates = flf_rates - np.min((flf_rates, l_rates), axis=0)
        r_rates = self.p_r * np.ones((1, self.nbatch))

        # draws from exponential distributions
        l_draws = draw_from(l_rates[0])
        f_draws = draw_from(f_rates[0])
        r_draws = draw_from(r_rates[0])

        # choose min for each particle
        l_idx, f_idx, r_idx = min_idx([l_draws, f_draws, r_draws])

        # record dwelling times
        self.dwelling_times = np.amin(
            np.concatenate((l_draws, f_draws, r_draws)), axis=0)

        # cache current state as FLF state for next L transition
        self.state.cache_flf_state(l_idx, self.state)
        # cache FL as FLF state for particles that made a transition to F
        # self.state.cache_flf_state(f_idx, l_state.F())

        # update accepted proposed states
        self.state.update(l_idx, l_state)
        self.state.update(f_idx, f_state)
        self.state.update(r_idx, r_state)

        # clear flf cache for particles that transition to R, F
        self.state.clear_flf_cache(r_idx)
        self.state.clear_flf_cache(f_idx)

        self.l_count += len(l_idx)
        self.f_count += len(f_idx)
        self.r_count += len(r_idx)
Example 6
    def sampling_iteration(self):
        """Perform a single sampling step
        """
        # F operator
        f_state = self.state.copy().F()

        # FL operator
        fl_state = self.state.copy().L().F()

        # rates
        fl_rates = self.transition_rates(self.state, fl_state)
        f_rates = np.ones((1, self.nbatch))
        r_rates = self.p_r * np.ones((1, self.nbatch))

        # draws from exponential distributions
        fl_draws = draw_from(fl_rates[0])
        f_draws = draw_from(f_rates[0])
        r_draws = draw_from(r_rates[0])

        # choose min for each particle
        f_idx, fl_idx, r_idx = min_idx([f_draws, fl_draws, r_draws])

        # record dwelling times
        self.dwelling_times = np.amin(
            np.concatenate((fl_draws, f_draws, r_draws)), axis=0)

        # update accepted FL transitions
        self.state.update(fl_idx, fl_state)

        # update accepted F transitions
        self.state.update(f_idx, f_state)

        # corrupt the momentum and update accepted R transition
        # inefficiently corrupts momentum for all states, then selects a subset
        R_state = self.state.copy().R()
        self.state.update(r_idx, R_state)

        self.fl_count += len(fl_idx)
        self.f_count += len(f_idx)
        self.r_count += len(r_idx)
Example 7
    def sampling_iteration(self):
        """Perform a single sampling step
        """
        # states
        f_state = self.state.copy().F()
        l_state = self.state.copy().L()
        # aka L^-1 state
        flf_state = self.state.copy().FLF()
        r_state = self.state.copy().R()

        try:
            # rates
            l_rates = self.transition_rates(self.state, l_state)
            flf_rates = self.transition_rates(self.state, flf_state)
            f_rates = flf_rates - np.min((flf_rates, l_rates), axis=0)
            r_rates = self.p_r * np.ones((1, self.nbatch))

            # draws from exponential distributions
            l_draws = draw_from(l_rates[0])
            f_draws = draw_from(f_rates[0])
            r_draws = draw_from(r_rates[0])
        # infinite rate due to taking too large of a step
        except ValueError:
            # take smaller steps, but go the same overall distance
            self.epsilon *= 0.5
            self.num_leapfrog_steps *= 2

            depth = np.log(self.original_epsilon / self.epsilon) / np.log(2)
            print("Ecountered infinite rate, doubling back. Depth: {}".format(depth))
            # try again
            self.state.reset_flf_cache()
            self.sampling_iteration()
            # restore the original step size and leapfrog step count
            self.epsilon *= 2
            self.num_leapfrog_steps = int(self.num_leapfrog_steps / 2)
            return

        # choose min for each particle
        l_idx, f_idx, r_idx = min_idx([l_draws, f_draws, r_draws])

        # record dwelling times
        self.dwelling_times = np.amin(
            np.concatenate((l_draws, f_draws, r_draws)), axis=0)

        # cache current state as FLF state for next L transition
        self.state.cache_flf_state(l_idx, self.state)
        # cache FL as FLF state for particles that made a transition to F
        # self.state.cache_flf_state(f_idx, l_state.F())

        # update accepted proposed states
        self.state.update(l_idx, l_state)
        self.state.update(f_idx, f_state)
        self.state.update(r_idx, r_state)

        # clear flf cache for particles that transition to R, F
        self.state.clear_flf_cache(r_idx)
        self.state.clear_flf_cache(f_idx)

        self.l_count += len(l_idx)
        self.f_count += len(f_idx)
        self.r_count += len(r_idx)
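The except ValueError branch above relies on draw_from rejecting non-finite rates, which is how an unstable leapfrog trajectory (rates blowing up) gets turned into the step-halving retry. The guard below is a sketch of that assumed contract, not the project's helper.

import numpy as np

def draw_from(rates):
    """Exponential waiting-time draws that refuse non-finite rates.

    Raising ValueError here is what lets the sampler above halve
    epsilon, double num_leapfrog_steps, and retry the iteration.
    """
    rates = np.asarray(rates, dtype=float)
    if not np.all(np.isfinite(rates)):
        raise ValueError("non-finite transition rate; step size too large")
    return np.random.exponential(scale=1.0 / rates).reshape(1, -1)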