def get_mixing_time_base_self(net, fro, to, identifier):
	"""Return (mt_base, mt_self): mixing times of `net` with evidence X1=1.

	mt_base is the mixing time toward the module-level baseline target
	distribution; mt_self is the mixing time toward the chain's own
	conditioned steady state.

	NOTE(review): relies on module globals `args`, `S_target_baseline`,
	`load_or_run`, `construct_markov_transition_matrix`,
	`analytic_marginal_states`, and `mixing_time` -- defined elsewhere.
	"""
	evidence = net.get_node_by_name('X1')
	cache_key = 'transition_matrix_shortcuts_m%d_f%d_t%d_%s_ev1' % (args.m, fro, to, identifier)
	transition = load_or_run(
		cache_key,
		lambda: construct_markov_transition_matrix(net, conditioned_on={evidence: 1}),
		force_recompute=args.recompute)
	start_distribution = analytic_marginal_states(net, conditioned_on={evidence: 0})
	steady_distribution = analytic_marginal_states(net, conditioned_on={evidence: 1})

	time_to_baseline, _ = mixing_time(start_distribution, S_target_baseline, transition,
		eps=args.eps, converging_to=steady_distribution)
	time_to_self, _ = mixing_time(start_distribution, steady_distribution, transition,
		eps=args.eps, converging_to=steady_distribution)
	return (time_to_baseline, time_to_self)
def get_mixing_time_base_self(net, fro, to, identifier):
    """Compute two mixing times for `net` with evidence node X1 clamped to 1.

    Returns a (baseline, self) pair: mixing time toward the module-level
    `S_target_baseline` distribution, and toward the chain's own
    conditioned steady state.

    NOTE(review): duplicate of the tab-indented definition earlier in this
    file; depends on module globals (`args`, `S_target_baseline`, helpers).
    """
    x1 = net.get_node_by_name('X1')

    def build_matrix():
        # transition matrix of the chain with the evidence node held at 1
        return construct_markov_transition_matrix(net, conditioned_on={x1: 1})

    key = 'transition_matrix_shortcuts_m%d_f%d_t%d_%s_ev1' % (
        args.m, fro, to, identifier)
    A_sc = load_or_run(key, build_matrix, force_recompute=args.recompute)

    dist_ev0, dist_ev1 = (
        analytic_marginal_states(net, conditioned_on={x1: state})
        for state in (0, 1))

    # both measurements start at the {ev:0} posterior and converge toward
    # the {ev:1} steady state; only the measured target differs
    times = [
        mixing_time(dist_ev0, target, A_sc, eps=args.eps,
                    converging_to=dist_ev1)[0]
        for target in (S_target_baseline, dist_ev1)
    ]
    return (times[0], times[1])
# --- Example 3 (scraper separator; original marker "Esempio n. 3", score 0) ---
# Sweep over model depths (Ms) and fatigue time-constants (taus), recording
# two mixing times per (tau, model) pair.
# _true and _self are MT with respect to unmodified transition matrix and modified one, respectively.
# When TVD(S_true_ss, S_self_ss)>eps, mixing_time_true is infinite.
# NOTE(review): `Ms`, `taus`, `args`, and the helpers called below are defined
# outside this visible chunk.
mixing_times_true = np.zeros((args.tau_steps, len(Ms))) # one data point per model per tau
mixing_times_self = np.zeros((args.tau_steps, len(Ms)))

for mi,m in enumerate(Ms):
	net = m_deep_bistable(m, marg=args.marg)
	ev = net.get_node_by_name('X1')
	# table entry [0,0] of the evidence node; used only to key the cache file
	p = ev.get_table()[0,0]

	# 'true' posterior: marginal distribution conditioned on evidence {ev: 1}
	S_target = analytic_marginal_states(net, conditioned_on={ev: 1})

	for ti, tau in enumerate(taus):
		# transition matrix with fatigue/adaptation tau and evidence clamped
		# to 1 (disk-cached, keyed by m, p, tau)
		A_adapt = load_or_run('transition_matrix_adapt_M%d_p%.3f_tau%.3f_ev1' % (m, p, tau),
			lambda: construct_markov_transition_matrix(net, fatigue_tau=tau, conditioned_on={ev: 1}),
			force_recompute=args.recompute)

		# 'modified' posterior: steady state of the adapted chain itself
		converging_to = eig_steadystate(A_adapt)

		# steadystate with {ev:0} is our starting point. It is computed by simply 'flipping'
		# the distribution from {ev:1} (symmetry is convenient)
		S_steadystate_ev0 = flip_distribution_binary_nodes(net, converging_to)

		# compute mixing time to 'true' posterior
		mixing_times_true[ti,mi] = mixing_time(S_steadystate_ev0, S_target, A_adapt, eps=args.eps,
			converging_to=converging_to)[0]
		# compute mixing time to 'modified' posterior
		mixing_times_self[ti,mi] = mixing_time(S_steadystate_ev0, converging_to, A_adapt, eps=args.eps,
			converging_to=converging_to)[0]
# Standalone script chunk: render the no-evidence transition matrix as an
# image and save it under plots/.
if __name__ == '__main__' and __package__ is None:
	__package__ = 'scripts'

import numpy as np
import matplotlib.pyplot as plt
from counting import construct_markov_transition_matrix
from util import load_or_run

# model depth and probability used only in the cache key / output filename
m = 3
p = .964

# NOTE(review): `net` is not defined anywhere in this visible chunk -- it is
# presumably constructed (e.g. via m_deep_bistable) in code not shown here;
# as written this line would raise NameError. TODO confirm.
A = load_or_run('transition_matrix_M%d_p%.3f_noev' % (m, p), lambda: construct_markov_transition_matrix(net))

# visualize the transition matrix (nearest-neighbor so cells stay crisp)
plt.figure()
plt.imshow(A, interpolation='nearest')
plt.colorbar()
plt.savefig('plots/A_matrix_m%d_p%.3f.png' % (m,p))
plt.close()
# Measure mixing time (from the {ev:0} posterior to the {ev:1} posterior) as
# a function of model depth M.
# NOTE(review): `parser`, `m_deep_bistable`, `mixing_time`, etc. are defined
# in parts of the file not visible here.
parser.add_argument('--m-max', dest='m_max', type=int, default=7)

args = parser.parse_args()

max_t = 1000
m_min = 2
n_layers = args.m_max - m_min + 1
layers = range(m_min, args.m_max+1)

mixing_times = np.zeros(n_layers)
for M in layers:
	net = m_deep_bistable(M, marg=args.marg)
	ev = net.get_node_by_name('X1')
	p = ev.get_table()[0,0]
	# transition matrix with the evidence node clamped to 1 (disk-cached)
	A = load_or_run('transition_matrix_M%d_p%.3f_ev1' % (M, p),
		lambda: construct_markov_transition_matrix(net, conditioned_on={ev:1}),
		force_recompute=args.recompute)

	# S_start and S_target are marginal distributions conditioned on {ev:0} and {ev:1} respectively.
	S_start  = analytic_marginal_states(net, conditioned_on={ev: 0})
	S_target = analytic_marginal_states(net, conditioned_on={ev: 1})

	# mixing_time returns a (time, ...) tuple; keep only the time per depth
	mixing_times[M-m_min], _ = mixing_time(S_start, S_target, A, eps=args.eps)

if args.cmp_p:
	p = 0.96
	mixing_times_rho_const = np.zeros(n_layers)
	for M in layers:
		net = m_deep_bistable(M, p=0.96)
		ev = net.get_node_by_name('X1')
		A = load_or_run('transition_matrix_M%d_p%.3f_ev1' % (M, p),
# --- Example 6 (scraper separator; original marker "Esempio n. 6", score 0) ---
    # NOTE(review): fragment -- these lines are the tail of an
    # eig_steadystate-style helper whose `def` line was lost when this chunk
    # was extracted (a complete tab-indented copy appears later in the file).
    w, v = np.linalg.eig(A)
    inds = np.argsort(w)
    # eigenvector for the largest (last-sorted) eigenvalue, magnitudes taken
    # to discard the arbitrary sign/scale returned by eig
    S_steady_state = np.abs(v[:, inds[-1]])
    # renormalize into a probability distribution
    S_steady_state /= S_steady_state.sum()
    return S_steady_state


# Mixing times under a transient feedforward boost, indexed by
# (boost step, time step, model).
# NOTE(review): `Ms`, `alphas`, `Ts`, `args`, and helpers come from unseen
# parts of the file.
mixing_times = np.zeros((args.boost_steps, args.t_max + 1, len(Ms)))

for mi, m in enumerate(Ms):
    net = m_deep_bistable(m, marg=args.marg)
    ev = net.get_node_by_name('X1')
    # table entry [0,0] of the evidence node; used in cache-file keys
    p = ev.get_table()[0, 0]

    # baseline (un-boosted) transition matrix with evidence clamped to 1
    A = load_or_run('transition_matrix_M%d_p%.3f_ev1' % (m, p),
                    lambda: construct_markov_transition_matrix(
                        net, conditioned_on={ev: 1}),
                    force_recompute=args.recompute)
    S_start = analytic_marginal_states(net, conditioned_on={ev: 0})
    S_target = analytic_marginal_states(net, conditioned_on={ev: 1})

    for ai, a in enumerate(alphas):
        # transition matrix with feedforward boost `a` applied
        A_ff = load_or_run(
            'transition_matrix_transient_ff_M%d_p%.3f_b%.3f_ev1' % (m, p, a),
            lambda: construct_markov_transition_matrix(
                net, feedforward_boost=a, conditioned_on={ev: 1}),
            force_recompute=args.recompute)

        for transient_steps in Ts:
            # avoid possibility that S-->S_ff_steadystate 'passes through' S_target, apparently
            # looking like mixing time is very small
            # NOTE(review): this statement is truncated in this chunk -- a
            # more complete copy of the same loop appears later in the file.
            ff_steady_state_in_range = variational_distance(
    # Build P(sum of node states) from the state distribution S -- shows the
    # network's bimodality.  NOTE(review): this chunk starts mid-block (inside
    # an unseen loop over M); `N`, `S`, `p_sum_state`, `net` are defined above
    # the visible region.
    for i in range(N):
        sum_state = sum(id_to_state(net, i))
        p_sum_state[sum_state] += S[i]

    if args.plot:
        # plot distribution over sum of states (show it's bimodal)
        plt.figure()
        plt.plot(p_sum_state)
        plt.title('P(sum of states) for M = %d' % M)
        plt.xlabel('sum of states')
        plt.savefig('plots/p_sum_state_M%d.png' % M)
        plt.close()

    # run sampler to get switching times histogram
    def compute_distribution():
        # Gibbs-sample the net; SwitchedFunction accumulates percept switches
        counter = SwitchedFunction()
        gibbs_sample(net, {}, counter, args.samples, args.burnin)
        return counter.distribution()

    d = load_or_run('sampled_switching_time_distribution_majority_M%d' % M,
                    compute_distribution,
                    force_recompute=args.recompute)

    if args.plot:
        plt.figure()
        plt.bar(np.arange(len(d)), d)
        plt.title(
            'Sampled switching time distributions (majority percept) M=%d' % M)
        plt.savefig('plots/sampled_switching_time_majority_M%d.png' % M)
        plt.close()
	# Sample the network's response to evidence oscillating with period args.T.
	# NOTE(review): chunk starts mid-block; `ev_node`, `net`, `M`, `args`,
	# `layers`, `m_min` come from unseen enclosing code.
	def sample_net_response():
		# keep track of state of every node at every sample
		states = np.zeros((args.samples, M))

		def record_sample(i, net):
			states[i,:] = net.state_vector()

		# evidence alternates between {ev:0} and {ev:1} every args.T samples
		gibbs_sample_dynamic_evidence(
			net,
			alternator([{ev_node: 0}, {ev_node: 1}], period=args.T),
			record_sample,
			args.samples,
			args.burnin)
		return states

	states = load_or_run('sampled_osc_M%d_T%d_S%d' % (M, args.T, args.samples), sample_net_response, force_recompute=args.recompute)

	# compute frequency response for each node
	# ({0,1} states are remapped to {-1,+1} before the FFT)
	freq_response = np.fft.fft(states*2-1, axis=0)

	if args.plot:
		plt.figure()
		plt.loglog(np.fliplr(np.abs(freq_response)**2))
		plt.title('Frequency response of nodes to input with T=%d' % args.T)
		plt.xlabel('frequency')
		plt.legend(['X%d' % (M-l+m_min) for l in reversed(layers)], loc='lower left')
		plt.savefig('plots/frequency_response_M%d_T%d.png' % (M, args.T))
		plt.close()

		if args.movie:
			# NOTE(review): truncated here -- the movie-writing code continues
			# beyond this visible chunk.
			fig = plt.figure()
    # 	if args.plot:
    # 		plt.figure()
    # 		plt.plot(analytic)
    # 		plt.plot(empirical)
    # 		plt.legend(['analytic', 'sample-approx'])
    # 		plt.savefig('plots/cmp_empirical_analytic_st_M%d.png' % args.m)
    # 		plt.close()

    # Compute analytic switching-time distributions for two percept
    # definitions: top-node and majority (plurality).
    switching_times_top = np.zeros(args.max_t)
    switching_times_majority = np.zeros(args.max_t)
    actual_max_t = 0
    net = m_deep_bistable(args.m, marg=args.marg)
    p = net.get_node_by_name('X1').get_table()[0, 0]

    # NOTE(review): the cache key uses args.m - 1 while the matrix is built
    # from the depth-args.m net; other chunks in this file key on the same M
    # they build from. Possibly intentional, but worth confirming.
    A = load_or_run('transition_matrix_M%d_p%.3f_noev' % (args.m - 1, p),
                    lambda: construct_markov_transition_matrix(net),
                    force_recompute=args.recompute)

    # top node percept
    S_init = analytic_recently_switched_states(net, top_node_percept, 0, A)
    distrib = analytic_switching_times(net,
                                       S_init, (top_node_percept, 1),
                                       transition=A,
                                       max_t=args.max_t)
    actual_max_t = max(actual_max_t, len(distrib))
    switching_times_top[:len(distrib)] = distrib
    # majority percept
    # NOTE(review): this call is truncated at the end of the visible chunk.
    S_init = analytic_recently_switched_states(net, plurality_state, 0, A)
    distrib = analytic_switching_times(net,
                                       S_init, (plurality_state, 1),
                                       transition=A,
        # NOTE(review): fragment -- these lines are the body of
        # sample_net_response() (see the tab-indented copy earlier in this
        # file); its `def` line was lost when this chunk was extracted.
        # keep track of state of every node at every sample
        states = np.zeros((args.samples, M))

        def record_sample(i, net):
            states[i, :] = net.state_vector()

        # evidence alternates between {ev:0} and {ev:1} every args.T samples
        gibbs_sample_dynamic_evidence(
            net, alternator([{
                ev_node: 0
            }, {
                ev_node: 1
            }], period=args.T), record_sample, args.samples, args.burnin)
        return states

    states = load_or_run('sampled_osc_M%d_T%d_S%d' % (M, args.T, args.samples),
                         sample_net_response,
                         force_recompute=args.recompute)

    # compute frequency response for each node ({0,1} mapped to {-1,+1})
    freq_response = np.fft.fft(states * 2 - 1, axis=0)

    if args.plot:
        plt.figure()
        plt.loglog(np.fliplr(np.abs(freq_response)**2))
        plt.title('Frequency response of nodes to input with T=%d' % args.T)
        plt.xlabel('frequency')
        plt.legend(['X%d' % (M - l + m_min) for l in reversed(layers)],
                   loc='lower left')
        plt.savefig('plots/frequency_response_M%d_T%d.png' % (M, args.T))
        plt.close()
	# Tab-indented copy of the P(sum of states) + switching-time-histogram
	# analysis (compare the space-indented version earlier in this file).
	# NOTE(review): starts mid-block; `net`, `N`, `M`, `args` come from unseen
	# enclosing code.
	S = analytic_marginal_states(net)

	p_sum_state = np.zeros(len(net._nodes)+1)

	# accumulate P(sum of states) over every joint state id
	for i in range(N):
		sum_state = sum(id_to_state(net, i))
		p_sum_state[sum_state] += S[i]

	if args.plot:
		# plot distribution over sum of states (show it's bimodal)
		plt.figure()
		plt.plot(p_sum_state)
		plt.title('P(sum of states) for M = %d' % M)
		plt.xlabel('sum of states')
		plt.savefig('plots/p_sum_state_M%d.png' % M)
		plt.close()

	# run sampler to get switching times histogram
	def compute_distribution():
		# Gibbs-sample the net; SwitchedFunction accumulates percept switches
		counter = SwitchedFunction()
		gibbs_sample(net, {}, counter, args.samples, args.burnin)
		return counter.distribution()

	d = load_or_run('sampled_switching_time_distribution_majority_M%d' % M, compute_distribution, force_recompute=args.recompute)

	if args.plot:
		plt.figure()
		plt.bar(np.arange(len(d)), d)
		plt.title('Sampled switching time distributions (majority percept) M=%d' % M)
		plt.savefig('plots/sampled_switching_time_majority_M%d.png' % M)
		plt.close()
# --- Example 12 (scraper separator; original marker "Esempio n. 12", score 0) ---
# Select which model family to build: 'deep' (default) or 'wide', chosen via
# a "--model <name>" command-line argument.
# NOTE(review): `argv`, `m_deep_bistable`, `m_wide_bistable`, `load_or_run`,
# and the sampling helpers are defined outside this visible chunk.
model = m_deep_bistable
model_nm = 'deep'
if '--model' in argv:
    # BUGFIX: ensure a value actually follows the '--model' flag.  The old
    # check `len(argv) > argv.index('--model')` was always true whenever
    # '--model' was present (index < len by definition), so '--model' as the
    # last argument raised IndexError on the lookup below.
    if len(argv) > argv.index('--model') + 1:
        m = argv[argv.index('--model') + 1].lower()
        if m == 'wide':
            model = m_wide_bistable
            model_nm = 'wide'

# For each model size M, draw three state distributions (disk-cached) and map
# each to per-node mean-state colors for later visualization.
for M in range(2, 7):
    print(M)  # progress indicator (function-call form is valid Py2 and Py3)

    net = model(M, 0.96)
    # S1: marginal states sampled with no evidence, conditioned on the
    # plurality percept being 0
    S1 = load_or_run(
        'compare_init_S1_%s_%02d.npy' % (model_nm, M),
        lambda: sample_marginal_states(net, {},
                                       10000,
                                       conditional_fn=lambda net:
                                       plurality_state(net) == 0))
    # S2: marginal states with the first node clamped to state 0
    S2 = load_or_run(
        'compare_init_S2_%s_%02d.npy' % (model_nm, M),
        lambda: sample_marginal_states(net, {net._nodes[0]: 0}, 10000))
    # S3: presumably the distribution over states shortly after the first
    # node switches state -- TODO confirm against sample_recently_switched_states
    S3 = load_or_run(
        'compare_init_S3_%s_%02d.npy' % (model_nm, M),
        lambda: sample_recently_switched_states(
            net, lambda n: n._nodes[0].state_index(), max_iterations=100000))

    colors1 = mean_state(net, S1)
    colors2 = mean_state(net, S2)
    colors3 = mean_state(net, S3)

    colormap = u'afmhot'
# --- Example 13 (scraper separator; original marker "Esempio n. 13", score 0) ---
# Script chunk: animate the convergence S_{t+1} = A S_t toward the {ev:1}
# posterior, tracking total-variation distance per step.
# NOTE(review): `parser`, `manimation`, and several helpers are defined
# outside this visible chunk.
parser.add_argument('--prob', dest='p', type=float, default=0.96)
parser.add_argument('--eps', dest='eps', type=float, default=0.05)
parser.add_argument('--m', dest='M', type=int, default=6)
parser.add_argument('--res', type=int, default=240)

args = parser.parse_args()

# matplotlib-animation movie writer (requires ffmpeg to be installed)
FFMpegWriter = manimation.writers['ffmpeg']
metadata = dict(title='Mixing Time Animation', artist='Matplotlib', comment='')
writer = FFMpegWriter(fps=15, metadata=metadata)

# see test_lag_as_mixing_time:
M = args.M
net = m_deep_bistable(M, args.p)
ev = net.get_node_by_name('X1')
A = load_or_run('transition_matrix_M%d_p%.3f_ev1' % (M, args.p), lambda: construct_markov_transition_matrix(net, conditioned_on={ev: 1}))

S_start  = analytic_marginal_states(net, conditioned_on={ev: 0})
S_target = analytic_marginal_states(net, conditioned_on={ev: 1})

max_t = 100

# S[:,t] is the state distribution after t steps; vds[t] its total-variation
# distance from the target
S = np.zeros((2**M, max_t))
vds = np.zeros(max_t)
S[:,0] = S_start
i = 0
d = variational_distance(S_target, S[:,0])
vds[0] = d
# iterate the chain until within eps of the target
# NOTE(review): the loop body is truncated here -- the unseen continuation
# must update `d` (and vds) or this loop would never terminate.
while d >= args.eps:
	i = i+1
	S[:,i] = np.dot(A,S[:,i-1])
# --- Example 14 (scraper separator; original marker "Esempio n. 14", score 0) ---
# Space-indented copy of the transition-matrix-rendering script preamble (see
# the earlier copy in this file).
if __name__ == '__main__' and __package__ is None:
    __package__ = 'scripts'

import numpy as np
import matplotlib.pyplot as plt
from counting import construct_markov_transition_matrix
from util import load_or_run

# model depth and probability used only in the cache key / output filename
m = 3
p = .964

# NOTE(review): `net` is not defined anywhere in this visible chunk -- as
# written this line would raise NameError; presumably it is constructed in
# code not shown here. TODO confirm.
A = load_or_run('transition_matrix_M%d_p%.3f_noev' % (m, p),
                lambda: construct_markov_transition_matrix(net))

# visualize the transition matrix and save it under plots/
plt.figure()
plt.imshow(A, interpolation='nearest')
plt.colorbar()
plt.savefig('plots/A_matrix_m%d_p%.3f.png' % (m, p))
plt.close()
# --- Example 15 (scraper separator; original marker "Esempio n. 15", score 0) ---
# Space-indented copy of the mixing-time-vs-depth sweep (see the tab-indented
# version earlier in this file).
parser.add_argument('--m-max', dest='m_max', type=int, default=7)

args = parser.parse_args()

max_t = 1000
m_min = 2
n_layers = args.m_max - m_min + 1
layers = range(m_min, args.m_max + 1)

mixing_times = np.zeros(n_layers)
for M in layers:
    net = m_deep_bistable(M, marg=args.marg)
    ev = net.get_node_by_name('X1')
    p = ev.get_table()[0, 0]
    # evidence-clamped transition matrix, cached on disk
    A = load_or_run('transition_matrix_M%d_p%.3f_ev1' % (M, p),
                    lambda: construct_markov_transition_matrix(
                        net, conditioned_on={ev: 1}),
                    force_recompute=args.recompute)

    # S_start and S_target are marginal distributions conditioned on {ev:0} and {ev:1} respectively.
    S_start = analytic_marginal_states(net, conditioned_on={ev: 0})
    S_target = analytic_marginal_states(net, conditioned_on={ev: 1})

    mixing_times[M - m_min], _ = mixing_time(S_start,
                                             S_target,
                                             A,
                                             eps=args.eps)

if args.cmp_p:
    # re-run the sweep with a fixed p for comparison
    p = 0.96
    mixing_times_rho_const = np.zeros(n_layers)
    # NOTE(review): truncated -- the body of this loop is missing from the
    # visible chunk.
    for M in layers:
def eig_steadystate(A):
	"""Return the steady-state distribution of transition matrix `A`.

	The steady state is the (right) eigenvector of `A` with eigenvalue 1,
	normalized to sum to 1.

	BUGFIX: the eigenvector was previously chosen via `np.argsort(w)[-1]`,
	i.e. the lexicographically-largest eigenvalue.  `np.linalg.eig` can
	return complex eigenvalues, and NumPy sorts complex values by real part
	then imaginary part, which need not pick the eigenvalue 1.  Selecting
	the eigenvalue closest to 1 is unambiguous and robust.
	"""
	w, v = np.linalg.eig(A)
	# pick the eigenvalue closest to 1 rather than the 'largest' one
	idx = np.argmin(np.abs(w - 1))
	# eigenvectors come back with arbitrary scale/sign; take magnitudes and
	# renormalize into a probability distribution
	S_steady_state = np.abs(v[:, idx])
	S_steady_state /= S_steady_state.sum()
	return S_steady_state

# Tab-indented copy of the feedforward-boost mixing-time sweep (compare the
# space-indented version earlier in this file).
# NOTE(review): `Ms`, `alphas`, `Ts`, `args`, and helpers come from unseen
# parts of the file.
mixing_times = np.zeros((args.boost_steps, args.t_max+1, len(Ms)))

for mi,m in enumerate(Ms):
	net = m_deep_bistable(m, marg=args.marg)
	ev = net.get_node_by_name('X1')
	p = ev.get_table()[0,0]

	# baseline (un-boosted) transition matrix with evidence clamped to 1
	A = load_or_run('transition_matrix_M%d_p%.3f_ev1' % (m, p), lambda: construct_markov_transition_matrix(net, conditioned_on={ev: 1}), force_recompute=args.recompute)
	S_start  = analytic_marginal_states(net, conditioned_on={ev: 0})
	S_target = analytic_marginal_states(net, conditioned_on={ev: 1})

	for ai, a in enumerate(alphas):
		# boosted transition matrix (feedforward_boost=a), cached on disk
		A_ff = load_or_run('transition_matrix_transient_ff_M%d_p%.3f_b%.3f_ev1' % (m, p, a),
			lambda: construct_markov_transition_matrix(net, feedforward_boost=a, conditioned_on={ev: 1}), force_recompute=args.recompute)

		for transient_steps in Ts:
			# avoid possibility that S-->S_ff_steadystate 'passes through' S_target, apparently
			# looking like mixing time is very small
			ff_steady_state_in_range = variational_distance(eig_steadystate(A_ff), S_target) < args.eps
			# first 'transience' samples are with A_ff
			S = S_start.copy()
			tvd = variational_distance(S, S_target)
			# NOTE(review): the body of this loop is truncated at the end of
			# the visible chunk.
			for t in range(transient_steps):
# --- Example 17 (scraper separator; original marker "Esempio n. 17", score 0) ---
	# KL-divergence lag test: for each depth m, run (or load cached)
	# test_deep_kl_lag and optionally plot per-layer mean +/- error bars.
	# NOTE(review): chunk starts mid-block (inside an unseen indented scope);
	# `parser` and `test_deep_kl_lag` are defined elsewhere.
	args = parser.parse_args()

	# unpack CLI options into locals
	recompute = args.recompute
	samples = args.samples
	prob = args.prob
	trials = args.trials
	plot = args.plot
	m_max = args.m_max

	# KL Divergence Test
	for m in range(2,m_max+1):
		print m

		filename = 'test_deep_kl_lag[%d].npy' % m
		data = load_or_run(filename, lambda: test_deep_kl_lag(m, trials=trials, n_samples=samples, p=prob), force_recompute=recompute)

		if plot:
			# get mean over trials
			mean = np.mean(data, axis=2)
			variance = np.var(data, axis=2)

			fig = plt.figure()
			ax = fig.add_subplot(111)
			# plot in reverse since data[0] is 'top' layer
			# NOTE(review): errorbar is given the variance as yerr, not the
			# standard deviation -- confirm that is intended.
			for layer in range(data.shape[0]+1, 1, -1):
				plt.errorbar(range(samples), mean[layer-2,:].transpose(), variance[layer-2,:].transpose())
			plt.legend(['layer %d' % l for l in range(2,m+1)])
			plt.savefig('results_KL_m=%d.png' % m)
			plt.close()