# --- Geweke-style validation of the spike-and-slab Hawkes sampler ---
# NOTE(review): this fragment relies on K, dt, dt_max, T and the model/helper
# names (DiscreteTimeNetworkHawkesModelSpikeAndSlab, progprint_xrange) defined
# earlier in the file; it is not self-contained.

# Network prior hyperparameters. All K nodes are placed in a single cluster
# (c = 0 for every node).
# FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
# int is the documented, behavior-identical replacement.
network_hypers = {'c': np.zeros(K, dtype=int), 'p': 0.5, 'kappa': 10.0, 'v': 10*3.0}
# NOTE(review): bkgd_hypers is defined but never passed to the model in this
# span -- confirm whether it is used further down the file.
bkgd_hypers = {"alpha": 1., "beta": 10.}

model = DiscreteTimeNetworkHawkesModelSpikeAndSlab(
    K=K, dt=dt, dt_max=dt_max,
    weight_hypers={"parallel_resampling": False},
    network_hypers=network_hypers)
model.generate(T=T)

# Gibbs sample and then generate new data
N_samples = 10000
samples = []
lps = []
for itr in progprint_xrange(N_samples, perline=50):
    # Resample the model
    model.resample_model()
    samples.append(model.copy_sample())
    lps.append(model.log_likelihood())

    # Geweke step: drop the current dataset and draw a fresh one from the
    # newly-sampled parameters, so the chain targets the joint p(data, params).
    model.data_list.pop()
    model.generate(T=T)

# Compute sample statistics for second half of samples
A_samples = np.array([s.weight_model.A for s in samples])
W_samples = np.array([s.weight_model.W for s in samples])
g_samples = np.array([s.impulse_model.g for s in samples])
lambda0_samples = np.array([s.bias_model.lambda0 for s in samples])
lps = np.array(lps)

offset = 0
def demo(seed=None): """ Create a discrete time Hawkes model and generate from it. :return: """ if seed is None: seed = np.random.randint(2**32) print "Setting seed to ", seed np.random.seed(seed) C = 1 K = 10 T = 1000 dt = 1.0 B = 3 # Create a true model p = 0.8 * np.eye(C) v = 10.0 * np.eye(C) + 20.0 * (1 - np.eye(C)) # m = 0.5 * np.ones(C) c = (0.0 * (np.arange(K) < 10) + 1.0 * (np.arange(K) >= 10)).astype(np.int) true_model = DiscreteTimeNetworkHawkesModelSpikeAndSlab(C=C, K=K, dt=dt, B=B, c=c, p=p, v=v) # Plot the true network plt.ion() plot_network(true_model.weight_model.A, true_model.weight_model.W, vmax=0.5) plt.pause(0.001) # Sample from the true model S, R = true_model.generate(T=T) # Make a new model for inference test_model = DiscreteTimeStandardHawkesModel(K=K, dt=dt, B=B, beta=1.0) test_model.add_data(S) # Plot the true and inferred firing rate kplt = 0 plt.figure() plt.plot(np.arange(T), R[:, kplt], '-k', lw=2) plt.ion() ln = plt.plot(np.arange(T), test_model.compute_rate(ks=kplt), '-r')[0] plt.show() # Gradient descent N_steps = 10000 lls = [] for itr in xrange(N_steps): W, ll, grad = test_model.gradient_descent_step(stepsz=0.001) lls.append(ll) # Update plot if itr % 5 == 0: ln.set_data(np.arange(T), test_model.compute_rate(ks=kplt)) plt.title("Iteration %d" % itr) plt.pause(0.001) plt.ioff() print "W true: ", true_model.weight_model.A * true_model.weight_model.W print "lambda0 true: ", true_model.bias_model.lambda0 print "ll true: ", true_model.log_likelihood() print "" print "W test: ", test_model.W print "lambda0 test ", test_model.bias print "ll test: ", test_model.log_likelihood() plt.figure() plt.plot(np.arange(N_steps), lls) plt.xlabel("Iteration") plt.ylabel("Log likelihood") plot_network(np.ones((K, K)), test_model.W, vmax=0.5) plt.show()
def demo(seed=None):
    """
    Create a discrete time Hawkes model and generate from it.
    :return:
    """
    # This example is deliberately disabled; everything after the raise below
    # is unreachable dead code, kept as a reference for a future update.
    raise NotImplementedError("This example needs to be updated.")

    # --- unreachable from here on ---
    if seed is None:
        seed = np.random.randint(2**32)

    print "Setting seed to ", seed
    np.random.seed(seed)

    C = 1       # number of network clusters
    K = 10      # number of processes (nodes)
    T = 1000    # number of time bins
    dt = 1.0    # time bin width
    B = 3       # number of impulse-response basis functions

    # Create a true model
    p = 0.8 * np.eye(C)
    v = 10.0 * np.eye(C) + 20.0 * (1-np.eye(C))
    # m = 0.5 * np.ones(C)
    # NOTE(review): np.int was removed in NumPy 1.24 -- would need updating to
    # the builtin int if this example were re-enabled.
    c = (0.0 * (np.arange(K) < 10) + 1.0 * (np.arange(K) >= 10)).astype(np.int)
    true_model = DiscreteTimeNetworkHawkesModelSpikeAndSlab(C=C, K=K, dt=dt, B=B, c=c, p=p, v=v)

    # Plot the true network
    plt.ion()
    plot_network(true_model.weight_model.A, true_model.weight_model.W, vmax=0.5)
    plt.pause(0.001)

    # Sample from the true model
    S,R = true_model.generate(T=T)

    # Make a new model for inference
    test_model = DiscreteTimeStandardHawkesModel(K=K, dt=dt, B=B, beta=1.0)
    test_model.add_data(S)

    # Plot the true and inferred firing rate
    kplt = 0
    plt.figure()
    plt.plot(np.arange(T), R[:,kplt], '-k', lw=2)
    plt.ion()
    ln = plt.plot(np.arange(T), test_model.compute_rate(ks=kplt), '-r')[0]
    plt.show()

    # Gradient descent
    N_steps = 10000
    lls = []
    for itr in xrange(N_steps):
        W,ll,grad = test_model.gradient_descent_step(stepsz=0.001)
        lls.append(ll)

        # Update plot
        if itr % 5 == 0:
            ln.set_data(np.arange(T), test_model.compute_rate(ks=kplt))
            plt.title("Iteration %d" % itr)
            plt.pause(0.001)

    plt.ioff()

    print "W true: ", true_model.weight_model.A * true_model.weight_model.W
    print "lambda0 true: ", true_model.bias_model.lambda0
    print "ll true: ", true_model.log_likelihood()
    print ""
    print "W test: ", test_model.W
    print "lambda0 test ", test_model.bias
    print "ll test: ", test_model.log_likelihood()

    plt.figure()
    plt.plot(np.arange(N_steps), lls)
    plt.xlabel("Iteration")
    plt.ylabel("Log likelihood")

    plot_network(np.ones((K,K)), test_model.W, vmax=0.5)
    plt.show()
K=K, dt=dt, dt_max=dt_max,
    # NOTE(review): this chunk starts mid-call -- the constructor's opening
    # line is outside this view.
    weight_hypers={"parallel_resampling": False},
    network_hypers=network_hypers)
model.generate(T=T)

# Gibbs sample and then generate new data
N_samples = 10000
samples = []
lps = []
for itr in progprint_xrange(N_samples, perline=50):
    # Resample the model
    model.resample_model()
    samples.append(model.copy_sample())
    lps.append(model.log_likelihood())

    # Geweke step: drop the dataset and regenerate it from the newly-sampled
    # parameters so the chain targets the joint distribution p(data, params).
    model.data_list.pop()
    model.generate(T=T)

# Compute sample statistics for second half of samples
A_samples = np.array([s.weight_model.A for s in samples])
W_samples = np.array([s.weight_model.W for s in samples])
g_samples = np.array([s.impulse_model.g for s in samples])
lambda0_samples = np.array([s.bias_model.lambda0 for s in samples])
lps = np.array(lps)

# Posterior means over the retained samples (offset = 0 keeps them all,
# despite the "second half" comment above).
offset = 0
A_mean = A_samples[offset:, ...].mean(axis=0)
W_mean = W_samples[offset:, ...].mean(axis=0)