def run():
    """Train a neural net with entropic descent for N_samples random restarts.

    Relies on module-level configuration (N_train, N_tests, layer_sizes,
    L2_per_dpt, batch_size, seed, thin, N_iter, epsilon, gamma, alpha,
    annealing_schedule, init_scale, N_samples).

    Returns:
        defaultdict(list) mapping (metric_name, sample_index) -> list of
        recorded values over the optimization trajectory.
    """
    (train_images, train_labels),\
        (tests_images, tests_labels) = load_data_subset(N_train, N_tests)
    parser, pred_fun, nllfun, frac_err = make_nn_funs(layer_sizes, L2_per_dpt)
    N_param = len(parser.vect)
    # Loop-invariant: the initial scale vector is identical for every sample,
    # so build it once instead of once per restart.
    x_init_scale = np.full(N_param, init_scale)
    print("Running experiment...")
    results = defaultdict(list)
    for i in xrange(N_samples):
        def indexed_loss_fun(w, i_iter):
            # Deterministic minibatch choice keyed on (seed, sample, iter)
            # so runs are reproducible.
            rs = RandomState((seed, i, i_iter))
            idxs = rs.randint(N_train, size=batch_size)
            # Scale the minibatch NLL up to a full-dataset estimate.
            return nllfun(w, train_images[idxs], train_labels[idxs]) * N_train
        gradfun = grad(indexed_loss_fun)

        def callback(x, t, v, entropy):
            # Cheap diagnostics recorded every iteration.
            results[("entropy", i)].append(entropy / N_train)
            results[("v_norm", i)].append(norm(v) / np.sqrt(N_param))
            results[("minibatch_likelihood", i)].append(-indexed_loss_fun(x, t))
            # Expensive full-dataset metrics only every `thin` iterations,
            # plus the first (t == 0) and last (t == N_iter) iterations.
            if t % thin != 0 and t != N_iter and t != 0:
                return
            results[('iterations', i)].append(t)
            results[("train_likelihood", i)].append(
                -nllfun(x, train_images, train_labels))
            results[("tests_likelihood", i)].append(
                -nllfun(x, tests_images, tests_labels))
            results[("tests_error", i)].append(
                frac_err(x, tests_images, tests_labels))
            print(("Iteration {0:5} Train likelihood {1:2.4f} Test likelihood {2:2.4f}"
                   " Test Err {3:2.4f}").format(t,
                                                results[("train_likelihood", i)][-1],
                                                results[("tests_likelihood", i)][-1],
                                                results[("tests_error", i)][-1]))
        rs = RandomState((seed, i))
        entropic_descent2(gradfun, callback=callback, x_scale=x_init_scale,
                          epsilon=epsilon, gamma=gamma, alpha=alpha,
                          annealing_schedule=annealing_schedule, rs=rs)
    return results
def run():
    """Run entropic descent from N_samples random initializations.

    Records full per-iteration trails for the first N_samples_trails samples
    and cross-sample snapshots of position/velocity at `snapshot_times`.

    Returns:
        defaultdict(list) keyed by (quantity_name, sample_or_time_index).
    """
    x_init_scale = np.full(D, init_scale)
    annealing_schedule = np.linspace(0, 1, N_iter)
    print("Running experiment...")
    results = defaultdict(list)
    for sample_ix in xrange(N_samples):
        def record(x, t, v, entropy):
            # Detailed trajectories are kept only for the first few samples.
            if sample_ix < N_samples_trails:
                results[("trail_x", sample_ix)].append(x.copy())
                results[("trail_v", sample_ix)].append(v.copy())
                results[("entropy", sample_ix)].append(entropy)
                results[("likelihood", sample_ix)].append(-nllfun(x))
            # Population snapshots, keyed by iteration number.
            if t in snapshot_times:
                results[("all_x", t)].append(x.copy())
                results[("all_v", t)].append(v.copy())
        entropic_descent2(gradfun,
                          callback=record,
                          x_scale=x_init_scale,
                          epsilon=epsilon,
                          gamma=gamma,
                          alpha=alpha,
                          annealing_schedule=annealing_schedule,
                          rs=RandomState((seed, sample_ix)))
    return results
def run():
    """Run entropic descent with a three-phase annealing schedule.

    The schedule holds at 0 for the first third of the iterations, ramps
    linearly to 1 over the middle third, and holds at 1 for the final third.

    Returns:
        defaultdict(list) keyed by (quantity_name, sample_index).
    """
    # annealing_schedule = np.linspace(0,1,N_iter)
    third = N_iter // 3  # integer division, identical to `/` for int N_iter
    annealing_schedule = np.concatenate((np.zeros(third),
                                         np.linspace(0, 1, third),
                                         np.ones(third)))
    print("Running experiment...")
    results = defaultdict(list)
    for sample_ix in xrange(N_samples):
        def record(x, t, v, entropy):
            # Replace this with a loop over kwargs?
            results[("x", sample_ix)].append(x.copy())
            results[("entropy", sample_ix)].append(entropy)
            results[("velocity", sample_ix)].append(v)
            results[("likelihood", sample_ix)].append(-nllfun(x))
        x, entropy = entropic_descent2(gradfun,
                                       callback=record,
                                       x_scale=x_init_scale,
                                       epsilon=epsilon,
                                       gamma=gamma,
                                       alpha=alpha,
                                       annealing_schedule=annealing_schedule,
                                       rs=RandomState((seed, sample_ix)))
    return results
def run():
    """Run entropic descent for N_samples restarts, recording per-iteration state.

    Results are keyed by (quantity_name, sample_index, iteration), with one
    final entry at iteration N_iter holding the returned optimum and entropy.

    Returns:
        defaultdict(list) mapping (name, i, t) -> recorded value.
    """
    print("Running experiment...")
    results = defaultdict(list)
    for i in xrange(N_samples):
        def callback(x, t, g, v, entropy):
            # Replace this with a loop over kwargs?
            results[("x", i, t)] = x.copy()
            results[("entropy", i, t)] = entropy
            results[("velocity", i, t)] = v
            results[("likelihood", i, t)] = -nllfun(x)
        rs = RandomState((seed, i))
        x, entropy = entropic_descent2(gradfun, callback=callback,
                                       x_scale=x_init_scale,
                                       epsilon=epsilon, gamma=gamma,
                                       iters=N_iter, rs=rs)
        results[("x", i, N_iter)] = x
        # Bug fix: the final entropy entry previously stored `x` (the
        # parameter vector) instead of the returned entropy value.
        results[("entropy", i, N_iter)] = entropy
    return results
def run():
    """Run entropic descent from N_samples random starts.

    Keeps full trajectories ("trails") for the first N_samples_trails samples,
    and snapshots every sample's position/velocity at iterations listed in
    `snapshot_times`.

    Returns:
        defaultdict(list) keyed by (quantity_name, sample_or_time_index).
    """
    x_init_scale = np.full(D, init_scale)
    annealing_schedule = np.linspace(0, 1, N_iter)
    print("Running experiment...")
    results = defaultdict(list)
    for i in xrange(N_samples):
        keep_trail = i < N_samples_trails

        def callback(x, t, v, entropy):
            if keep_trail:
                # Per-iteration trail for the first few samples only.
                results[("trail_x", i)].append(x.copy())
                results[("trail_v", i)].append(v.copy())
                results[("entropy", i)].append(entropy)
                results[("likelihood", i)].append(-nllfun(x))
            if t in snapshot_times:
                # Cross-sample snapshot at this iteration.
                results[("all_x", t)].append(x.copy())
                results[("all_v", t)].append(v.copy())

        rs = RandomState((seed, i))
        entropic_descent2(gradfun, callback=callback, x_scale=x_init_scale,
                          epsilon=epsilon, gamma=gamma, alpha=alpha,
                          annealing_schedule=annealing_schedule, rs=rs)
    return results
def run():
    """Run entropic descent with a hold / ramp / hold annealing schedule.

    The schedule is flat at 0, then linear from 0 to 1, then flat at 1,
    each phase lasting N_iter // 3 iterations.

    Returns:
        defaultdict(list) keyed by (quantity_name, sample_index).
    """
    # annealing_schedule = np.linspace(0,1,N_iter)
    phase_len = N_iter // 3  # same as `/` for an integer N_iter
    annealing_schedule = np.concatenate((np.zeros(phase_len),
                                         np.linspace(0, 1, phase_len),
                                         np.ones(phase_len)))
    print("Running experiment...")
    results = defaultdict(list)
    for i in xrange(N_samples):
        def callback(x, t, v, entropy):
            # Replace this with a loop over kwargs?
            results[("x", i)].append(x.copy())
            results[("entropy", i)].append(entropy)
            results[("velocity", i)].append(v)
            results[("likelihood", i)].append(-nllfun(x))

        x, entropy = entropic_descent2(gradfun,
                                       callback=callback,
                                       x_scale=x_init_scale,
                                       epsilon=epsilon,
                                       gamma=gamma,
                                       alpha=alpha,
                                       annealing_schedule=annealing_schedule,
                                       rs=RandomState((seed, i)))
    return results