Example #1
def run():
    print "Running experiment..."
    results = defaultdict(list)
    for i in range(N_samples):
        print(i, end=" ")
        def callback(**kwargs):
            for k, v in kwargs.items():
                results[(k, i)].append(copy(v))
            results[("likelihood", i)].append(-nllfun(kwargs['x']))
            results[("x_minus_mu_sq", i)].append((kwargs['x'] - mu)**2)

        rs = RandomState((seed, i))
        sgd_entropic(gradfun, np.full(D, init_scale), N_iter, alpha, rs, callback)

    return results
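A minimal sketch of the calling convention these examples assume for sgd_entropic, inferred from the call sites (positional form above, keyword form in Example #2): it takes a gradient function, an initial scale vector, an iteration count, a learning rate, a RandomState, and a callback; it invokes the callback with keyword arguments (at least x, t, and entropy) during the run and returns the final iterate and an entropy estimate. The body below is a plain-SGD placeholder, not the actual entropic update, the default values for approx and mu are guesses, and gradfun taking (x, t) is carried over from the later examples.

import numpy as np

def sgd_entropic(gradfun, x_scale, N_iter, learn_rate, rs, callback,
                 approx=True, mu=0.0):
    # Random initialization at the given per-dimension scale.
    x = mu + rs.randn(x_scale.size) * x_scale
    entropy = 0.0  # stand-in for the running entropy estimate
    for t in range(N_iter):
        x = x - learn_rate * gradfun(x, t)   # placeholder plain-SGD step
        callback(x=x, t=t, entropy=entropy)
    return x, entropy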
Example #2
def run():
    x_init_scale = np.full(D, init_scale)

    print "Running experiment..."
    results = defaultdict(list)
    for i in range(N_samples):

        def callback(x, t, entropy):
            if i < N_samples_trails:
                results[("trail_x", i)].append(x.copy())
            results[("entropy", i)].append(entropy)
            results[("likelihood", i)].append(-nllfun(x))
            if t in snapshot_times:
                results[("all_x", t)].append(x.copy())

        rs = RandomState((seed, i))
        x, entropy = sgd_entropic(gradfun,
                                  x_scale=x_init_scale,
                                  N_iter=N_iter,
                                  learn_rate=alpha,
                                  rs=rs,
                                  callback=callback,
                                  approx=False,
                                  mu=init_mu)
        callback(x, N_iter, entropy)
    return results
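These run() functions are bodies extracted from experiment scripts and read their configuration from module-level globals and imports. A hypothetical configuration for Examples #1 and #2 is sketched below; the names match the examples, but all values, the toy nllfun/gradfun pair, and the use of numpy's RandomState (the repo may supply its own wrapper that accepts tuple seeds) are illustrative assumptions.

import numpy as np
from collections import defaultdict
from copy import copy
from numpy.random import RandomState

D = 2                          # dimensionality of x
N_samples = 10                 # number of independent SGD runs
N_samples_trails = 3           # runs whose full trajectory is recorded
N_iter = 100                   # iterations per run
alpha = 0.1                    # learning rate
init_scale = 1.0               # scale of the random initialization
init_mu = 0.0                  # mean of the random initialization
seed = 0                       # base seed; each run reseeds with (seed, i)
snapshot_times = (0, 10, 99)   # iterations at which to snapshot every run
mu = 0.0                       # target mean (Example #1 also uses this)

def nllfun(x):                 # toy target: isotropic Gaussian
    return 0.5 * np.sum((x - mu) ** 2)

def gradfun(x, t):             # its gradient; iteration index unused here
    return x - mu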
Example #3
def run():
    (train_images, train_labels),\
    (tests_images, tests_labels) = load_data_subset(N_train, N_tests)
    parser, pred_fun, nllfun, frac_err = make_nn_funs(layer_sizes)
    N_param = len(parser.vect)

    def indexed_loss_fun(w, i_iter):
        # 'i' is the run index from the sampling loop below, bound at call time.
        rs = RandomState((seed, i, i_iter))
        idxs = rs.randint(N_train, size=batch_size)
        nll = nllfun(w, train_images[idxs], train_labels[idxs]) * N_train
        nlp = neg_log_prior(w)
        return nll + nlp

    gradfun = grad(indexed_loss_fun)

    def callback(x, t, entropy):
        results["entropy_per_dpt"].append(entropy / N_train)
        results["x_rms"].append(np.sqrt(np.mean(x * x)))
        results["minibatch_likelihood"].append(-indexed_loss_fun(x, t))
        results["log_prior_per_dpt"].append(-neg_log_prior(x) / N_train)
        if t % thin != 0 and t != N_iter and t != 0: return
        results["iterations"].append(t)
        results["train_likelihood"].append(
            -nllfun(x, train_images, train_labels))
        results["tests_likelihood"].append(
            -nllfun(x, tests_images, tests_labels))
        results["tests_error"].append(frac_err(x, tests_images, tests_labels))
        results["marg_likelihood"].append(
            estimate_marginal_likelihood(results["train_likelihood"][-1],
                                         results["entropy_per_dpt"][-1]))

        print "Iteration {0:5} Train lik {1:2.4f}  Test lik {2:2.4f}" \
              "  Marg lik {3:2.4f}  Test err {4:2.4f}".format(
                  t, results["train_likelihood"][-1],
                  results["tests_likelihood"][-1],
                  results["marg_likelihood" ][-1],
                  results["tests_error"     ][-1])

    all_results = []
    for i in range(N_samples):
        results = defaultdict(list)
        rs = RandomState((seed, i))
        sgd_entropic(gradfun, np.full(N_param, init_scale), N_iter, alpha, rs,
                     callback)
        all_results.append(results)

    return all_results
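The reseeding inside indexed_loss_fun is what lets the callback call -indexed_loss_fun(x, t) after the fact: the minibatch is a pure function of (seed, i, i_iter), so re-evaluating at iteration t reproduces exactly the batch the optimizer saw at step t. A quick self-contained check, assuming numpy's RandomState (which accepts a tuple of ints as a seed; the repo may use its own RandomState wrapper instead):

from numpy.random import RandomState

seed, i, N_train, batch_size = 0, 3, 100, 5
first  = RandomState((seed, i, 7)).randint(N_train, size=batch_size)
second = RandomState((seed, i, 7)).randint(N_train, size=batch_size)
assert (first == second).all()  # same (run, iteration) -> same minibatch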
Example #4
def run():
    train_inputs, train_targets,\
    tests_inputs, tests_targets, unscale_y = load_boston_housing(train_frac)
    N_train = train_inputs.shape[0]
    batch_size = N_train
    alpha = alpha_un / N_train
    parser, pred_fun, nllfun, rmse = make_regression_nn_funs(layer_sizes)
    N_param = len(parser.vect)

    def indexed_loss_fun(w, i_iter):
        # 'i' is the run index from the sampling loop below, bound at call time.
        rs = RandomState((seed, i, i_iter))
        idxs = rs.randint(N_train, size=batch_size)
        nll = nllfun(w, train_inputs[idxs], train_targets[idxs]) * N_train
        nlp = neg_log_prior(w)
        return nll + nlp
    gradfun = grad(indexed_loss_fun)

    def callback(x, t, entropy):
        results["entropy_per_dpt"     ].append(entropy / N_train)
        results["x_rms"               ].append(np.sqrt(np.mean(x * x)))
        results["minibatch_likelihood"].append(-indexed_loss_fun(x, t))
        results["log_prior_per_dpt"   ].append(-neg_log_prior(x) / N_train)
        if t % thin != 0 and t != N_iter and t != 0: return
        results["iterations"      ].append(t)
        results["train_likelihood"].append(-nllfun(x, train_inputs, train_targets))
        results["tests_likelihood"].append(-nllfun(x, tests_inputs, tests_targets))
        results["train_rmse"      ].append(unscale_y(rmse(x, train_inputs, train_targets)))
        results["tests_rmse"      ].append(unscale_y(rmse(x, tests_inputs, tests_targets)))
        results["marg_likelihood" ].append(estimate_marginal_likelihood(
            results["train_likelihood"][-1], results["entropy_per_dpt"][-1]))
                                           
        print "Iteration {0:5} Train lik {1:2.4f}  Test lik {2:2.4f}" \
              "  Marg lik {3:2.4f}  Test RMSE {4:2.4f}".format(
                  t, results["train_likelihood"][-1],
                  results["tests_likelihood"][-1],
                  results["marg_likelihood" ][-1],
                  results["tests_rmse"      ][-1])

    all_results = []
    for i in range(N_samples):
        results = defaultdict(list)
        rs = RandomState((seed, i))
        sgd_entropic(gradfun, np.full(N_param, init_scale), N_iter, alpha, rs, callback)
        all_results.append(results)

    return all_results
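Every loss above adds neg_log_prior(w) to the rescaled data term, so the optimizer descends an unnormalized negative log posterior. A minimal sketch of neg_log_prior, under the assumption of an isotropic Gaussian prior on the weights (the variance is illustrative; the repo's actual prior may differ):

import numpy as np

prior_variance = 1.0

def neg_log_prior(w):
    D = w.size
    return 0.5 * (np.dot(w, w) / prior_variance
                  + D * np.log(2 * np.pi * prior_variance))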
Example #5
def run():
    (train_images, train_labels),\
    (tests_images, tests_labels) = load_data_subset(N_train, N_tests)
    parser, pred_fun, nllfun, frac_err = make_nn_funs(layer_sizes)
    N_param = len(parser.vect)

    def indexed_loss_fun(w, i_iter):
        # 'i' is the run index from the sampling loop below, bound at call time.
        rs = RandomState((seed, i, i_iter))
        idxs = rs.randint(N_train, size=batch_size)
        nll = nllfun(w, train_images[idxs], train_labels[idxs]) * N_train
        nlp = neg_log_prior(w)
        return nll + nlp
    gradfun = grad(indexed_loss_fun)

    def callback(x, t, entropy):
        results["entropy_per_dpt"     ].append(entropy / N_train)
        results["x_rms"               ].append(np.sqrt(np.mean(x * x)))
        results["minibatch_likelihood"].append(-indexed_loss_fun(x, t))
        results["log_prior_per_dpt"   ].append(-neg_log_prior(x) / N_train)
        if t % thin != 0 and t != N_iter and t != 0: return
        results["iterations"      ].append(t)
        results["train_likelihood"].append(-nllfun(x, train_images, train_labels))
        results["tests_likelihood"].append(-nllfun(x, tests_images, tests_labels))
        results["tests_error"     ].append(frac_err(x, tests_images, tests_labels))
        results["marg_likelihood" ].append(estimate_marginal_likelihood(
            results["train_likelihood"][-1], results["entropy_per_dpt"][-1]))
                                           
        print "Iteration {0:5} Train lik {1:2.4f}  Test lik {2:2.4f}" \
              "  Marg lik {3:2.4f}  Test err {4:2.4f}".format(
                  t, results["train_likelihood"][-1],
                  results["tests_likelihood"][-1],
                  results["marg_likelihood" ][-1],
                  results["tests_error"     ][-1])

    all_results = []
    for i in range(N_samples):
        results = defaultdict(list)
        rs = RandomState((seed, i))
        sgd_entropic(gradfun, np.full(N_param, init_scale), N_iter, alpha, rs, callback)
        all_results.append(results)

    return all_results
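estimate_marginal_likelihood combines the two quantities the callback has just recorded. A plausible one-line sketch, assuming it implements the usual variational lower bound log p(D) >= E_q[log p(D, w)] + H[q] with both arguments already on a common scale (how the repo actually normalizes the two terms is not visible from these examples):

def estimate_marginal_likelihood(train_likelihood, entropy_per_dpt):
    return train_likelihood + entropy_per_dpt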
Example #6
def run():
    x_init_scale = np.full(D, init_scale)

    print "Running experiment..."
    results = defaultdict(list)
    for i in range(N_samples):
        def callback(x, t, entropy):
            if i < N_samples_trails:
                results[("trail_x", i)].append(x.copy())
            results[("entropy", i)].append(entropy)
            results[("likelihood", i)].append(-nllfun(x))
            if t in snapshot_times:
                results[("all_x", t)].append(x.copy())

        rs = RandomState((seed, i))
        x, entropy = sgd_entropic(gradfun, x_scale=x_init_scale, N_iter=N_iter, learn_rate=alpha,
                                  rs=rs, callback=callback, approx=False, mu=init_mu)
        callback(x, N_iter, entropy)
    return results
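A hypothetical downstream use of the dict returned by the last run(): its keys are the tuples ("trail_x", i), ("entropy", i), ("likelihood", i), and ("all_x", t), each mapping to a list with one entry per callback invocation (plus the final explicit callback(x, N_iter, entropy) call).

results = run()
final_liks = [results[("likelihood", i)][-1] for i in range(N_samples)]
print("mean final log-likelihood:", sum(final_liks) / N_samples)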