Example #1
def save_unwhitened(th, lam0, lam0_delta, parser, K_chol, data_dir, split_type):
    # un-whiten the basis weights: apply the Cholesky factor K_chol to betas, then save
    to_save = th.copy()
    betas  = parser.get(to_save, 'betas')
    parser.set(to_save, 'betas', np.dot(K_chol, betas.T).T)
    qfb.save_basis_fit(to_save, lam0, lam0_delta, parser,
                       data_dir=data_dir, split_type=split_type)
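
For context, save_unwhitened treats the betas stored in th as whitened coefficients: multiplying by the Cholesky factor K_chol maps them back to the original (correlated) coordinates before saving. Below is a minimal round-trip sketch of that transform using only numpy; the kernel, grid size, and shapes are made up for illustration and are not taken from the original module.

import numpy as np

# hypothetical sizes: 4 basis vectors over a 100-pixel wavelength grid
n_bases, n_lam = 4, 100

# toy squared-exponential kernel over the grid (illustrative only), with jitter
lam    = np.linspace(0.0, 1.0, n_lam)
K      = np.exp(-0.5 * (lam[:, None] - lam[None, :])**2 / 0.05**2) + 1e-6 * np.eye(n_lam)
K_chol = np.linalg.cholesky(K)

# "whitened" betas are what the optimizer sees; the saved (unwhitened) betas
# are K_chol.dot(betas_white.T).T, exactly as in save_unwhitened above
rng         = np.random.RandomState(0)
betas_white = rng.randn(n_bases, n_lam)
betas       = np.dot(K_chol, betas_white.T).T

# the transform is invertible, so the whitened coordinates can be recovered
recovered = np.linalg.solve(K_chol, betas.T).T
assert np.allclose(recovered, betas_white)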
Example #2
def minimize_chunk(fun, jac, x0, method, chunk_size=4):
    """ minimize function that saves every few iterations """
    num_chunks = int(MAX_LBFGS_ITER / float(chunk_size)) + 1
    for chunk_i in range(num_chunks):
        print "optimizing chunk %d of %d (curr_ll = %2.5g)"%(chunk_i, num_chunks, fun(x0))
        res = minimize(fun = fun, jac = jac, x0 = x0, method = method,
                       options = {'maxiter': chunk_size, 'disp': True})
        x0  = res.x
        qfb.save_basis_fit(res.x, lam0, lam0_delta, parser, data_dir="")
    return res
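
The chunking pattern above (run scipy.optimize.minimize for a few iterations at a time and write a checkpoint between chunks) can be tried in isolation. Here is a self-contained sketch in which a simple checkpoint function stands in for qfb.save_basis_fit; the rosen test objective comes from scipy and the checkpoint file path is an arbitrary illustration.

import numpy as np
from scipy.optimize import minimize, rosen, rosen_der

MAX_LBFGS_ITER = 100

def checkpoint(x):
    # stand-in for qfb.save_basis_fit: just persist the current iterate
    np.save("/tmp/current_iterate.npy", x)

def minimize_chunk(fun, jac, x0, method, chunk_size=4):
    """ Minimize fun, checkpointing every chunk_size iterations. """
    num_chunks = int(MAX_LBFGS_ITER / float(chunk_size)) + 1
    for chunk_i in range(num_chunks):
        print("optimizing chunk %d of %d (curr_ll = %2.5g)" % (chunk_i, num_chunks, fun(x0)))
        res = minimize(fun=fun, jac=jac, x0=x0, method=method,
                       options={'maxiter': chunk_size, 'disp': False})
        x0 = res.x
        checkpoint(res.x)
    return res

res = minimize_chunk(fun=rosen, jac=rosen_der, x0=np.zeros(5), method='L-BFGS-B')
print("final objective: %2.5g" % res.fun)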
Example #3
def minimize_chunk(fun, jac, x0, method, chunk_size=250):
    """ minimize function that saves every few iterations """
    num_chunks = int(MAX_LBFGS_ITER / float(chunk_size)) + 1
    for chunk_i in range(num_chunks):
        print "optimizing chunk %d of %d (curr_ll = %2.5g)"%(chunk_i, num_chunks, fun(x0))
        res = minimize(fun = fun, jac = jac, x0 = x0, method = method,
                       options = {'maxiter': chunk_size, 'disp': True})
        x0  = res.x
        qfb.save_basis_fit(res.x, lam0, lam0_delta, parser, data_dir="")
    return res
Example #4
def save_unwhitened(th, lam0, lam0_delta, parser, K_chol, data_dir,
                    split_type):
    # un-whiten the basis weights: apply the Cholesky factor K_chol to betas, then save
    to_save = th.copy()
    betas = parser.get(to_save, 'betas')
    parser.set(to_save, 'betas', np.dot(K_chol, betas.T).T)
    qfb.save_basis_fit(to_save,
                       lam0,
                       lam0_delta,
                       parser,
                       data_dir=data_dir,
                       split_type=split_type)
Example #5
            print " %d, loss = %2.4g, grad = %2.4g " % \
                (i, loss_val, np.sqrt(np.dot(g,g)))
            obj_vals.append(loss_val)

    ## train with minibatch RMSProp
    def full_loss_grad(th, idx=None):
        return loss_grad(th, idx) + prior_loss_grad(th, idx)

    minibatch_minimize(grad=full_loss_grad,
                       x=th,
                       callback=callback,
                       num_epochs=10,
                       batch_size=24,
                       step_size=RMSPROP_STEP,
                       mass=RMSPROP_MOMENTUM)
    qfb.save_basis_fit(min_x, lam0, lam0_delta, parser, data_dir="")

    ## tighten it up a bit with chunked L-BFGS-B
    res = minimize_chunk(fun=lambda th: loss_fun(th) + prior_loss(th),
                         jac=lambda th: loss_grad(th) + prior_loss_grad(th),
                         x0=min_x,
                         method='L-BFGS-B')

    ## save result
    qfb.save_basis_fit(res.x, lam0, lam0_delta, parser, data_dir="")
    th_mle = res.x
    ll_mle = loss_fun(th_mle) + prior_loss(th_mle)

    ## plot the loss profile along a handful of random directions
    egrid = np.linspace(-1.5, 1.5, 30)
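
The two-stage recipe in this example, a stochastic RMSProp pass followed by an L-BFGS-B refinement of the best iterate, can be reproduced on a toy least-squares problem. The sketch below hand-rolls the RMSProp update so it stays self-contained; it assumes minibatch_minimize behaves like a standard RMSProp loop over minibatches, which is an assumption rather than a detail confirmed by the original code.

import numpy as np
from scipy.optimize import minimize

# toy least-squares problem: recover w from noisy observations y = A.dot(w_true) + noise
rng    = np.random.RandomState(0)
A      = rng.randn(500, 10)
w_true = rng.randn(10)
y      = A.dot(w_true) + 0.1 * rng.randn(500)

def loss(w, idx=slice(None)):
    resid = A[idx].dot(w) - y[idx]
    return 0.5 * np.mean(resid ** 2)

def grad(w, idx=slice(None)):
    resid = A[idx].dot(w) - y[idx]
    return A[idx].T.dot(resid) / resid.shape[0]

# stage 1: minibatch RMSProp (hand-rolled stand-in for minibatch_minimize)
w, avg_sq_grad = np.zeros(10), np.ones(10)
step_size, gamma, eps, batch_size = 0.01, 0.9, 1e-8, 25
for epoch in range(10):
    for start in range(0, A.shape[0], batch_size):
        idx = slice(start, start + batch_size)
        g = grad(w, idx)
        avg_sq_grad = gamma * avg_sq_grad + (1.0 - gamma) * g ** 2
        w -= step_size * g / (np.sqrt(avg_sq_grad) + eps)

# stage 2: tighten up with L-BFGS-B on the full objective
res = minimize(fun=loss, jac=grad, x0=w, method='L-BFGS-B')
print("rmsprop loss = %2.4g, refined loss = %2.4g" % (loss(w), loss(res.x)))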
Example #6
            loss_val = loss_fun(x) + prior_loss(x)
            if loss_val < min_val:
                min_x = x
                min_val = loss_val
            print " %d, loss = %2.4g, grad = %2.4g " % \
                (i, loss_val, np.sqrt(np.dot(g,g)))
            obj_vals.append(loss_val)

    ## train with rms_prop
    out = rms_prop(grad      = lambda th: loss_grad(th) + prior_loss_grad(th),
                   x         = th,
                   callback  = callback,
                   num_iters = MAX_RMSPROP_ITER,
                   step_size = RMSPROP_STEP,
                   gamma     = RMSPROP_MOMENTUM)
    qfb.save_basis_fit(min_x, lam0, lam0_delta, parser, data_dir="")

    ## tighten it up a bit with chunked L-BFGS-B
    res = minimize_chunk(fun = lambda th: loss_fun(th) + prior_loss(th),
                         jac = lambda th: loss_grad(th) + prior_loss_grad(th),
                         x0  = min_x,
                         method = 'L-BFGS-B')

    ## save result
    qfb.save_basis_fit(res.x, lam0, lam0_delta, parser, data_dir="")
    th_mle = res.x
    ll_mle = loss_fun(th_mle) + prior_loss(th_mle)

    ## plot the loss profile along a handful of random directions
    egrid = np.linspace(-1.5, 1.5, 30)
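
The trailing egrid hints at the profile plot that follows in the original script: evaluate the objective along a handful of random unit directions through th_mle. Below is a sketch of that kind of slice plot, with a toy quadratic standing in for loss_fun + prior_loss; the matplotlib usage, direction count, and output filename are illustrative assumptions.

import numpy as np
import matplotlib.pyplot as plt

# stand-in objective; in the script above this would be loss_fun(th) + prior_loss(th)
D      = 20
th_mle = np.zeros(D)
def objective(th):
    return 0.5 * np.sum(th ** 2)

egrid = np.linspace(-1.5, 1.5, 30)
rng   = np.random.RandomState(0)
for k in range(5):
    # random unit direction through the optimum
    d = rng.randn(D)
    d /= np.sqrt(np.dot(d, d))
    profile = np.array([objective(th_mle + e * d) for e in egrid])
    plt.plot(egrid, profile, label="direction %d" % k)

plt.xlabel("step along random direction")
plt.ylabel("objective")
plt.legend()
plt.savefig("random_direction_profiles.png")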