Example #1
import numpy as np
import autograd

# Relies on project-local helpers assumed importable from the surrounding
# package: make_data_linreg_1d, LinregModel, and bfgs.
def main():
    np.random.seed(1)
    Xtrain, ytrain, params_true, true_fun, ttl = make_data_linreg_1d(21, 'linear')
    
    model = LinregModel(1, True)
    params_init = model.init_params()
    print(model)
    
    # Check that OLS and BFGS give same result
    params_ols, loss_ols = model.ols_fit(Xtrain, ytrain)
    obj_fun = lambda params: model.objective(params, Xtrain, ytrain)
    grad_fun = lambda params: model.gradient(params, Xtrain, ytrain)
    params_bfgs, loss_bfgs = bfgs(obj_fun, grad_fun, params_init) 
    assert(np.allclose(params_bfgs, params_ols))
    assert(np.allclose(loss_bfgs, loss_ols))

    # Check that analytic gradient and automatic gradient give same result
    # when evaluated on training data
    grad_fun = autograd.grad(obj_fun)
    grad_auto = grad_fun(params_init)
    grad_finite_diff = autograd.util.nd(lambda p: obj_fun(p), params_init)[0]
    grad_analytic = model.gradient(params_init, Xtrain, ytrain)
    assert(np.allclose(grad_auto, grad_finite_diff))
    assert(np.allclose(grad_auto, grad_analytic))

    params_autograd, loss_autograd = bfgs(obj_fun, grad_fun, params_init) 
    assert(np.allclose(params_bfgs, params_autograd))
    assert(np.allclose(loss_bfgs, loss_autograd))
    
    print "All assertions passed"
Example #2
import os

import numpy as np
import autograd
import matplotlib.pyplot as plt

# Relies on project-local modules assumed importable from the surrounding
# package: demo, opt, util, LinregModel, MLP, and config_to_str.
def run_expt(config, loss_opt=0):
    ttl = config_to_str(config)
    print('\nstarting experiment {}'.format(ttl))
    print(config)
    
    Xtrain, Ytrain, params_true, true_fun, fun_name = \
      demo.make_data_linreg_1d(config['N'], config['fun_type'])
    data_dim = Xtrain.shape[1]
    N = Xtrain.shape[0]
    Xtrain, Ytrain = opt.shuffle_data(Xtrain, Ytrain)
        
    model_type = config['model_type']
    if model_type == 'linear':
        model = LinregModel(data_dim, add_ones=True)
        params, loss = model.ols_fit(Xtrain, Ytrain)
    elif model_type[0:3] == 'mlp':
        _, layer_sizes = model_type.split(':')
        layer_sizes = [int(n) for n in layer_sizes.split('-')]
        model = MLP(layer_sizes, 'regression', L2_reg=0.001) 
    else:
        raise ValueError('unknown model type {}'.format(model_type))
            
    initial_params = model.init_params() 
    param_dim = len(initial_params)

    plot_data = (data_dim == 1)
    plot_params = (param_dim == 2)
    nplots = 1
    if plot_data: 
        nplots += 1
    if plot_params:
        nplots += 1
    plot_rows, plot_cols = util.nsubplots(nplots)

    if config['optimizer'] == 'BFGS':
        obj_fun = lambda params: model.PNLL(params, Xtrain, Ytrain)
        grad_fun = autograd.grad(obj_fun)
        logger = opt.OptimLogger(lambda params: obj_fun(params), eval_freq=1, 
                    store_freq=1, print_freq=1)   
        params, obj = opt.bfgs(obj_fun, grad_fun, initial_params, config['num_epochs'], 
                          logger.callback)
                         
    if config['optimizer'] == 'SGD':
        B = config['batch_size']
        M = N // B  # number of minibatches per epoch (iterations per epoch)
        max_iters = config['num_epochs'] * M
        
        grad_fun_with_iter = opt.build_batched_grad(model.gradient, config['batch_size'], Xtrain, Ytrain)
        #obj_fun = opt.build_batched_grad(model.PNLL, config['batch_size'], Xtrain, Ytrain)
        obj_fun = lambda params: model.PNLL(params, Xtrain, Ytrain)
        sf = config.get('store_freq', M)
        logger = opt.OptimLogger(obj_fun, eval_freq=sf, store_freq=sf, print_freq=0)         
        sgd_fun = config['sgd_fun']
 
        if config['lr_tune']:
            eval_fun = lambda params: model.PNLL(params, Xtrain, Ytrain)
            lr, lrs, scores = opt.lr_tuner(eval_fun, 'grid', sgd_fun, grad_fun_with_iter,
                            initial_params, int(np.ceil(max_iters*0.1)))
            print('lr tuner chose lr {:0.3f}'.format(lr))
            print(lrs)
            print(scores)
            config['lr_init'] = lr
            
        lr_fun = lambda iter: opt.lr_exp_decay(iter, config['lr_init'],
                                    config['lr_decay'], config['lr_step']) 
        params, obj = sgd_fun(obj_fun, grad_fun_with_iter, initial_params,
                            max_iters, logger.callback, lr_fun)
    
    training_loss = model.PNLL(params, Xtrain, Ytrain)
    print('finished fitting, training loss {:0.3g}, {} obj calls, {} grad calls'
          .format(training_loss, model.num_obj_fun_calls, model.num_grad_fun_calls))
    
    fig = plt.figure()
    ax = fig.add_subplot(plot_rows, plot_cols, 1)
    opt.plot_loss_trace(logger.eval_trace, loss_opt, ax)
    ax.set_title('final objective {:0.3g}'.format(training_loss))
    ax.set_xlabel('epochs')
    
    if plot_data:
        ax = fig.add_subplot(plot_rows, plot_cols, 2)
        predict_fun = lambda X: model.predictions(params, X)
        demo.plot_data_and_predictions_1d(Xtrain, Ytrain, true_fun, predict_fun, ax)
    
    if plot_params:
        ax = fig.add_subplot(plot_rows, plot_cols, 3)
        loss_fun = lambda w0, w1: model.PNLL(np.array([w0, w1]), Xtrain, Ytrain)
        demo.plot_error_surface_2d(loss_fun, params, params_true, config['fun_type'], ax)
        demo.plot_param_trace_2d(logger.param_trace, ax)        
        
    ttl = config_to_str(config) # recompute in case lr has been estimated
    fig.suptitle(ttl)
    folder = 'figures/linreg-sgd'        
    fname = os.path.join(folder, 'linreg_1d_sgd_{}.png'.format(ttl))
    plt.savefig(fname)
    return training_loss
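Two more project-local helpers appear in the SGD branch of run_expt: opt.lr_exp_decay, which the call above suggests is a step-wise exponential learning-rate schedule, and opt.build_batched_grad, which turns a full-data gradient into a minibatch gradient indexed by iteration. Neither implementation is shown in this listing; the sketches below are hypothetical versions written only to make the calling code easier to follow.

import numpy as np

def lr_exp_decay(iteration, lr_init, decay_rate, decay_steps):
    # Hypothetical staircase schedule: multiply the base rate by decay_rate
    # once every decay_steps iterations.
    return lr_init * decay_rate ** (iteration // decay_steps)

def build_batched_grad(grad_fun, batch_size, X, y):
    # Hypothetical minibatch wrapper: returns grad(params, iteration) that
    # evaluates grad_fun on one slice of the (pre-shuffled) training set,
    # cycling through the data as the iteration counter grows.
    num_batches = int(np.ceil(len(X) / float(batch_size)))
    def batched_grad(params, iteration):
        b = iteration % num_batches
        idx = slice(b * batch_size, (b + 1) * batch_size)
        return grad_fun(params, X[idx], y[idx])
    return batched_grad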