def clutter_button_callback(attr):
    if cluttered_button.active:
        cluttered_button.button_type = 'success'
        scatter_data.data = {'x': x[indexes], 'y': y[indexes]}
        new_x = x[indexes]
        new_y = y[indexes]
        new_y_line = set_m.value * new_x
        new_line_x = [[i, i] for i in new_x]
        new_line_y = [[j, new_y_line[i]] for i, j in enumerate(new_y)]
        error_points.data = dict(x=new_line_x, y=new_line_y)

        error_param = error_land_data.data['x']
        error_val = [utils.compute_error(new_x, new_y, i) for i in error_param]
        error_land_data.data = dict(x=error_param, y=error_val)

        error_plot.y_range.update(start=0, end=400)

    else:
        cluttered_button.button_type = 'primary'
        scatter_data.data = {'x': x, 'y': y}

        error_param = error_land_data.data['x']
        error_val = [utils.compute_error(x, y, i) for i in error_param]
        error_land_data.data = dict(x=error_param, y=error_val)

        error_plot.y_range.update(start=0, end=2000)
Example #2
def change_m(attr, old, new):
    new_m = set_m.value

    x_line = np.arange(-1, 5.1, 0.1)
    y_line = new_m * x_line

    line_data.data = dict(x=x_line, y=y_line)

    if cluttered_button.active:
        new_x = x[indexes]
        new_y = y[indexes]
    else:
        new_x = x
        new_y = y

    new_y_line = set_m.value * new_x
    new_line_x = [[i, i] for i in new_x]
    new_line_y = [[j, new_y_line[i]] for i, j in enumerate(new_y)]
    error_points.data = dict(x=new_line_x, y=new_line_y)

    error_param = np.append(error_land_data.data['x'], new_m)
    error_val = np.append(
        error_land_data.data['y'],
        utils.compute_error(new_x, new_y, new_m),
    )
    sort_index = np.argsort(error_param)
    error_param = error_param[sort_index]
    error_val = error_val[sort_index]
    error_land_data.data = dict(x=error_param, y=error_val)
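
These callbacks assume a utils.compute_error(x, y, m) helper. A minimal sketch consistent with how it is called above, assuming a sum-of-squared-residuals error for the line y = m*x; the project's actual utils module may define it differently:

import numpy as np

def compute_error(x, y, m):
    # Hypothetical sketch: sum of squared residuals of the line y = m*x.
    # Inferred from the calls above, not the project's confirmed definition.
    return float(np.sum((np.asarray(y) - m * np.asarray(x)) ** 2))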
def run_experiment(args):
    # build directory name
    commit = git.Repo(search_parent_directories=True).head.object.hexsha[:10]
    results_dirname = os.path.join(args["output_dir"], commit + "/")
    os.makedirs(results_dirname, exist_ok=True)

    # build file name
    md5_fname = hashlib.md5(str(args).encode('utf-8')).hexdigest()
    results_fname = os.path.join(results_dirname, md5_fname + ".jsonl")
    results_file = open(results_fname, "w")

    utils.set_seed(args["data_seed"])
    dataset = datasets.DATASETS[args["dataset"]](dim_inv=args["dim_inv"],
                                                 dim_spu=args["dim_spu"],
                                                 n_envs=args["n_envs"])

    # Oracle trained on test mode (scrambled)
    train_split = "train" if args["model"] != "Oracle" else "test"

    # sample the envs
    envs = {}
    for key_split, split in zip(("train", "validation", "test"),
                                (train_split, train_split, "test")):
        envs[key_split] = {"keys": [], "envs": []}
        for env in dataset.envs:
            envs[key_split]["envs"].append(
                dataset.sample(n=args["num_samples"], env=env, split=split))
            envs[key_split]["keys"].append(env)

    # offsetting model seed to avoid overlap with data_seed
    utils.set_seed(args["model_seed"] + 1000)

    # selecting model
    args["num_dim"] = args["dim_inv"] + args["dim_spu"]
    model = models.MODELS[args["model"]](in_features=args["num_dim"],
                                         out_features=1,
                                         task=dataset.task,
                                         hparams=args["hparams"])

    # update this field for printing purposes
    args["hparams"] = model.hparams

    # fit the dataset
    model.fit(envs=envs,
              num_iterations=args["num_iterations"],
              callback=args["callback"])

    # compute the train, validation and test errors
    for split in ("train", "validation", "test"):
        key = "error_" + split
        for k_env, env in zip(envs[split]["keys"], envs[split]["envs"]):
            args[key + "_" + k_env] = utils.compute_error(model, *env)

    # write results
    results_file.write(json.dumps(args) + "\n")  # .jsonl: newline-terminated record
    results_file.close()
    return args
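
A hedged usage sketch for run_experiment. The keys mirror those read inside the function; the values (dataset and model names, seeds, directories) are illustrative assumptions, not taken from the project:

args = {
    "output_dir": "results/",
    "dataset": "Example2",      # assumed key into datasets.DATASETS
    "model": "ERM",             # assumed key into models.MODELS
    "dim_inv": 5, "dim_spu": 5,
    "n_envs": 3, "num_samples": 1000,
    "data_seed": 0, "model_seed": 0,
    "num_iterations": 10000,
    "hparams": "default",       # passed through to the model constructor
    "callback": False,
}
results = run_experiment(args)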
Example #4
def test_model(model, X, y, test_type='Test'):
    logging.info('Testing model...')
    predictions = model.predict(X)
    err = utils.compute_error(y, predictions)
    acc = 1 - err
    num_total = len(y)
    num_correct = int(round(acc * num_total))
    num_incorrect = num_total - num_correct
    logging.info('%s accuracy: %g (%d/%d)' % (test_type, acc, num_correct, num_total))
    logging.info('%s error: %g (%d/%d)' % (test_type, err, num_incorrect, num_total))
    return acc, err
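
Here compute_error(y, predictions) is used so that accuracy is 1 - err; a plausible sketch is the misclassification rate (an assumption consistent with that identity):

import numpy as np

def compute_error(y, predictions):
    # Hypothetical sketch: fraction of mismatched labels, so that
    # 1 - compute_error(y, predictions) is the accuracy used above.
    return float(np.mean(np.asarray(y) != np.asarray(predictions)))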
Example #5
def load_and_test(self, test_data, path=None):
    pred = self.load_and_predict(test_data, path)
    c_pred = pred["c"]
    u_pred = pred["u"]
    V_high = test_data["V_high"]
    u_high = test_data["u_high"]
    c_high = test_data["c_high"]
    coeff_errors = []
    approx_errors = []
    for i in range(self.Lmax):
        coeff_errors.append(
            compute_error(c_high[:, :(i + 1)], c_pred[i], scale=u_high))
        approx_errors.append(compute_error(u_high, u_pred[i],
                                           scale=u_high))
    return {
        "c": c_pred,
        "u": u_pred,
        "coeff_errors": coeff_errors,
        "approx_errors": approx_errors
    }
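
load_and_test calls compute_error(true, pred, scale=u_high); one plausible reading, sketched under the assumption that scale normalizes an L2 error:

import numpy as np

def compute_error(true, pred, scale=None):
    # Hypothetical sketch: L2 error, normalized by the norm of a reference
    # signal when scale= is given. Inferred from the keyword usage above.
    err = np.linalg.norm(np.asarray(true) - np.asarray(pred))
    return err / np.linalg.norm(scale) if scale is not None else err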
def holdout_train(feats, labels, parameters):

    logging.info("Start holdout training ...")

    x_train, x_test, y_train, y_test = train_test_split(
        feats, labels, test_size=parameters['test_size'])

    logging.debug("Dimension of training set: {}".format(x_train.shape))

    clf = svm.SVR(kernel=parameters['kernel'], C=parameters['c'],
                  epsilon=parameters['epsilon'], gamma=parameters['gamma'])
    clf.fit(x_train, y_train)

    logging.info("Holdout training is complete.")

    y_pred = clf.predict(x_test)
    rmse = utl.compute_error(y_pred, y_test)

    logging.info("Saving the trained model on disk ...")
    utl.save_model(clf, parameters['name'] + '-holdout')

    return y_pred, y_test, rmse
Example #7
def Newtons_method_log_barrier_feasible_init_point(f, A, constraint_inequalities, 
                                                   t_path, x_0, tol, 
                                                   tol_backtracking, x_ast=None, p_ast=None, maxiter=30,
                                                   gf_symbolic = None,
                                                   Hf_symbolic = None):
    '''
    Newton's method to numerically approximate solution of min f subject to Ax = b.
    IMPORTANT: this implementation requires that the initial point x_0 satisfies Ax_0 = b
    Args:
        f (fun): definition of function f as lambda expression or function definition.
        A (numpy ndarray): 2d numpy array of shape (m,n) defines system of constraints Ax=b.
        
        constraint_inequalities (funs): inequality constraints that define the logarithmic barrier.
        
        t_path (float): barrier parameter t of the central path.
        
        x_0 (numpy ndarray): initial point for Newton's method. Must satisfy: Ax_0 = b
        tol (float): tolerance that will halt method. Controls stopping criteria.
        tol_backtracking (float): tolerance that will halt method. Controls value of line search by backtracking.
        x_ast (numpy ndarray): solution of min f, now it's required that user knows the solution...
        p_ast (float): value of f(x_ast), now it's required that user knows the solution...
        maxiter (int): maximum number of iterations
        gf_symbolic (fun): definition of gradient of f. If given, no approximation is
                                     performed via finite differences.
        Hf_symbolic (fun): definition of Hessian of f. If given, no approximation is
                                     performed via finite differences.
    Returns:
        x (numpy ndarray): numpy array, approximation of x_ast.
        iteration (int): number of iterations.
        Err_plot (numpy ndarray): numpy array of absolute error between p_ast and f(x) with x approximation
                          of x_ast. Useful for plotting.
        x_plot (numpy ndarray): numpy array that contains in its columns the vectors of approximations. Last column
                        contains x, approximation of solution. Useful for plotting.
    '''
    iteration = 0
        
    x = x_0
    
    feval = f(x)
        
    if gf_symbolic and Hf_symbolic:
        gfeval = gf_symbolic(x)
        Hfeval = Hf_symbolic(x)
    else:
        grad_Hess_log_barrier_dict = numerical_differentiation_of_logarithmic_barrier(f,x,t_path,
                                                                                      constraint_inequalities)
        gfeval = grad_Hess_log_barrier_dict['gradient']
        Hfeval = grad_Hess_log_barrier_dict['Hessian']

    
    normgf = np.linalg.norm(gfeval)
    condHf= np.linalg.cond(Hfeval)
    
    Err_plot_aux = np.zeros(maxiter)
    Err_plot_aux[iteration]=compute_error(p_ast,feval)
    
    Err = compute_error(x_ast,x)
    
    if (np.all(A < np.nextafter(0,1))): #A is zero matrix
        system_matrix = Hfeval
        rhs = -gfeval
        n = x.size
        p = 0
        
    else:
        if(A.ndim == 1):
            p = 1
            n = x.size
            zero_matrix = np.zeros(p)
            first_stack = np.column_stack((Hfeval, A.T))
            second_stack = np.row_stack((A.reshape(1,n).T,zero_matrix)).reshape(1,n+1)[0]
        else:
            p,n = A.shape
            zero_matrix = np.zeros((p,p))
            first_stack = np.column_stack((Hfeval, A.T))
            second_stack = np.column_stack((A,zero_matrix))
            
        system_matrix = np.row_stack((first_stack,second_stack))
        zero_vector = np.zeros(p)
        rhs = -np.row_stack((gfeval.reshape(n,1), zero_vector.reshape(p,1))).T[0]
    
    x_plot = np.zeros((n,maxiter))
    x_plot[:,iteration] = x
    

    #Newton's direction and Newton's decrement
    dir_desc = np.linalg.solve(system_matrix, rhs)
    dir_Newton = dir_desc[0:n]
    dec_Newton = -gfeval.dot(dir_Newton)
    w_dual_variable_estimation = dir_desc[n:(n+p)]


    print('I\tNorm gfLogBarrier \tNewton Decrement\tError x_ast\tError p_ast\tline search\tCondHf')
    print('{}\t\t{:0.2e}\t\t{:0.2e}\t{:0.2e}\t{:0.2e}\t{}\t\t{:0.2e}'.format(iteration,normgf,
                                                                         dec_Newton,Err,
                                                                         Err_plot_aux[iteration],"---",
                                                                         condHf))
    
    stopping_criteria = dec_Newton/2
    iteration+=1
    while(stopping_criteria>tol and iteration < maxiter):
        der_direct = -dec_Newton
        t = line_search_for_log_barrier_by_backtracking(f,dir_Newton,x,t_path,
                                                        constraint_inequalities,
                                                        der_direct)
        x = x + t*dir_Newton
        feval = f(x)
            
        if gf_symbolic and Hf_symbolic:
            gfeval = gf_symbolic(x)
            Hfeval = Hf_symbolic(x)
        else:
            grad_Hess_log_barrier_dict = numerical_differentiation_of_logarithmic_barrier(f,x,t_path,
                                                                                          constraint_inequalities)
            gfeval = grad_Hess_log_barrier_dict['gradient']
            Hfeval = grad_Hess_log_barrier_dict['Hessian']
        
        if (np.all(A < np.nextafter(0,1))): #A is zero matrix
            system_matrix = Hfeval
            rhs = -gfeval
            n = x.size
            p = 0
        else:
            first_stack = np.column_stack((Hfeval, A.T))
            system_matrix = np.row_stack((first_stack,second_stack))
            rhs = -np.row_stack((gfeval.reshape(n,1), zero_vector.reshape(p,1))).T[0]

        #Newton's direction and Newton's decrement
        dir_desc = np.linalg.solve(system_matrix, rhs)
        dir_Newton = dir_desc[0:n]
        dec_Newton = -gfeval.dot(dir_Newton)
        w_dual_variable_estimation = dir_desc[n:(n+p)]
        
        Err_plot_aux[iteration]=compute_error(p_ast,feval)
        x_plot[:,iteration] = x
        Err = compute_error(x_ast,x)
        print('{}\t\t{:0.2e}\t\t{:0.2e}\t{:0.2e}\t{:0.2e}\t{:0.2e}\t{:0.2e}'.format(iteration,normgf,
                                                                                dec_Newton,Err,
                                                                                Err_plot_aux[iteration],t,
                                                                                condHf))
        stopping_criteria = dec_Newton/2
        if t<tol_backtracking: #if t is less than tol_backtracking then we need to check the reason
            iter_salida=iteration
            iteration = maxiter - 1
        iteration+=1
    print('{} {:0.2e}'.format("Error of x with respect to x_ast:",Err))
    print('{} {}'.format("Approximate solution:", x))
    cond = Err_plot_aux > np.finfo(float).eps*10**(-2)
    Err_plot = Err_plot_aux[cond]
    
    if iteration == maxiter and t < tol_backtracking:
        print("Backtracking value less than tol_backtracking, check approximation")
        iteration=iter_salida
    else:
        if iteration == maxiter:
            print("Reached maximum of iterations, check approximation")
    x_plot = x_plot[:,:iteration]
    return [x,iteration,Err_plot,x_plot]
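
The block matrix assembled above is the standard KKT system for an equality-constrained Newton step. A self-contained sketch with toy data (H, A, g stand in for Hfeval, the constraint matrix, and gfeval):

import numpy as np

# Solve [[H, A.T], [A, 0]] [d, w] = [-g, 0] for the Newton step d
H = np.eye(3)                      # toy Hessian (Hfeval)
A = np.array([[1.0, 1.0, 1.0]])    # one equality constraint, shape (p, n)
g = np.ones(3)                     # toy gradient (gfeval)
p, n = A.shape
K = np.vstack((np.column_stack((H, A.T)),
               np.column_stack((A, np.zeros((p, p))))))
rhs = -np.concatenate((g, np.zeros(p)))
step = np.linalg.solve(K, rhs)
dir_Newton, w_dual = step[:n], step[n:]
dec_Newton = -g.dot(dir_Newton)    # the quantity the routines above halve for the stopping criterion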
Example #8
def Newtons_method_log_barrier_infeasible_init_point(f, A, b, 
                                                     constraint_inequalities, t_path,
                                                     x_0, nu_0, tol, 
                                                     tol_backtracking, x_ast=None, p_ast=None, maxiter=30,
                                                     gf_symbolic = None,
                                                     Hf_symbolic = None):
    '''
    Newton's method for infeasible initial point to numerically approximate solution of min f subject to Ax = b.
    Falls back to Newton's method for a feasible initial point once primal feasibility, controlled by tol, is reached.
    Args:
        f (fun): definition of function f as lambda expression or function definition.
        A (numpy ndarray): 2d numpy array of shape (m,n) defines system of constraints Ax=b.
        b (numpy ndarray or float): array that defines system of constraints Ax=b (can be a single number
                                    if just one restriction is defined)

        constraint_inequalities (funs): inequality constraints that define the logarithmic barrier.
        
        t_path (float): barrier parameter t of the central path.
        
        x_0 (numpy ndarray): initial point for Newton's method. 
        nu_0 (numpy ndarray or float): initial point for Newton's method.
        tol (float): tolerance that will halt method. Controls stopping criteria.
        tol_backtracking (float): tolerance that will halt method. Controls value of line search by backtracking.
        x_ast (numpy ndarray): solution of min f, now it's required that user knows the solution...
        p_ast (float): value of f(x_ast), now it's required that user knows the solution...
        maxiter (int): maximum number of iterations
        gf_symbolic (fun): definition of gradient of f. If given, no approximation is
                                     performed via finite differences.
        Hf_symbolic (fun): definition of Hessian of f. If given, no approximation is
                                     performed via finite differences.
    Returns:
        x (numpy ndarray): numpy array, approximation of x_ast.
        iteration (int): number of iterations.
        Err_plot (numpy ndarray): numpy array of absolute error between p_ast and f(x) with x approximation
                          of x_ast. Useful for plotting.
        x_plot (numpy ndarray): numpy array that contains in its columns the vectors of approximations. Last column
                        contains x, approximation of solution. Useful for plotting.
    '''
    iteration = 0
    x = x_0
    nu = nu_0
    
    feval = f(x)
    
    if gf_symbolic and Hf_symbolic:
        gfeval = gf_symbolic(x)
        Hfeval = Hf_symbolic(x)
    else:
        grad_Hess_log_barrier_dict = numerical_differentiation_of_logarithmic_barrier(f,x,t_path,
                                                                                      constraint_inequalities)
        gfeval = grad_Hess_log_barrier_dict['gradient']
        Hfeval = grad_Hess_log_barrier_dict['Hessian']

    
    normgf = np.linalg.norm(gfeval)
    condHf= np.linalg.cond(Hfeval)
    
    Err_x_ast = compute_error(x_ast,x)
    Err_p_ast = compute_error(p_ast,feval)

    def residual_dual(nu_fun):
        if(A.ndim==1):
            return gfeval + A.T*nu_fun
        else:
            return gfeval + A.T@nu_fun        
    feasibility_dual = residual_dual(nu)    

    if (np.all(A < np.nextafter(0,1))): #A is zero matrix
        system_matrix = Hfeval
        rhs = -gfeval
        n = x.size
        p = 0
        feasibility_primal = 0
    else:
        if(A.ndim == 1):
            p = 1
            n = x.size
            zero_matrix = np.zeros(p)
            first_stack = np.column_stack((Hfeval, A.T))
            second_stack = np.row_stack((A.reshape(1,n).T,zero_matrix)).reshape(1,n+1)[0]
        else:
            p,n = A.shape
            zero_matrix = np.zeros((p,p))
            first_stack = np.column_stack((Hfeval, A.T))
            second_stack = np.column_stack((A,zero_matrix))

        system_matrix = np.row_stack((first_stack,second_stack))
        
        residual_primal = lambda x_fun: A@x_fun-b
        
        feasibility_primal = residual_primal(x)
        
        rhs = -np.row_stack((feasibility_dual.reshape(n,1), feasibility_primal.reshape(p,1))).T[0]

    #Newton's direction and Newton's decrement
    dir_desc = np.linalg.solve(system_matrix, rhs)
    dir_Newton_primal = dir_desc[0:n]
    dec_Newton = -gfeval.dot(dir_Newton_primal)
    dir_Newton_dual = dir_desc[n:(n+p)]

    norm_residual_eval = norm_residual(feasibility_primal,
                                       feasibility_dual)

    norm_residual_primal = np.linalg.norm(feasibility_primal)
    norm_residual_dual = np.linalg.norm(feasibility_dual)
    print('I\t||res_primal||\t||res_dual|| \tNewton Decrement\tError x_ast\tError p_ast\tline search\tCondHf')
    print('{}\t{:0.2e}\t{:0.2e}\t{:0.2e}\t{:0.2e}\t{:0.2e}\t{}\t\t{:0.2e}'.format(iteration,norm_residual_primal,
                                                                                  norm_residual_dual,
                                                                                  dec_Newton,Err_x_ast,
                                                                                  Err_p_ast,"---",
                                                                                  condHf))
    
    stopping_criteria = norm_residual_primal
    iteration+=1
    while(stopping_criteria>tol and iteration < maxiter):
        der_direct = -dec_Newton
        t = line_search_for_residual_by_backtracking(residual_primal, residual_dual,
                                                     dir_Newton_primal, dir_Newton_dual,
                                                     x, nu,
                                                     norm_residual_eval
                                                     )
        x = x + t*dir_Newton_primal
        nu = nu + t*dir_Newton_dual
        feval = f(x)
        
        
        if gf_symbolic and Hf_symbolic:
            gfeval = gf_symbolic(x)
            Hfeval = Hf_symbolic(x)
        else:
            # differentiate the same log-barrier objective as at initialization
            grad_Hess_log_barrier_dict = numerical_differentiation_of_logarithmic_barrier(f,x,t_path,
                                                                                          constraint_inequalities)
            gfeval = grad_Hess_log_barrier_dict['gradient']
            Hfeval = grad_Hess_log_barrier_dict['Hessian']
        first_stack = np.column_stack((Hfeval, A.T))

        system_matrix = np.row_stack((first_stack,second_stack))
        
        feasibility_primal = residual_primal(x)
        feasibility_dual = residual_dual(nu)
        rhs = -np.row_stack((feasibility_dual.reshape(n,1), feasibility_primal.reshape(p,1))).T[0]
            
        #Newton's direction and Newton's decrement
        dir_desc = np.linalg.solve(system_matrix, rhs)
        dir_Newton_primal = dir_desc[0:n]
        dec_Newton = -gfeval.dot(dir_Newton_primal)
        dir_Newton_dual = dir_desc[n:(n+p)]

        Err_x_ast = compute_error(x_ast,x)
        Err_p_ast = compute_error(p_ast,feval)
        norm_residual_eval = norm_residual(feasibility_primal,
                                           feasibility_dual)
        norm_residual_primal = np.linalg.norm(feasibility_primal)
        norm_residual_dual = np.linalg.norm(feasibility_dual)
        print('{}\t{:0.2e}\t{:0.2e}\t{:0.2e}\t{:0.2e}\t{:0.2e}\t{:0.2e}\t{:0.2e}'.format(iteration,norm_residual_primal,
                                                                                         norm_residual_dual,
                                                                                         dec_Newton,Err_x_ast,
                                                                                         Err_p_ast,t,
                                                                                         condHf))

        stopping_criteria = norm_residual_primal
        if t<tol_backtracking: #if t is less than tol_backtracking then we need to check the reason
            iteration = maxiter - 1
        iteration+=1
    
    if iteration == maxiter and t < tol_backtracking:
        print("Backtracking value less than tol_backtracking, try other initial point")
        return [None,None,None,None]
    else:
        if iteration == maxiter:
            print("------------------------------------------------------------")
            print("------------------------------------------------------------")
            print("------------------------------------------------------------")
            print("Reached maximum of iterations, check primal feasibility")
            print("Continuing with Newtons method for feasible initial point")
            return Newtons_method_log_barrier_feasible_init_point(f,A, constraint_inequalities,
                                                                  t_path, x,tol, 
                                                                  tol_backtracking, x_ast, p_ast, 
                                                                  maxiter,gf_symbolic,Hf_symbolic)
        else:
            print("------------------------------------------------------------")
            print("------------------------------------------------------------")
            print("------------------------------------------------------------")
            print("Beginning Newtons method for feasible initial point")
            return Newtons_method_log_barrier_feasible_init_point(f,A, constraint_inequalities,
                                                                  t_path, x,tol, 
                                                                  tol_backtracking, x_ast, p_ast, 
                                                                  maxiter,gf_symbolic,Hf_symbolic)
def weather_prediction(feats, labels, parameters, masks):
    ''' weather prediction wrapper for solar forecast '''

    # create results dictionary under each weather
    sunny_results = {}
    cloudy_results = {}
    partly_cloudy_results = {}

    # get parameters of SVR model under each weather types
    if parameters['SUNNY']['para_path'] is not None:
        logging.info('Loading given model on disk ...')
        sunny_model = utl.restore_model(\
                parameters['SUNNY']['para_path'])
    else:
        logging.info('Restore model on disk ...')
        sunny_model = utl.restore_model(\
                parameters['SUNNY']['name'] + '-grid-search')
    logging.info("Sunny model loaded is {}".format(sunny_model))

    if parameters['CLOUDY']['para_path'] is not None:
        logging.info('Loading given model on disk ...')
        cloudy_model = utl.restore_model(\
                parameters['CLOUDY']['para_path'])
    else:
        logging.info('Restore model on disk ...')
        cloudy_model = utl.restore_model(\
                parameters['CLOUDY']['name'] + '-grid-search')
    logging.info("Cloudy model loaded is {}".format(cloudy_model))

    if parameters['PARTLY CLOUDY']['para_path'] is not None:
        logging.info('Loading given model on disk ...')
        partly_cloudy_model = utl.restore_model(\
                parameters['PARTLY CLOUDY']['para_path'])
    else:
        logging.info('Restore model on disk ...')
        partly_cloudy_model = utl.restore_model(\
                parameters['PARTLY CLOUDY']['name'] + '-grid-search')
    logging.info("Partly cloudy model loaded is {}"\
                .format(partly_cloudy_model))

    # perform prediction based on weather types

    # sunny day
    logging.info("Start performing predictions on sunny days ...")
    sunny_pred = predict(feats['sunny'], sunny_model)
    sunny_results['prediction'] = sunny_pred
    logging.info("Weather prediction on sunny days is complete.")

    # cloudy day
    logging.info("Start performing predictions on cloudy days ...")
    cloudy_pred = predict(feats['cloudy'], cloudy_model)
    cloudy_results['prediction'] = cloudy_pred
    logging.info("Weather prediction on cloudy days is complete.")

    # partly cloudy day
    logging.info("Start performing predictions on partly cloudy days ...")
    partly_cloudy_pred = predict(feats['partly_cloudy'], partly_cloudy_model)
    partly_cloudy_results['prediction'] = partly_cloudy_pred
    logging.info("Weather prediction on partly cloudy days is complete.")

    if labels is not None:

        # computing errors
        sunny_errors = utl.compute_error(sunny_pred, labels['sunny'])
        sunny_results['rmse_errors'] = sunny_errors
        logging.info("RMSE errors of sunny days: {}".format(sunny_errors))

        cloudy_errors = utl.compute_error(cloudy_pred, labels['cloudy'])
        cloudy_results['rmse_errors'] = cloudy_errors
        logging.info("RMSE errors of cloudy days: {}".format(cloudy_errors))

        partly_cloudy_errors = utl.compute_error(partly_cloudy_pred,\
                                    labels['partly_cloudy'])
        partly_cloudy_results['rmse_errors'] = partly_cloudy_errors
        logging.info("RMSE errors of partly cloudy days: {}"\
                .format(partly_cloudy_errors))

        # comparison between prediction and true labels
        sunny_fig = utl.compare_pred_results(sunny_pred, labels['sunny'],\
                'sunny', style='k.')
        cloudy_fig = utl.compare_pred_results(cloudy_pred, labels['cloudy'],\
                'cloudy', style='k.')
        partly_cloudy_fig = utl.compare_pred_results(partly_cloudy_pred,\
                labels['partly_cloudy'], 'partly cloudy', style='k.')

        preds = {'sunny': sunny_pred, 'cloudy': cloudy_pred,\
                'partly_cloudy': partly_cloudy_pred}

        # plot predictions and measurements
        fig_pred_meas, preds_total = utl.compare_preds_labels(
            preds,
            labels,
            masks,
        )

        # save fig
        fig_path = parameters['SUNNY']['fig_path'] + '.png'
        sunny_fig.savefig(fig_path)

        fig_path = parameters['CLOUDY']['fig_path'] + '.png'
        cloudy_fig.savefig(fig_path)

        fig_path = parameters['PARTLY CLOUDY']['fig_path'] + '.png'
        partly_cloudy_fig.savefig(fig_path)

        fig_path = parameters['fig_folder'] + 'pred_vs_meas' + '.png'
        fig_pred_meas.savefig(fig_path)

        if parameters['FLAG']['show_figs']:
            plt.show()

        return preds_total
Example #10
)

pair_plot.add_glyph(error_points, error_glyph)

# Plot error landscape
error_plot = figure(
    plot_height=400,
    plot_width=400,
    title="Error Plots",
    tools="crosshair,pan,reset,save,wheel_zoom",
    x_range=[start_m - 0.5, end_m + 0.5],
    y_range=[0, 50],
)
error_land_data = ColumnDataSource(data=dict(
    x=[m],
    y=[utils.compute_error(x, y, m)],
))
error_plot.line('x', 'y', source=error_land_data, color='red')

# Set up widgets
set_m = Slider(title='m', value=m, start=start_m, step=0.1, end=end_m)
animate_button = Button(label='► Play', button_type='primary')
cluttered_button = Toggle(label='Filter points',
                          button_type='primary',
                          width=200)
button_draw_error = Toggle(label='Draw Errors',
                           button_type='primary',
                           width=200)
reset_button = Button(label='Reset errors')

Example #11
def matrix_factorization_als(train,
                             test,
                             num_features=20,
                             lambda_user=0.014,
                             lambda_item=0.575,
                             stop_criterion=1e-5,
                             max_iter=100,
                             min_iter=25,
                             verbose=False,
                             robust=False):
    """
    Computes the matrix factorization on train data using alternating least squares
    and returns RMSE computed on train and test data.
    :param train: training data, sparse matrix of shape (num_items, num_users)
    :param test: testing data, sparse matrix of shape (num_items, num_users)
                 if None, only the training part is done and the matrix factorization is returned
    :param num_features: number of latent features in the factorization
    :param lambda_user: size of penalization coefficient for the optimization of user features
    :param lambda_item: size of penalization coefficient for the optimization of item features
    :param stop_criterion: computation continues until an optimization step does not improve the error by more than
                           this value
    :param max_iter: maximum number of iterations
    :param min_iter: minimum number of iterations
    :param verbose: True if user wants to print details of the computation
    :param robust: True to enable robustness against singular matrices
    :return: training RMSE, testing RMSE, user_features and item_features
             or user_features and item_features only if test is None
    """
    change = 1
    error_list = [0, 0]

    num_item = train.shape[0]
    num_user = train.shape[1]

    # set seed
    np.random.seed(988)

    # init ALS
    user_features, item_features = init_mf(train, num_features)

    nz_items, nz_users = train.nonzero()
    nz_users_indices = [nz_users[nz_items == i] for i in range(num_item)]
    nz_items_indices = [nz_items[nz_users == u] for u in range(num_user)]
    nz_train = list(zip(nz_items, nz_users))

    # run ALS
    if verbose:
        print('Learning the matrix factorization using ALS...')
    n_iter = 0
    while (change > stop_criterion and n_iter < max_iter) or n_iter < min_iter:
        n_iter += 1

        user_features = update_user_feature(train, num_user, item_features,
                                            lambda_user, nz_items_indices,
                                            robust)
        item_features = update_item_feature(train, num_item, user_features,
                                            lambda_item, nz_users_indices,
                                            robust)

        error = compute_error(train, user_features, item_features, nz_train)
        if verbose:
            print("Iteration {} : RMSE on training set: {}.".format(
                n_iter, error))
        error_list.append(error)
        change = np.fabs(error_list[-1] - error_list[-2])

    if test is not None:
        # evaluate the test error
        nnz_row, nnz_col = test.nonzero()
        nnz_test = list(zip(nnz_row, nnz_col))
        test_rmse = compute_error(test, user_features, item_features, nnz_test)
        if verbose:
            print('Final RMSE on train data: {}'.format(error_list[-1]))
            print('Final RMSE on test data: {}.'.format(test_rmse))
        return error_list[-1], test_rmse, user_features, item_features

    if test is None:
        return error_list[-1], user_features, item_features
Example #12
def matrix_factorization_sgd(train,
                             test,
                             gamma=0.01,
                             num_features=25,
                             lambda_user=0.011,
                             lambda_item=0.25,
                             num_epochs=50,
                             verbose=False):
    """
    Computes the matrix factorization on train data using stochastic gradient descent
    and returns RMSE computed on train and test data.
    :param train: training data, sparse matrix of shape (num_items, num_users)
    :param test: testing data, sparse matrix of shape (num_items, num_users)
                 if None, only the training is performed and the factorization is returned
    :param gamma: size of each step of the gradient descent
    :param num_features: number of latent features in the factorization
    :param lambda_user: size of penalization coefficient for the optimization of user features
    :param lambda_item: size of penalization coefficient for the optimization of item features
    :param num_epochs: number of epochs for the optimization
    :param verbose: True if user wants to print details of the computation
    :return: training RMSE, testing RMSE, user_features and item_features
             or user_features and item_features only if test is None
    """
    errors = [0]

    # set seed
    np.random.seed(988)

    # init matrix
    user_features, item_features = init_mf(train, num_features)

    # find the non-zero ratings indices
    nz_row, nz_col = train.nonzero()
    nz_train = list(zip(nz_row, nz_col))
    if verbose:
        print('Learning the matrix factorization using SGD...')
    for epoch in range(num_epochs):
        # shuffle the training rating indices
        np.random.shuffle(nz_train)

        # decrease step size
        gamma /= 1.1

        for d, n in nz_train:
            current_item = item_features[:, d]
            current_user = user_features[:, n]
            error = train[d, n] - current_user.T.dot(current_item)

            # gradient
            item_features[:, d] += gamma * (error * current_user -
                                            lambda_item * current_item)
            user_features[:, n] += gamma * (error * current_item -
                                            lambda_user * current_user)

        rmse = compute_error(train, user_features, item_features, nz_train)
        if verbose:
            print("epoch: {}, RMSE on training set: {}.".format(
                epoch + 1, rmse))

        errors.append(rmse)

    if test is not None:
        nz_row, nz_col = test.nonzero()
        nz_test = list(zip(nz_row, nz_col))
        rmse = compute_error(test, user_features, item_features, nz_test)
        if verbose:
            print('Final RMSE on train data: {}'.format(errors[-1]))
            print('Final RMSE on test data: {}.'.format(rmse))
        return errors[-1], rmse, user_features, item_features

    if test is None:
        return errors[-1], user_features, item_features
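
Both factorization routines call compute_error(data, user_features, item_features, nz). A sketch of a matching RMSE over the observed entries, assumed from the column-wise feature layout used in the SGD loop:

import numpy as np

def compute_error(data, user_features, item_features, nz):
    # Hypothetical sketch: RMSE over the observed (item, user) pairs,
    # with features stored column-wise as in the updates above.
    se = sum((data[d, n] - user_features[:, n].dot(item_features[:, d])) ** 2
             for d, n in nz)
    return np.sqrt(se / len(nz))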
def Newtons_method(f,
                   A,
                   x_0,
                   tol,
                   tol_backtracking,
                   x_ast=None,
                   p_ast=None,
                   maxiter=30):
    '''
    Newton's method to numerically approximate solution of min f.
    Args:
        f (lambda expression): definition of function f.
        A (numpy ndarray): 2d numpy array of shape (m,n) defines system of constraints Ax=b.
        x_0 (numpy ndarray): initial point for Newton's method.
        tol (float): tolerance that will halt method. Controls stopping criteria.
        tol_backtracking (float): tolerance that will halt method. Controls value of line search by backtracking.
        x_ast (numpy ndarray): solution of min f, now it's required that user knows the solution...
        p_ast (float): value of f(x_ast), now it's required that user knows the solution...
        maxiter (int): maximum number of iterations
    Returns:
        x (numpy ndarray): numpy array, approximation of x_ast.
        iteration (int): number of iterations.
        Err_plot (numpy ndarray): numpy array of absolute error between p_ast and f(x) with x approximation
                          of x_ast. Useful for plotting.
        x_plot (numpy ndarray): numpy array that contains in its columns the vectors of approximations. Last column
                        contains x, approximation of solution. Useful for plotting.
    '''
    iteration = 0

    x = x_0

    feval = f(x)
    gfeval = gradient_approximation(f, x)
    Hfeval = Hessian_approximation(f, x)

    normgf = np.linalg.norm(gfeval)
    condHf = np.linalg.cond(Hfeval)

    Err_plot_aux = np.zeros(maxiter)
    Err_plot_aux[iteration] = math.fabs(feval - p_ast)

    Err = compute_error(x_ast, x)

    if (A.ndim == 1):
        m = 1
        n = x.size
        zero_matrix = np.zeros(1)
        first_stack = np.column_stack((Hfeval, A.T))
        second_stack = np.row_stack(
            (A.reshape(1, n).T, zero_matrix)).reshape(1, n + 1)[0]
    else:
        m, n = A.shape
        zero_matrix = np.zeros((m, m))
        first_stack = np.column_stack((Hfeval, A.T))
        second_stack = np.column_stack((A, zero_matrix))

    x_plot = np.zeros((n, maxiter))
    x_plot[:, iteration] = x

    system_matrix = np.row_stack((first_stack, second_stack))
    zero_vector = np.zeros(m)
    rhs = np.row_stack((gfeval.reshape(n, 1), zero_vector.reshape(m, 1))).T[0]

    #Newton's direction and Newton's decrement
    dir_desc = np.linalg.solve(system_matrix, rhs)
    dir_Newton = dir_desc[0:n]
    dec_Newton = dir_Newton.dot(Hfeval @ dir_Newton)

    w_dual_variable_estimation = -dir_desc[n:(n + m)]

    dir_Newton = -dir_desc[0:n]

    print(
        'I    Normgf   Newton Decrement  Error x_ast   Error p_ast   line search   Condition of Hessian'
    )
    print(
        '{}    {:0.2e}    {:0.2e}        {:0.2e}      {:0.2e}       {}           {:0.2e}'
        .format(iteration, normgf, dec_Newton, Err, Err_plot_aux[iteration],
                "---", condHf))
    stopping_criteria = dec_Newton / 2
    iteration += 1
    while (stopping_criteria > tol and iteration < maxiter):
        der_direct = -dec_Newton
        t = line_search_by_backtracking(f, dir_Newton, x, der_direct)
        x = x + t * dir_Newton
        feval = f(x)
        gfeval = gradient_approximation(f, x)
        Hfeval = Hessian_approximation(f, x)
        normgf = np.linalg.norm(gfeval)
        condHf = np.linalg.cond(Hfeval)
        if (A.ndim == 1):
            m = 1
            n = x.size
            zero_matrix = np.zeros(1)
            first_stack = np.column_stack((Hfeval, A.T))
            second_stack = np.row_stack(
                (A.reshape(1, n).T, zero_matrix)).reshape(1, n + 1)[0]
        else:
            m, n = A.shape
            zero_matrix = np.zeros((m, m))
            first_stack = np.column_stack((Hfeval, A.T))
            second_stack = np.column_stack((A, zero_matrix))

        system_matrix = np.row_stack((first_stack, second_stack))
        rhs = np.row_stack((gfeval.reshape(n, 1), zero_vector.reshape(m, 1))).T[0]

        #Newton's direction and Newton's decrement
        dir_desc = np.linalg.solve(system_matrix, rhs)
        dir_Newton = dir_desc[0:n]
        dec_Newton = dir_Newton.dot(Hfeval @ dir_Newton)
        w_dual_variable_estimation = -dir_desc[n:(n + m)]
        dir_Newton = -dir_desc[0:n]

        Err_plot_aux[iteration] = math.fabs(feval - p_ast)
        x_plot[:, iteration] = x
        Err = compute_error(x_ast, x)
        print(
            '{}    {:0.2e}    {:0.2e}        {:0.2e}      {:0.2e}       {:0.2e}      {:0.2e}'
            .format(iteration, normgf, dec_Newton, Err,
                    Err_plot_aux[iteration], t, condHf))
        stopping_criteria = dec_Newton / 2
        if t < tol_backtracking:  #if t is less than tol_backtracking then we need to check the reason
            iter_salida = iteration
            iteration = maxiter - 1
        iteration += 1
    print('{} {:0.2e}'.format("Error of x with respect to x_ast:", Err))
    print('{} {}'.format("Approximate solution:", x))
    cond = Err_plot_aux > np.finfo(float).eps * 10**(-2)
    Err_plot = Err_plot_aux[cond]

    if iteration == maxiter and t < tol_backtracking:
        print(
            "Backtracking value less than tol_backtracking, check approximation"
        )
        iteration = iter_salida
        x_plot = x_plot[:, :iteration]
    else:
        x_plot = x_plot[:, :iteration]
    return [x, iteration, Err_plot, x_plot]
def Newtons_method(f, x_0, tol, 
                   tol_backtracking, x_ast=None, p_ast=None, maxiter=30):
    '''
    Newton's method to numerically approximate solution of min f.
    Args:
        f (lambda expression): definition of function f.
        x_0 (numpy ndarray): initial point for Newton's method.
        tol (float): tolerance that will halt method. Controls stopping criteria.
        tol_backtracking (float): tolerance that will halt method. Controls value of line search by backtracking.
        x_ast (numpy ndarray): solution of min f, now it's required that user knows the solution...
        p_ast (float): value of f(x_ast), now it's required that user knows the solution...
        maxiter (int): maximum number of iterations
    Returns:
        x (numpy ndarray): numpy array, approximation of x_ast.
        iteration (int): number of iterations.
        Err_plot (numpy ndarray): numpy array of absolute error between p_ast and f(x) with x approximation
                          of x_ast. Useful for plotting.
        x_plot (numpy ndarray): numpy array that contains in its columns the vectors of approximations. Last column
                        contains x, approximation of solution. Useful for plotting.
    '''
    iteration = 0
        
    x = x_0
    
    feval = f(x)
    gfeval = gradient_approximation(f,x)
    Hfeval = Hessian_approximation(f,x)
    
    normgf = np.linalg.norm(gfeval)
    condHf= np.linalg.cond(Hfeval)
    
    Err_plot_aux = np.zeros(maxiter)
    Err_plot_aux[iteration]=compute_error(p_ast,feval)
    
    Err = compute_error(x_ast,x)
    n = x.size
    x_plot = np.zeros((n,maxiter))
    x_plot[:,iteration] = x
    
    #Newton's direction and Newton's decrement
    
    dir_Newton = np.linalg.solve(Hfeval, -gfeval)
    dec_Newton = -gfeval.dot(dir_Newton)
    
    print('I    Normgf   Newton Decrement  Error x_ast   Error p_ast   line search   Condition of Hessian')
    print('{}    {:0.2e}    {:0.2e}        {:0.2e}      {:0.2e}       {}           {:0.2e}'.format(iteration,normgf,
                                                                                                   dec_Newton,Err,
                                                                                                   Err_plot_aux[iteration],"---",
                                                                                                   condHf))
    stopping_criteria = dec_Newton/2
    iteration+=1
    while(stopping_criteria>tol and iteration < maxiter):
        der_direct = -dec_Newton
        t = line_search_by_backtracking(f,dir_Newton,x,der_direct)
        x = x + t*dir_Newton
        feval = f(x)
        gfeval = gradient_approximation(f,x)
        Hfeval = Hessian_approximation(f,x)
        normgf = np.linalg.norm(gfeval)
        condHf= np.linalg.cond(Hfeval)
        #Newton's direction and Newton's decrement

        dir_Newton = np.linalg.solve(Hfeval, -gfeval)
        dec_Newton = -gfeval.dot(dir_Newton)
        Err_plot_aux[iteration]=compute_error(p_ast,feval)
        x_plot[:,iteration] = x
        Err = compute_error(x_ast,x)
        print('{}    {:0.2e}    {:0.2e}        {:0.2e}      {:0.2e}       {:0.2e}      {:0.2e}'.format(iteration,normgf,
                                                                                                         dec_Newton,Err,
                                                                                                         Err_plot_aux[iteration],t,
                                                                                                         condHf))
        stopping_criteria = dec_Newton/2
        if t<tol_backtracking: #if t is less than tol_backtracking then we need to check the reason
            iter_salida=iteration
            iteration = maxiter - 1
        iteration+=1
    print('{} {:0.2e}'.format("Error of x with respect to x_ast:",Err))
    print('{} {}'.format("Approximate solution:", x))
    cond = Err_plot_aux > np.finfo(float).eps*10**(-2)
    Err_plot = Err_plot_aux[cond]
    if iteration == maxiter and t < tol_backtracking:
        print("Backtracking value less than tol_backtracking, check approximation")
        iteration=iter_salida
        x_plot = x_plot[:,:iteration]
    else:
        x_plot = x_plot[:,:iteration]
    return [x,iteration,Err_plot,x_plot]
Example #15
args = parser.parse_args(sys.argv[1:] if len(sys.argv) > 1 else ['-h'])
data_dir = args.d
model_fp = args.m
logging_level = logging.DEBUG if args.v else logging.INFO
logging.basicConfig(stream=sys.stdout, level=logging_level)

logging.info('Loading model from file...')
with open(model_fp, 'rb') as model_file:
    model = pickle.load(model_file)
    if type(model).__name__ != 'Model':  # the unpickled object should be the project's Model class
        logging.error('\'%s\' contains object of type \'%s\', not Model. Exiting...' % (model_fp, type(model).__name__))

logging.info('Loading data...')
im_fps = glob2.glob(os.path.join(data_dir, '**/snippets/**/*.png'))
ims = utils.import_images(im_fps)
im_labels = utils.get_labels_from_fps(im_fps)
    
logging.info('Testing model...')
predictions = model.predict(ims)

accuracy = 1 - utils.compute_error(im_labels, predictions)
num_total = len(im_labels)
num_correct = int(round(accuracy * num_total))
logging.info('Accuracy: %g (%d/%d)' % (accuracy, num_correct, num_total))
if accuracy != 1:
    logging.info('Incorrectly labeled images:')
    for i, (label, prediction) in enumerate(zip(im_labels, predictions)):
        if label != prediction:
            logging.info('img_fp=%s,  label=%s,  prediction=%s' % (im_fps[i], label, prediction))

def Newtons_method_infeasible_init_point_2nd_version(f,
                                                     A,
                                                     b,
                                                     x_0,
                                                     nu_0,
                                                     tol,
                                                     tol_backtracking,
                                                     x_ast=None,
                                                     p_ast=None,
                                                     maxiter=30,
                                                     gf_symbolic=None,
                                                     Hf_symbolic=None):
    '''
    Newton's method to numerically approximate solution of min f subject to Ax = b.
    Args:
        f (fun): definition of function f as lambda expression or function definition.
        A (numpy ndarray): 2d numpy array of shape (m,n) defines system of constraints Ax=b.
        b (numpy ndarray or float): array that defines system of constraints Ax=b (can be a single number
                                    if just one restriction is defined)
        x_0 (numpy ndarray): initial point for Newton's method. 
        nu_0 (numpy ndarray): initial point for Newton's method.
        tol (float): tolerance that will halt method. Controls stopping criteria.
        tol_backtracking (float): tolerance that will halt method. Controls value of line search by backtracking.
        x_ast (numpy ndarray): solution of min f, now it's required that user knows the solution...
        p_ast (float): value of f(x_ast), now it's required that user knows the solution...
        maxiter (int): maximum number of iterations
        gf_symbolic (fun): definition of gradient of f. If given, no approximation is
                                     performed via finite differences.
        Hf_symbolic (fun): definition of Hessian of f. If given, no approximation is
                                     performed via finite differences.
    Returns:
        x (numpy ndarray): numpy array, approximation of x_ast.
        iteration (int): number of iterations.
        Err_plot (numpy ndarray): numpy array of absolute error between p_ast and f(x) with x approximation
                          of x_ast. Useful for plotting.
        x_plot (numpy ndarray): numpy array that contains in its columns the vectors of approximations. Last column
                        contains x, approximation of solution. Useful for plotting.
    '''
    iteration = 0
    x = x_0
    nu = nu_0

    feval = f(x)

    if gf_symbolic:
        gfeval = gf_symbolic(x)
    else:
        gfeval = gradient_approximation(f, x)

    if Hf_symbolic:
        Hfeval = Hf_symbolic(x)
    else:
        Hfeval = Hessian_approximation(f, x)

    normgf = np.linalg.norm(gfeval)
    condHf = np.linalg.cond(Hfeval)

    Err_plot_aux = np.zeros(maxiter)
    Err_plot_aux[iteration] = compute_error(p_ast, feval)

    Err = compute_error(x_ast, x)

    if (A.ndim == 1):
        p = 1
        n = x.size
        zero_matrix = np.zeros(p)
        first_stack = np.column_stack((Hfeval, A.T))
        second_stack = np.row_stack(
            (A.reshape(1, n).T, zero_matrix)).reshape(1, n + 1)[0]
    else:
        p, n = A.shape
        zero_matrix = np.zeros((p, p))
        first_stack = np.column_stack((Hfeval, A.T))
        second_stack = np.column_stack((A, zero_matrix))

    x_plot = np.zeros((n, maxiter))
    x_plot[:, iteration] = x

    system_matrix = np.row_stack((first_stack, second_stack))

    residual_primal = lambda x_fun: A @ x_fun - b

    def residual_dual(nu_fun):
        if (A.ndim == 1):
            return gfeval + A.T * nu_fun
        else:
            return gfeval + A.T @ nu_fun

    feasibility_primal = residual_primal(x)
    feasibility_dual = residual_dual(nu)

    rhs = np.row_stack(
        (feasibility_dual.reshape(n, 1), feasibility_primal.reshape(p,
                                                                    1))).T[0]

    #Newton's direction and Newton's decrement
    dir_desc = np.linalg.solve(system_matrix, -rhs)
    dir_Newton_primal = dir_desc[0:n]
    dec_Newton = -gfeval.dot(dir_Newton_primal)
    dir_Newton_dual = dir_desc[n:(n + p)]

    norm_residual_eval = norm_residual(feasibility_primal, feasibility_dual)

    norm_residual_primal = np.linalg.norm(feasibility_primal)
    norm_residual_dual = np.linalg.norm(feasibility_dual)
    print(
        'I\t||res_primal||\t||res_dual|| \tNewton Decrement\tError x_ast\tError p_ast\tline search\tCondHf'
    )
    print('{}\t{:0.2e}\t{:0.2e}\t{:0.2e}\t{:0.2e}\t{:0.2e}\t{}\t\t{:0.2e}'.
          format(iteration, norm_residual_primal, norm_residual_dual,
                 dec_Newton, Err, Err_plot_aux[iteration], "---", condHf))

    stopping_criteria = norm_residual_eval
    iteration += 1
    while (stopping_criteria > tol and iteration < maxiter):
        der_direct = -dec_Newton
        t = line_search_for_residual_by_backtracking(residual_primal,
                                                     residual_dual,
                                                     dir_Newton_primal,
                                                     dir_Newton_dual, x, nu,
                                                     norm_residual_eval)
        x = x + t * dir_Newton_primal
        nu = nu + t * dir_Newton_dual
        feval = f(x)

        if gf_symbolic:
            gfeval = gf_symbolic(x)
        else:
            gfeval = gradient_approximation(f, x)

        if Hf_symbolic:
            Hfeval = Hf_symbolic(x)
        else:
            Hfeval = Hessian_approximation(f, x)
        first_stack = np.column_stack((Hfeval, A.T))

        system_matrix = np.row_stack((first_stack, second_stack))

        feasibility_primal = residual_primal(x)
        feasibility_dual = residual_dual(nu)
        rhs = np.row_stack((feasibility_dual.reshape(n, 1),
                            feasibility_primal.reshape(p, 1))).T[0]

        #Newton's direction and Newton's decrement
        dir_desc = np.linalg.solve(system_matrix, -rhs)
        dir_Newton_primal = dir_desc[0:n]
        dec_Newton = -gfeval.dot(dir_Newton_primal)
        dir_Newton_dual = dir_desc[n:(n + p)]

        Err_plot_aux[iteration] = compute_error(p_ast, feval)
        x_plot[:, iteration] = x
        Err = compute_error(x_ast, x)
        norm_residual_eval = norm_residual(feasibility_primal,
                                           feasibility_dual)
        norm_residual_primal = np.linalg.norm(feasibility_primal)
        norm_residual_dual = np.linalg.norm(feasibility_dual)
        print(
            '{}\t{:0.2e}\t{:0.2e}\t{:0.2e}\t{:0.2e}\t{:0.2e}\t{:0.2e}\t{:0.2e}'
            .format(iteration, norm_residual_primal, norm_residual_dual,
                    dec_Newton, Err, Err_plot_aux[iteration], t, condHf))
        stopping_criteria = norm_residual_eval
        if t < tol_backtracking:  #if t is less than tol_backtracking then we need to check the reason
            iter_salida = iteration
            iteration = maxiter - 1
        iteration += 1
    print('{} {:0.2e}'.format("Error of x with respect to x_ast:", Err))
    print('{} {}'.format("Approximate solution:", x))
    cond = Err_plot_aux > np.finfo(float).eps * 10**(-2)
    Err_plot = Err_plot_aux[cond]

    if iteration == maxiter and t < tol_backtracking:
        print(
            "Backtracking value less than tol_backtracking, check approximation"
        )
        iteration = iter_salida
    else:
        if iteration == maxiter:
            print("Reached maximum of iterations, check approximation")
    x_plot = x_plot[:, :iteration]

    return [x, iteration, Err_plot, x_plot]
Example #17
def gradient_descent(f, x_0, tol, 
                     tol_backtracking, x_ast=None, p_ast=None, maxiter=30,
                     gf_symbolic=None):
    '''
    Method of gradient descent to numerically approximate solution of min f.
    Args:
        f (fun): definition of function f as lambda expression or function definition.
        x_0 (numpy ndarray): initial point for gradient descent method.
        tol (float): tolerance that will halt method. Controls norm of gradient of f.
        tol_backtracking (float): tolerance that will halt method. Controls value of line search by backtracking.
        x_ast (numpy ndarray): solution of min f, now it's required that user knows the solution...
        p_ast (float): value of f(x_ast), now it's required that user knows the solution...
        maxiter (int): maximum number of iterations.
        gf_symbolic (fun): definition of gradient of f. If given, no approximation is
                                     performed via finite differences.
    Returns:
        x (numpy ndarray): numpy array, approximation of x_ast.
        iteration (int): number of iterations.
        Err_plot (numpy ndarray): numpy array of absolute error between p_ast and f(x) with x approximation
                                  of x_ast. Useful for plotting.
        x_plot (numpy ndarray): numpy array that contains in its columns the vectors of approximations. Last column
                                contains x, approximation of solution. Useful for plotting.
    '''
    iteration = 0
    x = x_0
    
    feval = f(x)
    if gf_symbolic:
        gfeval = gf_symbolic(x)
    else:
        gfeval = gradient_approximation(f,x)

    normgf = np.linalg.norm(gfeval)
    
    Err_plot_aux = np.zeros(maxiter)
    Err_plot_aux[iteration]=compute_error(p_ast,feval)
    
    Err = compute_error(x_ast,x)
    n = x.size
    x_plot = np.zeros((n,maxiter))
    x_plot[:,iteration] = x
    
    print('I\tNorm gf\t\tError x_ast\tError p_ast\tline search')
    print('{}\t{:0.2e}\t{:0.2e}\t{:0.2e}\t{}'.format(iteration,normgf,Err,Err_plot_aux[iteration],"---"))
    iteration+=1
    while(normgf>tol and iteration < maxiter):
        dir_desc = -gfeval
        der_direct = gfeval.dot(dir_desc)
        t = line_search_by_backtracking(f,dir_desc,x,der_direct)
        x = x + t*dir_desc
        feval = f(x)
        if gf_symbolic:
            gfeval = gf_symbolic(x)
        else:
            gfeval = gradient_approximation(f,x)
        normgf = np.linalg.norm(gfeval)
        Err_plot_aux[iteration]=compute_error(p_ast,feval)
        x_plot[:,iteration] = x
        Err = compute_error(x_ast,x)
        print('{}\t{:0.2e}\t{:0.2e}\t{:0.2e}\t{:0.2e}'.format(iteration,normgf,Err,
                                                                      Err_plot_aux[iteration],t))
        if t<tol_backtracking: #if t is less than tol_backtracking then we need to check the reason
            iter_salida=iteration
            iteration = maxiter - 1
        iteration+=1
    print('{} {:0.2e}'.format("Error of x with respect to x_ast:",Err))
    print('{} {}'.format("Approximate solution:", x))
    cond = Err_plot_aux > np.finfo(float).eps*10**(-2)
    Err_plot = Err_plot_aux[cond]
    if iteration == maxiter and t < tol_backtracking:
        print("Backtracking value less than tol_backtracking, check approximation")
        iteration=iter_salida
    else:
        if iteration == maxiter:
            print("Reached maximum of iterations, check approximation")    
    x_plot = x_plot[:,:iteration]
    return [x,iteration,Err_plot,x_plot]
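
All of these routines lean on a line_search_by_backtracking(f, dir, x, der_direct) helper. A minimal Armijo backtracking sketch consistent with that call signature; the constants alpha and beta are assumptions:

def line_search_by_backtracking(f, dir_desc, x, der_direct,
                                alpha=0.15, beta=0.5):
    # Hypothetical sketch: shrink t until the Armijo sufficient-decrease
    # condition holds. der_direct is the directional derivative
    # gfeval.dot(dir_desc), negative for a descent direction.
    t = 1.0
    fx = f(x)
    while f(x + t * dir_desc) > fx + alpha * t * der_direct:
        t *= beta
    return t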
Example #18
def evaluate_one_file(filename):
    # evaluate on one file pair
    data = dataLoader(filename)
    imgL = data.imgL
    pc = data.pc
    print("Processing data " + filename + "...\n")

    print("Upsampling(accelerated) begins...")
    start_acc = time.time()
    disp_lidar = bf_vanilla_accelerated(imgL, pc)
    end_acc = time.time()
    elapse_acc = end_acc - start_acc
    print("Upsampling(accelerated) on raw points takes " + str(elapse_acc) +
          " seconds...\n")

    print("Refinement begins...")
    start_refine = time.time()
    edge_map, disp_bf = measure_dispersion(imgL, pc)
    end_refine = time.time()
    elapse_refine = end_refine - start_refine
    print("Refinement takes " + str(elapse_refine) + " seconds...\n")

    disp_psmnet = cv2.imread("../data/prediction/" + filename + ".png",
                             -1) / 256.0
    disp_gt = cv2.imread("../data/gt/disp_occ_0/" + filename + ".png",
                         -1) / 256.0
    obj_map = cv2.imread("../data/gt/obj_map/" + filename + ".png", -1) / 256.0
    disp_refined = replace_boundary(disp_psmnet, disp_bf)

    rtn = []
    error1, error1_fg, error1_bg, error_map1, count1_above_15 = compute_error(
        disp_gt, disp_refined, obj_map)
    rtn.append((error1, error1_fg, error1_bg, error_map1, count1_above_15))
    error2, error2_fg, error2_bg, error_map2, count2_above_15 = compute_error(
        disp_gt, disp_psmnet, obj_map)
    rtn.append((error2, error2_fg, error2_bg, error_map2, count2_above_15))
    error3, error3_fg, error3_bg, error_map3, count3_above_15 = compute_error(
        disp_gt, disp_lidar, obj_map)
    rtn.append((error3, error3_fg, error3_bg, error_map3, count3_above_15))
    print("All: LiDAR points upsampling... " + str(error3))
    print("All: before refinement... " + str(error2))
    print("All: after refinement... " + str(error1))
    print("FG: LiDAR points upsampling... " + str(error3_fg))
    print("FG: before refinement... " + str(error2_fg))
    print("FG: after refinement... " + str(error1_fg))
    print("BG: LiDAR points upsampling... " + str(error3_bg))
    print("BG: before refinement... " + str(error2_bg))
    print("BG: after refinement... " + str(error1_bg))
    print("BIG ERROR COUNT: LiDAR points upsampling... " +
          str(count3_above_15))
    print("BIG ERROR COUNT: before refinement... " + str(count2_above_15))
    print("BIG ERROR COUNT: after refinement... " + str(count1_above_15))

    f = plt.figure()

    ax1 = f.add_subplot(4, 2, 1)
    plt.imshow(error_map2, 'rainbow', vmin=-5, vmax=20)
    plt.axis('off')
    ax1.set_title("Error predicted: " + str(100 * error2)[:4] + "%",
                  fontsize=8)

    ax2 = f.add_subplot(4, 2, 2)
    plt.imshow(disp_psmnet, 'rainbow', vmin=10, vmax=80)
    plt.axis('off')
    ax2.set_title("Disparity predicted", fontsize=8)

    ax3 = f.add_subplot(4, 2, 3)
    plt.imshow(error_map1, 'rainbow', vmin=-5, vmax=20)
    plt.axis('off')
    ax3.set_title("Error refined:   " + str(100 * error1)[:4] + "%",
                  fontsize=8)

    ax4 = f.add_subplot(4, 2, 4)
    plt.imshow(disp_refined, 'rainbow', vmin=10, vmax=80)
    plt.axis('off')
    ax4.set_title("Disparity refined", fontsize=8)

    ax5 = f.add_subplot(4, 2, 5)
    plt.imshow(error_map3, 'rainbow', vmin=-5, vmax=20)
    plt.axis('off')
    ax5.set_title("Error upsampled: " + str(100 * error3)[:4] + "%",
                  fontsize=8)

    ax6 = f.add_subplot(4, 2, 6)
    plt.imshow(disp_lidar, 'rainbow', vmin=10, vmax=80)
    plt.axis('off')
    ax6.set_title("Disparity upsampled", fontsize=8)

    ax7 = f.add_subplot(4, 2, 7)
    plt.imshow(edge_map)
    plt.axis('off')
    ax7.set_title("Edges", fontsize=10)

    ax8 = f.add_subplot(4, 2, 8)
    plt.imshow(cv2.cvtColor(imgL, cv2.COLOR_BGR2RGB))
    plt.axis('off')
    ax8.set_title("Image", fontsize=10)

    plt.tight_layout()
    plt.savefig("../output/" + "compare_" + filename + ".png", dpi=600)
    plt.close()

    points, colors = reproject_to_3D(disp_lidar, imgL)
    save_ply("../output/" + filename + "_upsampled.ply", points, colors)
    points, colors = reproject_to_3D(disp_psmnet, imgL)
    save_ply("../output/" + filename + "_predicted.ply", points, colors)
    points, colors = reproject_to_3D(disp_refined, imgL)
    save_ply("../output/" + filename + "_refined.ply", points, colors)
    points, colors = reproject_to_3D(disp_gt, imgL)
    save_ply("../output/" + filename + "_gt.ply", points, colors)

    return rtn
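
evaluate_one_file expects compute_error(disp_gt, disp_est, obj_map) to return an overall error, foreground/background errors, an error map, and a large-error count. A KITTI-style sketch under those assumptions; the 3-pixel and 15-pixel thresholds and the validity mask are guesses, not the project's confirmed logic:

import numpy as np

def compute_error(disp_gt, disp_est, obj_map, thresh=3.0):
    # Hypothetical sketch: fraction of valid ground-truth pixels whose
    # absolute disparity error exceeds `thresh`, split by the object map.
    valid = disp_gt > 0
    error_map = np.abs(disp_est - disp_gt) * valid
    bad = (error_map > thresh) & valid
    fg = (obj_map > 0) & valid
    bg = valid & ~(obj_map > 0)
    error = bad.sum() / max(valid.sum(), 1)
    error_fg = (bad & fg).sum() / max(fg.sum(), 1)
    error_bg = (bad & bg).sum() / max(bg.sum(), 1)
    count_above_15 = int((error_map > 15).sum())
    return error, error_fg, error_bg, error_map, count_above_15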
Example #19
                                yplaceholder: batch_y
                            })

            epoch_loss += c / train_iters

        print('Epoch', epoch + 1, 'completed out of', epochs, 'loss:',
              epoch_loss)

    print("Optimization Finished!")

    print("Testing..")

    test_batch = test_inputs.reshape((-1, input_length, number_of_sequences))
    predictions = sess.run([logit], feed_dict={xplaceholder: test_batch})

    print('Total Mae for Test set is: ',
          utils.compute_error(test_labels,
                              np.array(predictions)[0]))

    preds = np.array(predictions)[0]

    close_labels = []
    close_preds = []
    index_list = []
    for i in range(0, len(preds)):
        close_labels.append(test_labels[i])
        close_preds.append(preds[i])
        index_list.append(i)

    utils.print_preds(close_labels, close_preds, index_list)