Example #1
    def plotScore(self, score, fig, ax, error=False, **kwargs):
        Path(os.path.join(self.figure_dir, 'scores')).mkdir(parents=True,
                                                            exist_ok=True)

        parameters = '_'.join(
            [str(v).replace('.', '') for v in self.parameters.values()])
        suffix = ''.join([
            'f',
            str(self.begin_size), 't',
            str(self.end_size), 'np',
            str(self.n_points), 'r',
            str(self.n_restart)
        ])
        res_file_name = '_'.join([score, self.method, parameters, suffix])
        res_file = res_file_name + '.csv'

        res = np.loadtxt(os.path.join(self.result_dir, "scores", res_file),
                         delimiter=',')
        res = res.transpose()

        sizes = res[0].astype(int)
        mean, std = res[1], res[2]

        alpha_t = 0.4
        ax.errorbar(sizes, mean, std, capsize=4, elinewidth=2.5, **kwargs)
        if error:
            plotting.plot_error(sizes,
                                mean,
                                std,
                                alpha_t,
                                ax=ax,
                                color=kwargs['color'])

        ax.legend()

    def plotMetric(self, metric, fig, ax, error=False, **kwargs):
        parameters = '_'.join(
            [str(v).replace('.', '') for v in self.parameters.values()])
        suffix = ''.join([
            'f',
            str(self.begin_size), 't',
            str(self.end_size), 'np',
            str(self.n_points), 'r',
            str(self.n_restart)
        ])
        res_file_name = '_'.join([metric, self.method, parameters, suffix])
        res_file = res_file_name + '.csv'

        res = np.loadtxt(os.path.join(self.result_dir, "processed", res_file),
                         delimiter=',')
        res = res.transpose()

        sizes = res[0].astype(int)
        mean, std = res[1], res[2]

        alpha_t = 0.4
        ax.errorbar(sizes, mean, std, capsize=2, elinewidth=1.25, **kwargs)
        if error:
            plotting.plot_error(sizes,
                                mean,
                                std,
                                alpha_t,
                                ax=ax,
                                color=kwargs['color'])

        ax.legend()
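
The `plotting.plot_error` helper these methods call is not part of the snippet (nor are the `os`, `pathlib.Path`, and `numpy` imports it relies on). Judging from the call `plot_error(sizes, mean, std, alpha_t, ax=ax, color=...)`, a minimal sketch would draw a shaded one-standard-deviation band; the body below is an assumption, only the signature comes from the example:

import numpy as np
import matplotlib.pyplot as plt


def plot_error(x, mean, std, alpha, ax=None, color=None):
    # Shade the mean +/- one-standard-deviation band around a curve.
    if ax is None:
        ax = plt.gca()
    mean, std = np.asarray(mean), np.asarray(std)
    ax.fill_between(x, mean - std, mean + std, alpha=alpha, color=color)
    return ax
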
Example #2

def main():
    
    # Read data and split into X and y
    y, X = read_data(s.data_file_path)

    # Fit a linear regression via maximum likelihood estimation
    lm_res = mlel.fitLinearRegression(y, X)

    # Show the fitted model summary
    print(lm_res.summary())

    # Estimate the predicted labels
    y_hat = mlel.yhat(X, lm_res)

    # Plot y versus predicted y
    p.plot(y, y_hat)

    # Compute the L1 error
    l1 = mlel.compute_L1(y, y_hat)
    print('L1 error: ', l1[0])

    # Compute the error between y and y_hat
    error = mlel.error_list(y, y_hat)

    # Plot the errors
    p.plot_error(error)

    # Bootstrap to obtain replicated parameter estimates
    bs_params = bootstrapping.bstrap(s.number_replication, y, X)

    # Get the means and the lower/upper confidence bounds
    means, lower_bounds, upper_bounds = bootstrapping.compute_CI(bs_params)

    print('Lower bounds: ', lower_bounds)
    print('Upper bounds: ', upper_bounds)

    # Plot the confidence intervals
    p.plotCI(np.asarray(bs_params), lower_bounds, upper_bounds)

    # Cluster with a Gaussian mixture model
    gmm_pred = clustering.gmm_cluster(X, s.n_components)

    # Report classification metrics
    print(classification_report(y, gmm_pred, target_names=s.target_names))
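
`bootstrapping.compute_CI` is defined elsewhere; a common way to turn bootstrap replications into means plus lower/upper bounds is the percentile method. A minimal sketch under that assumption (the 95% default level is hypothetical):

import numpy as np


def compute_CI(bs_params, alpha=0.05):
    # Percentile bootstrap: per-parameter mean and (alpha/2, 1 - alpha/2) bounds.
    params = np.asarray(bs_params)  # shape (n_replications, n_params)
    means = params.mean(axis=0)
    lower_bounds = np.percentile(params, 100 * alpha / 2, axis=0)
    upper_bounds = np.percentile(params, 100 * (1 - alpha / 2), axis=0)
    return means, lower_bounds, upper_bounds
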
Example #4
            angles, mobiles, 0.)

        # test model
        predY, error = model.testModel(testXs, testY)

        f = open(dir_name + 'error_iteration%d.txt' % iter_number, 'w')
        f.write("Mean Error: %f\n" % (np.mean(error)))
        f.write("Error Standard Deviation: %f\n" % (np.std(error)))
        f.close()

        # Use the median rather than the mean as the per-iteration summary
        mean_errors.append(np.median(error))
        std_errors.append(np.std(error))

        plotting.plot_error(
            testY, predY, error, bases,
            "Num Stations: %d" % (params['data__num_stations']),
            params['exp_details__save'], dir_name, iter_number)

        if all_predY is None:
            all_predY = np.zeros(
                (predY.shape[0], predY.shape[1],
                 params['exp_details__num_iterations_per_setting']))
        if all_error is None:
            all_error = np.zeros(
                (error.shape[0],
                 params['exp_details__num_iterations_per_setting']))

        all_predY[:, :, iter_number] = predY
        all_error[:, iter_number] = error

    f = open(dir_name + 'error_average.txt', 'w')
Example #5
        float) - error.OPERATIONAL_DEMAND_POE90.values.astype(float)

    POE10_over, POE50_over, POE90_over = exceeds_actual_counter(
        error, actual_demand)

    # plot_exceedance(forecasted_demand, actual_demand, error.OPERATIONAL_DEMAND_POE10)

    return error


def error_calculation_dictionaries(forecasts, actuals):
    # Stub in the source; no implementation provided
    return None


'''
# Return a list of all the files within the folder and subfolders
forecast_files, forecast_names = list_files(FORECASTED_DIR)

# Get a forecasted demand dataframe
forecasts = forecasted_demand_dataframes(forecast_files, forecast_names, state=STATE)

# get actual demand data
actual_files, actual_names = list_files(ACTUAL_DIR)

# Get an actual demand dataframe
actual_demand = actual_demand_dataframes(actual_files, actual_names, state=STATE)

# Compute deviation from actual demand
for f_file in range(len(forecast_files)):
    error = error_calculation(forecasts[clean_fnames(forecast_files[f_file], FORECASTED_DIR)], actual_demand)
'''

Example #6

#######################################
# cost function 1, gamma=0.99
#######################################
gamma = .99
# Initialize the MarkovDecisionProcess object for reward method 1
mdp1_a = MarkovDecisionProcess(transition=Transitions,
                               reward=Reward_1,
                               method=1,
                               gamma=gamma,
                               epsilon=epsilon)
""" value iteration with method 1"""
V1_a, error_v1_a = mdp1_a.value_iteration(maze.maze)
pi_v1_a = mdp1_a.best_policy(V1_a)
pl.heatmap(V1_a, pi_v1_a, maze.height, maze.width, 'VI', gamma, 1)
pl.plot_error(error_v1_a, 'VI', gamma, 1)
""" policy iteration with method 1"""
error_p1_a, pi_p1_a, U1_a = mdp1_a.policy_iteration(maze.maze)
pl.heatmap(U1_a, pi_p1_a, maze.height, maze.width, 'PI', gamma, 1)
pl.plot_error(error_p1_a, 'PI', gamma, 1)

#######################################
# cost function 2, gamma=0.99
#######################################
gamma = .99
# Initialize the MarkovDecisionProcess object for reward method 2
mdp2_a = MarkovDecisionProcess(transition=Transitions,
                               reward=Reward_2,
                               method=2,
                               gamma=gamma,
                               epsilon=epsilon)
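
The `MarkovDecisionProcess` class is external to this snippet, but the pattern is visible: `value_iteration` returns both a value function and a per-sweep error trace, which `pl.plot_error` then draws as a convergence curve. A generic sketch of that pattern, assuming a dense transition tensor and state-indexed rewards (both assumptions):

import numpy as np


def value_iteration(P, R, gamma=0.99, epsilon=1e-4):
    # P: (n_actions, n_states, n_states) transitions, R: (n_states,) rewards.
    # Records the max Bellman update per sweep as the convergence trace.
    V = np.zeros(R.shape[0])
    errors = []
    while True:
        V_new = R + gamma * np.max(P @ V, axis=0)  # Bellman optimality backup
        delta = np.max(np.abs(V_new - V))
        errors.append(delta)
        V = V_new
        if delta < epsilon * (1 - gamma) / gamma:
            break
    return V, errors
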
Example #7

ax.set_ylabel('')

ax.set_xlim([int(from_size), int(to_size)])
ax.set_ylim(0, 1)

alpha_t = 0.4
if method == "cpc":
    ax.plot(sizes,
            res[3],
            linestyle="-.",
            linewidth=1.25,
            color="green",
            label='cpc')
    pl.plot_error(sizes,
                  mean_fscore,
                  std_fscore,
                  alpha_t,
                  ax=ax,
                  color="green")
elif method == "elidan":
    ax.plot(sizes,
            res[3],
            linestyle="--",
            linewidth=1.25,
            color="orange",
            label='elidan')
    pl.plot_error(sizes,
                  mean_fscore,
                  std_fscore,
                  alpha_t,
                  ax=ax,
                  color="orange")
Example #8

        predY, error = model.testModel(testXs, testY)

        f = open(dir_name + 'error_iteration%d.txt' % iter_number, 'w')
        f.write("Mean Error: %f\n" % (np.mean(error)))
        f.write("Error Standard Deviation: %f\n" % (np.std(error)))
        f.close()

        # Use the median rather than the mean as the per-iteration summary
        mean_errors.append(np.median(error))
        std_errors.append(np.std(error))

        plotting.plot_error(testY, predY, error, bases,
                            "Num Stations: %d" % (params['data__num_stations']),
                            params['exp_details__save'], dir_name, iter_number)

        if all_predY is None:
            all_predY = np.zeros((predY.shape[0], predY.shape[1], params['exp_details__num_iterations_per_setting']))
        if all_error is None:
            all_error = np.zeros((error.shape[0], params['exp_details__num_iterations_per_setting']))

        all_predY[:,:,iter_number] = predY
        all_error[:,iter_number] = error

    f = open(dir_name + 'error_average.txt', 'w')
    f.write("Mean Error: %f\n" % (np.mean(mean_errors)))
    f.close()

    f = open(dir_name + 'resultsdata.npz', 'wb')  # .npz archives are binary
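
The snippet ends as it opens `resultsdata.npz`; the write itself is cut off. `np.savez` accepts a path or a binary file object, so a sketch of the likely save step (the keyword names are assumptions) is:

import numpy as np


def save_results(dir_name, all_predY, all_error, mean_errors, std_errors):
    # np.savez bundles the arrays into a single .npz archive; passing the
    # path directly avoids managing the file handle by hand.
    np.savez(dir_name + 'resultsdata.npz',
             all_predY=all_predY,
             all_error=all_error,
             mean_errors=np.asarray(mean_errors),
             std_errors=np.asarray(std_errors))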