Example #1
0
 def setUp(self):
     """Load the fixture datasets shared by the test cases."""
     # Tiny hand-built column vectors for the trivial regression checks.
     self.x1 = np.array([[0, 0, 0]]).T
     self.y1 = np.array([[1, 1, 1]]).T
     self.x2 = np.array([[1, 2, 3, 4, 5]]).T
     self.y2 = np.array([[1, 2, 3, 4, 5]]).T
     # Text-based exercise data (ex1data2 is loaded feature-normalized).
     self.ex1data1 = ml.load_data('LinearRegression/ex1data1.txt')
     self.ex1data2 = ml.load_data('LinearRegression/ex1data2.txt', norm=True)
     self.ex2data1 = ml.load_data('LogisticRegression/ex2data1.txt')
     self.ex2data2 = ml.load_data('LogisticRegression/ex2data2.txt')
     # MATLAB .mat fixtures, loaded in one table-driven pass.
     for attr, path in (
             ('ex3data1', 'RegularizedLogReg/ex3data1.mat'),
             ('ex3weights', 'RegularizedLogReg/ex3weights.mat'),
             ('ex4data1', 'NeuralNetworks/ex4data1.mat'),
             ('ex4weights', 'NeuralNetworks/ex4weights.mat'),
             ('ex5data1', 'RegularizedLinReg/ex5data1.mat'),
             ('ex7data1', 'KmeansPCA/ex7data1.mat'),
             ('ex7data2', 'KmeansPCA/ex7data2.mat'),
     ):
         setattr(self, attr, io.loadmat(path))
Example #2
0
    def __init__(self, N_test, offset, dataset):
        """Load *dataset* and record the train/test split parameters.

        Input:
            N_test: number of rows held out for testing
            offset: index offset into the loaded data
            dataset: path passed straight to machine_learning.load_data
        """
        self.learned_data = machine_learning.load_data(dataset, 0, 0, None)

        self.N_test = N_test
        self.offset = offset
        # Every row not reserved for testing counts as training data.
        self.N_train = self.learned_data.shape[0] - N_test
        # LWR bandwidth parameter.
        self.h = 0.06
Example #3
0
    def __init__(self, N_test, offset, dataset="learning_dataset_vish.csv"):
        """Load the learning dataset and record the train/test split.

        Input:
            N_test: number of rows held out for testing
            offset: index offset into the loaded data
            dataset: path passed to machine_learning.load_data; defaults to
                the original hard-coded file, so existing callers are
                unaffected.
        """
        self.learned_data = machine_learning.load_data(dataset, 0, 0, None)

        self.N_test = N_test
        # Every row not reserved for testing counts as training data.
        self.N_train = self.learned_data.shape[0] - self.N_test
        self.offset = offset
        # LWR bandwidth parameter.
        self.h = 10

        # print() call form (the original used a Python 2 print statement,
        # inconsistent with the print() calls elsewhere in this file).
        print(self.N_test, self.N_train)
Example #4
0
    def plot_map(self):
        """\
        Plots the Landmark Locations and Walls

        Loads 'ds1_Landmark_Groundtruth.dat' itself (the original docstring
        documented a landmark_data parameter that this method never took).
        Each loaded row is (id, x, y, ...) — columns 0, 2, 4, 6, 8 of the
        .dat file; presumably id/x/y plus std-devs — TODO confirm.

        Output:
            None. Adds Landmark and walls to final plot
        """

        # parse landmark data to plot
        landmark_data = machine_learning.load_data(
            'ds1_Landmark_Groundtruth.dat', 3, 0, [0, 2, 4, 6, 8])

        # Column extraction (replaces the zip(*...) unpack into throwaway
        # _ignore variables).
        land_x = [row[1] for row in landmark_data]
        land_y = [row[2] for row in landmark_data]

        plt.plot(land_x, land_y, 'ro', markersize=3)

        # add landmark labels, offset a few points from each marker
        for item in landmark_data:
            plt.annotate('%s' % item[0],
                         xy=(item[1], item[2]),
                         xytext=(3, 3),
                         textcoords='offset points')

        # Set outer wall locations: landmark indices in drawing order.
        # A single index list replaces the previously duplicated (and
        # error-prone) parallel x/y literal lists.
        outer = [1, 4, 9, 11, 12, 13, 14, 6, 5, 2, 0, 3, 4]
        plt.plot([land_x[i] for i in outer],
                 [land_y[i] for i in outer], 'k')

        # set inner wall locations
        inner = [10, 8, 7]
        plt.plot([land_x[i] for i in inner],
                 [land_y[i] for i in inner], 'k', label='_nolegend_')
Example #5
0
    def part_b_eval(self, data_files):
        """
        Post-process the saved regression results: rebuild the LWR and
        odometry trajectories, compute MSE and R^2 statistics, and draw
        the comparison plots.

        MAKE SURE THE CLASS WAS INITIALIZED WITH N_TEST AND OFFSET VALUES THAT
        CORRESPOND TO THE SAVED DATA.

        Input:
            data_files: sequence of three saved-result file paths for x, y
                and theta; column 3 of each row is read as the predicted
                rate of change — confirm against the code that saved them.

        Output:
            None. Creates the error plots and trajectory plot.
        """
        x_results = machine_learning.load_data(data_files[0], 0, 0, None)
        y_results = machine_learning.load_data(data_files[1], 0, 0, None)
        th_results = machine_learning.load_data(data_files[2], 0, 0, None)

        # Index of the first test row in learned_data.
        i = self.offset + 1

        # The loop below runs N_test - 2 iterations (j = 0 .. N_test - 3):
        # gt_arr is filled exactly; lwlr_arr/odom_arr get rows 0 .. N_test - 2.
        gt_arr = np.zeros([self.N_test - 2, 2])
        lwlr_arr = np.zeros([self.N_test - 1, 2])
        odom_arr = np.zeros([self.N_test - 1, 3])

        # NOTE(review): only rows 0 .. N_test - 3 are written below; the
        # trailing rows of error_comp stay zero — confirm intended size.
        error_comp = np.zeros([self.N_test, 3])

        # Seed both trajectories at the pose of the first test row
        # (columns 6/7/8 appear to be groundtruth x/y/theta — TODO confirm).
        lwlr_arr[0][0] = self.learned_data[i][6]
        lwlr_arr[0][1] = self.learned_data[i][7]

        odom_arr[0][0] = self.learned_data[i][6]
        odom_arr[0][1] = self.learned_data[i][7]
        odom_arr[0][2] = self.learned_data[i][8]

        # Running sums of squared prediction error (for MSE) ...
        tot_x_err = 0
        tot_y_err = 0
        tot_th_err = 0

        # ... and total sums of squares about the mean (for R^2).
        ss_tot_x = 0
        ss_tot_y = 0
        ss_tot_th = 0

        mean_x = 0
        mean_y = 0
        mean_th = 0

        # Sum the target columns (2/3/4) over the whole dataset.
        for _ig, row in enumerate(self.learned_data):
            mean_x += row[2]
            mean_y += row[3]
            mean_th += row[4]

        # NOTE(review): the sums above cover every row of learned_data but
        # are divided by N_test — if learned_data has more than N_test rows
        # these are not true means; verify.
        mean_x = mean_x / self.N_test
        mean_y = mean_y / self.N_test
        mean_th = mean_th / self.N_test

        while i < self.N_test + self.offset - 1:

            # j is the 0-based index into the result/plot arrays.
            j = i - (self.offset + 1)
            gt_arr[j][0] = self.learned_data[i][6]
            gt_arr[j][1] = self.learned_data[i][7]

            dt = self.learned_data[i][5]
            dx = x_results[j][3]
            dy = y_results[j][3]
            dth = th_results[j][3]

            # Integrate the predicted rates to extend the LWR trajectory.
            lwlr_arr[j + 1][0] = lwlr_arr[j][0] + dx * dt
            lwlr_arr[j + 1][1] = lwlr_arr[j][1] + dy * dt

            # Propagate odometry through the motion model (final argument 0
            # — presumably a zero-noise flag; confirm motion_model's API).
            movement_set = [
                self.learned_data[i][0], self.learned_data[i][1], dt
            ]

            cur_state = odom_arr[j][:]

            new_pos = self.motion_model(movement_set, cur_state, 0)

            odom_arr[j + 1][0] = new_pos[0]
            odom_arr[j + 1][1] = new_pos[1]
            odom_arr[j + 1][2] = new_pos[2]

            # Squared prediction errors against the recorded targets.
            error_comp[j][0] = (dx - self.learned_data[i][2])**2
            tot_x_err += error_comp[j][0]

            error_comp[j][1] = (dy - self.learned_data[i][3])**2
            tot_y_err += error_comp[j][1]

            error_comp[j][2] = (dth - self.learned_data[i][4])**2
            tot_th_err += error_comp[j][2]

            ss_tot_x += (self.learned_data[i][2] - mean_x)**2
            ss_tot_y += (self.learned_data[i][3] - mean_y)**2
            ss_tot_th += (self.learned_data[i][4] - mean_th)**2

            i += 1

        # NOTE(review): the loop accumulated N_test - 2 terms but divides by
        # N_test, slightly underestimating the MSE — confirm intent.
        avg_x_err = tot_x_err / self.N_test
        avg_y_err = tot_y_err / self.N_test
        avg_th_err = tot_th_err / self.N_test

        # Coefficient of determination: R^2 = 1 - SS_res / SS_tot.
        R2_x = 1 - (tot_x_err / ss_tot_x)
        R2_y = 1 - (tot_y_err / ss_tot_y)
        R2_th = 1 - (tot_th_err / ss_tot_th)

        print("MSE X Error:")
        print(avg_x_err)

        print("MSE Y Error")
        print(avg_y_err)

        print("MSE Th Error")
        print(avg_th_err)

        print("R2 Values (x, y, th):")
        print(R2_x)
        print(R2_y)
        print(R2_th)

        # Trajectory comparison overlaid on the landmark/wall map.
        plt.figure()

        self.plot_map()

        plt.plot(gt_arr[0:, 0], gt_arr[0:, 1], 'g')
        plt.plot(lwlr_arr[0:, 0], lwlr_arr[0:, 1], 'r')
        plt.plot(odom_arr[0:, 0], odom_arr[0:, 1], 'b')

        plt.title("LWR Performance Comparison")
        plt.xlabel("x")
        plt.ylabel("y")
        plt.legend(
            ['Landmark', 'Wall', 'Groundtruth', 'LWR Trajectory', 'Odometry'])
        plt.xlim([-2, 8])
        plt.ylim([-6, 6])

        # Per-query error curves for x and y (squared errors here, despite
        # the "Absolute Error" axis label — see error_comp above).
        plt.figure()
        plt.plot(error_comp[0:, 0], 'b')
        plt.xlabel("Query Index")
        plt.ylabel("Absolute Error")
        plt.title("Error Comparison for change in x")

        plt.figure()
        plt.plot(error_comp[0:, 1], 'b')
        plt.xlabel("Query Index")
        plt.ylabel("Absolute Error")
        plt.title("Error Comparison for change in y")
Example #6
0
    def part_b_eval(self, data_files):
        """
        Post-process the saved regression results: rebuild the LWR and
        odometry trajectories and draw the trajectory and error plots.

        MAKE SURE THE CLASS WAS INITIALIZED WITH N_TEST AND OFFSET VALUES THAT
        CORRESPOND TO THE SAVED DATA.

        Input:
            data_files: sequence of three saved-result file paths for x, y
                and theta; column 3 of each row is read as the predicted
                rate of change — confirm against the code that saved them.

        Output:
            None. Creates the error plots and trajectory plot.
        """
        x_results = machine_learning.load_data(data_files[0], 0, 0, None)
        y_results = machine_learning.load_data(data_files[1], 0, 0, None)
        th_results = machine_learning.load_data(data_files[2], 0, 0, None)

        # Index of the first test row in learned_data.
        i = self.offset + 1

        # The loop below runs N_test - 2 iterations (j = 0 .. N_test - 3):
        # gt_arr is filled exactly; lwlr_arr/odom_arr get rows 0 .. N_test - 2.
        gt_arr = np.zeros([self.N_test-2, 2])
        lwlr_arr = np.zeros([self.N_test-1, 2])
        odom_arr = np.zeros([self.N_test-1, 3])

        # NOTE(review): only rows 0 .. N_test - 3 are written below; the
        # trailing rows of error_comp stay zero — confirm intended size.
        error_comp = np.zeros([self.N_test, 2])

        # Seed both trajectories at the pose of the first test row
        # (columns 6/7/8 appear to be groundtruth x/y/theta — TODO confirm).
        lwlr_arr[0][0] = self.learned_data[i][6]
        lwlr_arr[0][1] = self.learned_data[i][7]

        odom_arr[0][0] = self.learned_data[i][6]
        odom_arr[0][1] = self.learned_data[i][7]
        odom_arr[0][2] = self.learned_data[i][8]

        while i < self.N_test + self.offset - 1:

            # j is the 0-based index into the result/plot arrays.
            j = i - (self.offset + 1)
            gt_arr[j][0] = self.learned_data[i][6]
            gt_arr[j][1] = self.learned_data[i][7]

            dt = self.learned_data[i][5]
            dx = x_results[j][3]
            dy = y_results[j][3]

            # Integrate the predicted rates to extend the LWR trajectory.
            # NOTE(review): th_results is loaded but never read in this
            # variant — confirm that is intentional.
            lwlr_arr[j+1][0] = lwlr_arr[j][0] + dx * dt
            lwlr_arr[j+1][1] = lwlr_arr[j][1] + dy * dt

            # Propagate odometry through the motion model (final argument 0
            # — presumably a zero-noise flag; confirm motion_model's API).
            movement_set = [self.learned_data[i][0],
                            self.learned_data[i][1],
                            dt]

            cur_state = odom_arr[j][:]

            new_pos = self.motion_model(movement_set, cur_state, 0)

            odom_arr[j+1][0] = new_pos[0]
            odom_arr[j+1][1] = new_pos[1]
            odom_arr[j+1][2] = new_pos[2]

            # Absolute prediction errors against the recorded targets.
            error_comp[j][0] = abs(dx - self.learned_data[i][2])
            error_comp[j][1] = abs(dy - self.learned_data[i][3])

            i += 1

        # Trajectory comparison overlaid on the landmark/wall map.
        plt.figure()

        self.plot_map()

        plt.plot(gt_arr[0:, 0], gt_arr[0:, 1], 'g')
        plt.plot(lwlr_arr[0:, 0], lwlr_arr[0:, 1], 'r')
        plt.plot(odom_arr[0:, 0], odom_arr[0:, 1], 'b')

        plt.title("LWR Performance Comparison")
        plt.xlabel("x")
        plt.ylabel("y")
        plt.legend(['Landmark', 'Wall', 'Groundtruth', 'LWR Trajectory', 'Odometry'])
        plt.xlim([-2, 8])
        plt.ylim([-6, 6])

        # Per-query absolute error curves for x and y.
        plt.figure()
        plt.plot(error_comp[0:, 0], 'b')
        plt.xlabel("Query Index")
        plt.ylabel("Absolute Error")
        plt.title("Error Comparison for change in x")

        plt.figure()
        plt.plot(error_comp[0:, 1], 'b')
        plt.xlabel("Query Index")
        plt.ylabel("Absolute Error")
        plt.title("Error Comparison for change in y")