Esempio n. 1
0
    def calculate_metrics(self):
        """Evaluate the newest camera/object-point configuration.

        Projects the latest object points, computes the plain and
        normalized condition numbers of the projection system, then — over
        ``self.ValidationIters`` noisy trials — accumulates homography
        errors (DLT, HO, OpenCV) and pose-estimation errors (IPPE, EPnP,
        iterative PnP).  The per-trial mean and standard deviation of each
        metric are appended to the matching ``self.*_mean`` / ``self.*_std``
        lists.
        """
        new_objectPoints = self.ObjectPoints[-1]
        cam = self.Camera[-1]
        validation_plane = self.ValidationPlane
        new_imagePoints = np.array(cam.project(new_objectPoints, False))
        self.ImagePoints.append(new_imagePoints)

        #CONDITION NUMBER CALCULATION
        input_list = gd.extract_objectpoints_vars(new_objectPoints)
        input_list.append(np.array(cam.P))
        mat_cond = gd.matrix_condition_number_autograd(*input_list,
                                                       normalize=False)

        #CONDITION NUMBER WITH A NORMALIZED CALCULATION
        input_list = gd.extract_objectpoints_vars(new_objectPoints)
        input_list.append(np.array(cam.P))
        mat_cond_normalized = gd.matrix_condition_number_autograd(
            *input_list, normalize=True)

        self.CondNumber.append(mat_cond)
        self.CondNumberNorm.append(mat_cond_normalized)

        ##HOMOGRAPHY ERRORS
        ## TRUE VALUE OF HOMOGRAPHY OBTAINED FROM CAMERA PARAMETERS
        # Kept although unused below; NOTE(review): confirm
        # homography_from_Rt() has no side effects before removing.
        Hcam = cam.homography_from_Rt()

        ## VALIDATION OBJECT POINTS
        # These depend only on the fixed camera and validation plane, so
        # compute them once instead of once per noisy trial (previously the
        # same projection was repeated three times inside every iteration).
        # NOTE(review): assumes cam.project is deterministic — confirm.
        validation_objectPoints = validation_plane.get_points()
        validation_imagePoints = np.array(
            cam.project(validation_objectPoints, False))
        Xo_val = np.delete(validation_objectPoints, 2, axis=0)  # drop Z row

        def _homography_error(H):
            # Fresh copies per call, mirroring the original defensive
            # copies in case ef.validation_points_error mutates its inputs.
            return ef.validation_points_error(
                np.copy(validation_imagePoints), np.copy(Xo_val), H)

        ##We add noise to the image points and calculate the noisy homography
        homo_dlt_error_loop = []
        homo_HO_error_loop = []
        homo_CV_error_loop = []
        ippe_tvec_error_loop = []
        ippe_rmat_error_loop = []
        epnp_tvec_error_loop = []
        epnp_rmat_error_loop = []
        pnp_tvec_error_loop = []
        pnp_rmat_error_loop = []

        # WE CREATE NOISY IMAGE POINTS (BASED ON THE TRUE VALUES) AND CALCULATE
        # THE ERRORS WE THEN OBTAIN AN AVERAGE FOR EACH ONE
        for j in range(self.ValidationIters):
            new_imagePoints_noisy = cam.addnoise_imagePoints(
                new_imagePoints, mean=0, sd=self.ImageNoise)

            #Calculate the pose using IPPE (solution with least repro error)
            normalizedimagePoints = cam.get_normalized_pixel_coordinates(
                new_imagePoints_noisy)
            ippe_tvec1, ippe_rmat1, ippe_tvec2, ippe_rmat2 = pose_ippe_both(
                new_objectPoints, normalizedimagePoints, debug=False)
            ippeCam1 = cam.clone_withPose(ippe_tvec1, ippe_rmat1)

            #Calculate the pose using solvepnp EPNP
            debug = False
            epnp_tvec, epnp_rmat = pose_pnp(new_objectPoints,
                                            new_imagePoints_noisy, cam.K,
                                            debug, cv2.SOLVEPNP_EPNP, False)
            epnpCam = cam.clone_withPose(epnp_tvec, epnp_rmat)

            #Calculate the pose using solvepnp ITERATIVE
            pnp_tvec, pnp_rmat = pose_pnp(new_objectPoints,
                                          new_imagePoints_noisy, cam.K, debug,
                                          cv2.SOLVEPNP_ITERATIVE, False)
            pnpCam = cam.clone_withPose(pnp_tvec, pnp_rmat)

            #Calculate pose errors against the ground-truth camera pose
            ippe_tvec_error1, ippe_rmat_error1 = ef.calc_estimated_pose_error(
                cam.get_tvec(), cam.R, ippeCam1.get_tvec(), ippe_rmat1)
            ippe_tvec_error_loop.append(ippe_tvec_error1)
            ippe_rmat_error_loop.append(ippe_rmat_error1)

            epnp_tvec_error, epnp_rmat_error = ef.calc_estimated_pose_error(
                cam.get_tvec(), cam.R, epnpCam.get_tvec(), epnp_rmat)
            epnp_tvec_error_loop.append(epnp_tvec_error)
            epnp_rmat_error_loop.append(epnp_rmat_error)

            pnp_tvec_error, pnp_rmat_error = ef.calc_estimated_pose_error(
                cam.get_tvec(), cam.R, pnpCam.get_tvec(), pnp_rmat)
            pnp_tvec_error_loop.append(pnp_tvec_error)
            pnp_rmat_error_loop.append(pnp_rmat_error)

            #Homography Estimation from noisy image points.  Only three of
            #the four object points are used (indices 0, 1, 3), as before.
            Xo = new_objectPoints[[0, 1, 3], :]
            Xi = new_imagePoints_noisy

            #DLT TRANSFORM
            Hnoisy_dlt, _, _ = homo2d.homography2d(Xo, Xi)
            Hnoisy_dlt = Hnoisy_dlt / Hnoisy_dlt[2, 2]

            #HO METHOD
            Hnoisy_HO = hh(Xo, Xi)

            #OpenCV METHOD
            Hnoisy_OpenCV, _ = cv2.findHomography(Xo[:2].T.reshape(1, -1, 2),
                                                  Xi[:2].T.reshape(1, -1, 2))

            ## ERRORS FOR THE DLT / HO / OpenCV HOMOGRAPHIES, all measured
            ## against the same precomputed validation points.
            homo_dlt_error_loop.append(_homography_error(Hnoisy_dlt))
            homo_HO_error_loop.append(_homography_error(Hnoisy_HO))
            homo_CV_error_loop.append(_homography_error(Hnoisy_OpenCV))

        self.Homo_DLT_mean.append(np.mean(homo_dlt_error_loop))
        self.Homo_HO_mean.append(np.mean(homo_HO_error_loop))
        self.Homo_CV_mean.append(np.mean(homo_CV_error_loop))
        self.ippe_tvec_error_mean.append(np.mean(ippe_tvec_error_loop))
        self.ippe_rmat_error_mean.append(np.mean(ippe_rmat_error_loop))
        self.epnp_tvec_error_mean.append(np.mean(epnp_tvec_error_loop))
        self.epnp_rmat_error_mean.append(np.mean(epnp_rmat_error_loop))
        self.pnp_tvec_error_mean.append(np.mean(pnp_tvec_error_loop))
        self.pnp_rmat_error_mean.append(np.mean(pnp_rmat_error_loop))

        self.Homo_DLT_std.append(np.std(homo_dlt_error_loop))
        self.Homo_HO_std.append(np.std(homo_HO_error_loop))
        self.Homo_CV_std.append(np.std(homo_CV_error_loop))
        self.ippe_tvec_error_std.append(np.std(ippe_tvec_error_loop))
        self.ippe_rmat_error_std.append(np.std(ippe_rmat_error_loop))
        self.epnp_tvec_error_std.append(np.std(epnp_tvec_error_loop))
        self.epnp_rmat_error_std.append(np.std(epnp_rmat_error_loop))
        self.pnp_tvec_error_std.append(np.std(pnp_tvec_error_loop))
        self.pnp_rmat_error_std.append(np.std(pnp_rmat_error_loop))
Esempio n. 2
0
    def decision_function(self, X):
        """Return the parent estimator's decision scores for ``X``.

        When the model was trained without the sensitive columns, those
        columns (``self.sensitive_col_idx_``) are dropped from ``X`` before
        delegating to the parent class.
        """
        validated = check_array(X)
        if self.train_sensitive_cols:
            features = validated
        else:
            features = np.delete(validated, self.sensitive_col_idx_, axis=1)
        return super().decision_function(features)
Esempio n. 3
0
# One-hot encode the class labels.
# NOTE(review): `sparse=False` was renamed `sparse_output` in sklearn >= 1.2
# — confirm the pinned sklearn version before upgrading.
encoder = OneHotEncoder(sparse=False)
y = encoder.fit_transform(y_)

# # Split the data for training and testing
# train_x, test_x, train_y, test_y = train_test_split(x, y, test_size=0.20)

# Pre-trained initial weights for the network.
initial_weights = np.genfromtxt(fname='inputs/SF5d_5.dat')
#initial_weights = np.genfromtxt(fname='inputs/keras_inputs.txt')

# Load the fixed train/test split from disk.
train_x = np.genfromtxt(fname='same_split/train_x.txt')
train_y = np.genfromtxt(fname='same_split/train_y.txt')
test_x = np.genfromtxt(fname='same_split/test_x.txt')
test_y = np.genfromtxt(fname='same_split/test_y.txt')

# Drop the first two columns of every feature matrix in one pass.
train_x = np.delete(train_x, [0, 1], 1)
test_x = np.delete(test_x, [0, 1], 1)
x = np.delete(x, [0, 1], 1)

# Standardization: z-score each of the two remaining feature columns.
# NOTE(review): test_x is scaled with its own statistics rather than the
# training statistics — verify this is intentional.
for col in (0, 1):
    mu = np.mean(train_x[:, col])
    sigma = np.std(train_x[:, col])
    train_x[:, col] = (train_x[:, col] - mu) / sigma
for col in (0, 1):
    mu = np.mean(test_x[:, col])
    sigma = np.std(test_x[:, col])
    test_x[:, col] = (test_x[:, col] - mu) / sigma

for out in range(1):

    acc = np.array([])
    err = np.array([])
Esempio n. 4
0
def read_data(filename):
    """Load a comma-separated file (skipping one header line), shuffle the
    rows in place, and return ``(features, labels)`` where ``labels`` is the
    first column (shape ``(n, 1)``) and ``features`` are the remaining
    columns.  Uses the global NumPy random state."""
    table = np.genfromtxt(filename, dtype=float, delimiter=',', skip_header=1)
    np.random.shuffle(table)
    features = np.delete(table, [0], axis=1)
    labels = table[:, [0]]
    return features, labels
    def region_coloring(self, region, ax):
        """Shade the decision regions of a multi-class one-vs-all linear
        model onto matplotlib axis ``ax``.

        ``self.W`` holds one weight row per class; its first column
        multiplies a ones-column, i.e. acts as the bias.  ``region``
        selects which zone(s) to draw:

        * 1 — points positive for exactly one classifier,
        * 2 — points positive for all classifiers but one (subdivided by
          the dominating classifier),
        * 3 — points negative for every classifier (subdivided by argmax),
        * 'all' — all of the above.

        NOTE(review): each region is painted as the convex hull of its grid
        points, so non-convex regions are over-approximated — confirm this
        is acceptable for display purposes.
        """
        #### color first regions  ####
        # generate input range for functions
        minx = min(min(self.x[:, 0]), min(self.x[:, 1]))
        maxx = max(max(self.x[:, 0]), max(self.x[:, 1]))
        gapx = (maxx - minx) * 0.1
        minx -= gapx
        maxx += gapx

        # plot over range: build a 200x200 grid, flattened to column vectors
        r = np.linspace(minx, maxx, 200)
        x1_vals, x2_vals = np.meshgrid(r, r)
        x1_vals.shape = (len(r)**2, 1)
        x2_vals.shape = (len(r)**2, 1)
        o = np.ones((len(r)**2, 1))
        # prepend the ones column so W's first column acts as the bias
        x = np.concatenate([o, x1_vals, x2_vals], axis=1)

        ### for region 1, determine points that are uniquely positive for each classifier ###
        ind_set = []
        # y[c] is classifier c's score at every grid point
        y = np.dot(self.W, x.T)
        num_classes = np.size(np.unique(self.y))

        if region == 1 or region == 'all':
            for i in range(0, num_classes):
                # all classifier indices except the current one
                class_inds = np.arange(num_classes)
                class_inds = np.delete(class_inds, (i), axis=0)

                # loop over non-current classifier
                # intersect the index lists of grid points where every other
                # classifier is negative.  NOTE(review): list intersection is
                # O(n^2); sets would be faster if this becomes a bottleneck.
                ind = np.argwhere(y[class_inds[0]] < 0).tolist()
                ind = [s[0] for s in ind]
                for j in range(1, len(class_inds)):
                    c_ind = class_inds[j]
                    ind2 = np.argwhere(y[c_ind] < 0).tolist()
                    ind2 = [s[0] for s in ind2]
                    ind = [s for s in ind if s in ind2]

                # ...and where classifier i itself is positive
                ind2 = np.argwhere(y[i] > 0).tolist()
                ind2 = [s[0] for s in ind2]
                ind = [s for s in ind if s in ind2]

                # plot polygon over region defined by ind
                x1_ins = np.asarray([x1_vals[s] for s in ind])
                x1_ins.shape = (len(x1_ins), 1)
                x2_ins = np.asarray([x2_vals[s] for s in ind])
                x2_ins.shape = (len(x2_ins), 1)
                h = np.concatenate((x1_ins, x2_ins), axis=1)
                vertices = ConvexHull(h).vertices
                poly = [h[v] for v in vertices]
                # NOTE(review): positional `closed` argument is deprecated in
                # newer matplotlib — prefer Polygon(poly, closed=True).
                polygon = Polygon(poly, True)
                patches = []
                patches.append(polygon)

                p = PatchCollection(patches, alpha=0.2, color=self.colors[i])
                ax.add_collection(p)

        if region == 2 or region == 'all':
            for i in range(0, num_classes):
                class_inds = np.arange(num_classes)
                class_inds = np.delete(class_inds, (i), axis=0)

                # loop over non-current classifier
                # here: grid points where every *other* classifier is
                # positive and classifier i is negative
                ind = np.argwhere(y[class_inds[0]] > 0).tolist()
                ind = [s[0] for s in ind]
                for j in range(1, len(class_inds)):
                    c_ind = class_inds[j]
                    ind2 = np.argwhere(y[c_ind] > 0).tolist()
                    ind2 = [s[0] for s in ind2]
                    ind = [s for s in ind if s in ind2]

                ind2 = np.argwhere(y[i] < 0).tolist()
                ind2 = [s[0] for s in ind2]
                ind = [s for s in ind if s in ind2]

                # plot polygon over region defined by ind
                x1_ins = np.asarray([x1_vals[s] for s in ind])
                x1_ins.shape = (len(x1_ins), 1)
                x2_ins = np.asarray([x2_vals[s] for s in ind])
                x2_ins.shape = (len(x2_ins), 1)
                o = np.ones((len(x2_ins), 1))
                h = np.concatenate((o, x1_ins, x2_ins), axis=1)

                # determine regions dominated by one classifier or the other
                vals = []
                for c in class_inds:
                    w = self.W[int(c)]
                    nv = np.dot(w, h.T)
                    vals.append(nv)
                vals = np.asarray(vals)
                vals.shape = (len(class_inds), len(h))
                # index (into class_inds) of the highest-scoring classifier
                ind = np.argmax(vals, axis=0)

                for j in range(len(class_inds)):
                    # make polygon for each subregion
                    ind1 = np.argwhere(ind == j)
                    x1_ins2 = np.asarray([x1_ins[s] for s in ind1])
                    x1_ins2.shape = (len(x1_ins2), 1)
                    x2_ins2 = np.asarray([x2_ins[s] for s in ind1])
                    x2_ins2.shape = (len(x2_ins2), 1)
                    h = np.concatenate((x1_ins2, x2_ins2), axis=1)

                    # find convex hull of points
                    vertices = ConvexHull(h).vertices
                    poly = [h[v] for v in vertices]
                    polygon = Polygon(poly, True)
                    patches = []
                    patches.append(polygon)
                    c = class_inds[j]
                    p = PatchCollection(patches,
                                        alpha=0.2,
                                        color=self.colors[c])
                    ax.add_collection(p)

        if region == 3 or region == 'all':
            # find negative zone of all classifiers
            ind = np.argwhere(y[0] < 0).tolist()
            ind = [s[0] for s in ind]
            for i in range(1, num_classes):
                ind2 = np.argwhere(y[i] < 0).tolist()
                ind2 = [s[0] for s in ind2]
                ind = [s for s in ind if s in ind2]

            # loop over negative zone, find max area of each classifier
            x1_ins = np.asarray([x1_vals[s] for s in ind])
            x1_ins.shape = (len(x1_ins), 1)
            x2_ins = np.asarray([x2_vals[s] for s in ind])
            x2_ins.shape = (len(x2_ins), 1)
            o = np.ones((len(x2_ins), 1))
            h = np.concatenate((o, x1_ins, x2_ins), axis=1)

            # determine regions dominated by one classifier or the other
            vals = []
            for c in range(num_classes):
                w = self.W[c]
                nv = np.dot(w, h.T)
                vals.append(nv)
            vals = np.asarray(vals)
            vals.shape = (num_classes, len(h))
            ind = np.argmax(vals, axis=0)

            # loop over each class, construct polygon region for each
            for c in range(num_classes):
                # make polygon for each subregion
                ind1 = np.argwhere(ind == c)
                x1_ins2 = np.asarray([x1_ins[s] for s in ind1])
                x1_ins2.shape = (len(x1_ins2), 1)
                x2_ins2 = np.asarray([x2_ins[s] for s in ind1])
                x2_ins2.shape = (len(x2_ins2), 1)
                h = np.concatenate((x1_ins2, x2_ins2), axis=1)

                # find convex hull of points
                vertices = ConvexHull(h).vertices
                poly = [h[v] for v in vertices]
                polygon = Polygon(poly, True)
                patches = []
                patches.append(polygon)
                p = PatchCollection(patches, alpha=0.2, color=self.colors[c])
                ax.add_collection(p)
Esempio n. 6
0
def EarlyStopping(
    input_,
    target,
    validation_runs,
    initial_scales,
    initial_nuggets,
    input_pipe,
    output_pipe,
    scales_rate=0.001,
    nuggets_rate=0.003,
    nsteps=1500,
):
    """Gradient-descent hyperparameter tuning with early stopping.

    Holds out ``validation_runs`` from the data, fits the input/output
    pipelines on the remainder, then for each sub-emulator runs ``nsteps``
    of gradient descent on (scales, nuggets) and keeps the values from the
    step with the highest summed validation log likelihood.

    Returns a tuple of arrays ``(scales, nuggets)``, one entry per
    sub-emulator.
    """
    held_out = validation_runs.tolist()
    valid_par = input_[held_out]
    valid_obs = target[held_out]

    # Training data is everything except the held-out validation runs.
    input_ = np.delete(input_, validation_runs, 0)
    target = np.delete(target, validation_runs, 0)

    input_pipe.Fit(input_)
    output_pipe.Fit(target)

    valid_par = input_pipe.Transform(valid_par)
    valid_obs = output_pipe.Transform(valid_obs)

    emulator = EmulatorMultiOutput(input_pipe.Transform(input_),
                                   output_pipe.Transform(target))
    emulator.SetCovariance(squared_exponential)

    scales_list = []
    nuggets_list = []

    for index, emu in enumerate(emulator.emulator_list):
        descent = GradientDescentForEmulator(scales_rate, nuggets_rate)
        descent.SetFunc(emu.MarginalLikelihood)

        scales = initial_scales
        nuggets = initial_nuggets
        hist_scales = []
        hist_nuggets = []
        partial_likelihood = []

        for i in range(nsteps):
            scales, nuggets, grad_scales, grad_nuggets = descent.StepDescent(
                scales, nuggets)
            emu.SetScales(scales)
            emu.SetNuggets(nuggets)
            emu.StartUp()

            hist_scales.append(scales)
            hist_nuggets.append(nuggets)

            # Validation log likelihood summed over all held-out runs.
            val = sum(
                emu.LogLikelihood(par.reshape(1, -1),
                                  exp[index].reshape(1, -1))
                for par, exp in zip(valid_par, valid_obs))
            partial_likelihood.append(val)

            sys.stdout.write(
                "\rProcessing %i iteration, likelihood = %f, nuggets = %f, scales = %s"
                % (i, val, nuggets, np.array2string(scales)))
            sys.stdout.flush()

        # plt.plot(partial_likelihood)
        # plt.show()
        # Early stopping: pick the step with maximum validation likelihood.
        best = max(range(len(partial_likelihood)),
                   key=lambda k: partial_likelihood[k])
        print("max", best)
        scales_list.append(hist_scales[best])
        nuggets_list.append(hist_nuggets[best])
    return np.array(scales_list), np.array(nuggets_list)
Esempio n. 7
0
            Xi = new_imagePoints_noisy
            Hnoisy_HO = hh(Xo, Xi)

            #OpenCV METHOD
            Xo = new_objectPoints[[0, 1, 3], :]
            Xi = new_imagePoints_noisy
            Hnoisy_OpenCV, _ = cv2.findHomography(Xo[:2].T.reshape(1, -1, 2),
                                                  Xi[:2].T.reshape(1, -1, 2))

            ## ERRORS FOR THE  DLT HOMOGRAPHY
            ## VALIDATION OBJECT POINTS
            validation_objectPoints = validation_plane.get_points()
            validation_imagePoints = np.array(
                cam.project(validation_objectPoints, False))
            Xo = np.copy(validation_objectPoints)
            Xo = np.delete(Xo, 2, axis=0)
            Xi = np.copy(validation_imagePoints)
            homo_dlt_error_loop.append(
                ef.validation_points_error(Xi, Xo, Hnoisy_dlt))

            ## ERRORS FOR THE  HO HOMOGRAPHY
            ## VALIDATION OBJECT POINTS
            validation_objectPoints = validation_plane.get_points()
            validation_imagePoints = np.array(
                cam.project(validation_objectPoints, False))
            Xo = np.copy(validation_objectPoints)
            Xo = np.delete(Xo, 2, axis=0)
            Xi = np.copy(validation_imagePoints)
            homo_HO_error_loop.append(
                ef.validation_points_error(Xi, Xo, Hnoisy_HO))
Esempio n. 8
0
    def apply_poly(self, x_poly, lst_poly):
        """Abstract-transformer max-pooling over a ``Poly`` element.

        For each pooling window the new lower/upper bounds are the window
        maxima of the input bounds.  The lower constraint (``ge``) always
        selects the cell with the best lower bound; the upper constraint
        (``le``) selects that same cell only when no other cell in the
        window could exceed it, and otherwise degrades to the constant
        upper bound.  Coefficients referring to padded border cells are
        deleted at the end so the constraint width matches the unpadded
        input.

        NOTE(review): assumes a batch of one (all indexing uses batch 0).
        """
        res = Poly()

        k_h, k_w = self.kernel

        lw, up = x_poly.lw.copy(), x_poly.up.copy()

        lw = lw.reshape(x_poly.shape)
        up = up.reshape(x_poly.shape)

        # Zero-pad spatial dims so windows near the border are well-defined.
        p = self.padding
        lw_pad = np.pad(lw, ((0, 0), (0, 0), (p, p), (p, p)), mode='constant')
        up_pad = np.pad(up, ((0, 0), (0, 0), (p, p), (p, p)), mode='constant')
        x_n, x_c, x_h, x_w = lw_pad.shape

        res_h = int((x_h - k_h) / self.stride) + 1
        res_w = int((x_w - k_w) / self.stride) + 1

        len_pad = x_c * x_h * x_w
        len_res = x_c * res_h * res_w

        c_idx, h_idx, w_idx = index2d(x_c, self.stride, self.kernel,
                                      (x_h, x_w))

        res_lw, res_up = [], []
        mx_lw_idx_lst, mx_up_idx_lst = [], []

        for c in range(x_c):
            for i in range(res_h * res_w):

                # Maximum lower bound in the window and its location.
                mx_lw_val, mx_lw_idx = -1e9, None

                for k in range(k_h * k_w):

                    h, w = h_idx[k, i], w_idx[k, i]
                    val = lw_pad[0, c, h, w]

                    if val > mx_lw_val:
                        mx_lw_val, mx_lw_idx = val, (c, h, w)

                # Maximum upper bound; cnt counts competing cells whose
                # upper bound exceeds the best lower bound, i.e. cells that
                # could still be the true window maximum.
                mx_up_val, cnt = -1e9, 0

                for k in range(k_h * k_w):

                    h, w = h_idx[k, i], w_idx[k, i]
                    val = up_pad[0, c, h, w]

                    if val > mx_up_val:
                        mx_up_val = val

                    if mx_lw_idx != (c, h, w) and val > mx_lw_val:
                        cnt += 1

                res_lw.append(mx_lw_val)
                res_up.append(mx_up_val)

                mx_lw_idx_lst.append(mx_lw_idx)
                # Ambiguous maximum -> no exact upper relation available.
                if cnt > 0: mx_up_idx_lst.append(None)
                else: mx_up_idx_lst.append(mx_lw_idx)

        res.lw = np.array(res_lw)
        res.up = np.array(res_up)

        res.le = np.zeros([len_res, len_pad + 1])
        res.ge = np.zeros([len_res, len_pad + 1])

        res.shape = (1, x_c, res_h, res_w)

        for i in range(len_res):
            c = mx_lw_idx_lst[i][0]
            h = mx_lw_idx_lst[i][1]
            w = mx_lw_idx_lst[i][2]

            # Flat (row-major) index of the maximizing cell.
            idx = c * x_h * x_w + h * x_w + w
            res.ge[i, idx] = 1

            if mx_up_idx_lst[i] is None:
                # Constant upper bound (last column is the bias term).
                res.le[i, -1] = res.up[i]
            else:
                res.le[i, idx] = 1

        # Collect flat indices of all padded border cells (first channel),
        # then replicate per channel, and delete those columns.
        del_idx = []
        if self.padding > 0:
            # Top padding rows plus the left padding of the first real row.
            del_idx = del_idx + list(range(self.padding * (x_w + 1)))
            mx = x_h - self.padding
            for i in range(self.padding + 1, mx):
                tmp = i * x_w
                del_idx = del_idx + list(
                    range(tmp - self.padding, tmp + self.padding))
            # BUG FIX: the flat index of row `mx` is mx * x_w (row length is
            # x_w in row-major order); the original used mx * x_h, which is
            # only correct for square inputs.  The sibling convolution
            # transformer already uses mx * x_w here.
            del_idx = del_idx + list(range(mx * x_w - self.padding, x_h * x_w))

            tmp = np.array(del_idx)

            for i in range(1, x_c):
                offset = i * x_h * x_w
                del_idx = del_idx + list((tmp + offset).copy())

        res.le = np.delete(res.le, del_idx, 1)
        res.ge = np.delete(res.ge, del_idx, 1)

        return res
Esempio n. 9
0
    def apply_poly(self, x_poly, lst_poly):
        """Abstract-transformer convolution over a ``Poly`` element.

        Builds exact linear relations (``le`` == ``ge``) for every output
        cell by embedding each filter in a zeroed input-sized volume and
        sliding it with ``np.roll``; concrete bounds are then obtained via
        back-substitution through ``lst_poly``.  Padded border coefficients
        are deleted so the constraint width matches the unpadded input.
        """
        res = Poly()

        f_n, f_c, f_h, f_w = self.filters.shape
        x_n, x_c, x_h, x_w = x_poly.shape

        # Work in padded spatial coordinates from here on.
        x_w += 2 * self.padding
        x_h += 2 * self.padding

        res_h = int((x_h - f_h) / self.stride) + 1
        res_w = int((x_w - f_w) / self.stride) + 1

        len_pad = x_c * x_h * x_w
        len_res = f_n * res_h * res_w

        res.lw = np.zeros(len_res)
        res.up = np.zeros(len_res)

        # Constraint matrices: one row per output cell, last column = bias.
        res.le = np.zeros([len_res, len_pad + 1])
        res.ge = np.zeros([len_res, len_pad + 1])

        res.shape = (1, f_n, res_h, res_w)

        for i in range(f_n):
            # Embed filter i at the top-left corner of a zeroed volume; the
            # flattened copy is then rolled to each window position.
            base = np.zeros([x_c, x_h, x_w])
            base[:f_c, :f_h, :f_w] = self.filters[i]
            base = np.reshape(base, -1)
            # w_idx tracks the filter's right edge within the current row.
            w_idx = f_w

            for j in range(res_h * res_w):
                res.le[i * res_h * res_w + j] = np.append(base, [self.bias[i]])
                res.ge[i * res_h * res_w + j] = np.append(base, [self.bias[i]])

                if w_idx + self.stride <= x_w:
                    # Slide one stride to the right within the same row.
                    base = np.roll(base, self.stride)
                    w_idx += self.stride
                else:
                    # Wrap to the start of the next row block.
                    # NOTE(review): relies on np.roll's circular shift to
                    # realign the filter — confirm stride/width combinations
                    # always land on valid positions.
                    base = np.roll(base, self.stride * x_w - w_idx + f_w)
                    w_idx = f_w

        # Remove coefficients that refer to padded border cells (computed
        # for channel 0, then replicated per channel).
        del_idx = []
        if self.padding > 0:
            del_idx = del_idx + list(range(self.padding * (x_w + 1)))
            mx = x_h - self.padding
            for i in range(self.padding + 1, mx):
                tmp = i * x_w
                del_idx = del_idx + list(
                    range(tmp - self.padding, tmp + self.padding))
            del_idx = del_idx + list(range(mx * x_w - self.padding, x_h * x_w))

            tmp = np.array(del_idx)

            for i in range(1, x_c):
                offset = i * x_h * x_w
                del_idx = del_idx + list((tmp + offset).copy())

        res.le = np.delete(res.le, del_idx, 1)
        res.ge = np.delete(res.ge, del_idx, 1)

        # Concretize bounds against the preceding abstract elements.
        res.back_substitute(lst_poly)

        return res
Esempio n. 10
0
def sliding_window_opt():
    xy_cor_list = []
    xy_cor_list_opt = []
    xy_cor_list_gt = []
    distance_error_list = []
    diff_score_list = []

    deep_net = dlk.custom_net(model_path)

    # opt_img_height参数决定了实时图与基准图之间的分辨率大小
    I_org, GPS_list = img_utility.load_I(image_dir, image_dir_ext,
                                         opt_img_height)
    _, img_c, img_h, img_w = I_org.shape

    M = img_utility.load_M(map_loc)
    _, map_h, map_w = M.shape

    # 这里的P参数是需要我们传进来的
    P_init_org, P_org = pama_utility.load_P(motion_param_loc)
    P_opt = np.zeros(P_org.shape)
    curr_P_init = P_init_org

    # 就是离线跑的,人为这里的循环次数就是P_org的长度
    for i in range(P_org.shape[0]):
        start_timestamp = time.clock()
        I = I_org[i:i + 2, :, :, :]

        # t,V是之后优化中会用到的参数,这里进行定义
        # templates indices
        T = np.array([1])
        # visibility neighborhood
        V = np.array([np.arange(50), np.array([0])])

        V_ = np.array([np.array([0])])
        V = np.delete(V, 0)

        # curr_P shape [1,8,1]
        curr_P = np.expand_dims(P_org[i, :, :], axis=0)
        # 这里的改变是让整个矩阵的转移大小可以符合我们的要求
        curr_P = pama_utility.scale_P(curr_P, s=scale_img_to_map)
        P_mk = compute_Pmk(curr_P_init, curr_P, T)

        # 采用瀑布流的形式(从粗到细,进行结果的细化
        for idx in range(2):
            # 从基准地图中获取信息的步骤
            # 同时增加一个参数,接收其坐标值
            M_tmpl, xy_cor = extract_map_templates(P_mk, M, T, img_c, img_h,
                                                   img_w)
            xy_cor_list.append(xy_cor)

            ###
            # 提取得到M图中的patch之后,再结合实时图(tamplate frame)去提取两者之间的特征,进而进一步进行单应矩阵的细调
            T_np = np.expand_dims(I[1, :, :, :], axis=0)
            T_tens = Variable(torch.from_numpy(T_np).float())
            T_tens_nmlz = dlk.normalize_img_batch(T_tens)
            T_feat_tens = deep_net(T_tens_nmlz)
            T_feat = T_feat_tens.data.numpy()

            M_tmpl_tens = Variable(torch.from_numpy(M_tmpl).float())
            M_tmpl_tens_nmlz = dlk.normalize_img_batch(M_tmpl_tens)
            M_feat_tens = deep_net(M_tmpl_tens_nmlz)
            M_feat = M_feat_tens.data.numpy()

            ###
            # 使用dlk的纠正手段
            dlk_net = dlk.DeepLK(dlk.custom_net(model_path))
            p_lk, _, itr_dlk = dlk_net(M_tmpl_tens_nmlz,
                                       T_tens_nmlz,
                                       tol=1e-4,
                                       max_itr=max_itr_dlk,
                                       conv_flag=1,
                                       ret_itr=True)
            # 计算patch与实时图之间的差距
            diff_score = torch.sqrt(
                torch.sum(torch.pow(M_tmpl_tens_nmlz - T_tens_nmlz,
                                    2))).item()
            diff_score = diff_score / (M_tmpl_tens.shape[1] *
                                       M_tmpl_tens.shape[2] *
                                       M_tmpl_tens.shape[3])
            print("基准子图与实时图之间的差距为:", diff_score)
            diff_score_list.append(diff_score)
            p_lk = p_lk.cpu()
            p_lk2x = pama_utility.scale_P(p_lk.data.numpy(), scaling_for_disp)
            # 只使用VO的结果
            # p_lk2x = np.zeros([1, 8, 1])

            s_sm = (scaling_for_disp * opt_img_height) / map_h
            curr_P_init_scale = pama_utility.scale_P(curr_P_init, s_sm)
            curr_P_scale = pama_utility.scale_P(curr_P, s_sm)

            H_rel_samp = pama_utility.p_to_H(p_lk2x)
            H_org_samp = pama_utility.p_to_H(curr_P_scale)
            H_rel_coord = np.linalg.inv(H_rel_samp)
            H_org_coord = np.linalg.inv(H_org_samp)
            H_opt_coord = H_rel_coord @ H_org_coord
            H_opt_samp = np.linalg.inv(H_opt_coord)

            P_opt_i_scale = pama_utility.H_to_p(H_opt_samp)
            s_lg = map_h / (scaling_for_disp * opt_img_height)
            P_opt_i = pama_utility.scale_P(P_opt_i_scale, s_lg)
            P_opt[i, :, :] = P_opt_i

            P_mk_opt = compute_Pmk(curr_P_init_scale, P_opt_i_scale, T)
            P_mk0 = compute_Pmk(curr_P_init_scale, curr_P_scale, T)
            P_mk_opt_map = pama_utility.scale_P(P_mk_opt, s_lg)
            P_mk0_map = pama_utility.scale_P(P_mk0, s_lg)
            H_mk_samp = pama_utility.p_to_H(P_mk_opt)
            H_mk0_samp = pama_utility.p_to_H(P_mk0)

            H_mk = np.linalg.inv(H_mk_samp)
            H_mk0 = np.linalg.inv(H_mk0_samp)
            H_mk_rel = np.matmul(H_mk, np.linalg.inv(H_mk0))
            H_mk_rel_samp = np.linalg.inv(H_mk_rel)
            P_mk_rel_samp = pama_utility.H_to_p(H_mk_rel_samp)
            H_init_samp = pama_utility.p_to_H(curr_P_init)
            H_opt_i_samp = pama_utility.p_to_H(P_opt_i)

            H_init_coord = np.linalg.inv(H_init_samp)
            H_opt_i_coord = np.linalg.inv(H_opt_i_samp)
            H_init_coord_new = H_opt_i_coord @ H_init_coord
            H_init_samp_new = np.linalg.inv(H_init_coord_new)
            curr_P_init = pama_utility.H_to_p(H_init_samp_new)

            P_mk = curr_P_init
            curr_P = torch.zeros([1, 8, 1])

        # need to rescale P_init_org and P_org to opt_img_height, from map_h
        # s_sm = opt_img_height / map_h;
        # s_sm = (scaling_for_disp * opt_img_height) / map_h  #########################################
        # curr_P_init_scale = pama_utility.scale_P(curr_P_init, s_sm)
        # curr_P_scale = pama_utility.scale_P(curr_P, s_sm)
        #
        # # 这部分的内容是在增加优化部分的内容
        # # 直接注释掉这一行就是不使用优化跑的结果
        # # P_opt_i_scale = optimize_wmap(I, curr_P_scale, T, V, curr_P_init_scale, M_feat, T_feat, tol, max_itr, lam1, lam2)
        # # curr_P_scale = optimize_wmap(I_2x, curr_P_scale, T, V, curr_P_init_scale, M_feat_2x, T_feat_2x, tol, max_itr, lam1, lam2)
        #
        # H_rel_samp = pama_utility.p_to_H(p_lk2x)
        # H_org_samp = pama_utility.p_to_H(curr_P_scale)
        # H_rel_coord = np.linalg.inv(H_rel_samp)
        # H_org_coord = np.linalg.inv(H_org_samp)
        # H_opt_coord = H_rel_coord @ H_org_coord
        # H_opt_samp = np.linalg.inv(H_opt_coord)
        #
        # P_opt_i_scale = pama_utility.H_to_p(H_opt_samp)
        # s_lg = map_h / (scaling_for_disp * opt_img_height)
        # P_opt_i = pama_utility.scale_P(P_opt_i_scale, s_lg)
        # P_opt[i, :, :] = P_opt_i
        #
        # ### plotting
        # P_mk_opt = compute_Pmk(curr_P_init_scale, P_opt_i_scale, T)
        # P_mk0 = compute_Pmk(curr_P_init_scale, curr_P_scale, T)
        # P_mk_opt_map = pama_utility.scale_P(P_mk_opt, s_lg)
        # P_mk0_map = pama_utility.scale_P(P_mk0, s_lg)
        # H_mk_samp = pama_utility.p_to_H(P_mk_opt)
        # H_mk0_samp = pama_utility.p_to_H(P_mk0)
        #
        # # invert sampling params to get coord params:
        # H_mk = np.linalg.inv(H_mk_samp)
        # H_mk0 = np.linalg.inv(H_mk0_samp)
        #
        # # compute relative hmg:
        # H_mk_rel = np.matmul(H_mk, np.linalg.inv(H_mk0))
        #
        # # invert back to sampling hmg:
        # H_mk_rel_samp = np.linalg.inv(H_mk_rel)
        #
        # # convert sampling hmg back to sampling params:
        # P_mk_rel_samp = pama_utility.H_to_p(H_mk_rel_samp)
        #
        #
        # # compose new P_opt_i with curr_P_init
        # H_init_samp = pama_utility.p_to_H(curr_P_init)
        # H_opt_i_samp = pama_utility.p_to_H(P_opt_i)
        #
        # H_init_coord = np.linalg.inv(H_init_samp)
        # H_opt_i_coord = np.linalg.inv(H_opt_i_samp)
        #
        # H_init_coord_new = H_opt_i_coord @ H_init_coord
        #
        # H_init_samp_new = np.linalg.inv(H_init_coord_new)
        #
        # curr_P_init = pama_utility.H_to_p(H_init_samp_new)

        # NOTE(review): time.clock() was removed in Python 3.8 -- this code
        # needs an older interpreter (time.perf_counter() is the modern
        # replacement).
        end_timestamp = time.clock()
        print("一张图片定位所需时间为:", end_timestamp - start_timestamp)
        print('finished iteration: {:d}'.format(i + 1))

        # Localization for this frame is done; the rest of this loop body is
        # only visualization/plotting.
        side_margin = 0.15
        top_margin = 0.15
        # Vertex trace of the target box inside the template image; x comes
        # from dim 3 (width), y from dim 2 (height).
        # NOTE(review): the 5th/6th vertices use side_margin for y where the
        # first vertex used top_margin -- confirm the outline is intentionally
        # asymmetric.
        targ_box = np.array([
            [
                M_tmpl_tens.shape[3] * side_margin,
                M_tmpl_tens.shape[2] * top_margin
            ],
            [
                M_tmpl_tens.shape[3] * (1 - side_margin),
                M_tmpl_tens.shape[2] * top_margin
            ],
            [
                M_tmpl_tens.shape[3] * (1 - side_margin),
                M_tmpl_tens.shape[2] * (1 - side_margin)
            ],
            [
                M_tmpl_tens.shape[3] * side_margin,
                M_tmpl_tens.shape[2] * (1 - side_margin)
            ],
            [
                M_tmpl_tens.shape[3] * side_margin,
                M_tmpl_tens.shape[2] * side_margin
            ],
            [
                M_tmpl_tens.shape[3] * (1 - side_margin),
                M_tmpl_tens.shape[2] * (1 - side_margin)
            ],
        ])

        # Panel 1: the raw template with the target box and its center marked.
        plt.subplot(3, 1, 1)
        plt.title('M{:d}'.format(i + 1))
        plt.imshow(img_utility.plt_axis_match_np(M_tmpl_tens[0, :, :, :]))
        plt.plot(targ_box[:, 0], targ_box[:, 1], 'r-')
        plt.plot(round(M_tmpl_tens.shape[3] / 2),
                 round(M_tmpl_tens.shape[2] / 2), 'ro')
        plt.axis('off')

        # Panel 2: the template warped by the refined relative homography.
        plt.subplot(3, 1, 2)
        plt.title('M{:d} Warp'.format(i + 1))
        M_tmpl_curr_tens = M_tmpl_tens.float()
        P_mk_rel_samp_curr = torch.from_numpy(
            pama_utility.scale_P(P_mk_rel_samp, scaling_for_disp)).float()
        M_tmpl_w, _, xy_cor_curr_opt = dlk.warp_hmg(M_tmpl_curr_tens,
                                                    P_mk_rel_samp_curr)
        # Refinement step: xy_cor_curr_opt holds the refined image's position
        # inside the previous image (the one produced by the last warp+crop).
        print("未经过优化的绝对坐标为:", xy_cor)
        # print("经过优化的相对的坐标为:", xy_cor_curr_opt)
        # Map the refined relative coordinates back to absolute map coordinates.
        xy_patch_org_cor_opt = dlk.warp_hmg_Noncentric(M,
                                                       P_mk,
                                                       xy_cor_curr_opt,
                                                       img_w=T_tens.shape[3],
                                                       img_h=T_tens.shape[2])
        xy_cor_list_opt.append(xy_patch_org_cor_opt)
        print("经过优化的绝对的坐标为:", xy_patch_org_cor_opt)
        # Ground-truth pixel position derived from the next frame's GPS fix.
        xy_cor_curr_gt = srh.lag_log_to_pix_pos(M, GPS_list[i + 1])
        xy_cor_list_gt.append(xy_cor_curr_gt)
        print("实际坐标为:", [xy_cor_curr_gt[0], xy_cor_curr_gt[1]])
        # Euclidean pixel distance scaled to meters by the map resolution.
        distance_error_curr = map_resolution * sqrt(
            (xy_cor_curr_gt[0] - xy_patch_org_cor_opt[0])**2 +
            (xy_cor_curr_gt[1] - xy_patch_org_cor_opt[1])**2)
        print("实际坐标与定位结果之间的距离:", distance_error_curr, "m")
        distance_error_list.append(distance_error_curr)

        plt.imshow(img_utility.plt_axis_match_tens(M_tmpl_w[0, :, :, :]))
        # plt.plot(M_tmpl_w.shape[3]/2, M_tmpl_w.shape[2]/2, 'ro')
        plt.plot(targ_box[:, 0], targ_box[:, 1], 'r-')
        plt.plot(round(M_tmpl_tens.shape[3] / 2),
                 round(M_tmpl_tens.shape[2] / 2), 'ro')
        plt.axis('off')

        # Panel 3: the current input frame for visual comparison.
        plt.subplot(3, 1, 3)
        plt.imshow(img_utility.plt_axis_match_np(I_org[i + 1, :, :, :]))
        # plt.plot(I_org_2x.shape[3]/2, I_org_2x.shape[2]/2, 'ro')
        plt.plot(targ_box[:, 0], targ_box[:, 1], 'r-')
        plt.plot(round(M_tmpl_tens.shape[3] / 2),
                 round(M_tmpl_tens.shape[2] / 2), 'ro')
        plt.title('I{:d}'.format(i + 1))
        plt.axis('off')

        # Showing the per-step warp result could be disabled here if desired.
        plt.show()

        # 	# Plot the optimization comparison results.
        # 	# This step is fairly time-consuming; removing it basically makes
        # 	# the pipeline run in real time.
        # if i == 60:
        # 	plt.subplot(2, 1, 1)
        # 	M_pil_marked = srh.Add_M_Marker(transforms.ToPILImage()(torch.from_numpy(M)), xy_patch_org_cor_opt,
        # 									color="blue")
        # 	M_pil_marked = srh.Add_M_Marker(M_pil_marked, xy_cor, color="red")
        # 	M_pil_marked = srh.Add_M_Marker(M_pil_marked, xy_cor_curr_gt, color="green")
        # 	plt.title('当前定位结果,优化前:红;优化后:蓝;实际点:绿')
        # 	plt.imshow(M_pil_marked)
        #
        # 	plt.subplot(2, 1, 2)
        # 	M_pil_marked = srh.Add_M_Markers_list(M, xy_cor_list, color="red")
        # 	M_pil_marked = srh.Add_M_Markers_list(M_pil_marked, xy_cor_list_opt, color="blue")
        # 	M_pil_marked = srh.Add_M_Markers_list(M_pil_marked, xy_cor_list_gt, color="green")
        # 	plt.title('飞行轨迹历史,优化前:红;优化后:蓝;实际点:绿')
        # 	plt.imshow(M_pil_marked)
        # 	plt.show()
        #
        # 	plt.plot(diff_score_list)
        # 	plt.show()

    # After the loop: final localization result and the whole flight history
    # (red = before refinement, blue = after refinement, green = ground truth).
    plt.subplot(2, 1, 1)
    M_pil_marked = srh.Add_M_Marker(transforms.ToPILImage()(
        torch.from_numpy(M)),
                                    xy_patch_org_cor_opt,
                                    color="blue")
    M_pil_marked = srh.Add_M_Marker(M_pil_marked, xy_cor, color="red")
    M_pil_marked = srh.Add_M_Marker(M_pil_marked,
                                    xy_cor_curr_gt,
                                    color="green")
    plt.title('当前定位结果,优化前:红;优化后:蓝;实际点:绿')
    plt.imshow(M_pil_marked)

    plt.subplot(2, 1, 2)
    M_pil_marked = srh.Add_M_Markers_list(M, xy_cor_list, color="red")
    M_pil_marked = srh.Add_M_Markers_list(M_pil_marked,
                                          xy_cor_list_opt,
                                          color="blue")
    M_pil_marked = srh.Add_M_Markers_list(M_pil_marked,
                                          xy_cor_list_gt,
                                          color="green")
    plt.title('飞行轨迹历史,优化前:红;优化后:蓝;实际点:绿')
    plt.imshow(M_pil_marked)
    plt.show()

    # Diagnostic curves: per-frame difference scores and localization error.
    plt.plot(diff_score_list)
    plt.show()
    plt.plot(distance_error_list)
    plt.show()
    M_pil_marked.save('marked.png')
    # Draw the three trajectories as connected polylines onto the marked map.
    draw = ImageDraw.Draw(M_pil_marked)
    for i in range(len(xy_cor_list_opt) - 1):
        draw.line((xy_cor_list_opt[i][0], xy_cor_list_opt[i][1],
                   xy_cor_list_opt[i + 1][0], xy_cor_list_opt[i + 1][1]),
                  fill='blue',
                  width=5)
        draw.line((xy_cor_list[i][0], xy_cor_list[i][1], xy_cor_list[i + 1][0],
                   xy_cor_list[i + 1][1]),
                  fill='red',
                  width=5)
        draw.line((xy_cor_list_gt[i][0], xy_cor_list_gt[i][1],
                   xy_cor_list_gt[i + 1][0], xy_cor_list_gt[i + 1][1]),
                  fill='green',
                  width=5)
    M_pil_marked.save('marked_line.png')
    # Relative-pose rescaling is currently disabled: the factor is fixed at 1.
    # s_rel_pose = float(img_h_rel_pose) / map_h
    s_rel_pose = 1
    P_opt_scale = pama_utility.scale_P(P_opt, s_rel_pose)
    H_opt_rel_samp = pama_utility.p_to_H(P_opt_scale)
    H_opt_rel_coord = np.linalg.inv(H_opt_rel_samp)

    P_opt_map_scale_coord = pama_utility.H_to_p(H_opt_rel_coord)

    P_opt_map_scale_coord_sqz = np.squeeze(P_opt_map_scale_coord)

    # switch axes in order to have proper format for next function, decompose_rel_hmg.py
    H_opt_rel_coord = H_opt_rel_coord.swapaxes(0, 2)
    H_opt_rel_coord = H_opt_rel_coord.swapaxes(0, 1)
    # NOTE(review): the divisor 17 is presumably the number of localized
    # frames -- confirm; len(distance_error_list) would avoid the magic number.
    print("平均精度为:", np.sum(np.array(distance_error_list)) / 17)

    # Persist the relative homographies, refined coordinates and scores.
    sio.savemat(
        opt_param_save_loc,
        dict([('H_rel', H_opt_rel_coord), ('cor_opt', xy_cor_list_opt),
              ('diff_score', diff_score_list)]))
Esempio n. 11
0
    def read_housing_csv_2(self, file_name, x_mapping_state, target_name=None):
        """Read a housing CSV and encode it into numeric feature/target arrays.

        Parameters
        ----------
        file_name : str
            Path to the CSV file; the first row is a header.
        x_mapping_state : list of dict
            Per-feature-column mappings from raw string value -> numeric code.
            In training mode (``target_name`` given) fresh mappings are built;
            in inference mode (``target_name is None``) existing entries are
            looked up by column index.  One mapping dict is appended per
            feature column in both modes.
        target_name : str or None
            Name of the target column.  When ``None``, one fewer column is
            skipped and ``Y`` is returned uninitialized.

        Returns
        -------
        tuple
            ``(X, X_normalize_state, x_mapping_state, Y, Y_normalize_state)``;
            the normalize states are ``None`` unless ``config.NN_NORMALIZE``.

        Raises
        ------
        Exception
            If a ground-truth cell cannot be parsed as a float.
        """
        # Keep the header row only when a later step consumes it (the
        # multi-encode / data-science paths strip it themselves).
        skip_header = 1
        if self.config.NN_MULTI_ENCODE_TEXT_VARS or self.config.NN_APPLY_DATA_SCIENCE:
            skip_header = 0
        data = np.genfromtxt(file_name,
                             delimiter=',',
                             dtype='unicode',
                             skip_header=skip_header)
        if self.config.NN_DEBUG_SHAPES:
            print(data.shape)
        # Skip the Id column, plus the target column when one is present.
        skip_cols = 1 if target_name is None else 2

        # Identify "Area" columns by text match against the first row.
        # NOTE(review): when skip_header == 1 the header has already been
        # dropped, so data[0, :] is the first *data* row and typically nothing
        # matches -- confirm whether that is intended.
        area_cols = [i for i, item in enumerate(data[0, :]) if "Area" in item]

        # multi-encode
        if self.config.NN_MULTI_ENCODE_TEXT_VARS:
            X_data, self.neighborhood_vals = multi_encode_text_variables(
                "Neighborhood", data, self.neighborhood_vals)
            X_data = np.delete(X_data, 0, axis=0)  # Remove header now
            data = X_data
        if self.config.NN_APPLY_DATA_SCIENCE:
            # Apply some data science
            data = self.filter_training_data(data, target_name=target_name)
            X_data = self.augment_training_data(data, target_name=target_name)
            X_data = np.delete(X_data, 0, axis=0)  # Remove header now
            data = X_data
        # Feature matrix (samples x features); Id/target columns excluded.
        X = np.empty((data.shape[0], data.shape[1] - skip_cols))
        for col in range(data.shape[1] - skip_cols):
            map_id = 1.0  # reset the label counter for every feature
            if target_name is not None:
                # Training: build a fresh value -> code mapping for this column.
                mapping_state = {}
            else:
                # Inference: reuse the mapping built during training.
                mapping_state = x_mapping_state[col]
            if col in area_cols:
                # Numeric column: parse directly; label-encode cells that
                # fail to parse as floats.
                for row in range(data.shape[0]):
                    try:
                        X[row][col] = data[row][col + 1].astype(float)
                    except ValueError:
                        if data[row][col + 1] in mapping_state:
                            X[row][col] = mapping_state[data[row][col + 1]]
                        else:
                            mapping_state[data[row][col + 1]] = map_id
                            X[row][col] = map_id
                            map_id = map_id + 1.0
            else:
                # Categorical column: label-encode every distinct raw value.
                for row in range(data.shape[0]):
                    if data[row][col + 1] in mapping_state:
                        X[row][col] = mapping_state[data[row][col + 1]]
                    else:
                        mapping_state[data[row][col + 1]] = map_id
                        X[row][col] = map_id
                        map_id = map_id + 1.0
            x_mapping_state.append(mapping_state)
        # Ground truth (last column); left uninitialized when no target.
        Y = np.empty((data.shape[0], 1))
        if target_name is not None:
            prev = 0.0
            for row in range(data.shape[0]):
                col = data.shape[1] - 1
                try:
                    Y[row][0] = data[row][col].astype(float)
                except ValueError as exc:
                    # Chain the parse error so the bad cell is visible.
                    raise Exception("Ground truth should be float") from exc
                # Ensure GT was sorted before
                # assert (prev <= Y[row][0])
                prev = Y[row][0]
                if self.config.NN_LOG_TARGET is True:
                    # Train against log-price for a more normal distribution.
                    Y[row][0] = np.log(Y[row][0])
        # Normalize
        Y_normalize_state = X_normalize_state = None
        if self.config.NN_NORMALIZE:
            if target_name is not None:
                Y, Y_normalize_state = self.utils.normalize0(Y, axis=0)
            X, X_normalize_state = self.utils.normalize0(X, axis=0)
        if self.config.NN_DEBUG_SHAPES:
            print(X.shape, Y.shape, X, X[0][0].dtype)
        return X, X_normalize_state, x_mapping_state, Y, Y_normalize_state