Example #1
from logic.inverse_perspective import InversePerspective
from logic.video_calibration import VideoCalibration  # assumed module path; VideoCalibration is used below but its import is missing from the snippet
import pandas as pd
import os
import cv2
import numpy as np
import quaternion
from logic.mapper import Mapper, calculate_position


def row_to_quaternion(row):
    return np.quaternion(row.q4, row.q1, row.q2, row.q3)


if __name__ == '__main__':
    cal_data = r"C:\repositories\autonomic_car\selfie_car\src\pc\settings\camera_calibration\calib.json"
    cal = VideoCalibration(cal_data, (320, 240))
    dirpath = r"C:\repositories\autonomic_car\selfie_car\data_intake4\v1.011"
    perspective = InversePerspective(img_shape=(250, 250),
                                     desired_shape=(80, 160),
                                     marigin=0.25)
    mapper = Mapper(map_size=(1200, 1200), scale=300.0, type='additive')
    df = pd.read_csv(os.path.join(dirpath, 'vis_v1.csv'))
    df = df[~df['filenames'].isnull()]
    columns = ['time', 'x', 'y', 'z', 'q1', 'q2', 'q3', 'q4']
    slam_df = pd.read_csv(os.path.join(dirpath, 'slam_trajectory.csv'),
                          sep=' ',
                          names=columns)
    #	imu_df = calculate_position(df, dirpath)
    for i, row in slam_df.iterrows():
        # if pd.isnull(row['filenames']):
        #     continue
        pass  # loop body truncated in the original example
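The space-separated slam_trajectory.csv with columns time x y z q1..q4 looks like a TUM-style trajectory (scalar-last quaternion), which is why row_to_quaternion moves q4 to the front for the scalar-first numpy-quaternion constructor. Below is a minimal, self-contained sketch of turning such a row into a ground-plane heading; the sample values and the yaw extraction are illustrative and not part of the original snippet.

from types import SimpleNamespace

import numpy as np
import quaternion  # numpy-quaternion, the same package imported above


def row_to_quaternion(row):
    # scalar-first constructor: w, x, y, z
    return np.quaternion(row.q4, row.q1, row.q2, row.q3)


# Fake trajectory row: 90 degree rotation about z, translated to (0.1, 0.0, 0.2)
row = SimpleNamespace(x=0.1, y=0.0, z=0.2,
                      q1=0.0, q2=0.0, q3=0.7071068, q4=0.7071068)
q = row_to_quaternion(row)
R = quaternion.as_rotation_matrix(q)      # 3x3 rotation matrix of the keyframe
yaw = np.arctan2(R[1, 0], R[0, 0])        # heading on the ground plane
print(np.degrees(yaw))                    # ~90.0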
Example #2
def preprocess(proc):  # signature assumed; the original example begins mid-function
	# alternative preprocessing steps left commented out by the original author:
	#proc = cv2.fastNlMeansDenoising(proc, None, 2, 4, 5)
	#cv2.imshow('denoise', proc)
	#proc = cv2.equalizeHist(proc,)
	#proc = gamma_correction(proc, 0.9)
	#kernel = np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])
	#proc = cv2.filter2D(proc, -1, kernel)
	#cv2.imshow('equal', proc)
	#proc = cv2.adaptiveThreshold(proc, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 15, 2)
	#cv2.imshow('adap', proc)
	#proc = cv2.Canny(proc, 20, 150, apertureSize=3)
	#cv2.imshow('edge', proc)
	return proc

if __name__ == '__main__':
	cal_data = r"C:\repositories\autonomic_car\selfie_car\src\pc\settings\camera_calibration\calib.json"
	cal = VideoCalibration(cal_data, (320, 240))
	dirpath = r"C:\Users\hawker\Dropbox\Public\selfie_car\data_intake3\v1.23"
	perspective = InversePerspective(img_shape=(250,250), desired_shape=(80,160), marigin=0.25)
	import os
	import pandas as pd
	pd.read_csv(os.path.join(dirpath, 'vis_v1.csv'))  # NOTE: the original discards this DataFrame; assign it if the CSV is actually needed
	images = sorted(os.listdir(dirpath))  # sorted so the loop below compares consecutive frames
	images = filter(lambda x: ".jpg" in x, images)
	images = map(lambda x: os.path.join(dirpath, x), images)
	images = list(images)
	bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
	orb = cv2.ORB_create(nfeatures=5000, scaleFactor=2., )#, edgeThreshold=17, patchSize=64)
	vx_list = []
	vy_list = []
	for i in range(1,len(images)):
		img_path_1 = images[i]
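The loop above is cut off right after loading the current frame path. Below is a sketch of how the ORB detector and cross-checked Hamming matcher set up above are typically used to estimate the per-frame pixel shift between consecutive images; the estimate_shift helper and the synthetic test frames are illustrative and not taken from the original.

import cv2
import numpy as np


def estimate_shift(img_prev, img_curr, orb, bf):
    """Median keypoint displacement (vx, vy) between two consecutive frames."""
    kp1, des1 = orb.detectAndCompute(img_prev, None)
    kp2, des2 = orb.detectAndCompute(img_curr, None)
    if des1 is None or des2 is None:
        return 0.0, 0.0
    matches = sorted(bf.match(des1, des2), key=lambda m: m.distance)[:200]
    if not matches:
        return 0.0, 0.0
    shifts = np.array([np.subtract(kp2[m.trainIdx].pt, kp1[m.queryIdx].pt)
                       for m in matches])
    vx, vy = np.median(shifts, axis=0)   # median is robust to bad matches
    return float(vx), float(vy)


if __name__ == '__main__':
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    orb = cv2.ORB_create(nfeatures=5000, scaleFactor=2.)
    rng = np.random.default_rng(0)
    prev = (rng.random((240, 320)) * 255).astype(np.uint8)
    curr = np.roll(prev, 5, axis=1)      # same frame shifted 5 px to the right
    print(estimate_shift(prev, curr, orb, bf))  # vx should be close to 5

With real data, consecutive entries of images would replace the synthetic pair, and the results would be appended to vx_list and vy_list as set up above.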
Example #3

if __name__ == '__main__':
    ## @MentalMap
    # Calibration is needed to undistort
    # Perspective reverses the image
    parser = ArgumentParser()
    parser.add_argument('calibration', help="path to json calibration file")
    parser.add_argument('map_path', help="path to maps yaml file")
    parser.add_argument('images_path', help="path to folder with images")
    #'/home/mwm/Desktop/datadumps/01-07-19-drive-v1_22'
    parser.add_argument('assoc_file', help="path to assoc.json file")
    #'./assets/maps/testmap1/assoc.json'
    args = parser.parse_args()
    img_shape = 320, 240
    calibration = VideoCalibration(args.calibration, img_shape, new_shape=img_shape)
    mapper = Mapper2(map_shape=(800,800,3), type='additive')
    perspective = InversePerspective(img_shape=(250, 250), desired_shape=(100, 130), marigin=0.25)
    id_and_pose = get_poses(args.map_path)
    xy_transformation = Pose3Dto2D([idp[1] for idp in id_and_pose])
    for kf_id, pose, ts, filename, img in data_iterator(id_and_pose, args.images_path, args.assoc_file):
        if True:  # visualization block (always on in this example)
            cv2.imshow('img', img)
            undi = calibration.undistort(img)
            undi[:100, :, :] = 0  # blank out the top rows before re-projection
            img = perspective.inverse(undi)
            cv2.imshow('undi', img)
            cv2.waitKey(0)
        new_pose = xy_transformation.transform.dot(pose)
        # Rotation.from_dcm is deprecated in newer SciPy (renamed from_matrix)
        new_angles = transform.Rotation.from_dcm(new_pose[:3, :3]).as_euler('xyz')
        position = new_pose[:2, 3]
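Since from_dcm was renamed in newer SciPy, the same yaw-and-position extraction with the current API looks like the sketch below; pose_to_2d is an illustrative helper and the pre-multiplication by xy_transformation.transform is not reproduced here.

import numpy as np
from scipy.spatial import transform


def pose_to_2d(pose_4x4):
    """Reduce a 4x4 keyframe pose to ground-plane coordinates (x, y, yaw)."""
    # from_matrix is the current name for the deprecated from_dcm
    yaw = transform.Rotation.from_matrix(pose_4x4[:3, :3]).as_euler('xyz')[2]
    x, y = pose_4x4[:2, 3]
    return float(x), float(y), float(yaw)


if __name__ == '__main__':
    # Illustrative pose: 90 degree yaw, translated to (1.0, 2.0, 0.0)
    c, s = np.cos(np.pi / 2), np.sin(np.pi / 2)
    pose = np.array([[c, -s, 0.0, 1.0],
                     [s,  c, 0.0, 2.0],
                     [0.0, 0.0, 1.0, 0.0],
                     [0.0, 0.0, 0.0, 1.0]])
    print(pose_to_2d(pose))   # (1.0, 2.0, ~1.5708)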
Example #4
# (the original example begins mid-statement; only the tail ").astype(np.float32)" of a module-level definition survives the truncation)

if __name__ == "__main__":
    ## @MentalMap
    # Calibration is needed to undistort
    # Perspective reverses the image
    parser = ArgumentParser()
    parser.add_argument("calibration", help="path to json calibration file")
    parser.add_argument("map_path", help="path to maps yaml file")
    parser.add_argument("images_path", help="path to folder with images")
    #'/home/mwm/Desktop/datadumps/01-07-19-drive-v1_22'
    parser.add_argument("assoc_file", help="path to assoc.json file")
    #'./assets/maps/testmap1/assoc.json'
    args = parser.parse_args()
    img_shape = 320, 240
    calibration = VideoCalibration(args.calibration, img_shape, new_shape=img_shape)
    mapper = Mapper2(
        map_shape=(800, 800, 3),
        coord_coef=((50, 400), (150, 400)),
        rot_cent_coef=((0.5, 0), (1.1, 0)),
        type="additive",
    )
    perspective = InversePerspective(
        perspective_area=HAXOR_SRC3,
        img_shape=(250, 250),
        desired_shape=(100, 130),
        marigin=0.25,
    )
    perspective.calculate_roi_mask()
    my_osmap = OsmapData.from_map_path(args.map_path)
    id_and_pose = my_osmap.id_and_pose()
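The last two scripts take the same four positional arguments. A quick way to see the expected invocation is to hand parse_args an explicit argument list, as in the sketch below; the script name and the first three paths are illustrative, while the assoc.json path comes from the comment in the example.

from argparse import ArgumentParser

parser = ArgumentParser()
parser.add_argument("calibration", help="path to json calibration file")
parser.add_argument("map_path", help="path to maps yaml file")
parser.add_argument("images_path", help="path to folder with images")
parser.add_argument("assoc_file", help="path to assoc.json file")

# Equivalent command line:
#   python build_map.py calib.json map.yaml ./frames ./assets/maps/testmap1/assoc.json
args = parser.parse_args([
    "calib.json",
    "map.yaml",
    "./frames",
    "./assets/maps/testmap1/assoc.json",
])
print(args.calibration, args.map_path, args.images_path, args.assoc_file)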