Example #1
    def test_constructor(self):
        """ verify all construction methods are identical. """
        h_from_ndarray = Homography([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
        h_from_affine = Homography(Affine.identity())
        h_from_homography = Homography(h_from_ndarray)

        self.assertEqual(h_from_ndarray, h_from_affine)
        self.assertEqual(h_from_ndarray, h_from_homography)
        self.assertEqual(h_from_ndarray, Homography.identity())
Example #2
    def test_homography_decomposition(self):
        """
        Test homography decomposition with the following steps:
        1. prepare an affine transform with a random rotation R and translation t
        2. build a homography from the affine of step 1, using a random projection P
           (NOTE: t is applied first, then R!)
        3. decompose the homography using P to recover R' and t'
        4. compare (R, t) with (R', t')
        """
        # step 1: random proper rotation (det = +1), translation and projection
        R = ortho_group.rvs(dim=3)
        R /= np.linalg.det(R)  # flip a reflection into a proper rotation if needed
        t = np.random.random((3, )) * 2 - 1
        # lower-triangular projection with unit diagonal
        P = np.random.random((3, 3))
        P[0, 0] = P[1, 1] = P[2, 2] = 1
        P[0, 1:] = 0
        P[1, 2] = 0

        # step 2: affine part with columns [r1, r2, R @ t] (t applied before R)
        A = np.array([R[:, 0], R[:, 1], R @ t]).T
        H = P @ A

        # step 3
        hom = Homography(H, P)
        R_, t_ = hom.R, hom.t

        # step 4
        np.testing.assert_almost_equal(R, R_)
        np.testing.assert_almost_equal(t, t_)
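For reference, the column layout [r1, r2, R @ t] used for A above follows from applying the translation before the rotation to points on the plane z = 0. A minimal sketch that checks this identity (not part of the original test; the model p' ~ P @ R @ (p + t) is an assumption read off the construction above):

import numpy as np
from scipy.stats import ortho_group

R = ortho_group.rvs(dim=3)
t = np.random.random((3, )) * 2 - 1
x, y = np.random.random(2)

# a plane point (x, y, 0), translated first and then rotated ...
lhs = R @ (np.array([x, y, 0.0]) + t)
# ... equals x * r1 + y * r2 + R @ t, i.e. exactly the columns used for A
rhs = x * R[:, 0] + y * R[:, 1] + R @ t
np.testing.assert_almost_equal(lhs, rhs)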
Example #3
def get_point_transform():
    # pts_src_ and pts_real_ are module-level calibration point lists defined elsewhere
    hom = Homography(np.array(pts_src_), np.array(pts_real_))
    # dist = hom.get_point_transform([1136, 184], [942, 363])
    A = [1701, 746]
    B = [1851, 43]
    dist = hom.get_point_transform(A, B)
    print(dist)
Example #4
    def __init__(self, homography_file, filter_file, tune_param):

        self.bridge = CvBridge()

        # load homography matrix
        self.homography_file = str(homography_file)
        self.homography = Homography(self.homography_file)

        self.homography_matrix = self.homography.get_homography_matrix()

        if self.homography_matrix is None:
            print("File doesn't have a homography matrix")
            print("Run the find_homography script first")

        # load parameters used in the filter process
        self.filter_file = filter_file
        self.filter_param = Parameters(filter_file)
        self.tune_param = tune_param

        # configure parameters
        if tune_param:
            self.filter_param.create_trackbar()

            if os.path.exists(self.filter_file):
                self.filter_param.load()
                self.filter_param.set_trackbar_values()

        # autonomous mode
        else:
            if os.path.exists(self.filter_file):
                self.filter_param.load()

            else:
                print("Filter file doesn't exist")
                exit(1)

        self.init = True

        self.image = None
        self.warp_res = None
        self.color_res = None
        self.filter_res = None

        self.lanes_list = None
Example #5
    def __init__(self):

        # Pub/Sub
        self.sub_camera = rospy.Subscriber('/zed/rgb/image_rect_color', Image,
                                           self.sub_callback_image)
        self.pub_bounding_box = rospy.Publisher('/bounding_box',
                                                bounding_box,
                                                queue_size=10)
        self.pub_state = rospy.Publisher('/state',
                                         Float32MultiArray,
                                         queue_size=10)
        self.pub_image = rospy.Publisher('/image_bounded',
                                         Image,
                                         queue_size=10)

        # Image data
        self.bridge = CvBridge()
        self.H = Homography()
Example #6
    def test_ransac(self):
        """
        Test RANSAC with the following steps:
        1. make a random homography (ground truth)
        2. make inliers with noise
        3. make outliers
        4. run RANSAC and compare the fitted model with the ground truth
        """
        N_TOTAL = 1000
        N_INLIER = 500
        N_ITER = trial_count(N_INLIER / N_TOTAL, 4)
        N_OUTLIER = N_TOTAL - N_INLIER
        NOISE_SIZE = 0

        # step 1
        gt = np.random.random((3, 3))
        gt[2, :] *= 100
        gt[2, 2] = 1

        # step 2
        hom = Homography(gt)
        x = np.random.random((N_INLIER, 2))
        y = hom.transform(x)

        x += np.random.random((N_INLIER, 2)) * NOISE_SIZE
        y += np.random.random((N_INLIER, 2)) * NOISE_SIZE
        inliers = np.concatenate((x, y), axis=1)

        # step 3
        outliers = np.random.random((N_OUTLIER, 4))
        data = np.concatenate((inliers, outliers), axis=0)

        # step 4
        from ransac import RANSAC
        ransac_inst = RANSAC(
            n_sample=4,
            n_iter=N_ITER,
            err_thres=0.1,
        )
        model = ProjectionModel()
        best_func, best_inliers = ransac_inst(data, model)
        np.testing.assert_almost_equal(best_func.M, gt)
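The iteration budget above comes from a trial_count(inlier_ratio, sample_size) helper whose body is not shown in these examples. A minimal sketch of the standard RANSAC trial-count formula (the 0.99 success probability and the exact signature are assumptions, not taken from the source):

import math

def trial_count(inlier_ratio, sample_size, success_prob=0.99):
    # number of iterations needed so that, with probability success_prob, at
    # least one sample of `sample_size` points is drawn entirely from inliers
    return math.ceil(math.log(1 - success_prob) /
                     math.log(1 - inlier_ratio ** sample_size))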
Example #7
    def __init__(self, name, roi, chessboard_size, flip_src, fps_ratio):
        self.name = name
        self.roi = roi

        # raw videos
        self.src_vid_paths = sorted(glob.glob(SRC_VID_FORMAT.format(name)))
        self.dst_vid_paths = sorted(glob.glob(DST_VID_FORMAT.format(name)))

        if len(self.src_vid_paths) != len(self.dst_vid_paths):
            raise ValueError('Numbers of sharp and blur videos do not match!')
        print(f'{len(self.src_vid_paths)} videos found!')

        # Flip the images of the left camera
        self.flip_src = flip_src

        # Ratio of sharp fps and blur fps
        self.fps_ratio = fps_ratio

        # crop center
        self.cropper = Cropper(roi)

        # Read and cache all calibration data
        # homography data
        hom_src_path = HOM_SRC_FORMAT.format(name)
        hom_dst_path = HOM_DST_FORMAT.format(name)

        if (not osp.isfile(hom_src_path)) or (not osp.isfile(hom_dst_path)):
            raise ValueError('Missing homography data!')

        hom_src = cv2.imread(hom_src_path)
        hom_dst = cv2.imread(hom_dst_path)

        # color correction data
        cc_src_paths = sorted(glob.glob(COLOR_CORRECT_SRC_FORMAT.format(name)))
        cc_dst_paths = sorted(glob.glob(COLOR_CORRECT_DST_FORMAT.format(name)))

        if (len(cc_src_paths) != len(cc_dst_paths)) or (
                len(cc_src_paths) != len(self.src_vid_paths)):
            raise ValueError('Number of color correction frames is not valid!')

        self.ccs_src = [
            cv2.imread(cc_src_path) for cc_src_path in cc_src_paths
        ]
        self.ccs_dst = [
            cv2.imread(cc_dst_path) for cc_dst_path in cc_dst_paths
        ]

        # Sharp videos captured from the beamsplitter-based system are flipped
        if self.flip_src:
            hom_src = self.flip(hom_src)
            self.ccs_src = [self.flip(ccs) for ccs in self.ccs_src]

        # Initialize calibration tool
        # Note:
        # homography: sharp -> blur
        # color correction: blur -> sharp
        self.warper = Homography(hom_src, hom_dst, chessboard_size)

        cv2.imwrite('debug1.png', self.crop(self.warp(self.ccs_src[0])))
        cv2.imwrite('debug2.png', self.crop(self.ccs_dst[0]))

        self.ccs_src = [self.warp(ccs) for ccs in self.ccs_src]

        # remove black margins of the camera
        self.ccs_src = [self.crop(ccs) for ccs in self.ccs_src]
        self.ccs_dst = [self.crop(ccs) for ccs in self.ccs_dst]

        self.color_transformer = [
            MyColorCorrection(cc_dst, cc_src)
            for cc_dst, cc_src in zip(self.ccs_dst, self.ccs_src)
        ]
Example #8
    del parameters

if __name__ == "__main__":
    global homography

    rospy.init_node('find_homography')
    rospy.loginfo("Starting find_homography.py")

    if len(sys.argv) < 2:
        rospy.loginfo("Error in find_homography!")
        rospy.loginfo("Cant find json file!")
        exit(1)

    file_path = sys.argv[1]

    homography = Homography(file_path)

    # create trackbar and set values
    create_trackbar()

    '''
    # set world points
    world_points = [
        [000.0, 800.0],
        [400.0, 800.0],
        [400.0, 600.0],
        [000.0, 600.0]]
    '''

    ''' 
    #Gazebo
Example #9
def get_image_homography(image_file):
    puntos = []
    imagen = openCv.imread(image_file)
    homography = Homography(puntos, imagen)
    imagenH = homography.getHomography()  # result computed but not used here
    return homography.getPuntos()
Example #10
                '--image',
                required=True,
                help='Path to the input image containing the logo/image')
ap.add_argument('-M',
                '--match',
                help='Type of matching algorithm to use',
                default='F')
args = vars(ap.parse_args())

#Getting the images ready
img1 = cv2.imread(args['logo'], cv2.IMREAD_GRAYSCALE)
img2 = cv2.imread(args['image'], cv2.IMREAD_GRAYSCALE)

match_type = args['match']
img_matches = None

if match_type == 'B':
    #Matching the images using the Brute-Force algorithm.
    img_matches = BruteForce(img1, img2)
elif match_type == 'R':
    #Matching the images using BruteForce KNN algorithm with Ratio Test.
    img_matches = BruteForceKNN(img1, img2)
elif match_type == 'F':
    img_matches = FLANN(img1, img2)
elif match_type == 'H':
    img_matches = Homography(img1, img2)
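else:
    # Not in the original snippet: any other -M value would leave img_matches as
    # None and the display below would fail, so a guard like this may be useful.
    raise SystemExit(f"Unknown match type: {match_type!r} (expected B, R, F or H)")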

#Displaying the results and storing them
plt.imshow(img_matches)
plt.show()
cv2.imwrite('matched.png', img_matches)
Example #11
size = 500

if __name__ == "__main__":
    cap = cv2.VideoCapture(f'./{file}')

    with open(f'calib_{file}.json') as f:
        calib = json.load(f)

    image_points = np.array(calib["image"])
    map_points = np.array(calib["map"])
    dist_coords = calib["dist"]

    scale = calib["scale"]

    homography = Homography(
        image_points,
        size * (scale / 2) + size * scale + map_points * size * scale)

    dist_coords = [
        homography.project(dist_coords[0]),
        homography.project(dist_coords[1])
    ]
    min_dist = np.linalg.norm(dist_coords[0] - dist_coords[1])

    frame_idx = 0

    while True:
        r, img = cap.read()
        img = cv2.resize(img, (1280, 720))

        if frame_idx == 0:
Example #12
    if len(points) > 1:
        _, speed_av = get_weighted_speed(frames, points, hom)
        output = get_momentum_speeds(frames, points, hom)
        speed_median = output[-1]
        return speed_av, speed_median
    else:
        return 0, 0


if __name__ == "__main__":
    # res_file = open('../video_cruise_control/second_experiment/temp/speed_file', 'w')
    # with open(path_to_targets, 'r') as targets_file:
    #     lines = targets_file.read().splitlines()
    # targets = dict((x.split(' ')[0], float(x.split(' ')[1])) for x in lines)
    
    hom = Homography(np.array(pts_src_), np.array(pts_real_))
    plate_cascade = cv.CascadeClassifier(path_to_cascade)
    caffe.set_mode_cpu()
    net = caffe.Net(path_to_model, path_to_weights, caffe.TEST)

    # videos = []
    # timestamps = []
    # files = os.listdir(path_to_video_and_timestamp)
    # for f in files:
    #     if not f.endswith('_timestamps') and f.endswith('mp4'):
    #         videos.append(f)
    #         timestamps.append(f + '_timestamps')
    # v_t = dict(zip(videos, timestamps))
    
    # v_t = {'1.mp4':'', '3.mp4':'', '5.mp4':'', '7.mp4':'', '9.mp4':'', '11.mp4':''}
    # v_t = {'2.mp4':'', '4.mp4':'', '6.mp4':'', '8.mp4':'', '10.mp4':'', '12.mp4':''}