def main():
    args = get_args()

    # Check for calib_images folder
    if not os.path.exists('calib_images'):
        print 'Please create a directory "calib_images"'
        return

    # Setup video display
    video_disp = Display({'name': 'Video'})

    # Setup controls
    setup_trackbars('Controls')  # , thresholds)

    # Get input video
    video = Video(args['video'])
    num_frames = video.get_num_frames()

    # Get the first frame to start with
    frame = video.next_frame()

    global seek_callback_action

    while True:
        if play_or_pause == 'Play':
            if not seek_callback_action:
                frame = video.next_frame()
            else:
                frame = video.get_frame(cur_seek_pos * num_frames / 100)
                seek_callback_action = False

        if video.end_reached():
            # Wait indefinitely if end of video reached,
            # or until keypress, and then exit
            cv2.waitKey(0)
            break

        video_disp.refresh(frame)

        cur_frame_num = video.get_cur_frame_num()

        # Service the key events:
        # if s is pressed, save the current frame
        # if b is pressed, go back 60 frames (~1 s)
        # if n is pressed, go ahead 60 frames (~1 s)
        if video_disp.key_pressed('s'):
            video_file = os.path.basename(args['video']).lower()
            img_file_name = 'calib_images/{}_{}.png'.format(
                os.path.splitext(video_file)[0], cur_frame_num)
            if cv2.imwrite(img_file_name, frame):
                print 'Saved', img_file_name
        elif video_disp.key_pressed('n'):
            # The seek position is a percentage, so clamp to [0, 100]
            seek_callback(
                min(((cur_frame_num + 60) * 100) // num_frames, 100))
        elif video_disp.key_pressed('b'):
            seek_callback(
                max(((cur_frame_num - 60) * 100) // num_frames, 0))

        # Add quitting event
        if video_disp.can_quit():
            break
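Each of these scripts starts by calling get_args() and indexing the result like a dict (args['video'], plus args['model'] in the undistortion script below). The helper itself is not shown; here is a minimal sketch of what an argparse-based version might look like. The flag names and defaults are assumptions, not the original implementation.

import argparse


def get_args():
    # Parse command-line arguments and return them as a plain dict,
    # matching the args['video'] / args['model'] lookups used in the scripts
    parser = argparse.ArgumentParser(description='OpenCV video test script')
    parser.add_argument('-v', '--video', required=True,
                        help='Path to the input video file')
    parser.add_argument('-m', '--model', default='P',
                        help='Camera model: P (pinhole) or F (fisheye)')
    return vars(parser.parse_args())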
def main():
    args = get_args()

    # Setup video displays
    orig_video_disp = Display({'name': 'Original_Video'})
    thresh_video_disp = Display({'name': 'Thresholded_Video'})

    # Setup controls
    setup_trackbars(controls_window_name)

    # Get input video
    video = Video(args['video'])
    num_frames = video.get_num_frames()

    # Get the first frame to start with
    frame = video.next_frame()

    # To communicate with seek callback
    global seek_callback_action

    while True:
        if play_or_pause == 'Play':
            if not seek_callback_action:
                frame = video.next_frame()
            else:
                frame = video.get_frame(cur_seek_pos * num_frames / 100)
                seek_callback_action = False

        if video.end_reached():
            # Wait indefinitely if end of video reached,
            # or until keypress, and then exit
            cv2.waitKey(0)
            break

        # Refresh original video display
        orig_video_disp.refresh(frame)

        # Get threshold values
        h_min, h_max, s_min, s_max, v_min, v_max = get_thresholds(
            controls_window_name)

        # Convert image to HSV and apply threshold
        frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        frame_thresh = cv2.inRange(
            frame_hsv, (h_min, s_min, v_min), (h_max, s_max, v_max))

        # Refresh thresholded video display
        thresh_video_disp.refresh(frame_thresh)

        # Add quitting event
        if orig_video_disp.can_quit() or thresh_video_disp.can_quit():
            break

    # On quit, save the thresholds
    save_config = SaveConfig('new_thresholds', 'thresholds')
    save_config.save(h_min=h_min, h_max=h_max, s_min=s_min,
                     s_max=s_max, v_min=v_min, v_max=v_max)
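The scripts persist and restore parameters through SaveConfig and LoadConfig, whose implementations are not shown. Below is a minimal npz-backed sketch that matches how they are called here (SaveConfig takes a base name and stores keyword arguments; LoadConfig takes a full path and returns something indexable by key). The real classes may differ; this is only an assumption for illustration.

import numpy as np


class SaveConfig(object):
    # Sketch: save keyword arguments to '<name>.npz';
    # `description` is kept only as a label
    def __init__(self, name, description):
        self.file_name = name + '.npz'
        self.description = description

    def save(self, **kwargs):
        np.savez(self.file_name, **kwargs)


class LoadConfig(object):
    # Sketch: load a previously saved '.npz' file and return
    # a dict-like object of arrays (np.load on an .npz supports indexing by key)
    def __init__(self, file_name, description):
        self.file_name = file_name
        self.description = description

    def load(self):
        return np.load(self.file_name)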
def scheme_2(file_name):
    video = Video(file_name)
    display = Display()

    # First get the number of frames
    num_frames = video.get_num_frames()

    # Get each frame from the video and display it.
    # If the step is greater than one (simulating random
    # seeks), the playback will be slow.
    for i in range(num_frames):
        # Get the desired frame
        frame = video.get_frame(i)

        # Do some operation
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Refresh display with new image
        display.refresh(gray)

        # Quit midway if required
        if display.can_quit():
            break
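scheme_2 above fetches every frame by index, which simulates random access. For contrast, a sequential-read variant built from the same Video and Display helpers might look like the sketch below; the name scheme_1 and its exact shape are assumptions.

def scheme_1(file_name):
    # Sequential playback: read frames in order with next_frame(),
    # avoiding a seek on every iteration
    video = Video(file_name)
    display = Display()

    while True:
        frame = video.next_frame()
        if video.end_reached():
            break

        # Do some operation
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Refresh display with new image
        display.refresh(gray)

        # Quit midway if required
        if display.can_quit():
            break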
def main():
    args = get_args()

    # Read in configuration
    load_config = LoadConfig('config/thresholds.npz', 'thresholds')
    thresholds = load_config.load()

    # Setup video displays
    orig_video_disp = Display({'name': 'Original_Video'})
    processed_video_disp = Display({'name': 'Processed_Video'})

    # Setup controls
    setup_trackbars(controls_window_name, thresholds)

    # Get input video
    video = Video(args['video'])
    num_frames = video.get_num_frames()

    # Get the first frame to start with
    frame = video.next_frame()

    global seek_callback_action

    while True:
        if play_or_pause == 'Play':
            if not seek_callback_action:
                frame = video.next_frame()
            else:
                frame = video.get_frame(cur_seek_pos * num_frames / 100)
                seek_callback_action = False

        if video.end_reached():
            # Wait indefinitely if end of video reached,
            # or until keypress, and then exit
            cv2.waitKey(0)
            break

        # Refresh original video display
        orig_video_disp.refresh(frame)

        # Get threshold and morphology parameters
        h_min, h_max, s_min, s_max, v_min, v_max, erode_size, dilate_size = get_params(
            controls_window_name)

        # Convert image to HSV and apply threshold
        frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        frame_thresh = cv2.inRange(frame_hsv, (h_min, s_min, v_min),
                                   (h_max, s_max, v_max))

        # Apply erosion:
        # create a kernel first and then apply it
        # (+1 keeps the kernel size at least 1 when the trackbar reads 0)
        erode_kernel = cv2.getStructuringElement(
            cv2.MORPH_ELLIPSE, (erode_size + 1, erode_size + 1))
        frame_erode = cv2.erode(frame_thresh, erode_kernel)

        # Apply dilation:
        # create a kernel first and then apply it
        dilate_kernel = cv2.getStructuringElement(
            cv2.MORPH_ELLIPSE, (dilate_size + 1, dilate_size + 1))
        frame_dilate = cv2.dilate(frame_erode, dilate_kernel)

        # Refresh processed video display
        processed_video_disp.refresh(frame_dilate)

        # Add quitting event
        if orig_video_disp.can_quit() or processed_video_disp.can_quit():
            break

    # On quit, save the params
    save_config = SaveConfig('new_erode_dilate', 'erode_dilate')
    save_config.save(dilate_size=dilate_size, erode_size=erode_size)
def main():
    args = get_args()

    # Read in configuration
    load_config = LoadConfig('new_calib_{}.npz'.format(args['model'].lower()),
                             'calib')
    calib = load_config.load()

    # Setup video display
    video_disp = Display({'name': 'Video'})

    # Setup controls
    setup_trackbars('Controls')  # , thresholds)

    # Get input video
    video = Video(args['video'])
    num_frames = video.get_num_frames()

    # Get the first frame to start with
    frame = video.next_frame()

    global seek_callback_action

    while True:
        if play_or_pause == 'Play':
            if not seek_callback_action:
                frame = video.next_frame()
            else:
                frame = video.get_frame(cur_seek_pos * num_frames / 100)
                seek_callback_action = False

        if video.end_reached():
            # Wait indefinitely if end of video reached,
            # or until keypress, and then exit
            cv2.waitKey(0)
            break

        # Undistort according to the pinhole model
        if args['model'].upper() == 'P':
            # Make sure the distortion coefficients
            # follow the pinhole model
            if calib['dist_coeffs'].shape[1] != 5:
                print 'Input configuration probably not pinhole'
                return

            # Hardcoded image size, as this is a test script
            img_size = (1920, 1080)

            # First create scaled intrinsics because we will undistort
            # into the region beyond the original image region
            new_calib_matrix, _ = cv2.getOptimalNewCameraMatrix(
                calib['camera_matrix'], calib['dist_coeffs'], img_size, 0.35)

            # Then calculate the new image size according to the scaling.
            # Unfortunately the Python API doesn't directly provide
            # the new image size. They forgot?
            new_img_size = (
                int(img_size[0] + (new_calib_matrix[0, 2] -
                                   calib['camera_matrix'][0, 2])),
                int(img_size[1] + (new_calib_matrix[1, 2] -
                                   calib['camera_matrix'][1, 2])))

            # Standard routine of creating a new rectification
            # map for the given intrinsics and mapping each
            # pixel onto the new map with linear interpolation
            map1, map2 = cv2.initUndistortRectifyMap(
                calib['camera_matrix'], calib['dist_coeffs'], None,
                new_calib_matrix, new_img_size, cv2.CV_16SC2)
            img_undistorted = cv2.remap(frame, map1, map2, cv2.INTER_LINEAR,
                                        borderMode=cv2.BORDER_CONSTANT)

        # Undistort according to the fisheye model
        elif args['model'].upper() == 'F':
            # Make sure the distortion coefficients
            # follow the fisheye model
            if calib['dist_coeffs'].shape[0] != 4:
                print 'Input configuration probably not fisheye'
                return

            # Hardcoded image size, as this is a test script.
            # As already ranted about, the image size indexing
            # is reversed in the fisheye module.
            img_size = (1920, 1080)

            # The basic undistortion DOES NOT work with the fisheye module:
            # img_undistorted = cv2.fisheye.undistortImage(
            #     frame, calib['camera_matrix'], calib['dist_coeffs'])

            # First create scaled intrinsics because we will undistort
            # into the region beyond the original image region. The balance
            # parameter here is the equivalent of the alpha parameter in the
            # pinhole model.
            new_calib_matrix = cv2.fisheye.estimateNewCameraMatrixForUndistortRectify(
                calib['camera_matrix'], calib['dist_coeffs'], img_size,
                np.eye(3), balance=1)

            # Then calculate the new image size according to the scaling.
            # If they forgot this in the pinhole Python API, we can't complain
            # about the fisheye module. Note the reversed indexing here too.
            new_img_size = (
                int(img_size[0] + (new_calib_matrix[0, 2] -
                                   calib['camera_matrix'][0, 2])),
                int(img_size[1] + (new_calib_matrix[1, 2] -
                                   calib['camera_matrix'][1, 2])))

            # Standard routine of creating a new rectification
            # map for the given intrinsics and mapping each
            # pixel onto the new map with linear interpolation
            map1, map2 = cv2.fisheye.initUndistortRectifyMap(
                calib['camera_matrix'], calib['dist_coeffs'], np.eye(3),
                new_calib_matrix, new_img_size, cv2.CV_16SC2)
            img_undistorted = cv2.remap(frame, map1, map2, cv2.INTER_LINEAR,
                                        borderMode=cv2.BORDER_CONSTANT)

        # Update GUI with the new image
        video_disp.refresh(img_undistorted)

        # Service the s key to save images
        if video_disp.key_pressed('s'):
            cur_frame_num = video.get_cur_frame_num()
            orig_img_file_name = 'image_for_markers_orig.png'
            undistorted_img_file_name = 'image_for_markers_undistorted.png'
            if cv2.imwrite(orig_img_file_name, frame):
                print 'Saved original {} at frame {}'.format(
                    orig_img_file_name, cur_frame_num)
            if cv2.imwrite(undistorted_img_file_name, img_undistorted):
                print 'Saved undistorted {} at frame {}'.format(
                    undistorted_img_file_name, cur_frame_num)

        # Add quitting event
        if video_disp.can_quit():
            break
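For the pinhole branch, when the undistorted output can keep the original canvas size, the map-building and remap steps collapse into a single cv2.undistort call. The sketch below is an alternative to what the script does, not a replacement for it; the helper name is hypothetical and it assumes the same calib layout ('camera_matrix', 'dist_coeffs') loaded above.

import cv2


def undistort_pinhole_simple(frame, calib, img_size=(1920, 1080), alpha=0.35):
    # Scale the intrinsics exactly as in the script above...
    new_matrix, _ = cv2.getOptimalNewCameraMatrix(
        calib['camera_matrix'], calib['dist_coeffs'], img_size, alpha)
    # ...but let cv2.undistort build the rectification map and remap
    # internally; the output keeps the original image size (no enlarged canvas)
    return cv2.undistort(frame, calib['camera_matrix'], calib['dist_coeffs'],
                         newCameraMatrix=new_matrix)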
def main():
    args = get_args()

    # Read in configuration
    # load_config = LoadConfig('config/thresholds.npz', 'thresholds')
    # thresholds = load_config.load()

    # Setup video display
    video_disp = Display({'name': 'Video'})

    # Setup controls
    setup_trackbars('Controls')  # , thresholds)

    # Get input video
    video = Video(args['video'])
    num_frames = video.get_num_frames()

    # Get the first frame to start with
    frame = video.next_frame()

    global seek_callback_action

    # Deque for storing calibration images
    calib_img_deck = deque(maxlen=MAX_NUM_IMAGES_FOR_CALIB)

    # Deques for storing ChArUco info
    charuco_corners_deck = deque(maxlen=MAX_NUM_IMAGES_FOR_CALIB)
    charuco_ids_deck = deque(maxlen=MAX_NUM_IMAGES_FOR_CALIB)

    skip_count = 0

    # Initial guess for the intrinsics
    test_camera_matrix = np.array([
        [11096.77, 0, 540],
        [0, 11096.77, 960],
        [0, 0, 1]
    ])

    while True:
        if play_or_pause == 'Play':
            if not seek_callback_action:
                frame = video.next_frame()
            else:
                frame = video.get_frame(cur_seek_pos * num_frames / 100)
                seek_callback_action = False

        if video.end_reached():
            # Wait indefinitely if end of video reached,
            # or until keypress, and then exit
            cv2.waitKey(0)
            break

        # Detect ArUco markers and interpolate the ChArUco corners
        corners, ids, rejected_img_points = cv2.aruco.detectMarkers(
            frame, dictionary)
        if ids is not None:
            img_markers = cv2.aruco.drawDetectedMarkers(frame, corners, ids)

            num_charuco, charuco_corners, charuco_ids = cv2.aruco.interpolateCornersCharuco(
                corners, ids, frame, board, cameraMatrix=test_camera_matrix)
            if charuco_corners is not None:
                img_markers = cv2.aruco.drawDetectedCornersCharuco(
                    img_markers, charuco_corners, charuco_ids)

            # Collect a frame for calibration only if the full board
            # is visible, and only every 15th frame
            if ids.shape[0] == MAX_ARUCO_IDS \
                    and num_charuco == MAX_CHARUCO_IDS \
                    and skip_count % 15 == 0:
                calib_img_deck.append(frame)
                charuco_corners_deck.append(charuco_corners)
                charuco_ids_deck.append(charuco_ids)
        else:
            img_markers = frame

        # Overlay the number of collected images
        cv2.putText(img_markers,
                    '{}/{}'.format(len(calib_img_deck),
                                   MAX_NUM_IMAGES_FOR_CALIB),
                    (200, 100), cv2.FONT_HERSHEY_COMPLEX, 2, (0, 0, 255), 5)
        video_disp.refresh(img_markers)

        if video_disp.key_pressed('s'):
            pass

        skip_count = skip_count + 1

        # Add quitting event
        if video_disp.can_quit():
            break

    # On quit, run the calibration and save the params
    # save_config = SaveConfig('new_erode_dilate', 'erode_dilate')
    # save_config.save(dilate_size=dilate_size, erode_size=erode_size)
    img_size = calib_img_deck[0].shape[:2]

    # print charuco_ids_deck
    # error, camera_matrix, dist_coeffs = cv2.aruco.calibrateCameraCharuco(
    #     charuco_corners_deck, charuco_ids_deck, board, img_size,
    #     test_camera_matrix, None)[:3]

    objPoints = [board.chessboardCorners.reshape(1, -1, 3)] * len(charuco_corners_deck)
    imgPoints = charuco_corners_deck

    calibration_flags = cv2.fisheye.CALIB_USE_INTRINSIC_GUESS + \
        cv2.fisheye.CALIB_FIX_PRINCIPAL_POINT + cv2.fisheye.CALIB_FIX_SKEW
    error, camera_matrix, dist_coeffs = cv2.fisheye.calibrate(
        objPoints, imgPoints, img_size, test_camera_matrix, np.zeros(4),
        flags=calibration_flags)[:3]

    print error, camera_matrix

    save_config = SaveConfig('new_calib', 'calib')
    save_config.save(camera_matrix=camera_matrix, dist_coeffs=dist_coeffs)
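The calibration script above relies on module-level objects (dictionary, board) and constants (MAX_NUM_IMAGES_FOR_CALIB, MAX_ARUCO_IDS, MAX_CHARUCO_IDS) that are not shown. A minimal sketch of how they might be defined with the older cv2.aruco API used above follows; the board geometry, dictionary choice, and square/marker sizes are assumptions, not the values actually used.

from collections import deque  # also needed at module level by the script

import cv2
import numpy as np

# Hypothetical board geometry: a 6x9 ChArUco board with 4 cm squares
# and 2 cm markers. Only the derived counts matter to the script.
SQUARES_X, SQUARES_Y = 6, 9
dictionary = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_50)
board = cv2.aruco.CharucoBoard_create(
    SQUARES_X, SQUARES_Y, 0.04, 0.02, dictionary)

MAX_NUM_IMAGES_FOR_CALIB = 50
# A full detection sees every marker and every interior chessboard corner
MAX_ARUCO_IDS = (SQUARES_X * SQUARES_Y) // 2         # 27 markers on a 6x9 board
MAX_CHARUCO_IDS = (SQUARES_X - 1) * (SQUARES_Y - 1)  # 40 interior corners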
def main():
    args = get_args()

    # Setup video displays
    orig_video_disp = Display({'name': 'Original_Video'})
    thresh_video_disp = Display({'name': 'Thresholded_Video'})
    mean_shift_video_disp = Display({'name': 'Mean-Shift Tracking Video'})

    # Setup controls
    setup_trackbars(controls_window_name)

    # Get input video
    video = Video(args['video'])
    num_frames = video.get_num_frames()

    # Get the first frame to start with
    frame = video.next_frame()

    global seek_callback_action

    # Setup the initial location of the tracking window
    top, length, left, width = 450, 36, 1000, 43  # simply hardcoded values
    track_window = (left, top, width, length)

    # Set up the ROI for tracking
    roi = frame[top:top + length, left:left + width]
    hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    h_min, h_max, s_min, s_max, v_min, v_max = get_thresholds(
        controls_window_name)
    mask = cv2.inRange(hsv_roi, np.array((h_min, s_min, v_min)),
                       np.array((h_max, s_max, v_max)))
    roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
    cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)

    # Setup the termination criteria:
    # either 20 iterations or a move by at least 1 pt
    term_criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 20, 1)

    while True:
        if play_or_pause == 'Play':
            if not seek_callback_action:
                frame = video.next_frame()
            else:
                frame = video.get_frame(cur_seek_pos * num_frames / 100)
                seek_callback_action = False

        if video.end_reached():
            # Wait indefinitely if end of video reached,
            # or until keypress, and then exit
            cv2.waitKey(0)
            break

        # Refresh original video display
        orig_video_disp.refresh(frame)

        # Get threshold values
        h_min, h_max, s_min, s_max, v_min, v_max = get_thresholds(
            controls_window_name)

        # Convert image to HSV and apply threshold
        frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        frame_thresh = cv2.inRange(
            frame_hsv, (h_min, s_min, v_min), (h_max, s_max, v_max))

        # Threshold the image in the HSV domain
        frame_hsv_threshold = cv2.bitwise_and(
            frame_hsv, frame_hsv, mask=frame_thresh)

        # Refresh thresholded video display
        thresh_video_disp.refresh(frame_hsv_threshold)

        # Find the backprojection of the histogram
        dst = cv2.calcBackProject([frame_hsv_threshold], [0], roi_hist,
                                  [0, 180], 1)

        # Apply mean shift to get the new location
        ret, track_window = cv2.meanShift(dst, track_window, term_criteria)

        # Draw it on the image
        x, y, w, h = track_window
        frame_mean_shift = cv2.rectangle(frame, (x, y), (x + w, y + h), 255, 2)

        # Refresh mean-shift tracking video display
        mean_shift_video_disp.refresh(frame_mean_shift)