Code Example #1
def main():
    args = get_args()

    # Setup video displays
    orig_video_disp = Display({'name': 'Original_Video'})
    thresh_video_disp = Display({'name': 'Thresholded_Video'})

    # Setup controls
    setup_trackbars(controls_window_name)

    # Get input video
    video = Video(args['video'])
    num_frames = video.get_num_frames()

    # Get the first frame to start with
    frame = video.next_frame()

    # To communicate with seek callback
    global seek_callback_action

    while True:
        if play_or_pause == 'Play':
            if not seek_callback_action:
                frame = video.next_frame()
            else:
                frame = video.get_frame(cur_seek_pos * num_frames / 100)
                seek_callback_action = False

        if video.end_reached():
            # Wait indefinitely if end of video reached
            # Or until keypress and then exit
            cv2.waitKey(0)
            break

        # Refresh original video display
        orig_video_disp.refresh(frame)

        # Get threshold values
        h_min, h_max, s_min, s_max, v_min, v_max = get_thresholds(
            controls_window_name)

        # Convert image to HSV and apply threshold
        frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        frame_thresh = cv2.inRange(
            frame_hsv, (h_min, s_min, v_min), (h_max, s_max, v_max))

        # Refresh thresholded video display
        thresh_video_disp.refresh(frame_thresh)

        # Add quitting event
        if orig_video_disp.can_quit() or thresh_video_disp.can_quit():
            break

    # On quit, save the thresholds
    save_config = SaveConfig('new_thresholds', 'thresholds')
    save_config.save(h_min=h_min, h_max=h_max, s_min=s_min,
                     s_max=s_max, v_min=v_min, v_max=v_max)
Code Example #2
    def __init__(self):
        """Initialise app."""
        # Init pygame
        pygame.init()

        # Init display
        self.display = Display()

        # Display commands
        print(INSTRUCTIONS)
Code Example #3
        def _answer_interstitial(request_attrs, session_attrs, game_engine_events):
            questions = utils._('QUESTIONS')
            current_question = int(session_attrs.get('current_question', 1))
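            # Map the player-facing question number onto the shuffled order,
            # then look the question up by its original index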
            shuffled_index = session_attrs['ordered_questions'][current_question - 1]
            trivia_question = next((q for q in questions if q['index'] == shuffled_index), None)

            data = {'question_number': current_question}
            response_message = utils._('ASK_QUESTION_DISPLAY', data)
            response_message['display_text'] = trivia_question['question']
            Display.render(handler_input, response_message)

            Game.listen_for_answer(handler_input)
Code Example #4
    def handle_roll_call_complete(handler_input, event):
        print(f'ROLLCALL_HELPER: handle roll call complete: {event}')
        request_attrs = handler_input.attributes_manager.request_attributes
        session_attrs = handler_input.attributes_manager.session_attributes

        # Move to the button game state to begin the game
        session_attrs['STATE'] = settings.STATES['button_game']
        session_attrs['buttons'] = [
            {
                'button_id': event.gadget_id,
                'count': i + 1,
            } for i, event in enumerate(event.input_events)
        ]

        # clear animations on all other buttons that haven't been added to the game
        request_attrs['directives'].append(directives.GadgetController.set_idle_animation({
            'animations': animations.BasicAnimations.solid(1, "black", 100)
        }))
        # display roll call complete animation on all buttons that were added to the game
        request_attrs['directives'].append(directives.GadgetController.set_idle_animation({
            'target_gadgets': [b['button_id'] for b in session_attrs['buttons']],
            'animations': settings.ANIMATIONS['roll_call_complete']
        }))

        print(f"RollCall: resuming play from question: {session_attrs.get('current_question')}")

        current_prompts = None
        if settings.ROLLCALL_STATES['named_players']:
            # tell the next player to press their button.
            message = utils._('ROLL_CALL_HELLO_PLAYER', {
                'player_number': len(session_attrs['buttons'])
            })
            current_prompts = message

        message = utils._('ROLL_CALL_COMPLETE')
        mixed_output_speech = ''
        if current_prompts:
            mixed_output_speech = " ".join([
                settings.AUDIO['roll_call_complete'],
                settings.pick_random(current_prompts['output_speech']),
            ])
        else:
            mixed_output_speech = " ".join([
                settings.AUDIO['roll_call_complete'],
                settings.pick_random(message['output_speech']),
            ])

        Display.render(handler_input, message)
        request_attrs['output_speech'].append(mixed_output_speech)
        request_attrs['reprompt'].append(message['reprompt'])
        request_attrs['open_microphone'] = True
Code Example #5
def main():
    args = get_args()

    # Check for calib_images folder
    if not os.path.exists('calib_images'):
        print('Please create a directory "calib_images"')
        return

    # Setup video display
    video_disp = Display({'name': 'Video'})

    # Setup controls
    setup_trackbars('Controls')  # , thresholds)

    # Get input video
    video = Video(args['video'])
    num_frames = video.get_num_frames()

    # Get the first frame to start with
    frame = video.next_frame()

    global seek_callback_action

    while True:
        if play_or_pause == 'Play':
            if not seek_callback_action:
                frame = video.next_frame()
            else:
                frame = video.get_frame(cur_seek_pos * num_frames / 100)
                seek_callback_action = False

        if video.end_reached():
            # Wait indefinitely if end of video reached
            # Or until keypress and then exit
            cv2.waitKey(0)
            break

        video_disp.refresh(frame)

        cur_frame_num = video.get_cur_frame_num()

        # Service the key events
        # if s is pressed, save image
        # if b is pressed, go back 1s
        # if n is pressed, go ahead 1s
        if video_disp.key_pressed('s'):
            video_file = os.path.basename(args['video']).lower()
            img_file_name = 'calib_images/{}_{}.png'.format(
                os.path.splitext(video_file)[0], cur_frame_num)
            if cv2.imwrite(img_file_name, frame):
                print('Saved', img_file_name)
        elif video_disp.key_pressed('n'):
            seek_callback(
                min((((cur_frame_num + 60) * 100) // num_frames), num_frames))
        elif video_disp.key_pressed('b'):
            seek_callback(max((((cur_frame_num - 60) * 100) // num_frames), 0))

        # Add quitting event
        if video_disp.can_quit():
            break
Code Example #6
    def take_turn(actions):
        player = internal.current_players.get(internal.active_player)
        player_name = player.get("name")
        player_character = player.get("character")
        location = internal.current_game.board.locate_character(
            player_character)

        data = actions.get('turn_selection')
        turn = internal.current_turn_options.get(int(data))

        if 'suggestion' in turn:
            internal.make_suggestion(player)
        elif 'accusation' in turn:
            internal.make_accusation(player)
        else:
            space_id = turn
            space_type = internal.current_game.move_player_to_space_by_id(
                space_id, player_character, location)

            ServerProtocol.message_all_players("\n" + player_name + " moved " +
                                               player_character + " to " +
                                               space_id + ".\n")

            ServerProtocol.message_all_players(
                Display.display_board(internal.current_game.board))

            if space_type == 'Room':
                internal.make_suggestion(player, space_id)
            else:
                internal.finish_turn()
Code Example #7
    def end_game(handler_input, reset_game):
        print('GAME: end game')
        request_attrs = handler_input.attributes_manager.request_attributes
        session_attrs = handler_input.attributes_manager.session_attributes

        # Clean the player state on the way out
        session_attrs.pop('repeat', None)
        session_attrs.pop('incorrect_answer_buttons', None)
        session_attrs.pop('correct', None)
        session_attrs.pop('answering_button', None)
        session_attrs.pop('answering_player', None)

        response_message = utils._('GAME_FINISHED' if reset_game else 'GAME_CANCELLED')
        Display.render(handler_input, response_message)
        request_attrs['open_microphone'] = False
        handler_input.response_builder.set_should_end_session(True)

        if session_attrs['STATE'] == settings.STATES['button_game']:
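            # In a button game, play the exit animation on the buttons in play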
            request_attrs['directives'].append(GadgetController.set_idle_animation({
                'target_gadgets': [b['button_id'] for b in session_attrs['buttons']],
                'animations': settings.ANIMATIONS['exit'],
            }))

        if reset_game:
            final_scores = GameHelper.get_formatted_scores(
                handler_input,
                session_attrs.get('scores'),
                session_attrs['player_count']
            )
            multi_player = session_attrs['STATE'] == settings.STATES['button_game']
            msg_key = 'GAME_FINISHED_INTRO' if multi_player else 'SINGLE_PLAYER_GAME_FINISHED_INTRO'
            game_finished_message = utils._(msg_key)

            if len(request_attrs['output_speech']) == 0:
                request_attrs['output_speech'].append("<break time='2s'/>")

            request_attrs['output_speech'].extend([
                game_finished_message['output_speech'],
                final_scores,
                "<break time='1s'/>",
                response_message['output_speech'],
            ])

            handler_input.attributes_manager.session_attributes = {}

        else:
            request_attrs['output_speech'].append(response_message['output_speech'])
Code Example #8
    def start_roll_call(handler_input, message_key):
        print('ROLLCALL_HELPER: start roll call')
        request_attrs = handler_input.attributes_manager.request_attributes
        session_attrs = handler_input.attributes_manager.session_attributes
        config = Helper.generate_input_handler_config(**{
            'player_count': session_attrs['player_count'],
            'timeout': 35000,  # allow 35 seconds for roll call to complete
        })
        Helper.listen_for_roll_call(handler_input, config)

        message = utils._(message_key)
        Display.render(handler_input, message)
        request_attrs['output_speech'].append(message['output_speech'])
        request_attrs['output_speech'].append(settings.AUDIO['waiting_for_roll_call'])
        request_attrs['open_microphone'] = True

        session_attrs['buttons'] = []
Code Example #9
def scheme_1(file_name):
    video = Video(file_name)
    display = Display()

    # Emulate a do-while loop
    # with "while True" and breaking
    # if condition fails after executing
    while True:
        frame = video.next_frame()
        if video.end_reached():
            break

        # Do some operation
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Refresh display with new image
        display.refresh(gray)
        if display.can_quit():
            break
Code Example #10
def scheme_2(file_name):
    video = Video(file_name)
    display = Display()

    # First get the number of frames
    num_frames = video.get_num_frames()

    # Get each frame from video and display
    # If step is greater than one (simulating random
    # seeks,) the playback will be slow
    for i in range(num_frames):
        # Get the frame desired
        frame = video.get_frame(i)

        # Do some operation
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Refresh display with new image
        display.refresh(gray)

        # Quit midway if required
        if display.can_quit():
            break
Code Example #11
 def __init__(self):
     self.config = Config()
     self.cookies = login()
     logger.info(self.cookies)
     self.base_path = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
                                   self.config.config['output']['path'])
     self.live_infos = Infos()
     self.display = Display()
     self.decoder = Decoder()
     self.uploader = Upload()
     self.threadRecorder = threadRecorder()
     logger.info('Base path: %s' % self.base_path)
     self.load_room_info()
     self.get_live_url()
     logger.info('Initialization complete')
Code Example #12
def evaluate(csv_file, eval_path="", s3_bucket="bsivisiondata", train_config=None, write_output_images=False):
    '''
    eval_path: top directory, look for frames/ and annotations/ underneath here.  If csv specified, then paths
        will be relative to this eval_path
    csv_file: a csv of paths_to_images,labels.  Paths are relative to eval_path
    s3_bucket: name of an s3 bucket where image and label data is read from. If csv is specified, labels will be
        taken from the csv
    train_config: a .json config file from train_history that gives information on the training job that we're
        evaluating
    write_output_images: bool, whether or not to write output images to data/eval_images/training_job_name_steps
    loop_eval_period: int, the period in seconds to loop evaluation runs.  If not set, only runs once
    '''

    if train_config:
        config_file = train_config
    else:
        # config_file = get_most_recent_file_from_dir('train_history/')
        config_file = choose_file_with_stdin('train_history/')
    print("Config file: {}".format(config_file))
    cm = ConfigManager(config_file, folder='train_history')
    cfg = cm.get_json()

    data = DataExtractor(eval_path, s3_bucket, csv_file=csv_file)
        
    # batch_size = cfg['training']['hyperparameters']['batch_size']
    batch_size = 1
    target_shape = cfg['training']['model']['target_shape']
    data_gen = data.get_processed_data_generator(True, batch_size, target_shape, True)

    print("Testing on {} samples".format(data.get_data_length()))

    displayer = Display()

    model, path_and_prefix = get_model_and_load_weights(cfg)
    compile_model(model, cfg['training'])
    ckpt_prefix = os.path.basename(path_and_prefix)
    
    total_loss = 0
    num_testing_samples = 0
    for X, y, filepaths, originals in data_gen:

        pred = model.predict_on_batch(X)

        mse_out = mean_squared_error(y, pred)
        loss = mse_out.eval(session=K.get_session())
        total_loss += loss[0]
        num_testing_samples += 1
        print("Loss: {}".format(loss[0]))

        # The model output is a flattened 3x3 homography; reshape before warping
        H = np.reshape(pred, (3,3))
        # print('Prediction:\n{}'.format(H))
        
        if write_output_images: 
            frame = originals[0]
            filepath = filepaths[0]
            image_text = "Pred: {}\nTruth: {}\nLoss: {}".format(np.array_str(pred), np.array_str(y[0]), str(loss[0]))
            warped = displayer.get_warp_overlay(frame, H, dsize=(1000,425))
            vstacked = displayer.get_vstacked(frame, warped, dsize=(1000,850))
            displayer.put_rectangle(vstacked, (10, 10), (800, 80))
            displayer.put_text(vstacked, "Pred: {}".format(np.array_str(pred, precision=2)), text_loc=(10,30))
            displayer.put_text(vstacked, "Truth: {}".format(np.array_str(y[0], precision=2)), text_loc=(10,50))
            displayer.put_text(vstacked, "loss: {}".format(loss[0]), text_loc=(10,70))
            step_number = path_and_prefix.split('-')[-1]
            job_name_and_step_num = cfg['sagemaker_job_info']['job_name'] + '-step' + str(step_number)
            job_name_dir = os.path.join('data/eval_images', job_name_and_step_num)
            if not os.path.exists(job_name_dir):
                os.makedirs(job_name_dir)
            filename = "{:.2E}-{}".format(loss[0], os.path.basename(filepath))
            displayer.save_image(os.path.join(job_name_dir, filename), vstacked)
        # displayer.display_vstacked(frame, warped, dsize=(1000,850))
        # displayer.display_warp_overlay(frame, H, dsize=(600,600))
    average_loss = total_loss / num_testing_samples
    print("Average loss: {}".format(average_loss))
    return average_loss, path_and_prefix
Code Example #13
def main_worker(id, video_file, camera_model, K, D, R, T, measurements, quit_event):
    # Setup video displays
    video_disp = Display({'name': 'Camera_{}'.format(id)})

    # Get input video
    video = Video(video_file)

    # Setup the undistortion stuff
    if camera_model == 'P':
        # Hardcoded image size as
        # this is a test script
        img_size = (1920, 1080)

        # First create scaled intrinsics because we will undistort
        # into region beyond original image region
        new_K = cv2.getOptimalNewCameraMatrix(K, D, img_size, 0.35)[0]

        # Then calculate new image size according to the scaling
        # Unfortunately the Python API doesn't directly provide
        # the new image size. They forgot?
        new_img_size = (int(img_size[0] + (new_K[0, 2] - K[0, 2])), int(
            img_size[1] + (new_K[1, 2] - K[1, 2])))

        # Standard routine of creating a new rectification
        # map for the given intrinsics and mapping each
        # pixel onto the new map with linear interpolation
        map1, map2 = cv2.initUndistortRectifyMap(
            K, D, None, new_K, new_img_size, cv2.CV_16SC2)

    elif camera_model == 'F':
        # Hardcoded image size
        img_size = (1920, 1080)

        # First create scaled intrinsics because we will undistort
        # into region beyond original image region. The alpha
        # parameter in pinhole model is equivalent to balance parameter here.
        new_K = cv2.fisheye.estimateNewCameraMatrixForUndistortRectify(
            K, D, img_size, np.eye(3), balance=1)

        # Then calculate new image size according to the scaling
        # Well if they forgot this in pinhole Python API,
        # can't complain about Fisheye model. Note the reversed
        # indexing here too.
        new_img_size = (int(img_size[0] + (new_K[0, 2] - K[0, 2])), int(
            img_size[1] + (new_K[1, 2] - K[1, 2])))

        # Standard routine of creating a new rectification
        # map for the given intrinsics and mapping each
        # pixel onto the new map with linear interpolation
        map1, map2 = cv2.fisheye.initUndistortRectifyMap(
            K, D, np.eye(3), new_K, new_img_size, cv2.CV_16SC2)

    # Set up foreground and background separation
    fgbg = cv2.createBackgroundSubtractorMOG2()

    # Averaging kernel that will be used in opening
    kernel = np.ones((6, 6), np.uint8)

    # Code commented out because not using
    # confidence currently, but could be 
    # used again with changes later
    # # Will be used for histogram comparison
    # # (Confidence measure)
    # ball_image_file = 'ball_image.jpg'
    # ball_image = cv2.imread(ball_image_file)


    # 2D ball detection and 3D ball tracking setup
    ball_position_frame = None
    ball_wc = [0, 0, 0]

    while not video.end_reached() and not quit_event.value:
        # Get each frame
        frame = video.next_frame()

        # Undistort the current frame
        img_undistorted = cv2.remap(
            frame, map1, map2, cv2.INTER_LINEAR, cv2.BORDER_CONSTANT)

        # Convert to HSV and threshold range of ball
        img_hsv = cv2.cvtColor(img_undistorted, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(img_hsv, np.array(
            (15, 190, 200)), np.array((25, 255, 255)))

        # Foreground and background separation mask
        fgmask = fgbg.apply(img_undistorted)
        mask_color_bgs = cv2.bitwise_and(mask, mask, mask=fgmask)
        masked_and_opened = cv2.morphologyEx(
            mask_color_bgs, cv2.MORPH_OPEN, kernel)

        # Hough transform to detect ball (circle)
        circles = cv2.HoughCircles(masked_and_opened, cv2.HOUGH_GRADIENT, dp=3,
                                   minDist=2500, param1=300, param2=5, minRadius=3, maxRadius=30)
        if circles is not None:
            # Make indexing easier and
            # convert everything to int
            circles = circles[0, :]
            circles = np.round(circles).astype("int")

            # Take only the first
            # (and hopefully largest)
            # circle detected
            x, y, r = circles[0]
            ball_position_frame = [x - r, y - r, 2 * r, 2 * r]
        else:
            ball_position_frame = None

        # Determine the correct ball radius
        mask_ball_radius = cv2.bitwise_and(fgmask, fgmask, mask=cv2.inRange(
            img_hsv, np.array((10, 150, 180)), np.array((40, 255, 255))))
        if ball_position_frame:
            x1, y1, w1, h1 = ball_position_frame
            ball_crop_temp = mask_ball_radius[(
                y1 + h1 // 2 - 50):(y1 + h1 // 2 + 50), (x1 + w1 // 2 - 50):(x1 + w1 // 2 + 50)]   
            height, width = ball_crop_temp.shape
            if height and width:
                # Successfully cropped image
                ball_crop = ball_crop_temp
                cnts = cv2.findContours(
                    ball_crop.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
                center = None
                if len(cnts) > 0:
                    # contour detected
                    c = max(cnts, key=cv2.contourArea)
                    ellipse = cv2.fitEllipse(c)
                    width = min(ellipse[1])
                    ball_position_frame = [
                        ball_position_frame[0], ball_position_frame[1], 2 * width, 2 * width]

                # Code commented out because not using
                # confidence currently, but could be 
                # used again with changes later
                # # Calculate confidence
                # confidence = histogram_comparison(ball_image, img_undistorted, ball_position_frame)
                # print confidence

        if ball_position_frame:
            x1, y1, w1, h1 = ball_position_frame
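            # Back-project to 3D: estimate depth z from the apparent ball
            # diameter (pinhole model, known ping-pong ball size), then
            # recover x and y from the pixel offsets about the principal point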
            pixels_per_mm = (
                K[0, 0] + K[1, 1]) / 2 / DEFAULT_FOCAL_LENGTH
            z = PING_PONG_DIAMETER * \
                DEFAULT_FOCAL_LENGTH / (w1 / pixels_per_mm)
            x = ((x1 - K[0, 2]) /
                 pixels_per_mm) * z / DEFAULT_FOCAL_LENGTH
            y = ((y1 - K[1, 2]) /
                 pixels_per_mm) * z / DEFAULT_FOCAL_LENGTH

            ball_cc = np.array([x, y, z]) / 1000
            ball_wc = np.dot(R.T, ball_cc - T.ravel())

        # Push measurements to be processed/visualized
        measurement = {
            'id': id,
            'frame_num': video.get_cur_frame_num(),
            'ball_ic': ball_position_frame,
            'ball_wc': ball_wc
        }
        measurements.put(measurement)

        # Update video display
        video_disp.refresh(img_undistorted)

        # Add quitting event
        if video_disp.can_quit():
            break

    # Setting this will signal
    # the other parallel process
    # to exit too.
    quit_event.value = 1
Code Example #14
class DuckieControlApp(object):
    """Application class.

    This application contains an event listener to catch keyboard key presses
    and a UI to display information. All UI logic is defined in utils.display.
    """

    def __init__(self):
        """Initialise app."""
        # Init pygame
        pygame.init()

        # Init display
        self.display = Display()

        # Display commands
        print(INSTRUCTIONS)

    def register_image(self, image):
        """Register an image for future display."""
        self.display.image = image

    def register_stack(self, stack):
        """Register a state stack for future display."""
        self.display.stack = stack

    def register_action(self, action):
        """Register an action for future display."""
        self.display.action = action

    def step(self, auto_pilote_mode):
        """Run a step of app logic.

        When not in auto_pilote_mode, this checks whether an arrow key is
        pressed and returns the corresponding action. It also listens for the
        'a' key (toggle autopilote) and the Enter key (request a save).
        """
        # Get keys currently being pressed
        keys = pygame.key.get_pressed()

        action = []

        # If not in auto pilote mode, convert pressed arrow keys to an action
        if not auto_pilote_mode:
            # Default if no key is pressed
            action = [0, 0]
            if keys[pygame.K_UP]:
                action[0] = 0.6
                action[1] = 0.6
            if keys[pygame.K_DOWN]:
                action[0] = -0.6
                action[1] = -0.6
            if keys[pygame.K_LEFT]:
                action[0] = 0.24
                action[1] = 0.86
            if keys[pygame.K_RIGHT]:
                action[0] = 0.86
                action[1] = 0.24

            # Register action to display
            self.register_action(action)

        # Retrieve pygame event list
        pygame.event.pump()

        autopilote_toggle = False
        save_cmd = False

        # If 'a' has been pressed, toggle autopilote; Enter requests a save
        for event in pygame.event.get():
            if event.type == pygame.KEYDOWN and event.key == pygame.K_a:
                autopilote_toggle = True
            if event.type == pygame.KEYDOWN and event.key == pygame.K_RETURN:
                save_cmd = True

        return action, autopilote_toggle, save_cmd

    def render(self):
        """Render display."""
        self.display.render()
Code Example #15
File: scrapbook.py  Project: ccweaver1/bsi_vision
from utils.display import Display
from utils.file_manager import FileManager
from utils.warp_tools import *
from utils.rink_specs import HockeyRink
import random
import cv2
import numpy as np
fm = FileManager('bsivisiondata')
d = Display()

annotations = fm.get_folder_list('PHI-PIT_6m-8m/annotations',
                                 extension_filter='json')
# random.shuffle(annotations)
for f in annotations:
    print(f)
    im_dict = fm.read_image_dict('PHI-PIT_6m-8m/annotations', f)
    if 'warp' not in im_dict:
        continue

    imname = f.split('.')[0] + '.png'
    im = fm.read_image_file('PHI-PIT_6m-8m/frames', imname)
    im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)

    H = np.array(im_dict['warp']['M'])
    hr = HockeyRink()

    scaled_H = scale_homography(H, 600, 300)
    H1280 = scale_homography(H, 1280, 720)
    '''
    NEEDED TO BE RESIZING IMAGES BEFORE CALLING WARP!!!
    '''
Code Example #16
def main():
    args = get_args()

    # Setup video displays
    orig_video_disp = Display({'name': 'Original_Video'})
    thresh_video_disp = Display({'name': 'Thresholded_Video'})
    mean_shift_video_disp = Display({'name': 'Mean-Shift Tracking Video'})

    # Setup controls
    setup_trackbars(controls_window_name)

    # Get input video
    video = Video(args['video'])
    num_frames = video.get_num_frames()

    # Get the first frame to start with
    frame = video.next_frame()

    global seek_callback_action

    # setup initial location of window
    top, length, left, width = 450, 36, 1000, 43  # simply hardcoded the values
    track_window = (left, top, width, length)

    # set up the ROI for tracking
    roi = frame[top:top + length, left:left + width]
    hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    h_min, h_max, s_min, s_max, v_min, v_max = get_thresholds(
        controls_window_name)
    mask = cv2.inRange(hsv_roi, np.array(
        (h_min, s_min, v_min)), np.array((h_max, s_max, v_max)))
    roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
    cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)

    # Setup the termination criteria: at most 20 iterations or move by at least 1 pt
    term_criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 20, 1)

    while True:
        if play_or_pause == 'Play':
            if not seek_callback_action:
                frame = video.next_frame()
            else:
                frame = video.get_frame(cur_seek_pos * num_frames / 100)
                seek_callback_action = False

        if video.end_reached():
            # Wait indefinitely if end of video reached
            # Or until keypress and then exit
            cv2.waitKey(0)
            break

        # Refresh original video display
        orig_video_disp.refresh(frame)

        # Get threshold values
        h_min, h_max, s_min, s_max, v_min, v_max = get_thresholds(
            controls_window_name)

        # Convert image to HSV and apply threshold
        frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

        frame_thresh = cv2.inRange(
            frame_hsv, (h_min, s_min, v_min), (h_max, s_max, v_max))

        # threshold image in hsv domain
        frame_hsv_threshold = cv2.bitwise_and(
            frame_hsv, frame_hsv, mask=frame_thresh)

        # Refresh thresholded video display
        thresh_video_disp.refresh(frame_hsv_threshold)

        # Find the backprojection of the histogram
        dst = cv2.calcBackProject([frame_hsv_threshold], [
                                  0], roi_hist, [0, 180], 1)

        # apply meanshift to get the new location
        ret, track_window = cv2.meanShift(dst, track_window, term_criteria)

        # Draw it on image
        x, y, w, h = track_window
        frame_mean_shift = cv2.rectangle(frame, (x, y), (x + w, y + h), 255, 2)

        # Refresh mean shift tracking video display
        mean_shift_video_disp.refresh(frame_mean_shift)
Code Example #17
 def begin_game():
     # initialize the game
     internal.current_game.initialize_game()
     ServerProtocol.message_all_players(
         Display.display_board(internal.current_game.board))
     internal.begin_turn(1)
Code Example #18
if __name__ == "__main__":
    current_game = Game()

    # add players to game
    current_game.add_player("Roxanne", "Mr. Green")
    current_game.add_player("Julie", "Mrs. Peacock")

    # initialize the game
    current_game.initialize_game()

    weapons = current_game.get_cards().get_weapons()
    characters = current_game.get_cards().get_characters()
    rooms = current_game.get_cards().get_rooms()

    for weapon in weapons:
        print(weapon.card_type + " : " + weapon.value)
    for character in characters:
        print(character.card_type + " : " + character.value)
    for room in rooms:
        print(room.card_type + " : " + room.value)

    players = current_game.get_players()

    for player in players:
        print(player.name + " : " + player.selected_character)

    Display.display_board(current_game.board)

    current_game.terminate_game()

Code Example #19
    def ask_question(handler_input, is_following):
        """
        gather the built responses, add them to the overall response
        retrieve and ask the next/same question
        """
        request_env = handler_input.request_envelope
        request_attrs = handler_input.attributes_manager.request_attributes
        session_attrs = handler_input.attributes_manager.session_attributes
        questions = utils._('QUESTIONS')
        print(f"GAME: ask_question (currentQuestion = {session_attrs.get('current_question')})")

        if not is_following:
            # clean repeat state
            session_attrs.pop('repeat', None)
            session_attrs.pop('incorrect_answer_buttons', None)

        session_attrs['input_handler_id'] = request_env.request.request_id

        if 'current_question' in session_attrs:
            current_question = int(session_attrs['current_question'])
        else:
            session_attrs['current_question'] = current_question = 1

        if 'ordered_questions' not in session_attrs or (
            current_question == 1 and 'repeat' not in session_attrs
        ):
            if settings.GAME_OPTIONS['shuffle_questions']:
                print('GamePlay: producing ordered questions for new game (using shuffling)!')
                # if this is the first question, then shuffle the questions
                ordered_questions = [q['index'] for q in questions]
                random.shuffle(ordered_questions)
            else:
                print('GamePlay: producing ordered questions for new game (shuffling disabled)!')
                ordered_questions = [q['index'] for q in questions]

            ordered_questions = ordered_questions[:settings.GAME_OPTIONS['questions_per_game']]
            session_attrs['ordered_questions'] = ordered_questions

        if (
            current_question > len(session_attrs['ordered_questions']) or
            current_question > settings.GAME_OPTIONS['questions_per_game']
        ):
            return Game.end_game(handler_input, True)

        else:
            shuffle_question = session_attrs['ordered_questions'][current_question - 1]
            next_question = next((q for q in questions if q['index'] == shuffle_question), None)
            print(
                f"Ask question: {current_question} of "
                f"{settings.GAME_OPTIONS['questions_per_game']}, "
                f"next question {next_question}"
            )

        interstitial_delay = 6000 if is_following else 3000
        questions_per_round = int(settings.GAME_OPTIONS['questions_per_round'])

        if (
            current_question > 2 and 'repeat' not in session_attrs and
            (current_question - 1) % questions_per_round == 0
        ):
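            # At a round boundary: allow extra interstitial time and narrate
            # a summary of the scores for the round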
            interstitial_delay += 12000
            round_summary = GameHelper.generate_round_summary_narration(
                handler_input,
                session_attrs['current_question'],
                session_attrs.get('scores'),
                session_attrs['player_count'],
            )
            request_attrs['output_speech'].append(round_summary)

        if 'correct' in session_attrs:
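            # Give feedback on the previous answer: pick the display message by
            # correctness and by whether this is the multi-player (button) game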
            if session_attrs['correct']:
                keys = {
                    True: 'ANSWER_QUESTION_CORRECT_DISPLAY',
                    False: 'SINGLE_PLAYER_ANSWER_QUESTION_CORRECT_DISPLAY',
                }
                key = keys[session_attrs['STATE'] == settings.STATES['button_game']]
                image = settings.pick_random(settings.IMAGES['correct_answer'])
            else:
                keys = {
                    True: 'ANSWER_QUESTION_INCORRECT_DISPLAY',
                    False: 'SINGLE_PLAYER_ANSWER_QUESTION_INCORRECT_DISPLAY',
                }
                key = keys[session_attrs['STATE'] == settings.STATES['button_game']]
                image = settings.pick_random(settings.IMAGES['incorrect_answer'])

            message = utils._(key, {'player_number': session_attrs['answering_player']})
            message['image'] = image
            Display.render(handler_input, message)

        else:
            key = 'NEW_GAME' if current_question == 1 else 'RESUME'
            message = utils._(f'ASK_FIRST_QUESTION_{key}_DISPLAY')
            Display.render(handler_input, message)

        # use a shorter break for buttonless games
        break_time = 4 if session_attrs['STATE'] == settings.STATES['button_game'] else 1
        answers = f"<break time='{break_time}s'/> Is it "
        if next_question['answers']:
            if len(next_question['answers']) > 1:
                answers += ', '.join(next_question['answers'][:-1])
                answers += f", or, {next_question['answers'][-1]}"
            else:
                answers = next_question['answers'][0]

            answers += "?"

        request_attrs['output_speech'].append(next_question['question'])
        request_attrs['output_speech'].append(answers)

        if session_attrs['STATE'] == settings.STATES['button_game']:
            request_attrs['output_speech'].append(settings.AUDIO['waiting_for_buzz_in'])

            Game.animate_buttons_after_answer(handler_input)
            Game.send_answer_interstitial(handler_input, interstitial_delay)
            session_attrs.pop('answering_button', None)
            session_attrs.pop('answering_player', None)
        else:
            request_attrs['reprompt'].append(answers)

            session_attrs['waiting_for_answer'] = True
            session_attrs['answering_player'] = 1
            request_attrs['open_microphone'] = True

            message = utils._('ASK_QUESTION_DISPLAY', {'question_number': current_question})
            if session_attrs.get('correct') is True:
                message['image'] = settings.pick_random(settings.IMAGES['correct_answer'])
            elif session_attrs.get('correct') is False:
                message['image'] = settings.pick_random(settings.IMAGES['incorrect_answer'])
            Display.render(handler_input, message)

        session_attrs.pop('correct', None)
Code Example #20
def main():
    args = get_args()

    # Read in configuration
    # load_config = LoadConfig('config/thresholds.npz', 'thresholds')
    # thresholds = load_config.load()

    # Setup video displays
    video_disp = Display({'name': 'Video'})

    # Setup controls
    setup_trackbars('Controls')  # , thresholds)

    # Get input video
    video = Video(args['video'])
    num_frames = video.get_num_frames()

    # Get the first frame to start with
    frame = video.next_frame()

    global seek_callback_action

    # Deque for storing calibration images
    calib_img_deck = deque(maxlen=MAX_NUM_IMAGES_FOR_CALIB)

    # Deques for storing Charuco corner info
    charuco_corners_deck = deque(maxlen=MAX_NUM_IMAGES_FOR_CALIB)
    charuco_ids_deck = deque(maxlen=MAX_NUM_IMAGES_FOR_CALIB)

    skip_count = 0

    # Rough hardcoded guess of the camera intrinsics, used only to seed
    # the Charuco corner interpolation and the fisheye calibration below
    test_camera_matrix = np.array([
        [11096.77, 0, 540],
        [0, 11096.77, 960],
        [0, 0, 1]
    ])

    while True:
        if play_or_pause == 'Play':
            if not seek_callback_action:
                frame = video.next_frame()
            else:
                frame = video.get_frame(cur_seek_pos * num_frames / 100)
                seek_callback_action = False

        if video.end_reached():
            # Wait indefinitely if end of video reached
            # Or until keypress and then exit
            cv2.waitKey(0)
            break

        corners, ids, rejected_img_points = cv2.aruco.detectMarkers(
            frame, dictionary)

        if ids is not None:
            img_markers = cv2.aruco.drawDetectedMarkers(frame, corners, ids)
            num_charuco, charuco_corners, charuco_ids = cv2.aruco.interpolateCornersCharuco(
                corners, ids, frame, board, cameraMatrix=test_camera_matrix)

            if charuco_corners is not None:
                img_markers = cv2.aruco.drawDetectedCornersCharuco(
                    img_markers, charuco_corners, charuco_ids)

                if ids.shape[0] == MAX_ARUCO_IDS \
                        and num_charuco == MAX_CHARUCO_IDS \
                        and skip_count % 15 == 0:
                    calib_img_deck.append(frame)
                    charuco_corners_deck.append(charuco_corners)
                    charuco_ids_deck.append(charuco_ids)
        else:
            img_markers = frame

        cv2.putText(img_markers, '{}/{}'.format(len(calib_img_deck), MAX_NUM_IMAGES_FOR_CALIB),
                    (200, 100), cv2.FONT_HERSHEY_COMPLEX, 2, (0, 0, 255), 5)

        video_disp.refresh(img_markers)
        if video_disp.key_pressed('s'):
            pass

        skip_count = skip_count + 1

        # Add quitting event
        if video_disp.can_quit():
            break

    # On quit, save the params
    # save_config = SaveConfig('new_erode_dilate', 'erode_dilate')
    # save_config.save(dilate_size=dilate_size, erode_size=erode_size)
    img_size = calib_img_deck[0].shape[:2]
    # print charuco_ids_deck
    # error, camera_matrix, dist_coeffs = cv2.aruco.calibrateCameraCharuco(
    #    charuco_corners_deck, charuco_ids_deck, board, img_size, test_camera_matrix, None)[:3]

    # One copy of the board's 3D chessboard corners per captured view,
    # paired with the detected Charuco corners as the image points
    objPoints = [board.chessboardCorners.reshape(1, -1, 3)] * len(charuco_corners_deck)
    imgPoints = charuco_corners_deck

    calibration_flags = cv2.fisheye.CALIB_USE_INTRINSIC_GUESS + \
        cv2.fisheye.CALIB_FIX_PRINCIPAL_POINT + cv2.fisheye.CALIB_FIX_SKEW
    error, camera_matrix, dist_coeffs = cv2.fisheye.calibrate(
        objPoints, imgPoints, img_size, test_camera_matrix, np.zeros(4), flags=calibration_flags)[:3]

    print(error, camera_matrix)

    save_config = SaveConfig('new_calib', 'calib')
    save_config.save(camera_matrix=camera_matrix, dist_coeffs=dist_coeffs)
Code Example #21
File: test_warper.py  Project: ccweaver1/bsi_vision
#     return sorted(os.listdir('train_history/'), key=lambda x: os.path.getmtime(os.path.join('train_history', x)), reverse=True)[0]

if options.config_file:
    config_file = options.config_file
else:
    # config_file = get_most_recent_file_from_dir('train_history/')
    config_file = choose_file_with_stdin('train_history/')
print("Config file: {}".format(config_file))
cm = ConfigManager(config_file, folder='train_history')
cfg = cm.get_json()

model = get_model_and_load_weights(cfg)
compile_model(model, cfg['training'])

data = Data(options.test_path, cfg['training'], train_test='test')
print("Testing on {} samples".format(data.test_len))
data_gen = data.test_generator()

displayer = Display()
for frame_batch in data_gen:
    print('Frame batch')
    pred = model.predict_on_batch(frame_batch)
    # print pred
    H = np.reshape(pred, (3,3))
    print('Prediction:\n{}'.format(H))
    frame = np.array(frame_batch[0]*255).astype('uint8')

    warped = displayer.get_warp_overlay(frame, H, dsize=(300,300))
    displayer.display_vstacked(frame, warped, dsize=(600,600))
    # displayer.display_warp_overlay(frame, H, dsize=(600,600))
    print('done')
Code Example #22
from machine import I2C, Pin

from utils.display import Display


ON = 1
OFF = 0

LED = Pin(1, Pin.OUT)
DISPLAY = Display(I2C(sda=Pin(0), scl=Pin(2)))
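# Note: the LED and I2C pin numbers above are board-specific; adjust sda/scl
# (and the LED pin) for the target MicroPython board.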
Code Example #23
def main():
    args = get_args()

    # Read in intrinsic calibration
    load_config_1 = LoadConfig(
        'config/intrinsic_calib_{}_camera_1.npz'.format(args['model'].lower()),
        'calib_camera_1')
    intrinsic_1 = load_config_1.load()
    K_1 = intrinsic_1['camera_matrix']
    D_1 = intrinsic_1['dist_coeffs']
    load_config_2 = LoadConfig(
        'config/intrinsic_calib_{}_camera_2.npz'.format(args['model'].lower()),
        'calib_camera_2')
    intrinsic_2 = load_config_2.load()
    K_2 = intrinsic_2['camera_matrix']
    D_2 = intrinsic_2['dist_coeffs']

    # Read in extrinsic calibration
    load_config_e_1 = LoadConfig(
        'config/extrinsic_calib_{}_camera_1.npz'.format(args['model'].lower()),
        'extrinsic_camera_1')
    extrinsic_1 = load_config_e_1.load()
    R_1 = cv2.Rodrigues(extrinsic_1['rvec'])[0]
    T_1 = extrinsic_1['tvec']
    load_config_e_2 = LoadConfig(
        'config/extrinsic_calib_{}_camera_2.npz'.format(args['model'].lower()),
        'extrinsic_camera_2')
    extrinsic_2 = load_config_e_2.load()
    R_2 = cv2.Rodrigues(extrinsic_2['rvec'])[0]
    T_2 = extrinsic_2['tvec']

    # Setup video displays
    video_disp_1 = Display({'name': 'Camera_1'})
    video_disp_2 = Display({'name': 'Camera_2'})

    # Get input video
    video_1 = Video(args['video_1'])
    video_2 = Video(args['video_2'])

    # Get the first frame; to see
    # if video framework works
    frame_1 = video_1.next_frame()
    frame_2 = video_2.next_frame()

    # Original code was to multiprocess, but
    # found MACOSX doesn't like forking processes
    # with GUIs. However, retaining Array from
    # multiproc for future.
    shared_var = Array('d', [0, 0, 0])
    end_reached = Value('b', False)
    # visu = Process(target=visualize_table, args=(shared_var,))
    # visu.start()
    visu = Thread(target=visualize_table, args=(end_reached, shared_var))
    visu.daemon = True
    visu.start()

    # Setup the undistortion stuff

    if args['model'].upper() == 'P':
        # Hardcoded image size as
        # this is a test script
        img_size = (1920, 1080)

        # First create scaled intrinsics because we will undistort
        # into region beyond original image region
        new_K_1 = cv2.getOptimalNewCameraMatrix(K_1, D_1, img_size, 0.35)[0]
        new_K_2 = cv2.getOptimalNewCameraMatrix(K_2, D_2, img_size, 0.35)[0]

        # Then calculate new image size according to the scaling
        # Unfortunately the Python API doesn't directly provide
        # the new image size. They forgot?
        new_img_size_1 = (int(img_size[0] + (new_K_1[0, 2] - K_1[0, 2])),
                          int(img_size[1] + (new_K_1[1, 2] - K_1[1, 2])))
        new_img_size_2 = (int(img_size[0] + (new_K_2[0, 2] - K_2[0, 2])),
                          int(img_size[1] + (new_K_2[1, 2] - K_2[1, 2])))

        # Standard routine of creating a new rectification
        # map for the given intrinsics and mapping each
        # pixel onto the new map with linear interpolation
        map1_1, map2_1 = cv2.initUndistortRectifyMap(K_1, D_1, None, new_K_1,
                                                     new_img_size_1,
                                                     cv2.CV_16SC2)
        map1_2, map2_2 = cv2.initUndistortRectifyMap(K_2, D_2, None, new_K_2,
                                                     new_img_size_2,
                                                     cv2.CV_16SC2)

    elif args['model'].upper() == 'F':
        # Hardcoded image size
        img_size = (1920, 1080)
        # First create scaled intrinsics because we will undistort
        # into region beyond original image region. The alpha
        # parameter in pinhole model is equivalent to balance parameter here.
        new_K_1 = cv2.fisheye.estimateNewCameraMatrixForUndistortRectify(
            K_1, D_1, img_size, np.eye(3), balance=1)
        new_K_2 = cv2.fisheye.estimateNewCameraMatrixForUndistortRectify(
            K_2, D_2, img_size, np.eye(3), balance=1)

        # Then calculate new image size according to the scaling
        # Well if they forgot this in pinhole Python API,
        # can't complain about Fisheye model. Note the reversed
        # indexing here too.
        new_img_size_1 = (int(img_size[0] + (new_K_1[0, 2] - K_1[0, 2])),
                          int(img_size[1] + (new_K_1[1, 2] - K_1[1, 2])))
        new_img_size_2 = (int(img_size[0] + (new_K_2[0, 2] - K_2[0, 2])),
                          int(img_size[1] + (new_K_2[1, 2] - K_2[1, 2])))

        # Standard routine of creating a new rectification
        # map for the given intrinsics and mapping each
        # pixel onto the new map with linear interpolation
        map1_1, map2_1 = cv2.fisheye.initUndistortRectifyMap(
            K_1, D_1, np.eye(3), new_K_1, new_img_size_1, cv2.CV_16SC2)
        map1_2, map2_2 = cv2.fisheye.initUndistortRectifyMap(
            K_2, D_2, np.eye(3), new_K_2, new_img_size_2, cv2.CV_16SC2)

    # STUFF
    corr_threshold = -1
    radius_change_threshold = 5
    ball_image_file = 'ball_image.jpg'
    # will be used for histogram comparison
    ball_image = cv2.imread(ball_image_file)

    fgbg1 = cv2.createBackgroundSubtractorMOG2()
    fgbg2 = cv2.createBackgroundSubtractorMOG2()

    kernel = np.ones((6, 6), np.uint8)
    ball_position_frame1 = None
    ball_position_frame2 = None
    prev_frame1 = None
    prev_frame2 = None
    ball_wc = [0, 0, 0]

    while not video_1.end_reached() and not video_2.end_reached():

        frame_1 = video_1.next_frame()
        frame_2 = video_2.next_frame()

        img_undistorted_1 = cv2.remap(frame_1, map1_1, map2_1,
                                      cv2.INTER_LINEAR, cv2.BORDER_CONSTANT)
        img_undistorted_2 = cv2.remap(frame_2, map1_2, map2_2,
                                      cv2.INTER_LINEAR, cv2.BORDER_CONSTANT)

        # STUFF1
        frame_1_hsv = cv2.cvtColor(img_undistorted_1, cv2.COLOR_BGR2HSV)
        mask1 = cv2.inRange(frame_1_hsv, np.array((15, 190, 200)),
                            np.array((25, 255, 255)))
        fgmask1 = fgbg1.apply(img_undistorted_1)
        mask1_color_bgs = cv2.bitwise_and(mask1, mask1, mask=fgmask1)
        frame1_hsv_bgs = cv2.bitwise_and(frame_1_hsv,
                                         frame_1_hsv,
                                         mask=mask1_color_bgs)

        # opening
        b1 = cv2.morphologyEx(mask1_color_bgs, cv2.MORPH_OPEN, kernel)
        circles1 = cv2.HoughCircles(b1,
                                    cv2.HOUGH_GRADIENT,
                                    dp=3,
                                    minDist=2500,
                                    param1=300,
                                    param2=5,
                                    minRadius=3,
                                    maxRadius=30)
        if circles1 is not None:
            # convert the (x, y) coordinates and radius of the circles to integers
            circles1 = np.round(circles1[0, :]).astype("int")
            x, y, r = circles1[0]
            ball_position_frame1 = [x - r, y - r, 2 * r, 2 * r]
            # loop over the (x, y) coordinates and radius of the circles
        else:
            ball_position_frame1 = None

        mask_ball_radius1 = cv2.bitwise_and(fgmask1,
                                            fgmask1,
                                            mask=cv2.inRange(
                                                frame_1_hsv,
                                                np.array((10, 150, 180)),
                                                np.array((40, 255, 255))))

        # determine the correct radius
        if ball_position_frame1 is not None:
            x1, y1, w1, h1 = ball_position_frame1
            ball_crop_temp1 = mask_ball_radius1[(y1 + h1 // 2 -
                                                 30):(y1 + h1 // 2 + 30),
                                                (x1 + w1 // 2 -
                                                 30):(x1 + w1 // 2 + 30)]
            height, width = ball_crop_temp1.shape
            if height != 0 and width != 0:
                # successfully cropped image
                ball_crop1 = ball_crop_temp1
                cnts = cv2.findContours(ball_crop1.copy(), cv2.RETR_EXTERNAL,
                                        cv2.CHAIN_APPROX_SIMPLE)[-2]
                center = None
                if len(cnts) > 0:
                    # contour detected
                    c = max(cnts, key=cv2.contourArea)
                    rect = cv2.minAreaRect(c)
                    width, height = rect[1]
                    ball_position_frame1 = [
                        ball_position_frame1[0], ball_position_frame1[1],
                        min(width, height),
                        min(width, height)
                    ]

        prev_frame1 = img_undistorted_1

        if ball_position_frame1:
            x1, y1, w1, h1 = ball_position_frame1
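            # Back-project the camera 1 detection to 3D world coordinates:
            # depth from the apparent ball diameter (pinhole model), then
            # x and y from the pixel offsets about the principal point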
            pixels_per_mm = (K_1[0, 0] + K_1[1, 1]) / 2 / FOCAL_LENGTH
            z = PING_PONG_DIAMETER * FOCAL_LENGTH / (w1 / pixels_per_mm)
            x = ((x1 - K_1[0, 2]) / pixels_per_mm) * z / FOCAL_LENGTH
            y = ((y1 - K_1[1, 2]) / pixels_per_mm) * z / FOCAL_LENGTH

            ball_cc1 = np.array([x, y, z]) / 1000
            ball_wc1 = np.dot(R_1.T, ball_cc1 - T_1.ravel())

        # STUFF2
        frame_2_hsv = cv2.cvtColor(img_undistorted_2, cv2.COLOR_BGR2HSV)
        mask2 = cv2.inRange(frame_2_hsv, np.array((15, 190, 200)),
                            np.array((25, 255, 255)))
        fgmask2 = fgbg2.apply(img_undistorted_2)
        mask2_color_bgs = cv2.bitwise_and(mask2, mask2, mask=fgmask2)
        frame2_hsv_bgs = cv2.bitwise_and(frame_2_hsv,
                                         frame_2_hsv,
                                         mask=mask2_color_bgs)

        # opening
        b2 = cv2.morphologyEx(mask2_color_bgs, cv2.MORPH_OPEN, kernel)
        circles2 = cv2.HoughCircles(b2,
                                    cv2.HOUGH_GRADIENT,
                                    dp=3,
                                    minDist=2500,
                                    param1=300,
                                    param2=5,
                                    minRadius=3,
                                    maxRadius=30)
        if circles2 is not None:
            # convert the (x, y) coordinates and radius of the circles to integers
            circles2 = np.round(circles2[0, :]).astype("int")
            x, y, r = circles2[0]
            ball_position_frame2 = [x - r, y - r, 2 * r, 2 * r]
            # loop over the (x, y) coordinates and radius of the circles
        else:
            ball_position_frame2 = None

        mask_ball_radius2 = cv2.bitwise_and(fgmask2,
                                            fgmask2,
                                            mask=cv2.inRange(
                                                frame_2_hsv,
                                                np.array((10, 150, 180)),
                                                np.array((40, 255, 255))))

        # determine the correct radius
        if ball_position_frame2 is not None:
            x2, y2, w2, h2 = ball_position_frame2
            ball_crop_temp2 = mask_ball_radius2[(y2 + h2 // 2 -
                                                 30):(y2 + h2 // 2 + 30),
                                                (x2 + w2 // 2 -
                                                 30):(x2 + w2 // 2 + 30)]
            height, width = ball_crop_temp2.shape
            if height != 0 and width != 0:
                # successfully cropped image
                ball_crop2 = ball_crop_temp2
                cnts = cv2.findContours(ball_crop2.copy(), cv2.RETR_EXTERNAL,
                                        cv2.CHAIN_APPROX_SIMPLE)[-2]
                center = None
                if len(cnts) > 0:
                    # contour detected
                    c = max(cnts, key=cv2.contourArea)
                    rect = cv2.minAreaRect(c)
                    width, height = rect[1]
                    ball_position_frame2 = [
                        ball_position_frame2[0], ball_position_frame2[1],
                        min(width, height),
                        min(width, height)
                    ]

        prev_frame2 = img_undistorted_2

        if ball_position_frame2:
            x2, y2, w2, h2 = ball_position_frame2
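            # Same back-projection for camera 2, then rotated into the
            # shared world frame below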
            pixels_per_mm = (K_2[0, 0] + K_2[1, 1]) / 2 / FOCAL_LENGTH
            z = PING_PONG_DIAMETER * FOCAL_LENGTH / (w2 / pixels_per_mm)
            x = ((x2 - K_2[0, 2]) / pixels_per_mm) * z / FOCAL_LENGTH
            y = ((y2 - K_2[1, 2]) / pixels_per_mm) * z / FOCAL_LENGTH

            ball_cc2 = np.array([x, y, z]) / 1000
            ball_wc2 = np.dot(R_2.T, ball_cc2 - T_2.ravel())

            # Additional rotation for absolute coordinates
            R = np.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]])
            ball_wc2 = np.dot(R, ball_wc2)

        # Combine STUFF1 and STUFF2
        # If positions from either available,
        # update as predicted from either.
        # But if both available, predict the average
        if ball_position_frame1 and ball_position_frame2:
            ball_wc = (ball_wc1 + ball_wc2) / 2
        elif ball_position_frame1:
            ball_wc = ball_wc1
        elif ball_position_frame2:
            ball_wc = ball_wc2

        # print 'Ball IC', np.array([x2, y2]), 'Dia', w2
        # print 'Ball CC', ball_cc2
        # print 'Ball WC', ball_wc2

        shared_var[0] = ball_wc[0]
        shared_var[1] = ball_wc[1]
        shared_var[2] = ball_wc[2]

        # Update GUI with new image
        video_disp_1.refresh(img_undistorted_1)
        video_disp_2.refresh(img_undistorted_2)

        # Add quitting event
        if video_disp_2.can_quit() or video_disp_1.can_quit():
            break

    end_reached.value = True

    visu.join()
Code Example #24
def main():
    args = get_args()

    # Read in intrinsic calibration
    load_config = LoadConfig(
        'config/intrinsic_calib_{}.npz'.format(args['model'].lower()), 'calib')
    calib = load_config.load()

    # Read in extrinsic calibration
    load_config_e = LoadConfig('config/extrinsic_calib_p_camera_1.npz',
                               'extrinsics')
    extrinsics = load_config_e.load()

    # Setup video display
    video_disp = Display({'name': 'Video'})

    # Get input video
    video = Video(args['video'])
    num_frames = video.get_num_frames()

    # Get the first frame; to see
    # if video framework works
    frame = video.next_frame()

    shared_var = Array('d', [0, 0, 0])
    visu = Process(target=visualize_table, args=(shared_var, ))
    visu.start()

    # Setup the undistortion stuff

    if args['model'].upper() == 'P':
        # Hardcoded image size as
        # this is a test script
        img_size = (1920, 1080)

        # First create scaled intrinsics because we will undistort
        # into region beyond original image region
        new_calib_matrix, _ = cv2.getOptimalNewCameraMatrix(
            calib['camera_matrix'], calib['dist_coeffs'], img_size, 0.35)
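        # (The fourth argument is the free scaling parameter alpha: 0 keeps
        # only valid pixels, 1 keeps all source pixels; 0.35 is a compromise.)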

        # Then calculate new image size according to the scaling
        # Unfortunately the Python API doesn't directly provide
        # the new image size. They forgot?
        new_img_size = (
            int(img_size[0] +
                (new_calib_matrix[0, 2] - calib['camera_matrix'][0, 2])),
            int(img_size[1] +
                (new_calib_matrix[1, 2] - calib['camera_matrix'][1, 2])))
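        # (Heuristic: grow the canvas by the shift of the principal point so
        # the content pushed outside the original frame still fits.)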

        # Standard routine of creating a new rectification
        # map for the given intrinsics and mapping each
        # pixel onto the new map with linear interpolation
        map1, map2 = cv2.initUndistortRectifyMap(calib['camera_matrix'],
                                                 calib['dist_coeffs'], None,
                                                 new_calib_matrix,
                                                 new_img_size, cv2.CV_16SC2)

    elif args['model'].upper() == 'F':
        # Hardcoded image size
        img_size = (1920, 1080)
        # First create scaled intrinsics because we will undistort
        # into region beyond original image region. The alpha
        # parameter in pinhole model is equivalent to balance parameter here.
        new_calib_matrix = cv2.fisheye.estimateNewCameraMatrixForUndistortRectify(
            calib['camera_matrix'],
            calib['dist_coeffs'],
            img_size,
            np.eye(3),
            balance=1)

        # Then calculate new image size according to the scaling
        # Well, if they forgot this in the pinhole Python API,
        # we can't complain about the fisheye module. Note the
        # reversed indexing here too.
        new_img_size = (
            int(img_size[0] +
                (new_calib_matrix[0, 2] - calib['camera_matrix'][0, 2])),
            int(img_size[1] +
                (new_calib_matrix[1, 2] - calib['camera_matrix'][1, 2])))

        # Standard routine of creating a new rectification
        # map for the given intrinsics and mapping each
        # pixel onto the new map with linear interpolation
        map1, map2 = cv2.fisheye.initUndistortRectifyMap(
            calib['camera_matrix'], calib['dist_coeffs'], np.eye(3),
            new_calib_matrix, new_img_size, cv2.CV_16SC2)

    # Ball detection and tracking setup
    corr_threshold = -1
    radius_change_threshold = 5
    ball_image_file = 'ball_image.jpg'
    # will be used for histogram comparison
    ball_image = cv2.imread(ball_image_file)

    fgbg2 = cv2.createBackgroundSubtractorMOG2()
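    # MOG2 keeps a per-pixel mixture-of-Gaussians background model;
    # apply() on each frame returns a foreground mask of moving pixels.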

    kernel = np.ones((6, 6), np.uint8)
    ball_position_frame2 = None
    prev_frame2 = None

    # Get the rotation matrix
    R = cv2.Rodrigues(extrinsics['rvec'])[0]
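    # cv2.Rodrigues converts the 3x1 rotation vector from the extrinsic
    # calibration into the equivalent 3x3 rotation matrix
    # (index [0] drops the returned Jacobian).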

    while not video.end_reached():

        frame = video.next_frame()
        img_undistorted = cv2.remap(frame, map1, map2, cv2.INTER_LINEAR,
                                    cv2.BORDER_CONSTANT)

        # Detect the ball: combine an HSV color threshold
        # with the foreground mask from background subtraction
        img_hsv = cv2.cvtColor(img_undistorted, cv2.COLOR_BGR2HSV)
        mask2 = cv2.inRange(img_hsv, np.array((15, 190, 200)),
                            np.array((25, 255, 255)))
        fgmask2 = fgbg2.apply(img_undistorted)
        mask2_color_bgs = cv2.bitwise_and(mask2, mask2, mask=fgmask2)
        frame2_hsv_bgs = cv2.bitwise_and(img_hsv,
                                         img_hsv,
                                         mask=mask2_color_bgs)

        frame_hsv = img_hsv
        frame_gray = cv2.cvtColor(img_undistorted, cv2.COLOR_BGR2GRAY)
        mask = cv2.inRange(frame_hsv, np.array((10, 150, 150)),
                           np.array((40, 255, 255)))
        open_mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
        frame_thresholded_opened_gray = cv2.bitwise_and(frame_gray,
                                                        frame_gray,
                                                        mask=open_mask)
        frame_thresholded_opened_gray_smoothed = cv2.GaussianBlur(
            frame_thresholded_opened_gray, (11, 11), 0)
        # Re-threshold the smoothed gray image and apply a
        # morphological opening to the color+motion mask
        a = cv2.inRange(frame_thresholded_opened_gray_smoothed, 10, 256)
        b = cv2.morphologyEx(mask2_color_bgs, cv2.MORPH_OPEN, kernel)
        circles = cv2.HoughCircles(b,
                                   cv2.HOUGH_GRADIENT,
                                   dp=3,
                                   minDist=2500,
                                   param1=300,
                                   param2=5,
                                   minRadius=3,
                                   maxRadius=30)
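        # HOUGH_GRADIENT parameters: dp is the inverse ratio of the
        # accumulator resolution to the image resolution, minDist the
        # minimum distance between detected centers, param1 the upper
        # Canny edge threshold, and param2 the accumulator threshold
        # (lower values yield more, possibly spurious, circles).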
        if circles is not None:
            # convert the (x, y) coordinates and radius of the circles to integers
            circles = np.round(circles[0, :]).astype("int")
            # Take the first detected circle as the ball candidate and
            # store it as a bounding box [x, y, w, h]
            x, y, r = circles[0]
            ball_position_frame2 = [x - r, y - r, 2 * r, 2 * r]
        else:
            ball_position_frame2 = None

        frame2 = img_undistorted

        mask2_ball_radius = cv2.bitwise_and(fgmask2,
                                            fgmask2,
                                            mask=cv2.inRange(
                                                img_hsv,
                                                np.array((10, 150, 180)),
                                                np.array((40, 255, 255))))
        if ball_position_frame2 is not None:
            x2, y2, w2, h2 = ball_position_frame2
            # Crop a 60x60 window around the ball center; cast to int
            # because w2/h2 may be floats after the minAreaRect refinement
            cy2, cx2 = int(y2 + h2 // 2), int(x2 + w2 // 2)
            ball_crop_temp = mask2_ball_radius[(cy2 - 30):(cy2 + 30),
                                               (cx2 - 30):(cx2 + 30)]
            ball_crop_color = frame2[(cy2 - 30):(cy2 + 30),
                                     (cx2 - 30):(cx2 + 30)]
            height, width = ball_crop_temp.shape
        else:
            ball_crop_temp = []
            height = 0
            width = 0

        if height != 0 and width != 0:
            # Successfully cropped image; refine the ball size from the
            # largest contour's minimum-area bounding rectangle
            ball_crop = ball_crop_temp

            cnts = cv2.findContours(ball_crop.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)[-2]
            center = None

            if len(cnts) > 0 and ball_position_frame2:
                c = max(cnts, key=cv2.contourArea)
                rect = cv2.minAreaRect(c)
                width, height = rect[1]
                box = cv2.boxPoints(rect)
                box = np.int0(box)
                cv2.drawContours(ball_crop_color, [box], 0, (0, 0, 255), 2)
                ball_position_frame2 = [
                    ball_position_frame2[0], ball_position_frame2[1],
                    min(width, height),
                    min(width, height)
                ]

        prev_frame2 = frame2

        # print ball_position_frame2
        if ball_position_frame2:
            x2, y2, w2, h2 = ball_position_frame2
            # x = (x2 - 960) / PIXELS_PER_MM
            # y = (y2 - 540) / PIXELS_PER_MM
            pixels_per_mm = (new_calib_matrix[0, 0] +
                             new_calib_matrix[1, 1]) / 2 / FOCAL_LENGTH
            z = PING_PONG_DIAMETER * FOCAL_LENGTH / (w2 / pixels_per_mm)
            x = ((x2 - new_calib_matrix[0, 2]) /
                 pixels_per_mm) * z / FOCAL_LENGTH
            y = ((y2 - new_calib_matrix[1, 2]) /
                 pixels_per_mm) * z / FOCAL_LENGTH

            ball_cc = np.array([x, y, z]) / 1000
            #ball_wc = np.dot(R.T, extrinsics['tvec'].ravel() - ball_cc)
            ball_wc = np.dot(R.T, ball_cc - extrinsics['tvec'].ravel())

            print('Ball IC', np.array([x2, y2]), 'Dia', w2)
            print('Ball CC', ball_cc)
            print('Ball WC', ball_wc)

            shared_var[0] = ball_wc[0]
            shared_var[1] = ball_wc[1]
            shared_var[2] = ball_wc[2]

        # Update GUI with new image

        video_disp.refresh(frame2)

        #print "Pixels", ball_position_frame2

        # Add quitting event
        if video_disp.can_quit():
            break

    global main_process_end_reached
    main_process_end_reached = True
    visu.join()
コード例 #25
0
def main():
    args = get_args()

    # Read in configuration
    load_config = LoadConfig('new_calib_{}.npz'.format(args['model'].lower()),
                             'calib')
    calib = load_config.load()

    # Setup video displays
    video_disp = Display({'name': 'Video'})

    # Setup controls
    setup_trackbars('Controls')  # , thresholds)

    # Get input video
    video = Video(args['video'])
    num_frames = video.get_num_frames()

    # Get the first frame to start with
    frame = video.next_frame()

    global seek_callback_action

    while True:
        if play_or_pause == 'Play':
            if not seek_callback_action:
                frame = video.next_frame()
            else:
                frame = video.get_frame(cur_seek_pos * num_frames / 100)
                seek_callback_action = False

        if video.end_reached():
            # Wait indefinitely if end of video reached
            # Or until keypress and then exit
            cv2.waitKey(0)
            break

        # Undistort according to pinhole model
        if args['model'].upper() == 'P':
            # Make sure distortion coefficients
            # follow the pinhole model
            if calib['dist_coeffs'].shape[1] != 5:
                print('Input configuration probably not pinhole')
                return

            # Hardcoded image size as
            # this is a test script
            img_size = (1920, 1080)

            # First create scaled intrinsics because we will undistort
            # into region beyond original image region
            new_calib_matrix, _ = cv2.getOptimalNewCameraMatrix(
                calib['camera_matrix'], calib['dist_coeffs'], img_size, 0.35)

            # Then calculate new image size according to the scaling
            # Unfortunately the Python API doesn't directly provide
            # the new image size. They forgot?
            new_img_size = (
                int(img_size[0] +
                    (new_calib_matrix[0, 2] - calib['camera_matrix'][0, 2])),
                int(img_size[1] +
                    (new_calib_matrix[1, 2] - calib['camera_matrix'][1, 2])))

            # Standard routine of creating a new rectification
            # map for the given intrinsics and mapping each
            # pixel onto the new map with linear interpolation
            map1, map2 = cv2.initUndistortRectifyMap(calib['camera_matrix'],
                                                     calib['dist_coeffs'],
                                                     None, new_calib_matrix,
                                                     new_img_size,
                                                     cv2.CV_16SC2)
            img_undistorted = cv2.remap(frame, map1, map2, cv2.INTER_LINEAR,
                                        cv2.BORDER_CONSTANT)

        # Undistort according to fisheye model
        elif args['model'].upper() == 'F':
            # Make sure distortion coefficients
            # follow the fisheye model
            if calib['dist_coeffs'].shape[0] != 4:
                print('Input configuration probably not fisheye')
                return

            # Hardcoded image size as
            # this is a test script.
            # As already ranted before
            # someone messed with the image
            # size indexing and reversed it.
            img_size = (1920, 1080)

            # Also, the basic undistortion DOES NOT work
            # with the fisheye module
            # img_undistorted = cv2.fisheye.undistortImage(
            #   frame, calib['camera_matrix'], calib['dist_coeffs'])

            # First create scaled intrinsics because we will undistort
            # into region beyond original image region. The alpha
            # parameter in pinhole model is equivalent to balance parameter here.
            new_calib_matrix = cv2.fisheye.estimateNewCameraMatrixForUndistortRectify(
                calib['camera_matrix'],
                calib['dist_coeffs'],
                img_size,
                np.eye(3),
                balance=1)

            # Then calculate new image size according to the scaling
            # Well, if they forgot this in the pinhole Python API,
            # we can't complain about the fisheye module. Note the
            # reversed indexing here too.
            new_img_size = (
                int(img_size[0] +
                    (new_calib_matrix[0, 2] - calib['camera_matrix'][0, 2])),
                int(img_size[1] +
                    (new_calib_matrix[1, 2] - calib['camera_matrix'][1, 2])))

            # Standard routine of creating a new rectification
            # map for the given intrinsics and mapping each
            # pixel onto the new map with linear interpolation
            map1, map2 = cv2.fisheye.initUndistortRectifyMap(
                calib['camera_matrix'], calib['dist_coeffs'], np.eye(3),
                new_calib_matrix, new_img_size, cv2.CV_16SC2)
            img_undistorted = cv2.remap(frame, map1, map2, cv2.INTER_LINEAR,
                                        cv2.BORDER_CONSTANT)

        # Update GUI with new image
        video_disp.refresh(img_undistorted)

        # Service the s key to save image
        if video_disp.key_pressed('s'):
            cur_frame_num = video.get_cur_frame_num()
            orig_img_file_name = 'image_for_markers_orig.png'
            undistorted_img_file_name = 'image_for_markers_undistorted.png'
            if cv2.imwrite(orig_img_file_name, frame):
                print('Saved original {} at frame {}'.format(
                    orig_img_file_name, cur_frame_num))
            if cv2.imwrite(undistorted_img_file_name, img_undistorted):
                print('Saved undistorted {} at frame {}'.format(
                    undistorted_img_file_name, cur_frame_num))

        # Add quitting event
        if video_disp.can_quit():
            break
コード例 #26
0
def main():
    args = get_args()

    # Read in configuration
    load_config = LoadConfig('config/thresholds.npz', 'thresholds')
    thresholds = load_config.load()

    # Setup video displays
    orig_video_disp = Display({'name': 'Original_Video'})
    processed_video_disp = Display({'name': 'Processed_Video'})

    # Setup controls
    setup_trackbars(controls_window_name, thresholds)

    # Get input video
    video = Video(args['video'])
    num_frames = video.get_num_frames()

    # Get the first frame to start with
    frame = video.next_frame()

    global seek_callback_action

    while True:
        if play_or_pause == 'Play':
            if not seek_callback_action:
                frame = video.next_frame()
            else:
                frame = video.get_frame(cur_seek_pos * num_frames / 100)
                seek_callback_action = False

        if video.end_reached():
            # Wait indefinitely if end of video reached
            # Or until keypress and then exit
            cv2.waitKey(0)
            break

        # Refresh original video display
        orig_video_disp.refresh(frame)

        # Get threshold values
        h_min, h_max, s_min, s_max, v_min, v_max, erode_size, dilate_size = get_params(
            controls_window_name)

        # Convert image to HSV and apply threshold
        frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        frame_thresh = cv2.inRange(frame_hsv, (h_min, s_min, v_min),
                                   (h_max, s_max, v_max))

        # Apply erosion
        # Create a kernel first and then apply kernel
        erode_kernel = cv2.getStructuringElement(
            cv2.MORPH_ELLIPSE, (erode_size + 1, erode_size + 1))
        frame_erode = cv2.erode(frame_thresh, erode_kernel)

        # Apply dilate
        # Create a kernel first and then apply kernel
        dilate_kernel = cv2.getStructuringElement(
            cv2.MORPH_ELLIPSE, (dilate_size + 1, dilate_size + 1))
        frame_dilate = cv2.dilate(frame_erode, dilate_kernel)
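        # Erosion followed by dilation is a morphological opening:
        # it removes specks smaller than the kernel while roughly
        # preserving the size of the surviving blobs.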

        # Refresh thresholded video display
        processed_video_disp.refresh(frame_dilate)

        # Add quitting event
        if orig_video_disp.can_quit() or processed_video_disp.can_quit():
            break

    # On quit, save the params
    save_config = SaveConfig('new_erode_dilate', 'erode_dilate')
    save_config.save(dilate_size=dilate_size, erode_size=erode_size)
コード例 #27
0
def main():
    args = get_args()

    # Read in intrinsic calibration configuration
    if not os.path.exists('config/intrinsic_calib_{}.npz'.format(
            args['model'].lower())):
        print('First perform intrinsic calibration and save it in config')
        return
    load_config = LoadConfig(
        'config/intrinsic_calib_{}.npz'.format(args['model'].lower()), 'calib')
    calib = load_config.load()

    # TODO: Error handling if image not readable by OpenCV
    img = cv2.imread(args['image'])

    # Threshold image to isolate the markers
    # put in by the user at the "standard" positions
    img_markers = cv2.inRange(
        img, (MARKER_B_MIN_THRESH, MARKER_G_MIN_THRESH, MARKER_R_MIN_THRESH),
        (MARKER_B_MAX_THRESH, MARKER_G_MAX_THRESH, MARKER_R_MAX_THRESH))

    # Detect the circular markers
    circles = cv2.HoughCircles(img_markers,
                               cv2.HOUGH_GRADIENT,
                               1.5,
                               10,
                               param1=150,
                               param2=15,
                               minRadius=5,
                               maxRadius=35)

    # Draw circles for visualization
    if circles is not None:
        # Make 'circles' easy to index
        circles = circles[0, :]

        # Arrange circles X-coordinates - left to right
        # to match the TABLE markers scheme
        circles = circles[circles[:, 0].argsort()]

        # Check if we have exactly 4 markers
        if circles.shape[0] != 4:
            print('Number of markers not 4')
            return

        for (xx, yy, rr) in circles:
            # Convert to ints
            x, y, r = int(xx), int(yy), int(rr)

            # Draw circle outlines
            cv2.circle(img, (x, y), r, (0, 0, 0), 2)

            # Draw dots at their centers
            cv2.rectangle(img, (x - 2, y - 2), (x + 2, y + 2), (0, 128, 255),
                          -1)
    else:
        # Without detected markers we cannot solve for the extrinsics below
        print('No markers detected')
        return

    # Find scaled intrinsics according to pinhole model
    if args['model'].upper() == 'P':
        # Make sure distortion coefficients
        # follow the pinhole model
        if calib['dist_coeffs'].shape[1] != 5:
            print('Input configuration probably not pinhole')
            return

        if not args['distorted']:
            # NOTE: Hardcoded image size
            img_size = (1920, 1080)

            # First create scaled intrinsics because we will undistort
            # into region beyond original image region
            new_calib_matrix = cv2.getOptimalNewCameraMatrix(
                calib['camera_matrix'], calib['dist_coeffs'], img_size,
                0.35)[0]

            # If we have undistorted image, we don't
            # need to use distortion model for projection
            new_dist_coeffs = np.zeros((5, 1))
        else:
            new_calib_matrix = calib['camera_matrix']
            new_dist_coeffs = calib['dist_coeffs']

    # Find scaled intrinsics according to fisheye model
    elif args['model'].upper() == 'F':
        # If distorted image supplied,
        # no support currently.
        if args['distorted']:
            print('Please supply an undistorted image for the fisheye model')
            return

        # Make sure distortion coefficients
        # follow the fisheye model
        if calib['dist_coeffs'].shape[0] != 4:
            print('Input configuration probably not fisheye')
            return

        # Hardcoded image size as
        # this is a test script.
        # TODO: Change this later.
        img_size = (1920, 1080)

        # Create scaled intrinsics. The alpha parameter
        # in pinhole model is equivalent to balance parameter here.
        new_calib_matrix = cv2.fisheye.estimateNewCameraMatrixForUndistortRectify(
            calib['camera_matrix'],
            calib['dist_coeffs'],
            img_size,
            np.eye(3),
            balance=1)

        # If we have undistorted image, we don't
        # need to use distortion model for projection
        new_dist_coeffs = np.zeros((5, 1))

    # Solve a point-to-point correspondence between
    # the ideal marker points in the 3D world and their
    # projections as obtained from the user.
    # NOTE: Found out the hard way that solvePnP
    # unfortunately requires the image and object
    # points to be contiguous elements in memory.
    # This probably has to do with the method using
    # some old-style pointers internally.
    obj_points = np.ascontiguousarray(TABLE_MARKERS.reshape((-1, 1, 3)))
    img_points = np.ascontiguousarray(circles[:, :2].reshape((-1, 1, 2)))

    # RANSAC achieves better performance than DLT + LM
    # as the markers in the TABLE might be noisy
    # rvecs, tvecs = cv2.solvePnP(
    #     obj_points, img_points, calib['camera_matrix'], calib['dist_coeffs'])[1:]
    rvecs, tvecs = cv2.solvePnPRansac(obj_points, img_points, new_calib_matrix,
                                      new_dist_coeffs)[1:3]
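    # solvePnPRansac returns (retval, rvec, tvec, inliers); slicing [1:3]
    # keeps the Rodrigues rotation vector and the translation vector.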

    # Project the origin of the table back
    # on to imager according to the found
    # extrinsics rotation and translation
    projected_points = cv2.projectPoints(AXIS_AT_ORIGIN, rvecs, tvecs,
                                         new_calib_matrix, new_dist_coeffs)[0]
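    # projectPoints maps the 3D axis points through the found extrinsics
    # and the intrinsics back onto the image ([0] keeps the projected
    # points and drops the Jacobian).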

    # Draw the axes at the origin on to image
    img = cv2.line(img, tuple(projected_points[0].ravel().astype(int)),
                   tuple(projected_points[3].ravel().astype(int)), (255, 0, 0),
                   5)
    img = cv2.line(img, tuple(projected_points[1].ravel().astype(int)),
                   tuple(projected_points[3].ravel().astype(int)), (0, 255, 0),
                   5)
    img = cv2.line(img, tuple(projected_points[2].ravel().astype(int)),
                   tuple(projected_points[3].ravel().astype(int)), (0, 0, 255),
                   5)

    # Display the image with detected markers
    # and the projected axis.
    img_display = Display({'name': 'Image'})
    img_display.refresh(img)

    # Save extrinsics in the current folder
    save_config = SaveConfig('new_extrinsics', 'extrinsics')
    save_config.save(rvec=rvecs, tvec=tvecs)

    # Wait indefinitely
    cv2.waitKey(0)