def step_impl(context, switch):
    """Toggle closed captions on/off from the playback progress bar."""
    assert switch in ("on", "off")
    press(context, keys.KEY_DOWN, 1)  # to show progress bar
    cam = context.cam
    context.frame = get_frame_name(context, f"{WORK_DIR}/_frame.png")
    assert 200 == cam.get_frame(context.frame)
    region = REGION_PROGRESS_BAR
    frame = cv2.imread(context.frame)
    mp = (3, 0.8, *get_default_match_parameter()[2:])  # use match method 3 and threshold 0.8
    if switch == "off":
        debug(f"match {playback_soft_buttons['cc off unhilite'][1]}")
        if match(frame,
                 cv2.imread(playback_soft_buttons["cc off unhilite"][1]),
                 region,
                 match_parameter=mp)[0] is True:
            debug(f"closed caption is already {switch}\n\n")
            return
    elif switch == "on":
        debug(f"match {playback_soft_buttons['cc on unhilite'][1]}")
        if match(frame,
                 cv2.imread(playback_soft_buttons["cc on unhilite"][1]),
                 region,
                 match_parameter=mp)[0] is True:
            debug(f"closed caption is already {switch}")
            return
    debug(f"navigate_to {switch}\n\n")
    if navigate_to(context, "cc on" if switch == "off" else "cc off"):
        press(context, keys.KEY_SELECT, 1)
        # press(context, keys.KEY_UP, 1)
        press(context, keys.KEY_SELECT, 1)
        return
    assert False, f"fail to switch {switch} closed caption"

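# The steps below repeatedly build a custom match_parameter tuple by replacing
# the first two entries of get_default_match_parameter(). Assuming those first
# two entries really are (method, threshold) -- an inference from the inline
# comments, not something confirmed here -- a small hypothetical helper like
# the sketch below could replace the copy-pasted tuple splicing:
def make_match_parameter(method, threshold):
    """Hypothetical sketch: override the match method and threshold while
    keeping the remaining default match parameters unchanged."""
    return (method, threshold, *get_default_match_parameter()[2:])
# e.g. mp = make_match_parameter(3, 0.8)
#      instead of mp = (3, 0.8, *get_default_match_parameter()[2:])
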
def step_impl(context, screen):
    assert screen in MENU_SCREEN_TABS, f"error: unknown {screen} tab on the menu page"
    if screen == MENU_SCREEN_TABS[0]:
        key = keys.KEY_LEFT
        max_try = 2
    else:
        max_try = len(MENU_SCREEN_TABS) - 1
        key = keys.KEY_RIGHT
    goto_vudu_home(context)  # we are in menu screen now
    # check current selection is on spotlight
    cam = context.cam
    while max_try:
        context.frame = get_frame_name(context, f"{WORK_DIR}/_frame.png")
        assert 200 == cam.get_frame(path=context.frame)
        text_in_highlight, region = find_selection_text(
            cv2.imread(context.frame),
            cv2.imread(MAIN_MENU_SEL_LEFT),
            cv2.imread(MAIN_MENU_SEL_RIGHT),
            x_offset=10,
            y_offset=5,
            region=Region(0, 0, 1000, 80),
            match_parameter=get_default_match_parameter())
        debug(f"current selection is {text_in_highlight} at {region}")
        if text_in_highlight is not None and screen.lower() in text_in_highlight.lower():
            debug(f"found {screen}")
            return
        press(context, key, 1)
        max_try -= 1
    assert False, f"fail to go to {screen} tab on the menu page"

def find_video_quality_on_progress_bar(context):
    """Return the video quality ("uhd", "hdx", "sd") shown on the progress bar, or None."""
    cam = context.cam
    context.frame = get_frame_name(context, f"{WORK_DIR}/_frame.png")
    assert 200 == cam.get_frame(context.frame)
    frame = cv2.imread(context.frame)
    mp = (3, 0.8, *get_default_match_parameter()[2:])  # use match method 3 and threshold 0.8
    video_quality = None
    found_uhd = match(frame,
                      cv2.imread(playback_soft_buttons["uhd unhilite"][1]),
                      REGION_PROGRESS_BAR,
                      match_parameter=mp)[0]
    if found_uhd is True:
        video_quality = "uhd"
        debug("found uhd")
    found_hdx = match(frame,
                      cv2.imread(playback_soft_buttons["hdx unhilite"][1]),
                      REGION_PROGRESS_BAR,
                      match_parameter=mp)[0]
    if found_hdx is True:
        video_quality = "hdx"
        debug("found hdx")
    found_sd = match(frame,
                     cv2.imread(playback_soft_buttons["sd unhilite"][1]),
                     REGION_PROGRESS_BAR,
                     match_parameter=mp)[0]
    if found_sd is True:
        video_quality = "sd"
        debug("found sd")
    return video_quality

def step_impl(context):
    """Verify that the family play button (on or off) is shown on the progress bar."""
    press(context, keys.KEY_DOWN, 1)  # to show progress bar
    cam = context.cam
    context.frame = get_frame_name(context, f"{WORK_DIR}/_frame.png")
    assert 200 == cam.get_frame(context.frame)
    frame = cv2.imread(context.frame)
    mp = (0, 0.8, *get_default_match_parameter()[2:])  # use match method 0 and threshold 0.8
    assert os.path.exists(playback_soft_buttons["family play off unhilite"][1])
    found = match(frame,
                  cv2.imread(playback_soft_buttons["family play off unhilite"][1]),
                  REGION_PROGRESS_BAR,
                  match_parameter=mp)
    if found[0] is True:
        debug("found family play off\n")
        return
    assert os.path.exists(playback_soft_buttons["family play on unhilite"][1])
    found = match(frame,
                  cv2.imread(playback_soft_buttons["family play on unhilite"][1]),
                  REGION_PROGRESS_BAR,
                  match_parameter=mp)
    if found[0] is True:
        debug("found family play on\n")
        return
    debug("fail to find family play on the progress bar\n")
    assert False, "fail to find family play on the progress bar"

def step_impl(context, tab):
    assert tab in settings_screen_tabs, f"error: {tab} is an unknown screen"
    cam = context.cam
    context.frame = get_frame_name(context, f"{WORK_DIR}/_frame.png")
    assert 200 == cam.get_frame(path=context.frame)
    text_in_highlight, region = find_selection_text(
        cv2.imread(context.frame),
        cv2.imread(SETTINGS_MENU_SEL),
        None,
        x_offset=20,
        region=Region(50, 170, 400, 540),
        match_parameter=get_default_match_parameter())
    debug(f"current selection is {text_in_highlight} at {region}")
    assert fuzzy_match(tab, text_in_highlight), f"{tab} selection not found"

def step_impl(context, tab):
    assert tab in MENU_SCREEN_TABS, f"error: {tab} is an unknown screen"
    cam = context.cam
    context.frame = get_frame_name(context, f"{WORK_DIR}/_frame.png")
    assert 200 == cam.get_frame(path=context.frame)
    text_in_highlight, region = find_selection_text(
        cv2.imread(context.frame),
        cv2.imread(MAIN_MENU_SEL_LEFT),
        cv2.imread(MAIN_MENU_SEL_RIGHT),
        region=Region(0, 0, 1000, 80),
        match_parameter=get_default_match_parameter())
    debug(f"current selection is {text_in_highlight} at {region}")
    assert tab.lower() in text_in_highlight.lower(), f"{tab} selection not found"

def find_selection_text(frame,
                        left_bk,
                        right_bk=None,
                        x_offset=0,
                        y_offset=5,
                        region=Region(x=0, y=0, right=1280, bottom=720),
                        convert_to_grayscale=True,
                        match_parameter=None):
    """Locate the highlight bracket(s) in frame and OCR the text between them.
    Returns (text, region), or (None, None) if the brackets are not found."""
    print(f"find_selection_text: region {region}")
    debug("search for left bracket")
    if match_parameter is None:
        match_parameter = get_default_match_parameter()
    l_result = match(frame, left_bk, region=region, match_parameter=match_parameter)
    if l_result[0] is False:
        debug("Error: fail to search left bracket")
        return None, None
    if right_bk is None:
        region = Region(x=l_result[1].x + x_offset,
                        y=l_result[1].y + y_offset,
                        right=l_result[1].right - x_offset,
                        bottom=l_result[1].bottom - y_offset)
    else:
        debug("search for right bracket")
        r_result = match(frame, right_bk, region=region, match_parameter=match_parameter)
        if r_result[0] is False:
            debug("Error: fail to search right bracket")
            return None, None
        if l_result[1].right + x_offset > r_result[1].x - x_offset:
            return None, None
        region = Region(x=l_result[1].x + x_offset,
                        y=l_result[1].y + y_offset,
                        right=r_result[1].right - x_offset,
                        bottom=r_result[1].bottom - y_offset)
    print(f"find_selection_text: search text in {region}")
    if convert_to_grayscale is True:
        text = ocr(frame=cv2.cvtColor(
            frame[region.y:region.bottom, region.x:region.right],
            cv2.COLOR_BGR2GRAY))
    else:
        text = ocr(frame=frame[region.y:region.bottom, region.x:region.right])
    print(f"find_selection_text: found {text} in selected region")
    return text, region

def navigate_to(context, button, focus_threshold=None):
    """Move the focus on the progress bar to the given soft button."""
    assert button in playback_soft_buttons
    press(context, keys.KEY_DOWN, 1)  # to show progress bar
    key_to_press = playback_soft_buttons[button][0]
    mp = (3, 0.65, *get_default_match_parameter()[2:])  # use fixed method 3 and threshold 0.65
    return search_by_image(context,
                           REGION_PROGRESS_BAR,
                           playback_soft_buttons[button][1],
                           key_to_press,
                           max_tries=7,
                           mp=mp,
                           hist=SEARCH_BY_IMAGE_USE_BRIGHTNESS,
                           focus_threshold=focus_threshold)

def current_price_grid_selection(context, frame=None):
    """Return the text of the currently selected entry in the purchase price grid."""
    # sel_text = find_selection_horizontal_repeat(frame=get_frame(),
    #                                             background=self.images.PURCHASE_POPUP_PRICE_GRID_PIX,
    #                                             region=REGION_SINGLE_PRICE_GRID).text
    # print(f"=== Dump current price selection: {sel_text}")
    # return sel_text
    if frame is None:
        cam = context.cam
        context.frame = get_frame_name(context, f"{WORK_DIR}/_frame.png")
        assert 200 == cam.get_frame(path=context.frame)
        frame = cv2.imread(context.frame)
    mp = (3, 0.8, *get_default_match_parameter()[2:])  # use match method 3 and threshold 0.8
    sel_text, region = find_selection_text(frame,
                                           left_bk=PURCHASE_POPUP_PRICE_GRID_SEL,
                                           right_bk=None,
                                           x_offset=10,
                                           y_offset=10,
                                           region=REGION_SINGLE_PRICE_GRID,
                                           match_parameter=mp)
    return sel_text

def find_current_chapters(context):
    """Open the chapter grid and return the 1-based index of the focused chapter."""
    press(context, keys.KEY_SELECT, 1)  # to show chapter grid
    cam = context.cam
    context.frame = get_frame_name(context, f"{WORK_DIR}/_frame.png")
    assert 200 == cam.get_frame(context.frame)
    frame = cv2.imread(context.frame)
    mp = (3, 0.8, *get_default_match_parameter()[2:])  # use match method 3 and threshold 0.8
    debug(f"match {playback_chapter['chapter']}")
    result = match(frame,
                   cv2.imread(playback_chapter["chapter"]),
                   REGION_CHAPTER_GRID,
                   match_parameter=mp)
    print("\n\n")
    assert result[0] is True, "fail to find current chapter"
    # find current chapter: split the grid region into equally sized cells
    chapter_grids_width = REGION_CHAPTER_GRID.right - REGION_CHAPTER_GRID.x
    chapter_grids_height = REGION_CHAPTER_GRID.bottom - REGION_CHAPTER_GRID.y
    chapter_width = chapter_grids_width // CHAPTER_GRID_COLS
    chapter_height = chapter_grids_height // CHAPTER_GRID_ROWS
    chapters_regions = []
    for row in range(CHAPTER_GRID_ROWS):
        for col in range(CHAPTER_GRID_COLS):
            chapters_regions.append(
                Region(REGION_CHAPTER_GRID.x + col * chapter_width,
                       REGION_CHAPTER_GRID.y + row * chapter_height,
                       REGION_CHAPTER_GRID.x + (col + 1) * chapter_width,
                       REGION_CHAPTER_GRID.y + (row + 1) * chapter_height))
            # debug(f"chapter: {len(chapters_regions) + 1}, {chapters_regions[-1]}")
    # the focused chapter is the cell with the largest overlap with the matched highlight
    found_chapters = []
    focused_chapter = 0
    focused_chapter_area = 0
    for chp in range(len(chapters_regions)):
        area = Region.intersect_area(result[1], chapters_regions[chp])
        # debug(f"intersect area {result[1]} and {chapters_regions[chp]}, area: {area}")
        if area > 0:
            found_chapters.append((chp, area))
            debug(f"chapter {chp + 1}, area: {area}")
            if area > focused_chapter_area:
                focused_chapter = chp + 1
                focused_chapter_area = area
    return focused_chapter

def step_impl(context, switch):
    """Toggle family play on/off from the playback progress bar."""
    press(context, keys.KEY_DOWN, 1)  # to show progress bar
    cam = context.cam
    context.frame = get_frame_name(context, f"{WORK_DIR}/_frame.png")
    assert 200 == cam.get_frame(context.frame)
    frame = cv2.imread(context.frame)
    mp = (3, 0.7, *get_default_match_parameter()[2:])  # use match method 3 and threshold 0.7
    debug("check current switch\n")
    if switch == "off":
        debug(f"match {playback_soft_buttons['family play off unhilite'][1]}")
        if match(frame,
                 cv2.imread(playback_soft_buttons["family play off unhilite"][1]),
                 REGION_PROGRESS_BAR,
                 match_parameter=mp)[0] is True:
            debug(f"family play is already {switch}\n")
            return
    elif switch == "on":
        debug(f"match {playback_soft_buttons['family play on unhilite'][1]}")
        if match(frame,
                 cv2.imread(playback_soft_buttons["family play on unhilite"][1]),
                 REGION_PROGRESS_BAR,
                 match_parameter=mp)[0] is True:
            debug(f"family play is already {switch}\n")
            return
    debug(f"navigate_to family play {switch}\n")
    if navigate_to(context, "family play on" if switch == "off" else "family play off"):
        press(context, keys.KEY_SELECT, 1)
        # press(context, keys.KEY_UP, 1)
        press(context, keys.KEY_SELECT, 1)  # dismiss family play popup
        press(context, keys.KEY_SELECT, 1)
        if switch == "on":
            press(context, keys.KEY_SELECT, 1)  # dismiss family play popup
        return

def find_video_quality_on_playback_page(context):
    """Return the video quality shown in the playback page title, keeping the best match."""
    cam = context.cam
    context.frame = get_frame_name(context, f"{WORK_DIR}/_frame.png")
    assert 200 == cam.get_frame(context.frame)
    frame = cv2.imread(context.frame)
    mp = (3, 0.8, *get_default_match_parameter()[2:])  # use match method 3 and threshold 0.8
    video_quality = "None"
    result = 0.0
    assert os.path.exists(playback_video_quality["uhd"])
    found = match(frame,
                  cv2.imread(playback_video_quality["uhd"]),
                  REGION_PLAYBACK_PAGE_TITLE,
                  match_parameter=mp)
    if found[0] is True and found[2] > result:
        video_quality = "uhd"
        result = found[2]
        debug("found uhd")
    found = match(frame,
                  cv2.imread(playback_video_quality["hdx"]),
                  REGION_PLAYBACK_PAGE_TITLE,
                  match_parameter=mp)
    if found[0] is True and found[2] > result:
        video_quality = "hdx"
        result = found[2]
        debug("found hdx")
    found = match(frame,
                  cv2.imread(playback_video_quality["sd"]),
                  REGION_PLAYBACK_PAGE_TITLE,
                  match_parameter=mp)
    if found[0] is True and found[2] > result:
        video_quality = "sd"
        result = found[2]
        debug("found sd")
    print("\n\n")
    return video_quality

def step_impl(context, result):
    """Scroll through the search suggestions until the expected text is highlighted."""
    cam = context.cam
    context.frame = get_frame_name(context, f"{WORK_DIR}/_frame.png")
    move_to_search_suggestions(context)
    debug("\n\n=== Read search suggestion.")
    mp = (3, 0.65, *get_default_match_parameter()[2:])  # use fixed method 3 and threshold 0.65
    for _ in range(3):
        assert 200 == cam.get_frame(context.frame)
        frame = cv2.imread(context.frame)
        text_read_out, region = find_selection_text(
            frame,
            cv2.imread(SEARCH_SUGGESTION_SEL_LEFT),
            cv2.imread(SEARCH_SUGGESTION_SEL_RIGHT),
            region=REGION_SEARCH_SUGGESTIONS,
            match_parameter=mp)
        if text_read_out is not None:
            format_read_out = text_read_out.upper()
            debug(f"The formatted read out: {format_read_out}")
            if result in format_read_out:
                debug("Found expected search string in search suggestions")
                return
        press(context, keys.KEY_DOWN, delay=1)
        time.sleep(2)
    raise AssertionError(f"Failed to find {result} in search menu")