def get(self, field="image"):
    # type: (Text) -> Tuple[ImageInfo, ...]
    """Get imageinfo used on the field.

    Args:
        field (six.text_type): Server defined field name,
            defaults to 'image'.

    Returns:
        tuple[ImageInfo]: De-duplicated, sorted image information.
    """
    raw_rows = cast.list_(self.select[field], (str, six.text_type))
    collected = set()
    for raw in raw_rows:
        # Each row is a JSON document containing a list of image dicts.
        # Malformed rows (bad JSON, missing keys, wrong types) are
        # skipped as a whole; entries already collected from the same
        # row before the failure are kept.
        try:
            for entry in cast.list_(json.loads(raw), dict):
                collected.add(
                    ImageInfo(
                        max=cast.list_(entry["max"], (six.text_type, str))[0],
                        min=cast.list_(entry["min"], (six.text_type, str))[0],
                        path=entry.get("path"),
                        attachment_id=entry.get("att_id"),
                    )
                )
        except (TypeError, KeyError, ValueError):
            continue
    return tuple(sorted(collected))
def test_type_not_match():
    """A non-castable element raises CastError with a descriptive message."""
    expected = (
        "(u?'can not cast object to instance', None, <(class|type) 'int'>)"
    )
    with pytest.raises(cast.CastError, match=expected):
        cast.list_([1, None, 3], int)
def from_training_scene(cls, img: Image) -> Training:
    """Build a Training instance by recognizing a training-scene screenshot.

    Locates the confirm button via template matching, derives the training
    type from which of five fixed screen positions the button is nearest,
    reads the training level from a pixel color, then OCRs the six effect
    numbers from fixed crop regions.

    Args:
        img: Screenshot of the training scene.

    Returns:
        Training: Populated training data.

    Raises:
        StopIteration: If the confirm-button template is not found.
        ValueError: If the confirm position matches no known training type.
    """
    if g.image_path:
        # Debug aid: persist the screenshot and log its md5 id.
        image_id = imagetools.md5(
            imagetools.cv_image(img.convert("RGB")),
            save_path=g.image_path,
            save_mode="RGB",
        )
        LOGGER.debug("from_training_scene: image=%s", image_id)
    # All hard-coded coordinates below are in reference resolutions
    # (width 540 or 466) and scaled to the actual image width.
    rp = mathtools.ResizeProxy(img.width)
    self = cls.new()
    self.confirm_position = next(
        template.match(
            img,
            template.Specification(
                templates.SINGLE_MODE_TRAINING_CONFIRM, threshold=0.8
            ),
        )
    )[1]
    radius = rp.vector(30, 540)
    # Map the confirm-button position to one of the five training types
    # by nearest known tab center (all at y=850 in the 540-wide layout).
    for t, center in zip(
        Training.ALL_TYPES,
        (
            rp.vector2((78, 850), 540),
            rp.vector2((171, 850), 540),
            rp.vector2((268, 850), 540),
            rp.vector2((367, 850), 540),
            rp.vector2((461, 850), 540),
        ),
    ):
        if mathtools.distance(self.confirm_position, center) < radius:
            self.type = t
            break
    else:
        raise ValueError(
            "unknown type for confirm position: %s" % self.confirm_position
        )
    # Training level is encoded in the pixel color at (10, 200).
    self.level = _recognize_level(
        tuple(cast.list_(img.getpixel(rp.vector2((10, 200), 540)), int))
    )
    # Effect numbers live in one row (y 503-532, 466-wide reference);
    # each stat has its own x-range.
    t, b = 503, 532
    self.speed = _ocr_training_effect(
        img.crop(rp.vector4((18, t, 91, b), 466))
    )
    self.stamina = _ocr_training_effect(
        img.crop(rp.vector4((91, t, 163, b), 466))
    )
    self.power = _ocr_training_effect(
        img.crop(rp.vector4((163, t, 237, b), 466))
    )
    self.guts = _ocr_training_effect(
        img.crop(rp.vector4((237, t, 309, b), 466))
    )
    self.wisdom = _ocr_training_effect(
        img.crop(rp.vector4((309, t, 382, b), 466))
    )
    self.skill = _ocr_training_effect(
        img.crop(rp.vector4((387, t, 450, b), 466))
    )
    return self
def _find_by_race_menu_item(ctx: Context, img: PIL.Image.Image) -> Race:
    """Recognize a race from a race-menu item image.

    Crops fixed regions (scaled from a 492-wide reference layout) to read
    the race spec, the no.1 fan count and the grade color, then looks up
    the matching race.

    Args:
        ctx: Current single-mode context (provides the date).
        img: Screenshot crop of one race menu item.

    Returns:
        Race: The first race matching the recognized spec.

    Raises:
        ValueError: If no race matches the recognized spec.
    """
    rp = mathtools.ResizeProxy(img.width)
    spec_bbox = rp.vector4((221, 12, 478, 32), 492)
    no1_fan_count_bbox = rp.vector4((207, 54, 360, 72), 492)
    grade_color_pos = rp.vector2((182, 14), 492)

    stadium, ground, distance, turn, track = _recognize_spec(img.crop(spec_bbox))
    no1_fan_count = _recognize_fan_count(img.crop(no1_fan_count_bbox))
    grades = _recognize_grade(tuple(cast.list_(img.getpixel(grade_color_pos), int)))

    full_spec = (
        ctx.date,
        stadium,
        ground,
        distance,
        turn,
        track,
        no1_fan_count,
        grades,
    )
    for i in _find_by_spec(*full_spec):
        LOGGER.info("image match: %s", i)
        return i
    # BUG FIX: the spec was previously passed as a second exception
    # argument (logging-style), so it never appeared in the message.
    raise ValueError("_find_by_race_menu_item: no race match spec: %s" % (full_spec,))
def find_by_race_detail_image(ctx: Context, screenshot: PIL.Image.Image) -> Race:
    """Recognize a race from a race-detail screenshot.

    Locates the no.1 fan count row via template matching, crops fixed
    regions (scaled from a 466-wide reference layout) to read grade,
    spec and fan count, then looks up the matching race.

    Args:
        ctx: Current single-mode context (provides the date).
        screenshot: Full race-detail screenshot.

    Returns:
        Race: The first race matching the recognized spec.

    Raises:
        ValueError: If no race matches the recognized spec.
    """
    rp = mathtools.ResizeProxy(screenshot.width)
    grade_color_pos = rp.vector2((10, 75), 466)
    spec_bbox = rp.vector4((27, 260, 302, 279), 466)
    _, no1_fan_count_pos = next(
        template.match(screenshot, templates.SINGLE_MODE_RACE_DETAIL_NO1_FAN_COUNT)
    )
    # Fan-count box: fixed x-range, y anchored to the matched template row.
    no1_fan_count_bbox = (
        rp.vector(150, 466),
        no1_fan_count_pos[1],
        rp.vector(400, 466),
        no1_fan_count_pos[1] + rp.vector(18, 466),
    )

    grades = _recognize_grade(
        tuple(cast.list_(screenshot.getpixel(grade_color_pos), int))
    )
    stadium, ground, distance, turn, track = _recognize_spec(screenshot.crop(spec_bbox))
    no1_fan_count = _recognize_fan_count(screenshot.crop(no1_fan_count_bbox))

    full_spec = (
        ctx.date,
        stadium,
        ground,
        distance,
        turn,
        track,
        no1_fan_count,
        grades,
    )
    for i in _find_by_spec(*full_spec):
        LOGGER.info("image match: %s", i)
        return i
    # BUG FIX: the spec was previously passed as a second exception
    # argument (logging-style), so it never appeared in the message;
    # the message also named the wrong function ("details").
    raise ValueError("find_by_race_detail_image: no race match spec: %s" % (full_spec,))
def test_iterator():
    """An iterator input is materialized into a list of its items."""
    result = cast.list_(six.moves.range(3))
    assert result == [0, 1, 2]
def test_text_with_type():
    """When the target type is text, a string is wrapped, not split."""
    result = cast.list_("abc", six.text_type)
    assert result == ["abc"]
def test_text():
    """Without a target type, a string is split into its characters."""
    result = cast.list_("abc")
    assert result == ["a", "b", "c"]
def test_none():
    """None casts to an empty list."""
    result = cast.list_(None)
    assert result == []
def _is_race_list_scroll_to_top() -> bool:
    """Whether the race list is scrolled to the top.

    Samples the pixel at (525, 525) (540-wide reference layout) from a
    fresh screenshot and compares it against the expected reference
    color; a similarity above 0.9 counts as "at top".
    """
    rp = action.resize_proxy()
    sample_pos = rp.vector2((525, 525), 540)
    pixel = template.screenshot(max_age=0).getpixel(sample_pos)
    similarity = imagetools.compare_color(
        (123, 121, 140), tuple(cast.list_(pixel, int))
    )
    return similarity > 0.9