Example #1
import pyscreeze

def load_pic(path: str):
    src = None
    try:
        src = pyscreeze._load_cv2(path)
    except Exception:
        print('Error loading resource: ' + path)
    return src
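
A hypothetical call (the path is made up); the helper returns an OpenCV image, i.e. a numpy array, or None if loading fails.

img = load_pic('assets/button.png')
if img is not None:
    print(img.shape)  # (height, width, channels)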
Example #2
import time
from typing import Union

import mss
import pyscreeze
from PIL import Image

def locate(*images,
           For=0.0,  # timeout in seconds; keep retrying until it elapses
           region=None,
           grayscale=None,
           confidence: Union[float, list] = 0.8,
           verbose=False):
    imglist = list(images)
    retVal = [None] * len(imglist)
    if region is None:
        region = detect_monitor(
            -1, verbose=False)  # last (rightmost) monitor; helper defined elsewhere
    start = time.time()
    if not isinstance(confidence, list):
        confidence = [confidence]
    while True:
        with mss.mss() as sct:
            ltrb = (region[0], region[1], region[0] + region[2],
                    region[1] + region[3])
            x = sct.grab(monitor=ltrb)
        screenshotIm = Image.frombytes("RGB", x.size, x.bgra, "raw", "BGRX")
        for i in range(len(imglist)):
            if retVal[i] is not None:
                continue  # already located; skip on later passes
            for conf in confidence:
                try:
                    imglist[i] = pyscreeze._load_cv2(imglist[i],
                                                     grayscale=grayscale)
                    bbox = pyscreeze.locate(imglist[i],
                                            screenshotIm,
                                            grayscale=grayscale,
                                            confidence=conf)
                    # center of the match, shifted back to screen coordinates
                    retVal[i] = (int(bbox[0] + bbox[2] / 2) + region[0],
                                 int(bbox[1] + bbox[3] / 2) + region[1])
                    if verbose:
                        print(f'image #{i} found at confidence {conf}')
                    break
                except OSError:
                    raise
                except Exception:
                    pass
        if any(retVal) or time.time() - start > For:
            if len(retVal) == 1:
                return retVal[0], screenshotIm
            else:
                return retVal, screenshotIm
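
A usage sketch with made-up file names: search for either template for up to ten seconds, trying the stricter confidence first.

positions, shot = locate('ok.png', 'cancel.png',
                         For=10.0,
                         confidence=[0.9, 0.8],
                         verbose=True)
# positions is a list of (x, y) match centers, with None for images not found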
Example #3
import cv2
import pyautogui
import pyscreeze

def find_touming(path, confidence=0.95):
    t2 = cv2.imread(path)  # template, BGR channels only
    alpha = cv2.imread(path, cv2.IMREAD_UNCHANGED)[:, :, 3]  # alpha channel used as match mask
    screenshotIm = pyscreeze.screenshot(region=None)
    result = cv2.matchTemplate(pyscreeze._load_cv2(screenshotIm),
                               t2,
                               cv2.TM_CCORR_NORMED,
                               mask=alpha)

    # Get the maximum and minimum values in the result and their coordinates
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
    pyautogui.moveTo(max_loc[0], max_loc[1])
    print(max_val)

    close_screenshotIm(screenshotIm)  # helper defined elsewhere in this module

    if max_val > confidence:
        return (max_loc[0], max_loc[1])
    else:
        return None
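
Passing the alpha channel as mask makes cv2.matchTemplate with TM_CCORR_NORMED ignore the transparent pixels, so an irregularly shaped template can still be matched. A sketch, assuming a hypothetical RGBA template file:

pos = find_touming('icon_rgba.png', confidence=0.9)
if pos is not None:
    pyautogui.click(pos[0], pos[1])  # pos is the top-left corner of the match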
Example #4
import cv2
import numpy as np
import pyscreeze

def qubeijing_for_pic_list(pic_list, file_path_name, confidence=0.999):
    # Merge a list of images into one template whose alpha channel masks
    # out pixels that differ between them.
    last_template = None
    alpha_channel = None
    for pic in pic_list:
        template = pyscreeze._load_cv2(pic)
        b_channel, g_channel, r_channel = cv2.split(template)
        if alpha_channel is None:
            alpha_channel = np.ones(b_channel.shape,
                                    dtype=b_channel.dtype) * 255
        if last_template is not None:
            b, g, r = cv2.split(last_template)
            generate_alpha(alpha_channel, b_channel, b, confidence=confidence)
            generate_alpha(alpha_channel, g_channel, g, confidence=confidence)
            generate_alpha(alpha_channel, r_channel, r, confidence=confidence)
        last_template = template
    b_channel, g_channel, r_channel = cv2.split(last_template)
    img_BGRA = cv2.merge((b_channel, g_channel, r_channel, alpha_channel))
    cv2.imwrite(file_path_name, img_BGRA)
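
Examples #4 and #5 call a generate_alpha helper that is not shown. Judging from the call sites (it takes the running alpha channel plus the current and previous color channels and returns nothing), it presumably zeroes the alpha wherever consecutive frames disagree. A minimal sketch under that assumption:

import numpy as np

def generate_alpha(alpha_channel, channel, last_channel, confidence=0.999):
    # Assumed behavior: pixels that changed between frames are marked
    # transparent so template matching will ignore them (in-place update).
    tolerance = (1.0 - confidence) * 255
    diff = np.abs(channel.astype(np.int16) - last_channel.astype(np.int16))
    alpha_channel[diff > tolerance] = 0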
Example #5
import time

import cv2
import numpy as np
import pyscreeze

def qubeijing_for_time(sec_time, box, file_path_name, confidence=0.999):
    last_template = None
    alpha_channel = None
    start_time = time.time()
    # box must be a (left, top, right, bottom) tuple, e.g. (870, 500, 1000, 600)
    region = [box[0], box[1], box[2] - box[0], box[3] - box[1]]
    while time.time() - start_time < sec_time:
        screenshotIm = pyscreeze.screenshot(region=region)
        template = pyscreeze._load_cv2(screenshotIm)
        close_screenshotIm(screenshotIm)
        b_channel, g_channel, r_channel = cv2.split(template)
        if alpha_channel is None:
            alpha_channel = np.ones(b_channel.shape,
                                    dtype=b_channel.dtype) * 255
        if last_template is not None:
            b, g, r = cv2.split(last_template)
            generate_alpha(alpha_channel, b_channel, b, confidence=confidence)
            generate_alpha(alpha_channel, g_channel, g, confidence=confidence)
            generate_alpha(alpha_channel, r_channel, r, confidence=confidence)
        last_template = template
    b_channel, g_channel, r_channel = cv2.split(last_template)
    img_BGRA = cv2.merge((b_channel, g_channel, r_channel, alpha_channel))
    cv2.imwrite(file_path_name, img_BGRA)
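
A usage sketch with assumed coordinates and output name: sample the screen region for five seconds, then save a template whose unstable pixels are transparent.

qubeijing_for_time(5, (870, 500, 1000, 600), 'masked_template.png')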
Example #6
import cv2
import numpy
import pyscreeze

def mine_locateAll_opencv(needleImage,
                          haystackImage,
                          limit=10000,
                          region=None,
                          step=1,
                          confidence=0.999):
    """ faster but more memory-intensive than pure python
        step 2 skips every other row and column = ~3x faster but prone to miss;
            to compensate, the algorithm automatically reduces the confidence
            threshold by 5% (which helps but will not avoid all misses).
        limitations:
          - OpenCV 3.x & python 3.x not tested
          - RGBA images are treated as RBG (ignores alpha channel)
    """
    grayscale = False

    confidence = float(confidence)

    needleImage = pyscreeze._load_cv2(needleImage, grayscale)
    needleHeight, needleWidth = needleImage.shape[:2]
    haystackImage = pyscreeze._load_cv2(haystackImage, grayscale)

    if region:
        haystackImage = haystackImage[region[1]:region[1] + region[3],
                                      region[0]:region[0] + region[2]]
    else:
        region = (0, 0)  # full image; these values used in the yield statement
    if (haystackImage.shape[0] < needleImage.shape[0]
            or haystackImage.shape[1] < needleImage.shape[1]):
        # avoid semi-cryptic OpenCV error below if bad size
        raise ValueError(
            'needle dimension(s) exceed the haystack image or region dimensions'
        )

    if step == 2:
        confidence *= 0.95
        needleImage = needleImage[::step, ::step]
        haystackImage = haystackImage[::step, ::step]
    else:
        step = 1

    # get all matches at once, credit: https://stackoverflow.com/questions/7670112/finding-a-subimage-inside-a-numpy-image/9253805#9253805
    # (the original used cv2.TM_CCOEFF_NORMED and kept results > confidence)
    result = cv2.matchTemplate(haystackImage, needleImage,
                               cv2.TM_SQDIFF_NORMED)  # normalized squared difference
    match_indices = numpy.arange(result.size)[(
        result < 1.0 - confidence).flatten()]  # keep positions whose diff is within the failure tolerance
    matches = numpy.unravel_index(match_indices[:limit], result.shape)

    # debug output; Print.colored is a helper defined elsewhere in this module
    Print.colored("result min", result.min(), "magenta")

    if len(matches[0]) == 0 and pyscreeze.RAISE_IF_NOT_FOUND:
        raise pyscreeze.ImageNotFoundException(
            'Could not locate the image (highest confidence = %.3f)' %
            (1 - result.min()))

    # use a generator for API consistency:
    matchx = matches[1] * step + region[0]  # vectorized
    matchy = matches[0] * step + region[1]
    for x, y in zip(matchx, matchy):
        yield (x, y, needleWidth, needleHeight)
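
Since the function is a generator, iterate over it to consume the matches; the file names here are made up.

for left, top, width, height in mine_locateAll_opencv('needle.png',
                                                      'screenshot.png',
                                                      step=2):
    print(left, top, width, height)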