Example #1
def get_Target_Posture(img):
    img_roi = roi(img, x=401, x_w=553, y=29, y_h=29 + 1)
    b, g, r = cv2.split(img_roi)  # split the color channels

    white_line = r[0][0]
    if white_line > 190:
        canny = cv2.Canny(cv2.GaussianBlur(r, (3, 3), 0), 0, 100)
        Target_Posture = np.argmax(canny)
    else:
        Target_Posture = 0

    if white_line > 250 and Target_Posture < 10:
        Target_Posture = canny.shape[1]  # bar fully white: report the full bar width

    return Target_Posture
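The roi() helper used throughout these examples is not shown. A minimal sketch of what it presumably does, assuming it simply crops the frame with NumPy slicing; the signature is inferred from the call sites and is not the project's own code:

def roi(img, x, x_w, y, y_h):
    # crop the region of interest: rows y..y_h, columns x..x_w
    return img[y:y_h, x:x_w]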
Example #2
def get_Self_Posture(img):
    img_roi = roi(img, x=401, x_w=490, y=389, y_h=389 + 1)
    b, g, r = cv2.split(img_roi)  # split the color channels

    white_line = r[0][0]
    if 155 < white_line < 170 or white_line > 250:
        canny = cv2.Canny(cv2.GaussianBlur(r, (3, 3), 0), 0, 100)
        Self_Posture = np.argmax(canny)
    else:
        Self_Posture = 0

    if white_line > 250 and Self_Posture < 10:
        Self_Posture = canny.shape[1]  # bar fully white: report the full bar width

    return Self_Posture
Example #3
def get_Self_HP(img):
    img_roi = roi(img, x=48, x_w=305, y=409, y_h=409+1)

    b, g, r = cv2.split(img_roi)    # split the color channels

    retval, img_th = cv2.threshold(g, 50, 255, cv2.THRESH_TOZERO)             # thresholding: pixel values at or below 50 are set to 0
    retval, img_th = cv2.threshold(img_th, 70, 255, cv2.THRESH_TOZERO_INV)    # thresholding: pixel values above 70 are set to 0

    target_img = img_th[0]
    if 0 in target_img:
        Self_HP = np.argmin(target_img)
    else:
        Self_HP = len(target_img)

    return Self_HP
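Hypothetical usage, assuming a BGR game capture loaded with OpenCV (the file name is illustrative). The return value is the column where the green HP bar first drops to zero, i.e. the bar length in pixels:

import cv2

frame = cv2.imread('game_frame.png')              # illustrative screenshot path
print('Self HP bar length:', get_Self_HP(frame))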
Example #4
def get_Self_HP(img):
    img_roi = roi(img, x=48, x_w=305, y=409, y_h=409 + 1)

    b, g, r = cv2.split(img_roi)  # split the color channels

    retval, img_th = cv2.threshold(g, 50, 255,
                                   cv2.THRESH_TOZERO)  # thresholding: pixel values at or below 50 are set to 0
    retval, img_th = cv2.threshold(
        img_th, 70, 255, cv2.THRESH_TOZERO_INV)  # thresholding: pixel values above 70 are set to 0

    target_img = img_th[0]
    if 0 in target_img:
        Self_HP = np.argmin(target_img)
    else:
        Self_HP = len(target_img)

    return Self_HP
Example #5
def get_Target_HP(img):
    img_roi = roi(img, x=48, x_w=216, y=41, y_h=41 + 1)

    b, g, r = cv2.split(img_roi)  # split the color channels

    retval, img_th = cv2.threshold(g, 25, 255,
                                   cv2.THRESH_TOZERO)  # thresholding: pixel values at or below 25 are set to 0
    retval, img_th = cv2.threshold(
        img_th, 70, 255, cv2.THRESH_TOZERO_INV)  # thresholding: pixel values above 70 are set to 0

    target_img = img_th[0]
    if 0 in target_img:
        Target_HP = np.argmin(target_img)
    else:
        Target_HP = len(target_img)

    return Target_HP
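get_status(screen) is called in the main() examples below but is not shown here. A plausible sketch, assuming it bundles the four readings above and places a printable summary string at index 4 (which is what main() reads); the exact return layout is an assumption:

def get_status(img):
    self_hp = get_Self_HP(img)
    self_posture = get_Self_Posture(img)
    target_hp = get_Target_HP(img)
    target_posture = get_Target_Posture(img)
    # the summary string sits at index 4, matching get_status(screen)[4] in main()
    info = (f'Self HP: {self_hp:3d}  Self Posture: {self_posture:3d}  '
            f'Target HP: {target_hp:3d}  Target Posture: {target_posture:3d}')
    return self_hp, self_posture, target_hp, target_posture, info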
Example #6
def main():

    paused = True
    print("Ready!")

    while True:
        keys = key_check()
        if paused:
            if 'T' in keys:
                paused = False
                print('Starting!')
        else:

            screen = get_screen()    # capture the current game frame

            status_info = get_status(screen)[4]
            print('\r' + status_info, end='')    # print the status line in place

            cv2.imshow('roi', roi(screen, x, x_w, y, y_h))

            # Calibration lines
            screen[409:, [48, 49, 304, 305], :] = 255    # Self HP

            # screen[389, 401:483, :] = 255    # Self Posture
            screen[[384, 385, 392, 393], 401:483, :] = 255    # Self Posture
            screen[389:, 401, :] = 255    # Self Posture midline

            screen[:41, [48, 49, 215, 216], :] = 255    # Target HP

            # screen[29, 401:544, :] = 255    # Target Posture
            screen[[25, 26, 32, 33], 401:544, :] = 255    # Target Posture
            screen[:29, 401, :] = 255    # Target Posture midline

            cv2.imshow('screen', screen)
            cv2.waitKey(1)

            if 'P' in keys:
                cv2.destroyAllWindows()
                break

    print('\nDone!')
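key_check() is another external helper. A minimal, Windows-only sketch of one way it could work, assuming it polls GetAsyncKeyState for the letter keys this loop cares about and returns whichever are currently pressed:

import ctypes

def key_check(letters='TP'):
    pressed = []
    for ch in letters:
        # for ASCII capital letters the virtual-key code equals ord(ch);
        # the high bit of GetAsyncKeyState means the key is down right now
        if ctypes.windll.user32.GetAsyncKeyState(ord(ch)) & 0x8000:
            pressed.append(ch)
    return pressed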
Example #7
def main():

    paused = True
    print("Ready!")

    while True:
        keys = key_check()
        if paused:
            if 'T' in keys:
                paused = False
                print('Starting!')
        else:

            screen = get_screen()

            status_info = get_status(screen)[4]
            print('\r' + status_info, end='')

            cv2.imshow('roi', roi(screen, x, x_w, y, y_h))

            # Calibration line
            screen[409:, [48, 49, 304, 305], :] = 255  # Self_HP

            # screen[389, 401:483, :] = 255    # Self_Posture
            screen[[384, 385, 392, 393], 401:483, :] = 255  # Self_Posture
            screen[389:, 401, :] = 255  # Self_Posture Midline

            screen[:41, [48, 49, 215, 216], :] = 255  # Target_HP

            # screen[29, 401:544, :] = 255    # Target_Posture
            screen[[25, 26, 32, 33], 401:544, :] = 255  # Target_Posture
            screen[:29, 401, :] = 255  # Target_Posture Midline

            cv2.imshow('screen', screen)
            cv2.waitKey(1)

            if 'P' in keys:
                cv2.destroyAllWindows()
                break

    print('\nDone!')
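get_screen() is likewise assumed. One possible sketch using Pillow's ImageGrab to capture a fixed region of the desktop and hand it to OpenCV as a BGR array; the bounding box is illustrative and would have to match the game window:

import numpy as np
import cv2
from PIL import ImageGrab

def get_screen(bbox=(0, 0, 800, 450)):
    frame = np.array(ImageGrab.grab(bbox=bbox))      # RGB capture of the region
    return cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)    # the code above expects BGR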
Example #8
    def img_processing(self, screens):
        return np.array([
            cv2.resize(
                roi(cv2.cvtColor(screen, cv2.COLOR_BGR2GRAY), x, x_w, y, y_h),
                (in_height, in_width)) for screen in screens
        ])
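Hypothetical usage: turn a small batch of frames into the grayscale, cropped, resized stack a model would consume. x, x_w, y, y_h and in_height, in_width are module-level constants assumed to exist in the project, and agent is an illustrative instance of the class this method belongs to:

import numpy as np

frames = [np.zeros((450, 800, 3), dtype=np.uint8) for _ in range(4)]   # dummy BGR frames
batch = agent.img_processing(frames)    # 'agent': illustrative instance of the owning class
print(batch.shape)                      # (4, rows, cols): one grayscale crop per frame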