Example #1
    def growing(self):
        # Raise the maximum tick rate by 0.2 with each new segment, capped at 30.
        self.game.tps_max += 0.2
        if self.game.tps_max > 30:
            self.game.tps_max = 30
        index = len(self.sneak)
        if index > 1:
            # Two or more segments: the new segment spawns at the last segment's
            # previous position and inherits the directions of the last two segments.
            self.sneak.append(
                Body(self.game, index,
                     self.sneak[-1].last_pos,
                     self.sneak[-1].dir,
                     self.sneak[-2].dir))
        elif index > 0:
            # Exactly one segment so far: the new segment follows it and the head's direction.
            self.sneak.append(
                Body(self.game, index,
                     self.sneak[-1].last_pos,
                     self.sneak[-1].dir, self.head.dir))
Example #2
import cv2
import matplotlib.pyplot as plt
import copy
import numpy as np

from src import util
from src.body import Body
from src.hand import Hand


def detect_keypoint(test_image, is_vis):
    body_estimation = Body('model/body_pose_model.pth')
    hand_estimation = Hand('model/hand_pose_model.pth')

    oriImg = cv2.imread(test_image)  # B,G,R order

    # detect body
    # subset: n*20 array, n is the number of people detected in the image;
    #         columns 0-17 index into candidate, column 18 is the total score,
    #         column 19 is the number of detected parts
    # candidate: m*4 array, m is the number of keypoints in the image,
    #            each row is [x, y, confidence, id]
    candidate, subset = body_estimation(oriImg)

    canvas = copy.deepcopy(oriImg)
    canvas, bodypoints = util.draw_bodypose(canvas, candidate, subset)

    # detect hand
    hands_list = util.handDetect(candidate, subset, oriImg)
    all_hand_peaks = []
    hand_personid_isleft = []
    for x, y, w, is_left, person_id in hands_list:
        # cv2.rectangle(canvas, (x, y), (x+w, y+w), (0, 255, 0), 2, lineType=cv2.LINE_AA)
        # cv2.putText(canvas, 'left' if is_left else 'right', (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)

        # if is_left:
        # plt.imshow(oriImg[y:y+w, x:x+w, :][:, :, [2, 1, 0]])
        # plt.show()
        peaks = hand_estimation(oriImg[y:y + w, x:x + w, :])
        peaks[:, 0] = np.where(peaks[:, 0] == 0, peaks[:, 0], peaks[:, 0] + x)
        peaks[:, 1] = np.where(peaks[:, 1] == 0, peaks[:, 1], peaks[:, 1] + y)
        # else:
        #     peaks = hand_estimation(cv2.flip(oriImg[y:y+w, x:x+w, :], 1))
        #     peaks[:, 0] = np.where(peaks[:, 0]==0, peaks[:, 0], w-peaks[:, 0]-1+x)
        #     peaks[:, 1] = np.where(peaks[:, 1]==0, peaks[:, 1], peaks[:, 1]+y)
        #     print(peaks)
        all_hand_peaks.append(peaks)
        hand_personid_isleft.append([person_id, is_left])

    # all_hand_peaks: [p, 21, 2], p is the number of hands detected in the image
    # hand_personid_isleft: [p, 2], each row is [person_id, is_left]
    all_hand_peaks = np.asarray(all_hand_peaks)
    hand_personid_isleft = np.asarray(hand_personid_isleft)

    canvas = util.draw_handpose(canvas, all_hand_peaks)
    if is_vis:
        plt.imshow(canvas[:, :, [2, 1, 0]])
        plt.axis('off')
        plt.show()

    return bodypoints, all_hand_peaks, hand_personid_isleft
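A hypothetical invocation of detect_keypoint for reference; the path 'images/demo.jpg' is a placeholder assumption, not a file used by these examples:

# Hypothetical usage sketch; 'images/demo.jpg' is an assumed placeholder path.
bodypoints, all_hand_peaks, hand_personid_isleft = detect_keypoint('images/demo.jpg', is_vis=True)
print(all_hand_peaks.shape)          # (p, 21, 2): p detected hands, 21 keypoints each
print(hand_personid_isleft.shape)    # (p, 2): [person_id, is_left] per hand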
Example #3
    def __init__(self, game, states):
        self.game = game
        self.game.tps_max = 5
        self.States = states
        self.border_color = (int(87 * 0.75), int(189 * 0.75), int(134 * 0.75))
        self.screen_size = self.game.screen.get_size()
        self.matrix_size = Vector2(20, 19)
        self.field_size = Vector2((self.screen_size[0] * 32) // 1280,
                                  (self.screen_size[1] * 32) // 720)
        self.draw_start_pos = Vector2(
            self.screen_size[0] / 2.0 -
            (self.matrix_size.x * self.field_size.x) / 2.0, 80)

        # Apple config
        # randint needs integer bounds; Vector2 components are floats
        self.apple_pos = Vector2(randint(1, int(self.matrix_size.x) - 1),
                                 randint(1, int(self.matrix_size.y) - 1))
        self.apple_color = (250, 50, 50)
        self.apple_image = pygame.image.load("Data/apple2.png")
        self.apple_image = pygame.transform.scale(
            self.apple_image, (int(self.field_size.x), int(self.field_size.y)))

        self.background_image = pygame.image.load("Data/background.png")
        self.foreground_image = pygame.image.load("Data/foreground.png")
        self.background_size = 1
        self.background_image = pygame.transform.scale(
            self.background_image,
            (int(self.screen_size[0] * self.background_size),
             int(self.screen_size[1] * self.background_size)))
        self.foreground_image = pygame.transform.scale(
            self.foreground_image,
            (int(self.screen_size[0] * self.background_size),
             int(self.screen_size[1] * self.background_size)))

        self.head = Head(game, states)
        self.sneak = [Body(self.game, 0, self.head.last_pos, 1, 1)]

        self.points = len(self.sneak) - 1

        # Music config
        pygame.mixer.music.load("Data/Popcorn.mp3")
        pygame.mixer.music.play(-1)
        pygame.mixer.Channel(0).stop()
Example #4
import cv2
import matplotlib.pyplot as plt
import copy
import numpy as np

from src import model
from src import util
from src.body import Body

import time

body_estimation = Body('model/body_pose_model.pth')

input_filename = 'london-men-1'
capture = cv2.VideoCapture("videos/{}.mp4".format(input_filename))

# The default frame resolution is obtained from the capture; it depends on the input video.
# The resolution values are converted from float to integer.
frame_width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
frame_height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))

output_filename = "{}-output".format(input_filename)

# Define the codec and create VideoWriter object.
out = cv2.VideoWriter("results/{}.avi".format(output_filename),
                      cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 10,
                      (frame_width, frame_height))

frame_count = 0
start = time.time()
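The snippet ends right after starting the timer. Below is a minimal sketch of the per-frame loop this setup presumably feeds, reusing the body estimation and drawing calls from the other examples; the loop itself and the final printout are assumptions, and draw_bodypose is taken to return only the annotated canvas, as in Examples #5 and #7:

# Assumed continuation: read frames, run pose estimation, and write the annotated video.
while capture.isOpened():
    ret, frame = capture.read()
    if not ret:
        break
    candidate, subset = body_estimation(frame)    # body keypoints for this frame
    canvas = util.draw_bodypose(copy.deepcopy(frame), candidate, subset)
    out.write(canvas)                             # append the annotated frame to the output video
    frame_count += 1

print("Processed {} frames in {:.1f} s".format(frame_count, time.time() - start))
capture.release()
out.release()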
Example #5
import cv2
import matplotlib.pyplot as plt
import copy
import numpy as np

from src import model
from src import util
from src.body import Body
from src.hand import Hand

body_estimation = Body('/home2/lgfm95/openposehzzone/model/body_pose_model.pth')
hand_estimation = Hand('/home2/lgfm95/openposehzzone/model/hand_pose_model.pth')

def main(oriImg):
    shape0 = oriImg.shape
    candidate, subset = body_estimation(oriImg)
    canvas = copy.deepcopy(oriImg)
    shape1 = canvas.shape
    canvas = util.draw_bodypose(canvas, candidate, subset)
    # detect hand
    hands_list = util.handDetect(candidate, subset, oriImg)

    all_hand_peaks = []
    shape2 = canvas.shape
    for x, y, w, is_left in hands_list:
        # cv2.rectangle(canvas, (x, y), (x+w, y+w), (0, 255, 0), 2, lineType=cv2.LINE_AA)
        # cv2.putText(canvas, 'left' if is_left else 'right', (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)

        # if is_left:
            # plt.imshow(oriImg[y:y+w, x:x+w, :][:, :, [2, 1, 0]])
            # plt.show()
        # Estimate hand keypoints on the cropped region and shift them back into
        # full-image coordinates (zero means the keypoint was not found), as in Example #2.
        peaks = hand_estimation(oriImg[y:y + w, x:x + w, :])
        peaks[:, 0] = np.where(peaks[:, 0] == 0, peaks[:, 0], peaks[:, 0] + x)
        peaks[:, 1] = np.where(peaks[:, 1] == 0, peaks[:, 1], peaks[:, 1] + y)
        all_hand_peaks.append(peaks)

    canvas = util.draw_handpose(canvas, all_hand_peaks)
    return canvas  # assumed return value; the original extract ends before the function does
Example #6
    def __init__(self):
        super().__init__()
        # Load the body and hand pose models from the package's model/ directory.
        model_dir = os.path.dirname(__file__) + '/model/'
        self.body_estimation = Body(model_dir + 'body_pose_model.pth')
        self.hand_estimation = Hand(model_dir + 'hand_pose_model.pth')
Example #7
import cv2
import matplotlib.pyplot as plt
import copy
import numpy as np

from src import model
from src import util
from src.body import Body

body_estimation = Body('model/body_pose_model.pth')  # pose estimator; argument: path to the trained model weights
pose_compare = util.Pose_compare()  # my own module; it reports the error between two poses

test_image1 = 'images/aa.jpg'  # first image to compare
test_image2 = 'images/dragon2.jpg'  # second image to compare

oriImg1 = cv2.imread(test_image1)  # load the first image
candidate1, subset1 = body_estimation(
    oriImg1)  # candidate: x, y, score, id | subset: index
# x and y are the joint coordinates; think of the index as the joint's name
# the score is simply how confident the detection is
# the id can be ignored

oriImg2 = cv2.imread(test_image2)  # load the second image
candidate2, subset2 = body_estimation(oriImg2)

canvas1 = copy.deepcopy(oriImg1)  # copy the image
canvas1 = util.draw_bodypose(canvas1, candidate1, subset1)  # draw the predicted skeleton on the copy

canvas2 = copy.deepcopy(oriImg2)
canvas2 = util.draw_bodypose(canvas2, candidate2, subset2)
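The snippet stops before pose_compare is actually applied, and Pose_compare's interface is not shown, so here is an independent sketch of how the two detections could be compared numerically: it pairs up the 18 joints of the first detected person in each image via subset and reports the mean raw pixel distance, with no scale normalization. The helper name mean_joint_error and the comparison itself are illustrative assumptions, not part of the author's module:

import numpy as np

def mean_joint_error(candidate_a, subset_a, candidate_b, subset_b):
    # Hypothetical comparison: mean Euclidean distance over joints detected in both poses.
    errors = []
    for joint in range(18):  # columns 0-17 of subset index into candidate
        ia = int(subset_a[0][joint])
        ib = int(subset_b[0][joint])
        if ia == -1 or ib == -1:  # -1 means the joint was not detected
            continue
        xa, ya = candidate_a[ia][0:2]
        xb, yb = candidate_b[ib][0:2]
        errors.append(np.hypot(xa - xb, ya - yb))
    return float(np.mean(errors)) if errors else float('nan')

print(mean_joint_error(candidate1, subset1, candidate2, subset2))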