Example #1
	def makeData(self):
		# Assumes cv2 and the tf_pose helpers (common, TfPoseEstimator,
		# get_graph_path, model_wh) are imported at module level.
		w, h = model_wh('432x368')
		e = TfPoseEstimator(get_graph_path('mobilenet_thin'), target_size=(w, h))

		i = 0
		cap = cv2.VideoCapture(WEBCAM_FILE_NAME)
		while cap.isOpened():
			endFlag, frame = cap.read()
			if not endFlag:
				break
			cv2.imwrite('./results/webcam/webcamRaw/'+'img_%s.png' % str(i).zfill(6), frame)
			i += 1
		cap.release()

		j = 0
		path_w = './results/webcam/webcamData.txt'
		# Truncate the output file before per-frame results are appended below.
		with open(path_w, mode='w') as f:
			f.write('')
		while j < i:
			image = common.read_imgfile('./results/webcam/webcamRaw/'+'img_%s.png' % str(j).zfill(6))
			humans = e.inference(image, resize_to_default=True, upsample_size=4.0)

			centers = TfPoseEstimator.get_centers(image, humans, imgcopy=False)
			
			with open(path_w, mode='a') as f:
				f.write('t' + str(j) + ':' + str(centers) + '\n')
			j += 1

		# Render skeleton overlays for frames 30-79 only.
		k = 30
		while k < 80:
			image = common.read_imgfile('./results/webcam/webcamRaw/'+'img_%s.png' % str(k).zfill(6))
			humans = e.inference(image, resize_to_default=True, upsample_size=4.0)
			image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)
			cv2.imwrite('./results/webcam2/webcamOpenpose/'+'img_%s.png' % str(k).zfill(6), image)
			k += 1
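The text file written above stores one line per frame in the form `t<frame>:<centers>`. Below is a minimal sketch of reading it back, assuming `str(centers)` produces a plain Python literal (e.g. a dict of part index to (x, y) tuples); the exact return type of `get_centers` is fork-specific and not shown in the snippet.

import ast

def load_webcam_data(path='./results/webcam/webcamData.txt'):
    # Parse lines of the form 't<frame>:<centers>' written by makeData().
    frames = {}
    with open(path) as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            # Split at the first ':' only; the centers repr may contain colons.
            tag, _, raw = line.partition(':')
            # Assumes str(centers) is a literal that ast can parse back.
            frames[int(tag[1:])] = ast.literal_eval(raw)
    return frames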
Example #2
        num_12.append((x, y))
    num_13 = []
    for x, y in data["lower_body"]["13"]:
        num_13.append((x, y))
    
    index = 0
    while cap.isOpened():
        try:
            ret_val, image = cam.read()
            ret_val2, image2 = cap.read()

            logger.debug('image process+')
            humans = e.inference(image, resize_to_default=(w > 0 and h > 0), upsample_size=args.resize_out_ratio)

            logger.debug('postprocess+')
            a = TfPoseEstimator.get_centers(image, humans, imgcopy=False)
            # Reference points for this frame: parts 1-7 (upper body) in b,
            # parts 8-13 (lower body) in c.
            b = [
                num_1[index], num_2[index], num_3[index], num_4[index],
                num_5[index], num_6[index], num_7[index]
            ]
            c = [
                num_8[index], num_9[index], num_10[index], num_11[index],
                num_12[index], num_13[index]
            ]
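The `num_1` … `num_13` lists above are filled from a JSON-like `data` object before the loop. Here is a minimal sketch of the assumed layout and an equivalent loading loop; the file name is hypothetical, and the split into parts 1-7 (upper body) and 8-13 (lower body) is inferred from the `data["lower_body"]["13"]` access and the `b`/`c` grouping.

import json

# Hypothetical reference file; the schema below is inferred from the snippet:
# {"upper_body": {"1": [[x, y], ...], ..., "7": [...]},
#  "lower_body": {"8": [[x, y], ...], ..., "13": [...]}}
with open('reference_points.json') as f:
    data = json.load(f)

points = {}
for part in range(1, 8):
    points[part] = [tuple(p) for p in data["upper_body"][str(part)]]
for part in range(8, 14):
    points[part] = [tuple(p) for p in data["lower_body"][str(part)]]

# Equivalent to the per-frame b/c lists built above:
def frame_points(index):
    b = [points[part][index] for part in range(1, 8)]
    c = [points[part][index] for part in range(8, 14)]
    return b, c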
Example #3
            ret_val2, image2 = cap2.read()

            logger1.debug('image process+')
            humans1 = e1.inference(image1,
                                   resize_to_default=(w1 > 0 and h1 > 0),
                                   upsample_size=args1.resize_out_ratio)
            logger2.debug('image process+')
            humans2 = e2.inference(image2,
                                   resize_to_default=(w2 > 0 and h2 > 0),
                                   upsample_size=args2.resize_out_ratio)
            ### 2: video) if --showBG is False, draw only the skeleton (black background)
            if not args2.showBG:
                image2 = np.zeros(image2.shape)
            ###
            logger1.debug('postprocess+')
            a = TfPoseEstimator.get_centers(image1, humans1,
                                            imgcopy=False)  # all points
            image1 = TfPoseEstimator.draw_humans(image1,
                                                 humans1,
                                                 imgcopy=False)
            logger2.debug('postprocess+')
            b = TfPoseEstimator.get_centers(image2, humans2,
                                            imgcopy=False)  # upper-body points
            c = TfPoseEstimator.get_centers(image2, humans2,
                                            imgcopy=False)  # lower-body points
            image2 = TfPoseEstimator.draw_humans(image2,
                                                 humans2,
                                                 imgcopy=False)
            """
            1) 실시간으로 동영상의 점을 불러온다 (점의 좌표를 알아야함)
            2) 실시간으로 웹캠의 점을 불러온다 (점의 좌표를 알아야함)
            3) 점 간의 norm(거리)을 구한다 (scalar)
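This excerpt starts mid-loop, so the setup for its two streams is not shown. A minimal setup sketch for the `e1`/`e2`, `cam`/`cap2` pattern, mirroring the initialization in Example #1; the model name, resolution, and video path are assumptions, not taken from the original.

import cv2
from tf_pose.estimator import TfPoseEstimator
from tf_pose.networks import get_graph_path, model_wh

# One estimator per stream, mirroring e1/e2 above.
w1, h1 = model_wh('432x368')
e1 = TfPoseEstimator(get_graph_path('mobilenet_thin'), target_size=(w1, h1))
w2, h2 = model_wh('432x368')
e2 = TfPoseEstimator(get_graph_path('mobilenet_thin'), target_size=(w2, h2))

cam = cv2.VideoCapture(0)                 # live webcam -> image1
cap2 = cv2.VideoCapture('reference.mp4')  # reference video -> image2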
Example #4
            logger2.debug('postprocess+')
            image2 = TfPoseEstimator.draw_humans(image2,
                                                 humans2,
                                                 imgcopy=False)
            """
            1) 실시간으로 동영상의 점을 불러온다 (점의 좌표를 알아야함)
            2) 실시간으로 웹캠의 점을 불러온다 (점의 좌표를 알아야함)
            3) 점 간의 norm(거리)을 구한다 (scalar)
            4) 예를 들어 점이 18개로 고정되어 있다면 각 pair점 간의 norm을 전부 구하고
            5) sum 하여 그 값을 0과 1사이로 normalization 한다 ->result
            6) result를 y축 time을 x축으로 실시간 데이터 plotting
            7) result가 어떤 threshold를 넘어설때 마다 warning을 cv2.putText로 출력해준다.
            """

            # find the points
            a = TfPoseEstimator.get_centers(image1, humans1, imgcopy=False)
            b = TfPoseEstimator.get_centers(image2, humans2, imgcopy=False)

            L2_norm = []
            L2_nonzero = []
            for i in range(len(b)):
                try:
                    L2_norm.append(
                        np.linalg.norm(np.array(a[i]) - np.array(b[i]), ord=2))
                except Exception:
                    # No matching point for this index in one of the frames.
                    L2_norm.append(0.0)
                if L2_norm[i] != 0.0:
                    # Keep only pairs where both points were detected.
                    L2_nonzero.append(L2_norm[i])
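Steps 5-7 of the docstring (sum, normalize to [0, 1], warn past a threshold) are not shown in the excerpt. A minimal sketch of one way to finish it; the normalization constant `max_norm` and the default `threshold` are assumptions, since the original fixes neither.

import cv2
import numpy as np

def score_and_warn(image, l2_nonzero, threshold=0.5, max_norm=200.0):
    # Step 5: sum the pair distances and squash the total into [0, 1].
    # max_norm is an assumed per-pair distance treated as "fully wrong".
    if not l2_nonzero:
        return image, 0.0
    result = min(float(np.sum(l2_nonzero)) / (len(l2_nonzero) * max_norm), 1.0)
    # Step 7: overlay a warning whenever the score crosses the threshold.
    if result > threshold:
        cv2.putText(image, 'WARNING', (10, 40),
                    cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0, 0, 255), 2)
    return image, result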