Example #1
0
class get_Score(object):
	"""Score a video of a performed action against a reference recording
	stored in a pickled lookup table."""

	def __init__(self, lookup='lookup.pickle'):
		"""Load the pose estimator, the scorer, and the pickled lookup table.

		NOTE(review): pickle.load on an untrusted file can execute arbitrary
		code -- only load lookup tables from trusted sources.
		"""
		self.a = Pose()
		self.s = Score()
		# Context manager closes the file handle; the original
		# pickle.load(open(...)) leaked it.
		with open(lookup, 'rb') as lookup_file:
			self.b = pickle.load(lookup_file)
		self.input_test = []

	def get_action_coords_from_dict(self, action):
		"""Return (coordinate array, number of frames) for *action*.

		Raises a clear KeyError when the action is missing from the lookup
		table (the original loop left model_array unbound and crashed with
		UnboundLocalError instead).
		"""
		try:
			model_array = self.b[action]
		except KeyError:
			raise KeyError("action '%s' not found in lookup table" % action) from None
		return model_array, model_array.shape[0]

	def calculate_Score(self, video, action):
		"""Run pose estimation over every frame of *video* and score it
		against the reference coordinates for *action*.

		Returns (final_score, score_list) as produced by Score.compare().
		"""
		with tf.compat.v1.Session() as sess:
			model_cfg, model_outputs = posenet.load_model(101, sess)
			model_array, j = self.get_action_coords_from_dict(action)
			cap = cv2.VideoCapture(video)
			i = 0  # number of frames successfully processed
			if not cap.isOpened():
				print("error in opening video")
			while cap.isOpened():
				ret_val, image = cap.read()
				if ret_val:
					# Resize to the fixed resolution the model was tuned for,
					# then keep the first 17 (x, y) keypoints.
					input_points = self.a.getpoints(cv2.resize(image, (372, 495)), sess, model_cfg, model_outputs)
					input_new_coords = np.asarray(self.a.roi(input_points)[0:34]).reshape(17, 2)
					self.input_test.append(input_new_coords)
					i = i + 1
				else:
					break
			cap.release()
			final_score, score_list = self.s.compare(np.asarray(self.input_test), np.asarray(model_array), j, i)
		return final_score, score_list
Example #2
0
def main():
    """Build a pose lookup table from a reference video and pickle it.

    Reads per-keypoint weights from weights.json (normalized to sum to 1),
    runs PoseNet over every frame of args["video"], and writes
    {args["activity"]: coordinates, "weights": weights} to args["lookup"].
    """
    pose = Pose()
    coordinate_list = []

    import json

    # Load and normalize the keypoint weights so they sum to 1.
    with open('weights.json') as json_file:
        weights = json.load(json_file)["weights"]
        s = sum(weights)
        weights = [w / s for w in weights]

    with tf.compat.v1.Session() as sess:
        model_cfg, model_outputs = posenet.load_model(101, sess)

        cap = cv2.VideoCapture(args["video"])
        i = 1  # 1-based frame counter used to name the debug images

        if not cap.isOpened():
            print("error in opening video")
        while cap.isOpened():
            ret_val, image = cap.read()
            if ret_val:
                input_points, input_black_image = pose.getpoints_vis(
                    image, sess, model_cfg, model_outputs)
                # Dump the skeleton visualization for manual inspection.
                cv2.imwrite('./test_video' + str(i) + '.jpg',
                            input_black_image)
                # Keep the first 17 (x, y) keypoint coordinates.
                input_points = input_points[0:34]
                input_new_coords = pose.roi(input_points)
                input_new_coords = input_new_coords[0:34]
                input_new_coords = np.asarray(input_new_coords).reshape(17, 2)
                coordinate_list.append(input_new_coords)
                i = i + 1
            else:
                break
        cap.release()

        coordinate_list = np.array(coordinate_list)

        print("Lookup Table Created")
        # BUG FIX: the original opened the file without ever closing it,
        # risking a truncated pickle; the context manager flushes and closes.
        with open(args["lookup"], 'wb') as lookup_file:
            pickle.dump({
                args["activity"]: coordinate_list,
                "weights": weights
            }, lookup_file)
Example #3
0
def main():
	"""Build a pose lookup table for args["activity"] from args["video"].

	Shows a live skeleton visualization while processing, then pickles
	{activity: coordinate_array} into args["lookup"].
	"""
	a = Pose()
	b = []  # per-frame (17, 2) keypoint arrays
	c = {}  # activity name -> stacked coordinate array

	with tf.compat.v1.Session() as sess:
		model_cfg, model_outputs = posenet.load_model(101, sess)

		cap = cv2.VideoCapture(args["video"])
		i = 1  # frame counter

		if not cap.isOpened():
			print("error in opening video")
		while cap.isOpened():
			ret_val, image = cap.read()
			if ret_val:
				# Fixed resolution expected by the downstream scorer.
				image = cv2.resize(image, (372, 495))
				input_points, input_black_image = a.getpoints_vis(image, sess, model_cfg, model_outputs)
				# Keep the first 17 (x, y) keypoint coordinates.
				input_points = input_points[0:34]
				print(input_points)
				input_new_coords = a.roi(input_points)
				input_new_coords = input_new_coords[0:34]
				input_new_coords = np.asarray(input_new_coords).reshape(17, 2)
				b.append(input_new_coords)
				cv2.imshow("black", input_black_image)
				cv2.waitKey(1)
				i = i + 1
			else:
				break
		cap.release()

		b = np.array(b)

		# BUG FIX: the original referenced cv2.destroyAllWindows without
		# calling it, so the preview window was never closed.
		cv2.destroyAllWindows()
		print(b)
		print(b.shape)
		print("Lookup Table Created")
		c[args["activity"]] = b
		# Context manager flushes and closes the pickle file; the original
		# leaked the handle.
		with open(args["lookup"], 'wb') as f:
			pickle.dump(c, f)
Example #4
0
class get_Score(object):
    """Score a new video against reference action coordinates loaded from a
    pickled lookup table, using per-keypoint weights."""

    def __init__(self, lookup='lookup.pickle'):
        """Load the pose estimator, scorer, lookup table, and weights.

        NOTE(review): pickle.load on an untrusted file can execute arbitrary
        code -- only load lookup tables from trusted sources.
        """
        self.pose = Pose()
        self.score = Score()
        # Context manager closes the file handle; the original
        # pickle.load(open(...)) leaked it.
        with open(lookup, 'rb') as lookup_file:
            self.saved_pose = pickle.load(lookup_file)
        self.weights = self.saved_pose["weights"]
        self.new_video_coordinates = []

    def get_action_coords_from_dict(self, action):
        """Return (reference coordinate array, number of frames) for *action*.

        Raises a clear KeyError when the action is missing from the lookup
        table (the original loop left model_array unbound and crashed with
        UnboundLocalError instead).
        """
        try:
            model_array = self.saved_pose[action]
        except KeyError:
            raise KeyError("action '%s' not found in lookup table" % action) from None
        return model_array, model_array.shape[0]

    def calculate_Score(self, video, action):
        """Run pose estimation over every frame of *video* and score it
        against the reference coordinates for *action*.

        Returns (final_score, score_list) as produced by
        Score.compare_34dim().
        """
        with tf.compat.v1.Session() as sess:
            model_cfg, model_outputs = posenet.load_model(101, sess)
            reference_coordinates, reference_video_frames = self.get_action_coords_from_dict(
                action)
            cap = cv2.VideoCapture(video)
            new_video_frames = 0  # frames where a pose was detected
            if not cap.isOpened():
                print("error in opening video")
            while cap.isOpened():
                ret_val, image = cap.read()
                if ret_val:
                    input_points = self.pose.getpoints(image, sess, model_cfg,
                                                       model_outputs)
                    # Skip frames where no keypoints were detected.
                    if len(input_points) == 0:
                        continue
                    # Keep the first 17 (x, y) keypoint coordinates.
                    input_new_coords = np.asarray(
                        self.pose.roi(input_points)[0:34]).reshape(17, 2)
                    self.new_video_coordinates.append(input_new_coords)
                    new_video_frames = new_video_frames + 1
                else:
                    break
            cap.release()
            final_score, score_list = self.score.compare_34dim(
                np.asarray(self.new_video_coordinates),
                np.asarray(reference_coordinates), new_video_frames,
                reference_video_frames, self.weights)
        return final_score, score_list