Example #1
    def preprocessing(self, input_length, file_names):
        human_data = list()
        robot_data = list()
        third_data = list()

        pbar = tqdm(total=len(file_names))
        for file in file_names:
            with np.load(file, allow_pickle=True) as data:
                human_data.append([norm_features(human, self.norm_method) for human in data['human_info']])
                # robot_data is filled for symmetry but not used by this method
                robot_data.append([norm_features(robot, self.norm_method) for robot in data['robot_info']])
                third_data.append(data['third_info'])
            pbar.update(1)
        pbar.close()

        # downsample from the Kinect frame rate to the target frame rate
        step = round(KINECT_FRAME_RATE / TARGET_FRAME_RATE)
        inputs = list()
        for idx, third in enumerate(third_data):
            # skip files whose third-person info is constantly 1.0
            if all(v == 1.0 for v in third):
                continue

            sampled_human_seq = human_data[idx][::step]
            sampled_third_seq = third_data[idx][::step]
            for human_seq, third_seq in zip(gen_sequence(sampled_human_seq, input_length),
                                            gen_sequence(sampled_third_seq, input_length)):
                inputs.append(np.concatenate((third_seq, human_seq), axis=1))

        return self.make_dataframe(inputs, input_length)
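
Note: every example on this page relies on a gen_sequence helper whose definition is not shown. A minimal sketch of what it is assumed to do, namely yield every overlapping window of a fixed length, follows; treat it as an illustration, not the project's actual implementation.

def gen_sequence(data, length):
    # Assumed behavior: slide a window of `length` items one step at a time.
    for start in range(len(data) - length + 1):
        yield data[start:start + length]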
Example #2
def test():
    # action list to test
    # actions = ["A001", "A003", "A004", "A005", "A006", "A008"]
    actions = ["A004", "A005"]

    # show all test data
    data_files = list()
    for action in actions:
        data_files.extend(glob.glob(os.path.join(TRAIN_PATH, f"*{action}*.npz")))
    data_files.sort()
    n_data = len(data_files)

    print('There are %d data files.' % n_data)
    for data_idx in range(n_data):
        print('%d: %s' % (data_idx, os.path.basename(data_files[data_idx])))

    # select data name to draw
    artist = Artist(n_plot=1)
    while True:
        var = int(input("Input data number to display: "))
        data_file = data_files[var]

        with np.load(data_file, allow_pickle=True) as data:
            # action class mapping
            print(os.path.basename(data_file))
            # the action code (e.g. "A004") sits at a fixed position in the file name
            action = os.path.basename(data_file)[4:8]
            kmeans, sub_action_mapping = load_proper_model(action)
            km_model = kmeans.km_model

            # extract inputs from data file
            human_data = [norm_features(human, NORM_METHOD) for human in data['human_info']]
            third_data = data['third_info']

            sampled_human_data = human_data[::3]
            sampled_third_data = third_data[::3]

            # draw the leading frames before a full window is available
            for f in range(SEQ_LENGTH - 1):
                features = denorm_features(sampled_human_data[f], NORM_METHOD)
                action_info = "None"
                frame_info = f"{f+1}/{len(sampled_human_data)}"
                artist.update([features], [action_info], [frame_info], fps=10)

            # recognize sub-action once a full window of SEQ_LENGTH frames is available
            for w, (human_seq, third_seq) in enumerate(
                    zip(gen_sequence(sampled_human_data, SEQ_LENGTH),
                        gen_sequence(sampled_third_data, SEQ_LENGTH))):
                seq = np.concatenate((third_seq, human_seq), axis=1)
                df = kmeans.make_dataframe([seq], SEQ_LENGTH)
                sub_action = km_model.predict(df)
                action_name = ALL_SUBACTION_NAMES[sub_action_mapping[sub_action[0]]]
                print(action_name)

                f = w + SEQ_LENGTH - 1  # index of the window's last frame
                features = denorm_features(human_seq[-1], NORM_METHOD)
                frame_info = f"{f+1}/{len(sampled_human_data)}"
                artist.update([features], [action_name], [frame_info], fps=10)
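
The hard-coded [::3] stride above matches the step computed in Example #1 if the project constants take the values assumed below (both values are assumptions, not confirmed by the source):

KINECT_FRAME_RATE = 30  # assumed value of the project constant
TARGET_FRAME_RATE = 10  # assumed value of the project constant
step = round(KINECT_FRAME_RATE / TARGET_FRAME_RATE)
assert step == 3  # the stride hard-coded as [::3] in this example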
Example #3
def test_all():
    # action list to test
    actions = ["A008"]

    # show all test data
    data_files = list()
    for action in actions:
        data_files.extend(glob.glob(os.path.join(TRAIN_PATH, f"*{action}*.npz")))
    for action in actions:
        data_files.extend(glob.glob(os.path.join(TEST_PATH, f"*{action}*.npz")))
    data_files.sort()
    n_data = len(data_files)

    print('There are %d data files.' % n_data)

    # test each data
    for data_file in data_files:
        with np.load(data_file, allow_pickle=True) as data:
            results = list()

            # action class mapping
            # print(os.path.basename(data_file))
            action = os.path.basename(data_file)[4:8]
            kmeans, sub_action_mapping = load_proper_model(action)
            km_model = kmeans.km_model

            # extract inputs from data file
            human_data = [norm_features(human, NORM_METHOD) for human in data['human_info']]
            third_data = data['third_info']

            sampled_human_data = human_data[::3]
            sampled_third_data = third_data[::3]

            # recognize sub-action
            for human_seq, third_seq in zip(gen_sequence(sampled_human_data, SEQ_LENGTH),
                                            gen_sequence(sampled_third_data, SEQ_LENGTH)):
                seq = np.concatenate((third_seq, human_seq), axis=1)
                df = kmeans.make_dataframe([seq], SEQ_LENGTH)
                sub_action = km_model.predict(df)
                results.append(sub_action_mapping[sub_action[0]])

            # print results
            # if not all(result == 0 for result in results):
            # if not any(result == 1 for result in results):
            print(os.path.basename(data_file))
            print(results)
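
For reference, the calls above imply the following contract for load_proper_model. This sketch only restates what the example already does; the function's definition is not shown, and `windows` stands for the (third_seq, human_seq) concatenations built in the loop above.

kmeans, sub_action_mapping = load_proper_model("A008")  # "A008" as in this example
# `windows` is a placeholder for the sequence windows built above
df = kmeans.make_dataframe(windows, SEQ_LENGTH)  # one row per sequence window
cluster_ids = kmeans.km_model.predict(df)        # sklearn-style predict
names = [ALL_SUBACTION_NAMES[sub_action_mapping[c]] for c in cluster_ids]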
Example #4
def test_with_webcam(model):
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, W_VIDEO)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, H_VIDEO)

    last = time.time()
    inputs = list()

    while True:
        if time.time() - last > 0.1:  # throttle to roughly 10 Hz
            last = time.time()

            # 2d skeleton from video
            ret, frame = cap.read()
            if not ret:  # camera read failed; skip this tick
                continue
            key_points, output_data = pose_keypoints(frame)

            output_data = cv2.flip(output_data, 1)  # flip code 1 = horizontal mirror
            cv2.imshow(f'{W_VIDEO}x{H_VIDEO}', output_data)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

            # extract skeleton features
            if len(key_points.shape) != 3:
                continue
            user_key_points = key_points[0]
            # skip frames where key joints 4 and 7 were not detected (all-zero coordinates)
            if not user_key_points[4].any() or not user_key_points[7].any():
                continue

            skel = user_key_points[:, :2]
            body = skel_to_AIR(skel)
            features = norm_features(body, NORM_METHOD, INPUT_DATA_TYPE,
                                     B_HANDS)

            # stack joint information
            inputs.append(features)
            inputs = inputs[-lstm_input_length:]
            if len(inputs) < lstm_input_length:
                continue

            behaviors, behavior_names = classify(model, [inputs])
            print("user:", behavior_names[0])
            yield body, behaviors[0]

    # release the camera and close the preview window once the loop exits
    cap.release()
    cv2.destroyAllWindows()
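
A hypothetical driver for this generator; how `model` is created is not shown in the source, so load_model here is a placeholder for whatever classify expects.

if __name__ == '__main__':
    model = load_model()  # placeholder; not a function from the source
    for body, behavior in test_with_webcam(model):
        print('behavior id:', behavior)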
Example #5
def test_with_kinect(model):
    game = BodyGameRuntime()
    last = time.time()
    first = True
    inputs = list()

    artist = Artist(n_plot=1)
    start_time = time.time()
    for joints in game.run():
        if time.time() - last > 0.1 and (len(joints) > 0 or not first):
            first = False
            last = time.time()
            if len(joints) > 0:
                # track the closest person (smallest spine-shoulder depth)
                depths = [joint[20].Position.z
                          for joint in joints]  # 20: spineShoulder
                idx = depths.index(min(depths))
                body = pose_to_AIR(joints[idx])
            else:
                body = null_body_3D()

            # stack joint information
            distance = body[0]['z'] / MAX_DISTANCE
            inputs.append(
                np.hstack([
                    distance if distance != 0.0 else 1.0,
                    norm_features(body, NORM_METHOD, INPUT_DATA_TYPE, B_HANDS)
                ]))
            inputs = inputs[-lstm_input_length:]
            if len(inputs) < lstm_input_length:
                continue

            behaviors, behavior_names = classify(model, [inputs])
            cur_features = denorm_features(inputs[-1][1:], NORM_METHOD,
                                           INPUT_DATA_TYPE, B_HANDS)
            elapsed_time = time_to_string(time.time() - start_time)
            artist.update([cur_features], [behavior_names[0]], [elapsed_time])
            yield body, behaviors[0]
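
Each timestep stacked into inputs above is a flat vector whose first element is the normalized distance and whose remainder is the normalized skeleton, which is why denorm_features receives inputs[-1][1:]. A toy illustration of that layout, with an arbitrary feature count:

import numpy as np

distance = 0.5           # stands in for body[0]['z'] / MAX_DISTANCE
skeleton = np.zeros(30)  # stands in for the norm_features(...) output
timestep = np.hstack([distance, skeleton])
assert timestep[0] == distance
assert timestep[1:].shape == skeleton.shape  # the part passed to denorm_features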
Example #6
    data_files.extend(glob.glob(os.path.join(TEST_PATH, f"*{action}*.npz")))
    data_files.extend(glob.glob(os.path.join(TRAIN_PATH, f"*{action}*.npz")))
data_files.sort()

# label action classes
pbar = tqdm(total=len(data_files))
for data_file in data_files:
    with np.load(data_file, allow_pickle=True) as data:
        # action class mapping
        action = os.path.basename(data_file)[4:8]
        kmeans, sub_action_mapping = load_proper_model(action)
        km_model = kmeans.km_model

        # extract inputs from data file
        human_data = [
            norm_features(human, method=NORM_METHOD, type='3D', b_hand=True)
            for human in data['human_info']
        ]
        third_data = data['third_info']

        step = round(KINECT_FRAME_RATE / TARGET_FRAME_RATE)
        sampled_human_data = human_data[::step]
        sampled_third_data = third_data[::step]

        # label "None"
        sampled_labels = list()
        for f in range(SEQ_LENGTH - 1):
            sampled_labels.append("None")

        # label recognized action class by k-means clustering
        for human_seq, third_seq in zip(
                gen_sequence(sampled_human_data, SEQ_LENGTH),
                gen_sequence(sampled_third_data, SEQ_LENGTH)):
            # (the source is truncated here; this window loop mirrors Example #3)
            seq = np.concatenate((third_seq, human_seq), axis=1)
            df = kmeans.make_dataframe([seq], SEQ_LENGTH)
            sub_action = km_model.predict(df)
            sampled_labels.append(
                ALL_SUBACTION_NAMES[sub_action_mapping[sub_action[0]]])

    pbar.update(1)
pbar.close()
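
The "None" padding above keeps labels aligned with frames: N sampled frames yield N - SEQ_LENGTH + 1 windows, so SEQ_LENGTH - 1 padding labels plus one label per window cover every frame. A quick check with hypothetical values:

n_frames, seq_length = 100, 15  # hypothetical values; SEQ_LENGTH is project config
n_windows = n_frames - seq_length + 1
assert (seq_length - 1) + n_windows == n_frames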
Example #7
    def __init__(self,
                 data_path,
                 dim_input,
                 dim_output,
                 data_name=None,
                 b_add_noise=False,
                 b_connect_sequence=False):
        # load data from files
        self.data_path = data_path
        self.data_name = data_name
        self.file_names = list()
        if data_name is not None:
            self.file_names.append(os.path.join(self.data_path,
                                                self.data_name))
        else:
            for action in ACTIONS:
                self.file_names.extend(
                    glob.glob(os.path.join(self.data_path, f"*{action}*.npz")))
        path = data_path if data_name is None else os.path.join(
            data_path, data_name)
        print(f'Data loading... ({path})')
        print(f'Total {len(self.file_names)} files.')

        self.human_data = list()
        self.robot_data = list()
        self.third_data = list()
        human_actions = list()
        step = round(KINECT_FRAME_RATE / TARGET_FRAME_RATE)
        for file in self.file_names:
            with np.load(file, allow_pickle=True) as data:
                self.human_data.append([
                    norm_features(human, NORM_METHOD, INPUT_DATA_TYPE, B_HANDS)
                    for human in data['human_info']
                ][::step])
                self.robot_data.append([
                    norm_features(robot, NORM_METHOD, INPUT_DATA_TYPE, B_HANDS)
                    for robot in data['robot_info']
                ][::step])
                self.third_data.append(data['third_info'][::step])
                human_actions.append(data['human_action'][::step])

        # extract training data
        self.dim_input = dim_input
        self.dim_output = dim_output
        self.b_add_noise = b_add_noise
        self.b_connect_sequence = b_connect_sequence

        self.inputs = list()
        self.outputs = list()
        seq_length = self.dim_input[0]
        pbar = tqdm(total=len(self.third_data))
        for idx, third in enumerate(self.third_data):
            # skip files whose third-person info is constantly 1.0
            if all(v == 1.0 for v in third):
                continue
            f = seq_length - 1
            for human_seq, third_seq in zip(
                    gen_sequence(self.human_data[idx], seq_length),
                    gen_sequence(self.third_data[idx], seq_length)):
                action_name = human_actions[idx][f]
                f += 1  # advance the frame index even when a window is skipped
                if action_name not in SUBACTION_NAMES:
                    continue
                seq = np.concatenate(
                    (third_seq, human_seq),
                    axis=1) if INPUT_DATA_TYPE == '3D' else np.array(human_seq)
                self.inputs.append(seq)
                cur_action = SUBACTION_NAMES.index(action_name)
                self.outputs.append(cur_action)

            pbar.update(1)
        pbar.close()
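
A hypothetical instantiation of this dataset class; the enclosing class name is not shown above, so AIRDataset and the dimension values are placeholders.

dataset = AIRDataset(data_path=TRAIN_PATH,  # AIRDataset is a placeholder name
                     dim_input=(15, 25),    # (seq_length, n_features), assumed
                     dim_output=(len(SUBACTION_NAMES),),
                     b_add_noise=False)
print(len(dataset.inputs), 'training sequences,', len(dataset.outputs), 'labels')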