Code Example #1
    def start_image_list_preprocessing(self,
                                       src_dir,
                                       get_worker_function,
                                       get_results_function,
                                       img_extension='.png',
                                       frames_limit=0,
                                       verbose=False):
        for lecture in self.database.lectures:
            self.current_lecture = lecture
            _, out_file, skip = self.get_lecture_params(lecture)

            if skip:
                continue

            # create a worker ...
            worker = get_worker_function(self)

            # execute the actual process ....
            processor = ImageListProcessor('{}{}'.format(
                src_dir, self.current_lecture.title),
                                           img_extension=img_extension)
            if verbose:
                print('Opening exported image folder {}{}'.format(
                    src_dir, self.current_lecture.title))
            if "forced_width" in lecture.parameters:
                processor.force_resolution(lecture.parameters["forced_width"],
                                           lecture.parameters["forced_height"])
            processor.doProcessing(worker, frames_limit, verbose)  # 0

            # save results
            if self.output_temp_prefix is not None:
                results = get_results_function(worker)
                MiscHelper.dump_save(
                    results,
                    self.temp_dir + '/' + self.output_temp_prefix + out_file)
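
The method above drives a per-frame worker over pre-exported image folders; how the two callback parameters are wired up is easiest to see in a small usage sketch. Everything below is hypothetical: SimpleFrameSampler is borrowed from the later examples only as a ready-made worker, and the "pipeline" object and paths are assumptions, not part of the original code.

# Hypothetical sketch of calling start_image_list_preprocessing.
def get_worker(process):
    # process.current_lecture is already set when this callback runs
    return SimpleFrameSampler()

def get_results(worker):
    # package whatever the worker accumulated for MiscHelper.dump_save
    return worker.frame_times, worker.frame_indices, worker.compressed_frames

# "pipeline" is assumed to be an instance of the class defining the method,
# already configured with a database, temp_dir and output_temp_prefix.
pipeline.start_image_list_preprocessing("output/images/", get_worker, get_results,
                                        img_extension=".png", verbose=True)
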
Code Example #2
    def start_video_processing(self,
                               frames_per_second,
                               get_worker_function,
                               get_results_function,
                               frames_limit=0,
                               verbose=False,
                               force_no_seek=False):
        for lecture in self.database.lectures:
            self.current_lecture = lecture
            m_videos, out_file, skip = self.get_lecture_params(lecture)

            if skip:
                continue

            # create a worker ...
            worker = get_worker_function(self)

            # execute the actual process ....
            processor = VideoProcessor(m_videos, frames_per_second)
            if "forced_width" in lecture.parameters:
                processor.force_resolution(lecture.parameters["forced_width"],
                                           lecture.parameters["forced_height"])
            processor.doProcessing(worker, frames_limit, verbose,
                                   force_no_seek)  # 0

            # save results
            if self.output_temp_prefix is not None:
                results = get_results_function(worker)
                MiscHelper.dump_save(
                    results,
                    self.temp_dir + '/' + self.output_temp_prefix + out_file)
Code Example #3
def main():
    # usage check
    if len(sys.argv) < 2:
        print("Usage:")
        print("")
        print("\tpython {0:s} config [dataset]".format(sys.argv[0]))
        print("")
        print("Where")
        print("\tconfig:\tPath to AccessMath configuration file")
        print("\tdataset:\tDataset to run (Default= Training)")
        return

    # read the configuration file ....
    config = Configuration.from_file(sys.argv[1])

    try:
        database = MetaDataDB.from_file(config.get_str("VIDEO_DATABASE_PATH"))
    except:
        print("Invalid AccessMath Database file")
        return

    output_dir = config.get_str("OUTPUT_PATH")
    video_metadata_dir = output_dir + "/" + config.get_str(
        "SPEAKER_ACTION_VIDEO_META_DATA_DIR")
    os.makedirs(video_metadata_dir, exist_ok=True)

    dataset_name = config.get("SPEAKER_TESTING_SET_NAME")
    testing_set = database.datasets[dataset_name]

    for current_lecture in testing_set:
        print("")
        print("processing: " + current_lecture.title)
        # print(all_keyframes)

        # the simple frame sampling worker ..
        worker = SimpleFrameSampler()

        # main video file names
        m_videos = [
            config.get_str("VIDEO_FILES_PATH") + "/" + video["path"]
            for video in current_lecture.main_videos
        ]

        video_info = {}
        if "forced_width" in current_lecture.parameters:
            video_info["width"] = current_lecture.parameters["forced_width"]
            video_info["height"] = current_lecture.parameters["forced_height"]
        else:
            # execute the actual process ....
            processor = SequentialVideoSampler(m_videos, [0])
            processor.doProcessing(worker, 0, True)  # 0

            video_info["width"] = worker.width
            video_info["height"] = worker.height

        output_filename = video_metadata_dir + "/" + database.name + "_" + current_lecture.title + ".pickle"
        MiscHelper.dump_save(video_info, output_filename)
Code Example #4
def main():
    if len(sys.argv) < 2:
        print("Usage")
        print("\tpython {0:s} config".format(sys.argv[0]))
        return

    # initialization #
    config = Configuration.from_file(sys.argv[1])

    try:
        database = MetaDataDB.from_file(config.get_str("VIDEO_DATABASE_PATH"))
    except:
        print("Invalid AccessMath Database file")
        return

    dataset_name = config.get("SPEAKER_TESTING_SET_NAME")
    testing_set = database.datasets[dataset_name]

    remove_confidence = config.get("SPEAKER_REMOVE_JOINT_CONFIDENCE")
    normalization_bone = config.get("SPEAKER_NORMALIZATION_BONE")  # pair of norm factor points

    # get the paths to the outputs from previous scripts ....
    output_dir = config.get_str("OUTPUT_PATH")

    # the per lecture openpose CSV
    lecture_filename_prefix = output_dir + "/" + config.get_str("OPENPOSE_OUTPUT_DIR_CSV") + "/" + database.name + "_"

    output_segment_dir = output_dir + "/" + config.get("SPEAKER_ACTION_SEGMENT_POSE_DATA_OUTPUT_DIR")
    os.makedirs(output_segment_dir, exist_ok=True)

    segment_length = config.get_int("SPEAKER_ACTION_SEGMENT_LENGTH")

    for lecture in testing_set:
        lecture_filename = lecture_filename_prefix + lecture.title + ".csv"
        print("Loading data for: " + lecture_filename)

        # get the corresponding data for this lecture ...
        lec_segments, lecture_data = LecturePoseSegments.InitializeFromLectureFile(lecture_filename, normalization_bone,
                                                                                   remove_confidence)

        # sequential sampling for pose segments
        vid_len = lecture_data.shape[0]
        for ind in range(0, int(vid_len / segment_length)):
            f_start = ind * segment_length
            f_end = f_start + segment_length - 1
            temp_data = lecture_data[f_start:f_end + 1, :]

            lec_segments.segments.append(PoseSegmentData(f_start, f_end, None, temp_data))

        # save ....
        output_filename = output_segment_dir + "/" + database.name + "_" + lecture.title + ".pickle"
        MiscHelper.dump_save(lec_segments, output_filename)
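
The sampling loop above cuts the pose data into non-overlapping, fixed-length windows; any trailing frames that do not fill a whole segment are dropped. A tiny standalone check of the index arithmetic (the values are arbitrary examples):

segment_length = 15   # example value for SPEAKER_ACTION_SEGMENT_LENGTH
vid_len = 100         # arbitrary number of frames
bounds = [(ind * segment_length, ind * segment_length + segment_length - 1)
          for ind in range(int(vid_len / segment_length))]
print(bounds)  # [(0, 14), (15, 29), (30, 44), (45, 59), (60, 74), (75, 89)]
# frames 90-99 are discarded because they do not form a complete segment
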
Code Example #5
def main():
    if len(sys.argv) < 2:
        print("Usage")
        print("\tpython {0:s} config".format(sys.argv[0]))
        return

    # initialization #
    config = Configuration.from_file(sys.argv[1])

    try:
        database = MetaDataDB.from_file(config.get_str("VIDEO_DATABASE_PATH"))
    except:
        print("Invalid AccessMath Database file")
        return

    # get paths and other configuration parameters ....
    output_dir = config.get_str("OUTPUT_PATH")
    features_dir = output_dir + "/" + config.get("SPEAKER_ACTION_FEATURES_DIR")
    classifier_dir = output_dir + "/" + config.get_str(
        "SPEAKER_ACTION_CLASSIFIER_DIR")
    os.makedirs(classifier_dir, exist_ok=True)
    classifier_filename = classifier_dir + "/" + config.get_str(
        "SPEAKER_ACTION_CLASSIFIER_FILENAME")

    dataset_name = config.get("SPEAKER_TRAINING_SET_NAME")
    training_set = database.datasets[dataset_name]
    training_titles = [lecture.title.lower() for lecture in training_set]

    # get classifier parameters
    rf_n_trees = config.get_int("SPEAKER_ACTION_CLASSIFIER_RF_TREES", 64)
    rf_depth = config.get_int("SPEAKER_ACTION_CLASSIFIER_RF_DEPTH", 16)

    # read all training data available ....
    train_dataset = {}
    for lecture in training_set:
        input_filename = features_dir + "/" + database.name + "_" + lecture.title + ".pickle"
        train_dataset[lecture.title.lower()] = MiscHelper.dump_load(
            input_filename)

    train_x, train_y, train_frame_infos = PoseFeatureExtractor.combine_datasets(
        training_titles, train_dataset)

    # classify and confusion matrix part
    clf = RandomForestClassifier(n_estimators=rf_n_trees,
                                 max_depth=rf_depth,
                                 random_state=0)
    clf = clf.fit(train_x, train_y)

    MiscHelper.dump_save(clf, classifier_filename)
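
The pickled classifier can later be loaded back with MiscHelper.dump_load and applied to new feature vectors. A brief sketch only: the test matrix below is a random placeholder and assumes train_x is a NumPy array; in the real pipeline the test features come from the same PoseFeatureExtractor output.

import numpy as np

clf = MiscHelper.dump_load(classifier_filename)   # the random forest saved above
test_x = np.random.rand(10, train_x.shape[1])     # placeholder feature matrix
pred_y = clf.predict(test_x)                      # hard action labels
pred_proba = clf.predict_proba(test_x)            # per-class probabilities
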
Code Example #6
    def start_input_processing(self, process_function):
        for lecture in self.database.lectures:
            self.current_lecture = lecture
            m_videos, lecture_file, skip = self.get_lecture_params(lecture)

            if skip:
                continue

            # read temporal file
            if self.input_temp_prefix is None:
                # null-input process (convenient way to process lectures)
                input_data = None
            else:
                if not isinstance(self.input_temp_prefix, list):
                    input_data = MiscHelper.dump_load(self.temp_dir + '/' +
                                                      self.input_temp_prefix +
                                                      lecture_file)
                else:
                    input_data = []
                    for temp_prefix in self.input_temp_prefix:
                        input_data.append(
                            MiscHelper.dump_load(self.temp_dir + '/' +
                                                 temp_prefix + lecture_file))

            # execute the actual process ....
            timer = TimeHelper()
            timer.startTimer()
            results = process_function(self, input_data)
            timer.endTimer()

            print("Process Finished in: " + timer.totalElapsedStamp())

            # save results
            if self.output_temp_prefix is not None:
                if not isinstance(self.output_temp_prefix, list):
                    MiscHelper.dump_save(
                        results, self.temp_dir + '/' +
                        self.output_temp_prefix + lecture_file)
                else:
                    for out_idx, temp_prefix in enumerate(
                            self.output_temp_prefix):
                        MiscHelper.dump_save(
                            results[out_idx],
                            self.temp_dir + '/' + temp_prefix + lecture_file)
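
A minimal usage sketch for the method above, assuming "pipeline" is an instance of the class that defines it; the prefixes and the processing callback are hypothetical:

def process_lecture(process, input_data):
    # process exposes current_lecture, database, temp_dir, etc.
    # input_data is None, a single loaded object, or a list of objects,
    # depending on how input_temp_prefix was configured.
    return {"lecture": process.current_lecture.title, "data": input_data}

pipeline.input_temp_prefix = "intermediate_"   # hypothetical prefix
pipeline.output_temp_prefix = "processed_"     # hypothetical prefix
pipeline.start_input_processing(process_lecture)
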
Code Example #7
def main():
    if len(sys.argv) < 2:
        print("Usage")
        print("\tpython {0:s} config".format(sys.argv[0]))
        return

    # initialization #
    config = Configuration.from_file(sys.argv[1])

    try:
        database = MetaDataDB.from_file(config.get_str("VIDEO_DATABASE_PATH"))
    except:
        print("Invalid AccessMath Database file")
        return

    # get paths and other configuration parameters ....
    output_dir = config.get_str("OUTPUT_PATH")
    output_segment_dir = output_dir + "/" + config.get("SPEAKER_ACTION_SEGMENT_POSE_DATA_OUTPUT_DIR")

    dataset_name = config.get("SPEAKER_TRAINING_SET_NAME")
    training_set = database.datasets[dataset_name]

    # prepare the feature extractor ...
    feature_points = config.get("SPEAKER_ACTION_FEATURE_POINTS")
    segment_length = config.get_int("SPEAKER_ACTION_SEGMENT_LENGTH", 15)
    feat_extractor = PoseFeatureExtractor(feature_points, segment_length)

    features_dir = output_dir + "/" + config.get("SPEAKER_ACTION_FEATURES_DIR")
    os.makedirs(features_dir, exist_ok=True)

    # for each file ... get features ...
    for lecture in training_set:
        input_filename = output_segment_dir + "/" + database.name + "_" + lecture.title + ".pickle"
        output_filename = features_dir + "/" + database.name + "_" + lecture.title + ".pickle"

        lecture_pose_segments = MiscHelper.dump_load(input_filename)

        vid_data = feat_extractor.get_feature_dataset(lecture_pose_segments)

        MiscHelper.dump_save(vid_data, output_filename)

    return
Code Example #8
def main():
    # usage check
    if len(sys.argv) < 2:
        print("Usage:")
        print("")
        print("\tpython {0:s} config [gt_labels]".format(sys.argv[0]))
        print("")
        print("Where")
        print("\tconfig:\tPath to AccessMath configuration file")
        print("\tgt_labels:\tuse ground truth action labels (Default= False)")
        return

    # read the configuration file ....
    config = Configuration.from_file(sys.argv[1])

    try:
        database = MetaDataDB.from_file(config.get_str("VIDEO_DATABASE_PATH"))
    except:
        print("Invalid AccessMath Database file")
        return

    output_dir = config.get_str("OUTPUT_PATH")
    output_bboxes_dir = output_dir + "/" + config.get(
        "SPEAKER_ACTION_CLASSIFICATION_BBOXES_DIR")
    video_metadata_dir = output_dir + "/" + config.get_str(
        "SPEAKER_ACTION_VIDEO_META_DATA_DIR")
    fg_mask_dir = output_dir + "/" + config.get_str(
        "SPEAKER_FG_ESTIMATION_MASK_DIR")
    os.makedirs(fg_mask_dir, exist_ok=True)

    dataset_name = config.get("SPEAKER_TESTING_SET_NAME")
    testing_set = database.datasets[dataset_name]

    speaker_exp_factor = config.get_float(
        "SPEAKER_FG_ESTIMATION_SPK_EXPANSION_FACTOR")
    min_mask_frames = config.get_int("SPEAKER_FG_ESTIMATION_MIN_MASK_FRAMES")
    mask_exp_radius = config.get_int(
        "SPEAKER_FG_ESTIMATION_MASK_EXPANSION_RADIUS")

    if len(sys.argv) >= 3:
        use_ground_truth = int(sys.argv[2]) > 0
    else:
        use_ground_truth = False

    for current_lecture in testing_set:
        bbox_filename = output_bboxes_dir + "/" + database.name + "_" + current_lecture.title + ".csv"
        frame_idxs, actions, body_bboxes, rh_bboxes = ResultReader.read_bbox_file(
            bbox_filename, use_ground_truth)

        info_filename = video_metadata_dir + "/" + database.name + "_" + current_lecture.title + ".pickle"
        video_info = MiscHelper.dump_load(info_filename)

        fg_estimator = ForegroundEstimator(video_info["width"],
                                           video_info["height"],
                                           speaker_exp_factor, min_mask_frames,
                                           mask_exp_radius)

        fg_mask = fg_estimator.get_mask(frame_idxs, actions, body_bboxes,
                                        rh_bboxes)

        # cv2.imshow(current_lecture.id, fg_mask)
        # cv2.waitKey()

        flag, raw_data = cv2.imencode(".png", fg_mask)

        output_filename = fg_mask_dir + "/" + database.name + "_" + current_lecture.title + ".pickle"
        MiscHelper.dump_save(raw_data, output_filename)
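
Note that the foreground mask is stored as PNG-encoded bytes inside a pickle, so a consumer has to decode it again. A short sketch, reusing the file name from above and assuming a single-channel mask:

raw_data = MiscHelper.dump_load(output_filename)
fg_mask = cv2.imdecode(raw_data, cv2.IMREAD_GRAYSCALE)  # back to a 2D mask image
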
Code Example #9
def main():
    # usage check
    if len(sys.argv) < 2:
        print("Usage:")
        print("")
        print("\tpython {0:s} config".format(sys.argv[0]))
        print("")
        print("Where")
        print("\tconfig:\tPath to AccessMath configuration file")
        return

    # read the configuration file ....
    config = Configuration.from_file(sys.argv[1])

    try:
        database = MetaDataDB.from_file(config.get_str("VIDEO_DATABASE_PATH"))
    except:
        print("Invalid AccessMath Database file")
        return

    output_dir = config.get_str("OUTPUT_PATH")
    temporal_segments_dir = output_dir + "/" + config.get(
        "SPEAKER_ACTION_TEMPORAL_SEGMENTS_DIR")
    keyframes_dir = output_dir + "/" + config.get(
        "SPEAKER_ACTION_KEYFRAMES_DIR")
    os.makedirs(keyframes_dir, exist_ok=True)

    dataset_name = config.get("SPEAKER_TESTING_SET_NAME")
    testing_set = database.datasets[dataset_name]

    for current_lecture in testing_set:
        # read segment data ....
        input_filename = temporal_segments_dir + "/" + database.name + "_" + current_lecture.title + ".pickle"
        video_segment_data = MiscHelper.dump_load(input_filename)

        # key-frames that must be extracted from video ...
        segments, keyframes_per_segment = video_segment_data
        all_keyframes = []
        for segment_keyframes in keyframes_per_segment:
            all_keyframes += [
                keyframe_idx for keyframe_idx, bbox in segment_keyframes
            ]

        print("")
        print("processing: " + current_lecture.title)
        # print(all_keyframes)

        # the simple frame sampling worker ..
        worker = SimpleFrameSampler()

        # main video file names
        m_videos = [
            config.get_str("VIDEO_FILES_PATH") + "/" + video["path"]
            for video in current_lecture.main_videos
        ]

        # execute the actual process ....
        processor = SequentialVideoSampler(m_videos, all_keyframes)

        if "forced_width" in current_lecture.parameters:
            processor.force_resolution(
                current_lecture.parameters["forced_width"],
                current_lecture.parameters["forced_height"])
        processor.doProcessing(worker, 0, True)  # 0

        sampled_frame_data = worker.frame_times, worker.frame_indices, worker.compressed_frames

        # save results
        keyframes_data_filename = keyframes_dir + "/" + database.name + "_" + current_lecture.title + ".pickle"
        MiscHelper.dump_save(sampled_frame_data, keyframes_data_filename)
Code Example #10
def main():
    if len(sys.argv) < 2:
        print("Usage")
        print("\tpython {0:s} config".format(sys.argv[0]))
        return

    # initialization #
    config = Configuration.from_file(sys.argv[1])

    try:
        database = MetaDataDB.from_file(config.get_str("VIDEO_DATABASE_PATH"))
    except:
        print("Invalid AccessMath Database file")
        return

    unique_label = config.get("SPEAKER_VALID_ACTIONS")

    dataset_name = config.get("SPEAKER_TRAINING_SET_NAME")

    training_set = database.datasets[dataset_name]

    remove_confidence = config.get("SPEAKER_REMOVE_JOINT_CONFIDENCE")
    normalization_bone = config.get("SPEAKER_NORMALIZATION_BONE")  # pair of norm factor points

    # get the paths to the outputs from previous scripts ....
    output_dir = config.get_str("OUTPUT_PATH")
    action_object_name = config.get_str("SPEAKER_ACTION_MAIN_OBJECT", "speaker")
    action_segment_output_dir = config.get_str("SPEAKER_ACTION_SEGMENT_OUTPUT_DIR", ".")
    segments_output_prefix = output_dir + "/" + action_segment_output_dir + "/" + database.name + "_"

    # the per lecture openpose CSV
    lecture_filename_prefix = output_dir + "/" + config.get_str("OPENPOSE_OUTPUT_DIR_CSV") + "/" + database.name + "_"

    output_segment_dir = output_dir + "/" + config.get("SPEAKER_ACTION_SEGMENT_POSE_DATA_OUTPUT_DIR")
    os.makedirs(output_segment_dir, exist_ok=True)

    # First .... cache all OpenPose CSV data per training lecture ....
    data_per_lecture = {}
    for lecture in training_set:
        lecture_filename = lecture_filename_prefix + lecture.title + ".csv"
        print("Loading data for: " + lecture_filename)

        segments, data = LecturePoseSegments.InitializeFromLectureFile(lecture_filename, normalization_bone,
                                                                       remove_confidence)

        data_per_lecture[lecture.title.lower()] = {
            "segments": segments,
            "data": data
        }

    # read the training frame segments info file
    segment_filename = segments_output_prefix + dataset_name + "_" + action_object_name + ".csv"
    speaker_seg_train = pd.read_csv(segment_filename)  # frame segment info of training data of object speaker
    speaker_seg_train = speaker_seg_train.values

    # Split the OpenPose Data based on the given segments ...
    for vid_name, f_start, f_end, label in speaker_seg_train:
        vid_name = vid_name.lower()
        # print((vid_name, f_start, f_end, label))

        # if label is not in the main 8 labels, omit it
        if label not in unique_label:
            continue

        if vid_name not in data_per_lecture:
            print("Invalid lecture name found: " + vid_name)
            continue

        temp_data = data_per_lecture[vid_name]["data"][f_start:f_end + 1, :]

        temp_pose_segment_data = PoseSegmentData(f_start, f_end, label, temp_data)
        data_per_lecture[vid_name]["segments"].segments.append(temp_pose_segment_data)

    # save to file ...
    for lecture in training_set:
        output_filename = output_segment_dir + "/" + database.name + "_" + lecture.title + ".pickle"
        MiscHelper.dump_save(data_per_lecture[lecture.title.lower()]["segments"], output_filename)

    print("Data Segment Saving Done.")
    return
Code Example #11
def main():
    # usage check
    if len(sys.argv) < 2:
        print("Usage:")
        print("")
        print("\tpython {0:s} config [gt_labels]".format(sys.argv[0]))
        print("")
        print("Where")
        print("\tconfig:\tPath to AccessMath configuration file")
        print("\tgt_labels:\tuse ground truth action labels (Default= False)")
        return

    # read the configuration file ....
    config = Configuration.from_file(sys.argv[1])

    try:
        database = MetaDataDB.from_file(config.get_str("VIDEO_DATABASE_PATH"))
    except:
        print("Invalid AccessMath Database file")
        return

    output_dir = config.get_str("OUTPUT_PATH")
    video_metadata_dir = output_dir + "/" + config.get_str(
        "SPEAKER_ACTION_VIDEO_META_DATA_DIR")
    action_class_probabilities_dir = output_dir + "/" + config.get(
        "SPEAKER_ACTION_CLASSIFICATION_PROBABILITIES_DIR")
    output_bboxes_dir = output_dir + "/" + config.get(
        "SPEAKER_ACTION_CLASSIFICATION_BBOXES_DIR")
    temporal_segments_dir = output_dir + "/" + config.get(
        "SPEAKER_ACTION_TEMPORAL_SEGMENTS_DIR")
    os.makedirs(temporal_segments_dir, exist_ok=True)

    dataset_name = config.get("SPEAKER_TESTING_SET_NAME")
    testing_set = database.datasets[dataset_name]

    valid_actions = config.get("SPEAKER_VALID_ACTIONS")

    if len(sys.argv) >= 3:
        use_ground_truth = int(sys.argv[2]) > 0
    else:
        use_ground_truth = False

    for current_lecture in testing_set:
        info_filename = video_metadata_dir + "/" + database.name + "_" + current_lecture.title + ".pickle"
        proba_filename = action_class_probabilities_dir + "/" + database.name + "_" + current_lecture.title + ".csv"

        video_info = MiscHelper.dump_load(info_filename)

        segmenter = VideoSegmenter.FromConfig(config, video_info["width"],
                                              video_info["height"])

        # read label data ....
        prob_info = ResultReader.read_actions_probabilities_file(
            proba_filename, valid_actions)
        segments, gt_actions, pred_actions, prob_actions = prob_info

        # read bbox data ...
        bbox_filename = output_bboxes_dir + "/" + database.name + "_" + current_lecture.title + ".csv"
        frame_idxs, frame_actions, body_bboxes, rh_bboxes = ResultReader.read_bbox_file(
            bbox_filename, use_ground_truth)

        # (splits_frames, video_keyframes)
        video_data = segmenter.get_keyframes(pred_actions, segments,
                                             frame_idxs, body_bboxes,
                                             rh_bboxes)

        print("")
        print("video key_frames")
        print(video_data[0])
        print(video_data[1])
        print("")

        output_filename = temporal_segments_dir + "/" + database.name + "_" + current_lecture.title + ".pickle"
        MiscHelper.dump_save(video_data, output_filename)