Exemplo n.º 1
0
def full_run_sequential(video_id,
                        video_dir,
                        detector_config_path,
                        detector_model_path,
                        reid_model_path,
                        reid_model_backbone,
                        anomaly_results_dir,
                        bg_interval=4,
                        bg_alpha=0.05,
                        bg_start_frame=0,
                        bg_threshold=5,
                        raw_detect_interval=30,
                        ignore_count_thresh=0.08,
                        ignore_area_thresh=2000,
                        ignore_score_thresh=0.1,
                        ignore_gau_sigma=3,
                        abnormal_duration_thresh=60,
                        detect_duration_thresh=6,
                        undetect_duration_thresh=8,
                        bbox_score_thresh=0.3,
                        light_thresh=0.8,
                        anomaly_thresh=0.8,
                        similarity_thresh=0.95,
                        suspicious_duration_thresh=18,
                        detector_verbose_interval=20,
                        verbose=True,
                        crop_min_obj_size=8,
                        crop_row_capacity=3,
                        crop_box_aspect_ratio=2):
    """
    Run the full anomaly-detection pipeline, processing one frame at a time.

    This should be more suitable for live processing than the batch variant;
    it doesn't save any intermediate calculations.

    Pipeline stages (in order):
      1. Build a running background model of the video.
      2. Run the object detector on sparse raw frames and on the background
         images.
      3. (Optionally) build an ignore mask — currently disabled, see the TODO
         below.
      4. Feed everything to ``get_anomalies_sequential`` and save the
         resulting anomaly table as CSV.

    Parameters
    ----------
    video_id : str
        Video identifier; ``{video_id}.mp4`` is read from *video_dir* and
        results are written to ``{video_id}.csv`` in *anomaly_results_dir*.
    video_dir : str
        Directory containing the input video.
    detector_config_path, detector_model_path : str
        Config and weights for the ``Detector``.
    reid_model_path, reid_model_backbone :
        Re-identification model weights/backbone, forwarded to
        ``get_anomalies_sequential``.
    anomaly_results_dir : str
        Output directory for the anomaly CSV (created if missing).
    bg_interval, bg_alpha, bg_start_frame, bg_threshold :
        Background-model parameters, forwarded to ``calc_background``.
    raw_detect_interval : int
        Detect on every ``raw_detect_interval``-th raw frame.
    ignore_count_thresh, ignore_area_thresh, ignore_score_thresh,
    ignore_gau_sigma :
        Ignore-mask parameters — currently unused because the ignore mask is
        disabled (see TODO below).
    abnormal_duration_thresh, detect_duration_thresh,
    undetect_duration_thresh, bbox_score_thresh, light_thresh,
    anomaly_thresh, similarity_thresh, suspicious_duration_thresh :
        Anomaly-decision parameters, forwarded to
        ``get_anomalies_sequential``.
    detector_verbose_interval : int
        Progress-print interval for the detector.
    verbose : bool
        Verbosity flag for ``get_anomalies_sequential``.
    crop_min_obj_size, crop_row_capacity, crop_box_aspect_ratio :
        Crop-box parameters — currently unused because the crop-box block is
        commented out below.

    Returns
    -------
    (anomalies, anomaly_event_times)
        The anomaly table and the overlapping event times, or ``([], [])``
        when no anomalies were found.

    See ``full_run_single`` for further parameter info.
    """

    # Set up file paths
    video_path = os.path.join(video_dir, f"{video_id}.mp4")
    anomaly_results_path = os.path.join(anomaly_results_dir, f"{video_id}.csv")

    # Create folders
    os.makedirs(anomaly_results_dir, exist_ok=True)

    # Read Video
    raw_video = VideoReaderQueue(video_path, queue_size=8)

    # bg modeling
    print("Creating background...")
    bg_images = calc_background(raw_video.load_video()[0], bg_interval,
                                bg_alpha, bg_start_frame, bg_threshold)  # cpu
    # bg_images = calc_bg_tensor(raw_video.load_video()[0], bg_interval, bg_alpha, bg_start_frame, bg_threshold)  # gpu, doesnt seem to speed up much
    bg_images = (img for img, _ in bg_images)  # throw out frame

    # Detection
    detector = Detector(
        detector_config_path,
        detector_model_path,
        detector_verbose_interval,
        # Change this parameter depending what classes you need
        # class_restrictions={5, 6, 13, 18},  # voc vehicles
        class_restrictions={1, 2, 3, 5, 6, 7},  # coco vehicles
        # class_restrictions={0}, # binary (0=vehicle, 1=not vehicle)
        # class_restrictions=range(6),  # only vehicles (0-5=vehicles, 6=not_vehicle)
        # class_restrictions=None  # use all classes
    )
    print(detector.model)  # debug: show the loaded model architecture
    ## Raw Video
    print("Detecting raw video...")
    # Sparse frame-by-frame detections, backed by an on-demand getter for
    # frames the generator hasn't covered.
    raw_images, raw_frame_nums = raw_video.load_video(raw_detect_interval)
    fbf_results_gen = detector.detect_images_generator(raw_images,
                                                       raw_frame_nums)
    fbf_results_getter = detector.detect_images_getter(raw_video.get_frame)
    fbf_results = ResultsDict(fbf_results_gen,
                              results_getter=fbf_results_getter,
                              name="fbf")

    print("Detecting background...")
    static_results_gen = detector.detect_images_generator(
        bg_images, range(bg_start_frame, raw_video.nframes, bg_interval))
    static_results = ResultsDict(static_results_gen, name="static")

    # Comment out this whole block if you dont want to use crop boxes
    print("Creating crop boxes")
    # crop_box_gen = crop_box_generator(fbf_results, raw_video.img_shape)
    # cropped_results_gen = detector.detect_images_generator(bg_images,
    #                                                        range(bg_start_frame, raw_video.nframes, bg_interval),
    #                                                        crop_box_gen)
    # static_results.results_gen = cropped_results_gen

    # Ignore Region
    print("Creating ignore mask...")

    # todo:
    #   I cant find a good way to create the ignore mask for live processing.
    #   It may need to not be used until a sufficient number of frames have been processed.
    #   It also isn't very quick at taking new regions into account. eg a car that stops on the grass wont get added for a while.

    # ignore_alpha = 0.1
    # ignore_alpha_2 = 1 - (1 - ignore_alpha) ** bg_interval  # adjusted for different intervals
    # ignore_mask_gen = create_ignore_mask_generator(static_results.iterator(), raw_video.img_shape, ignore_count_thresh,
    #                                                ignore_area_thresh, ignore_score_thresh, ignore_gau_sigma,
    #                                                alpha=ignore_alpha_2)
    ignore_mask_gen = None  # dont ignore anything

    # Main sequential anomaly-detection pass over the video.
    anomalies = get_anomalies_sequential(
        raw_video, reid_model_path, fbf_results, static_results,
        ignore_mask_gen, reid_model_backbone, bg_start_frame, bg_interval,
        abnormal_duration_thresh, detect_duration_thresh,
        undetect_duration_thresh, bbox_score_thresh, light_thresh,
        anomaly_thresh, similarity_thresh, suspicious_duration_thresh, verbose)

    if anomalies is not None:
        anomaly_event_times = get_overlapping_time(anomalies)

        # Save results
        print("Saving Results...")
        anomalies.to_csv(anomaly_results_path, index=False)

        return anomalies, anomaly_event_times

    else:
        # No anomalies detected — nothing is written to disk.
        return [], []