def main():
    """Run social-distancing detection on a video and write an annotated copy.

    Loads the program config and the calibration data (a perspective
    transformation matrix and a pixel-to-feet scale factor) from a pickle
    file, runs a person detector on every frame, flags pairs of people
    closer than the configured distance threshold, and writes/displays the
    annotated frames until the video ends.
    """
    # Read program config
    config.load_config("./config.yml")
    # Parse the command line arguments
    args = parse_arguments()
    video_path = args["video_path"]
    pkl_file_path = args["calibration_file_path"]
    # rsplit strips only the final extension, so dots in directory names
    # (e.g. "./my.videos/clip.mp4") no longer mangle the output path.
    output_video_path = f"{video_path.rsplit('.', 1)[0]}_output.avi"
    # Fall back to the configured calibration file when none was supplied
    if pkl_file_path == "":
        pkl_file_path = config.cfg["calibration"]["pkl_file_path"]
    # NOTE(review): pickle.load is unsafe on untrusted files; the pkl is
    # assumed to come from this project's own calibration step.
    with open(pkl_file_path, 'rb') as f:
        transformation_matrix, scale_factor = pickle.load(f)
    # NOTE(review): transformation_matrix is loaded but never used below —
    # confirm whether it should feed the distance computation.
    # Initialize the person detector
    person_detector = Detector()
    # Open the video and bail out early — BEFORE creating the writer — so a
    # bad path never leaves behind an empty output file created with 0x0
    # frame dimensions.
    video = cv2.VideoCapture(video_path)
    if not video.isOpened():
        print("Invalid video path. Exiting")
        sys.exit(1)
    frame_w = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_h = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = int(video.get(cv2.CAP_PROP_FPS))
    # Create the video writer
    video_writer = cv2.VideoWriter(output_video_path,
                                   cv2.VideoWriter_fourcc(*"MJPG"), fps,
                                   (frame_w, frame_h))
    # The threshold is loop-invariant: read it from config once, not per
    # frame.
    violation_distance_threshold = config.cfg["social_distancing"][
        "distance_threshold_ft"]
    # Keep running until video ends
    while True:
        ret, frame = video.read()
        if not ret:
            break
        # Get the person detections (confidence 0.5, NMS threshold 0.45)
        detections = person_detector.do_inference(frame, 0.5, 0.45)
        # The mid-bottom point of each box approximates the feet position.
        det_points = {}
        for i, det in enumerate(detections):
            # bw/bh avoid shadowing the frame dimensions above
            x, y, bw, bh, _ = det
            det_points[i] = np.array([int(x + bw / 2), int(y + bh)])
        # Pairwise distances in feet. Float dtype is essential: the
        # original int array silently truncated dist * scale_factor.
        num_dets = len(det_points)
        distances = np.zeros((num_dets, num_dets), dtype=float)
        for i, p1 in det_points.items():
            for j, p2 in det_points.items():
                if i != j:
                    distances[i][j] = np.linalg.norm(p1 - p2) * scale_factor
        # Check for social distancing violations
        violations = []
        for i in range(num_dets):
            for j in range(num_dets):
                if i != j and distances[i][j] < violation_distance_threshold:
                    violations.append([i, j])
        # Plot and display the detections
        frame = plot_detections(frame, detections)
        frame = plot_violations(frame, det_points, violations)
        video_writer.write(frame)
        cv2.imshow("Video Frame", frame)
        cv2.waitKey(10)
    # Release the video and video writer
    video.release()
    video_writer.release()
# --- Example #2 ---
# (separator between two concatenated code samples from the source page;
# the stray "0" vote-count line that followed it was a scrape artifact)
                    type=int,
                    default=4,
                    help="Number of calibration points.")
    ap.add_argument("-iter",
                    "--num_iterations",
                    required=False,
                    type=int,
                    default=4,
                    help="Number of iterations for finding the scale factor.")

    return vars(ap.parse_args())


if __name__ == "__main__":
    # Script entry point: load the config, parse the CLI arguments and open
    # the input video. The rest of the calibration logic presumably
    # continues below this chunk (not visible here) using num_points /
    # num_iterations and the first frame read into `frame`.
    # Read program config
    config.load_config("./config.yml")
    # Parse the command line arguments
    args = parse_arguments()
    video_path = args["video_path"]
    num_points = args["num_points"]
    num_iterations = args["num_iterations"]
    # Read video and get the first frame
    video = cv2.VideoCapture(video_path)
    if video.isOpened():
        ret, frame = video.read()
        if not ret:
            # NOTE(review): "Existing" is a typo for "Exiting" in this
            # user-facing message; the f-prefix is also unnecessary (no
            # placeholders). Left unchanged here — runtime strings are code.
            print(f"Error reading the video file. Existing")
            sys.exit(1)
    else:
        # NOTE(review): same "Existing"/"Exiting" typo as above.
        print(f"Invalid video path. Existing")
        sys.exit(1)