# Example #1
def movement_generator(video_test, diff_skip=7, erosion=2, binary_threshold=50):
    """
    Generator that yields one integer movement value per frame of the video.

    Movement is computed as the thresholded absolute difference between two
    frames separated by diff_skip intermediate frames. A few morphological
    cleaning operations (erosion) are applied for better results.

    Args:
        video_test: path to video file
        diff_skip: distance in frames (ids) between the two frames that are
            subtracted
        erosion: erosion iterations. A higher value results in smaller movement
            values as more pixels are erased
        binary_threshold: a pixel difference is accepted for further processing
            only if it is higher than this threshold

    Yields:
        integer movement value (count of changed pixels after cleaning)
    """
    image_gen = framedatapoint_generator(video_test, skip=0, grayscale=True)
    image_gen1, image_gen2 = tee(image_gen)

    # Advance the second iterator so zip() pairs frame i with frame
    # i + diff_skip. Use next(..., None): a bare next() raising StopIteration
    # inside a generator becomes a RuntimeError under PEP 479 when the video
    # has fewer than diff_skip frames.
    for _ in range(diff_skip):
        if next(image_gen2, None) is None:
            break

    for fdp1, fdp2 in zip(image_gen1, image_gen2):
        diff = cv2.absdiff(fdp1.image, fdp2.image)
        diff = cv2.threshold(diff, binary_threshold, 255, cv2.THRESH_BINARY)[1]
        diff = cv2.erode(diff, None, iterations=erosion)

        # Binary image holds values in {0, 255}; dividing the sum by 255
        # counts the changed pixels.
        movement = diff.sum() // 255
        yield movement

    # Pad with zeros so the number of yielded values matches the video length.
    for _ in range(diff_skip):
        yield 0
def test_filter_predictions_generator():
    """Smoke test: filter predictions loaded from a CSV and optionally display them."""
    show_video = False
    data_dir = osp.join(osp.dirname(__file__), 'data')
    pred_gen_from_df = pandas_to_pred_iter(pd.read_csv(osp.join(data_dir, 'cut.csv')))
    filtered_pred = filter_pred_detections(pred_gen_from_df)
    # filtered_dataframe = TruckDetector.pred_iter_to_pandas(filtered_pred)

    video_file = osp.join(osp.dirname(__file__), '..', 'service', 'data',
                          'cut.mkv')
    image_gen = framedatapoint_generator(video_file, skip=0)
    if not show_video:
        return
    for fdp in plot_detections(image_gen, filtered_pred):
        cv2.imshow("image", fdp.image)
        cv2.waitKey(0)
# Example #3
def test_auu_data():
    """Run the detector over every avi/mkv under the AAU data root and display results."""
    auu_data_root = r'D:\tms_data\aau-rainsnow\Hjorringvej\Hjorringvej-2'
    video_files = []
    for root, _, filenames in os.walk(auu_data_root):
        video_files.extend(osp.join(root, name) for name in filenames
                           if 'avi' in name or 'mkv' in name)
    model = create_model()

    for video_path in video_files:
        frames_for_plot, frames_for_model = tee(
            framedatapoint_generator(video_path, skip=5))
        detections = compute(frames_for_model, model)
        for fdp in plot_detections(frames_for_plot, detections):
            cv2.imshow("image", fdp.image)
            cv2.waitKey(1)
# Example #4
def pytorch_motion_map(video_file):
    """
    Compute a motion map using PyTorch tensor operations.

    Not very useful: it's slow.
    """
    smoothing = GaussianSmoothing(1, 5, 1)
    batch_size = 50
    diff_skip = 7

    frames = framedatapoint_generator(video_file, skip=0, grayscale=True)
    cur_gen, ahead_gen = tee(frames)

    # Offset the second stream so each pair is diff_skip frames apart.
    for _ in range(diff_skip):
        next(ahead_gen)

    # https://stackoverflow.com/questions/56235733/is-there-a-tensor-operation-or-function-in-pytorch-that-works-like-cv2-dilate
    # https://docs.opencv.org/2.4/doc/tutorials/imgproc/erosion_dilatation/erosion_dilatation.html
    with torch.no_grad():
        while True:
            try:
                batch_a = [next(cur_gen).image for _ in range(batch_size)]
                batch_b = [next(ahead_gen).image for _ in range(batch_size)]
            except StopIteration:
                break

            t1 = default_collate(batch_a).type(torch.int32)
            t2 = default_collate(batch_b).type(torch.int32)

            n, h, w = t1.shape[0], t1.shape[1], t1.shape[2]
            t1 = smoothing(t1.view(n, 1, h, w).float())
            t2 = smoothing(t2.view(n, 1, h, w).float())

            motion = (t1 - t2).abs()
            motion = (motion > 50).type(torch.float32)
            # Erosion implemented as min-pooling (negated max-pooling), twice.
            motion = -nn.functional.max_pool2d(-motion, 3, 1)
            motion = -nn.functional.max_pool2d(-motion, 3, 1)
# Example #5
def test_TruckDetector_pred_iter_to_pandas():
    """Round-trip predictions through pandas and plot a few frames to verify them."""
    auu_data_root = r'D:\aau-rainsnow\Hjorringvej\Hjorringvej-2'
    candidates = [osp.join(root, name)
                  for root, _, names in os.walk(auu_data_root)
                  for name in names
                  if 'avi' in name or 'mkv' in name]
    video_file = candidates[0]
    #file 'Hjorringvej\\Hjorringvej-2\\cam1.mkv' has 6000 frames
    model = create_model(max_operating_res=320)
    frames_for_model, frames_for_plot = tee(
        framedatapoint_generator(video_file, skip=6000 // 30))

    df = pred_iter_to_pandas(compute(frames_for_model, model))
    restored_preds = pandas_to_pred_iter(df)

    for idx, fdp in enumerate(plot_detections(frames_for_plot, restored_preds)):
        cv2.imshow("image", fdp.image)
        cv2.waitKey(1)
        if idx == 5:
            break
# Example #6
def test_image_generator():
    """The frame generator yields FrameDatapoint instances."""
    video_path = osp.join(osp.dirname(__file__), '..', 'service', 'data', 'cut.mkv')
    gen = framedatapoint_generator(video_path=video_path)
    assert isinstance(next(gen), FrameDatapoint)
def get_raw_df_from_movie(movie_path, model):
    """Detect trains/trucks/buses in the first 200 frames and return the raw predictions dataframe."""
    frames = framedatapoint_generator(movie_path, skip=0, max_frames=200)
    predictions = compute(frames, model, filter_classes=['train', 'truck', 'bus'])
    return pred_iter_to_pandas(predictions)
import os
from truckms.inference.utils import create_avi, framedatapoint_generator

# Concatenate every source .mp4 (except a previous result) into a single AVI,
# skipping videos that yield no frames.
input_dir = r"D:\tms_data\vidin_data"
input_files = [os.path.join(input_dir, f) for f in os.listdir(input_dir)
               if ".mp4" in f and "concatenated" not in f]
destination_file = r"D:\tms_data\vidin_data\concatenated.avi"

generators = [framedatapoint_generator(video_path, skip=0) for video_path in input_files]

# Find the first video that actually yields a frame; discard exhausted ones.
# The previous bare `except:` hid real errors and raised an uncaught
# IndexError once every generator had been discarded.
first_frame = None
while generators and first_frame is None:
    try:
        first_frame = next(generators[0])
    except StopIteration:
        generators.pop(0)

if first_frame is None:
    raise RuntimeError("no readable frames found in any input video")

with create_avi(destination_file, first_image=first_frame.image) as append_fn:
    for g in generators:
        for fdp in g:
            append_fn(fdp.image)