def test_target_pred_iter_to_pandas(mock_some_obj_some_method):
    """Run the COCO dataset through the detector and convert the target and
    prediction iterators into pandas DataFrames, persisting them as CSV files
    when running on the Linux experiments volume.

    Args:
        mock_some_obj_some_method: mock whose ``return_value`` caps the number
            of processed items (set to 100 here).
    """
    mock_some_obj_some_method.return_value = 100

    # The datalake lives at a platform-specific location.
    if platform.system() == "Linux":
        datalake_path = r"/data1/workspaces/aiftimie/tms/tms_data"
    else:
        datalake_path = r"D:\tms_data"
    download_data_if_not_exists(datalake_path)
    coco_dset = get_dataset(datalake_path)

    # tee the (frame, target) pair generator so targets and frames can be
    # consumed independently by the two downstream pipelines.
    g_tdp_fdp_1: Iterable[Tuple[FrameDatapoint, TargetDatapoint]]
    g_tdp_fdp_2: Iterable[Tuple[FrameDatapoint, TargetDatapoint]]
    g_tdp_fdp_1, g_tdp_fdp_2 = tee(gen_cocoitem2datapoints(coco_dset))
    g_tdp = gen_cocoitem2targetdp(g_tdp_fdp_1)
    g_fdp = gen_cocoitem2framedp(g_tdp_fdp_2)

    model = create_model_efficient(
        model_creation_func=partial(create_model, max_operating_res=800))
    g_pred = compute(g_fdp,
                     model,
                     batch_size=5,
                     filter_classes=model_class_names)

    df_pred, df_target = target_pred_iter_to_pandas(g_tdp, g_pred)
    # Persist only on Linux (shared experiments volume); on other platforms
    # the DataFrames are simply discarded.  (Removed the dead `else: pass`.)
    if platform.system() == "Linux":
        df_pred.to_csv(
            "/data1/workspaces/aiftimie/tms/tms_experiments/pandas_dataframes/coco_pred.csv"
        )
        df_target.to_csv(
            "/data1/workspaces/aiftimie/tms/tms_experiments/pandas_dataframes/coco_target.csv"
        )
def test_dataset_by_frame_ids():
    """Visual check: display model detections next to ground-truth targets
    for a fixed subset of COCO frame ids (press a key to advance)."""
    frame_ids = {
        724, 1532, 5037, 5992, 6040, 6723, 7088, 7386, 7977, 8762, 9769, 9891
    }
    if platform.system() == "Linux":
        datalake_path = r"/data1/workspaces/aiftimie/tms/tms_data"
    else:
        datalake_path = r"D:\tms_data"
    download_data_if_not_exists(datalake_path)
    coco_dset = get_dataset(datalake_path)

    # Duplicate the (frame, target) pair stream: one copy feeds the target
    # pipeline, the other the frame pipeline.
    pairs_for_targets: Iterable[Tuple[FrameDatapoint, TargetDatapoint]]
    pairs_for_frames: Iterable[Tuple[FrameDatapoint, TargetDatapoint]]
    pairs_for_targets, pairs_for_frames = tee(
        gen_cocoitem2datapoints(coco_dset, frame_ids))
    target_gen = gen_cocoitem2targetdp(pairs_for_targets)
    frames_a, frames_b, frames_c = tee(
        gen_cocoitem2framedp(pairs_for_frames), 3)

    model = create_model_efficient(
        model_creation_func=partial(create_model, max_operating_res=800))
    predictions = compute(frames_a,
                          model,
                          batch_size=5,
                          filter_classes=model_class_names)
    side_by_side = zip(plot_detections(frames_b, predictions),
                       plot_targets(frames_c, target_gen))
    for predicted_frame, gt_frame in side_by_side:
        cv2.imshow("image_pred", predicted_frame.image)
        cv2.imshow("image_gt", gt_frame.image)
        cv2.waitKey(0)
# 示例#3 (Example #3) — extraction-artifact separator between scraped snippets
def test_replace_frozenbatchnorm_batchnorm():
    """Replacing FrozenBatchNorm2d layers with regular batch-norm must not
    change the model's predicted boxes."""
    image_path = osp.join(osp.dirname(__file__), 'data', 'test_image.PNG')
    rgb_image = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2RGB)
    frames = [FrameDatapoint(rgb_image, 1)]
    model = create_model().eval().to('cpu')
    expected = list(compute(frames, model, cdevice='cpu'))

    replace_frozenbatchnorm_batchnorm(model)
    # After replacement no frozen batch-norm module may remain anywhere.
    assert all(not isinstance(module, FrozenBatchNorm2d)
               for module in model.modules())
    model = model.eval().to('cpu')

    actual = list(compute(frames, model, cdevice='cpu'))
    assert len(actual) == len(expected)
    assert (actual[0].pred['boxes'] == expected[0].pred['boxes']).all()
def analyze_movie(
    video_handle: io.IOBase) -> {
        "results": io.IOBase,
        "video_results": io.IOBase
    }:
    """
    Detect trucks in a movie, producing a detections CSV and an annotated video.

    Args:
        video_handle: file object for the movie to be analyzed. Only its
            ``.name`` (the path) is used; the handle is closed immediately so
            the video readers can reopen the file by path.
    Return:
          dictionary with open read-binary handles: "results" -> the .csv of
          detections, "video_results" -> the ffmpeg-re-encoded results .mp4
    """
    # Low operating resolution (320) trades accuracy for speed on full movies.
    model = create_model_efficient(
        model_creation_func=partial(create_model, max_operating_res=320))

    # Close the incoming handle; from here on only the path is needed.
    video_handle.close()
    video_file = video_handle.name
    # Reuse cached motion-frame indexes when available (p2p_load returns None
    # on a cache miss).
    frame_ids = p2p_load('frame_ids',
                         loading_func=lambda filepath: np.load(filepath))
    if frame_ids is None:
        frame_ids = movement_frames_indexes(video_file,
                                            progress_hook=p2p_progress_hook)
        p2p_save("frame_ids",
                 frame_ids,
                 saving_func=lambda filepath, item: np.save(filepath, item),
                 filesuffix=".npy")
    image_gen = framedatapoint_generator_by_frame_ids2(video_file,
                                                       frame_ids,
                                                       reason="motionmap")

    # TODO set batchsize by the available VRAM
    pred_gen = compute(image_gen, model=model, batch_size=5)
    filtered_pred = filter_pred_detections(pred_gen)
    if p2p_progress_hook is not None:
        # Wrap the prediction stream so progress is reported as it is consumed.
        filtered_pred = generator_hook(video_file, filtered_pred,
                                       p2p_progress_hook)
    df = pred_iter_to_pandas(filtered_pred)
    destination = os.path.splitext(video_file)[0] + '.csv'
    df.to_csv(destination)
    if p2p_progress_hook is not None:
        # call one more time the hook. this is just for clean ending of the processing. it may happen in case where the
        # skip is 5 that the final index is not reached, and in percentage it will look like 99.9% finished
        size = get_video_file_size(video_file) - 1
        p2p_progress_hook(size, size)

    visual_destination = os.path.splitext(video_file)[0] + '_results.mp4'
    visual_destination_good_codec = os.path.splitext(
        video_file)[0] + '_results_good_codec.mp4'
    compare_multiple_dataframes(video_file, visual_destination, df)

    # Re-encode the rendered video with ffmpeg (per the filename, to obtain a
    # more broadly compatible codec).
    subprocess.call(
        ["ffmpeg", "-i", visual_destination, visual_destination_good_codec])
    return {
        "results": open(destination, 'rb'),
        "video_results": open(visual_destination_good_codec, 'rb')
    }
# 示例#5 (Example #5) — extraction-artifact separator between scraped snippets
def test_fusing():
    """Fusing conv/bn/relu modules must keep boxes identical and leave
    scores nearly unchanged."""
    image_path = osp.join(osp.dirname(__file__), 'data', 'test_image.PNG')
    rgb_image = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2RGB)
    frames = [FrameDatapoint(rgb_image, 1)]
    model = create_model().to('cpu')
    expected = list(compute(frames, model, cdevice='cpu'))

    model = model.to('cpu')
    to_fuse = get_modules_to_fuse(model)
    # Frozen batch-norm must be swapped for regular batch-norm before fusion.
    replace_frozenbatchnorm_batchnorm(model)
    model.eval()
    fuse_modules(model,
                 to_fuse,
                 inplace=True,
                 fuser_func=custom_fuse_func)
    model = model.to('cpu')
    actual = list(compute(frames, model, cdevice='cpu'))
    assert len(expected) == len(actual)
    assert (expected[0].pred['boxes'] == actual[0].pred['boxes']).all()
    # Scores may drift slightly after fusion; allow a small tolerance.
    assert abs((expected[0].pred['scores'] -
                actual[0].pred['scores'])).sum() < 0.1
def test_auu_data():
    """Visually replay detector output over every video found under the AAU
    rain/snow data root (press a key to advance frames)."""
    auu_data_root = r'D:\tms_data\aau-rainsnow\Hjorringvej\Hjorringvej-2'
    video_files = []
    for root, _, names in os.walk(auu_data_root):
        video_files.extend(
            osp.join(root, name) for name in names
            if 'avi' in name or 'mkv' in name)
    model = create_model()

    for video_path in video_files:
        frames = framedatapoint_generator(video_path, skip=5)
        frames_for_plot, frames_for_model = tee(frames)
        detections = compute(frames_for_model, model)
        for fdp in plot_detections(frames_for_plot, detections):
            cv2.imshow("image", fdp.image)
            cv2.waitKey(1)
def get_dataframes(datalake_path, pred_csv_path, target_csv_path, max_operating_res):
    """Evaluate the detector over the COCO dataset and write prediction and
    target DataFrames to the given CSV paths.

    Args:
        datalake_path: root of the downloaded dataset.
        pred_csv_path: destination CSV for model predictions.
        target_csv_path: destination CSV for ground-truth targets.
        max_operating_res: maximum operating resolution for model creation.

    Returns:
        tuple ``(df_pred, df_target)`` — the two DataFrames that were written.
    """
    download_data_if_not_exists(datalake_path)
    dataset = get_dataset(datalake_path)

    # Split the (frame, target) pair stream for the two pipelines.
    pairs_for_targets, pairs_for_frames = tee(gen_cocoitem2datapoints(dataset))
    target_gen = gen_cocoitem2targetdp(pairs_for_targets)
    frame_gen = gen_cocoitem2framedp(pairs_for_frames)

    creation_func = partial(create_model,
                            max_operating_res=max_operating_res,
                            conf_thr=0.05)
    model = create_model_efficient(model_creation_func=creation_func)
    prediction_gen = compute(frame_gen,
                             model,
                             batch_size=10,
                             filter_classes=model_class_names)

    df_pred, df_target = target_pred_iter_to_pandas(target_gen, prediction_gen)
    df_pred.to_csv(pred_csv_path)
    df_target.to_csv(target_csv_path)
    return df_pred, df_target
def evaluate_speed(mock_some_obj_some_method, datalake_path, max_operating_res):
    """Measure the average per-frame time of the full prediction pipeline.

    Args:
        mock_some_obj_some_method: mock whose ``return_value`` caps the number
            of processed frames (set to 200 here).
        datalake_path: root of the downloaded dataset.
        max_operating_res: maximum operating resolution for model creation.

    Returns:
        float — elapsed seconds divided by the number of evaluated frames.
    """
    num_eval_frames = 200
    mock_some_obj_some_method.return_value = num_eval_frames

    download_data_if_not_exists(datalake_path)
    coco_dset = get_dataset(datalake_path)

    g_tdp_fdp_1, g_tdp_fdp_2 = tee(gen_cocoitem2datapoints(coco_dset))
    g_tdp = gen_cocoitem2targetdp(g_tdp_fdp_1)
    g_fdp = gen_cocoitem2framedp(g_tdp_fdp_2)

    model = create_model_efficient(
        model_creation_func=partial(create_model,
                                    max_operating_res=max_operating_res,
                                    conf_thr=0.05))
    g_pred = compute(g_fdp, model, batch_size=10, filter_classes=model_class_names)
    start = time.time()
    # Consuming the iterator drives the whole lazy pipeline; the resulting
    # DataFrames are not needed for timing (previously bound to unused locals).
    target_pred_iter_to_pandas(g_tdp, g_pred)
    end = time.time()
    return (end - start) / num_eval_frames
def test_truck_detector():
    """Smoke test: the detector returns a non-empty list of well-formed
    PredictionDatapoint objects for a single test image."""
    show_image = False
    image_path = osp.join(osp.dirname(__file__), 'data', 'test_image.PNG')
    rgb_image = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2RGB)
    model = create_model_efficient()

    frames = [FrameDatapoint(rgb_image, 1)]
    predictions = list(compute(frames, model))
    assert predictions
    assert isinstance(predictions, list)
    first = predictions[0]
    assert isinstance(first, PredictionDatapoint)
    assert isinstance(first.pred, dict)
    for key in ('labels', 'scores', 'boxes'):
        assert key in first.pred
    if show_image:
        annotated = next(plot_detections(frames, predictions))
        cv2.imshow("image", annotated.image)
        cv2.waitKey(0)
def test_TruckDetector_pred_iter_to_pandas():
    """Round-trip predictions through a pandas DataFrame and replay the
    first few reconstructed detections over the video frames."""
    auu_data_root = r'D:\aau-rainsnow\Hjorringvej\Hjorringvej-2'
    #file 'Hjorringvej\\Hjorringvej-2\\cam1.mkv' has 6000 frames
    candidates = [
        osp.join(root, name) for (root, _, names) in os.walk(auu_data_root)
        for name in names if 'avi' in name or 'mkv' in name
    ]
    video_file = candidates[0]
    model = create_model(max_operating_res=320)
    frames_for_model, frames_for_plot = tee(
        framedatapoint_generator(video_file, skip=6000 // 30))

    prediction_gen = compute(frames_for_model, model)
    df = pred_iter_to_pandas(prediction_gen)
    restored_predictions = pandas_to_pred_iter(df)

    for idx, fdp in enumerate(
            plot_detections(frames_for_plot, restored_predictions)):
        cv2.imshow("image", fdp.image)
        cv2.waitKey(1)
        if idx == 5:
            break
# 示例#11 (Example #11) — extraction-artifact separator between scraped snippets
def test_quantiaztion():
    """Compare inference time and model size before and after static
    quantization of the detector's backbone.

    NOTE(review): the function name keeps its original (misspelled)
    identifier so existing test collection still finds it.
    """
    with torch.no_grad():
        test_image = osp.join(osp.dirname(__file__), 'data', 'test_image.PNG')
        test_image = cv2.cvtColor(cv2.imread(test_image), cv2.COLOR_BGR2RGB)
        # Five copies of the same frame so timing covers several batches.
        input_images = [FrameDatapoint(test_image, 1)] * 5
        model = create_model()
        model = model.to('cpu')
        unoptimized_model_size = size_of_model(model)
        num_evaluations = 1

        # Baseline: only the FPN is replaced with its quantized counterpart.
        model.backbone.fpn = QuantizedFeaturePyramidNetwork(model.backbone.fpn)
        start = time.time()
        for i in range(num_evaluations):
            expected_predictions = list(
                compute(input_images, model, cdevice='cpu'))
        end = time.time()
        unoptimized = (end - start) / num_evaluations

        # Fresh model for the fully quantized pipeline; batch-norm must be
        # unfrozen and fused before quantization can be applied.
        model = create_model(conf_thr=0.1)
        model = model.to('cpu')
        modules_to_fuse = get_modules_to_fuse(model)
        replace_frozenbatchnorm_batchnorm(model)
        model.eval()
        fuse_modules(model,
                     modules_to_fuse,
                     inplace=True,
                     fuser_func=custom_fuse_func)

        # Calibration function required by torch.quantization.quantize;
        # `run_agrs` (sic) is accepted but unused.
        def run_fn(model, run_agrs):
            return compute(input_images, model)

        # NOTE(review): these import paths match older torch releases
        # (torch.quantization.QConfig / default_mappings) — confirm against
        # the pinned torch version.
        from torch.quantization.QConfig import default_qconfig
        from torch.quantization.default_mappings import DEFAULT_MODULE_MAPPING
        from torch.quantization.quantize import prepare, propagate_qconfig_
        import torch.nn.intrinsic as nni
        import itertools

        # In-place ReLU interferes with observer insertion; disable it.
        for child in model.modules():
            if isinstance(child, nn.ReLU):
                child.inplace = False

        # TODO i removed the linear layers because they were too complicated for quantization. too much logic
        # Quantize only conv / conv+relu / relu modules in the backbone.
        qconfig_spec = dict(
            zip({nn.Conv2d, nni.ConvReLU2d, nn.ReLU},
                itertools.repeat(default_qconfig)))
        propagate_qconfig_(model.backbone, qconfig_spec)
        model.eval()
        model = torch.quantization.quantize(model,
                                            run_fn=run_fn,
                                            run_args={},
                                            mapping=DEFAULT_MODULE_MAPPING)
        # model = torch.quantization.quantize_dynamic(
        #     model,qconfig_spec=, dtype=torch.qint8,mapping=DEFAULT_MODULE_MAPPING
        # )
        print(model)
        # Swap transform and FPN for their quantized wrappers after the main
        # quantize pass.
        model.transform = QuantizedGeneralizedRCNNTransform(model.transform)
        model.backbone.fpn = QuantizedFeaturePyramidNetwork(model.backbone.fpn)
        # model.rpn = QuantizedRegionProposalNetwork(model.rpn)
        optimized_model_size = size_of_model(model)
        model = model.to('cpu')
        model.eval()
        start = time.time()
        for i in range(num_evaluations):
            actual_predictions = list(
                compute(input_images, model, cdevice='cpu'))
        end = time.time()
        optimized = (end - start) / num_evaluations
        pprint(actual_predictions[0].pred['boxes'])
        pprint(expected_predictions[0].pred['boxes'])
        # assert optimized < unoptimized
        print("time UNOPTIMIZED VS OPTIMIZED", unoptimized, optimized)
        print("size UNOPTIMIZED VS OPTIMIZED", unoptimized_model_size,
              optimized_model_size)
# 示例#12 (Example #12) — extraction-artifact separator between scraped snippets
 def run_fn(model, run_agrs):
     # Calibration/run function in the shape torch.quantization.quantize
     # expects (model, run_args).  NOTE(review): this fragment relies on a
     # free variable `input_images` from an enclosing scope that is not
     # visible here — confirm where it belongs; `run_agrs` (sic) is unused.
     return compute(input_images, model)
def get_raw_df_from_movie(movie_path, model):
    """Run the detector over the first 200 frames of a movie and return the
    predictions as a pandas DataFrame.

    Args:
        movie_path: path to the video file.
        model: detection model passed straight to ``compute``.

    Returns:
        DataFrame of predictions filtered to train/truck/bus classes.
    """
    frame_gen = framedatapoint_generator(movie_path, skip=0, max_frames=200)
    prediction_gen = compute(frame_gen,
                             model,
                             filter_classes=['train', 'truck', 'bus'])
    return pred_iter_to_pandas(prediction_gen)