Example #1
def test_get_modules_to_fuse():
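    # The backbone stem should be reported as a (conv, bn, relu) triple and each
    # remaining group should resolve to a single Bottleneck block.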
    model = create_model(max_operating_res=320)
    model = model.to('cpu')
    model.eval()
    replace_frozenbatchnorm_batchnorm(model)
    expected_layer_names = [
        ['backbone.body.conv1', 'backbone.body.bn1', 'backbone.body.relu'],
        ['backbone.body.layer1.0'],
        ['backbone.body.layer1.1'],
        ['backbone.body.layer1.2'],
        ['backbone.body.layer2.0'],
        ['backbone.body.layer2.1'],
        ['backbone.body.layer2.2'],
        ['backbone.body.layer2.3'],
        ['backbone.body.layer3.0'],
        ['backbone.body.layer3.1'],
        ['backbone.body.layer3.2'],
        ['backbone.body.layer3.3'],
        ['backbone.body.layer3.4'],
        ['backbone.body.layer3.5'],
        ['backbone.body.layer4.0'],
        ['backbone.body.layer4.1'],
        ['backbone.body.layer4.2'],
    ]
    modules_to_fuse = get_modules_to_fuse(model)
    assert len(expected_layer_names) == len(modules_to_fuse)
    for expected_group, actual_group in zip(expected_layer_names, modules_to_fuse):
        for expected_name, actual_name in zip(expected_group, actual_group):
            assert expected_name == actual_name
    assert isinstance(_get_module(model, modules_to_fuse[0][0]), nn.Conv2d)
    assert isinstance(_get_module(model, modules_to_fuse[0][1]),
                      nn.BatchNorm2d)
    assert isinstance(_get_module(model, modules_to_fuse[0][2]), nn.ReLU)
    for module_list in modules_to_fuse[1:]:
        assert isinstance(_get_module(model, module_list[0]), Bottleneck)
def compare_raw_vs_filtered(video_file):
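    # Raw per-frame detections are cached in df_raw.csv so repeated runs reuse
    # them instead of re-running the model.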
    model = create_model()
    if not os.path.exists("df_raw.csv"):
        df_raw = get_raw_df_from_movie(video_file, model)
        df_raw.to_csv("df_raw.csv", index=False)
    else:
        df_raw = pd.read_csv("df_raw.csv")
    df_fil = get_tracked_df_from_df(df_raw)
    compare_multiple_dataframes(video_file,
                                video_file.replace(':', '_').replace('\\', '_'),
                                df_raw, df_fil)
def test_auu_data():
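    # Run detection on every video found under the AAU RainSnow folder and show
    # the plotted detections frame by frame.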
    auu_data_root = r'D:\tms_data\aau-rainsnow\Hjorringvej\Hjorringvej-2'
    video_files = [
        osp.join(r, f) for (r, _, fs) in os.walk(auu_data_root) for f in fs
        if 'avi' in f or 'mkv' in f
    ]
    model = create_model()

    for video_path in video_files:
        image_gen = framedatapoint_generator(video_path, skip=5)
        image_gen1, image_gen2 = tee(image_gen)

        for idx, fdp in enumerate(
                plot_detections(image_gen1, compute(image_gen2, model))):
            cv2.imshow("image", fdp.image)
            cv2.waitKey(1)
Example #4
def test_replace_frozenbatchnorm_batchnorm():
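    # Replacing FrozenBatchNorm2d with regular BatchNorm2d must leave the
    # predicted boxes unchanged.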
    test_image = osp.join(osp.dirname(__file__), 'data', 'test_image.PNG')
    test_image = cv2.cvtColor(cv2.imread(test_image), cv2.COLOR_BGR2RGB)
    input_images = [FrameDatapoint(test_image, 1)]
    model = create_model()
    model = model.eval().to('cpu')
    expected_predictions = list(compute(input_images, model, cdevice='cpu'))

    replace_frozenbatchnorm_batchnorm(model)
    for child in model.modules():
        assert not isinstance(child, FrozenBatchNorm2d)
    model = model.eval().to('cpu')

    actual_predictions = list(compute(input_images, model, cdevice='cpu'))
    assert len(actual_predictions) == len(expected_predictions)
    assert (actual_predictions[0].pred['boxes'] ==
            expected_predictions[0].pred['boxes']).all()
def test_TruckDetector_pred_iter_to_pandas():
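    # Round-trip predictions through a DataFrame (pred_iter_to_pandas /
    # pandas_to_pred_iter) and visually check the first few plotted frames.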
    auu_data_root = r'D:\aau-rainsnow\Hjorringvej\Hjorringvej-2'
    video_file = [
        osp.join(r, f) for (r, _, fs) in os.walk(auu_data_root) for f in fs
        if 'avi' in f or 'mkv' in f
    ][0]
    # file 'Hjorringvej\\Hjorringvej-2\\cam1.mkv' has 6000 frames
    model = create_model(max_operating_res=320)
    image_gen = framedatapoint_generator(video_file, skip=6000 // 30)
    image_gen1, image_gen2 = tee(image_gen)

    pred_gen = compute(image_gen1, model)

    df = pred_iter_to_pandas(pred_gen)

    pred_gen_from_df = pandas_to_pred_iter(df)

    for idx, fdp in enumerate(plot_detections(image_gen2, pred_gen_from_df)):
        cv2.imshow("image", fdp.image)
        cv2.waitKey(1)
        if idx == 5:
            break
Example #6
def test_fusing():
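    # Fusing conv/bn/relu groups must keep the predicted boxes identical and the
    # scores within a small tolerance.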
    test_image = osp.join(osp.dirname(__file__), 'data', 'test_image.PNG')
    test_image = cv2.cvtColor(cv2.imread(test_image), cv2.COLOR_BGR2RGB)
    input_images = [FrameDatapoint(test_image, 1)]
    model = create_model()
    model = model.to('cpu')
    expected_predictions = list(compute(input_images, model, cdevice='cpu'))

    model = model.to('cpu')
    modules_to_fuse = get_modules_to_fuse(model)
    replace_frozenbatchnorm_batchnorm(model)
    model.eval()
    fuse_modules(model,
                 modules_to_fuse,
                 inplace=True,
                 fuser_func=custom_fuse_func)
    model = model.to('cpu')
    actual_predictions = list(compute(input_images, model, cdevice='cpu'))
    assert len(expected_predictions) == len(actual_predictions)
    assert (expected_predictions[0].pred['boxes'] ==
            actual_predictions[0].pred['boxes']).all()
    assert abs((expected_predictions[0].pred['scores'] -
                actual_predictions[0].pred['scores'])).sum() < 0.1
Example #7
def test_quantization():
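    # Compare model size and inference time of the baseline model against a
    # version with fused conv/bn/relu groups and a statically quantized backbone.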
    with torch.no_grad():
        test_image = osp.join(osp.dirname(__file__), 'data', 'test_image.PNG')
        test_image = cv2.cvtColor(cv2.imread(test_image), cv2.COLOR_BGR2RGB)
        input_images = [FrameDatapoint(test_image, 1)] * 5
        model = create_model()
        model = model.to('cpu')
        unoptimized_model_size = size_of_model(model)
        num_evaluations = 1

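        # Wrap the FPN with its quantized counterpart and time the otherwise
        # unmodified model as the baseline.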
        model.backbone.fpn = QuantizedFeaturePyramidNetwork(model.backbone.fpn)
        start = time.time()
        for i in range(num_evaluations):
            expected_predictions = list(
                compute(input_images, model, cdevice='cpu'))
        end = time.time()
        unoptimized = (end - start) / num_evaluations

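        # Build a second model, fuse conv/bn/relu groups in its backbone and run
        # post-training static quantization on it.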
        model = create_model(conf_thr=0.1)
        model = model.to('cpu')
        modules_to_fuse = get_modules_to_fuse(model)
        replace_frozenbatchnorm_batchnorm(model)
        model.eval()
        fuse_modules(model,
                     modules_to_fuse,
                     inplace=True,
                     fuser_func=custom_fuse_func)

        def run_fn(model, run_args):
            # consume the generator so the calibration forward passes actually run
            return list(compute(input_images, model))

        from torch.quantization import default_qconfig
        from torch.quantization.default_mappings import DEFAULT_MODULE_MAPPING
        from torch.quantization.quantize import prepare, propagate_qconfig_
        import torch.nn.intrinsic as nni
        import itertools

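        # Switch all ReLUs to out-of-place mode before quantization.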
        for child in model.modules():
            if isinstance(child, nn.ReLU):
                child.inplace = False

        # TODO: linear layers were removed from the qconfig spec because they
        # were too complicated to quantize (too much custom logic).
        qconfig_spec = {
            module_type: default_qconfig
            for module_type in (nn.Conv2d, nni.ConvReLU2d, nn.ReLU)
        }
        propagate_qconfig_(model.backbone, qconfig_spec)
        model.eval()
        model = torch.quantization.quantize(model,
                                            run_fn=run_fn,
                                            run_args={},
                                            mapping=DEFAULT_MODULE_MAPPING)
        # model = torch.quantization.quantize_dynamic(
        #     model,qconfig_spec=, dtype=torch.qint8,mapping=DEFAULT_MODULE_MAPPING
        # )
        print(model)
        model.transform = QuantizedGeneralizedRCNNTransform(model.transform)
        model.backbone.fpn = QuantizedFeaturePyramidNetwork(model.backbone.fpn)
        # model.rpn = QuantizedRegionProposalNetwork(model.rpn)
        optimized_model_size = size_of_model(model)
        model = model.to('cpu')
        model.eval()
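        # Time the fused and quantized model on the same inputs.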
        start = time.time()
        for i in range(num_evaluations):
            actual_predictions = list(
                compute(input_images, model, cdevice='cpu'))
        end = time.time()
        optimized = (end - start) / num_evaluations
        pprint(actual_predictions[0].pred['boxes'])
        pprint(expected_predictions[0].pred['boxes'])
        # assert optimized < unoptimized
        print("time UNOPTIMIZED VS OPTIMIZED", unoptimized, optimized)
        print("size UNOPTIMIZED VS OPTIMIZED", unoptimized_model_size,
              optimized_model_size)