コード例 #1
 def test_flop_with_output_shape(self):
     """Flop counting also works when explicit output height/width are given."""
     sample = {
         "image": torch.rand(3, 800, 800),
         "height": 700,
         "width": 700,
     }
     flops = flop_count_operators(self.model, [sample])
     self.assertEqual(int(flops["conv"]), 117)
コード例 #2
    def test_flop(self):
        """Faster R-CNN supports flop-counting with random inputs."""
        inputs = [{"image": torch.rand(3, 800, 800)}]
        res = flop_count_operators(self.model, inputs)

        # This only checks flops for backbone & proposal generator.
        # Flops for box head is not conv, and depends on #proposals, which is
        # almost 0 for random inputs.
        # BUG FIX: the original used assertTrue(int(res["conv"]), 117), which
        # treats 117 as the failure *message* and passes for any truthy count.
        # assertEqual performs the intended comparison.
        self.assertEqual(int(res["conv"]), 117)
コード例 #3
def do_flop(cfg):
    """Log per-operator (G)Flops of the model, averaged over test inputs.

    Reads up to ``args.num_inputs`` samples (``args`` is a module-level
    namespace — presumably argparse results; confirm against the caller)
    from the test data loader and averages the per-operator flop counts.
    """
    data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
    model = build_model(cfg)
    DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
    model.eval()

    counts = Counter()
    num_inputs = 0
    for _, data in zip(tqdm.trange(args.num_inputs), data_loader):  # noqa
        counts += flop_count_operators(model, data)
        num_inputs += 1
    # BUG FIX: the original divided by `idx`, the *last* loop index
    # (num_inputs - 1), which overstates the average and raises NameError
    # when the loader is empty. Divide by the actual number of inputs,
    # guarding against an empty loader.
    logger.info(
        "(G)Flops for Each Type of Operators:\n"
        + str([(k, v / max(num_inputs, 1)) for k, v in counts.items()])
    )
コード例 #4
    def test_flop(self):
        """Flop counting works with and without an explicit output shape."""
        plain = [{"image": torch.rand(3, 800, 800)}]
        with_shape = [{
            "image": torch.rand(3, 800, 800),
            "height": 700,
            "width": 700,
        }]

        for batch in (plain, with_shape):
            counts = flop_count_operators(self.model, batch)
            # The mask head could have extra conv flops, so total >= 117
            self.assertGreaterEqual(int(counts["conv"]), 117)
コード例 #5
def do_flop(cfg):
    """Log per-operator (G)Flops averages and the total flops mean±std.

    Reads up to ``args.num_inputs`` samples (``args`` is a module-level
    namespace — presumably argparse results; confirm against the caller)
    from the test data loader.
    """
    data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
    model = build_model(cfg)
    DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
    model.eval()

    counts = Counter()
    total_flops = []
    for _, data in zip(tqdm.trange(args.num_inputs), data_loader):  # noqa
        count = flop_count_operators(model, data)
        counts += count
        total_flops.append(sum(count.values()))
    # BUG FIX: the original divided by `idx`, the *last* loop index
    # (len(total_flops) - 1), overstating the average and raising NameError
    # on an empty loader. Divide by the processed-input count instead.
    num_inputs = max(len(total_flops), 1)
    logger.info(
        "(G)Flops for Each Type of Operators:\n"
        + str([(k, v / num_inputs) for k, v in counts.items()])
    )
    logger.info("Total (G)Flops: {}±{}".format(np.mean(total_flops),
                                               np.std(total_flops)))
コード例 #6
 def test_flop(self):
     """RetinaNet supports flop-counting with random inputs."""
     inputs = [{"image": torch.rand(3, 800, 800)}]
     res = flop_count_operators(self.model, inputs)
     # BUG FIX: the original used assertTrue(int(res["conv"]), 146), which
     # treats 146 as the failure *message* and passes for any truthy count.
     # assertEqual performs the intended comparison.
     self.assertEqual(int(res["conv"]), 146)  # 146B flops