예제 #1
0
class TestAdaptorONNXRT(unittest.TestCase):
    """Adaptor tests for the ONNX Runtime qlinearops backend.

    Class-level fixtures export torchvision MobileNetV2 / ResNet50 to ONNX
    once per class and share a dummy ImageNet-shaped dataloader across tests.
    """

    mb_v2_export_path = "mb_v2.onnx"
    mb_v2_model = torchvision.models.mobilenet_v2()
    rn50_export_path = "rn50.onnx"
    rn50_model = torchvision.models.resnet50()

    # 100 random ImageNet-shaped labelled samples, batched by the default
    # qlinearops dataloader.
    datasets = DATASETS('onnxrt_qlinearops')
    cv_dataset = datasets['dummy'](shape=(100, 3, 224, 224), low=0., high=1., label=True)
    cv_dataloader = DATALOADERS['onnxrt_qlinearops'](cv_dataset)

    @classmethod
    def setUpClass(cls):
        """Build the yaml configs and swap the torch models for ONNX ones."""
        # fix: classmethods take `cls`, not `self`, as their first argument
        build_static_yaml()
        build_dynamic_yaml()
        build_non_MSE_yaml()
        export_onnx_model(cls.mb_v2_model, cls.mb_v2_export_path)
        cls.mb_v2_model = onnx.load(cls.mb_v2_export_path)
        export_onnx_model(cls.rn50_model, cls.rn50_export_path)
        cls.rn50_model = onnx.load(cls.rn50_export_path)

    @classmethod
    def tearDownClass(cls):
        """Remove yaml configs, exported models and workspace directories."""
        os.remove("static_yaml.yaml")
        os.remove("dynamic_yaml.yaml")
        os.remove("non_MSE_yaml.yaml")
        os.remove(cls.mb_v2_export_path)
        os.remove(cls.rn50_export_path)
        shutil.rmtree("./saved", ignore_errors=True)
        shutil.rmtree("runs", ignore_errors=True)

    def test_adaptor(self):
        """inspect_tensor on the ResNet50 Conv nodes via the raw adaptor API."""
        framework_specific_info = {"device": "cpu",
                               "approach": "post_training_static_quant",
                               "random_seed": 1234,
                               "q_dataloader": None,
                               "backend": "qlinearops",
                               "workspace_path": './lpot_workspace/{}/{}/'.format(
                                                       'onnxrt',
                                                       'imagenet')}
        framework = "onnxrt_qlinearops"
        adaptor = FRAMEWORKS[framework](framework_specific_info)
        adaptor.inspect_tensor(self.rn50_model, self.cv_dataloader, ["Conv"])

    def test_quantizate(self):
        """Static/dynamic quantization of ResNet50 plus non-MSE MobileNetV2.

        NOTE(review): the method name keeps its original 'quantizate'
        spelling so external test selectors (e.g. pytest -k) keep working.
        """
        from lpot.experimental import Quantization, common
        for fake_yaml in ["static_yaml.yaml", "dynamic_yaml.yaml"]:
            quantizer = Quantization(fake_yaml)
            quantizer.calib_dataloader = self.cv_dataloader
            quantizer.eval_dataloader = self.cv_dataloader
            quantizer.model = common.Model(self.rn50_model)
            q_model = quantizer()
            eval_func(q_model)
        for fake_yaml in ["non_MSE_yaml.yaml"]:
            quantizer = Quantization(fake_yaml)
            quantizer.calib_dataloader = self.cv_dataloader
            quantizer.eval_dataloader = self.cv_dataloader
            quantizer.model = common.Model(self.mb_v2_model)
            q_model = quantizer()
            eval_func(q_model)
예제 #2
0
    def test_tensorflow_dummy(self):
        """Dummy TF dataset: default batching, rebatching, invalid args."""
        tf_datasets = DATASETS('tensorflow')
        dummy = tf_datasets['dummy'](shape=(4, 256, 256, 3))
        loader = DATALOADERS['tensorflow'](dummy)

        # default batch size is 1
        batch = next(iter(loader))
        self.assertEqual(batch.shape, (1, 256, 256, 3))

        # rebatch on the fly with rollover of the remainder
        loader.batch(batch_size=2, last_batch='rollover')
        batch = next(iter(loader))
        self.assertEqual(batch.shape, (2, 256, 256, 3))

        # list-valued arguments must be consistent with the shape argument
        for bad_kwargs in ({'shape': [(4, 256, 256, 3), (256, 256, 3)]},
                           {'shape': (4, 256, 256, 3), 'low': [1., 0.]},
                           {'shape': (4, 256, 256, 3), 'high': [128., 127.]},
                           {'shape': (4, 256, 256, 3),
                            'dtype': ['float32', 'int8']}):
            with self.assertRaises(AssertionError):
                tf_datasets['dummy'](**bad_kwargs)
예제 #3
0
    def test_footprint(self):
        """Benchmark the constant graph against a dummy grayscale dataset."""
        from lpot.experimental import Benchmark, common
        from lpot.data import DATASETS
        dummy = DATASETS('tensorflow')['dummy']((100, 256, 256, 1),
                                                label=True)
        bench = Benchmark('fake_yaml_footprint.yaml')
        bench.b_dataloader = common.DataLoader(dummy)
        bench.model = self.constant_graph_1
        bench()
예제 #4
0
 def test_style_transfer_dataset(self):
     """Fetch a sample jpg and index the style_transfer dataset once."""
     jpg_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/c/c1/Doll_face_silver_Persian.jpg/1024px-Doll_face_silver_Persian.jpg"
     # NOTE(review): downloads from the network via wget at test time
     os.system("wget {} -O test.jpg".format(jpg_url))
     ds = DATASETS('tensorflow')['style_transfer'](content_folder='./',
                                                   style_folder='./')
     _ = len(ds)
     sample, _label = ds[0]
     # each sample is a (content, style) pair of 256x256 RGB images
     self.assertEqual(sample[0].shape, (256, 256, 3))
     self.assertEqual(sample[1].shape, (256, 256, 3))
     os.remove('test.jpg')
예제 #5
0
    def test_onnx_integer_dummy(self):
        """Dummy dataset through the onnxrt_integerops dataloader."""
        dummy = DATASETS('onnxrt_integerops')['dummy'](shape=(4, 256, 256, 3))
        loader = DATALOADERS['onnxrt_integerops'](dummy)

        # default batching yields single samples
        batch = next(iter(loader))
        self.assertEqual(batch.shape, (1, 256, 256, 3))

        # switch to batches of two, rolling the remainder over
        loader.batch(batch_size=2, last_batch='rollover')
        batch = next(iter(loader))
        self.assertEqual(batch.shape, (2, 256, 256, 3))
예제 #6
0
    def test_mxnet_dummy(self):
        """Dummy dataset through the generic DataLoader for mxnet."""
        dummy = DATASETS('mxnet')['dummy'](shape=(4, 256, 256, 3))
        loader = DataLoader('mxnet', dummy)

        # batch size defaults to 1
        batch = next(iter(loader))
        self.assertEqual(batch.shape, (1, 256, 256, 3))

        # rebatch dynamically to 2 with rollover
        loader.batch(batch_size=2, last_batch='rollover')
        batch = next(iter(loader))
        self.assertEqual(batch.shape, (2, 256, 256, 3))
예제 #7
0
    def test_style_transfer_dataset(self):
        """Write a random jpg and read it back through style_transfer."""
        pixels = (np.random.random_sample([100, 100, 3]) * 255).astype(np.uint8)
        Image.fromarray(pixels).save('test.jpg')

        ds = DATASETS('tensorflow')['style_transfer'](content_folder='./',
                                                      style_folder='./')
        _ = len(ds)
        sample, _label = ds[0]
        # samples are (content, style) pairs resized to 256x256 RGB
        self.assertEqual(sample[0].shape, (256, 256, 3))
        self.assertEqual(sample[1].shape, (256, 256, 3))
        os.remove('test.jpg')
예제 #8
0
    def test_pytorch_dummy(self):
        """Two-tensor dummy dataset via the generic pytorch DataLoader."""
        dummy = DATASETS('pytorch')['dummy'](
            shape=[(4, 256, 256, 3), (4, 1)],
            high=[10., 10.], low=[0., 0.])
        loader = DataLoader('pytorch', dummy)

        # each item is a (data, label) pair; default batch size is 1
        sample, _ = next(iter(loader))
        self.assertEqual(sample.shape, (1, 256, 256, 3))

        # rebatch to 2 with rollover of the remainder
        loader.batch(batch_size=2, last_batch='rollover')
        sample, _ = next(iter(loader))
        self.assertEqual(sample.shape, (2, 256, 256, 3))
예제 #9
0
    def test_pytorch_dummy(self):
        """Dummy two-tensor dataset with a Resize preprocess transform."""
        resize = TRANSFORMS('pytorch', 'preprocess')['Resize'](size=100)
        dummy = DATASETS('pytorch')['dummy'](
            shape=[(4, 256, 256, 3), (4, 1)],
            high=[10., 10.], low=[0., 0.], transform=resize)

        loader = DATALOADERS['pytorch'](dummy)
        # (data, label) pairs at the default batch size of 1
        sample, _ = next(iter(loader))
        self.assertEqual(sample.shape, (1, 256, 256, 3))

        # rebatch to 2 with rollover
        loader.batch(batch_size=2, last_batch='rollover')
        sample, _ = next(iter(loader))
        self.assertEqual(sample.shape, (2, 256, 256, 3))
예제 #10
0
    def test_onnxrt_qlinear_dummy(self):
        """Dummy dataset plus Resize transform on the qlinearops backend."""
        resize = TRANSFORMS('onnxrt_qlinearops', 'preprocess')['Resize'](size=100)
        dummy = DATASETS('onnxrt_qlinearops')['dummy'](shape=(4, 256, 256, 3),
                                                       transform=resize)

        loader = DATALOADERS['onnxrt_qlinearops'](dummy)
        # default batch size of 1
        batch = next(iter(loader))
        self.assertEqual(batch.shape, (1, 256, 256, 3))

        # rebatch dynamically to 2 with rollover
        loader.batch(batch_size=2, last_batch='rollover')
        batch = next(iter(loader))
        self.assertEqual(batch.shape, (2, 256, 256, 3))
예제 #11
0
    def test_performance(self):
        """Quantize, then benchmark the constant graphs on dummy data."""
        from lpot.data import DATASETS
        from lpot.experimental import Quantization, Benchmark, common
        from lpot.utils.utility import get_size

        dummy = DATASETS('tensorflow')['dummy']((100, 256, 256, 1),
                                                label=True)

        # quantize self.constant_graph against the dummy calibration data
        quantizer = Quantization('fake_yaml.yaml')
        quantizer.calib_dataloader = common.DataLoader(dummy)
        quantizer.eval_dataloader = common.DataLoader(dummy)
        quantizer.model = self.constant_graph
        q_model = quantizer()

        # benchmark the second constant graph with the same data
        benchmarker = Benchmark('fake_yaml.yaml')
        benchmarker.b_dataloader = common.DataLoader(dummy)
        benchmarker.model = self.constant_graph_1
        benchmarker()
예제 #12
0
def create_nlp_session():
    """Build a single-Gather ONNX model plus a matching dummy dataloader.

    Returns:
        (model, dataloader): an onnx ModelProto whose graph contains one
        Gather node, and a labelled dummy dataset shaped like input 'A'.
    """
    gather_data = np.random.randn(100, 4).astype(np.float32)
    gather_indices = np.random.randint(2, size=(10)).astype(np.int32)
    A_init = helper.make_tensor('A', TensorProto.FLOAT, [100, 4],
                                gather_data.reshape(400).tolist())
    B_init = helper.make_tensor('B', TensorProto.INT32, [10],
                                gather_indices.reshape(10).tolist())
    A = helper.make_tensor_value_info('A', TensorProto.FLOAT, [100, 4])
    B = helper.make_tensor_value_info('B', TensorProto.INT32, [10])
    C = helper.make_tensor_value_info('C', TensorProto.FLOAT, [10, 4])
    gather = onnx.helper.make_node('Gather', ['A', 'B'], ['C'], name='Gather')
    model = helper.make_model(
        helper.make_graph([gather], 'test_graph_1', [A, B], [C],
                          [A_init, B_init]))

    dataset = DATASETS('onnxrt_qlinearops')['dummy'](shape=(100, 4),
                                                     label=True)
    return model, DATALOADERS['onnxrt_qlinearops'](dataset)
예제 #13
0
File: test_pruning.py — Project: intel/lpot
    def test_pruning_external(self):
        """Drive the Pruning pipeline end-to-end with a user training loop.

        Builds a dummy ImageNet-shaped dataset, defines a manual SGD training
        function that fires the prune.on_* hooks, then runs pruning both via
        attribute assignment and via the positional-call API.
        """
        from lpot.experimental import common
        from lpot import Pruning
        prune = Pruning('fake.yaml')
        datasets = DATASETS('pytorch')
        dummy_dataset = datasets['dummy'](shape=(100, 3, 224, 224),
                                          low=0.,
                                          high=1.,
                                          label=True)
        dummy_dataloader = PyTorchDataLoader(dummy_dataset)

        def training_func_for_lpot(model):
            # Minimal training loop; the prune.on_* callbacks are the hook
            # points where lpot applies sparsity at epoch/batch boundaries.
            epochs = 16
            iters = 30
            criterion = nn.CrossEntropyLoss()
            optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
            for nepoch in range(epochs):
                model.train()
                cnt = 0
                prune.on_epoch_begin(nepoch)
                for image, target in dummy_dataloader:
                    prune.on_batch_begin(cnt)
                    print('.', end='')
                    cnt += 1
                    output = model(image)
                    loss = criterion(output, target)
                    optimizer.zero_grad()
                    loss.backward()
                    optimizer.step()
                    prune.on_batch_end()
                    # cap each epoch at `iters` batches to keep the test fast
                    if cnt >= iters:
                        break
                prune.on_epoch_end()

        prune.model = common.Model(self.model)
        prune.pruning_func = training_func_for_lpot
        prune.eval_dataloader = dummy_dataloader
        prune.train_dataloader = dummy_dataloader
        # The explicit-argument call exercises the call-style API; the
        # attribute assignments above cover the property-style API.
        _ = prune(common.Model(self.model), \
                  train_dataloader=dummy_dataloader, \
                  pruning_func=training_func_for_lpot, \
                  eval_dataloader=dummy_dataloader)
예제 #14
0
def create_cv_session():
    """Build a tiny Conv+Relu ONNX model and a dummy dataloader for it.

    Returns:
        (model, dataloader): an onnx ModelProto (Conv with a fixed 3x3
        kernel followed by Relu) and a labelled dummy dataset shaped like
        input 'A'.
    """
    kernel = np.random.randn(1, 1, 3, 3).astype(np.float32)
    A = helper.make_tensor_value_info('A', TensorProto.FLOAT, [1, 1, 5, 5])
    B = helper.make_tensor_value_info('B', TensorProto.FLOAT, [1, 1, 3, 3])
    B_init = helper.make_tensor('B', TensorProto.FLOAT, [1, 1, 3, 3],
                                kernel.reshape(9).tolist())
    D = helper.make_tensor_value_info('D', TensorProto.FLOAT, [1, 1, 5, 5])
    conv = onnx.helper.make_node('Conv', ['A', 'B'], ['C'],
                                 name='Conv',
                                 kernel_shape=[3, 3],
                                 pads=[1, 1, 1, 1])
    relu = onnx.helper.make_node('Relu', ['C'], ['D'], name='Relu')
    model = helper.make_model(
        helper.make_graph([conv, relu], 'test_graph_1', [A, B], [D],
                          [B_init]))

    dataset = DATASETS('onnxrt_qlinearops')['dummy'](shape=(1, 1, 5, 5),
                                                     label=True)
    return model, DATALOADERS['onnxrt_qlinearops'](dataset)
예제 #15
0
    def test_tensorflow_dummy_v2(self):
        """dummy_v2 dataset with and without labels, plus invalid args."""
        datasets = DATASETS('tensorflow')

        # labelled samples come back as (data, label) pairs
        labelled = datasets['dummy_v2'](input_shape=(256, 256, 3),
                                        label_shape=(1,))
        loader = DATALOADERS['tensorflow'](labelled)
        batch = next(iter(loader))
        self.assertEqual(batch[0].shape, (1, 256, 256, 3))
        self.assertEqual(batch[1].shape, (1, 1))
        # rebatch dynamically to 2 with rollover
        loader.batch(batch_size=2, last_batch='rollover')
        batch = next(iter(loader))
        self.assertEqual(batch[0].shape, (2, 256, 256, 3))
        self.assertEqual(batch[1].shape, (2, 1))

        # without a label_shape only the data tensor is yielded
        unlabelled = datasets['dummy_v2'](input_shape=(256, 256, 3))
        loader = DATALOADERS['tensorflow'](unlabelled)
        batch = next(iter(loader))
        self.assertEqual(batch.shape, (1, 256, 256, 3))
        loader.batch(batch_size=2, last_batch='rollover')
        batch = next(iter(loader))
        self.assertEqual(batch.shape, (2, 256, 256, 3))

        # list-valued low/high/dtype must match the single input declared
        for bad_kwargs in ({'low': [1., 0.]},
                           {'high': [128., 127.]},
                           {'dtype': ['float32', 'int8']}):
            with self.assertRaises(AssertionError):
                datasets['dummy_v2'](input_shape=(256, 256, 3), **bad_kwargs)
예제 #16
0
    def testLabelBalanceCOCORecord(self):
        """LabelBalanceCOCORecord filter: keep records with exactly N boxes.

        Writes a two-example TFRecord (one bounding box vs. two) and checks
        that a size-2 balance filter yields only the two-box example, through
        three equivalent construction paths: explicit FILTERS/DATASETS
        objects, create_dataset, and create_dataloader.
        """
        from PIL import Image
        tf.compat.v1.disable_eager_execution()

        # synthesize a 100x100 RGB jpeg to embed in both records
        random_array = np.random.random_sample([100, 100, 3]) * 255
        random_array = random_array.astype(np.uint8)
        im = Image.fromarray(random_array)
        im.save('test.jpeg')

        image = tf.compat.v1.gfile.FastGFile('test.jpeg', 'rb').read()
        source_id = '000000397133.jpg'.encode('utf-8')
        label = 'person'.encode('utf-8')
        # example 1: a single bounding box -> filtered out by size=2
        example1 = tf.train.Example(features=tf.train.Features(
            feature={
                'image/encoded':
                tf.train.Feature(bytes_list=tf.train.BytesList(value=[image])),
                'image/object/class/text':
                tf.train.Feature(bytes_list=tf.train.BytesList(value=[label])),
                'image/source_id':
                tf.train.Feature(bytes_list=tf.train.BytesList(
                    value=[source_id])),
                'image/object/bbox/xmin':
                tf.train.Feature(float_list=tf.train.FloatList(value=[10])),
                'image/object/bbox/ymin':
                tf.train.Feature(float_list=tf.train.FloatList(value=[10])),
                'image/object/bbox/xmax':
                tf.train.Feature(float_list=tf.train.FloatList(value=[100])),
                'image/object/bbox/ymax':
                tf.train.Feature(float_list=tf.train.FloatList(value=[100])),
            }))
        # example 2: two bounding boxes -> the one the filter keeps
        example2 = tf.train.Example(features=tf.train.Features(
            feature={
                'image/encoded':
                tf.train.Feature(bytes_list=tf.train.BytesList(value=[image])),
                'image/object/class/text':
                tf.train.Feature(bytes_list=tf.train.BytesList(value=[label])),
                'image/source_id':
                tf.train.Feature(bytes_list=tf.train.BytesList(
                    value=[source_id])),
                'image/object/bbox/xmin':
                tf.train.Feature(float_list=tf.train.FloatList(
                    value=[10, 20])),
                'image/object/bbox/ymin':
                tf.train.Feature(float_list=tf.train.FloatList(
                    value=[10, 20])),
                'image/object/bbox/xmax':
                tf.train.Feature(float_list=tf.train.FloatList(
                    value=[100, 200])),
                'image/object/bbox/ymax':
                tf.train.Feature(float_list=tf.train.FloatList(
                    value=[100, 200])),
            }))
        with tf.io.TFRecordWriter('test.record') as writer:
            writer.write(example1.SerializeToString())
            writer.write(example2.SerializeToString())

        # path 1: explicit FILTERS / DATASETS / DATALOADERS objects
        preprocesses = TRANSFORMS('tensorflow', 'preprocess')
        preprocess = get_preprocess(preprocesses, {'ParseDecodeCoco': {}})
        filters = FILTERS('tensorflow')
        filter = filters['LabelBalanceCOCORecord'](2)
        datasets = DATASETS('tensorflow')
        dataset = datasets['COCORecord']('test.record', \
            transform=preprocess, filter=filter)
        dataloader = DATALOADERS['tensorflow'](dataset=dataset, batch_size=1)
        for (inputs, labels) in dataloader:
            self.assertEqual(inputs.shape, (1, 100, 100, 3))
            self.assertEqual(labels[0].shape, (1, 2, 4))

        # path 2: config-dict construction via create_dataset
        dataset2 = create_dataset('tensorflow',
                                  {'COCORecord': {
                                      'root': 'test.record'
                                  }}, {'ParseDecodeCoco': {}},
                                  {'LabelBalance': {
                                      'size': 2
                                  }})
        dataloader2 = DATALOADERS['tensorflow'](dataset=dataset2, batch_size=1)
        for (inputs, labels) in dataloader2:
            self.assertEqual(inputs.shape, (1, 100, 100, 3))
            self.assertEqual(labels[0].shape, (1, 2, 4))

        # path 3: one-shot create_dataloader from a nested config dict
        dataloader3 = create_dataloader('tensorflow', {'batch_size':1, 'dataset':{'COCORecord':{'root':'test.record'}},\
                 'filter':{'LabelBalance':{'size':2}}, 'transform':{'ParseDecodeCoco':{}}})
        for (inputs, labels) in dataloader3:
            self.assertEqual(inputs.shape, (1, 100, 100, 3))
            self.assertEqual(labels[0].shape, (1, 2, 4))
        # clean up the on-disk fixtures
        os.remove('test.record')
        os.remove('test.jpeg')
예제 #17
0
    def testLabelBalanceCOCORaw(self):
        """LabelBalanceCOCORaw filter: keep images with exactly N boxes.

        Fakes a minimal COCO directory layout (val2017/ images plus
        annotations/instances_val2017.json) where image 0 has one annotation
        and image 1 has two, then checks that a size-1 balance filter yields
        only single-box samples.
        """
        # two identical random jpegs on disk, ids 0 and 1
        random_array = np.random.random_sample([100,100,3]) * 255
        random_array = random_array.astype(np.uint8)
        im = Image.fromarray(random_array)
        os.makedirs('val2017', exist_ok=True)
        im.save('./val2017/test_0.jpg')
        im.save('./val2017/test_1.jpg')
        # minimal COCO-format annotation payload
        # NOTE(review): the two annotations for image_id 1 both use id 1768 —
        # appears intentional (gives image 1 two boxes), but verify the
        # filter doesn't dedupe by annotation id.
        fake_dict = {
            'info': {
                'description': 'COCO 2017 Dataset',
                'url': 'http://cocodataset.org',
                'version': '1.0',
                'year': 2017,
                'contributor': 'COCO Consortium',
                'date_created': '2017/09/01'
            },
            'licenses':{},
            'images':[{
                'file_name': 'test_0.jpg',
                'height': 100,
                'width': 100,
                'id': 0
            },
            {
                'file_name': 'test_1.jpg',
                'height': 100,
                'width': 100,
                'id': 1
            }],
            'annotations':[{
                'category_id': 18,
                'id': 1767,
                'iscrowd': 0,
                'image_id': 0,
                'bbox': [473.07, 395.93, 38.65, 28.67],
            },
            {
               'category_id': 18,
               'id': 1768,
               'iscrowd': 0,
               'image_id': 1,
               'bbox': [473.07, 395.93, 38.65, 28.67],
            },
            {
               'category_id': 18,
               'id': 1768,
               'iscrowd': 0,
               'image_id': 1,
               'bbox': [473.07, 395.93, 38.65, 28.67],
            }],
            'categories':[{
                'supercategory': 'animal',
                'id': 18,
                'name': 'dog'
            }]
        }
        fake_json = json.dumps(fake_dict)
        os.makedirs('annotations', exist_ok=True)
        with open('./annotations/instances_val2017.json', 'w') as f:
            f.write(fake_json)

        # size=1 balance filter: only images with exactly one box survive
        filters = FILTERS('onnxrt_qlinearops')
        filter = filters['LabelBalanceCOCORaw'](1)
        datasets = DATASETS('onnxrt_qlinearops')
        dataset = datasets['COCORaw']('./', transform=None, filter=filter)
        dataloader = DATALOADERS['onnxrt_qlinearops'](dataset=dataset, batch_size=1)
        for (inputs, labels) in dataloader:
            self.assertEqual(labels[0].shape[1], 1)

        # clean up the fake COCO layout
        shutil.rmtree('annotations')
        shutil.rmtree('val2017')
예제 #18
0
class TestAdaptorONNXRT(unittest.TestCase):
    """ONNX Runtime adaptor tests: inspect_tensor, set_tensor, quantization
    and benchmarking against exported torchvision models."""

    mb_v2_export_path = "mb_v2.onnx"
    mb_v2_model = torchvision.models.mobilenet_v2()
    rn50_export_path = "rn50.onnx"
    rn50_model = torchvision.models.resnet50()

    # ImageNet-shaped dummy data for the CV models
    datasets = DATASETS('onnxrt_qlinearops')
    cv_dataset = datasets['dummy'](shape=(100, 3, 224, 224), low=0., high=1., label=True)
    cv_dataloader = DATALOADERS['onnxrt_qlinearops'](cv_dataset)

    # flat-feature dummy data for the ir3 model
    ir3_dataset = datasets['dummy'](shape=(10, 2048), low=0., high=1., label=True)
    ir3_dataloader = DATALOADERS['onnxrt_qlinearops'](ir3_dataset)

    @classmethod
    def setUpClass(cls):
        """Build the yaml configs once and export/load the ONNX models."""
        # fix: classmethods take `cls`, not `self`, as their first argument
        build_static_yaml()
        build_dynamic_yaml()
        build_non_MSE_yaml()
        build_benchmark_yaml()
        export_onnx_model(cls.mb_v2_model, cls.mb_v2_export_path)
        cls.mb_v2_model = onnx.load(cls.mb_v2_export_path)
        export_onnx_model(cls.rn50_model, cls.rn50_export_path)
        cls.rn50_model = onnx.load(cls.rn50_export_path)
        cls.ir3_model = build_ir3_model()

    @classmethod
    def tearDownClass(cls):
        """Remove every artifact produced by setUpClass and the tests."""
        os.remove("static.yaml")
        os.remove("dynamic.yaml")
        os.remove("non_MSE.yaml")
        os.remove("benchmark.yaml")
        os.remove(cls.mb_v2_export_path)
        os.remove(cls.rn50_export_path)
        shutil.rmtree("./saved", ignore_errors=True)
        shutil.rmtree("runs", ignore_errors=True)

    def test_adaptor(self):
        """inspect_tensor over activations/weights, with and without disk dump."""
        framework_specific_info = {"device": "cpu",
                               "approach": "post_training_static_quant",
                               "random_seed": 1234,
                               "q_dataloader": None,
                               "backend": "qlinearops",
                               "workspace_path": './lpot_workspace/{}/{}/'.format(
                                                       'onnxrt',
                                                       'imagenet')}
        framework = "onnxrt_qlinearops"
        adaptor = FRAMEWORKS[framework](framework_specific_info)
        adaptor.inspect_tensor(self.rn50_model, self.cv_dataloader, inspect_type='activation')
        adaptor.inspect_tensor(self.rn50_model, self.cv_dataloader, inspect_type='activation', save_to_disk=True)
        adaptor.inspect_tensor(self.rn50_model, self.cv_dataloader, inspect_type='weight')
        adaptor.inspect_tensor(self.rn50_model, self.cv_dataloader, inspect_type='all')
        adaptor.inspect_tensor(self.rn50_model, self.cv_dataloader, ["Conv_0"], inspect_type='activation')

    def test_set_tensor(self):
        """set_tensor on a quantized MobileNetV2 with a per-channel q_config."""
        quantizer = Quantization("static.yaml")
        quantizer.calib_dataloader = self.cv_dataloader
        quantizer.eval_dataloader = self.cv_dataloader
        quantizer.model = common.Model(self.mb_v2_model)
        q_model = quantizer()
        framework_specific_info = {"device": "cpu",
                     "approach": "post_training_static_quant",
                     "random_seed": 1234,
                     "q_dataloader": None,
                     "backend": "qlinearops",
                     "workspace_path": './lpot_workspace/{}/{}/'.format(
                                             'onnxrt',
                                             'imagenet')}
        framework = "onnxrt_qlinearops"
        adaptor = FRAMEWORKS[framework](framework_specific_info)
        q_config = {'fused Conv_0': {'weight': {'granularity': 'per_channel', 'dtype': onnx_proto.TensorProto.INT8}}}
        adaptor.q_config = q_config
        adaptor.set_tensor(q_model, {'ConvBnFusion_W_features.0.0.weight': np.random.random([32, 3, 3, 3])})
        adaptor.set_tensor(q_model, {'ConvBnFusion_BN_B_features.0.1.bias': np.random.random([32])})

    def test_quantization(self):
        """Quantize ResNet50/MobileNetV2/ir3 and benchmark ResNet50.

        Fixed: this method was a second `def test_adaptor`, which silently
        shadowed the inspect_tensor test above so it never ran; renamed so
        both tests execute.
        """
        for fake_yaml in ["static.yaml", "dynamic.yaml"]:
            quantizer = Quantization(fake_yaml)
            quantizer.calib_dataloader = self.cv_dataloader
            quantizer.eval_dataloader = self.cv_dataloader
            quantizer.model = common.Model(self.rn50_model)
            q_model = quantizer()
            eval_func(q_model)
        for fake_yaml in ["non_MSE.yaml"]:
            quantizer = Quantization(fake_yaml)
            quantizer.calib_dataloader = self.cv_dataloader
            quantizer.eval_dataloader = self.cv_dataloader
            quantizer.model = common.Model(self.mb_v2_model)
            q_model = quantizer()
            eval_func(q_model)

        for fake_yaml in ["static.yaml"]:
            quantizer = Quantization(fake_yaml)
            quantizer.calib_dataloader = self.ir3_dataloader
            quantizer.eval_dataloader = self.ir3_dataloader
            quantizer.model = common.Model(self.ir3_model)
            q_model = quantizer()

        for mode in ["performance", "accuracy"]:
            fake_yaml = "benchmark.yaml"
            evaluator = Benchmark(fake_yaml)
            evaluator.b_dataloader = self.cv_dataloader
            evaluator.model = common.Model(self.rn50_model)
            evaluator(mode)
예제 #19
0
def main(args=None):
    """Quantize and/or validate a TF style-transfer model.

    Loads FLAGS.input_model from either a checkpoint (rebuilding input
    placeholders and freezing variables) or a frozen .pb, optionally runs
    lpot quantization when FLAGS.tune is set, then pushes a style-transfer
    (or dummy) dataloader through the resulting graph.
    """
    tf.logging.set_verbosity(tf.logging.INFO)
    if not tf.gfile.Exists(FLAGS.output_dir):
        tf.gfile.MkDir(FLAGS.output_dir)

    with tf.Session() as sess:
        if FLAGS.input_model.rsplit('.', 1)[-1] == 'ckpt':
            # fresh placeholders that replace the graph's internal
            # style/content feed nodes below
            style_img_ph = tf.placeholder(tf.float32,
                                          shape=[None, 256, 256, 3],
                                          name='style_input')
            content_img_ph = tf.placeholder(tf.float32,
                                            shape=[None, 256, 256, 3],
                                            name='content_input')
            # import meta_graph
            meta_data_path = FLAGS.input_model + '.meta'
            saver = tf.train.import_meta_graph(meta_data_path,
                                               clear_devices=True)

            sess.run(tf.global_variables_initializer())
            saver.restore(sess, FLAGS.input_model)
            graph_def = sess.graph.as_graph_def()

            replace_style = 'style_image_processing/ResizeBilinear_2'
            replace_content = 'batch_processing/batch'
            for node in graph_def.node:
                for idx, input_name in enumerate(node.input):
                    # rewire every consumer of the old style/content nodes
                    # onto the new placeholders
                    if replace_content == input_name:
                        node.input[idx] = 'content_input'
                    if replace_style == input_name:
                        node.input[idx] = 'style_input'

            if FLAGS.tune:
                _parse_ckpt_bn_input(graph_def)
            output_name = 'transformer/expand/conv3/conv/Sigmoid'
            # freeze variables so the graph can be quantized and re-imported
            frozen_graph = tf.graph_util.convert_variables_to_constants(
                sess, graph_def, [output_name])
        # use frozen pb instead
        elif FLAGS.input_model.rsplit('.', 1)[-1] == 'pb':
            with open(FLAGS.input_model, 'rb') as f:
                frozen_graph = tf.GraphDef()
                frozen_graph.ParseFromString(f.read())
        else:
            print("not supported model format")
            exit(-1)

        if FLAGS.tune:
            # quantize with lpot and carry the quantized graph_def into the
            # validation section below
            with tf.Graph().as_default() as graph:
                tf.import_graph_def(frozen_graph, name='')
                quantizer = Quantization(FLAGS.config)
                quantizer.model = graph
                quantized_model = quantizer()
                quantized_model.save(FLAGS.output_model)
                frozen_graph = quantized_model.graph_def

    # validate the quantized model here
    with tf.Graph().as_default(), tf.Session() as sess:
        if FLAGS.tune:
            # create dataloader using default style_transfer dataset
            # generate stylized images
            dataset = DATASETS('tensorflow')['style_transfer']( \
                FLAGS.content_images_paths.strip(),
                FLAGS.style_images_paths.strip(),
                crop_ratio=0.2,
                resize_shape=(256, 256))
        else:
            # no real images supplied: validate with paired dummy tensors
            dataset = DATASETS('tensorflow')['dummy']( \
                shape=[(200, 256, 256, 3), (200, 256, 256, 3)], label=True)
        dataloader = DATALOADERS['tensorflow'](dataset=dataset,
                                               batch_size=FLAGS.batch_size)
        tf.import_graph_def(frozen_graph, name='')
        style_transfer(sess, dataloader, FLAGS.precision)