Exemplo n.º 1
0
    def test_tensorflow_imagenet_dataset(self):
        """ImageRecord/Imagenet TF datasets: the factory raises without a
        record file, and decodes (1, 100, 100, 3) batches once one exists."""
        import tensorflow as tf
        tf.compat.v1.disable_eager_execution()

        # Random 100x100 RGB jpeg used as the single dataset image.
        arr = (np.random.random_sample([100, 100, 3]) * 255).astype(np.uint8)
        Image.fromarray(arr).save('test.jpeg')

        # No validation-* record exists yet, so building the dataloader
        # must raise ValueError.
        bad_args = {
            'dataset': {"ImageRecord": {'root': './'}},
            'transform': None,
            'filter': None,
        }
        self.assertRaises(ValueError, create_dataloader, 'tensorflow',
                          bad_args)

        raw = tf.compat.v1.gfile.FastGFile('test.jpeg', 'rb').read()
        feature = {
            'image/encoded':
                tf.train.Feature(bytes_list=tf.train.BytesList(value=[raw])),
            'image/class/label':
                tf.train.Feature(int64_list=tf.train.Int64List(value=[1])),
        }
        example = tf.train.Example(features=tf.train.Features(feature=feature))

        with tf.io.TFRecordWriter('validation-00000-of-00000') as writer:
            writer.write(example.SerializeToString())

        ds = create_dataset('tensorflow', {'ImageRecord': {'root': './'}},
                            {'ParseDecodeImagenet': {}}, None)
        loader = DATALOADERS['tensorflow'](dataset=ds, batch_size=1)
        for inputs, labels in loader:
            self.assertEqual(inputs.shape, (1, 100, 100, 3))
            self.assertEqual(labels.shape, (1, 1))

        # test old api
        ds = create_dataset('tensorflow', {'Imagenet': {'root': './'}},
                            {'ParseDecodeImagenet': {}}, None)
        loader = DataLoader('tensorflow', dataset=ds, batch_size=1)
        for inputs, labels in loader:
            self.assertEqual(inputs.shape, (1, 100, 100, 3))
            self.assertEqual(labels.shape, (1, 1))

        os.remove('validation-00000-of-00000')
        os.remove('test.jpeg')
Exemplo n.º 2
0
    def testParseDecodeImagenet(self):
        """Round-trip a jpeg through an Imagenet-style TFRecord and verify
        the decoded image shape and label value."""
        arr = (np.random.random_sample([100, 100, 3]) * 255).astype(np.uint8)
        Image.fromarray(arr).save('test.jpeg')

        raw = tf.compat.v1.gfile.FastGFile('test.jpeg', 'rb').read()
        label = 10

        def _float(value):
            # Single-value float feature for the bbox coordinates.
            return tf.train.Feature(
                float_list=tf.train.FloatList(value=[value]))

        example = tf.train.Example(features=tf.train.Features(feature={
            'image/encoded': tf.train.Feature(
                bytes_list=tf.train.BytesList(value=[raw])),
            'image/class/label': tf.train.Feature(
                int64_list=tf.train.Int64List(value=[label])),
            'image/object/bbox/xmin': _float(10),
            'image/object/bbox/ymin': _float(20),
            'image/object/bbox/xmax': _float(100),
            'image/object/bbox/ymax': _float(200),
        }))
        with tf.io.TFRecordWriter('test.record') as writer:
            writer.write(example.SerializeToString())

        ds = create_dataset(
            'tensorflow', {'TFRecordDataset': {'root': 'test.record'}},
            {'ParseDecodeImagenet': {}}, None)
        loader = DATALOADERS['tensorflow'](dataset=ds, batch_size=1)
        for inputs, labels in loader:
            self.assertEqual(inputs.shape, (1, 100, 100, 3))
            self.assertEqual(labels[0][0], 10)
            break
        os.remove('test.record')
        os.remove('test.jpeg')
Exemplo n.º 3
0
    def test_coco_record(self):
        """Run a synthetic COCO TFRecord through a flip/crop/transpose
        transform chain and check the resulting image and label shapes."""
        import tensorflow as tf
        tf.compat.v1.disable_eager_execution()
        arr = (np.random.random_sample([100, 100, 3]) * 255).astype(np.uint8)
        Image.fromarray(arr).save('test.jpeg')

        raw = tf.compat.v1.gfile.FastGFile('test.jpeg', 'rb').read()
        source_id = '000000397133.jpg'.encode('utf-8')
        label = 'person'.encode('utf-8')

        def _bytes(value):
            return tf.train.Feature(
                bytes_list=tf.train.BytesList(value=[value]))

        def _float(value):
            return tf.train.Feature(
                float_list=tf.train.FloatList(value=[value]))

        example = tf.train.Example(features=tf.train.Features(feature={
            'image/encoded': _bytes(raw),
            'image/object/class/text': _bytes(label),
            'image/source_id': _bytes(source_id),
            'image/object/bbox/xmin': _float(10),
            'image/object/bbox/ymin': _float(10),
            'image/object/bbox/xmax': _float(100),
            'image/object/bbox/ymax': _float(100),
        }))

        with tf.io.TFRecordWriter('test.record') as writer:
            writer.write(example.SerializeToString())

        # Chain: decode -> random flips -> 10x10 crop resized to 5x5 ->
        # HWC->CHW transpose, hence the (1, 3, 5, 5) batch below.
        transforms = {
            'ParseDecodeCoco': {},
            'RandomVerticalFlip': {},
            'RandomHorizontalFlip': {},
            'CropResize': {'x': 0, 'y': 0, 'width': 10, 'height': 10,
                           'size': [5, 5]},
            'Transpose': {'perm': [2, 0, 1]},
        }
        ds = create_dataset('tensorflow',
                            {'COCORecord': {'root': 'test.record'}},
                            transforms, None)
        loader = DATALOADERS['tensorflow'](dataset=ds, batch_size=1)
        for inputs, labels in loader:
            self.assertEqual(inputs.shape, (1, 3, 5, 5))
            self.assertEqual(labels[0].shape, (1, 1, 4))
        os.remove('test.record')
        os.remove('test.jpeg')
Exemplo n.º 4
0
    def test_pytorch_bert_dataset(self):
        """PyTorch 'bert' dataset: an unknown task asserts, and each
        (task, model_type) pair yields the expected feature count."""
        # One fake tokenized sample: ids, mask, token type rows, etc.
        dataset = [[[101, 2043, 2001], [1, 1, 1],
                    [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0, 0, 0]], [1, 1, 1], [1, 1, 1],
                    [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0, 0, 0]]]]
        with self.assertRaises(AssertionError):
            create_dataset('pytorch',
                           {'bert': {'dataset': dataset,
                                     'task': 'test'}}, None, None)

        def _make(**cfg):
            # Build a bert dataset from keyword config plus the shared sample.
            cfg['dataset'] = dataset
            return create_dataset('pytorch', {'bert': cfg}, None, None)

        ds = _make(task='classifier', model_type='distilbert')
        self.assertEqual(len(ds), 1)
        self.assertEqual(3, len(ds[0][0]))

        ds = _make(task='classifier', model_type='bert')
        self.assertEqual(4, len(ds[0][0]))

        # Default model_type for squad.
        ds = _make(task='squad')
        self.assertEqual(3, len(ds[0][0]))

        ds = _make(task='squad', model_type='distilbert')
        self.assertEqual(2, len(ds[0][0]))

        ds = _make(task='squad', model_type='xlnet')
        self.assertEqual(5, len(ds[0][0]))
Exemplo n.º 5
0
 def test_onnx_imagenet(self):
     """ImageFolder dataset for onnxrt: images found under val/ are
     yielded as PIL images of their original (100, 100) size."""
     os.makedirs('val', exist_ok=True)
     os.makedirs('val/0', exist_ok=True)
     random_array = np.random.random_sample([100, 100, 3]) * 255
     # Fix: the astype conversion was duplicated; one cast is enough.
     random_array = random_array.astype(np.uint8)
     im = Image.fromarray(random_array)
     im.save('val/test.jpg')
     args = {'ImageFolder': {'root': './val'}}
     ds = create_dataset('onnxrt_qlinearops', args, None, None)
     dataloader = DATALOADERS['onnxrt_qlinearops'](ds)
     for image, label in dataloader:
         self.assertEqual(image[0].size, (100, 100))
     shutil.rmtree('val')
Exemplo n.º 6
0
 def test_onnx_imagenet(self):
     """Imagenet dataset for onnxrt via the old DataLoader API: the image
     saved under val/ comes back at its original (100, 100) size."""
     import shutil
     # Fix: use exist_ok=True so a leftover val/ directory from a previous
     # (failed) run does not make makedirs raise FileExistsError.
     os.makedirs('val', exist_ok=True)
     os.makedirs('val/0', exist_ok=True)
     random_array = np.random.random_sample([100, 100, 3]) * 255
     # Fix: the astype conversion was duplicated; one cast is enough.
     random_array = random_array.astype(np.uint8)
     im = Image.fromarray(random_array)
     im.save('val/0000000397133.jpg')
     args = {'Imagenet': {'root': './'}}
     ds = create_dataset('onnxrt_qlinearops', args, None, None)
     dataloader = DataLoader('onnxrt_qlinearops', ds)
     for image, label in dataloader:
         self.assertEqual(image[0].size, (100, 100))
     shutil.rmtree('val')
Exemplo n.º 7
0
    def testCOCODecode(self):
        """ParseDecodeCoco on a synthetic COCO TFRecord: a complete example
        decodes to (100, 100, 3); one missing the class text makes
        create_dataset raise ValueError."""
        from lpot.data.transforms.coco_transform import ParseDecodeCocoTransform
        tf.compat.v1.disable_eager_execution()

        arr = (np.random.random_sample([100, 100, 3]) * 255).astype(np.uint8)
        Image.fromarray(arr).save('test.jpeg')

        raw = tf.compat.v1.gfile.FastGFile('test.jpeg', 'rb').read()
        source_id = '000000397133.jpg'.encode('utf-8')
        label = 'person'.encode('utf-8')

        def _bytes(value):
            return tf.train.Feature(
                bytes_list=tf.train.BytesList(value=[value]))

        def _float(value):
            return tf.train.Feature(
                float_list=tf.train.FloatList(value=[value]))

        example = tf.train.Example(features=tf.train.Features(feature={
            'image/encoded': _bytes(raw),
            'image/object/class/text': _bytes(label),
            'image/source_id': _bytes(source_id),
            'image/object/bbox/xmin': _float(10),
            'image/object/bbox/ymin': _float(10),
            'image/object/bbox/xmax': _float(100),
            'image/object/bbox/ymax': _float(100),
        }))

        with tf.io.TFRecordWriter('test.record') as writer:
            writer.write(example.SerializeToString())
        ds = create_dataset('tensorflow',
                            {'COCORecord': {'root': 'test.record'}},
                            {'ParseDecodeCoco': {}}, None)
        loader = DATALOADERS['tensorflow'](dataset=ds, batch_size=1)
        for inputs, labels in loader:
            self.assertEqual(inputs.shape, (1, 100, 100, 3))
            self.assertEqual(labels[0].shape, (1, 1, 4))

        # Standalone transform on the raw serialized example.
        func = ParseDecodeCocoTransform()
        out = func(example.SerializeToString())
        self.assertEqual(
            out[0].eval(session=tf.compat.v1.Session()).shape, (100, 100, 3))

        # Same record but without 'image/object/class/text': dataset
        # creation must reject it.
        example = tf.train.Example(features=tf.train.Features(feature={
            'image/encoded': _bytes(raw),
            'image/source_id': _bytes(source_id),
            'image/object/bbox/xmin': _float(10),
            'image/object/bbox/ymin': _float(10),
            'image/object/bbox/xmax': _float(100),
            'image/object/bbox/ymax': _float(100),
        }))

        with tf.io.TFRecordWriter('test2.record') as writer:
            writer.write(example.SerializeToString())
        self.assertRaises(ValueError, create_dataset, 'tensorflow',
                          {'COCORecord': {'root': 'test2.record'}}, None, None)

        os.remove('test2.record')
        os.remove('test.record')
        os.remove('test.jpeg')
Exemplo n.º 8
0
    def testLabelBalanceCOCORecord(self):
        """LabelBalance filter with size=2: of two records (one box vs. two
        boxes) only the two-box record survives, checked through the
        low-level DATASETS/FILTERS API, create_dataset, and
        create_dataloader."""
        from PIL import Image
        tf.compat.v1.disable_eager_execution()

        # Random 100x100 RGB jpeg shared by both records.
        random_array = np.random.random_sample([100, 100, 3]) * 255
        random_array = random_array.astype(np.uint8)
        im = Image.fromarray(random_array)
        im.save('test.jpeg')

        image = tf.compat.v1.gfile.FastGFile('test.jpeg', 'rb').read()
        source_id = '000000397133.jpg'.encode('utf-8')
        label = 'person'.encode('utf-8')
        # example1 carries a single bounding box -> filtered out by size=2.
        example1 = tf.train.Example(features=tf.train.Features(
            feature={
                'image/encoded':
                tf.train.Feature(bytes_list=tf.train.BytesList(value=[image])),
                'image/object/class/text':
                tf.train.Feature(bytes_list=tf.train.BytesList(value=[label])),
                'image/source_id':
                tf.train.Feature(bytes_list=tf.train.BytesList(
                    value=[source_id])),
                'image/object/bbox/xmin':
                tf.train.Feature(float_list=tf.train.FloatList(value=[10])),
                'image/object/bbox/ymin':
                tf.train.Feature(float_list=tf.train.FloatList(value=[10])),
                'image/object/bbox/xmax':
                tf.train.Feature(float_list=tf.train.FloatList(value=[100])),
                'image/object/bbox/ymax':
                tf.train.Feature(float_list=tf.train.FloatList(value=[100])),
            }))
        # example2 carries two bounding boxes -> kept by the filter,
        # hence the (1, 2, 4) label shape asserted below.
        example2 = tf.train.Example(features=tf.train.Features(
            feature={
                'image/encoded':
                tf.train.Feature(bytes_list=tf.train.BytesList(value=[image])),
                'image/object/class/text':
                tf.train.Feature(bytes_list=tf.train.BytesList(value=[label])),
                'image/source_id':
                tf.train.Feature(bytes_list=tf.train.BytesList(
                    value=[source_id])),
                'image/object/bbox/xmin':
                tf.train.Feature(float_list=tf.train.FloatList(
                    value=[10, 20])),
                'image/object/bbox/ymin':
                tf.train.Feature(float_list=tf.train.FloatList(
                    value=[10, 20])),
                'image/object/bbox/xmax':
                tf.train.Feature(float_list=tf.train.FloatList(
                    value=[100, 200])),
                'image/object/bbox/ymax':
                tf.train.Feature(float_list=tf.train.FloatList(
                    value=[100, 200])),
            }))
        with tf.io.TFRecordWriter('test.record') as writer:
            writer.write(example1.SerializeToString())
            writer.write(example2.SerializeToString())

        # Path 1: assemble dataset/filter/transform by hand via registries.
        preprocesses = TRANSFORMS('tensorflow', 'preprocess')
        preprocess = get_preprocess(preprocesses, {'ParseDecodeCoco': {}})
        filters = FILTERS('tensorflow')
        filter = filters['LabelBalanceCOCORecord'](2)
        datasets = DATASETS('tensorflow')
        dataset = datasets['COCORecord']('test.record', \
            transform=preprocess, filter=filter)
        dataloader = DATALOADERS['tensorflow'](dataset=dataset, batch_size=1)
        for (inputs, labels) in dataloader:
            self.assertEqual(inputs.shape, (1, 100, 100, 3))
            self.assertEqual(labels[0].shape, (1, 2, 4))

        # Path 2: same pipeline through the create_dataset config API.
        dataset2 = create_dataset('tensorflow',
                                  {'COCORecord': {
                                      'root': 'test.record'
                                  }}, {'ParseDecodeCoco': {}},
                                  {'LabelBalance': {
                                      'size': 2
                                  }})
        dataloader2 = DATALOADERS['tensorflow'](dataset=dataset2, batch_size=1)
        for (inputs, labels) in dataloader2:
            self.assertEqual(inputs.shape, (1, 100, 100, 3))
            self.assertEqual(labels[0].shape, (1, 2, 4))

        # Path 3: fully config-driven via create_dataloader.
        dataloader3 = create_dataloader('tensorflow', {'batch_size':1, 'dataset':{'COCORecord':{'root':'test.record'}},\
                 'filter':{'LabelBalance':{'size':2}}, 'transform':{'ParseDecodeCoco':{}}})
        for (inputs, labels) in dataloader3:
            self.assertEqual(inputs.shape, (1, 100, 100, 3))
            self.assertEqual(labels[0].shape, (1, 2, 4))
        os.remove('test.record')
        os.remove('test.jpeg')
Exemplo n.º 9
0
    def testVOCDecode(self):
        """Build a one-image VOC segmentation TFRecord, decode it with
        ParseDecodeVoc, and check the image and segmentation label shapes."""
        import shutil
        tf.compat.v1.disable_eager_execution()

        def _bytes_list_feature(values):
            # Wrap a (possibly str) value into a single-element bytes feature.
            import six
            def norm2bytes(value):
                return value.encode() if isinstance(value, str) and six.PY3 else value
            return tf.train.Feature(
                bytes_list=tf.train.BytesList(value=[norm2bytes(values)]))

        def _int64_list_feature(values):
            # Fix: collections.Iterable was removed in Python 3.10; the
            # collections.abc alias works on every Python 3 version.
            import collections.abc
            if not isinstance(values, collections.abc.Iterable):
                values = [values]
            return tf.train.Feature(int64_list=tf.train.Int64List(value=values))

        # Random jpeg as the input image.
        random_array = np.random.random_sample([100,100,3]) * 255
        random_array = random_array.astype(np.uint8)
        im = Image.fromarray(random_array)
        im.save('test.jpg')
        # All-zero png acts as the segmentation ground-truth mask.
        random_array = np.random.random_sample([100,100,3]) * 0
        random_array = random_array.astype(np.uint8)
        im = Image.fromarray(random_array)
        im.save('test.png')
        image_data = tf.compat.v1.gfile.GFile('test.jpg', 'rb').read()
        seg_data = tf.compat.v1.gfile.GFile('test.png', 'rb').read()
        filename = 'test'

        example = tf.train.Example(features=tf.train.Features(feature={
            'image/encoded': _bytes_list_feature(image_data),
            'image/filename': _bytes_list_feature(filename),
            'image/format': _bytes_list_feature('png'),
            'image/height': _int64_list_feature(100),
            'image/width': _int64_list_feature(100),
            'image/channels': _int64_list_feature(3),
            'image/segmentation/class/encoded': (
                _bytes_list_feature(seg_data)),
            'image/segmentation/class/format': _bytes_list_feature('png'),
        }))

        # VOCRecord scans a directory for val-*.record files.
        if not os.path.exists('./test_record'):
            os.mkdir('./test_record')
        with tf.io.TFRecordWriter('./test_record/val-test.record') as writer:
            writer.write(example.SerializeToString())
        eval_dataset = create_dataset(
            'tensorflow', {'VOCRecord':{'root':'./test_record'}}, {'ParseDecodeVoc':{}}, None)
        dataloader = DATALOADERS['tensorflow'](dataset=eval_dataset, batch_size=1)
        for (inputs, labels) in dataloader:
            self.assertEqual(inputs.shape, (1,100,100,3))
            self.assertEqual(labels[0].shape, (100,100,1))

        # Standalone transform on the raw serialized example.
        from lpot.experimental.data.transforms.transform import ParseDecodeVocTransform
        func = ParseDecodeVocTransform()
        out = func(example.SerializeToString())
        self.assertEqual(out[0].eval(session=tf.compat.v1.Session()).shape, (100,100,3))

        os.remove('./test_record/val-test.record')
        os.remove('test.jpg')
        os.remove('test.png')
        shutil.rmtree('./test_record')
Exemplo n.º 10
0
    def testCOCODecode(self):
        """Decode a synthetic COCO TFRecord through a resize/cast/crop chain,
        exercise ParseDecodeCoco and TensorflowResizeWithRatio standalone,
        and verify a record without class text is rejected."""
        tf.compat.v1.disable_eager_execution() 

        random_array = np.random.random_sample([100,100,3]) * 255
        random_array = random_array.astype(np.uint8)
        im = Image.fromarray(random_array)
        im.save('test.jpeg')

        image = tf.compat.v1.gfile.FastGFile('test.jpeg','rb').read()
        source_id = '000000397133.jpg'.encode('utf-8')
        label = 'person'.encode('utf-8')
        example = tf.train.Example(features=tf.train.Features(feature={
            'image/encoded':tf.train.Feature(
                    bytes_list=tf.train.BytesList(value=[image])),
            'image/object/class/text':tf.train.Feature(
                    bytes_list=tf.train.BytesList(value=[label])),
            'image/source_id':tf.train.Feature(
                    bytes_list=tf.train.BytesList(value=[source_id])),
            'image/object/bbox/xmin':tf.train.Feature(
                    float_list=tf.train.FloatList(value=[10])),
            'image/object/bbox/ymin':tf.train.Feature(
                    float_list=tf.train.FloatList(value=[10])),
            'image/object/bbox/xmax':tf.train.Feature(
                    float_list=tf.train.FloatList(value=[100])),
            'image/object/bbox/ymax':tf.train.Feature(
                    float_list=tf.train.FloatList(value=[100])),
        }))

        with tf.io.TFRecordWriter('test.record') as writer:
            writer.write(example.SerializeToString())
        # The final RandomResizedCrop to [4, 5] determines the batch shape.
        eval_dataset = create_dataset(
            'tensorflow', {'COCORecord':{'root':'test.record'}}, 
            {'ParseDecodeCoco':{}, 'Resize': {'size': 50}, 'Cast':{'dtype':'int64'},
            'CropToBoundingBox':{'offset_height':2, 'offset_width':2, 'target_height':5, 'target_width':5},
            'CenterCrop':{'size':[4,4]},
            'RandomResizedCrop':{'size':[4,5]},
            }, None)
        dataloader = DATALOADERS['tensorflow'](dataset=eval_dataset, batch_size=1)
        for (inputs, labels) in dataloader:
            self.assertEqual(inputs.shape, (1,4,5,3))
            self.assertEqual(labels[0].shape, (1,1,4))

        from lpot.experimental.data.transforms.transform import TensorflowResizeWithRatio
        from lpot.experimental.data.datasets.coco_dataset import ParseDecodeCoco
        # Fix: this standalone decode + assert was copy-pasted twice;
        # one evaluation is sufficient.
        func = ParseDecodeCoco()
        out = func(example.SerializeToString())
        self.assertEqual(out[0].eval(session=tf.compat.v1.Session()).shape, (100,100,3))

        func = TensorflowResizeWithRatio(**{'padding':True})
        out = func(out)
        self.assertEqual(out[0].eval(session=tf.compat.v1.Session()).shape, (1365,1365,3))

        # Same record but without 'image/object/class/text': creation fails.
        example = tf.train.Example(features=tf.train.Features(feature={
            'image/encoded':tf.train.Feature(
                    bytes_list=tf.train.BytesList(value=[image])),
            'image/source_id':tf.train.Feature(
                    bytes_list=tf.train.BytesList(value=[source_id])),
            'image/object/bbox/xmin':tf.train.Feature(
                    float_list=tf.train.FloatList(value=[10])),
            'image/object/bbox/ymin':tf.train.Feature(
                    float_list=tf.train.FloatList(value=[10])),
            'image/object/bbox/xmax':tf.train.Feature(
                    float_list=tf.train.FloatList(value=[100])),
            'image/object/bbox/ymax':tf.train.Feature(
                    float_list=tf.train.FloatList(value=[100])),
        }))

        with tf.io.TFRecordWriter('test2.record') as writer:
            writer.write(example.SerializeToString())
        self.assertRaises(ValueError, create_dataset,
            'tensorflow', {'COCORecord':{'root':'test2.record'}}, None, None)

        os.remove('test2.record')
        os.remove('test.record')
        os.remove('test.jpeg')
Exemplo n.º 11
0
    def test_coco_raw(self):
        """COCORaw dataset over loose jpegs + a fake annotation json, checked
        for the tensorflow, onnxrt, mxnet and pytorch backends."""
        import json
        import collections
        from lpot.data import TRANSFORMS
        import mxnet as mx
        # Two identical random 100x100 RGB jpegs; test_2.jpg is referenced
        # in the json but never written, and its annotation has an empty
        # bbox, so it contributes no sample.
        random_array = np.random.random_sample([100, 100, 3]) * 255
        random_array = random_array.astype(np.uint8)
        im = Image.fromarray(random_array)
        im.save('test_0.jpg')
        im.save('test_1.jpg')
        # Minimal COCO-format annotation file covering the images above.
        fake_dict = {
            'info': {
                'description': 'COCO 2017 Dataset',
                'url': 'http://cocodataset.org',
                'version': '1.0',
                'year': 2017,
                'contributor': 'COCO Consortium',
                'date_created': '2017/09/01'
            },
            'licenses': {},
            'images': [{
                'file_name': 'test_0.jpg',
                'height': 100,
                'width': 100,
                'id': 0
            }, {
                'file_name': 'test_1.jpg',
                'height': 100,
                'width': 100,
                'id': 1
            }, {
                'file_name': 'test_2.jpg',
                'height': 100,
                'width': 100,
                'id': 2
            }],
            'annotations': [{
                'category_id': 18,
                'id': 1767,
                'iscrowd': 0,
                'image_id': 0,
                'bbox': [473.07, 395.93, 38.65, 28.67],
            }, {
                'category_id': 18,
                'id': 1768,
                'iscrowd': 0,
                'image_id': 1,
                'bbox': [473.07, 395.93, 38.65, 28.67],
            }, {
                'category_id': 18,
                'id': 1769,
                'iscrowd': 0,
                'image_id': 2,
                'bbox': [],
            }],
            'categories': [{
                'supercategory': 'animal',
                'id': 18,
                'name': 'dog'
            }]
        }
        fake_json = json.dumps(fake_dict)
        with open('anno.json', 'w') as f:
            f.write(fake_json)

        # tensorflow backend: images come back HWC.
        args = {
            'COCORaw': {
                'root': './',
                'img_dir': '',
                'anno_dir': 'anno.json'
            }
        }
        ds = create_dataset('tensorflow', args, None, None)
        dataloader = DATALOADERS['tensorflow'](ds)
        for image, label in dataloader:
            self.assertEqual(image[0].shape, (100, 100, 3))

        # Transpose transform flips the layout to CHW.
        trans_args = {'Transpose': {'perm': [2, 0, 1]}}
        ds = create_dataset('tensorflow', args, trans_args, None)
        dataloader = DATALOADERS['tensorflow'](ds)
        for image, label in dataloader:
            self.assertEqual(image[0].shape, (3, 100, 100))

        # onnxrt backend.
        args = {
            'COCORaw': {
                'root': './',
                'img_dir': '',
                'anno_dir': 'anno.json'
            }
        }
        ds = create_dataset('onnxrt_qlinearops', args, None, None)
        dataloader = DATALOADERS['onnxrt_qlinearops'](ds)
        for image, label in dataloader:
            self.assertEqual(image[0].shape, (100, 100, 3))

        # mxnet backend needs a custom collate_fn for the nested samples.
        args = {
            'COCORaw': {
                'root': './',
                'img_dir': '',
                'anno_dir': 'anno.json'
            }
        }
        ds = create_dataset('mxnet', args, None, None)

        def collate(batch):
            # Recursively stack NDArray / ndarray leaves; pass through the rest.
            elem = batch[0]
            if isinstance(elem, mx.ndarray.NDArray):
                return mx.nd.stack(*batch)
            elif isinstance(elem, collections.abc.Sequence):
                batch = zip(*batch)
                return [collate(samples) for samples in batch]
            elif isinstance(elem, collections.abc.Mapping):
                return {key: collate([d[key] for d in batch]) for key in elem}
            elif isinstance(elem, np.ndarray):
                return np.stack(batch)
            else:
                return batch

        dataloader = DATALOADERS['mxnet'](ds, collate_fn=collate)
        for image, label in dataloader:
            self.assertEqual(image[0].shape, (100, 100, 3))

        # pytorch backend with an analogous numpy-only collate_fn.
        args = {
            'COCORaw': {
                'root': './',
                'img_dir': '',
                'anno_dir': 'anno.json'
            }
        }
        ds = create_dataset('pytorch', args, None, None)

        def collate(batch):
            # Same recursion as above, without the mxnet NDArray branch.
            elem = batch[0]
            if isinstance(elem, collections.abc.Mapping):
                return {key: collate([d[key] for d in batch]) for key in elem}
            elif isinstance(elem, collections.abc.Sequence):
                batch = zip(*batch)
                return [collate(samples) for samples in batch]
            elif isinstance(elem, np.ndarray):
                return np.stack(batch)
            else:
                return batch

        dataloader = DATALOADERS['pytorch'](dataset=ds, collate_fn=collate)
        for image, label in dataloader:
            self.assertEqual(image[0].shape, (100, 100, 3))

        os.remove('test_0.jpg')
        os.remove('test_1.jpg')
        os.remove('anno.json')
Exemplo n.º 12
0
    def test_onnx(self):
        """ImagenetRaw dataset for onnxrt: with/without an image list file,
        old DataLoader API, extra whitespace in list entries, missing image
        files, and an unreadable entry that must raise.

        NOTE(review): relies on a pre-existing val/ directory containing
        test.jpg, test2.jpg and val.txt created by earlier setup — confirm
        against the test fixture.
        """
        # No image_list: every image under data_path is used.
        dataloader_args = {
            'dataset': {
                "ImagenetRaw": {
                    'data_path': 'val',
                    'image_list': None
                }
            },
            'transform': {
                'Resize': {
                    'size': 24
                }
            },
            'filter': None
        }
        dataloader = create_dataloader('onnxrt_integerops', dataloader_args)
        for data in dataloader:
            self.assertEqual(data[0][0].shape, (24, 24, 3))
            break

        # Explicit image_list file.
        dataloader_args = {
            'dataset': {
                "ImagenetRaw": {
                    'data_path': 'val',
                    'image_list': 'val/val.txt'
                }
            },
            'transform': {
                'Resize': {
                    'size': 24
                }
            },
            'filter': None
        }
        dataloader = create_dataloader('onnxrt_integerops', dataloader_args)
        for data in dataloader:
            self.assertEqual(data[0][0].shape, (24, 24, 3))
            break
        # test old api
        eval_dataset = create_dataset('onnxrt_integerops',
                                      {'Imagenet': {
                                          'root': './'
                                      }}, None, None)
        dataloader = DataLoader('onnxrt_integerops',
                                dataset=eval_dataset,
                                batch_size=1)
        for data in dataloader:
            self.assertEqual(data[0][0].shape, (100, 100, 3))
            break

        # List names test.jpg and test2.jpg; only one exists on disk, so
        # the dataset keeps a single sample.
        with open('val/fake_map.txt', 'w') as f:
            f.write('test.jpg   0 \n')
            f.write('test2.jpg   1')
        dataset_args = {
            "ImagenetRaw": {
                'data_path': 'val',
                'image_list': 'val/fake_map.txt'
            },
        }
        dataset = create_dataset('onnxrt_integerops', dataset_args, None, None)
        self.assertEqual(len(dataset), 1)

        # List contains only a missing image: no samples -> ValueError.
        with open('val/fake_map.txt', 'w') as f:
            f.write('test2.jpg   1')
        dataloader_args = {
            'dataset': {
                "ImagenetRaw": {
                    'data_path': 'val',
                    'image_list': 'val/fake_map.txt'
                }
            },
            'transform': None,
            'filter': None
        }
        self.assertRaises(ValueError, create_dataloader, 'onnxrt_integerops',
                          dataloader_args)

        # A missing entry alongside an existing one: the loader still
        # yields the existing image.
        with open('val/not_found_map.txt', 'w') as f:
            f.write('test.jpg   0' + '\n')
            f.write('not_found.jpg   1')
        dataloader_args = {
            'dataset': {
                "ImagenetRaw": {
                    'data_path': 'val',
                    'image_list': 'val/not_found_map.txt'
                }
            },
            'transform': {
                'Resize': {
                    'size': 24
                }
            },
            'filter': None
        }
        dataloader = create_dataloader('onnxrt_integerops', dataloader_args)
        for data in dataloader:
            self.assertEqual(data[0][0].shape, (24, 24, 3))
            break

        # List whose only image cannot be read -> ValueError at creation.
        with open('val/blank.txt', 'w') as f:
            f.write('blank.jpg   0')
        dataloader_args = {
            'dataset': {
                "ImagenetRaw": {
                    'data_path': 'val',
                    'image_list': 'val/blank.txt'
                }
            },
            'transform': None,
            'filter': None
        }
        self.assertRaises(ValueError, create_dataloader, 'onnxrt_qlinearops',
                          dataloader_args)
Exemplo n.º 13
0
    def test_coco_raw(self):
        """Exercise the COCORaw dataset with a minimal fake COCO annotation file.

        Creates two random 100x100 JPEG images and a hand-built COCO-style
        ``anno.json``, then checks that:
          * with no transform, the dataloader yields PIL images of size (100, 100);
          * with the ``Rescale`` transform, it yields arrays of shape (100, 100, 3).

        The fixture deliberately includes a third image entry (id 397135) that
        has no file on disk and an empty bbox — presumably exercised as the
        "skip missing/invalid sample" path of COCORaw (TODO: confirm against
        the dataset implementation). All temp files are removed even if an
        assertion fails.
        """
        import json

        # Two identical random JPEGs acting as the on-disk COCO images.
        random_array = np.random.random_sample([100, 100, 3]) * 255
        random_array = random_array.astype(np.uint8)
        im = Image.fromarray(random_array)
        im.save('000000397133.jpg')
        im.save('000000397134.jpg')

        # Minimal COCO-format annotation structure: 3 image entries, 3
        # annotations (one with an empty bbox), and a single category.
        # NOTE(review): the annotations reuse id 1768 — looks intentional for
        # this fixture, but real COCO ids are unique.
        fake_dict = {
            'info': {
                'description': 'COCO 2017 Dataset',
                'url': 'http://cocodataset.org',
                'version': '1.0',
                'year': 2017,
                'contributor': 'COCO Consortium',
                'date_created': '2017/09/01'
            },
            'licenses': {},
            'images': [{
                'file_name': '000000397133.jpg',
                'height': 100,
                'width': 100,
                'id': 397133
            }, {
                'file_name': '000000397134.jpg',
                'height': 100,
                'width': 100,
                'id': 397134
            }, {
                'file_name': '000000397135.jpg',
                'height': 100,
                'width': 100,
                'id': 397135
            }],
            'annotations': [{
                'category_id': 18,
                'id': 1768,
                'iscrowd': 0,
                'image_id': 397133,
                'bbox': [473.07, 395.93, 38.65, 28.67],
            }, {
                'category_id': 18,
                'id': 1768,
                'iscrowd': 0,
                'image_id': 397134,
                'bbox': [473.07, 395.93, 38.65, 28.67],
            }, {
                'category_id': 18,
                'id': 1768,
                'iscrowd': 0,
                'image_id': 397135,
                'bbox': [],
            }],
            'categories': [{
                'supercategory': 'animal',
                'id': 18,
                'name': 'dog'
            }]
        }
        with open('anno.json', 'w') as f:
            f.write(json.dumps(fake_dict))

        try:
            args = {
                'COCORaw': {
                    'root': './',
                    'img_dir': '',
                    'anno_dir': 'anno.json'
                }
            }
            # No transform: samples come back as PIL images.
            ds = create_dataset('tensorflow', args, None, None)
            dataloader = DataLoader('tensorflow', ds)
            for image, label in dataloader:
                self.assertEqual(image[0].size, (100, 100))

            # Rescale transform: samples become HWC float arrays.
            trans_args = {'Rescale': {}}
            ds = create_dataset('tensorflow', args, trans_args, None)
            dataloader = DataLoader('tensorflow', ds)
            for image, label in dataloader:
                self.assertEqual(image[0].shape, (100, 100, 3))
        finally:
            # Clean up temp fixtures even when an assertion above fails, so
            # later tests in this module don't pick up stale files.
            os.remove('000000397133.jpg')
            os.remove('000000397134.jpg')
            os.remove('anno.json')