def testParse(self):
    """ToArray should convert PIL and MXNet images into numpy arrays."""
    pixels = (np.random.random_sample([10, 10, 3]) * 255).astype(np.uint8)
    pil_image = Image.fromarray(pixels)

    # ONNX runtime branch: PIL image -> ndarray.
    onnx_to_array = TRANSFORMS('onnxrt_qlinearops', 'preprocess')['ToArray']()
    converted = onnx_to_array((pil_image, None))[0]
    self.assertTrue(isinstance(converted, np.ndarray))

    # MXNet branch: NDArray -> ndarray; unsupported input types raise.
    mx_to_array = TRANSFORMS('mxnet', 'preprocess')['ToArray']()
    converted = mx_to_array((mx.nd.array(pixels), None))[0]
    self.assertTrue(isinstance(converted, np.ndarray))
    self.assertRaises(ValueError, mx_to_array, ([1, 2], None))
def testLabelShift(self):
    """LabelShift postprocess should subtract the configured offset from labels."""
    shift = TRANSFORMS('tensorflow', "postprocess")['LabelShift'](label_shift=1)
    dummy_image = np.random.random_sample([600, 600, 3]).astype(np.float32)
    shifted_label = shift((dummy_image, 1001))[1]
    self.assertEqual(shifted_label, 1000)
def setUpClass(cls):
    """Build shared test images and preprocess registries for every framework."""
    cls.img = np.random.random_sample([10, 10, 3]) * 255
    cls.tf_trans = TRANSFORMS('tensorflow', 'preprocess')
    cls.pt_trans = TRANSFORMS('pytorch', 'preprocess')
    cls.mx_trans = TRANSFORMS('mxnet', 'preprocess')
    cls.ox_trans = TRANSFORMS('onnxrt_qlinearops', 'preprocess')
    uint8_img = cls.img.astype(np.uint8)
    cls.mx_img = mx.nd.array(uint8_img)
    cls.pt_img = Image.fromarray(uint8_img)
    # Instantiating each postprocess registry just checks registration works.
    for framework in ('tensorflow', 'pytorch', 'mxnet',
                      'onnxrt_qlinearops', 'onnxrt_integerops'):
        _ = TRANSFORMS(framework, 'postprocess')
def testResizeCropImagenetTransform(self):
    """ResizeCropImagenet (onnx) should yield a channel-first 3x224x224 result."""
    registry = TRANSFORMS('onnxrt_integerops', "preprocess")
    crop = registry['ResizeCropImagenet'](height=224, width=224)
    output = crop((self.img, 0))[0]
    # Channel-first layout: 3 channels, each 224 rows of 224 columns.
    self.assertEqual(len(output), 3)
    self.assertEqual(len(output[0]), 224)
    self.assertEqual(len(output[0][0]), 224)
def testQuantizedInput(self):
    """QuantizedInput should scale float input into the uint8 value range."""
    quantize = TRANSFORMS('tensorflow', "preprocess")['QuantizedInput'](
        dtype='uint8', scale=100)
    float_image = np.random.random_sample([600, 600, 3]).astype(np.float32)
    quantized = quantize((float_image, 1001))[0]
    # The transform produces a TF tensor; evaluate it to inspect the values.
    values = quantized.eval(session=tf.compat.v1.Session())
    self.assertLessEqual(values.max(), 255)
    self.assertGreaterEqual(values.min(), 0)
def testParse(self):
    """ImageTypeParse inside a Compose should turn a PIL image into an ndarray."""
    from PIL import Image
    pixels = (np.random.random_sample([100, 100, 3]) * 255).astype(np.uint8)
    pil_image = Image.fromarray(pixels)
    registry = TRANSFORMS('onnxrt_qlinearops', 'preprocess')
    pipeline = registry['Compose']([registry['ImageTypeParse']()])
    parsed = pipeline((pil_image, None))
    self.assertEqual(type(parsed[0]).__name__, 'ndarray')
def testResizeCropImagenetTransform(self):
    """ResizeCropImagenet (tensorflow) should yield an HWC 224x224x3 tensor."""
    crop = TRANSFORMS('tensorflow', "preprocess")['ResizeCropImagenet'](
        height=224, width=224)
    float_image = np.random.random_sample([600, 600, 3]).astype(np.float32)
    tensor = crop((float_image, 0))[0]
    # Evaluate the TF tensor so the shape can be inspected as nested lengths.
    resized = tensor.eval(session=tf.compat.v1.Session())
    self.assertEqual(len(resized), 224)
    self.assertEqual(len(resized[0]), 224)
    self.assertEqual(len(resized[0][0]), 3)
def test_tensorflow_2(self):
    """Resize followed by RandomCrop should produce a crop-sized batch."""
    image = np.ones([1, 256, 256, 1])
    registry = TRANSFORMS(framework="tensorflow", process="preprocess")
    pipeline = registry['Compose']([
        registry['Resize'](size=[224, 224]),
        registry['RandomCrop'](size=128),
    ])
    cropped = pipeline((image, None))[0]
    self.assertEqual(cropped.shape, (1, 128, 128, 1))
def test_pytorch_dummy(self):
    """Dummy pytorch dataset should honor its transform and dynamic re-batching."""
    datasets = DATASETS('pytorch')
    resize = TRANSFORMS('pytorch', 'preprocess')['Resize'](size=100)
    dataset = datasets['dummy'](shape=[(4, 256, 256, 3), (4, 1)],
                                high=[10., 10.], low=[0., 0.],
                                transform=resize)
    loader = DATALOADERS['pytorch'](dataset)
    data, label = next(iter(loader))
    self.assertEqual(data.shape, (1, 256, 256, 3))
    # Re-batch on the fly and confirm the new batch size takes effect.
    loader.batch(batch_size=2, last_batch='rollover')
    data, label = next(iter(loader))
    self.assertEqual(data.shape, (2, 256, 256, 3))
def test_onnxrt_qlinear_dummy(self):
    """Dummy onnxrt dataset should honor its transform and dynamic re-batching."""
    datasets = DATASETS('onnxrt_qlinearops')
    resize = TRANSFORMS('onnxrt_qlinearops', 'preprocess')['Resize'](size=100)
    dataset = datasets['dummy'](shape=(4, 256, 256, 3), transform=resize)
    loader = DATALOADERS['onnxrt_qlinearops'](dataset)
    batch = next(iter(loader))
    self.assertEqual(batch.shape, (1, 256, 256, 3))
    # Re-batch on the fly and confirm the new batch size takes effect.
    loader.batch(batch_size=2, last_batch='rollover')
    batch = next(iter(loader))
    self.assertEqual(batch.shape, (2, 256, 256, 3))
def testPyTorch(self):
    """AlignImageChannel on PIL images: valid dims convert, invalid dims raise."""
    registry = TRANSFORMS('pytorch', 'preprocess')
    # (input image, requested channel count, expected PIL mode)
    cases = [
        (TestAlignImageChannel.pt_img1, 1, 'L'),
        (TestAlignImageChannel.pt_img2, 1, 'L'),
        (TestAlignImageChannel.pt_img3, 3, 'RGB'),
    ]
    for source, dim, expected_mode in cases:
        aligned, _ = registry['AlignImageChannel'](dim=dim)((source, None))
        self.assertEqual(aligned.mode, expected_mode)
    # The pytorch backend rejects unsupported channel counts at construction.
    with self.assertRaises(ValueError):
        registry['AlignImageChannel'](dim=2)
    with self.assertRaises(ValueError):
        registry['AlignImageChannel'](dim=5)
def testONNX(self):
    """AlignImageChannel on ndarrays: valid dims convert, invalid dims raise."""
    registry = TRANSFORMS('onnxrt_qlinearops', 'preprocess')
    # (input array, requested channel count == expected last-axis size)
    cases = [
        (TestAlignImageChannel.img1, 1),
        (TestAlignImageChannel.img2, 1),
        (TestAlignImageChannel.img3, 3),
    ]
    for source, dim in cases:
        aligned, _ = registry['AlignImageChannel'](dim=dim)(
            (source.astype(np.uint8), None))
        self.assertEqual(aligned.shape[-1], dim)
    # dim=2 is constructible but rejected when applied to this input.
    align = registry['AlignImageChannel'](dim=2)
    self.assertRaises(ValueError, align,
                      (TestAlignImageChannel.img1.astype(np.uint8), None))
    # dim=5 is rejected outright at construction time.
    with self.assertRaises(ValueError):
        registry['AlignImageChannel'](dim=5)
def setUpClass(cls):
    """Share a constant all-ones image and the tensorflow preprocess registry."""
    cls.img = np.ones((10, 10, 3))
    cls.transforms = TRANSFORMS('tensorflow', 'preprocess')
def setUpClass(cls):
    """Share a random uint8-range image and the onnxrt preprocess registry."""
    cls.img = 255 * np.random.random_sample((100, 100, 3))
    cls.transforms = TRANSFORMS('onnxrt_qlinearops', 'preprocess')
def setUpClass(cls):
    """Share a random MXNet NDArray image and the mxnet preprocess registry."""
    pixels = 255 * np.random.random_sample((100, 100, 3))
    cls.img = mx.nd.array(pixels)
    cls.transforms = TRANSFORMS('mxnet', 'preprocess')
def testLabelBalanceCOCORecord(self):
    """End-to-end check of the LabelBalance filter over a COCO TFRecord.

    Builds a two-example TFRecord (one bbox vs. two bboxes), then asserts the
    filtered output via three equivalent construction paths: direct dataset/
    filter objects, create_dataset(), and create_dataloader(). The asserted
    label shape (1, 2, 4) indicates only two-bbox records pass the filter.
    """
    from PIL import Image
    tf.compat.v1.disable_eager_execution()
    # Synthesize a JPEG on disk to embed in the TFRecord examples.
    random_array = np.random.random_sample([100, 100, 3]) * 255
    random_array = random_array.astype(np.uint8)
    im = Image.fromarray(random_array)
    im.save('test.jpeg')
    image = tf.compat.v1.gfile.FastGFile('test.jpeg', 'rb').read()
    source_id = '000000397133.jpg'.encode('utf-8')
    label = 'person'.encode('utf-8')
    # Example with a single bounding box.
    example1 = tf.train.Example(features=tf.train.Features(
        feature={
            'image/encoded': tf.train.Feature(bytes_list=tf.train.BytesList(value=[image])),
            'image/object/class/text': tf.train.Feature(bytes_list=tf.train.BytesList(value=[label])),
            'image/source_id': tf.train.Feature(bytes_list=tf.train.BytesList(
                value=[source_id])),
            'image/object/bbox/xmin': tf.train.Feature(float_list=tf.train.FloatList(value=[10])),
            'image/object/bbox/ymin': tf.train.Feature(float_list=tf.train.FloatList(value=[10])),
            'image/object/bbox/xmax': tf.train.Feature(float_list=tf.train.FloatList(value=[100])),
            'image/object/bbox/ymax': tf.train.Feature(float_list=tf.train.FloatList(value=[100])),
        }))
    # Example with two bounding boxes.
    example2 = tf.train.Example(features=tf.train.Features(
        feature={
            'image/encoded': tf.train.Feature(bytes_list=tf.train.BytesList(value=[image])),
            'image/object/class/text': tf.train.Feature(bytes_list=tf.train.BytesList(value=[label])),
            'image/source_id': tf.train.Feature(bytes_list=tf.train.BytesList(
                value=[source_id])),
            'image/object/bbox/xmin': tf.train.Feature(float_list=tf.train.FloatList(
                value=[10, 20])),
            'image/object/bbox/ymin': tf.train.Feature(float_list=tf.train.FloatList(
                value=[10, 20])),
            'image/object/bbox/xmax': tf.train.Feature(float_list=tf.train.FloatList(
                value=[100, 200])),
            'image/object/bbox/ymax': tf.train.Feature(float_list=tf.train.FloatList(
                value=[100, 200])),
        }))
    with tf.io.TFRecordWriter('test.record') as writer:
        writer.write(example1.SerializeToString())
        writer.write(example2.SerializeToString())
    # Path 1: construct dataset, filter, and dataloader objects directly.
    preprocesses = TRANSFORMS('tensorflow', 'preprocess')
    preprocess = get_preprocess(preprocesses, {'ParseDecodeCoco': {}})
    filters = FILTERS('tensorflow')
    filter = filters['LabelBalanceCOCORecord'](2)
    datasets = DATASETS('tensorflow')
    dataset = datasets['COCORecord']('test.record', \
        transform=preprocess, filter=filter)
    dataloader = DATALOADERS['tensorflow'](dataset=dataset, batch_size=1)
    for (inputs, labels) in dataloader:
        self.assertEqual(inputs.shape, (1, 100, 100, 3))
        self.assertEqual(labels[0].shape, (1, 2, 4))
    # Path 2: same pipeline via the create_dataset() convenience wrapper.
    dataset2 = create_dataset('tensorflow', {'COCORecord': {
        'root': 'test.record'
    }}, {'ParseDecodeCoco': {}}, {'LabelBalance': {
        'size': 2
    }})
    dataloader2 = DATALOADERS['tensorflow'](dataset=dataset2, batch_size=1)
    for (inputs, labels) in dataloader2:
        self.assertEqual(inputs.shape, (1, 100, 100, 3))
        self.assertEqual(labels[0].shape, (1, 2, 4))
    # Path 3: same pipeline via the create_dataloader() convenience wrapper.
    dataloader3 = create_dataloader('tensorflow',
        {'batch_size': 1, 'dataset': {'COCORecord': {'root': 'test.record'}},\
         'filter': {'LabelBalance': {'size': 2}}, 'transform': {'ParseDecodeCoco': {}}})
    for (inputs, labels) in dataloader3:
        self.assertEqual(inputs.shape, (1, 100, 100, 3))
        self.assertEqual(labels[0].shape, (1, 2, 4))
    # Clean up the temporary fixture files.
    os.remove('test.record')
    os.remove('test.jpeg')
def setUpClass(cls):
    """Share a random image plus the mxnet and pytorch preprocess registries."""
    cls.img = 255 * np.random.random_sample([10, 10, 3])
    cls.mx_trans = TRANSFORMS('mxnet', 'preprocess')
    cls.pt_trans = TRANSFORMS('pytorch', 'preprocess')