Example No. 1
        def single_test(model_name, dataset_name):
            '''This is an inner function that tests model_to_dataset for a
            single case, i.e. a single model against a single dataset.
            '''

            print('\n---')

            cv_model = load_model(model_name, source='torch')

            dataset_splits = load_dataset(dataset_name)
            split_name = next(iter(dataset_splits.keys()))
            cv_dataset = dataset_splits[split_name]

            cv_model, cv_dataset = model_to_dataset(cv_model, cv_dataset)

            device = torch.device(
                'cuda') if torch.cuda.is_available() else torch.device('cpu')

            image, target = next(iter(cv_dataset))
            targets = [target]
            images = [image]

            # torchvision-style detection models take a list of image tensors
            # and a list of target dicts; move both to the selected device
            images = [img.to(device) for img in images]
            targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

            predictions = cv_model.raw(images, targets)
            print(predictions)
Example No. 2
        def single_test(model_name, dataset_name):
            '''This is an inner function that tests model_to_dataset for a
            single case, i.e. a single model against a single dataset.
            '''

            print('\n---')

            cv_model = load_model(model_name, source='torch')

            dataset_splits = load_dataset(dataset_name)
            split_name = next(iter(dataset_splits.keys()))
            cv_dataset = dataset_splits[split_name]

            cv_model, cv_dataset = model_to_dataset(cv_model, cv_dataset)

            self.assertEqual(
                utils.compare_shapes(cv_model.original_input_shape,
                                     cv_dataset.shape), True)
            ds_classes = cv_dataset.classes_shape[0]
            model_classes = cv_model.original_output_shape[0]
            self.assertEqual(model_classes, ds_classes)

            n = 3
            for i, item in enumerate(cv_dataset):
                if i == n:
                    break

                image = item['image']
                print(image.shape)
                output = cv_model(image)
                output_shape = output.shape[1]
                self.assertEqual(output_shape, ds_classes)
Example No. 3
    def test_abstract_model(self):
        '''Make sure we can create an abstract model using
        Keras models.
        '''

        for task in keras_wrapper.MODELS:
            for model_name in keras_wrapper.MODELS[task]:

                cv_model = load_model(model_name, 'keras')

                self.assertEqual(CvModel, type(cv_model))
                self.assertEqual(cv_model.source, 'keras')
                self.assertEqual(cv_model.original_input_type, 'numpy.ndarray')
Example No. 4
    def test_abstract_model(self):
        '''Make sure we can create an abstract model using torch models.
        '''

        for task in torch_wrapper.MODELS:
            for model_name in torch_wrapper.MODELS[task]:

                cv_model = load_model(model_name, 'torch')

                self.assertEqual(CvModel, type(cv_model))
                self.assertEqual(cv_model.source, 'torchvision')
                self.assertEqual(cv_model.original_input_type, 'torch.Tensor')
Example No. 5
        def single_test(model_name, dataset_name):
            '''This is an inner function that tests model_to_dataset for a
            single case, i.e. a single model against a single dataset.
            '''

            print('\n---')

            cv_model = load_model(model_name, source='torch')

            dataset_splits = load_dataset(dataset_name)
            split_name = next(iter(dataset_splits.keys()))
            cv_dataset = dataset_splits[split_name]

            cv_model, cv_dataset = model_to_dataset(cv_model, cv_dataset)

            n = 3
            images = []
            for i, item in enumerate(cv_dataset):
                images.append(item['image'])

                if i == n - 1:
                    break

            batch = torch.stack(images, dim=0)
            self.assertEqual(tuple(batch.shape), (n, 3, 224, 224))
            self.assertEqual(cv_model.original_output_shape,
                             (len(cv_dataset.pixel_classes), 224, 224))

            output = cv_model(batch)['out']

            print('\nTesting predictions...')

            for i, prediction in enumerate(output):

                mask = torch.argmax(prediction.squeeze(),
                                    dim=0).detach().numpy()

                self.assertEqual(tuple(prediction.shape),
                                 (len(cv_dataset.pixel_classes), 224, 224))
                self.assertEqual(mask.shape, (224, 224))
Example No. 6
        def single_test(model_name, dataset_name):
            '''This is an inner function that tests model_to_dataset for a
            single case, i.e. a single model against a single dataset.
            '''

            print('\n---')

            cv_model = load_model(model_name, 'keras')

            dataset_splits = load_dataset(dataset_name)
            split_name = next(iter(dataset_splits.keys()))
            cv_dataset = dataset_splits[split_name]

            cv_model, cv_dataset = model_to_dataset(cv_model, cv_dataset)

            # Assert the dataset channels (all Keras models expect 3 channels),
            # assert the model input shape and the dataset shape are now
            # compatible, and assert the model output shape is now compatible
            # with the dataset classes

            self.assertEqual(cv_dataset.shape[-1], 3)
            self.assertEqual(
                utils.compare_shapes(cv_model.original_input_shape,
                                     cv_dataset.shape), True)
            self.assertEqual(cv_model.original_output_shape,
                             cv_dataset.classes_shape)

            # For some image samples, assert dataset sample shapes matched the
            # cv_dataset.shape, and then assert predictions shape (model output)
            # matches the expected classes

            print('Testing model_to_dataset predictions...')

            # TODO(Hugo)
            # Test with batches of more than 1 image. If the dataset images have
            # different sizes, they all have to be preprocessed to the same size
            # so they can be passed to the model, e.g. caltech_birds2010 does
            # not work for batches of n=3 since its images have different sizes
            # (a resizing sketch is shown after this example).
            n = 1

            sample = []
            for i, item in enumerate(cv_dataset):

                if i == n:
                    break

                self.assertEqual(
                    utils.compare_shapes(cv_dataset.shape,
                                         item['image'].shape), True,
                    'Dataset shape {} is not equal to item shape {}'.format(
                        cv_dataset.shape, item['image'].shape))

                image_sample = item['image']
                sample.append(image_sample)

            sample = np.array(sample)

            print(' => Making predictions with batch {}'.format(sample.shape))

            predictions = cv_model(sample)

            expected_predictions_shape = (n, ) + cv_dataset.classes_shape
            self.assertEqual(
                utils.compare_shapes(expected_predictions_shape,
                                     predictions.shape), True,
                'Expected shape {} is not equal to prediction shape {}'.format(
                    expected_predictions_shape, predictions.shape))
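The TODO in the example above points out that batching more than one image only works when all images share the same size. Below is a minimal preprocessing sketch under that assumption; the resize_batch helper and the use of scikit-image are illustrative assumptions, not part of the sotaai API.

import numpy as np
from skimage.transform import resize  # assumption: scikit-image is available


def resize_batch(cv_dataset, n, target_shape=(224, 224, 3)):
    '''Resize the first n dataset images to a common shape and stack them.'''
    images = []
    for i, item in enumerate(cv_dataset):
        if i == n:
            break
        # skimage returns float images in [0, 1]; the exact preprocessing a
        # given model expects may differ from this
        images.append(resize(item['image'], target_shape))
    return np.stack(images, axis=0)  # shape: (n,) + target_shape

A batch built this way could then replace the single-image sample used in the test above.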
Example No. 7
# -*- coding: utf-8 -*-
# Author: Hugo Ochoa <*****@*****.**>
# Copyright: Stateoftheart AI PBC 2021.
'''
Example 1

It shows:
- How to load a model and a dataset
- How to make a model compatible with a dataset
- How to iterate over a dataset and create a batch
- How to execute a model with a batch of data to get predictions
'''
from sotaai.cv import load_dataset, load_model, model_to_dataset
import numpy as np

model = load_model('ResNet152')
dataset = load_dataset('mnist')

dataset_split = dataset['train']

model, dataset = model_to_dataset(model, dataset_split)

batch = []
batch_size = 10

for i, item in enumerate(dataset_split):

    if i == batch_size:
        break

    image_sample = item['image']
    batch.append(image_sample)
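The published snippet stops at the loop; according to its own docstring, the remaining steps are turning the collected images into a batch and running the model on it. A minimal completion sketch follows, assuming (as in the Keras-based test above) that the adapted model accepts a NumPy batch directly.

# Completion sketch (assumption: after model_to_dataset the model accepts a
# NumPy batch directly, as in the Keras-based example above)
batch = np.array(batch)
print('Batch shape:', batch.shape)

predictions = model(batch)
print('Predictions shape:', predictions.shape)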
Example No. 8
  def test_segmentation_example(self):

    dataset_name = 'lost_and_found'
    model_name = 'fcn_resnet101'

    dataset_splits = load_dataset(dataset_name)
    split_name = next(iter(dataset_splits.keys()))
    cv_dataset = dataset_splits[split_name]

    cv_model = load_model(model_name)

    print('\nModel ', cv_model.name)
    print(' Input: ', tuple(cv_model.original_input_shape))
    print(' Output: ', cv_model.original_output_shape)
    print('Dataset: ', cv_dataset.name)
    print(' Shape:   ', cv_dataset.shape)
    print(' Pixel Classes: ', len(cv_dataset.pixel_classes))

    # BEGIN model_to_dataset example logic

    # All pytorch pre-trained models expect:
    # - Input of shape (N, 3, H, W), where N is the batch size
    # - H and W are expected to be at least 224
    # - Pixel values must be in range [0, 1] and normalized with mean
    #   [0.485, 0.456, 0.406] and std [0.229, 0.224, 0.225]

    print('\nAdjusting...')

    raw_model = cv_model.raw

    # TODO(Hugo)
    # Not sure if this is the proper way to change the model output
    raw_model.classifier[-1] = torch.nn.Conv2d(512,
                                               len(cv_dataset.pixel_classes), 1)
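    # Note on the TODO above (assumption, not from the original source): for
    # torchvision FCN models the classifier head ends in a 1x1 Conv2d with 512
    # input channels, so replacing classifier[-1] as done here is the usual way
    # to adapt the output to a new number of classes; a model built with
    # aux_loss=True also has an aux_classifier whose last layer (a Conv2d with
    # 256 input channels) can be replaced the same way.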

    cv_model.update_raw_model(raw_model)

    transform = Transforms.Compose([
        Transforms.ToPILImage(),
        Transforms.Resize(256),
        Transforms.CenterCrop(224),
        Transforms.ToTensor(),
        Transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])

    n = 3
    images = []
    numpy_images = []
    for i, item in enumerate(cv_dataset):
      numpy_images.append(item['image'])
      images.append(transform(item['image']))

      if i == n - 1:
        break

    batch = torch.stack(images, dim=0)

    # END model_to_dataset example logic

    print('\nModel ', cv_model.name)
    print(' Input: ', tuple(cv_model.original_input_shape))
    print(' Output: ', cv_model.original_output_shape)
    print('Dataset: ', cv_dataset.name)
    print(' Shape:   ', tuple(batch.shape))
    print(' Pixel Classes: ', len(cv_dataset.pixel_classes))

    # Assert that the batch channels, width and height are 3, 224, 224 as
    # required by all Torchvision segmentation models, and that the model
    # output shape matches the expected classes and image dimensions:
    self.assertEqual(tuple(batch.shape), (n, 3, 224, 224))
    self.assertEqual(cv_model.original_output_shape,
                     (len(cv_dataset.pixel_classes), 224, 224))

    figure = plt.figure()

    print('\nTesting predictions...')

    output = raw_model(batch)['out']

    print('Sample output shape {}'.format(output.shape))

    for i, prediction in enumerate(output):

      mask = torch.argmax(prediction.squeeze(), dim=0).detach().numpy()

      self.assertEqual(tuple(prediction.shape),
                       (len(cv_dataset.pixel_classes), 224, 224))
      self.assertEqual(mask.shape, (224, 224))

      print('Prediction {} {} {}'.format(i, prediction.shape, mask.shape))

      segmentation_image = utils.create_segmentation_image(
          mask, len(cv_dataset.pixel_classes))

      figure.add_subplot(n, 2, 2 * i + 1).imshow(numpy_images[i])
      figure.add_subplot(n, 2, 2 * i + 2).imshow(segmentation_image)

    plt.show()