Example #1
def test_sparse_recall():
    config_reset()
    test_str = '''
    dataset:
      classes:
        - 0:
            name: class_0
        - 1:
            name: class_1
    '''
    config.load(yaml_str=test_str)

    r0 = ext.metric('SparseRecall')(0)
    r1 = ext.metric('SparseRecall')(1)
    p0 = ext.metric('SparsePrecision')(0)
    p1 = ext.metric('SparsePrecision')(1)
    z = tf.zeros((3, 3, 3, 3), dtype=tf.int32)
    o = tf.ones((3, 3, 3, 3), dtype=tf.int32)
    metrics = [r0, r1, p0, p1]

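    # update_state appears to follow the Keras (y_true, y_pred, sample_weight)
    # convention; with all-zero labels and predictions, class 0 is predicted
    # perfectly and class 1 never occurs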
    for m in metrics:
        m.reset_state()
        m.update_state(z, z, None)
    assert r0.result() == 1.0
    assert r1.result() == 0.0
    assert p0.result() == 1.0
    assert p1.result() == 0.0

    for m in metrics:
        m.reset_state()
        m.update_state(o, z, None)
    assert r0.result() == 0.0
    assert r1.result() == 0.0
    assert p0.result() == 0.0
    assert p1.result() == 0.0
Example #2
def test_general():
    config.reset()

    assert config.general.gpus() == -1

    test_str = '''
    general:
      gpus: 3
    io:
      threads: 5
      block_size_mb: 10
      interleave_images: 3
      tile_ratio: 1.0
      cache:
        dir: nonsense
        limit: 2
    '''
    config.load(yaml_str=test_str)

    assert config.general.gpus() == 3
    assert config.io.threads() == 5
    assert config.io.block_size_mb() == 10
    assert config.io.interleave_images() == 3
    assert config.io.tile_ratio() == 1.0
    cache = config.io.cache.manager()
    assert cache.folder() == 'nonsense'
    assert cache.limit() == 2
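    # clean up the cache directory created by the manager above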
    os.rmdir('nonsense')
Example #3
File: test_config.py  Project: nasa/delta
def test_optimizer():
    config_reset()
    test_str = '''
    train:
      optimizer:
        Adam:
          learning_rate: 0.05
    '''
    config.load(yaml_str=test_str)
    opt = config_parser.optimizer_from_dict(config.train.spec().optimizer)
    assert opt.lr.numpy() == pytest.approx(0.05)

    config_reset()
    test_str = '''
    train:
      optimizer:
        Adam:
          learning_rate:
            PolynomialDecay:
              initial_learning_rate: 0.0001
              decay_steps: 100000
              end_learning_rate: 0.0000001
              power: 0.9
              cycle: false
          epsilon: 0.0001
    '''
    config.load(yaml_str=test_str)
    opt = config_parser.optimizer_from_dict(config.train.spec().optimizer)
    assert isinstance(opt.lr, tf.keras.optimizers.schedules.PolynomialDecay)
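    # with cycle: false, PolynomialDecay interpolates from
    # initial_learning_rate down to end_learning_rate, which it reaches
    # exactly at decay_steps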
    assert opt.lr(0).numpy() == pytest.approx(0.0001)
    assert opt.lr(100000).numpy() == pytest.approx(0.0000001)
Example #4
def load_dataset(source, output_size, chunk_size=3, autoencoder=False):
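    # source is ((image_path, label_path), image_extension, image_type,
    #            label_extension, label_type), as unpacked below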
    config_reset()
    (image_path, label_path) = source[0]
    config.load(yaml_str=
                f'''
                io:
                  cache:
                    dir: {os.path.dirname(image_path)}
                dataset:
                  images:
                    type: {source[2]}
                    directory: {os.path.dirname(image_path)}
                    extension: {source[1]}
                    preprocess: ~
                  labels:
                    type: {source[4]}
                    directory: {os.path.dirname(label_path)}
                    extension: {source[3]}
                    preprocess: ~''')

    if autoencoder:
        return imagery_dataset.AutoencoderDataset(config.dataset.images(), (chunk_size, chunk_size),
                                                  tile_shape=config.io.tile_size(),
                                                  stride=config.train.spec().stride)
    return imagery_dataset.ImageryDataset(config.dataset.images(), config.dataset.labels(),
                                          (output_size, output_size),
                                          (chunk_size, chunk_size), tile_shape=config.io.tile_size(),
                                          stride=config.train.spec().stride)
Example #5
def autoencoder(all_sources):
    source = all_sources[0]
    config.reset() # don't load any user files
    (image_path, _) = source[0]
    config.load(yaml_str=
                '''
                io:
                  cache:
                    dir: %s
                dataset:
                  images:
                    type: %s
                    directory: %s
                    extension: %s
                    preprocess:
                      enabled: false
                train:
                  network:
                    chunk_size: 3
                mlflow:
                  enabled: false''' %
                (os.path.dirname(image_path), source[2], os.path.dirname(image_path), source[1]))

    dataset = imagery_dataset.AutoencoderDataset(config.dataset.images(),
                                                 config.train.network.chunk_size(), config.train.spec().chunk_stride)
    return dataset
Example #6
def test_network_inline():
    config.reset()
    test_str = '''
    train:
      network:
        chunk_size: 5
        output_size: 1
        classes: 3
        model:
          params:
            v1: 10
          layers:
          - Flatten:
              input_shape: in_shape
          - Dense:
              units: v1
              activation: relu
          - Dense:
              units: out_dims
              activation: softmax
    '''
    config.load(yaml_str=test_str)
    assert config.train.network.chunk_size() == 5
    assert config.train.network.classes() == 3
    model = model_parser.config_model(2)()
    assert model.input_shape == (None, config.train.network.chunk_size(),
                                 config.train.network.chunk_size(), 2)
    assert model.output_shape == (None, config.train.network.classes())
Example #7
def test_network_inline():
    config_reset()
    test_str = '''
    dataset:
      classes: 3
    train:
      network:
        params:
          v1: 10
        layers:
        - Input:
            shape: [5, 5, num_bands]
        - Flatten:
        - Dense:
            units: v1
            activation: relu
        - Dense:
            units: 3
            activation: softmax
    '''
    config.load(yaml_str=test_str)
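    # config_model(2) binds num_bands to 2 in the Input shape, and the
    # params entry substitutes v1 = 10 into the first Dense layer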
    assert len(config.dataset.classes) == 3
    model = config_parser.config_model(2)()
    assert model.input_shape == (None, 5, 5, 2)
    assert model.output_shape == (None, len(config.dataset.classes))
Example #8
def test_train():
    config.reset()
    test_str = '''
    train:
      chunk_stride: 2
      batch_size: 5
      steps: 10
      epochs: 3
      loss_function: loss
      metrics: [metric]
      optimizer: opt
      validation:
        steps: 20
        from_training: true
    '''
    config.load(yaml_str=test_str)
    tc = config.train.spec()
    assert tc.chunk_stride == 2
    assert tc.batch_size == 5
    assert tc.steps == 10
    assert tc.epochs == 3
    assert tc.loss_function == 'loss'
    assert tc.metrics == ['metric']
    assert tc.optimizer == 'opt'
    assert tc.validation.steps == 20
    assert tc.validation.from_training
Example #9
def test_train():
    config_reset()
    test_str = '''
    train:
      stride: 2
      batch_size: 5
      steps: 10
      epochs: 3
      loss: SparseCategoricalCrossentropy
      metrics: [metric]
      optimizer: opt
      validation:
        steps: 20
        from_training: true
    '''
    config.load(yaml_str=test_str)
    tc = config.train.spec()
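    # a scalar stride in the YAML is expanded to a 2-tuple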
    assert tc.stride == (2, 2)
    assert tc.batch_size == 5
    assert tc.steps == 10
    assert tc.epochs == 3
    assert isinstance(config_parser.loss_from_dict(tc.loss),
                      tf.keras.losses.SparseCategoricalCrossentropy)
    assert tc.metrics == ['metric']
    assert tc.optimizer == 'opt'
    assert tc.validation.steps == 20
    assert tc.validation.from_training
Example #10
def test_general():
    config_reset()

    assert config.general.gpus() == -1

    test_str = '''
    general:
      gpus: 3
    io:
      threads: 5
      tile_size: [5, 5]
      interleave_images: 3
      cache:
        dir: nonsense
        limit: 2
    '''
    config.load(yaml_str=test_str)

    assert config.general.gpus() == 3
    assert config.io.threads() == 5
    assert config.io.tile_size()[0] == 5
    assert config.io.tile_size()[1] == 5
    assert config.io.interleave_images() == 3
    cache = config.io.cache.manager()
    assert cache.folder() == 'nonsense'
    assert cache.limit() == 2
    os.rmdir('nonsense')
Example #11
def load_dataset(source, output_size):
    config.reset() # don't load any user files
    (image_path, label_path) = source[0]
    config.load(yaml_str=
                '''
                io:
                  cache:
                    dir: %s
                dataset:
                  images:
                    type: %s
                    directory: %s
                    extension: %s
                    preprocess:
                      enabled: false
                  labels:
                    type: %s
                    directory: %s
                    extension: %s
                    preprocess:
                      enabled: false
                train:
                  network:
                    chunk_size: 3
                mlflow:
                  enabled: false''' %
                (os.path.dirname(image_path), source[2], os.path.dirname(image_path), source[1],
                 source[4], os.path.dirname(label_path), source[3]))

    dataset = imagery_dataset.ImageryDataset(config.dataset.images(), config.dataset.labels(),
                                             config.train.network.chunk_size(), output_size,
                                             config.train.spec().chunk_stride)
    return dataset
Example #12
def test_mapped():
    mcce = ext.loss('MappedCategoricalCrossentropy')
    z = tf.zeros((3, 3, 3, 3), dtype=tf.int32)
    o = tf.ones((3, 3, 3, 3), dtype=tf.float32)
    assert tf.reduce_sum(mcce([0, 0]).call(z, o)) == 0.0
    assert tf.reduce_sum(mcce([1, 0]).call(z, o)) > 10.0
    oo = tf.ones((3, 3, 3, 3, 2), dtype=tf.float32)
    assert tf.reduce_sum(mcce([[0, 0], [1, 1]]).call(z, oo)) == 0.0
    assert tf.reduce_sum(mcce([[1, 1], [0, 0]]).call(z, oo)) > 10.0

    config_reset()
    test_str = '''
    dataset:
      classes:
        - 0:
            name: class_0
        - 1:
            name: class_1
    '''
    config.load(yaml_str=test_str)

    assert tf.reduce_sum(mcce({0: 0, 1: 0}).call(z, o)) == 0.0
    assert tf.reduce_sum(mcce({'class_0': 0, 'class_1': 0}).call(z, o)) == 0.0
    assert tf.reduce_sum(mcce({0: 1, 1: 0}).call(z, o)) > 10.0
    assert tf.reduce_sum(mcce({'class_0': 1, 'class_1': 0}).call(z, o)) > 10.0
Example #13
def config_reset():
    """
    Resets the configuration with useful default options for testing.
    """
    config.reset() # don't load any user files
    config.load(yaml_str=
                '''
                mlflow:
                  enabled: false
                ''')
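
A minimal usage sketch (illustrative only, not from the project) showing how the tests above pair this helper with config.load and the accessor style used throughout:

def test_gpus_example():
    config_reset()                    # defaults, with mlflow disabled
    config.load(yaml_str='''
    general:
      gpus: 2
    ''')
    assert config.general.gpus() == 2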
Example #14
def test_network_file():
    config_reset()
    test_str = '''
    dataset:
      classes: 3
    train:
      network:
        yaml_file: networks/convpool.yaml
    '''
    config.load(yaml_str=test_str)
    model = config_parser.config_model(2)()
    assert model.input_shape == (None, 5, 5, 2)
    assert model.output_shape == (None, 3, 3, 3)
Example #15
def test_tensorboard():
    config.reset()

    assert not config.tensorboard.enabled()

    test_str = '''
    tensorboard:
      enabled: false
      dir: nonsense
    '''
    config.load(yaml_str=test_str)

    assert not config.tensorboard.enabled()
    assert config.tensorboard.dir() == 'nonsense'
Example #16
def test_validate():
    config_reset()
    test_str = '''
    train:
      stride: -1
    '''
    with pytest.raises(AssertionError):
        config.load(yaml_str=test_str)
    config_reset()
    test_str = '''
    train:
      stride: 0.5
    '''
    with pytest.raises(TypeError):
        config.load(yaml_str=test_str)
Example #17
def test_images_files():
    config_reset()
    file_path = os.path.join(os.path.dirname(__file__), 'data', 'landsat.tiff')
    test_str = '''
    dataset:
      images:
        type: tiff
        preprocess: ~
        files: [%s]
    ''' % (file_path)
    config.load(yaml_str=test_str)
    im = config.dataset.images()
    assert im.type() == 'tiff'
    assert len(im) == 1
    assert im[0] == file_path
Example #18
def test_images_dir():
    config_reset()
    dir_path = os.path.join(os.path.dirname(__file__), 'data')
    test_str = '''
    dataset:
      images:
        type: tiff
        preprocess: ~
        directory: %s/
        extension: .tiff
    ''' % (dir_path)
    config.load(yaml_str=test_str)
    im = config.dataset.images()
    assert im.type() == 'tiff'
    assert len(im) == 1
    assert im[0].endswith('landsat.tiff') and os.path.exists(im[0])
Example #19
def test_validate():
    config.reset()
    test_str = '''
    train:
      network:
        chunk_size: -1
    '''
    with pytest.raises(ValueError):
        config.load(yaml_str=test_str)
    config.reset()
    test_str = '''
    train:
      network:
        chunk_size: string
    '''
    with pytest.raises(TypeError):
        config.load(yaml_str=test_str)
Example #20
def test_callbacks():
    config_reset()
    test_str = '''
    train:
      callbacks:
        - EarlyStopping:
            verbose: true
        - ReduceLROnPlateau:
            factor: 0.5
    '''
    config.load(yaml_str=test_str)
    cbs = config_parser.config_callbacks()
    assert len(cbs) == 2
    assert isinstance(cbs[0], tf.keras.callbacks.EarlyStopping)
    assert cbs[0].verbose
    assert isinstance(cbs[1], tf.keras.callbacks.ReduceLROnPlateau)
    assert cbs[1].factor == 0.5
Example #21
def test_network_file():
    config.reset()
    test_str = '''
    dataset:
      classes: 3
    train:
      network:
        chunk_size: 5
        model:
          yaml_file: networks/convpool.yaml
    '''
    config.load(yaml_str=test_str)
    assert config.train.network.chunk_size() == 5
    model = model_parser.config_model(2)()
    assert model.input_shape == (None, config.train.network.chunk_size(), config.train.network.chunk_size(), 2)
    assert model.output_shape == (None, config.train.network.output_size(),
                                  config.train.network.output_size(), len(config.dataset.classes))
Example #22
def test_mlflow():
    config_reset()

    test_str = '''
    mlflow:
      enabled: false
      uri: nonsense
      experiment_name: name
      frequency: 5
      checkpoints:
        frequency: 10
    '''
    config.load(yaml_str=test_str)

    assert not config.mlflow.enabled()
    assert config.mlflow.uri() == 'nonsense'
    assert config.mlflow.frequency() == 5
    assert config.mlflow.experiment() == 'name'
    assert config.mlflow.checkpoints.frequency() == 10
Example #23
def test_preprocess():
    config_reset()
    test_str = '''
    dataset:
      images:
        preprocess:
          - scale:
              factor: 2.0
          - offset:
              factor: 1.0
          - clip:
              bounds: [0, 5]
    '''
    config.load(yaml_str=test_str)
    f = config.dataset.images().preprocess()
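    # the asserts below are consistent with 'scale' dividing by its factor:
    # each value x maps to clip(x / 2.0 + 1.0, 0, 5)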
    assert f(np.asarray([0.0]), None, None) == 1.0
    assert f(np.asarray([2.0]), None, None) == 2.0
    assert f(np.asarray([-5.0]), None, None) == 0.0
    assert f(np.asarray([20.0]), None, None) == 5.0
Example #24
def autoencoder(all_sources):
    source = all_sources[0]
    conftest.config_reset()
    (image_path, _) = source[0]
    config.load(yaml_str='''
                io:
                  cache:
                    dir: %s
                dataset:
                  images:
                    type: %s
                    directory: %s
                    extension: %s
                    preprocess: ~''' %
                (os.path.dirname(image_path), source[2],
                 os.path.dirname(image_path), source[1]))

    dataset = imagery_dataset.AutoencoderDataset(
        config.dataset.images(), (3, 3), stride=config.train.spec().stride)
    return dataset
Example #25
File: test_config.py  Project: nasa/delta
def test_augmentations():
    config_reset()
    test_str = '''
    train:
      augmentations:
        - random_flip_left_right:
            probability: 1.0
        - random_flip_up_down:
            probability: 1.0
    '''
    config.load(yaml_str=test_str)
    aug = config_parser.config_augmentation()
    a = tf.constant(np.expand_dims(np.array([[0, 1], [2, 3]]), (0, 3)))
    o = tf.constant(np.expand_dims(np.array([[3, 2], [1, 0]]), (0, 3)))
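    # with probability 1.0 both flips always fire: left-right then up-down
    # turns [[0, 1], [2, 3]] into [[3, 2], [1, 0]]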
    (b, c) = aug(a, a)
    assert (b.numpy() == o.numpy()).all()
    assert (c.numpy() == o.numpy()).all()

    config_reset()
    test_str = '''
    train:
      augmentations:
        - random_flip_left_right:
            probability: 1.0
        - random_flip_up_down:
            probability: 1.0
        - random_rotate:
            probability: 1.0
            max_angle: 0.5
        - random_translate:
            probability: 1.0
            max_pixels: 10
    '''
    config.load(yaml_str=test_str)
    aug = config_parser.config_augmentation()
    a = tf.constant(np.expand_dims(np.array([[0, 1], [2, 3]]), (0, 3)))
    o = tf.constant(np.expand_dims(np.array([[3, 2], [1, 0]]), (0, 3)))
    (b, c) = aug(a, a)
    # just make sure it doesn't crash
    assert b.numpy().shape == o.numpy().shape
    assert c.numpy().shape == o.numpy().shape
Example #26
def test_classes():
    config.reset()
    test_str = '''
    dataset:
      classes: 2
    '''
    config.load(yaml_str=test_str)
    assert len(config.dataset.classes) == 2
    for (i, c) in enumerate(config.dataset.classes):
        assert c.value == i
    assert config.dataset.classes.weights() is None
    config.reset()
    test_str = '''
    dataset:
      classes:
        - 2:
            name: 2
            color: 2
            weight: 5.0
        - 1:
            name: 1
            color: 1
            weight: 1.0
        - 5:
            name: 5
            color: 5
            weight: 2.0
    '''
    config.load(yaml_str=test_str)
    assert config.dataset.classes
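    # class entries are ordered by value, so iteration yields 1, 2, 5
    # and weights() follows the same sorted order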
    values = [1, 2, 5]
    for (i, c) in enumerate(config.dataset.classes):
        e = values[i]
        assert c.value == e
        assert c.name == str(e)
        assert c.color == e
    assert config.dataset.classes.weights() == [1.0, 5.0, 2.0]
    arr = np.array(values)
    ind = config.dataset.classes.classes_to_indices_func()(arr)
    assert np.max(ind) == 2
    assert (config.dataset.classes.indices_to_classes_func()(ind) == values).all()