Code Example #1
def load_dataset(source, output_size, chunk_size=3, autoencoder=False):
    config_reset()
    (image_path, label_path) = source[0]
    config.load(yaml_str=
                f'''
                io:
                  cache:
                    dir: {os.path.dirname(image_path)}
                dataset:
                  images:
                    type: {source[2]}
                    directory: {os.path.dirname(image_path)}
                    extension: {source[1]}
                    preprocess: ~
                  labels:
                    type: {source[4]}
                    directory: {os.path.dirname(label_path)}
                    extension: {source[3]}
                    preprocess: ~''')

    if autoencoder:
        return imagery_dataset.AutoencoderDataset(config.dataset.images(), (chunk_size, chunk_size),
                                                  tile_shape=config.io.tile_size(),
                                                  stride=config.train.spec().stride)
    return imagery_dataset.ImageryDataset(config.dataset.images(), config.dataset.labels(),
                                          (output_size, output_size),
                                          (chunk_size, chunk_size), tile_shape=config.io.tile_size(),
                                          stride=config.train.spec().stride)
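The source argument bundles the fixture's paths and formats into one tuple: judging from the indexing above, source[0] is the (image_path, label_path) pair, source[1]/source[2] are the image extension and type, and source[3]/source[4] are the label extension and type. The call below is a hypothetical usage sketch built on that reading; the paths and format strings are made up for illustration.

# Hypothetical usage sketch -- tuple layout inferred from the indexing above,
# paths and format names are illustrative only.
source = (('/tmp/data/img_0001.tiff', '/tmp/data/lbl_0001.tiff'),  # source[0]: (image_path, label_path)
          '.tiff',   # source[1]: image extension
          'tiff',    # source[2]: image type
          '.tiff',   # source[3]: label extension
          'tiff')    # source[4]: label type
ds = load_dataset(source, output_size=32, chunk_size=3)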
Code Example #2
File: train.py  Project: turpinhill/delta
def main(options):
    images = config.dataset.images()
    if not images:
        print('No images specified.', file=sys.stderr)
        return 1
    tc = config.train.spec()
    if options.autoencoder:
        ids = imagery_dataset.AutoencoderDataset(
            images, config.train.network.chunk_size(), tc.chunk_stride)
    else:
        labels = config.dataset.labels()
        if not labels:
            print('No labels specified.', file=sys.stderr)
            return 1
        ids = imagery_dataset.ImageryDataset(
            images, labels, config.train.network.chunk_size(),
            config.train.network.output_size(), tc.chunk_stride)

    try:
        if options.resume is not None:
            model = tf.keras.models.load_model(options.resume,
                                               custom_objects=ALL_LAYERS)
        else:
            model = config_model(ids.num_bands())
        model, _ = train(model, ids, tc)

        if options.model is not None:
            model.save(options.model)
    except KeyboardInterrupt:
        print()
        print('Training cancelled.')

    return 0
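main() only reads three attributes from options: autoencoder, resume, and model. The snippet below is a minimal driver sketch that supplies those attributes with argparse; it is not delta's actual command-line interface, just enough scaffolding to call the function in isolation.

import argparse

# Minimal sketch of an options object carrying the attributes main() reads
# (autoencoder, resume, model). This is not delta's real argument parser.
parser = argparse.ArgumentParser()
parser.add_argument('--autoencoder', action='store_true')
parser.add_argument('--resume', default=None)
parser.add_argument('--model', default=None)
options = parser.parse_args(['--model', 'out_model.h5'])
sys.exit(main(options))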
Code Example #3
def load_dataset(source, output_size):
    config.reset() # don't load any user files
    (image_path, label_path) = source[0]
    config.load(yaml_str=
                '''
                io:
                  cache:
                    dir: %s
                dataset:
                  images:
                    type: %s
                    directory: %s
                    extension: %s
                    preprocess:
                      enabled: false
                  labels:
                    type: %s
                    directory: %s
                    extension: %s
                    preprocess:
                      enabled: false
                train:
                  network:
                    chunk_size: 3
                mlflow:
                  enabled: false''' %
                (os.path.dirname(image_path), source[2], os.path.dirname(image_path), source[1],
                 source[4], os.path.dirname(label_path), source[3]))

    dataset = imagery_dataset.ImageryDataset(config.dataset.images(), config.dataset.labels(),
                                             config.train.network.chunk_size(), output_size,
                                             config.train.spec().chunk_stride)
    return dataset
Code Example #4
def main(options):

    log_folder = config.dataset.log_folder()
    if log_folder:
        if not options.resume: # Start fresh and clear the read logs
            os.system('rm ' + log_folder + '/*')
            print('Dataset progress recording in: ' + log_folder)
        else:
            print('Resuming dataset progress recorded in: ' + log_folder)

    start_time = time.time()
    images = config.dataset.images()
    if not images:
        print('No images specified.', file=sys.stderr)
        return 1
    tc = config.train.spec()
    if options.autoencoder:
        ids = imagery_dataset.AutoencoderDataset(images, config.train.network.chunk_size(),
                                                 tc.chunk_stride, resume_mode=options.resume,
                                                 log_folder=log_folder)
    else:
        labels = config.dataset.labels()
        if not labels:
            print('No labels specified.', file=sys.stderr)
            return 1
        ids = imagery_dataset.ImageryDataset(images, labels, config.train.network.chunk_size(),
                                             config.train.network.output_size(), tc.chunk_stride,
                                             resume_mode=options.resume,
                                             log_folder=log_folder)

    try:
        if options.resume is not None:
            model = tf.keras.models.load_model(options.resume, custom_objects=ALL_LAYERS)
        else:
            model = config_model(ids.num_bands())
        model, _ = train(model, ids, tc)

        if options.model is not None:
            save_model(model, options.model)
    except KeyboardInterrupt:
        print()
        print('Training cancelled.')

    stop_time = time.time()
    print('Elapsed time = ', stop_time-start_time)
    return 0
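One caution on the log-clearing step: os.system('rm ' + log_folder + '/*') depends on a POSIX shell and breaks on paths containing spaces, and its return code is ignored. A shell-free equivalent (a sketch, not part of the project) would be:

from pathlib import Path

# Remove every regular file directly inside log_folder without invoking a shell.
for entry in Path(log_folder).glob('*'):
    if entry.is_file():
        entry.unlink()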
Code Example #5
def main(options):

    log_folder = config.train.log_folder()
    if log_folder:
        if not options.resume: # Start fresh and clear the read logs
            os.system('rm -f ' + log_folder + '/*')
            print('Dataset progress recording in: ' + log_folder)
        else:
            print('Resuming dataset progress recorded in: ' + log_folder)

    images = config.dataset.images()
    if not images:
        print('No images specified.', file=sys.stderr)
        return 1

    img = images.load(0)
    model = config_model(img.num_bands())
    if options.resume is not None:
        temp_model = tf.keras.models.load_model(options.resume, custom_objects=custom_objects())
    else:
        # this one is not built with proper scope, just used to get input and output shapes
        temp_model = model()

    start_time = time.time()
    tile_size = config.io.tile_size()
    tile_overlap = None
    stride = config.train.spec().stride

    # compute input and output sizes
    if temp_model.input_shape[1] is None:
        in_shape = None
        out_shape = temp_model.compute_output_shape((0, tile_size[0], tile_size[1], temp_model.input_shape[3]))
        out_shape = out_shape[1:3]
        tile_overlap = (tile_size[0] - out_shape[0], tile_size[1] - out_shape[1])
    else:
        in_shape = temp_model.input_shape[1:3]
        out_shape = temp_model.output_shape[1:3]

    if options.autoencoder:
        ids = imagery_dataset.AutoencoderDataset(images, in_shape, tile_shape=tile_size,
                                                 tile_overlap=tile_overlap, stride=stride)
    else:
        labels = config.dataset.labels()
        if not labels:
            print('No labels specified.', file=sys.stderr)
            return 1
        ids = imagery_dataset.ImageryDataset(images, labels, out_shape, in_shape,
                                             tile_shape=tile_size, tile_overlap=tile_overlap,
                                             stride=stride)
    if log_folder is not None:
        ids.set_resume_mode(options.resume, log_folder)

    assert temp_model.input_shape[1] == temp_model.input_shape[2], 'Must have square chunks in model.'
    assert temp_model.input_shape[3] == ids.num_bands(), 'Model takes wrong number of bands.'
    tf.keras.backend.clear_session()

    try:
        model, _ = train(model, ids, config.train.spec(), options.resume)

        if options.model is not None:
            save_model(model, options.model)
    except KeyboardInterrupt:
        print('Training cancelled.')

    stop_time = time.time()
    print('Elapsed time = ', stop_time-start_time)
    return 0
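The shape logic above distinguishes fully convolutional models (input_shape[1] is None), where the output window is derived from the configured tile size, from fixed-chunk models, which simply report their own input and output shapes. A worked example of the overlap arithmetic, with illustrative numbers:

# Illustrative numbers for the fully convolutional branch above: a model whose
# 'valid' convolutions shrink a 256x256 tile to 244x244 leaves a 12-pixel
# border per tile, so neighbouring tiles must overlap by that amount.
tile_size = (256, 256)   # config.io.tile_size()
out_shape = (244, 244)   # what compute_output_shape() might report
tile_overlap = (tile_size[0] - out_shape[0], tile_size[1] - out_shape[1])
assert tile_overlap == (12, 12)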
Code Example #6
def main(options):
    images = config.dataset.images()
    if not images:
        print('No images specified.', file=sys.stderr)
        return 1
    img = images.load(0)
    model = config_model(img.num_bands())
    temp_model = model()

    tile_size = config.io.tile_size()
    tile_overlap = None
    stride = config.train.spec().stride

    # compute input and output sizes
    if temp_model.input_shape[1] is None:
        in_shape = None
        out_shape = temp_model.compute_output_shape((0, tile_size[0], tile_size[1], temp_model.input_shape[3]))
        out_shape = out_shape[1:3]
        tile_overlap = (tile_size[0] - out_shape[0], tile_size[1] - out_shape[1])
    else:
        in_shape = temp_model.input_shape[1:3]
        out_shape = temp_model.output_shape[1:3]

    if options.autoencoder:
        ids = imagery_dataset.AutoencoderDataset(images, in_shape, tile_shape=tile_size,
                                                 tile_overlap=tile_overlap, stride=stride)
    else:
        labels = config.dataset.labels()
        if not labels:
            print('No labels specified.', file=sys.stderr)
            return 1
        ids = imagery_dataset.ImageryDataset(images, labels, out_shape, in_shape,
                                             tile_shape=tile_size, tile_overlap=tile_overlap,
                                             stride=stride)

    assert temp_model.input_shape[1] == temp_model.input_shape[2], 'Must have square chunks in model.'
    assert temp_model.input_shape[3] == ids.num_bands(), 'Model takes wrong number of bands.'
    tf.keras.backend.clear_session()

    #colormap = np.zeros(dtype=np.uint8, shape=(len(config.dataset.classes), 3))
    #for c in config.dataset.classes:
    #    print(len(config.dataset.classes), c.value)
    #    colormap[c.value][0] = (c.color >> 32) & 0xFF
    #    colormap[c.value][1] = (c.color >> 16) & 0xFF
    #    colormap[c.value][2] = c.color & 0xFF

    images = []
    labels = []
    PLOT_AT_ONCE=7

    for result in ids.dataset(config.dataset.classes.weights(), config_augmentation()):
        image = result[0].numpy()
        label = result[1].numpy()
        pw = (image.shape[0] - label.shape[0]) // 2
        ph = (image.shape[1] - label.shape[1]) // 2
        label = np.pad(label, ((pw, pw), (ph, ph), (0, 0)))
        images.append(image)
        labels.append(label)
        if len(images) == PLOT_AT_ONCE:
            plot_images(images, labels)
            images = []
            labels = []

    return 0
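The np.pad call centres each label chunk inside its larger image chunk so the two can be overlaid by plot_images. With illustrative sizes, a 64x64 image chunk and a 52x52 label give a symmetric 6-pixel pad:

import numpy as np

# Centre a smaller label chunk inside the image chunk, as in the loop above.
# The sizes are illustrative only.
image = np.zeros((64, 64, 3))
label = np.zeros((52, 52, 1))
pw = (image.shape[0] - label.shape[0]) // 2   # (64 - 52) // 2 == 6
ph = (image.shape[1] - label.shape[1]) // 2   # 6
padded = np.pad(label, ((pw, pw), (ph, ph), (0, 0)))
assert padded.shape[:2] == image.shape[:2]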
Code Example #7
def main(options):
    if mixed_policy_device_compatible(
    ) and not config.train.disable_mixed_precision():
        mixed_precision.set_global_policy('mixed_float16')
        print(
            'Tensorflow Mixed Precision is enabled. This improves training performance on compatible GPUs. '
            'However certain precautions should be taken and several additional changes can be made to improve '
            'performance further. Details: https://www.tensorflow.org/guide/mixed_precision#summary'
        )

    images = config.dataset.images()
    if not images:
        print('No images specified.', file=sys.stderr)
        return 1

    img = images.load(0)
    model = config_model(img.num_bands())
    if options.resume is not None and not options.resume.endswith('.h5'):
        temp_model = load_model(options.resume)
    else:
        # this one is not built with proper scope, just used to get input and output shapes
        temp_model = model()

    start_time = time.time()
    tile_size = config.io.tile_size()
    tile_overlap = None
    stride = config.train.spec().stride

    # compute input and output sizes
    if temp_model.input_shape[1] is None:
        in_shape = None
        out_shape = temp_model.compute_output_shape(
            (0, tile_size[0], tile_size[1], temp_model.input_shape[3]))
        out_shape = out_shape[1:3]
        tile_overlap = (tile_size[0] - out_shape[0],
                        tile_size[1] - out_shape[1])
    else:
        in_shape = temp_model.input_shape[1:3]
        out_shape = temp_model.output_shape[1:3]

    if options.autoencoder:
        ids = imagery_dataset.AutoencoderDataset(
            images,
            in_shape,
            tile_shape=tile_size,
            tile_overlap=tile_overlap,
            stride=stride,
            max_rand_offset=config.train.spec().max_tile_offset)
    else:
        labels = config.dataset.labels()
        if not labels:
            print('No labels specified.', file=sys.stderr)
            return 1
        ids = imagery_dataset.ImageryDataset(
            images,
            labels,
            out_shape,
            in_shape,
            tile_shape=tile_size,
            tile_overlap=tile_overlap,
            stride=stride,
            max_rand_offset=config.train.spec().max_tile_offset)

    assert temp_model.input_shape[1] == temp_model.input_shape[
        2], 'Must have square chunks in model.'
    assert temp_model.input_shape[3] == ids.num_bands(
    ), 'Model takes wrong number of bands.'
    tf.keras.backend.clear_session()

    # Try to have the internal model format we use match the output model format
    internal_model_extension = '.savedmodel'
    if options.model and ('.h5' in options.model):
        internal_model_extension = '.h5'
    try:
        model, _ = train(model, ids, config.train.spec(), options.resume,
                         internal_model_extension)

        if options.model is not None:
            save_model(model, options.model)
    except KeyboardInterrupt:
        print('Training cancelled.')

    stop_time = time.time()
    print('Elapsed time = ', stop_time - start_time)
    return 0
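The mixed_float16 policy only pays off on GPUs with Tensor Cores (NVIDIA compute capability 7.0 or higher). The snippet below is a hypothetical sketch of what a compatibility test like mixed_policy_device_compatible() could look like using standard TensorFlow calls; delta's actual implementation may differ.

import tensorflow as tf

def gpu_supports_mixed_precision(min_capability=(7, 0)):
    # Hypothetical compatibility check, not delta's implementation: look for a
    # GPU whose reported compute capability meets the threshold.
    for gpu in tf.config.list_physical_devices('GPU'):
        details = tf.config.experimental.get_device_details(gpu)
        capability = details.get('compute_capability')
        if capability is not None and capability >= min_capability:
            return True
    return False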