Example 1
def _prep_datasets(ids, tc):
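    """Build the batched training and (optional) validation tf.data pipelines
    from an imagery dataset and the training config.

    Returns a (training, validation) tuple; validation may be None."""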
    ds = ids.dataset(config.dataset.classes.weights())

    validation = None
    if tc.validation:
        if tc.validation.from_training:
            validation = ds.take(tc.validation.steps)
            ds = ds.skip(tc.validation.steps)
        else:
            vimg   = tc.validation.images
            vlabel = tc.validation.labels
            if not vimg:
                validation = None
            else:
                if vlabel:
                    vimagery = ImageryDataset(vimg, vlabel, ids.output_shape(), ids.chunk_shape(),
                                              tile_shape=ids.tile_shape(), stride=ids.stride(),
                                              tile_overlap=ids.tile_overlap())
                else:
                    vimagery = AutoencoderDataset(vimg, ids.chunk_shape(), tile_shape=ids.tile_shape(),
                                                  stride=ids.stride(), tile_overlap=ids.tile_overlap())
                validation = vimagery.dataset(config.dataset.classes.weights())
                if tc.validation.steps:
                    validation = validation.take(tc.validation.steps)
        if validation:
            validation = validation.batch(tc.batch_size, drop_remainder=True).prefetch(1)
    else:
        validation = None

    ds = ds.batch(tc.batch_size, drop_remainder=True)
    ds = ds.prefetch(1)
    if tc.steps:
        ds = ds.take(tc.steps)
    return (ds, validation)
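
A note on usage: the (ds, validation) tuple returned above is typically passed straight to Keras. Below is a minimal, self-contained sketch of the same split-then-batch-then-prefetch pattern using synthetic tensors in place of the ImageryDataset used above; the prep_datasets helper, the sizes, and the toy model are illustrative assumptions, not code from the project.

import tensorflow as tf

def prep_datasets(ds, batch_size, validation_steps, train_steps=None):
    # Carve the validation examples off the front of the element stream before
    # batching, then batch and prefetch both pipelines (Example 1 splits and
    # batches in this order). Here the validation slice is counted in batches.
    validation = ds.take(validation_steps * batch_size)
    ds = ds.skip(validation_steps * batch_size)
    validation = validation.batch(batch_size, drop_remainder=True).prefetch(1)
    ds = ds.batch(batch_size, drop_remainder=True).prefetch(1)
    if train_steps:
        ds = ds.take(train_steps)
    return ds, validation

# Synthetic stand-ins for image chunks and labels.
images = tf.random.uniform((64, 16, 16, 3))
labels = tf.random.uniform((64,), maxval=2, dtype=tf.int32)
pairs = tf.data.Dataset.from_tensor_slices((images, labels))

train, val = prep_datasets(pairs, batch_size=8, validation_steps=2)

model = tf.keras.Sequential([
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(2, activation='softmax'),
])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
model.fit(train, validation_data=val, epochs=1)
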
Example 2
def _prep_datasets(ids, tc):
    if tc.max_tile_offset:
        # when nodata tiles are filtered out, the number of tiles changes,
        # so an explicit step count is required
        assert tc.steps, 'max_tile_offset only supported with steps set.'
    ds = ids.dataset(config.dataset.classes.weights(), config_augmentation())

    validation = None
    if tc.validation:
        if tc.validation.from_training:
            validation = ds.take(tc.validation.steps)
            ds = ds.skip(tc.validation.steps)
        else:
            vimg   = tc.validation.images
            vlabel = tc.validation.labels
            if not vimg:
                validation = None
            else:
                if vlabel:
                    vimagery = ImageryDataset(vimg, vlabel, ids.output_shape(), ids.chunk_shape(),
                                              tile_shape=ids.tile_shape(), stride=ids.stride(),
                                              tile_overlap=ids.tile_overlap())
                else:
                    vimagery = AutoencoderDataset(vimg, ids.chunk_shape(), tile_shape=ids.tile_shape(),
                                                  stride=ids.stride(), tile_overlap=ids.tile_overlap())
                validation = vimagery.dataset(config.dataset.classes.weights())
        if validation:
            validation = validation.batch(tc.batch_size, drop_remainder=True)
    else:
        validation = None

    ds = ds.batch(tc.batch_size, drop_remainder=True)
    return (ds, validation)
Example 3
def _prep_datasets(ids, tc, chunk_size, output_size):
    ds = ids.dataset(config.dataset.classes.weights())
    ds = ds.batch(tc.batch_size)
    # ds = ds.cache()
    ds = ds.prefetch(tf.data.experimental.AUTOTUNE)
    if tc.validation:
        if tc.validation.from_training:
            validation = ds.take(tc.validation.steps)
            ds = ds.skip(tc.validation.steps)
        else:
            vimg = tc.validation.images
            vlabel = tc.validation.labels
            if not vimg:
                validation = None
            else:
                if vlabel:
                    vimagery = ImageryDataset(vimg,
                                              vlabel,
                                              chunk_size,
                                              output_size,
                                              tc.chunk_stride,
                                              resume_mode=False)
                else:
                    vimagery = AutoencoderDataset(vimg,
                                                  chunk_size,
                                                  tc.chunk_stride,
                                                  resume_mode=False)
                validation = vimagery.dataset().batch(tc.batch_size)
                if tc.validation.steps:
                    validation = validation.take(tc.validation.steps)
        # validation = validation.prefetch(4)  # or tf.data.experimental.AUTOTUNE
    else:
        validation = None
    if tc.steps:
        ds = ds.take(tc.steps)
    # ds = ds.prefetch(4)  # or tf.data.experimental.AUTOTUNE
    ds = ds.repeat(tc.epochs)
    return (ds, validation)
Example 4
def _prep_datasets(ids, tc, chunk_size, output_size):
    ds = ids.dataset()
    ds = ds.batch(tc.batch_size)
    if tc.validation:
        if tc.validation.from_training:
            validation = ds.take(tc.validation.steps)
            ds = ds.skip(tc.validation.steps)
        else:
            vimg = tc.validation.images
            vlabel = tc.validation.labels
            if not vimg or not vlabel:
                validation = None
            else:
                vimagery = ImageryDataset(vimg, vlabel, chunk_size,
                                          output_size, tc.chunk_stride)
                validation = vimagery.dataset().batch(tc.batch_size).take(
                    tc.validation.steps)
    else:
        validation = None
    if tc.steps:
        ds = ds.take(tc.steps)
    ds = ds.repeat(tc.epochs)
    return (ds, validation)
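
Examples 3 and 4 also repeat the training dataset for tc.epochs, which pairs with a single fit call that uses steps_per_epoch to mark epoch boundaries. A minimal sketch of that calling pattern follows; the sizes and placeholder model are assumptions for illustration, not values from the examples.

import tensorflow as tf

# Synthetic stand-in for the training dataset.
pairs = tf.data.Dataset.from_tensor_slices(
    (tf.random.uniform((32, 4)), tf.random.uniform((32,), maxval=2, dtype=tf.int32)))
batch_size, steps, epochs = 8, 4, 3
# Batch, limit to the configured step count, then repeat for every epoch,
# matching the order used in Examples 3 and 4.
ds = pairs.batch(batch_size).take(steps).repeat(epochs)

model = tf.keras.Sequential([tf.keras.layers.Dense(2, activation='softmax')])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
# With a repeated dataset, steps_per_epoch tells Keras where each epoch ends.
model.fit(ds, epochs=epochs, steps_per_epoch=steps)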