Example No. 1
def prepare_batch(config):
    """Extract faces from the dataset."""
    # resolve the labelling function named in the configuration
    gl = utils.load_fun(config['get_label'])

    # build the sample stream: list the files, attach a label to each one,
    # load the image and resize it to the full working size
    stream = utils.load_fun(config['get_files'])(**config)
    stream = ds.stream(ds.add_label(gl), stream)
    stream = ds.stream(ds.apply_to_x(detect.load_img), stream)
    stream = ds.stream(
        ds.apply_to_x(adapters.resize(**config['full_image_size'])), stream
    )

    # group the stream into batches of the configured size
    batches = ds.stream_batch(stream, config['batch_size'])

    # per-image adapters applied inside every batch
    adapters_list = [
        ds.apply_to_x(ds.foreach(adapters.astype('uint8')))
    ]

    # optionally crop the detected face out of every image
    if config['has_faces']:
        adapters_list.append(
            ds.apply_to_x(ds.foreach(features.get_face()))
        )

    adapters_list.extend([
        ds.apply_to_x(ds.foreach(adapters.resize(**config['image_size']))),
        ds.apply_to_x(ds.foreach(adapters.astype('uint8')))
    ])

    batches = ds.batch_adapt(batches, adapters_list)

    return batches
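
The `ds` helpers above belong to the project, but the pattern they implement is a lazy pipeline: a stream of (x, y) samples is grouped into fixed-size batches, and every batch is pushed through an ordered list of adapter functions. The sketch below reproduces that pattern with plain Python generators; `stream_batch`, `batch_adapt` and the lambda adapters are illustrative stand-ins, not the project's actual API.

def stream_batch(stream, batch_size):
    """Group an iterable of (x, y) pairs into (xs, ys) batches."""
    batch = []
    for item in stream:
        batch.append(item)
        if len(batch) == batch_size:
            xs, ys = zip(*batch)
            yield list(xs), list(ys)
            batch = []
    if batch:
        xs, ys = zip(*batch)
        yield list(xs), list(ys)


def batch_adapt(batches, adapters_list):
    """Apply every adapter, in order, to each (xs, ys) batch."""
    for xs, ys in batches:
        for adapt in adapters_list:
            xs, ys = adapt(xs, ys)
        yield xs, ys


pairs = (('img%d' % i, i % 2) for i in range(5))
batches = stream_batch(pairs, batch_size=2)
batches = batch_adapt(batches, [
    lambda xs, ys: ([x.upper() for x in xs], ys),   # acts like apply_to_x
    lambda xs, ys: (xs, [float(y) for y in ys]),    # acts like apply_to_y
])
for xs, ys in batches:
    print(xs, ys)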
Example No. 2
def prepare_batch(filenames, config, epochs, *args, **kwargs):
    """Prepare a batch."""
    shape = config['image_size']['img_x'], config['image_size']['img_y'], 1

    filenames = dataset.epochs(filenames, epochs=epochs)

    get_label = utils.load_fun(config['get_label'])
    stream = dataset.get_data(filenames, get_label)

    batches = dataset.stream_batch(stream, config['batch_size'])

    # per-batch adapters: one-hot encode the labels, convert the images to
    # grayscale, resize them and adapt the matrices to the single-channel
    # input shape declared above
    adapters_list = [
        dataset.apply_to_y(dataset.foreach(dataset.categorical)),
        dataset.apply_to_x(dataset.foreach(adapters.rgb_to_bn)),
        dataset.apply_to_x(dataset.foreach(
            adapters.resize(**config['image_size'])
        )),
        dataset.apply_to_x(adapters.matrix_to_bn),
    ]

    # optionally augment the images with random distortions
    if 'distortions' in config:
        adapters_list.append(
            adapters.apply_distortion(
                ImageDataGenerator(**config['distortions'])
            )
        )

    adapters_list.append(dataset.apply_to_x(adapters.normalize(255)))

    batches = dataset.batch_adapt(batches, adapters_list)

    return batches, shape
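
For reference, a hypothetical configuration covering every key this `prepare_batch` reads is sketched below; the dotted path and the values are placeholders, not ones taken from the project.

config = {
    # dotted path resolved by utils.load_fun
    'get_label': 'mypackage.labels.label_from_filename',
    # keyword arguments passed to adapters.resize
    'image_size': {'img_x': 64, 'img_y': 64},
    'batch_size': 32,
    # optional: keyword arguments for keras' ImageDataGenerator
    'distortions': {'rotation_range': 10, 'horizontal_flip': True},
}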
Example No. 3
def prepare_batch(filenames, config, epochs, *args, **kwargs):
    """Prepare a batch."""
    # 68 facial landmarks with two coordinates each
    shape = 68 * 2
    # normalize coordinates by the largest image dimension
    norm_max = max(config['image_size'].values())

    filenames = dataset.epochs(filenames, epochs=epochs)

    get_label = load_fun(config['get_label'])
    stream = dataset.get_data(filenames, get_label, loader=np.load)

    batches = dataset.stream_batch(stream, config['batch_size'])
    batches = dataset.batch_adapt(batches, [
        dataset.apply_to_y(dataset.foreach(dataset.categorical)),
        dataset.apply_to_x(dataset.foreach(dataset.flatten)),
        dataset.apply_to_x(adapters.normalize(norm_max)),
    ])

    return batches, shape
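
Each sample here is presumably a landmark matrix loaded with np.load; the minimal numpy sketch below shows the per-sample transformation that the flatten and normalize adapters imply, using placeholder data rather than the project's loaders.

import numpy as np

landmarks = np.random.randint(0, 100, size=(68, 2))  # placeholder landmark matrix
norm_max = 100                                        # e.g. max(img_x, img_y)

x = landmarks.flatten().astype('float32') / norm_max
print(x.shape)                        # (136,) == 68 * 2, matching `shape`
print(x.min() >= 0.0, x.max() < 1.0)  # coordinates scaled into [0, 1)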
Example No. 4
def _prepare_submodels(filenames, config, epochs, type_):
    """Prepare submodels."""
    filenames = get_names_only(filenames)
    output_shape = 0

    batches_list = []
    for subconf in config['submodels']:
        # get filenames for the submodel
        subtrain = deepcopy(filenames)
        subtrain = attach_basepath(subconf['directory'], subtrain)
        if 'files_types' in subconf:
            subtrain = attach_filetype(subconf['files_types'][0], subtrain)
        subtrain = dataset.epochs(subtrain, epochs=epochs)
        # load keras submodel
        submodel = models.load_model(subconf['model'])

        # compute the output shape
        (_, shape) = submodel.output_shape
        output_shape += shape

        # derive the input shape from the submodel if it is not explicitly set
        if 'image_size' not in subconf:
            (_, img_x, img_y, _) = submodel.input_shape
            subconf['image_size'] = {'img_x': img_x, 'img_y': img_y}

        # propagate the global batch size to the submodel configuration
        subconf['batch_size'] = config['batch_size']

        # use a custom prediction function if one is defined
        to_predict = dataset.to_predict
        if 'to_predict' in subconf:
            to_predict = utils.load_fun(subconf['to_predict'])(type_=type_)

        # build prediction batches
        prepare = utils.load_fun(subconf['prepare_batch'])
        batches, _shape = prepare(subtrain, subconf, epochs)
        batches = dataset.batch_adapt(batches, [
            dataset.apply_to_x(to_predict(model=submodel)),
        ])

        batches_list.append(batches)

    # flatten each sample's features before the merged batches are returned
    todo = dataset.flatten

    return dataset.merge_batches(batches_list,
                                 adapters=[
                                     dataset.apply_to_x(dataset.foreach(todo))
                                 ]), output_shape
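
The merging step can be pictured with a self-contained numpy sketch: each submodel turns a sample into a flat prediction vector, the merged x is their concatenation, and its length is the accumulated output_shape. The arrays below are placeholders standing in for submodel predictions.

import numpy as np

pred_a = np.random.rand(128)   # e.g. a submodel with output_shape (None, 128)
pred_b = np.random.rand(64)    # e.g. a submodel with output_shape (None, 64)

merged = np.concatenate([pred_a.flatten(), pred_b.flatten()])
print(merged.shape)            # (192,) == 128 + 64, the summed output_shape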
Example No. 5
directory = "data/KDEF-straight"
shape_predictor = "data/shape_predictor_68_face_landmarks.dat"
img_x, img_y = 100, 100

filenames = dataset.get_files(directory)
filenames = list(filenames)

stream = get_data(filenames)

batches = dataset.stream_batch(stream, 1)

batches = dataset.batch_adapt(
    batches,
    [
        dataset.apply_to_y(dataset.foreach(dataset.categorical)),
        #  dataset.apply_to_x(dataset.foreach(
        #      adapters.resize(100, 100)
        #  )),
        dataset.apply_to_x(dataset.foreach(adapters.astype('uint8'))),
        #  dataset.apply_to_x(dataset.foreach(
        #      features.extract_shape(shape_predictor)
        #  )),
        # keep only the nose landmarks from the 68-point face shape
        dataset.apply_to_x(
            dataset.foreach(
                features.extract_part(
                    'nose', features.extract_shape(shape_predictor)))),
        #  dataset.apply_to_x(dataset.foreach(detect.shape2matrix)),
        #  dataset.apply_to_x(dataset.foreach(features.expand2image(100, 100)))
    ])
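
As a rough idea of what the nose extraction corresponds to outside the project's helpers, the sketch below uses dlib and OpenCV directly; the standard 68-point layout places the nose at landmark indices 27-35, and the image path is a placeholder.

import cv2
import dlib

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("data/shape_predictor_68_face_landmarks.dat")

img = cv2.imread("face.jpg")  # placeholder image path
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

for rect in detector(gray, 1):
    shape = predictor(gray, rect)
    nose = [(shape.part(i).x, shape.part(i).y) for i in range(27, 36)]
    print(nose)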