Example #1
        for b in range(batch_size):
            # Once the image list is exhausted, restart and reshuffle
            # (hypothetical reconstruction of the truncated loop head)
            if k >= len(imgNames):
                k = 0
                random.shuffle(imgNames)
            rgb1Batch[b, :, :, :], labelWVBatch[b, :, :, 0] = prepareImage(k)
            k += 1
        yield rgb1Batch, labelWVBatch


batch_size = 4

traingen = generator(imgList[:trainCount], batch_size=batch_size)
testgen = generator(imgList[trainCount:], batch_size=batch_size)

testCount = len(imgList[trainCount:])

model = FaceModel()
model.summary()
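# Resume from the previously saved weights (this load fails if the file does not exist yet)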
model.load_weights('./weights/faceModel.hdf5')
if not testMode:
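    # One pass over the training split per epoch: steps_per_epoch * batch_size ≈ trainCount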
    model.fit_generator(generator=traingen,
                        validation_data=testgen,
                        steps_per_epoch=int(trainCount / batch_size),
                        validation_steps=int(testCount / batch_size),
                        epochs=30000,
                        verbose=1,
                        callbacks=[
                            ModelCheckpoint('./weights/faceModel.hdf5',
                                            verbose=1,
                                            monitor='val_loss',
                                            save_best_only=False)
                        ])
else:
    k = 0
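Note: fit_generator is deprecated in TensorFlow 2, where model.fit accepts Python generators directly. A minimal sketch of the equivalent call, assuming TF 2.x, the same traingen/testgen generators as above, and ModelCheckpoint imported from tensorflow.keras.callbacks:

model.fit(traingen,
          validation_data=testgen,
          steps_per_epoch=trainCount // batch_size,
          validation_steps=testCount // batch_size,
          epochs=30000,
          verbose=1,
          callbacks=[ModelCheckpoint('./weights/faceModel.hdf5',
                                     verbose=1,
                                     monitor='val_loss',
                                     save_best_only=False)])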
Example #2
from itertools import zip_longest

import tensorflow as tf
from tqdm import tqdm

from configurations import DATASET_IMAGES
from helpers import load_tf_image


def grouper(iterable, n, fillvalue=None):
    "Collect data into fixed-length chunks or blocks"
    # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
    args = [iter(iterable)] * n
    return zip_longest(*args, fillvalue=fillvalue)
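# For instance, list(grouper(range(5), 2)) == [(0, 1), (2, 3), (4, None)];
# the last chunk is padded with the fillvalue (None by default).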


if __name__ == '__main__':
    # Create path:descriptor lookup

    model = FaceModel()
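    # Restore the weights saved during training before computing descriptors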
    model.load_weights('./checkpoints/my_checkpoint')

    image_paths = []
    descs = []
    for person_folder in DATASET_IMAGES.iterdir():
        if not person_folder.is_dir():
            continue

        for img_path in person_folder.glob('*.jpg'):
            image_paths.append(img_path)

    # Process 200 images at a time
    batches = list(grouper(image_paths, 200))
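    # grouper pads the final batch with None; those entries are skipped below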
    for batch in tqdm(batches):
        images = tf.stack([
            load_tf_image(img_path, augment=False)
            for img_path in batch if img_path is not None