# Example 1
    'latent':
    128,
    'encode': [
        (32, 3, (2, 2)),  # out: 64, 64, 32
        (64, 3, (2, 2)),  # out: 32, 32, 64
        (128, 3, (2, 2)),  # out: 16, 16, 128
        (256, 3, (2, 2)),  # out: 8, 8, 256
    ],
    'decode':
    None,  # Mirror encoding for reconstruction
    'name':
    'face_cvae_3'
}

# Rebuild the model from the architecture definition and restore trained weights.
model = CVAE(arch_def)
model.load_weights('models/face_cvae_3/face_cvae_3.weights')

# Collect every .jpg in the test directory as string paths.
test_root = '/home/martin/dataset/face_test'
test_imgs = [str(p) for p in Path(test_root).glob('*.jpg')]
batch_size = 8
epochs = 16
loss = []
tb = CVAEToolBox(model)

# Reconstruct the first five test images and show the inputs on the top
# row of a 2x5 subplot grid (bottom row presumably reserved for the
# reconstructed outputs — confirm against the rest of the notebook).
for i, img_path in enumerate(test_imgs[:5]):
    inp, outp = tb.load_and_reconstruct_image(img_path)
    plt.subplot(2, 5, 1 + i)
    plt.imshow(inp)
    plt.title('Input')
    plt.axis('off')
# Example 2
def load_model_weights(latent_dim, dimension, filename):
    """Build a CVAE with the given latent size and input dimension,
    then restore its parameters from *filename*."""
    cvae = CVAE(latent_dim, dimension)
    cvae.load_weights(filename)
    return cvae
# Example 3
        'encode': [
            (32, 3, (2, 2)),  # out: 256, 256, 32
            (64, 3, (2, 2)),  # out: 128, 128, 64
            (128, 3, (2, 2)),  # out: 64, 64, 128
            (256, 3, (2, 2)),  # out: 32, 32, 256
            (512, 3, (2, 2)),  # out: 16, 16, 512
            (1024, 3, (2, 2)),  # out: 8, 8, 1024
            (1024, 3, (2, 2)),  # out: 4, 4, 1024
        ],
        'decode':
        None,  # Mirror encoding for reconstruction
        'name':
        'face_cvae_1'
    }
    model = CVAE(arch_def)
    model.load_weights(weights_path)
else:
    model = CVAE.from_file(arch_def_path, weights_path)

# Preprocessing function matching the model's declared input spec.
load_and_preprocess = get_load_and_preprocess_func(model.arch_def['input'])

image_paths = [
    '/home/martin/Desktop/data/validation_set/Bakeriet_4-Action_sequence_00336.jpg',
    '/home/martin/Desktop/data/validation_set/Stand_up_small_group_2.jpg',
]

toolbox = CVAEToolBox(model)

# Convert both validation images to preprocessed tensors with a batch axis.
a, b = (
    toolbox.to_tensor(path, with_batch_dim=True, preprocess=True)
    for path in image_paths
)