Example #1
        print(label)
        # Keep at most four sample images per label
        if len(label2image[label]) >= 4:
            continue
        label2image[label].append(image)

    for label, images in label2image.items():
        # Show up to four sample images per label in a 2x2 grid
        for i, image in enumerate(images[:4]):
            plt.subplot(2, 2, 1 + i)
            plt.imshow(image)
            plt.title(f'label {label}')
            plt.axis('off')
        plt.show()


image_root = '/home/martin/Desktop/data/darknet_data/openimgs_extra_v2'
# Restore a trained CVAE from its saved architecture definition and weights
model = CVAE.from_file('models/oi_cvae_5/oi_cvae_5.state',
                       'models/oi_cvae_5/oi_cvae_5.weights')
tb = CVAEToolBox(model)
images = ImageFeed(image_root, tb)  # iterator yielding image tensors from image_root

set_size = 100000

kmeans = Kmeans(49)  # cluster into 49 groups
load = False         # True: reuse centroids saved by a previous run
save = True          # persist the newly fitted centroids to disk

if load:
    # Load centroids computed on a previous run
    kmeans.cluster_centers_ = np.load('centroids.npy')
else:
    # Collect a batch of image tensors to fit the clustering on
    batch = []
    for _ in tqdm(range(set_size), leave=False, desc='loading  '):
        tensor, _ = next(images)
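
The example is cut off here by the source page. As a rough, self-contained sketch of the pattern it is building towards (encode a batch of images, fit k-means on the resulting vectors, then save or load the centroids), with scikit-learn's KMeans standing in for the custom Kmeans class and a placeholder encode function in place of the real CVAE call:

import numpy as np
from sklearn.cluster import KMeans

def encode(tensor):
    # Placeholder for the CVAE encoding step; here it just flattens the input.
    return np.asarray(tensor).ravel()

def fit_or_load_centroids(tensors, n_clusters=49, load=False, save=True,
                          path='centroids.npy'):
    kmeans = KMeans(n_clusters=n_clusters, n_init=10)
    if load:
        # Reuse centroids computed on an earlier run
        kmeans.cluster_centers_ = np.load(path)
    else:
        # Encode every image and fit k-means on the stacked vectors
        latents = np.stack([encode(t) for t in tensors])
        kmeans.fit(latents)
        if save:
            np.save(path, kmeans.cluster_centers_)
    return kmeans

# Usage with random stand-in data: 1000 vectors of dimension 32, 10 clusters
km = fit_or_load_centroids(np.random.rand(1000, 32), n_clusters=10, save=False)
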
Example #2
            (64, 3, (2, 2)),  # out: 128, 128, 64
            (128, 3, (2, 2)),  # out: 64, 64, 128
            (256, 3, (2, 2)),  # out: 32, 32, 256
            (512, 3, (2, 2)),  # out: 16, 16, 512
            (1024, 3, (2, 2)),  # out: 8, 8, 1024
            (1024, 3, (2, 2)),  # out: 4, 4, 1024
        ],
        'decode': None,  # mirror the encoder for reconstruction
        'name': 'face_cvae_1'
    }
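    # The 'encode' entries above read as (filters, kernel_size, strides): each
    # (2, 2) stride halves the spatial resolution, which the "out:" comments trace
    # from a 256x256 input down to the 4x4x1024 bottleneck.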
    model = CVAE(arch_def)
    model.load_weights(weights_path)
else:
    model = CVAE.from_file(arch_def_path, weights_path)

load_and_preprocess = get_load_and_preprocess_func(model.arch_def['input'])

image_paths = [
    '/home/martin/Desktop/data/validation_set/Bakeriet_4-Action_sequence_00336.jpg',
    '/home/martin/Desktop/data/validation_set/Stand_up_small_group_2.jpg'
]

toolbox = CVAEToolBox(model)

# Convert both images to batched, preprocessed input tensors
a = toolbox.to_tensor(image_paths[0], with_batch_dim=True, preprocess=True)
b = toolbox.to_tensor(image_paths[1], with_batch_dim=True, preprocess=True)

images = toolbox.interpolate_between_images(a, b, steps=12)  # 12 intermediate images
a_img, a_latent = toolbox.load_and_reconstruct_image(image_paths[0])  # reconstruction and latent code
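
What a call like interpolate_between_images(a, b, steps=12) typically does for a VAE is linear interpolation in latent space. A minimal sketch of that idea, using random placeholder latent vectors instead of the CVAE encodings and omitting the decoding step, since the real API is not shown in this excerpt:

import numpy as np

def interpolate_latents(z_a, z_b, steps=12):
    # Blend two latent vectors linearly; decoding each blend with the VAE
    # decoder yields a gradual morph between the two source images.
    alphas = np.linspace(0.0, 1.0, steps)
    return [(1.0 - alpha) * z_a + alpha * z_b for alpha in alphas]

# Placeholder latents; in the original code these would come from encoding the two images
z_a = np.random.randn(128)
z_b = np.random.randn(128)
blended = interpolate_latents(z_a, z_b, steps=12)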