Example #1
import tensorflow as tf

# CUBDatabase, SML, and MiniImagenetModel are project-specific classes assumed
# to be importable from the surrounding repository.


def run_cub():
    cub_database = CUBDatabase()
    base_model = tf.keras.applications.VGG19(weights='imagenet')
    feature_model = tf.keras.models.Model(inputs=base_model.input,
                                          outputs=base_model.layers[24].output)  # layers[24] is VGG19's fc2 layer (4096-d features)

    sml = SML(
        database=cub_database,
        network_cls=MiniImagenetModel,
        n=5,
        k=1,
        k_val_ml=5,
        k_val_val=15,
        k_val_test=15,
        k_test=15,
        meta_batch_size=4,
        num_steps_ml=5,
        lr_inner_ml=0.05,
        num_steps_validation=5,
        save_after_iterations=3000,
        meta_learning_rate=0.001,
        n_clusters=5000,
        feature_model=feature_model,
        # feature_size=288,
        feature_size=4096,
        input_shape=(224, 224, 3),
        preprocess_function=tf.keras.applications.vgg19.preprocess_input,
        log_train_images_after_iteration=1000,
        number_of_tasks_val=100,
        number_of_tasks_test=1000,
        clip_gradients=True,
        report_validation_frequency=250,
        experiment_name='cub_imagenet_features')
    # sml.train(iterations=6000)
    sml.evaluate(iterations=50, seed=42, iterations_to_load_from=3000)
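The feature_model, feature_size, and n_clusters arguments suggest that SML builds pseudo-classes by clustering pretrained embeddings before sampling tasks. The snippet below is a minimal, self-contained sketch of that idea under those assumptions; it is not SML's own implementation, and the load_and_preprocess helper and the use of scikit-learn's KMeans are illustrative choices.

import numpy as np
import tensorflow as tf
from sklearn.cluster import KMeans


def cluster_pretrained_features(image_paths, n_clusters=50):
    # Hypothetical sketch (not part of SML): extract VGG19 fc2 features and
    # cluster them into pseudo-classes, the kind of preprocessing the
    # feature_model / n_clusters arguments above configure.
    base_model = tf.keras.applications.VGG19(weights='imagenet')
    feature_model = tf.keras.models.Model(inputs=base_model.input,
                                          outputs=base_model.layers[24].output)

    def load_and_preprocess(path):
        img = tf.keras.preprocessing.image.load_img(path, target_size=(224, 224))
        img = tf.keras.preprocessing.image.img_to_array(img)
        return tf.keras.applications.vgg19.preprocess_input(img)

    images = np.stack([load_and_preprocess(p) for p in image_paths])
    features = feature_model.predict(images, batch_size=32)  # shape (N, 4096)
    return KMeans(n_clusters=n_clusters).fit_predict(features)  # pseudo-labels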
Example #2
import tensorflow as tf
import tensorflow_hub as hub

# VoxCelebDatabase, CUBDatabase, OmniglotDatabase, SML, VoxCelebModel,
# MiniImagenetModel, and SimpleModel are project-specific classes assumed to be
# importable from the surrounding repository.


def run_celeba():
    vox_celeb_database = VoxCelebDatabase()
    feature_model = hub.Module("https://tfhub.dev/google/speech_embedding/1")  # TF1-style TF Hub module API

    sml = SML(
        database=vox_celeb_database,
        network_cls=VoxCelebModel,
        n=5,
        k_ml=1,
        k_val_ml=5,
        k_val_val=15,
        k_val_test=15,
        k_test=15,
        meta_batch_size=4,
        num_steps_ml=5,
        lr_inner_ml=0.05,
        num_steps_validation=5,
        save_after_iterations=15000,
        meta_learning_rate=0.001,
        n_clusters=20,
        feature_model=feature_model,
        # feature_size=288,
        feature_size=4096,
        input_shape=(224, 224, 3),
        preprocess_function=tf.keras.applications.vgg19.preprocess_input,
        log_train_images_after_iteration=1000,
        number_of_tasks_val=100,
        number_of_tasks_test=1000,
        clip_gradients=True,
        report_validation_frequency=250,
        experiment_name='voxceleb_embedding_features')
    sml.train(iterations=60000)
    sml.evaluate(iterations=50, seed=42)
def run_cub():
    cub_database = CUBDatabase()
    base_model = tf.keras.applications.InceptionResNetV2(weights=None)  # randomly initialized, no pretrained weights
    feature_model = tf.keras.models.Model(inputs=base_model.input,
                                          outputs=base_model.layers[-2].output)  # global average pooling layer (1536-d features)

    sml = SML(
        database=cub_database,
        network_cls=MiniImagenetModel,
        n=5,
        k=1,
        k_val_ml=5,
        k_val_val=15,
        k_val_test=15,
        k_test=1,
        meta_batch_size=4,
        num_steps_ml=5,
        lr_inner_ml=0.05,
        num_steps_validation=5,
        save_after_iterations=2000,
        meta_learning_rate=0.001,
        n_clusters=500,
        feature_model=feature_model,
        # feature_size=288,
        # feature_size=4096,
        feature_size=1536,
        input_shape=(224, 224, 3),
        preprocess_function=tf.keras.applications.inception_resnet_v2.preprocess_input,
        log_train_images_after_iteration=1000,
        number_of_tasks_val=100,
        number_of_tasks_test=1000,
        clip_gradients=True,
        report_validation_frequency=250,
        experiment_name='cub_resnet_features_unsupervised')
    sml.train(iterations=20000)
    sml.evaluate(iterations=50, seed=42)
def run_omniglot():
    omniglot_database = OmniglotDatabase(random_seed=47,
                                         num_train_classes=1200,
                                         num_val_classes=100)
    base_model = tf.keras.applications.VGG19(weights='imagenet')
    feature_model = tf.keras.models.Model(inputs=base_model.input,
                                          outputs=base_model.layers[24].output)

    sml = SML(
        database=omniglot_database,
        network_cls=SimpleModel,
        n=5,
        k=1,
        k_val_ml=5,
        k_val_val=15,
        k_val_test=15,
        k_test=1,
        meta_batch_size=4,
        num_steps_ml=5,
        lr_inner_ml=0.05,
        num_steps_validation=5,
        save_after_iterations=15000,
        meta_learning_rate=0.001,
        n_clusters=5000,
        feature_model=feature_model,
        # feature_size=288,
        feature_size=4096,
        input_shape=(224, 224, 3),
        preprocess_function=tf.keras.applications.vgg19.preprocess_input,
        log_train_images_after_iteration=1000,
        number_of_tasks_val=100,
        number_of_tasks_test=1000,
        clip_gradients=True,
        report_validation_frequency=250,
        experiment_name='omniglot_imagenet_features')
    sml.train(iterations=60000)
    sml.evaluate(iterations=50, seed=42)
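Omniglot characters are small grayscale images, while the ImageNet VGG19 feature extractor configured above expects (224, 224, 3) inputs. The helper below is a hypothetical sketch, not part of SML, of one way to bridge that gap; the function name and the decision to simply resize and replicate the channel are illustrative assumptions.

import tensorflow as tf


def omniglot_to_vgg_input(image_path):
    # Hypothetical sketch: bring a grayscale Omniglot image to the
    # (224, 224, 3) input_shape used by the VGG19 feature_model above.
    raw = tf.io.read_file(image_path)
    img = tf.image.decode_png(raw, channels=1)   # grayscale character
    img = tf.image.resize(img, (224, 224))       # upscale to VGG19 input size
    img = tf.image.grayscale_to_rgb(img)         # replicate channel to RGB
    return tf.keras.applications.vgg19.preprocess_input(img)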