Example #1
0
def run_mini_imagenet():
    """Evaluate a MAML model on 5-way mini-ImageNet (1-shot train, 50-shot test).

    Training is intentionally disabled (commented out); evaluation runs twice,
    once with validation batch statistics and once without.
    """
    # Hyper-parameters for the 5-way / 1-shot mini-ImageNet experiment.
    maml_settings = dict(
        database=MiniImagenetDatabase(),
        test_database=MiniImagenetDatabase(),
        network_cls=MiniImagenetModel,
        n=5,
        k_ml=1,
        k_val_ml=5,
        k_val=1,
        k_val_val=15,
        k_test=50,
        k_val_test=15,
        meta_batch_size=4,
        num_steps_ml=5,
        lr_inner_ml=0.05,
        num_steps_validation=5,
        save_after_iterations=15000,
        meta_learning_rate=0.001,
        report_validation_frequency=1000,
        log_train_images_after_iteration=1000,
        num_tasks_val=100,
        clip_gradients=True,
        experiment_name='mini_imagenet_test_res',
        val_seed=42,
        val_test_batch_norm_momentum=0.0,
    )
    maml = ModelAgnosticMetaLearningModel(**maml_settings)

    # maml.train(iterations=60000)
    for use_val_stats in (True, False):
        maml.evaluate(50, num_tasks=1000, seed=42,
                      use_val_batch_statistics=use_val_stats)
Example #2
0
def run_transfer_meta_learning():
    """Train and evaluate VGG16 transfer meta-learning, mini-ImageNet -> EuroSAT."""
    source_db = MiniImagenetDatabase()
    # EuroSAT is used both for validation and as the transfer target.
    target_db = EuroSatDatabase()

    tml = TransferMetaLearningVGG16(
        database=source_db,
        val_database=target_db,
        target_database=target_db,
        network_cls=None,
        n=5,
        k=1,
        k_val_ml=5,
        k_val_val=15,
        k_val_test=15,
        k_test=1,
        meta_batch_size=4,
        num_steps_ml=1,
        lr_inner_ml=0.001,
        num_steps_validation=5,
        save_after_iterations=1500,
        meta_learning_rate=0.0001,
        report_validation_frequency=250,
        log_train_images_after_iteration=1000,
        number_of_tasks_val=100,
        number_of_tasks_test=100,
        clip_gradients=True,
        experiment_name='transfer_meta_learning_mini_imagenet_euro_sat',
        val_seed=42,
        val_test_batch_norm_momentum=0.0,
        random_layer_initialization_seed=42,
        num_trainable_layers=3,
    )

    tml.train(iterations=6000)
    # Evaluate with and without validation batch statistics.
    for use_val_stats in (True, False):
        tml.evaluate(50, seed=42, use_val_batch_statistics=use_val_stats)
def run_mini_imagenet():
    """Train GAN-sampling meta-learning on mini-ImageNet and evaluate a checkpoint."""
    # Full experiment configuration; unpacked into the GANSampling constructor.
    settings = {
        'database': MiniImagenetDatabase(),
        'network_cls': MiniImagenetModel,
        'n': 5,
        'k': 1,
        'k_val_ml': 5,
        'k_val_train': 1,
        'k_val_val': 15,
        'k_val_test': 15,
        'k_test': 1,
        'meta_batch_size': 4,
        'num_steps_ml': 5,
        'lr_inner_ml': 0.05,
        'num_steps_validation': 5,
        'save_after_iterations': 1000,
        'meta_learning_rate': 0.001,
        'report_validation_frequency': 50,
        'log_train_images_after_iteration': 250,
        'number_of_tasks_val': 100,
        'number_of_tasks_test': 1000,
        'clip_gradients': True,
        'experiment_name': 'mini_imagenet_interpolation_std_1.2_shift_5',
        'val_seed': 42,
        'val_test_batch_norm_momentum': 0.0,
    }
    gan_sampling = GANSampling(**settings)

    gan_sampling.train(iterations=60000)
    # Evaluate the checkpoint saved at iteration 16000.
    gan_sampling.evaluate(iterations=50,
                          use_val_batch_statistics=True,
                          seed=42,
                          iterations_to_load_from=16000)
Example #4
0
def run_mini_imagenet():
    """Train and evaluate unsupervised ANIL on mini-ImageNet.

    All convolutional and batch-norm layers are frozen during the inner loop,
    so only the head adapts (the ANIL setting).
    """
    # Inner-loop frozen layers: everything except the final dense head.
    frozen_layers = {
        'conv1', 'conv2', 'conv3', 'conv4', 'bn1', 'bn2', 'bn3', 'bn4'
    }

    anil = ANILUnsupervised(
        database=MiniImagenetDatabase(),
        network_cls=MiniImagenetModel,
        n=5,
        k_ml=1,
        k_val_ml=1,
        k_val=1,
        k_val_val=15,
        k_test=1,
        k_val_test=15,
        meta_batch_size=4,
        num_steps_ml=5,
        lr_inner_ml=0.05,
        num_steps_validation=5,
        save_after_iterations=15000,
        meta_learning_rate=0.001,
        report_validation_frequency=1000,
        log_train_images_after_iteration=1000,
        num_tasks_val=100,
        clip_gradients=True,
        experiment_name='mini_imagenet_unsupervised_permutation',
        val_seed=42,
        val_test_batch_norm_momentum=0.0,
        set_of_frozen_layers=frozen_layers,
    )

    anil.train(iterations=60000)
    anil.evaluate(50, num_tasks=1000, seed=42, use_val_batch_statistics=True)
def run_transfer_learning():
    """Evaluate a fixed (pre-trained) network as a transfer-learning baseline."""
    tl = TransferLearning(
        database=MiniImagenetDatabase(),
        network_cls=get_network,
        n=5,
        k_ml=1,
        k_val_ml=5,
        k_val_val=15,
        k_val=1,
        k_test=50,
        k_val_test=15,
        meta_batch_size=4,
        num_steps_ml=5,
        lr_inner_ml=0.05,
        num_steps_validation=5,
        save_after_iterations=15000,
        meta_learning_rate=0.001,
        report_validation_frequency=250,
        log_train_images_after_iteration=1000,
        num_tasks_val=100,
        clip_gradients=True,
        experiment_name='fixed_vgg_19',
        val_seed=42,
        val_test_batch_norm_momentum=0.0,
    )
    print(f'k: {tl.k_test}')
    # No training: evaluation only, on 1000 tasks with val batch statistics.
    tl.evaluate(50, num_tasks=1000, seed=42, use_val_batch_statistics=True)
 def get_train_dataset(self):
     """Build the supervised meta-learning training dataset from mini-ImageNet."""
     source_db = MiniImagenetDatabase()
     return self.get_supervised_meta_learning_dataset(
         source_db.train_folders,
         n=self.n,
         k=self.k,
         k_validation=self.k_val_ml,
         meta_batch_size=self.meta_batch_size,
     )
 def get_train_dataset(self):
     """Build a meta-learning dataset with separated train/validation domains.

     Tasks are drawn from mini-ImageNet while validation examples come from
     the plant-disease domain.
     """
     support_db = MiniImagenetDatabase()
     query_db = PlantDiseaseDatabase()
     return self.get_separated_supervised_meta_learning_dataset(
         support_db.train_folders,
         query_db.train_folders,
         n=self.n,
         k=self.k_ml,
         k_validation=self.k_val_ml,
         meta_batch_size=self.meta_batch_size,
     )
Example #8
0
def run_domain_attention():
    """Evaluate unsupervised domain attention with Fungi as the test domain.

    Training is disabled (commented out); only evaluation is run.
    """
    # Domains used to pre-train the attention mechanism.
    attention_train_dbs = [
        MiniImagenetDatabase(),
        OmniglotDatabase(random_seed=47,
                         num_train_classes=1200,
                         num_val_classes=100),
        DTDDatabase(),
        VGGFlowerDatabase(),
    ]
    # Meta-training currently uses Fungi only.
    meta_train_dbs = [
        FungiDatabase(),
    ]
    fungi_db = FungiDatabase()

    da = DomainAttentionUnsupervised(
        train_databases=attention_train_dbs,
        meta_train_databases=meta_train_dbs,
        database=fungi_db,
        test_database=fungi_db,
        network_cls=None,
        image_shape=(84, 84, 3),
        n=5,
        k_ml=1,
        k_val_ml=5,
        k_val=1,
        k_val_val=15,
        k_test=5,
        k_val_test=15,
        meta_batch_size=4,
        num_steps_ml=1,
        lr_inner_ml=0.05,
        num_steps_validation=5,
        save_after_iterations=5000,
        meta_learning_rate=0.001,
        report_validation_frequency=1000,
        log_train_images_after_iteration=1000,
        num_tasks_val=100,
        clip_gradients=True,
        experiment_name='domain_attention_all_frozen_layers_unsupervised_fungi2',
        val_seed=42,
        val_test_batch_norm_momentum=0.0,
    )

    print(da.experiment_name)

    # da.train(iterations=5000)
    da.evaluate(iterations=50, num_tasks=1000, seed=42)
Example #9
0
    def setUp(self):
        """Create the identity parse function and one instance of each database."""
        def parse_function(example_address):
            # Identity parser: tests work directly on example addresses.
            return example_address

        self.parse_function = parse_function

        # One database per dataset exercised by the tests.
        self.omniglot_database = OmniglotDatabase(
            random_seed=-1, num_train_classes=1200, num_val_classes=100)
        self.mini_imagenet_database = MiniImagenetDatabase()
        self.celeba_database = CelebADatabase()
        self.lfw_database = LFWDatabase()
        self.euro_sat_database = EuroSatDatabase()
        self.isic_database = ISICDatabase()
        self.chest_x_ray_database = ChestXRay8Database()
Example #10
0
 def __init__(self, meta_train_databases=None, *args, **kwargs):
     """Initialize, defaulting to the standard pool of meta-training domains.

     Args:
         meta_train_databases: Optional explicit list of databases; when None,
             a default seven-domain pool is instantiated.
     """
     super(CombinedCrossDomainMetaLearning, self).__init__(*args, **kwargs)
     if meta_train_databases is not None:
         self.meta_train_databases = meta_train_databases
         return
     # Default cross-domain pool used when the caller supplies nothing.
     self.meta_train_databases = [
         MiniImagenetDatabase(),
         AirplaneDatabase(),
         CUBDatabase(),
         OmniglotDatabase(random_seed=47,
                          num_train_classes=1200,
                          num_val_classes=100),
         DTDDatabase(),
         FungiDatabase(),
         VGGFlowerDatabase(),
     ]
def run_domain_attention():
    """Train and evaluate element-wise domain attention with EuroSAT as target."""
    # Domains used to pre-train the attention mechanism.
    attention_train_dbs = [
        MiniImagenetDatabase(),
        OmniglotDatabase(random_seed=47,
                         num_train_classes=1200,
                         num_val_classes=100),
        DTDDatabase(),
        VGGFlowerDatabase(),
    ]
    # Additional domains for the meta-training stage.
    meta_train_dbs = [
        AirplaneDatabase(),
        FungiDatabase(),
        CUBDatabase(),
    ]
    euro_sat_db = EuroSatDatabase()

    ewda = ElementWiseDomainAttention(
        train_databases=attention_train_dbs,
        meta_train_databases=meta_train_dbs,
        database=euro_sat_db,
        test_database=euro_sat_db,
        network_cls=None,
        image_shape=(84, 84, 3),
        n=5,
        k_ml=1,
        k_val_ml=5,
        k_val=1,
        k_val_val=15,
        k_test=5,
        k_val_test=15,
        meta_batch_size=4,
        num_steps_ml=5,
        lr_inner_ml=0.05,
        num_steps_validation=5,
        save_after_iterations=15000,
        meta_learning_rate=0.001,
        report_validation_frequency=1000,
        log_train_images_after_iteration=1000,
        num_tasks_val=100,
        clip_gradients=True,
        experiment_name='element_wise_domain_attention',
        val_seed=42,
        val_test_batch_norm_momentum=0.0,
    )
    ewda.train(iterations=60000)
    ewda.evaluate(iterations=50, num_tasks=1000, seed=14)
    def get_train_dataset(self):
        """Create the cross-domain meta-learning training dataset.

        Tasks are sampled across seven source domains.
        """
        source_dbs = [
            MiniImagenetDatabase(),
            AirplaneDatabase(),
            CUBDatabase(),
            OmniglotDatabase(random_seed=47, num_train_classes=1200, num_val_classes=100),
            DTDDatabase(),
            FungiDatabase(),
            VGGFlowerDatabase(),
        ]
        return self.get_cross_domain_meta_learning_dataset(
            databases=source_dbs,
            n=self.n,
            k_ml=self.k_ml,
            k_validation=self.k_val_ml,
            meta_batch_size=self.meta_batch_size,
        )
def run_mini_imagenet():
    """Train and evaluate the abstract MAML learner on 5-way, 4-shot mini-ImageNet."""
    config = dict(
        database=MiniImagenetDatabase(random_seed=-1),
        network_cls=MiniImagenetModel,
        n=5,
        k=4,
        meta_batch_size=1,
        num_steps_ml=5,
        lr_inner_ml=0.01,
        num_steps_validation=5,
        save_after_epochs=500,
        meta_learning_rate=0.001,
        report_validation_frequency=50,
        log_train_images_after_iteration=1000,
        least_number_of_tasks_val_test=50,
        clip_gradients=True,
    )
    maml = MAMLAbstractLearner(**config)

    maml.train(epochs=30000)
    maml.evaluate(50)
def run_omniglot():
    """Train MAML on 84x84 Omniglot and evaluate on mini-ImageNet.

    Evaluation runs twice: with and without validation batch statistics.
    """
    # Omniglot resized to 84x84 so it matches the mini-ImageNet input shape.
    source_db = Omniglot84x84Database(
        random_seed=47,
        num_train_classes=1200,
        num_val_classes=100,
    )

    maml = ModelAgnosticMetaLearningModel(
        database=source_db,
        test_database=MiniImagenetDatabase(),
        network_cls=MiniImagenetModel,
        n=5,
        k_ml=1,
        k_val_ml=5,
        k_val=1,
        k_val_val=15,
        k_test=5,
        k_val_test=15,
        meta_batch_size=4,
        num_steps_ml=5,
        lr_inner_ml=0.05,
        num_steps_validation=5,
        save_after_iterations=15000,
        meta_learning_rate=0.001,
        report_validation_frequency=1000,
        log_train_images_after_iteration=1000,
        num_tasks_val=100,
        clip_gradients=True,
        experiment_name='omniglot_84x84',
        val_seed=42,
        val_test_batch_norm_momentum=0.0,
    )

    maml.train(iterations=60000)
    for use_val_stats in (True, False):
        maml.evaluate(iterations=50,
                      num_tasks=1000,
                      use_val_batch_statistics=use_val_stats,
                      seed=42)
Example #15
0
def run_airplane():
    """Train a cross-domain autoencoder on six domains and evaluate on mini-ImageNet.

    Other domain subsets (including 'miniimagenet' itself) were previously
    experimented with; the current run uses every domain except the test one.
    """
    cdae = CrossDomainAE2(
        database=MiniImagenetDatabase(),
        batch_size=512,
        domains=('airplane', 'fungi', 'cub', 'dtd', 'omniglot', 'vggflowers'),
    )

    experiment_name = 'all_domains_288'

    cdae.train(epochs=20, experiment_name=experiment_name)
    cdae.evaluate(10,
                  num_tasks=1000,
                  k_test=1,
                  k_val_test=15,
                  inner_learning_rate=0.001,
                  experiment_name=experiment_name,
                  seed=42)
Example #16
0
def run_mini_imagenet():
    """Train SML on mini-ImageNet using VGG19 features for task clustering.

    Pipeline: sample labeled/unlabeled data points, build a feature-training
    dataset, load pre-trained VGG19 classifier weights from disk, truncate the
    network at layer 24 to act as a feature extractor, then train SML with
    those features (500 clusters).
    """
    mini_imagenet_database = MiniImagenetDatabase()
    # Number of data points sampled from the train folders for feature learning.
    n_data_points = 38400
    data_points, classes, non_labeled_data_points = sample_data_points(
        mini_imagenet_database.train_folders, n_data_points)
    features_dataset, n_classes = make_features_dataset_mini_imagenet(
        data_points,
        classes,
        non_labeled_data_points,
        # shuffle_buffer_size=n_data_points,
        # batch_size=32,
        batch_size=16,
        shuffle_buffer_size=1000,
    )
    # VGG19 classifier trained from scratch (weights=None) over n_classes.
    feature_model = tf.keras.applications.VGG19(weights=None,
                                                classes=n_classes)
    feature_model.compile(
        loss=tf.keras.losses.CategoricalCrossentropy(),
        metrics=['accuracy'],
        optimizer=tf.keras.optimizers.Adam(learning_rate=1e-6),
    )

    def save_call_back(epoch, logs):
        # Checkpoint the feature model every 100 epochs.
        if epoch % 100 == 0:
            feature_model.save_weights(
                filepath=f'./feature_models/feature_model_{epoch}')

    # Feature-model training is disabled: weights from epoch 100 are loaded
    # from disk instead of re-fitting.
    # saver_callback = tf.keras.callbacks.LambdaCallback(on_epoch_end=save_call_back)
    # feature_model.fit(features_dataset, epochs=101, callbacks=[saver_callback])
    feature_model.load_weights(filepath=f'./feature_models/feature_model_100')
    # feature_model.evaluate(features_dataset)
    # exit()
    # Truncate the classifier: layer 24's output becomes the feature embedding.
    # NOTE(review): assumes layer 24 is the 4096-d fc layer (matches
    # feature_size=4096 below) — confirm against the VGG19 layer list.
    feature_model = tf.keras.models.Model(
        inputs=feature_model.input, outputs=feature_model.layers[24].output)

    # print(n_classes)
    # feature_model = VariationalAutoEncoderFeature(input_shape=(84, 84, 3), latent_dim=32, n_classes=n_classes)
    # feature_model = train_the_feature_model_with_classification(
    #     feature_model,
    #     features_dataset,
    #     n_classes,
    #     mini_imagenet_database.input_shape
    # )

    # feature_model = None

    # use imagenet
    # base_model = tf.keras.applications.VGG19(weights='imagenet')
    # feature_model = tf.keras.models.Model(inputs=base_model.input, outputs=base_model.layers[24].output)

    # SML clusters tasks in the 4096-d VGG19 feature space (500 clusters);
    # inputs are resized to 224x224 to match VGG19's expected input.
    sml = SML(
        database=mini_imagenet_database,
        network_cls=MiniImagenetModel,
        n=5,
        k=1,
        k_val_ml=5,
        k_val_val=15,
        k_val_test=15,
        k_test=1,
        meta_batch_size=4,
        num_steps_ml=5,
        lr_inner_ml=0.05,
        num_steps_validation=5,
        save_after_iterations=15000,
        meta_learning_rate=0.001,
        n_clusters=500,
        feature_model=feature_model,
        # feature_size=288,
        feature_size=4096,
        input_shape=(224, 224, 3),
        preprocess_function=tf.keras.applications.vgg19.preprocess_input,
        log_train_images_after_iteration=1000,
        least_number_of_tasks_val_test=100,
        report_validation_frequency=250,
        experiment_name='mini_imagenet_learn_miniimagent_features')
    sml.train(iterations=60000)
Example #17
0
            else:
                new_z = tf.stack([
                    z[0, ...] + (z[(i + 1) % self.n, ...] - z[0, ...]) * 0.3,
                    z[1, ...] + (z[(i + 2) % self.n, ...] - z[1, ...]) * 0.3,
                    z[2, ...] + (z[(i + 3) % self.n, ...] - z[2, ...]) * 0.3,
                    z[3, ...] + (z[(i + 4) % self.n, ...] - z[3, ...]) * 0.3,
                    z[4, ...] + (z[(i + 0) % self.n, ...] - z[4, ...]) * 0.3,
                ],
                                 axis=0)
                vectors.append(new_z)

        return vectors


if __name__ == '__main__':
    mini_imagenet_database = MiniImagenetDatabase(input_shape=(224, 224, 3))
    shape = (224, 224, 3)
    latent_dim = 120
    import os
    os.environ['TFHUB_CACHE_DIR'] = os.path.expanduser('~/tf_hub')

    gan = hub.load("https://tfhub.dev/deepmind/bigbigan-resnet50/1",
                   tags=[]).signatures['generate']
    setattr(gan, 'parser', MiniImagenetParser(shape=shape))

    maml_gan = MiniImageNetMAMLBigGan(
        gan=gan,
        latent_dim=latent_dim,
        generated_image_shape=shape,
        database=mini_imagenet_database,
        network_cls=FiveLayerResNet,
Example #18
0
        print("Error: Too many arguments")
        sys.exit(0)

    # CONFIGS
    ITERATIONS = 15000
    GAN_EPOCHS = 100
    N_TASK_EVAL = 100
    K = 5
    N_WAY = 5
    META_BATCH_SIZE = 1
    LASIUM_TYPE = "p1"

    print("K=",K)
    print("N_WAY=",N_WAY)

    mini_imagenet_database = MiniImagenetDatabase()
    shape = (84, 84, 3)
    latent_dim = 512
    mini_imagenet_generator = get_generator(latent_dim)
    mini_imagenet_discriminator = get_discriminator()
    mini_imagenet_parser = MiniImagenetParser(shape=shape)

    experiment_name = prefix+str(labeled_percentage)

    # for the SSGAN we need to feed the labels, L, when initializing
    gan = GAN(
        'mini_imagenet',
        image_shape=shape,
        latent_dim=latent_dim,
        database=mini_imagenet_database,
        parser=mini_imagenet_parser,
        axes[0, i].xaxis.set_label_position('top')
        axes[0, i].set_xlabel(title_name)

    # fig.suptitle('', fontsize=12, y=1)
    plt.savefig(fname=os.path.join(root_folder_to_save, 'all_domains2.pdf'))
    plt.show()


if __name__ == '__main__':
    tf.config.set_visible_devices([], 'GPU')
    root_folder_to_save = os.path.expanduser('~/datasets_visualization/')
    if not os.path.exists(root_folder_to_save):
        os.mkdir(root_folder_to_save)

    databases = (
        MiniImagenetDatabase(),
        OmniglotDatabase(random_seed=42,
                         num_train_classes=1200,
                         num_val_classes=100),
        AirplaneDatabase(),
        CUBDatabase(),
        DTDDatabase(),
        FungiDatabase(),
        VGGFlowerDatabase(),
        # TrafficSignDatabase(),
        # MSCOCODatabase(),
        # PlantDiseaseDatabase(),
        # EuroSatDatabase(),
        # ISICDatabase(),
        # ChestXRay8Database(),
    )
    for style_layer_name in style_layers
]
model = tf.keras.models.Model(inputs=vgg19.inputs, outputs=outputs)


def convert_to_activaitons(imgs):
    """Return the model's activations for each serialized image in ``imgs``.

    Each element is decoded via ``convert_str_to_img``, given a batch axis,
    and passed through the module-level ``model``.

    Note: the (misspelled) name is kept because callers use it.
    """
    return [
        model.predict(convert_str_to_img(image)[np.newaxis, ...])
        for image in imgs
    ]


# NOTE(review): bare constructor call — the instance is discarded; presumably
# instantiation has dataset-preparation side effects. Confirm it is intentional.
DTDDatabase()
# Gather every image instance from the four domains of interest.
d1_imgs = get_all_instances(MiniImagenetDatabase())
d2_imgs = get_all_instances(ISICDatabase())
d3_imgs = get_all_instances(ChestXRay8Database())
d4_imgs = get_all_instances(PlantDiseaseDatabase())

# Sample 10 distinct images per domain (no fixed seed: non-deterministic).
d1_imgs = np.random.choice(d1_imgs, 10, replace=False)
d2_imgs = np.random.choice(d2_imgs, 10, replace=False)
d3_imgs = np.random.choice(d3_imgs, 10, replace=False)
d4_imgs = np.random.choice(d4_imgs, 10, replace=False)

print(d1_imgs)
print(d2_imgs)
print(d3_imgs)
print(d4_imgs)

d1_imgs_activations = convert_to_activaitons(d1_imgs)