def main():
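    """Build the input pipelines, construct the autoencoder and MAML graphs,
    restore a checkpoint when resuming or testing, and dispatch to train()
    or test().
    """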
    if FLAGS.datasource == 'sinusoid':
        test_num_updates = 5 if FLAGS.train else 10
    elif FLAGS.datasource in ('miniimagenet', 'cifar100'):
        # eval on at least one update during training
        test_num_updates = 1 if FLAGS.train else 10
    else:
        test_num_updates = 10

    if not FLAGS.train:
        orig_meta_batch_size = FLAGS.meta_batch_size
        # always use a meta batch size of 1 when testing
        FLAGS.meta_batch_size = 1

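    # DataGenerator's first argument is the number of examples drawn per class
    # (per task) in each episode; the second is the number of tasks per meta-batch.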
    if FLAGS.datasource == 'sinusoid':
        data_generator = DataGenerator(FLAGS.update_batch_size * 2,
                                       FLAGS.meta_batch_size)
    elif FLAGS.metatrain_iterations == 0:
        # pure evaluation: only use one datapoint per class
        assert FLAGS.meta_batch_size == 1
        assert FLAGS.update_batch_size == 1
        data_generator = DataGenerator(1, FLAGS.meta_batch_size)
    elif FLAGS.datasource == 'miniimagenet':  # TODO: also use 15 val examples for cifar100?
        if FLAGS.train:
            # K examples per class for the inner update plus 15 for the meta-objective
            data_generator = DataGenerator(FLAGS.update_batch_size + 15,
                                           FLAGS.meta_batch_size)
        else:
            # only 2*K examples per class at test time to save memory
            data_generator = DataGenerator(FLAGS.update_batch_size * 2,
                                           FLAGS.meta_batch_size)
    else:
        # K examples per class for the inner update, K for the meta-objective
        data_generator = DataGenerator(FLAGS.update_batch_size * 2,
                                       FLAGS.meta_batch_size)

    dim_output = data_generator.dim_output
    if FLAGS.baseline == 'oracle':
        # the oracle baseline (sinusoid only) also receives the true task
        # parameters as input, hence the larger input dimension
        assert FLAGS.datasource == 'sinusoid'
        dim_input = 3
        # the oracle is trained with ordinary pretraining only, so fold the
        # meta-training iterations into the pretraining budget
        FLAGS.pretrain_iterations += FLAGS.metatrain_iterations
        FLAGS.metatrain_iterations = 0
    else:
        dim_input = data_generator.dim_input

    if FLAGS.datasource in ('miniimagenet', 'omniglot', 'cifar100'):
        tf_data_load = True
        num_classes = data_generator.num_classes

        if FLAGS.train:  # only construct training model if needed
            random.seed(5)  # fix the episode sampling order for the training pipeline
            image_tensor, label_tensor = data_generator.make_data_tensor()
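            # each task yields num_classes * (samples per class) examples; the
            # first update_batch_size per class form the support set
            # (inputa/labela) used for the inner update, the remainder the
            # query set (inputb/labelb) used for the meta-objective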
            inputa = tf.slice(image_tensor, [0, 0, 0],
                              [-1, num_classes * FLAGS.update_batch_size, -1])
            inputb = tf.slice(image_tensor,
                              [0, num_classes * FLAGS.update_batch_size, 0],
                              [-1, -1, -1])
            labela = tf.slice(label_tensor, [0, 0, 0],
                              [-1, num_classes * FLAGS.update_batch_size, -1])
            labelb = tf.slice(label_tensor,
                              [0, num_classes * FLAGS.update_batch_size, 0],
                              [-1, -1, -1])
            input_tensors = {
                'inputa': inputa,
                'inputb': inputb,
                'labela': labela,
                'labelb': labelb
            }

        random.seed(6)  # separate fixed seed for validation episode sampling
        image_tensor, label_tensor = data_generator.make_data_tensor(
            train=False)
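        # same support/query split as above, drawn from the validation pipeline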
        inputa = tf.slice(image_tensor, [0, 0, 0],
                          [-1, num_classes * FLAGS.update_batch_size, -1])
        inputb = tf.slice(image_tensor,
                          [0, num_classes * FLAGS.update_batch_size, 0],
                          [-1, -1, -1])
        labela = tf.slice(label_tensor, [0, 0, 0],
                          [-1, num_classes * FLAGS.update_batch_size, -1])
        labelb = tf.slice(label_tensor,
                          [0, num_classes * FLAGS.update_batch_size, 0],
                          [-1, -1, -1])
        metaval_input_tensors = {
            'inputa': inputa,
            'inputb': inputb,
            'labela': labela,
            'labelb': labelb
        }
    else:
        tf_data_load = False
        input_tensors = None

    random.seed(7)  # fixed seed for the autoencoder data pipeline
    X = data_generator.make_autoencoder_data_tensor(train=True)   # training images
    Y = data_generator.make_autoencoder_data_tensor(train=False)  # validation images

    # images from the training split, presumably used to extract latent
    # encodings at evaluation time
    ims_for_encodings = data_generator.make_autoencoder_test_data_tensor(
        train=True)


    autoencoder_input_tensors = {'X': X, 'Y': Y}
    dim_s = 32  # dimensionality of the autoencoder's latent code
    autoencoder = Autoencoder(dim_input, dim_s)

    model = MAML(autoencoder,
                 dim_input,
                 dim_output,
                 test_num_updates=test_num_updates)
    if FLAGS.train or not tf_data_load:
        autoencoder.construct_autoencoder(
            input_tensors=autoencoder_input_tensors,
            prefix='autoencoder_train')

    if tf_data_load:
        model.construct_model(input_tensors=metaval_input_tensors,
                              prefix='metaval_')
    # note: both merge_all() calls return the same op, merging every summary in the graph
    model.summ_op = tf.summary.merge_all()
    autoencoder.summ_op = tf.summary.merge_all()
    saver = tf.train.Saver(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES),
                           max_to_keep=10)

    sess = tf.InteractiveSession()
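    # InteractiveSession installs itself as the default session, so the
    # .run() calls below need no explicit session argument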

    if not FLAGS.train:
        # change to original meta batch size when loading model.
        FLAGS.meta_batch_size = orig_meta_batch_size

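    # -1 means the train-time value defaults to the corresponding
    # update_batch_size / update_lr flag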
    if FLAGS.train_update_batch_size == -1:
        FLAGS.train_update_batch_size = FLAGS.update_batch_size
    if FLAGS.train_update_lr == -1:
        FLAGS.train_update_lr = FLAGS.update_lr

    # encode the key hyperparameters into the experiment directory name
    exp_string = 'cls_{}.mbs_{}.ubs_{}.numstep{}.updatelr{}'.format(
        FLAGS.num_classes, FLAGS.meta_batch_size,
        FLAGS.train_update_batch_size, FLAGS.num_updates,
        FLAGS.train_update_lr)

    if FLAGS.num_filters != 64:
        exp_string += 'hidden' + str(FLAGS.num_filters)
    if FLAGS.max_pool:
        exp_string += 'maxpool'
    if FLAGS.stop_grad:
        exp_string += 'stopgrad'
    if FLAGS.baseline:
        exp_string += FLAGS.baseline
    if FLAGS.norm == 'batch_norm':
        exp_string += 'batchnorm'
    elif FLAGS.norm == 'layer_norm':
        exp_string += 'layernorm'
    elif FLAGS.norm == 'None':
        exp_string += 'nonorm'
    else:
        print('Norm setting not recognized.')

    resume_itr = 0
    model_file = None

    tf.global_variables_initializer().run()
    tf.train.start_queue_runners()  # start the queue threads that feed the input pipeline

    if FLAGS.resume or not FLAGS.train:
        model_file = tf.train.latest_checkpoint(FLAGS.logdir + '/' + exp_string)
        if model_file and FLAGS.test_iter > 0:
            # point at the checkpoint from the requested iteration instead
            model_file = model_file[:model_file.index('model')] + 'model' + str(
                FLAGS.test_iter)
        if model_file:
            ind1 = model_file.index('model')
            resume_itr = int(model_file[ind1 + 5:])
            print("Restoring model weights from " + model_file)
            saver.restore(sess, model_file)

    if FLAGS.train:
        train(model, autoencoder, saver, sess, exp_string, data_generator,
              resume_itr)
    else:
        test(model, autoencoder, saver, sess, exp_string, data_generator,
             test_num_updates)