Code Example #1
File: test.py Project: dkloz/query_completion
def test_model(dataset_name, context, testdata):
    tf.reset_default_graph()
    exp_dir = os.path.join(expdir, dataset_name, context)

    metamodel = MetaModel(exp_dir)
    model_loaded = metamodel.model
    metamodel.MakeSessionAndRestore(threads)

    total_word_count = 0
    total_log_prob = 0
    results = []

    for idx in range(len(testdata.df) // testdata.batch_size):
        feed_dict = testdata.GetFeedDict(model_loaded)
        c, words_in_batch, sentence_costs = metamodel.session.run([
            model_loaded.avg_loss, model_loaded.words_in_batch,
            model_loaded.per_sentence_loss
        ], feed_dict)

        total_word_count += words_in_batch
        total_log_prob += float(c * words_in_batch)
        # running perplexity over the batches processed so far
        print('{0}\t{1:.3f}'.format(idx,
                                     np.exp(total_log_prob / total_word_count)))

        lens = feed_dict[model_loaded.query_lengths]
        for length, sentence_cost in zip(lens, sentence_costs):
            data_row = {'length': length, 'cost': sentence_cost}
            results.append(data_row)

    results = pandas.DataFrame(results)
    results.to_csv(os.path.join(exp_dir, 'pplstats.csv'))

    idx = len(testdata.df) // testdata.batch_size
    print('{0}\t{1:.3f}'.format(idx, np.exp(total_log_prob / total_word_count)))
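A hypothetical invocation of the function above, assuming the module-level expdir and threads globals the excerpt relies on and a testdata Dataset built over held-out queries as in Code Example #11; the dataset name and context strings are placeholders, not values from the project:

# Illustrative call only; 'aol' and 'user' are placeholder arguments.
test_model('aol', 'user', testdata)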
Code Example #2
def test_residual_ratio():
    hyperparameters = Hyperparameters()


    model = MetaModel(hyperparameters)
    model.populate_from_embedding(MetaModel.get_nasnet_embedding())
    model.cells[0].process_stuff()
    model.cells[1].process_stuff()
Code Example #3
def main():
    parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument('--data-dir',
                        type=str,
                        default='data/corona',
                        help='data directory containing input.txt')
    parser.add_argument('--rnn-type',
                        type=str,
                        default='GRU',
                        help='RNN type used [GRU|LSTM|SMGU]')
    parser.add_argument('--live-sample',
                        action='store_true',
                        help='live sample the model after each epoch')
    parser.add_argument(
        '--word-tokens',
        action='store_true',
        help='whether to model the rnn at word level or char level')
    parser.add_argument(
        '--pristine-input',
        action='store_true',
        help='do not lowercase or attempt fancy tokenization of input')
    parser.add_argument('--pristine-output',
                        action='store_true',
                        help='do not detokenize output (word-tokens only)')
    parser.add_argument('--embedding-size',
                        type=int,
                        default=64,
                        help='size of the embedding')
    parser.add_argument('--rnn-size',
                        type=int,
                        default=128,
                        help='size of RNN layers')
    parser.add_argument('--num-layers',
                        type=int,
                        default=1,
                        help='number of layers in the RNN')
    parser.add_argument('--batch-size',
                        type=int,
                        default=32,
                        help='minibatch size')
    parser.add_argument('--seq-length',
                        type=int,
                        default=50,
                        help='training sequence length')
    parser.add_argument(
        '--seq-step',
        type=int,
        default=25,
        help='how often to pull a training sequence from the data')
    parser.add_argument('--num-epochs',
                        type=int,
                        default=50,
                        help='number of epochs')
    args = parser.parse_args()

    model = MetaModel()
    model.train(**vars(args))
    save(model, args.data_dir)
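Because the parsed flags are forwarded with model.train(**vars(args)), the same settings can also be passed programmatically. A minimal sketch, assuming MetaModel.train accepts keyword names matching the flags above (argparse converts dashes to underscores); the values shown are simply the parser defaults:

model = MetaModel()
model.train(data_dir='data/corona', rnn_type='GRU',
            live_sample=False, word_tokens=False,
            pristine_input=False, pristine_output=False,
            embedding_size=64, rnn_size=128, num_layers=1,
            batch_size=32, seq_length=50, seq_step=25, num_epochs=50)
save(model, 'data/corona')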
Code Example #4
    def test_model():
        metamodel = MetaModel(hyperparameters)
        metamodel.populate_with_nasnet_metacells()

        drop_path_tracker = DropPathTracker(
            hyperparameters.parameters['DROP_PATH_CHANCE'], 0, total_steps)
        first_cell = CellDataHolder(
            3, hyperparameters.parameters['TARGET_FILTER_DIMS'],
            metamodel.cells[0], False, drop_path_tracker, 0.)

        def get_model():
            cell_input = tf.keras.Input(dataset.images_shape)
            cell_output = tf.keras.layers.Conv2D(
                hyperparameters.parameters['TARGET_FILTER_DIMS'], 1, 1,
                'same')(cell_input)
            cell_output = first_cell.build([cell_output, cell_output])
            cell_output = cell_output[0]
            cell_output = tf.keras.layers.Lambda(lambda x: tf.reduce_mean(
                input_tensor=x, axis=[1, 2]))(cell_output)
            cell_output = tf.keras.layers.Dropout(.5)(cell_output)
            cell_output = tf.keras.layers.Dense(10)(cell_output)
            model = tf.keras.Model(inputs=cell_input, outputs=cell_output)
            optimizer = tf.keras.optimizers.Adam(
                learning_rate=hyperparameters.
                parameters['MAXIMUM_LEARNING_RATE'])
            model.compile(optimizer=optimizer,
                          loss=tf.keras.losses.SparseCategoricalCrossentropy(
                              from_logits=True),
                          metrics=['accuracy'])
            return model

        accuracies = []
        for i in range(cell_samples):
            cell_model = get_model()
            cell_model.fit(dataset.train_images,
                           dataset.train_labels,
                           shuffle=True,
                           batch_size=hyperparameters.parameters['BATCH_SIZE'],
                           epochs=1,
                           callbacks=[drop_path_tracker])
            model_accuracies = []
            for test_set_index in range(len(dataset.test_set_images)):
                accuracy = cell_model.evaluate(
                    dataset.test_set_images[test_set_index],
                    dataset.test_set_labels[test_set_index])[-1]
                print(
                    f'{dataset.test_set_names[test_set_index]} test set accuracy: {accuracy}'
                )
                model_accuracies.append(accuracy)
            # accuracy = cell_model.evaluate(dataset.test_images, dataset.test_labels)[-1]
            # accuracies.append(accuracy)
            accuracies.append(model_accuracies)
            tf.keras.backend.clear_session()
            del cell_model

        return accuracies, metamodel.get_embedding()
Code Example #5
File: TaskEvo.py Project: dkoleber/nas
def test_benchmark_models():
    dir_path = os.path.join(evo_dir, 'cell_evo_benchmarks_6')
    results_path = os.path.join(dir_path, 'results.json')
    mods = [
        ObjectModifier.SizeModifier, ObjectModifier.PerspectiveModifier,
        ObjectModifier.RotationModifier, ObjectModifier.ColorModifier
    ]
    hyperparameters = Hyperparameters()
    cell_samples = 8

    if not os.path.exists(dir_path):
        os.makedirs(dir_path)

    # load dataset, or create a new one if one doesn't exist
    dataset_exists = os.path.exists(dir_path) and 'dataset.npy' in os.listdir(
        dir_path)
    if not dataset_exists:
        print('Generating dataset')
        # size, dim, num_classes, vertices per object, objects per image,
        DatasetGenerator.build_task_dataset(40000, (16, 16),
                                            10,
                                            4,
                                            10,
                                            dir_path,
                                            modifiers=mods,
                                            max_depth_of_target=1)
    dataset = DatasetGenerator.get_task_dataset(dir_path)

    embeddings = [
        MetaModel.get_nasnet_embedding(),
        MetaModel.get_s1_embedding(),
        MetaModel.get_identity_embedding(),
        MetaModel.get_m1_sep3_embedding(),
        MetaModel.get_m1_sep7_embedding(),
        MetaModel.get_m1_sep3_serial_embedding(),
    ]

    data = {'embeddings': [], 'accuracies': []}
    if os.path.exists(results_path):
        with open(results_path, 'r') as fl:
            data = json.load(fl)

    def save_data():
        with open(results_path, 'w+') as fl:
            json.dump(data, fl, indent=4)

    for e in embeddings:
        metamodel = MetaModel(hyperparameters)
        metamodel.populate_from_embedding(e)
        accuracies = test_model(metamodel, dataset, cell_samples)
        data['embeddings'].append(metamodel.get_embedding())
        data['accuracies'].append(accuracies)
        save_data()

    performances = [performance(x) for x in data['accuracies']]
    print(performances)
Code Example #6
def cell_performance_test_1():

    hyperparameters = Hyperparameters()

    dataset = ImageDataset.get_cifar10()

    def get_sorted(images, labels):
        sorted_by_class = [[] for _ in range(10)]
        for index in range(len(images)):
            sorted_by_class[labels[index, 0]].append(images[index, :, :])
        return sorted_by_class

    sorted_train = get_sorted(dataset.train_images, dataset.train_labels)
    sorted_test = get_sorted(dataset.test_images, dataset.test_labels)

    model = MetaModel(hyperparameters)

    # model.populate_with_nasnet_metacells()
    model.populate_from_embedding(MetaModel.get_nasnet_embedding())

    # model.build_model(dataset.images_shape)
    first_cell = CellDataHolder(3, 3, model.cells[0])

    cell_input = tf.keras.Input(dataset.images_shape)
    cell_output = first_cell.build([cell_input, cell_input])
    cell_model = tf.keras.Model(inputs=cell_input, outputs=cell_output)

    def gram_matrix(input_tensor):
        result = tf.linalg.einsum('bijc,bijd->bcd', input_tensor, input_tensor)
        input_shape = tf.shape(input_tensor)
        num_locations = tf.cast(input_shape[1] * input_shape[2], tf.float32)
        return result / (num_locations)

    optimizer = tf.keras.optimizers.Adam(
        learning_rate=hyperparameters.parameters['LEARNING_RATE'])

    def loss(real_image, fake_image, output):
        real_maximize = None  # TODO: INNER PRODUCT
        fake_minimize = None

    def train_step(input_image_1, input_image_2):
        with tf.GradientTape() as tape:
            image_1_output = cell_model(input_image_1)
            image_2_output = cell_model(input_image_2)

            total_loss = loss(input_image_1,
                              input_image_2, image_1_output) + loss(
                                  input_image_2, input_image_1, image_2_output)

        gradient = tape.gradient(total_loss, cell_model.trainable_variables)
        optimizer.apply_gradients(zip(gradient,
                                      cell_model.trainable_variables))
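The loss body above is left as a TODO in the source. Purely as an illustration of how the otherwise unused gram_matrix helper could fill it in, and not the author's implementation, a Gram-matrix inner-product objective might look like the sketch below (real_features and fake_features are assumed to be 4-D batch x H x W x C cell outputs):

# Illustrative sketch only -- not the project's implementation.
def example_gram_loss(real_features, fake_features):
    real_gram = gram_matrix(real_features)
    fake_gram = gram_matrix(fake_features)
    real_maximize = tf.reduce_mean(real_gram * real_gram)  # self-correlation to reward
    fake_minimize = tf.reduce_mean(real_gram * fake_gram)  # cross-correlation to penalize
    return fake_minimize - real_maximize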
Code Example #7
File: TaskEvo.py Project: dkoleber/nas
def get_flops_for_cell_models_from_embeddings():
    tf.compat.v1.disable_eager_execution()
    dir_path = os.path.join(evo_dir, 'cell_evo_benchmarks')
    dataset = DatasetGenerator.get_task_dataset(dir_path)
    hyperparameters = Hyperparameters()

    embeddings = [
        MetaModel.get_nasnet_embedding(),
        MetaModel.get_s1_embedding(),
        MetaModel.get_identity_embedding(),
        MetaModel.get_m1_sep3_embedding(),
        MetaModel.get_m1_sep7_embedding()
    ]

    flops = []

    for e in embeddings:
        e_flops = []
        metamodel = MetaModel(hyperparameters)
        metamodel.populate_from_embedding(e)

        steps_per_epoch = math.ceil(
            len(dataset.train_labels) /
            metamodel.hyperparameters.parameters['BATCH_SIZE'])
        total_steps = metamodel.hyperparameters.parameters[
            'TRAIN_ITERATIONS'] * metamodel.hyperparameters.parameters[
                'TRAIN_EPOCHS'] * steps_per_epoch
        for meta_cell in metamodel.cells:
            drop_path_tracker = DropPathTracker(
                metamodel.hyperparameters.parameters['DROP_PATH_CHANCE'], 0,
                total_steps)
            first_cell = CellDataHolder(
                3, metamodel.hyperparameters.parameters['TARGET_FILTER_DIMS'],
                meta_cell, False, drop_path_tracker, 0.)

            cell_model = build_cell_model(
                first_cell, dataset.images_shape,
                metamodel.hyperparameters.parameters['TARGET_FILTER_DIMS'],
                metamodel.hyperparameters.parameters['MAXIMUM_LEARNING_RATE'])
            e_flops.append(
                get_flops_for_keras_model(cell_model, dataset.images_shape))
            tf.keras.backend.clear_session()
            del cell_model

        flops.append(e_flops)
        print(flops)

    print(flops)
Code Example #8
parser = argparse.ArgumentParser()
parser.add_argument('expdir', help='experiment directory')
parser.add_argument('--threads',
                    type=int,
                    default=12,
                    help='how many threads to use in tensorflow')
args = parser.parse_args()

df = pandas.read_csv('/g/ssli/data/LowResourceLM/aol/queries01.dev.txt.gz',
                     sep='\t',
                     header=None)
df.columns = ['user', 'query_', 'date']
df['user'] = df.user.apply(lambda x: 's' + str(x))

m = MetaModel(args.expdir)  # Load the model
m.MakeSessionAndRestore(args.threads)

for i in range(23000):
    row = df.iloc[i]
    query_len = len(row.query_)

    if query_len <= 3:
        continue

    prefix_len = GetPrefixLen(row.user, row.query_)
    prefix = row.query_[:prefix_len]
    b = GetCompletions(['<S>'] + list(prefix),
                       m.user_vocab[row.user],
                       m,
                       branching_factor=4)
Code Example #9
def test_model_accuracy_from_embedding(dir_name, embedding):
    dir_path = os.path.join(evo_dir, dir_name)
    # dataset = ImageDataset.get_cifar10_reduced()
    dataset = ImageDataset.get_cifar10()

    if not os.path.exists(dir_path):
        os.makedirs(dir_path)

    hyperparameters = Hyperparameters()

    model = MetaModel(hyperparameters)

    model.populate_from_embedding(embedding)

    model.build_model(dataset.images_shape)
    model.evaluate(dataset)
    model.save_model(dir_path)
    model.generate_graph(dir_path)
    model.save_metadata(dir_path)
    model.clear_model()
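A hypothetical call, reusing one of the predefined embeddings that appear elsewhere in these examples; the directory name is illustrative:

test_model_accuracy_from_embedding('nasnet_baseline',
                                   MetaModel.get_nasnet_embedding())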
Code Example #10
def test_accuracy_at_different_train_amounts():
    dir_path = os.path.join(evo_dir, 'test_accuracy_epochs')
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)
    hyperparameters = Hyperparameters()
    hyperparameters.parameters['POPULATION_SIZE'] = 32
    hyperparameters.parameters['ROUNDS'] = 0
    hyperparameters.parameters['TRAIN_EPOCHS'] = 1
    hyperparameters.parameters['TRAIN_ITERATIONS'] = 16

    dataset = ImageDataset.get_cifar10()

    existing_sims = [
        x for x in os.listdir(dir_path) if 'small' not in x and '.png' not in x
    ]

    num_already_done = len(existing_sims)
    num_remaining = hyperparameters.parameters[
        'POPULATION_SIZE'] - num_already_done
    total_todo = hyperparameters.parameters['POPULATION_SIZE']
    population = []
    for round_num in range(num_remaining):
        print(
            f'Evaluating model {round_num + 1 + num_already_done} of {total_todo}'
        )
        new_candidate = MetaModel(hyperparameters)
        new_candidate.populate_with_nasnet_metacells()
        new_candidate.model_name = 'evo_' + str(
            time.time()
        )  # reassigned here because all models in the initial population are created within microseconds of each other
        new_candidate.build_model(dataset.images_shape)
        new_candidate.evaluate(dataset)
        new_candidate.save_model(dir_path)
        # new_candidate.metrics.metrics['accuracy'].extend([x + round_num for x in range(4)])
        new_candidate.save_metadata(dir_path)
        population.append(new_candidate)
        new_candidate.clear_model()
Code Example #11
    "/Users/songdongdong/PycharmProjects/query_completion/model/1605774995")
parser.add_argument('--data',
                    type=str,
                    action='append',
                    dest='data',
                    default=[data_dir + "queries07.test.txt.gz"],
                    help='where to load the data')
parser.add_argument('--threads',
                    type=int,
                    default=12,
                    help='how many threads to use in tensorflow')
args = parser.parse_args()
expdir = args.expdir

# Load the model
metamodel = MetaModel(expdir)
model = metamodel.model
metamodel.MakeSessionAndRestore(args.threads)
# Load the data
df = LoadData(args.data)
dataset = Dataset(df,
                  metamodel.char_vocab,
                  metamodel.user_vocab,
                  max_len=metamodel.params.max_len)

total_word_count = 0
total_log_prob = 0
print(len(dataset.df), dataset.batch_size)  # 20999    24
for idx in range(0, int(len(dataset.df) / dataset.batch_size)):
    feed_dict = dataset.GetFeedDict(model)
    # the session here belongs to the restored (previously saved) model
Code Example #12
def eval_model(embedding=None, metamodel=None):
    model = metamodel
    if model is None:
        model = MetaModel(hyperparameters)
        if embedding is None:
            model.populate_with_nasnet_metacells()
        else:
            model.populate_from_embedding(embedding)
        model.build_model(dataset.images_shape)
    model.evaluate(dataset, 1, dir_path)
    model.save_metadata(dir_path)
    model.save_model(dir_path)
    model.generate_graph(dir_path)
    model.clear_model()
    tf.keras.backend.clear_session()
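Hypothetical calls, assuming the hyperparameters, dataset, and dir_path values from the enclosing scope that this excerpt relies on:

eval_model(embedding=MetaModel.get_nasnet_embedding())  # evaluate a fixed architecture
eval_model()  # evaluate freshly populated NASNet-style metacells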
Code Example #13
def train_nasnet_archs():

    num_models = 16

    def default_params(epochs: int) -> Hyperparameters:
        params = Hyperparameters()
        params.parameters['REDUCTION_EXPANSION_FACTOR'] = 2
        params.parameters['SGDR_EPOCHS_PER_RESTART'] = epochs
        params.parameters['TRAIN_ITERATIONS'] = epochs
        params.parameters['MAXIMUM_LEARNING_RATE'] = 0.025
        params.parameters['MINIMUM_LEARNING_RATE'] = 0.001
        params.parameters['DROP_PATH_TOTAL_STEPS_MULTI'] = 1
        params.parameters['BATCH_SIZE'] = 16
        return params

    def standard_params(epochs: int) -> Hyperparameters:
        params = default_params(epochs)
        params.parameters['TARGET_FILTER_DIMS'] = 32
        params.parameters['CELL_STACKS'] = [6, 1]
        params.parameters['CELL_LAYERS'] = 3
        return params

    def medium_params(epochs: int, filters=32) -> Hyperparameters:
        params = default_params(epochs)
        params.parameters['TARGET_FILTER_DIMS'] = filters
        params.parameters['CELL_STACKS'] = [5, 1]
        params.parameters['CELL_LAYERS'] = 3
        return params

    def small_params(epochs: int) -> Hyperparameters:
        params = default_params(epochs)
        params.parameters['TARGET_FILTER_DIMS'] = 24
        params.parameters['CELL_STACKS'] = [3, 1]
        params.parameters['CELL_LAYERS'] = 3
        return params

    def long_params() -> Hyperparameters:
        params = default_params(16)
        params.parameters['TARGET_FILTER_DIMS'] = 16
        params.parameters['CELL_STACKS'] = [3, 1]
        params.parameters['CELL_LAYERS'] = 2
        params.parameters['CONCATENATE_ALL'] = False
        params.parameters['GROUPS_PER_CELL'] = 7
        return params

    embeddings = []
    np.random.seed(0)

    for i in range(num_models):
        m = MetaModel(default_params(0))
        m.populate_with_nasnet_metacells()
        embeddings.append(m.get_embedding())

    np.random.seed(0)
    long_embeddings = []
    for i in range(num_models):
        m = MetaModel(long_params())
        m.populate_with_nasnet_metacells()
        long_embeddings.append(m.get_embedding())

    multi_model_test('zs_small_3x3_16e_24f',
                     num_models=num_models,
                     hparams=small_params(16),
                     emb_queue=embeddings)
    multi_model_test('zs_small_3x3_32e_24f',
                     num_models=num_models,
                     hparams=small_params(32),
                     emb_queue=embeddings)
    multi_model_test('zs_medium_5x3_16e_24f',
                     num_models=num_models,
                     hparams=medium_params(32),
                     emb_queue=embeddings)
    multi_model_test('zs_medium_6x3_16e_32f',
                     num_models=num_models,
                     hparams=medium_params(16, 32),
                     emb_queue=embeddings)
    multi_model_test('zs_standard_6x3_16e_32f',
                     num_models=num_models,
                     hparams=medium_params(16),
                     emb_queue=embeddings)
    multi_model_test('zs_standard_6x3_32e_32f',
                     num_models=num_models,
                     hparams=medium_params(32),
                     emb_queue=embeddings)
Code Example #14
File: TaskEvo.py Project: dkoleber/nas
def run_test(dir_name):
    cell_samples = 16
    base_population = 8
    evolved_population = 24

    mods = [
        ObjectModifier.SizeModifier, ObjectModifier.PerspectiveModifier,
        ObjectModifier.RotationModifier, ObjectModifier.ColorModifier
    ]
    hyperparameters = Hyperparameters()

    dir_path = os.path.join(evo_dir, dir_name)
    results_path = os.path.join(dir_path, 'results.json')

    if not os.path.exists(dir_path):
        os.makedirs(dir_path)

    # load dataset, or create a new one if one doesn't exist
    dataset_exists = os.path.exists(dir_path) and 'dataset.npy' in os.listdir(
        dir_path)
    if not dataset_exists:
        print('Generating dataset')
        DatasetGenerator.build_task_dataset(20000, (32, 32),
                                            10,
                                            4,
                                            2,
                                            dir_path,
                                            modifiers=mods,
                                            max_depth_of_target=1)
    dataset = DatasetGenerator.get_task_dataset(dir_path)

    # load previous test results if they exist
    data = {'embeddings': [], 'accuracies': []}
    if os.path.exists(results_path):
        with open(results_path, 'r') as fl:
            data = json.load(fl)

    def save_data():
        with open(results_path, 'w+') as fl:
            json.dump(data, fl, indent=4)

    def get_average_accuracy(model_index: int, cell_index: int):
        return np.mean(data['accuracies'][model_index][cell_index], axis=0)

    existing_population_size = len(data['embeddings'])
    remaining_base_population = 0 if existing_population_size > base_population else base_population - existing_population_size
    remaining_evolved_population = evolved_population if existing_population_size < base_population else evolved_population - (
        existing_population_size - base_population)

    print(
        f'Evaluating {remaining_base_population} base candidates ({base_population - remaining_base_population}/{base_population} done) '
        f'and {remaining_evolved_population} evolved candidates ({evolved_population - remaining_evolved_population}/{evolved_population} done)'
    )

    for i in range(remaining_base_population):
        print(
            f'Evaluating candidate {i + 1} of {remaining_base_population} base candidates'
        )
        metamodel = MetaModel(hyperparameters)
        metamodel.populate_with_nasnet_metacells()
        accuracies = test_model(metamodel, dataset, cell_samples)
        data['embeddings'].append(metamodel.get_embedding())
        data['accuracies'].append(accuracies)
        save_data()

    performances = [performance(x) for x in data['accuracies']]

    def find_best_indexes():
        best_performances = np.full(performances[0].shape,
                                    1.,
                                    dtype=np.float32)
        best_indexes = np.zeros(performances[0].shape, dtype=int)
        for performance_index, x in enumerate(performances):
            for i, entry in enumerate(x):
                if best_performances[i] > entry:
                    best_performances[i] = entry
                    best_indexes[i] = performance_index

        return best_indexes

    for i in range(remaining_evolved_population):
        print(
            f'Evaluating candidate {i + 1} of {remaining_evolved_population} evolved candidates'
        )
        best_indexes = find_best_indexes()
        print(f'best indexes: {best_indexes}')
        combined_embeddings = combine_embeddings(
            data['embeddings'][best_indexes[0]],
            data['embeddings'][best_indexes[1]])
        mutated_embeddings = mutate_cell_from_embedding(combined_embeddings, 0)
        mutated_embeddings = mutate_cell_from_embedding(mutated_embeddings, 1)
        metamodel = MetaModel(hyperparameters)
        metamodel.populate_from_embedding(mutated_embeddings)
        accuracies = test_model(metamodel, dataset, cell_samples)
        data['embeddings'].append(metamodel.get_embedding())
        data['accuracies'].append(accuracies)
        performances.append(performance(accuracies))
        save_data()
Code Example #15
File: TaskEvo.py Project: dkoleber/nas
def test_nth_in_dir(dir_name, n: int):
    dir_path = os.path.join(evo_dir, dir_name)
    data_path = os.path.join(dir_path, 'results.json')

    with open(data_path, 'r') as fl:
        data = json.load(fl)

    performances = [performance(x) for x in data['accuracies']]

    performances_with_indexes = [(performances[i], data['embeddings'][i])
                                 for i in range(len(performances))]
    num_cells = len(performances[0])  # should be 2
    pwi_per_cell = [performances_with_indexes.copy() for i in range(num_cells)]

    for i in range(num_cells):
        pwi_per_cell[i].sort(key=lambda x: x[0][i])

    selected_embeddings = [x[n][1] for x in pwi_per_cell]

    combined_embeddings = combine_embeddings(selected_embeddings[0],
                                             selected_embeddings[1])
    print(combined_embeddings)

    hyperparameters = Hyperparameters()
    hyperparameters.parameters['TRAIN_EPOCHS'] = 2
    hyperparameters.parameters['TRAIN_ITERATIONS'] = 16
    # hyperparameters.parameters['SGDR_EPOCHS_PER_RESTART'] = hyperparameters.parameters['TRAIN_ITERATIONS'] * hyperparameters.parameters['TRAIN_EPOCHS'] #effectively makes SGDR into basic cosine annealing

    dataset = ImageDataset.get_cifar10()

    metamodel = MetaModel(hyperparameters)
    metamodel.populate_from_embedding(combined_embeddings)
    metamodel.build_model(dataset.images_shape)
    metamodel.evaluate(dataset)
    metamodel.save_metadata(dir_path)
    metamodel.save_model(dir_path)
    metamodel.clear_model()
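A hypothetical call, pointing at a results directory produced by an earlier run_test(); assuming lower performance values are better (as in find_best_indexes in Code Example #14), n=0 selects the top-ranked embedding for each cell under the ascending sort:

test_nth_in_dir('cell_evo_benchmarks_6', 0)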