Code Example #1
def test_model():
    # Sample a fresh NASNet-style metamodel, wrap its first cell in a small
    # classifier, and measure accuracy on each test set after one epoch.
    metamodel = MetaModel(hyperparameters)
    metamodel.populate_with_nasnet_metacells()

    drop_path_tracker = DropPathTracker(
        hyperparameters.parameters['DROP_PATH_CHANCE'], 0, total_steps)
    first_cell = CellDataHolder(
        3, hyperparameters.parameters['TARGET_FILTER_DIMS'],
        metamodel.cells[0], False, drop_path_tracker, 0.)

    def get_model():
        cell_input = tf.keras.Input(dataset.images_shape)
        cell_output = tf.keras.layers.Conv2D(
            hyperparameters.parameters['TARGET_FILTER_DIMS'], 1, 1,
            'same')(cell_input)
        cell_output = first_cell.build([cell_output, cell_output])
        cell_output = cell_output[0]
        # Global average pooling over the spatial dimensions.
        cell_output = tf.keras.layers.Lambda(
            lambda x: tf.reduce_mean(input_tensor=x, axis=[1, 2]))(cell_output)
        cell_output = tf.keras.layers.Dropout(.5)(cell_output)
        cell_output = tf.keras.layers.Dense(10)(cell_output)
        model = tf.keras.Model(inputs=cell_input, outputs=cell_output)
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=hyperparameters.parameters['MAXIMUM_LEARNING_RATE'])
        model.compile(
            optimizer=optimizer,
            loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
            metrics=['accuracy'])
        return model

    accuracies = []
    for i in range(cell_samples):
        cell_model = get_model()
        cell_model.fit(dataset.train_images,
                       dataset.train_labels,
                       shuffle=True,
                       batch_size=hyperparameters.parameters['BATCH_SIZE'],
                       epochs=1,
                       callbacks=[drop_path_tracker])
        model_accuracies = []
        for test_set_index in range(len(dataset.test_set_images)):
            accuracy = cell_model.evaluate(
                dataset.test_set_images[test_set_index],
                dataset.test_set_labels[test_set_index])[-1]
            print(f'{dataset.test_set_names[test_set_index]} '
                  f'test set accuracy: {accuracy}')
            model_accuracies.append(accuracy)
        accuracies.append(model_accuracies)
        # Release graph state between samples to keep memory bounded.
        tf.keras.backend.clear_session()
        del cell_model

    return accuracies, metamodel.get_embedding()
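
For context, a minimal sketch of how this helper might be driven, assuming hyperparameters, dataset, cell_samples, and total_steps are defined at module scope as the snippet implies (the driver itself is hypothetical, not part of the source):

import numpy as np

# test_model returns (per-sample accuracy lists, architecture embedding).
accuracies, embedding = test_model()

# Average over the sampled models to get one score per test set.
mean_per_test_set = np.mean(np.asarray(accuracies), axis=0)
print(f'mean accuracy per test set: {mean_per_test_set}')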
Code Example #2
def eval_model(embedding=None, metamodel=None):
    model = metamodel
    if model is None:
        # Build a fresh model, either from a supplied embedding or from
        # randomly sampled NASNet-style metacells.
        model = MetaModel(hyperparameters)
        if embedding is None:
            model.populate_with_nasnet_metacells()
        else:
            model.populate_from_embedding(embedding)
        model.build_model(dataset.images_shape)
    model.evaluate(dataset, 1, dir_path)
    model.save_metadata(dir_path)
    model.save_model(dir_path)
    model.generate_graph(dir_path)
    model.clear_model()
    tf.keras.backend.clear_session()
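
A hedged usage sketch: eval_model covers three cases depending on which argument is supplied. Note that when a metamodel is passed in, the function assumes it has already been built, since build_model is only called on the freshly created path. The names marked hypothetical below are illustrations, not part of the source:

# Sample, build, and evaluate a brand-new architecture.
eval_model()

# Rebuild a previously discovered architecture from its embedding.
eval_model(embedding=saved_embedding)  # saved_embedding: hypothetical, e.g. loaded from disk

# Evaluate an already-built MetaModel instance directly.
eval_model(metamodel=existing_model)  # existing_model: hypothetical prebuilt MetaModel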
Code Example #3
def test_accuracy_at_different_train_amounts():
    dir_path = os.path.join(evo_dir, 'test_accuracy_epochs')
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)
    hyperparameters = Hyperparameters()
    hyperparameters.parameters['POPULATION_SIZE'] = 32
    hyperparameters.parameters['ROUNDS'] = 0
    hyperparameters.parameters['TRAIN_EPOCHS'] = 1
    hyperparameters.parameters['TRAIN_ITERATIONS'] = 16

    dataset = ImageDataset.get_cifar10()

    # Resume support: count the runs already saved in dir_path.
    existing_sims = [
        x for x in os.listdir(dir_path)
        if 'small' not in x and '.png' not in x
    ]

    num_already_done = len(existing_sims)
    num_remaining = (hyperparameters.parameters['POPULATION_SIZE']
                     - num_already_done)
    total_todo = hyperparameters.parameters['POPULATION_SIZE']
    population = []
    for round_num in range(num_remaining):
        print(f'Evaluating model {round_num + 1 + num_already_done} '
              f'of {total_todo}')
        new_candidate = MetaModel(hyperparameters)
        new_candidate.populate_with_nasnet_metacells()
        # Model names are timestamp-based; regenerate the timestamp here,
        # since all models in the initial population are otherwise created
        # within microseconds of each other.
        new_candidate.model_name = 'evo_' + str(time.time())
        new_candidate.build_model(dataset.images_shape)
        new_candidate.evaluate(dataset)
        new_candidate.save_model(dir_path)
        new_candidate.save_metadata(dir_path)
        population.append(new_candidate)
        new_candidate.clear_model()
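
The resume logic above hinges on the existing_sims filter; a small, purely hypothetical illustration of what it keeps and discards:

# Hypothetical directory contents after an interrupted run.
entries = ['evo_1600000000.123', 'evo_1600000111.456',
           'accuracy.png', 'evo_small_1600000222.789']
existing_sims = [x for x in entries if 'small' not in x and '.png' not in x]
print(existing_sims)  # ['evo_1600000000.123', 'evo_1600000111.456'] -> 2 already done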
Code Example #4
def train_nasnet_archs():
    num_models = 16

    def default_params(epochs: int) -> Hyperparameters:
        params = Hyperparameters()
        params.parameters['REDUCTION_EXPANSION_FACTOR'] = 2
        params.parameters['SGDR_EPOCHS_PER_RESTART'] = epochs
        params.parameters['TRAIN_ITERATIONS'] = epochs
        params.parameters['MAXIMUM_LEARNING_RATE'] = 0.025
        params.parameters['MINIMUM_LEARNING_RATE'] = 0.001
        params.parameters['DROP_PATH_TOTAL_STEPS_MULTI'] = 1
        params.parameters['BATCH_SIZE'] = 16
        return params

    def standard_params(epochs: int) -> Hyperparameters:
        params = default_params(epochs)
        params.parameters['TARGET_FILTER_DIMS'] = 32
        params.parameters['CELL_STACKS'] = [6, 1]
        params.parameters['CELL_LAYERS'] = 3
        return params

    def medium_params(epochs: int, filters=32) -> Hyperparameters:
        params = default_params(epochs)
        params.parameters['TARGET_FILTER_DIMS'] = filters
        params.parameters['CELL_STACKS'] = [5, 1]
        params.parameters['CELL_LAYERS'] = 3
        return params

    def small_params(epochs: int) -> Hyperparameters:
        params = default_params(epochs)
        params.parameters['TARGET_FILTER_DIMS'] = 24
        params.parameters['CELL_STACKS'] = [3, 1]
        params.parameters['CELL_LAYERS'] = 3
        return params

    def long_params() -> Hyperparameters:
        params = default_params(16)
        params.parameters['TARGET_FILTER_DIMS'] = 16
        params.parameters['CELL_STACKS'] = [3, 1]
        params.parameters['CELL_LAYERS'] = 2
        params.parameters['CONCATENATE_ALL'] = False
        params.parameters['GROUPS_PER_CELL'] = 7
        return params

    embeddings = []
    np.random.seed(0)

    for i in range(num_models):
        m = MetaModel(default_params(0))
        m.populate_with_nasnet_metacells()
        embeddings.append(m.get_embedding())

    # Re-seed so the long-parameter architectures are drawn from the same
    # random sequence as the standard ones.
    np.random.seed(0)
    long_embeddings = []
    for i in range(num_models):
        m = MetaModel(long_params())
        m.populate_with_nasnet_metacells()
        long_embeddings.append(m.get_embedding())

    multi_model_test('zs_small_3x3_16e_24f',
                     num_models=num_models,
                     hparams=small_params(16),
                     emb_queue=embeddings)
    multi_model_test('zs_small_3x3_32e_24f',
                     num_models=num_models,
                     hparams=small_params(32),
                     emb_queue=embeddings)
    multi_model_test('zs_medium_5x3_16e_24f',
                     num_models=num_models,
                     hparams=medium_params(32),
                     emb_queue=embeddings)
    multi_model_test('zs_medium_6x3_16e_32f',
                     num_models=num_models,
                     hparams=medium_params(16, 32),
                     emb_queue=embeddings)
    multi_model_test('zs_standard_6x3_16e_32f',
                     num_models=num_models,
                     hparams=medium_params(16),
                     emb_queue=embeddings)
    multi_model_test('zs_standard_6x3_32e_32f',
                     num_models=num_models,
                     hparams=medium_params(32),
                     emb_queue=embeddings)
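
Because each *_params helper layers overrides on top of default_params, one way to sanity-check a configuration before launching a run is to instantiate it and inspect the parameters dict used throughout these snippets. A sketch, assuming the helpers were lifted to module scope:

params = small_params(16)
for key in ('TARGET_FILTER_DIMS', 'CELL_STACKS', 'CELL_LAYERS',
            'TRAIN_ITERATIONS', 'BATCH_SIZE'):
    print(f'{key} = {params.parameters[key]}')
# Expected from the overrides above: 24, [3, 1], 3, 16, 16.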
Code Example #5
File: TaskEvo.py  Project: dkoleber/nas
def run_test(dir_name):
    cell_samples = 16
    base_population = 8
    evolved_population = 24

    mods = [
        ObjectModifier.SizeModifier, ObjectModifier.PerspectiveModifier,
        ObjectModifier.RotationModifier, ObjectModifier.ColorModifier
    ]
    hyperparameters = Hyperparameters()

    dir_path = os.path.join(evo_dir, dir_name)
    results_path = os.path.join(dir_path, 'results.json')

    if not os.path.exists(dir_path):
        os.makedirs(dir_path)

    # Load the dataset, or generate a new one if it doesn't exist yet.
    dataset_exists = (os.path.exists(dir_path)
                      and 'dataset.npy' in os.listdir(dir_path))
    if not dataset_exists:
        print('Generating dataset')
        DatasetGenerator.build_task_dataset(20000, (32, 32),
                                            10,
                                            4,
                                            2,
                                            dir_path,
                                            modifiers=mods,
                                            max_depth_of_target=1)
    dataset = DatasetGenerator.get_task_dataset(dir_path)

    # load previous test results if they exist
    data = {'embeddings': [], 'accuracies': []}
    if os.path.exists(results_path):
        with open(results_path, 'r') as fl:
            data = json.load(fl)

    def save_data():
        with open(results_path, 'w+') as fl:
            json.dump(data, fl, indent=4)

    def get_average_accuracy(model_index: int, cell_index: int):
        return np.mean(data['accuracies'][model_index][cell_index], axis=0)

    # Work out how many candidates remain, accounting for results restored
    # from a previous run.
    existing_population_size = len(data['embeddings'])
    remaining_base_population = (0 if existing_population_size > base_population
                                 else base_population - existing_population_size)
    remaining_evolved_population = (
        evolved_population if existing_population_size < base_population else
        evolved_population - (existing_population_size - base_population))

    print(
        f'Evaluating {remaining_base_population} base candidates ({base_population - remaining_base_population}/{base_population} done) '
        f'and {remaining_evolved_population} evolved candidates ({evolved_population - remaining_evolved_population}/{evolved_population} done)'
    )

    for i in range(remaining_base_population):
        print(f'Evaluating base candidate {i + 1} '
              f'of {remaining_base_population} remaining')
        metamodel = MetaModel(hyperparameters)
        metamodel.populate_with_nasnet_metacells()
        accuracies = test_model(metamodel, dataset, cell_samples)
        data['embeddings'].append(metamodel.get_embedding())
        data['accuracies'].append(accuracies)
        save_data()

    performances = [performance(x) for x in data['accuracies']]

    def find_best_indexes():
        # For each position, track the index of the candidate with the
        # smallest performance value (lower is better here).
        best_performances = np.full(performances[0].shape, 1., dtype=np.float32)
        best_indexes = np.zeros(performances[0].shape, dtype=int)
        for performance_index, x in enumerate(performances):
            for i, entry in enumerate(x):
                if best_performances[i] > entry:
                    best_performances[i] = entry
                    best_indexes[i] = performance_index

        return best_indexes

    for i in range(remaining_evolved_population):
        print(f'Evaluating evolved candidate {i + 1} '
              f'of {remaining_evolved_population} remaining')
        best_indexes = find_best_indexes()
        print(f'best indexes: {best_indexes}')
        combined_embeddings = combine_embeddings(
            data['embeddings'][best_indexes[0]],
            data['embeddings'][best_indexes[1]])
        mutated_embeddings = mutate_cell_from_embedding(combined_embeddings, 0)
        mutated_embeddings = mutate_cell_from_embedding(mutated_embeddings, 1)
        metamodel = MetaModel(hyperparameters)
        metamodel.populate_from_embedding(mutated_embeddings)
        accuracies = test_model(metamodel, dataset, cell_samples)
        data['embeddings'].append(metamodel.get_embedding())
        data['accuracies'].append(accuracies)
        performances.append(performance(accuracies))
        save_data()
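
Since save_data persists everything to results.json after each candidate, the results can be inspected offline. A minimal sketch of reading them back, mirroring the aggregation in get_average_accuracy above (the run directory name is a placeholder):

import json
import numpy as np

with open(os.path.join(evo_dir, 'my_run', 'results.json')) as fl:  # 'my_run' is hypothetical
    data = json.load(fl)

# Same aggregation as get_average_accuracy: mean accuracy for one model/cell pair.
first_model_avg = np.mean(data['accuracies'][0][0], axis=0)
print(f'average accuracy, first model, first cell sample: {first_model_avg}')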