Code example #1
def test_residual_ratio():
    hyperparameters = Hyperparameters()

    model = MetaModel(hyperparameters)
    model.populate_from_embedding(MetaModel.get_nasnet_embedding())
    model.cells[0].process_stuff()
    model.cells[1].process_stuff()
Code example #2
File: TaskEvo.py  Project: dkoleber/nas
def test_benchmark_models():
    dir_path = os.path.join(evo_dir, 'cell_evo_benchmarks_6')
    results_path = os.path.join(dir_path, 'results.json')
    mods = [
        ObjectModifier.SizeModifier, ObjectModifier.PerspectiveModifier,
        ObjectModifier.RotationModifier, ObjectModifier.ColorModifier
    ]
    hyperparameters = Hyperparameters()
    cell_samples = 8

    if not os.path.exists(dir_path):
        os.makedirs(dir_path)

    # load dataset, or create a new one if one doesn't exist
    dataset_exists = os.path.exists(dir_path) and 'dataset.npy' in os.listdir(
        dir_path)
    if not dataset_exists:
        print('Generating dataset')
        # args: size, dim, num_classes, vertices per object, objects per image
        DatasetGenerator.build_task_dataset(40000, (16, 16), 10, 4, 10,
                                            dir_path,
                                            modifiers=mods,
                                            max_depth_of_target=1)
    dataset = DatasetGenerator.get_task_dataset(dir_path)

    embeddings = [
        MetaModel.get_nasnet_embedding(),
        MetaModel.get_s1_embedding(),
        MetaModel.get_identity_embedding(),
        MetaModel.get_m1_sep3_embedding(),
        MetaModel.get_m1_sep7_embedding(),
        MetaModel.get_m1_sep3_serial_embedding(),
    ]

    data = {'embeddings': [], 'accuracies': []}
    if os.path.exists(results_path):
        with open(results_path, 'r') as fl:
            data = json.load(fl)

    def save_data():
        with open(results_path, 'w+') as fl:
            json.dump(data, fl, indent=4)

    for e in embeddings:
        metamodel = MetaModel(hyperparameters)
        metamodel.populate_from_embedding(e)
        accuracies = test_model(metamodel, dataset, cell_samples)
        data['embeddings'].append(metamodel.get_embedding())
        data['accuracies'].append(accuracies)
        save_data()

    performances = [performance(x) for x in data['accuracies']]
    print(performances)
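The helper performance() is not shown in these snippets. Judging from its use here and in example #8, where scores are compared with > and smaller values win, it appears to collapse each model's per-cell accuracy samples into one score per cell, lower being better. A minimal sketch under that assumption (the error-rate formula is a guess, not the project's implementation):

import numpy as np

def performance(accuracies):
    # Hypothetical stand-in: reduce each cell's accuracy samples to a
    # single score, where lower is better (mean error rate per cell).
    return np.array([1.0 - np.mean(cell_samples) for cell_samples in accuracies])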
Code example #3
def cell_performance_test_1():

    hyperparameters = Hyperparameters()

    dataset = ImageDataset.get_cifar10()

    def get_sorted(images, labels):
        sorted_by_class = [[] for _ in range(10)]
        for index in range(len(images)):
            sorted_by_class[labels[index, 0]].append(images[index, :, :])
        return sorted_by_class

    sorted_train = get_sorted(dataset.train_images, dataset.train_labels)
    sorted_test = get_sorted(dataset.test_images, dataset.test_labels)

    model = MetaModel(hyperparameters)

    # model.populate_with_nasnet_metacells()
    model.populate_from_embedding(MetaModel.get_nasnet_embedding())

    # model.build_model(dataset.images_shape)
    first_cell = CellDataHolder(3, 3, model.cells[0])

    cell_input = tf.keras.Input(dataset.images_shape)
    cell_output = first_cell.build([cell_input, cell_input])
    cell_model = tf.keras.Model(inputs=cell_input, outputs=cell_output)

    def gram_matrix(input_tensor):
        result = tf.linalg.einsum('bijc,bijd->bcd', input_tensor, input_tensor)
        input_shape = tf.shape(input_tensor)
        num_locations = tf.cast(input_shape[1] * input_shape[2], tf.float32)
        return result / num_locations

    optimizer = tf.keras.optimizers.Adam(
        learning_rate=hyperparameters.parameters['LEARNING_RATE'])

    def loss(real_image, fake_image, output):
        real_maximize = None  # TODO: INNER PRODUCT
        fake_minimize = None

    def train_step(input_image_1, input_image_2):
        with tf.GradientTape() as tape:
            image_1_output = cell_model(input_image_1)
            image_2_output = cell_model(input_image_2)

            total_loss = loss(input_image_1,
                              input_image_2, image_1_output) + loss(
                                  input_image_2, input_image_1, image_2_output)

        gradient = tape.gradient(total_loss, cell_model.trainable_variables)
        optimizer.apply_gradients(zip(gradient,
                                      cell_model.trainable_variables))
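The loss above is left as a TODO. One plausible completion, consistent with the gram_matrix helper already defined, is a Gram-matrix inner-product objective: push the output's Gram matrix toward the 'real' image's and away from the 'fake' one's. This is only a guess at the intent, assuming the cell output is a single 4-D tensor with the same channel count as the input:

def loss(real_image, fake_image, output):
    # Hypothetical completion of the TODO: Gram-matrix inner products.
    output_gram = gram_matrix(output)
    real_maximize = tf.reduce_sum(output_gram * gram_matrix(real_image))
    fake_minimize = tf.reduce_sum(output_gram * gram_matrix(fake_image))
    # Minimizing this maximizes similarity to the real image and
    # minimizes similarity to the fake one.
    return fake_minimize - real_maximize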
Code example #4
def eval_model(embedding=None, metamodel=None):
    model = metamodel
    if model is None:
        model = MetaModel(hyperparameters)
        if embedding is None:
            model.populate_with_nasnet_metacells()
        else:
            model.populate_from_embedding(embedding)
        model.build_model(dataset.images_shape)
    model.evaluate(dataset, 1, dir_path)
    model.save_metadata(dir_path)
    model.save_model(dir_path)
    model.generate_graph(dir_path)
    model.clear_model()
    tf.keras.backend.clear_session()
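Note that hyperparameters, dataset, and dir_path are read from the enclosing scope, and a model passed via metamodel is never built here, so the caller must have built it already. A hypothetical call site under those assumptions:

# Evaluate a fresh model from a reference embedding (hypothetical usage).
eval_model(embedding=MetaModel.get_nasnet_embedding())

# Evaluate an already-built model; 'existing_model' is a placeholder name.
eval_model(metamodel=existing_model)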
Code example #5
File: TaskEvo.py  Project: dkoleber/nas
def get_flops_for_cell_models_from_embeddings():
    tf.compat.v1.disable_eager_execution()
    dir_path = os.path.join(evo_dir, 'cell_evo_benchmarks')
    dataset = DatasetGenerator.get_task_dataset(dir_path)
    hyperparameters = Hyperparameters()

    embeddings = [
        MetaModel.get_nasnet_embedding(),
        MetaModel.get_s1_embedding(),
        MetaModel.get_identity_embedding(),
        MetaModel.get_m1_sep3_embedding(),
        MetaModel.get_m1_sep7_embedding()
    ]

    flops = []

    for e in embeddings:
        e_flops = []
        metamodel = MetaModel(hyperparameters)
        metamodel.populate_from_embedding(e)

        steps_per_epoch = math.ceil(
            len(dataset.train_labels) /
            metamodel.hyperparameters.parameters['BATCH_SIZE'])
        total_steps = metamodel.hyperparameters.parameters[
            'TRAIN_ITERATIONS'] * metamodel.hyperparameters.parameters[
                'TRAIN_EPOCHS'] * steps_per_epoch
        for meta_cell in metamodel.cells:
            drop_path_tracker = DropPathTracker(
                metamodel.hyperparameters.parameters['DROP_PATH_CHANCE'], 0,
                total_steps)
            first_cell = CellDataHolder(
                3, metamodel.hyperparameters.parameters['TARGET_FILTER_DIMS'],
                meta_cell, False, drop_path_tracker, 0.)

            cell_model = build_cell_model(
                first_cell, dataset.images_shape,
                metamodel.hyperparameters.parameters['TARGET_FILTER_DIMS'],
                metamodel.hyperparameters.parameters['MAXIMUM_LEARNING_RATE'])
            e_flops.append(
                get_flops_for_keras_model(cell_model, dataset.images_shape))
            tf.keras.backend.clear_session()
            del cell_model

        flops.append(e_flops)
        print(flops)

    print(flops)
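get_flops_for_keras_model is not shown either. A common way to count FLOPs for a Keras model in TF 1.x-style graph mode (which the disable_eager_execution() call above enables) is the tf.compat.v1.profiler API; a sketch along those lines, not necessarily the project's implementation:

def get_flops_for_keras_model(model, input_shape):
    # Profile the current default graph for floating-point operations.
    # input_shape is unused here; it is kept only to match the call site.
    session = tf.compat.v1.keras.backend.get_session()
    run_meta = tf.compat.v1.RunMetadata()
    opts = tf.compat.v1.profiler.ProfileOptionBuilder.float_operation()
    profile = tf.compat.v1.profiler.profile(graph=session.graph,
                                            run_meta=run_meta,
                                            cmd='op',
                                            options=opts)
    return profile.total_float_ops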
Code example #6
def test_model_accuracy_from_embedding(dir_name, embedding):
    dir_path = os.path.join(evo_dir, dir_name)
    # dataset = ImageDataset.get_cifar10_reduced()
    dataset = ImageDataset.get_cifar10()

    if not os.path.exists(dir_path):
        os.makedirs(dir_path)

    hyperparameters = Hyperparameters()

    model = MetaModel(hyperparameters)

    model.populate_from_embedding(embedding)

    model.build_model(dataset.images_shape)
    model.evaluate(dataset)
    model.save_model(dir_path)
    model.generate_graph(dir_path)
    model.save_metadata(dir_path)
    model.clear_model()
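A hypothetical invocation, reusing the NASNet reference embedding from the earlier examples (the directory name is a placeholder):

test_model_accuracy_from_embedding('nasnet_baseline',
                                   MetaModel.get_nasnet_embedding())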
Code example #7
File: TaskEvo.py  Project: dkoleber/nas
def test_nth_in_dir(dir_name, n: int):
    dir_path = os.path.join(evo_dir, dir_name)
    data_path = os.path.join(dir_path, 'results.json')

    with open(data_path, 'r') as fl:
        data = json.load(fl)

    performances = [performance(x) for x in data['accuracies']]

    # pair each model's per-cell performance scores with its embedding
    performances_with_indexes = [(performances[i], data['embeddings'][i])
                                 for i in range(len(performances))]
    num_cells = len(performances[0])  # should be 2
    pwi_per_cell = [performances_with_indexes.copy() for _ in range(num_cells)]

    for i in range(num_cells):
        pwi_per_cell[i].sort(key=lambda x: x[0][i])

    selected_embeddings = [x[n][1] for x in pwi_per_cell]

    combined_embeddings = combine_embeddings(selected_embeddings[0],
                                             selected_embeddings[1])
    print(combined_embeddings)

    hyperparameters = Hyperparameters()
    hyperparameters.parameters['TRAIN_EPOCHS'] = 2
    hyperparameters.parameters['TRAIN_ITERATIONS'] = 16
    # hyperparameters.parameters['SGDR_EPOCHS_PER_RESTART'] = hyperparameters.parameters['TRAIN_ITERATIONS'] * hyperparameters.parameters['TRAIN_EPOCHS']
    # (effectively turns SGDR into basic cosine annealing)

    dataset = ImageDataset.get_cifar10()

    metamodel = MetaModel(hyperparameters)
    metamodel.populate_from_embedding(combined_embeddings)
    metamodel.build_model(dataset.images_shape)
    metamodel.evaluate(dataset)
    metamodel.save_metadata(dir_path)
    metamodel.save_model(dir_path)
    metamodel.clear_model()
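A hypothetical call, pointing at the results directory produced by example #2. Each per-cell list is sorted ascending, so under the lower-is-better score assumption from example #8, n=0 selects the best embedding per cell:

test_nth_in_dir('cell_evo_benchmarks_6', 0)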
Code example #8
File: TaskEvo.py  Project: dkoleber/nas
def run_test(dir_name):
    cell_samples = 16
    base_population = 8
    evolved_population = 24

    mods = [
        ObjectModifier.SizeModifier, ObjectModifier.PerspectiveModifier,
        ObjectModifier.RotationModifier, ObjectModifier.ColorModifier
    ]
    hyperparameters = Hyperparameters()

    dir_path = os.path.join(evo_dir, dir_name)
    results_path = os.path.join(dir_path, 'results.json')

    if not os.path.exists(dir_path):
        os.makedirs(dir_path)

    # load dataset, or create a new one if one doesn't exist
    dataset_exists = os.path.exists(dir_path) and 'dataset.npy' in os.listdir(
        dir_path)
    if not dataset_exists:
        print('Generating dataset')
        DatasetGenerator.build_task_dataset(20000, (32, 32),
                                            10,
                                            4,
                                            2,
                                            dir_path,
                                            modifiers=mods,
                                            max_depth_of_target=1)
    dataset = DatasetGenerator.get_task_dataset(dir_path)

    # load previous test results if they exist
    data = {'embeddings': [], 'accuracies': []}
    if os.path.exists(results_path):
        with open(results_path, 'r') as fl:
            data = json.load(fl)

    def save_data():
        with open(results_path, 'w+') as fl:
            json.dump(data, fl, indent=4)

    def get_average_accuracy(model_index: int, cell_index: int):
        return np.mean(data['accuracies'][model_index][cell_index], axis=0)

    existing_population_size = len(data['embeddings'])
    remaining_base_population = max(
        0, base_population - existing_population_size)
    remaining_evolved_population = evolved_population \
        if existing_population_size < base_population \
        else evolved_population - (existing_population_size - base_population)

    print(
        f'Evaluating {remaining_base_population} base candidates ({base_population - remaining_base_population}/{base_population} done) '
        f'and {remaining_evolved_population} evolved candidates ({evolved_population - remaining_evolved_population}/{evolved_population} done)'
    )

    for i in range(remaining_base_population):
        print(
            f'Evaluating candidate {i + 1} of {remaining_base_population} base candidates'
        )
        metamodel = MetaModel(hyperparameters)
        metamodel.populate_with_nasnet_metacells()
        accuracies = test_model(metamodel, dataset, cell_samples)
        data['embeddings'].append(metamodel.get_embedding())
        data['accuracies'].append(accuracies)
        save_data()

    performances = [performance(x) for x in data['accuracies']]

    def find_best_indexes():
        best_performances = np.full(performances[0].shape,
                                    1.,
                                    dtype=np.float32)
        best_indexes = np.zeros(performances[0].shape, dtype=int)
        for performance_index, x in enumerate(performances):
            for i, entry in enumerate(x):
                if best_performances[i] > entry:
                    best_performances[i] = entry
                    best_indexes[i] = performance_index

        return best_indexes

    for i in range(remaining_evolved_population):
        print(
            f'Evaluating candidate {i + 1} of {remaining_evolved_population} evolved candidates'
        )
        best_indexes = find_best_indexes()
        print(f'best indexes: {best_indexes}')
        combined_embeddings = combine_embeddings(
            data['embeddings'][best_indexes[0]],
            data['embeddings'][best_indexes[1]])
        mutated_embeddings = mutate_cell_from_embedding(combined_embeddings, 0)
        mutated_embeddings = mutate_cell_from_embedding(mutated_embeddings, 1)
        metamodel = MetaModel(hyperparameters)
        metamodel.populate_from_embedding(mutated_embeddings)
        accuracies = test_model(metamodel, dataset, cell_samples)
        data['embeddings'].append(metamodel.get_embedding())
        data['accuracies'].append(accuracies)
        performances.append(performance(accuracies))
        save_data()
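The element-wise scan in find_best_indexes keeps, per cell position, the index of the model with the lowest (best) score. Assuming every score falls below the 1.0 initializer, the same result drops out of a single NumPy call; an equivalent alternative, not the author's code:

import numpy as np

def find_best_indexes_vectorized(performances):
    # argmin over models for each cell position; np.argmin keeps the
    # earliest index on ties, matching the strict '>' comparison above.
    return np.argmin(np.stack(performances), axis=0)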