# Example 1
def test_node_consistency():
    """Layer output shapes stay self-consistent through every transformation."""
    graph = DefaultClassifierGenerator(10, (32, 32, 3)).generate()

    def check(index, shape):
        # The targeted layer has the expected shape, and every layer's
        # cached output shape agrees with its declared output_shape.
        assert graph.layer_list[index].output.shape == shape
        for layer in graph.layer_list:
            assert layer.output.shape == layer.output_shape

    check(6, (16, 16, 64))

    graph.to_wider_model(6, 64)
    check(6, (16, 16, 128))

    graph.to_conv_deeper_model(6, 3)
    check(19, (16, 16, 128))

    graph.to_add_skip_model(6, 19)
    check(23, (16, 16, 128))

    graph.to_concat_skip_model(6, 19)
    check(25, (16, 16, 128))
# Example 2
    def search(self, x_train, y_train, x_test, y_test):
        """Override parent's search function. First model is randomly generated.

        On the first call, trains a default generated model and persists the
        searcher state. On later calls, expands the current best model via
        `transform`, trains each unique candidate, and returns the best model.
        """
        if not self.history:
            model = DefaultClassifierGenerator(self.n_classes,
                                               self.input_shape).generate()
            self.add_model(model, x_train, y_train, x_test, y_test)
            # Persist searcher state so an interrupted run can resume.
            pickle_to_file(self, os.path.join(self.path, 'searcher'))

        else:
            model = self.load_best_model()
            new_graphs = transform(Graph(model, False))
            new_models = []
            for graph in new_graphs:
                nm_graph = Graph(model, True)
                for args in graph.operation_history:
                    # Replay the recorded operation (name, *params) onto the
                    # weighted graph.
                    getattr(nm_graph, args[0])(*list(args[1:]))
                # BUG FIX: produce one model per transformed graph AFTER its
                # full operation history has been replayed.  The original
                # appended inside the inner loop, yielding one partially
                # transformed model per operation.
                new_models.append(nm_graph.produce_model())
            new_models = self._remove_duplicate(new_models)

            for model in new_models:
                if self.model_count < constant.MAX_MODEL_NUM:
                    self.add_model(model, x_train, y_train, x_test, y_test)
                    pickle_to_file(self, os.path.join(self.path, 'searcher'))

            backend.clear_session()

        return self.load_best_model()
# Example 3
def test_default_transform():
    """default_transform yields exactly one 44-layer graph that runs forward."""
    base_graph = DefaultClassifierGenerator(10, (32, 32, 3)).generate()
    graphs = default_transform(base_graph)
    produced = graphs[0].produce_model()
    produced(torch.Tensor(get_conv_data()))
    assert len(graphs) == 1
    assert len(graphs[0].layer_list) == 44
# Example 4
    def search(self, x_train, y_train, x_test, y_test):
        """Run one Bayesian-optimization search step.

        First call: train a default model, register it as the search-tree
        root, and seed the Gaussian process.  Later calls: pick the
        acquisition-maximizing candidate among the tree leaves, train it, and
        update the GP incrementally.  State is pickled after each step so an
        interrupted run can resume.
        """
        if not self.history:
            model = DefaultClassifierGenerator(self.n_classes,
                                               self.input_shape).generate()
            history_item = self.add_model(model, x_train, y_train, x_test,
                                          y_test)
            # -1 marks the root (no parent model).
            self.search_tree.add_child(-1, history_item['model_id'])
            self.gpr.first_fit(
                Graph(model).extract_descriptor(), history_item['accuracy'])
            # FIX: use a context manager — the original passed a bare
            # open(...) to pickle.dump and leaked the file handle.
            with open(os.path.join(self.path, 'searcher'), 'wb') as outfile:
                pickle.dump(self, outfile)
            del model
            backend.clear_session()

        else:
            model_ids = self.search_tree.get_leaves()
            new_model, father_id = self.maximize_acq(model_ids)

            history_item = self.add_model(new_model, x_train, y_train, x_test,
                                          y_test)
            self.search_tree.add_child(father_id, history_item['model_id'])
            self.gpr.incremental_fit(
                Graph(new_model).extract_descriptor(),
                history_item['accuracy'])
            with open(os.path.join(self.path, 'searcher'), 'wb') as outfile:
                pickle.dump(self, outfile)
            del new_model
            backend.clear_session()
# Example 5
def test_long_transform():
    """A chained sequence of wider/deeper/skip operations keeps the graph legal."""
    graph = DefaultClassifierGenerator(10, (32, 32, 3)).generate()
    history = [('to_wider_model', 1, 256), ('to_conv_deeper_model', 1, 3),
               ('to_concat_skip_model', 6, 11)]
    for name, *params in history:
        getattr(graph, name)(*params)
        # Producing a model after each step verifies intermediate validity.
        graph.produce_model()
    assert legal_graph(graph)
# Example 6
def test_default_transform():
    """default_transform on a 28x28x1 classifier yields one 42-layer graph."""
    generated = DefaultClassifierGenerator(10, (28, 28, 1)).generate()
    graphs = default_transform(generated)
    graphs[0].produce_model()
    assert len(graphs) == 1
    assert len(graphs[0].layer_list) == 42
# Example 7
def test_wider_conv():
    """The wider_* helpers return the expected stub layer types."""
    produced = DefaultClassifierGenerator(
        10, (28, 28, 3)).generate().produce_model()
    produced.set_weight_to_graph()
    layers = produced.graph.layer_list

    assert isinstance(wider_pre_conv(layers[1], 3), StubConv)
    assert isinstance(wider_bn(layers[2], 3, 3, 3), StubBatchNormalization)
    assert isinstance(wider_next_conv(layers[5], 3, 3, 3), StubConv)
# Example 8
 def init_search(self):
     """Seed the training queue with a default model plus its default transforms."""
     if self.verbose:
         print('Initializing search.')

     def next_id():
         # Allocate a fresh model id from the running counter.
         allocated = self.model_count
         self.model_count += 1
         return allocated

     root = DefaultClassifierGenerator(self.n_classes,
                                       self.input_shape).generate(
                                           self.default_model_len,
                                           self.default_model_width)
     root_id = next_id()
     # -1 marks the root graph as having no parent model.
     self.training_queue.append((root, -1, root_id))
     for child in default_transform(root):
         self.training_queue.append((child, root_id, next_id()))
     if self.verbose:
         print('Initialization finished.')
# Example 9
def test_long_transform():
    """A long chain of skip/wider/deeper operations still produces a model."""
    graph = Graph(DefaultClassifierGenerator(10, (32, 32, 2)).generate(), True)
    graph.to_concat_skip_model(2, 12)
    graph = Graph(graph.produce_model(), True)
    operations = [('to_concat_skip_model', 2, 7),
                  ('to_concat_skip_model', 7, 12), ('to_wider_model', 7, 32),
                  ('to_conv_deeper_model', 19, 5),
                  ('to_concat_skip_model', 12, 19),
                  ('to_concat_skip_model', 7, 19),
                  ('to_conv_deeper_model', 12, 3),
                  ('to_concat_skip_model', 12, 19),
                  ('to_concat_skip_model', 12, 34),
                  ('to_concat_skip_model', 2, 12)]

    # Replay every recorded operation.  The original looped over all but the
    # last entry, printed each tuple for debugging, then applied the final
    # operation by hand — equivalent to this loop, minus the leftover debug
    # prints.
    for name, *params in operations:
        getattr(graph, name)(*params)
    graph.produce_model()
# Example 10
    def search(self, x_train, y_train, x_test, y_test):
        """Run one incremental search step; exactly one phase executes per call.

        Phase 1 (no history): train the default generated model, record it as
        the search-tree root, and stash its descriptor/accuracy for the GP.
        Phase 2 (init queue non-empty): train the next queued child graph.
        Phase 3 (otherwise): fit the GP once if needed, then train the model
        that maximizes the acquisition function and update the GP.

        Args:
            x_train, y_train: training data/labels for candidate models.
            x_test, y_test: evaluation data/labels.
        """
        if not self.history:
            model = DefaultClassifierGenerator(self.n_classes,
                                               self.input_shape).generate(
                                                   self.default_model_len,
                                                   self.default_model_width)
            history_item = self.add_model(model, x_train, y_train, x_test,
                                          y_test)
            # -1 marks the root of the search tree (no parent model).
            self.search_tree.add_child(-1, history_item['model_id'])

            graph = Graph(model)
            self.init_search_queue = []
            # for child_graph in transform(graph):
            #     self.init_search_queue.append((child_graph, history_item['model_id']))
            # Accumulate descriptor/accuracy pairs for the GP's first fit.
            self.init_gpr_x.append(graph.extract_descriptor())
            self.init_gpr_y.append(history_item['accuracy'])
            # Persist searcher state so an interrupted run can resume.
            pickle_to_file(self, os.path.join(self.path, 'searcher'))
            return

        if self.init_search_queue:
            # Train one pre-seeded child graph per call.
            graph, father_id = self.init_search_queue.pop()
            model = graph.produce_model()
            history_item = self.add_model(model, x_train, y_train, x_test,
                                          y_test)
            self.search_tree.add_child(father_id, history_item['model_id'])
            self.init_gpr_x.append(graph.extract_descriptor())
            self.init_gpr_y.append(history_item['accuracy'])
            pickle_to_file(self, os.path.join(self.path, 'searcher'))
            return

        if not self.init_search_queue and not self.gpr.first_fitted:
            # First GP fit on all accumulated descriptor/accuracy pairs.
            self.gpr.first_fit(self.init_gpr_x, self.init_gpr_y)

        # Pick and train the acquisition-maximizing candidate.
        new_model, father_id = self.maximize_acq()

        history_item = self.add_model(new_model, x_train, y_train, x_test,
                                      y_test)
        self.search_tree.add_child(father_id, history_item['model_id'])
        self.gpr.incremental_fit(
            Graph(new_model).extract_descriptor(), history_item['accuracy'])
        pickle_to_file(self, os.path.join(self.path, 'searcher'))
# Example 11
    def search(self, x_train, y_train, x_test, y_test):
        """Override parent's search function. First model is randomly generated.

        Hill-climbing loop: repeatedly expand the current best model and
        train the unique candidates, stopping when accuracy stops improving
        or the model budget is exhausted.
        """
        if not self.history:
            seed = DefaultClassifierGenerator(self.n_classes,
                                              self.input_shape).generate()
            self.add_model(seed, x_train, y_train, x_test, y_test)

        best_so_far = 0.0
        while self.model_count < constant.MAX_MODEL_NUM:
            candidates = self._remove_duplicate(
                transform(self.load_best_model()))
            for candidate in candidates:
                if self.model_count < constant.MAX_MODEL_NUM:
                    self.add_model(candidate, x_train, y_train, x_test, y_test)

            current_best = max(item['accuracy'] for item in self.history)
            if current_best <= best_so_far:
                # No improvement this round — stop climbing.
                break
            best_so_far = current_best

        return self.load_best_model()
# Example 12
def test_deeper_conv_block():
    """deeper_conv_block emits a full conv block for the target layer."""
    graph = DefaultClassifierGenerator(10, (28, 28, 3)).generate()
    new_layers = deeper_conv_block(graph.layer_list[1], 3)
    assert len(new_layers) == Constant.CONV_BLOCK_DISTANCE + 1
# Example 13
from keras.datasets import cifar10

from autokeras.generator import DefaultClassifierGenerator
from autokeras.net_transformer import default_transform
from autokeras.preprocessor import OneHotEncoder
from autokeras.utils import ModelTrainer

if __name__ == '__main__':
    # End-to-end demo: CIFAR-10 → one-hot labels → default-transformed
    # classifier → training and evaluation.
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()

    print('Start Encoding')
    encoder = OneHotEncoder()
    encoder.fit(y_train)
    y_train, y_test = encoder.transform(y_train), encoder.transform(y_test)

    print('Start Generating')
    base_graph = DefaultClassifierGenerator(10, x_train.shape[1:]).generate()
    keras_model = default_transform(base_graph)[0].produce_model()

    print('Start Training')
    trainer = ModelTrainer(keras_model, x_train, y_train, x_test, y_test,
                           True)
    trainer.train_model(max_no_improvement_num=100, batch_size=128)
    print(keras_model.evaluate(x_test, y_test, True))
# Example 14
def test_model_trainer():
    """ModelTrainer completes a short run on a default 3-class model."""
    model = DefaultClassifierGenerator(
        3, (28, 28, 3)).generate().produce_model()
    train_data, test_data = get_processed_data()
    trainer = ModelTrainer(model, train_data, test_data, Accuracy, False)
    trainer.train_model(max_iter_num=3)