Example #1
    def __init__(self, verbose=False, path=None, resume=False, searcher_args=None,
                 search_type=BayesianSearcher):
        """Initialize the instance.

        The classifier will be loaded from the files in 'path' if parameter 'resume' is True.
        Otherwise, a new one is created.
        Args:
            verbose: A boolean of whether the search process will be printed to stdout.
            path: A string. The path to a directory, where the intermediate results are saved.
            resume: A boolean. If True, the classifier will continue to previous work saved in path.
                Otherwise, the classifier will start a new search.
            searcher_args: A dictionary containing the parameters for the searcher's __init__ function.
            search_type: A constant denoting the hyperparameter search algorithm to be used.
        """
        super().__init__(verbose)

        if searcher_args is None:
            searcher_args = {}

        if path is None:
            path = rand_temp_folder_generator()

        self.path = path
        ensure_dir(path)
        if resume:
            classifier = pickle_from_file(os.path.join(self.path, 'classifier'))
            self.__dict__ = classifier.__dict__
            self.cnn = pickle_from_file(os.path.join(self.path, 'module'))
        else:
            self.y_encoder = None
            self.data_transformer = None
            self.verbose = verbose
            self.cnn = CnnModule(self.loss, self.metric, searcher_args, path, verbose, search_type)
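A minimal usage sketch for the constructor above (the class name and import path are assumptions based on the autokeras 0.x ImageClassifier, which shares this __init__): train once against a fixed path, then rebuild the classifier from the pickled state with resume=True.

from autokeras.image.image_supervised import ImageClassifier  # assumed import path

# First run: the search pickles 'classifier' and 'module' under path.
clf = ImageClassifier(path='/tmp/autokeras_demo', verbose=True)
# clf.fit(x_train, y_train)

# Later run: resume=True restores __dict__ and the CnnModule from those pickles.
clf = ImageClassifier(path='/tmp/autokeras_demo', resume=True)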
Example #3
def test_export_keras_model(_, _1):
    Constant.MAX_ITER_NUM = 1
    Constant.MAX_MODEL_NUM = 1
    Constant.SEARCH_MAX_ITER = 1
    Constant.T_MIN = 0.8
    train_x = np.random.rand(100, 25, 25, 1)
    train_y = np.random.randint(0, 5, 100)
    test_x = np.random.rand(100, 25, 25, 1)
    clean_dir(TEST_TEMP_DIR)
    clf = ImageClassifier(path=TEST_TEMP_DIR, verbose=False, resume=False)
    clf.n_epochs = 100
    clf.fit(train_x, train_y)
    score = clf.evaluate(train_x, train_y)
    assert score <= 1.0

    model_file_name = os.path.join(TEST_TEMP_DIR, 'test_keras_model.graph')
    clf.export_keras_model(model_file_name)
    from keras.models import load_model
    model = load_model(model_file_name)
    results = model.predict(test_x)
    assert len(results) == len(test_x)
    del model, results, model_file_name

    model_file_name = os.path.join(TEST_TEMP_DIR, 'test_autokeras_model.pkl')
    clf.export_autokeras_model(model_file_name)
    from autokeras.utils import pickle_from_file
    model = pickle_from_file(model_file_name)
    results = model.predict(test_x)
    assert len(results) == len(test_x)
    score = model.evaluate(train_x, train_y)
    assert score <= 1.0
    before = model.graph
    model.fit(train_x, train_y, train_x, train_y)
    assert model.graph == before
    clean_dir(TEST_TEMP_DIR)

    clf = ImageRegressor(path=TEST_TEMP_DIR, verbose=False, resume=False)
    clf.n_epochs = 100
    clf.fit(train_x, train_y)
    score = clf.evaluate(train_x, train_y)
    assert score >= 0.0

    model_file_name = os.path.join(TEST_TEMP_DIR, 'test_keras_model.graph')
    clf.export_keras_model(model_file_name)
    from keras.models import load_model
    model = load_model(model_file_name)
    results = model.predict(test_x)
    assert len(results) == len(test_x)
    del model, results, model_file_name

    model_file_name = os.path.join(TEST_TEMP_DIR, 'test_autokeras_model.pkl')
    clf.export_autokeras_model(model_file_name)
    from autokeras.utils import pickle_from_file
    model = pickle_from_file(model_file_name)
    results = model.predict(test_x)
    assert len(results) == len(test_x)
    score = model.evaluate(train_x, train_y)
    assert score >= 0.0
    clean_dir(TEST_TEMP_DIR)
Example #4
def test_export_keras_model(_, _1):
    Constant.MAX_ITER_NUM = 1
    Constant.MAX_MODEL_NUM = 1
    Constant.SEARCH_MAX_ITER = 1
    Constant.T_MIN = 0.8
    train_x = np.random.rand(100, 25, 25, 1)
    train_y = np.random.randint(0, 5, 100)
    test_x = np.random.rand(100, 25, 25, 1)
    clean_dir(TEST_TEMP_DIR)
    clf = ImageClassifier(path=TEST_TEMP_DIR, verbose=False, resume=False)
    clf.n_epochs = 100
    clf.fit(train_x, train_y)
    score = clf.evaluate(train_x, train_y)
    assert score <= 1.0

    model_file_name = os.path.join(TEST_TEMP_DIR, 'test_keras_model.graph')
    clf.export_keras_model(model_file_name)
    from keras.models import load_model
    model = load_model(model_file_name)
    results = model.predict(test_x)
    assert len(results) == len(test_x)
    del model, results, model_file_name

    model_file_name = os.path.join(TEST_TEMP_DIR, 'test_autokeras_model.pkl')
    clf.export_autokeras_model(model_file_name)
    from autokeras.utils import pickle_from_file
    model = pickle_from_file(model_file_name)
    results = model.predict(test_x)
    assert len(results) == len(test_x)
    score = model.evaluate(train_x, train_y)
    assert score <= 1.0
    before = model.graph
    model.fit(train_x, train_y)
    assert model.graph == before
    clean_dir(TEST_TEMP_DIR)

    clf = ImageRegressor(path=TEST_TEMP_DIR, verbose=False, resume=False)
    clf.n_epochs = 100
    clf.fit(train_x, train_y)
    score = clf.evaluate(train_x, train_y)
    assert score >= 0.0

    model_file_name = os.path.join(TEST_TEMP_DIR, 'test_keras_model.graph')
    clf.export_keras_model(model_file_name)
    from keras.models import load_model
    model = load_model(model_file_name)
    results = model.predict(test_x)
    assert len(results) == len(test_x)
    del model, results, model_file_name

    model_file_name = os.path.join(TEST_TEMP_DIR, 'test_autokeras_model.pkl')
    clf.export_autokeras_model(model_file_name)
    from autokeras.utils import pickle_from_file
    model = pickle_from_file(model_file_name)
    results = model.predict(test_x)
    assert len(results) == len(test_x)
    score = model.evaluate(train_x, train_y)
    assert score >= 0.0
    clean_dir(TEST_TEMP_DIR)
Example #5
    def __init__(self,
                 verbose=False,
                 path=None,
                 resume=False,
                 searcher_args=None):
        super().__init__(verbose)

        if searcher_args is None:
            searcher_args = {}

        if path is None:
            path = temp_folder_generator()

        self.cnn = CnnModule(self.loss, self.metric, searcher_args, path,
                             verbose)

        self.path = path
        if has_file(os.path.join(self.path, 'text_classifier')) and resume:
            classifier = pickle_from_file(
                os.path.join(self.path, 'text_classifier'))
            self.__dict__ = classifier.__dict__
        else:
            self.y_encoder = None
            self.data_transformer = None
            self.verbose = verbose
Example #6
def visualize(path):
    cnn_module = pickle_from_file(os.path.join(path, 'module'))
    cnn_module.searcher.path = path
    for item in cnn_module.searcher.history:
        model_id = item['model_id']
        graph = cnn_module.searcher.load_model_by_id(model_id)
        to_pdf(graph, os.path.join(path, str(model_id)))
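A minimal driver for the helper above, assuming Graphviz is installed (for to_pdf) and that the argument is an Auto-Keras output directory containing the pickled 'module' file:

if __name__ == '__main__':
    import sys
    # Render every model explored during the search to a PDF next to its pickle.
    visualize(sys.argv[1])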
Example #7
def visualize(path):
    cnn_module = pickle_from_file(os.path.join(path, 'module'))
    cnn_module.searcher.path = path
    for item in cnn_module.searcher.history:
        model_id = item['model_id']
        graph = cnn_module.searcher.load_model_by_id(model_id)
        to_pdf(graph, os.path.join(path, str(model_id)))
Example #8
    def __init__(self, verbose=False, path=constant.DEFAULT_SAVE_PATH, resume=False,
                 searcher_args=None):
        """Initialize the instance.

        The classifier will be loaded from the files in 'path' if parameter 'resume' is True.
        Otherwise, a new one is created.

        Args:
            verbose: A boolean of whether the search process will be printed to stdout.
            path: A string. The path to a directory, where the intermediate results are saved.
            resume: A boolean. If True, the classifier will continue to previous work saved in path.
                Otherwise, the classifier will start a new search.

        """
        if searcher_args is None:
            searcher_args = {}

        if has_file(os.path.join(path, 'classifier')) and resume:
            classifier = pickle_from_file(os.path.join(path, 'classifier'))
            self.__dict__ = classifier.__dict__
            self.path = path
        else:
            self.y_encoder = None
            self.data_transformer = None
            self.verbose = verbose
            self.searcher = False
            self.path = path
            self.searcher_args = searcher_args
            ensure_dir(path)
Example #9
def test_export_keras_model(_):
    Constant.MAX_ITER_NUM = 1
    Constant.MAX_MODEL_NUM = 1
    Constant.SEARCH_MAX_ITER = 1
    Constant.T_MIN = 0.8
    train_x = np.random.rand(100, 25, 25, 1)
    train_y = np.random.randint(0, 5, 100)
    test_x = np.random.rand(100, 25, 25, 1)
    path = 'tests/resources/temp'
    clean_dir(path)
    clf = ImageClassifier(path=path, verbose=False, resume=False)
    clf.n_epochs = 100
    clf.fit(train_x, train_y)
    score = clf.evaluate(train_x, train_y)
    assert score <= 1.0

    model_file_name = os.path.join(path, 'test_keras_model.h5')
    clf.export_keras_model(model_file_name)
    from keras.models import load_model
    model = load_model(model_file_name)
    results = model.predict(test_x)
    assert len(results) == len(test_x)
    del model, results, model_file_name

    model_file_name = os.path.join(path, 'test_autokeras_model.pkl')
    clf.export_autokeras_model(model_file_name)
    from autokeras.utils import pickle_from_file
    model = pickle_from_file(model_file_name)
    results = model.predict(test_x)
    assert len(results) == len(test_x)
    score = model.evaluate(train_x, train_y)
    assert score <= 1.0
    clean_dir(path)
Example #10
    def __init__(self,
                 verbose=False,
                 path=None,
                 resume=False,
                 searcher_args=None,
                 augment=None):
        """Initialize the instance.

        The classifier will be loaded from the files in 'path' if parameter 'resume' is True.
        Otherwise, a new one is created.

        Args:
            verbose: A boolean of whether the search process will be printed to stdout.
            path: A string. The path to a directory, where the intermediate results are saved.
            resume: A boolean. If True, the classifier will continue to previous work saved in path.
                Otherwise, the classifier will start a new search.
            augment: A boolean value indicating whether the data needs augmentation. If not defined, the
                value of Constant.DATA_AUGMENTATION (True by default) is used.

        """
        super().__init__(verbose)

        if searcher_args is None:
            searcher_args = {}

        if path is None:
            path = temp_folder_generator()

        if augment is None:
            augment = Constant.DATA_AUGMENTATION

        self.path = path
        if has_file(os.path.join(self.path, 'classifier')) and resume:
            classifier = pickle_from_file(os.path.join(self.path,
                                                       'classifier'))
            self.__dict__ = classifier.__dict__
            self.cnn = pickle_from_file(os.path.join(self.path, 'module'))
        else:
            self.y_encoder = None
            self.data_transformer = None
            self.verbose = verbose
            self.augment = augment
            self.cnn = CnnModule(self.loss, self.metric, searcher_args, path,
                                 verbose)

        self.resize_height = None
        self.resize_width = None
Example #11
    def predict_autokeras(self):
        #Load images
        test_data, test_labels = load_image_dataset(csv_file_path=self.PREDICT_CSV_DIR, images_path=self.RESIZE_PREDICT_IMG_DIR)
        test_data = test_data.astype('float32') / 255
        print("Test data shape:", test_data.shape)

        autokeras_model = pickle_from_file(self.MODEL_DIR)
        autokeras_score = autokeras_model.evaluate(test_data, test_labels)
        print(autokeras_score)
Example #12
def visualize(path):
    """ visualize the model that is defined in the automodels folder lying above:
    that is the folder as generated by Auto-Keras in which it drops its (temporary) models"""
    cnn_module = pickle_from_file(os.path.join(path, 'module'))
    cnn_module.searcher.path = path
    for item in cnn_module.searcher.history:
        model_id = item['model_id']
        graph = cnn_module.searcher.load_model_by_id(model_id)
        to_pdf(graph, os.path.join(path, str(model_id)))
Example #13
    def load_preprocessors(self, path):
        preprocessors = utils.pickle_from_file(path)
        configs = preprocessors['configs']
        weights = preprocessors['weights']
        for name, config in configs.items():
            block = self._get_block(name)
            block.set_config(config)
        for name, weight in weights.items():
            block = self._get_block(name)
            block.set_weights(weight)
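The file this method reads is just a pickled dict with 'configs' and 'weights' keys. A hypothetical counterpart that writes such a file (a sketch using plain pickle; save_preprocessors and the get_config()/get_weights() accessors are assumptions, not the library's confirmed API):

import pickle

def save_preprocessors(blocks, path):
    # 'blocks' is assumed to map block names to objects exposing
    # get_config()/get_weights(), mirroring set_config()/set_weights() above.
    preprocessors = {
        'configs': {name: block.get_config() for name, block in blocks.items()},
        'weights': {name: block.get_weights() for name, block in blocks.items()},
    }
    with open(path, 'wb') as f:
        pickle.dump(preprocessors, f)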
Example #14
def run_searcher_once(x_train, y_train, x_test, y_test, path):
    if constant.LIMIT_MEMORY:
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        sess = tf.Session(config=config)
        init = tf.global_variables_initializer()
        sess.run(init)
        backend.set_session(sess)
    searcher = pickle_from_file(os.path.join(path, 'searcher'))
    searcher.search(x_train, y_train, x_test, y_test)
Example #15
def visualize(path):
    """
        constructs a dot graph, visualizing the found model architectures
    Args:
        path: of the model files
    """
    cnn_module = pickle_from_file(os.path.join(path, 'module'))
    cnn_module.searcher.path = path
    for item in cnn_module.searcher.history:
        model_id = item['model_id']
        graph = cnn_module.searcher.load_model_by_id(model_id)
        to_pdf(graph, os.path.join(path, str(model_id)))
Example #16
    def predict(self, x_test):
        # Reload the pickled classifier state before predicting.
        classifier = pickle_from_file(os.path.join(self.path, 'classifier'))
        self.__dict__ = classifier.__dict__
        self.net.eval()
        test_data = self.preprocess(x_test)
        test_data = self.data_transformer.transform_test(test_data)
        outputs = []
        with torch.no_grad():
            for index, inputs in enumerate(test_data):
                outputs.append(self.net(inputs).numpy())
        # Concatenate per-batch outputs and map them back to the original labels.
        output = reduce(lambda x, y: np.concatenate((x, y)), outputs)
        predicted = self.encoder.inverse_transform(output)
        return predicted
Example #17
    def load_preprocessors(self, path):
        """Load the preprocessors in the hypermodel from a single file.

        Args:
            path: String. The path to a single file.
        """
        if self.contains_hyper_block():
            self._plain_graph_hm.load_preprocessors(path)
            return
        preprocessors = utils.pickle_from_file(path)
        for name, state in preprocessors.items():
            block = self._get_block(name)
            block.set_state(state)
Example #18
    def fit(self,
            n_output_node,
            input_shape,
            train_data,
            test_data,
            time_limit=24 * 60 * 60):
        """ Search the best CnnModule.

        Args:
            n_output_node: An integer representing the number of output nodes in the final layer.
            input_shape: A tuple expressing the shape of every training entry. For example,
                the MNIST dataset would be (28, 28, 1).
            train_data: A PyTorch DataLoader instance representing the training data.
            test_data: A PyTorch DataLoader instance representing the testing data.
            time_limit: An integer representing the time limit (in seconds) on searching for models.
        """
        # Create the searcher and save on disk
        if not self.searcher:
            input_shape = input_shape[1:]
            self.searcher_args['n_output_node'] = n_output_node
            self.searcher_args['input_shape'] = input_shape
            self.searcher_args['path'] = self.path
            self.searcher_args['metric'] = self.metric
            self.searcher_args['loss'] = self.loss
            self.searcher_args['verbose'] = self.verbose
            searcher = Searcher(**self.searcher_args)
            self._save_searcher(searcher)
            self.searcher = True

        start_time = time.time()
        time_remain = time_limit
        try:
            while time_remain > 0:
                searcher = pickle_from_file(os.path.join(
                    self.path, 'searcher'))
                searcher.search(train_data, test_data, int(time_remain))
                if len(self._load_searcher().history
                       ) >= Constant.MAX_MODEL_NUM:
                    break
                time_elapsed = time.time() - start_time
                time_remain = time_limit - time_elapsed
            # if no search executed during the time_limit, then raise an error
            if time_remain <= 0:
                raise TimeoutError
        except TimeoutError:
            if len(self._load_searcher().history) == 0:
                raise TimeoutError(
                    "Search Time too short. No model was found during the search time."
                )
            elif self.verbose:
                print('Time is out.')
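The loop above round-trips the searcher through a pickle on every iteration. A minimal sketch of the save counterpart it calls (the method name _save_searcher is taken from the call above; pickle_to_file as the mirror of pickle_from_file is an assumption, and the real module may implement this differently):

    def _save_searcher(self, searcher):
        # Persist the searcher so the next loop iteration can reload it.
        pickle_to_file(searcher, os.path.join(self.path, 'searcher'))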
Example #19
def predict(conf):
    try:
        if conf.model.framework == 'auto_sklearn' or conf.model.framework == 'tpot':
            with open(conf.model.model_path, 'rb') as f:
                my_model = pickle.load(f)

            x = numpy.load(
                os.path.join(AUTO_ML_DATA_PATH,
                             conf.model.training_data_filename))
            y = numpy.load(
                os.path.join(AUTO_ML_DATA_PATH,
                             conf.model.training_labels_filename))

            if conf.model.preprocessing_object.input_data_type == 'png':
                x = reformat_data(x)

        elif conf.model.framework == 'auto_keras':
            my_model = pickle_from_file(conf.model.model_path)
            x, y = load_ml_data(conf.model.validation_data_filename,
                                conf.model.validation_labels_filename, False,
                                conf.model.make_one_hot_encoding_task_binary)
        else:
            print('notimpl (epic fail)')

        print('about to pred.')
        y_pred = my_model.predict(x)
        print('about to acc')

        if conf.scoring_strategy == 'accuracy':
            score = sklearn.metrics.accuracy_score(y, y_pred)
        elif (conf.scoring_strategy == 'precision'):
            score = sklearn.metrics.average_precision_score(y, y_pred)
        elif (conf.scoring_strategy == 'roc_auc'):
            score = sklearn.metrics.roc_auc_score(y, y_pred)
        else:
            score = 0
            print('epic fail! no Strat applied')

        print('savy!')
        conf.status = 'success'
        conf.score = str(round(score, 4))
        conf.save()

    except Exception as e:
        conf.status = 'fail'
        conf.additional_remarks = e
        conf.save()
Example #20
    def load_preprocessors(self, path):
        """Load the preprocessors in the hypermodel from a single file.

        Args:
            path: String. The path to a single file.
        """
        if self.contains_hyper_block():
            self._plain_graph_hm.load_preprocessors(path)
            return
        preprocessors = utils.pickle_from_file(path)
        configs = preprocessors['configs']
        weights = preprocessors['weights']
        for name, config in configs.items():
            block = self._get_block(name)
            block.set_config(config)
        for name, weight in weights.items():
            block = self._get_block(name)
            block.set_weights(weight)
Example #21
def dog_submit():
    X_train, y_train, X_test = load_dog_breed()

    target = pd.read_csv("datasets/dog-breed/sample_submission.csv")

    column = target.columns.values[1:]

    id = target.id

    model = pickle_from_file("best_auto_keras_model_dog.h5")
    results = model.predict_proba(X_test)

    df = pd.DataFrame(columns=column, data=results)

    df.insert(0, "id", id, True)

    print(df.head())

    df.to_csv("dog_auto_keras_submission.csv", index=False)
Example #22
    def predict(self):
        ret = self.mkdatasets()
        if ret is False:
            self.log.info("生成数据失败")
            return False
        model = pickle_from_file(self.projectinfo['modpath'])
        self.woker_percent(10, 1800)  # default ETA: 30 minutes

        result = []
        df_cells = pd.read_csv(self.project_predict_labels_csv)
        ts1 = int(time.time()*1000)
        for index, cellinfo in df_cells.iterrows():
            ts2 = int(time.time()*1000)
            needtime = (ts2 - ts1) * (df_cells.shape[0] - index)
            if index > 0:
                self.log.info("step %d / %d 预计还需要 %d秒" % (index, df_cells.shape[0] -1, needtime/1000))
            else:
                self.log.info("step %d / %d" % (index, df_cells.shape[0] -1))
            ts1 = ts2
            # Report task progress to the server; this stage accounts for 90%
            self.woker_percent(int(90 * (index + 1) / (df_cells.shape[0] -1)), needtime/1000)

            celltype = str(cellinfo['Label'])
            cellpath = os.path.join(self.project_resize_predict_dir, cellinfo['File Name'])
            resize = self.projectinfo['parameter_resize']

            img = load_img(cellpath)
            x = img_to_array(img)
            x = x.astype('float32') / 255
            x = np.reshape(x, (1, resize, resize, 3))
            y = model.predict(x)
            correct = 1
            if celltype != str(y[0]):
                correct = 0
            result.append([cellpath, celltype, str(y[0]), correct])

        # Aggregate the results for display in the front end
        df_result = pd.DataFrame(result, columns=['cellpath', 'true_label', 'predict_label', 'correct'])
        #df_result.to_csv('111.csv', quoting = 1, mode = 'w', index = False, header = True)
        self.result_predict(df_result)
        return True
Example #23
def plant_seedlings():
    X_train, y_train, X_test = load_plant_seedlings()

    target = pd.read_csv("datasets/plant-seedlings/sample_submission.csv")

    column = target.columns.values[1:]

    id = np.array(target.file)
    print(id.shape)
    print(X_test.shape)

    model = pickle_from_file("best_auto_keras_model_plant.h5")
    results = model.predict(X_test)

    df = pd.DataFrame(columns=column, data=results)

    df.insert(0, "id", id, True)

    print(df.head())

    df.to_csv("plant_auto_keras_submission.csv", index=False)
Example #24
    def __init__(self, verbose=False, searcher_type=None, path=constant.DEFAULT_SAVE_PATH, resume=False,
                 searcher_args=None):
        """Initialize the instance.

        The classifier will be loaded from file if the directory in 'path' has a saved classifier.
        Otherwise, a new one is created.
        """
        if searcher_args is None:
            searcher_args = {}

        if has_file(os.path.join(path, 'classifier')) and resume:
            classifier = pickle_from_file(os.path.join(path, 'classifier'))
            self.__dict__ = classifier.__dict__
            self.path = path
        else:
            self.y_encoder = None
            self.verbose = verbose
            self.searcher = False
            self.searcher_type = searcher_type
            self.path = path
            self.searcher_args = searcher_args
            ensure_dir(path)
Example #25
def invasive_submit():
    X_train, y_train, X_test = load_invasive_species()

    target = pd.read_csv("datasets/invasive-species/sample_submission.csv")

    column = target.columns.values[1:]

    id = np.array(dtype=int, object=target.name)

    print(id.shape)
    print(X_test.shape)

    model = pickle_from_file("best_auto_keras_model_invasive.h5")
    results = model.predict(X_test)

    df = pd.DataFrame(columns=column, data=results)

    df.insert(0, "name", id, True)

    print(df.head())

    df.to_csv("invasive_auto_keras_submission.csv", index=False)
Example #26
    def __init__(self, verbose=False, path=None, resume=False, searcher_args=None, augment=None):
        """Initialize the instance.

        The classifier will be loaded from the files in 'path' if parameter 'resume' is True.
        Otherwise, a new one is created.

        Args:
            verbose: A boolean of whether the search process will be printed to stdout.
            path: A string. The path to a directory, where the intermediate results are saved.
            resume: A boolean. If True, the classifier will continue to previous work saved in path.
                Otherwise, the classifier will start a new search.
            augment: A boolean value indicating whether the data needs augmentation. If not defined, the
                value of Constant.DATA_AUGMENTATION (True by default) is used.

        """
        super().__init__(verbose)
        if searcher_args is None:
            searcher_args = {}

        if path is None:
            path = temp_folder_generator()

        if augment is None:
            augment = Constant.DATA_AUGMENTATION

        if has_file(os.path.join(path, 'classifier')) and resume:
            classifier = pickle_from_file(os.path.join(path, 'classifier'))
            self.__dict__ = classifier.__dict__
            self.path = path
        else:
            self.y_encoder = None
            self.data_transformer = None
            self.verbose = verbose
            self.searcher = False
            self.path = path
            self.searcher_args = searcher_args
            self.augment = augment
            ensure_dir(path)
Example #27
    def predict_autokeras2(self):
        autokeras_model = pickle_from_file(self.MODEL_DIR)
        result = []
        crop_cells = []

        #Load images
        for label in os.listdir(self.RESIZE_PREDICT_IMG_DIR):
            celltype = {'type': label, 'total': 0, 'count_false': 0}

            images = os.listdir(os.path.join(self.RESIZE_PREDICT_IMG_DIR, label))
            total = len(images)
            count_false = 0
            for index in range(0, total):
                img_path = os.path.join(self.RESIZE_PREDICT_IMG_DIR, label, images[index])
                if not os.path.exists(img_path):
                    continue

                img = load_img(img_path)
                x = img_to_array(img)
                x = x.astype('float32') / 255
                x = np.reshape(x, (1, self.RESIZE, self.RESIZE, 3))
                y = autokeras_model.predict(x)
                crop_cells.append({'url': img_path[len(self.scratchdir) + 1:], 'type': label, 'predict': y[0]})

                if str(label) != str(y[0]):
                    #print("%s %s result=%s" % (images[index], label, y[0]))
                    count_false = count_false + 1
                    error_image_dir = os.path.join(self.PREDICT_ERROR_IMG_DIR, label)
                    if not os.path.exists(error_image_dir):
                        os.makedirs(error_image_dir)
                    copyfile(img_path, os.path.join(error_image_dir, images[index]))

            celltype['total'] = int(total)
            celltype['count_false'] = int(count_false)
            result.append(celltype)
            print("%s 的个数/准确率:%d %f 出错的个数%d" % (label, total, (total - count_false) / total, count_false))
        return result, crop_cells
Example #28
def recognizeByAutoKeras(request):
    from flask import render_template

    # Determine whether the request is a POST
    if request.method == 'POST':
        # Handle the case where no file was sent
        if 'image' not in request.files:
            return render_template("error.html", reason="could not retrieve the file")

        # Extract the uploaded file
        file = request.files['image']

        # Handle a missing file name
        if file.filename == '':
            return render_template("error.html", reason="could not retrieve the file name")

        # Check that the file exists
        if file:
            import base64
            import cv2
            import numpy as np
            from autokeras.utils import pickle_from_file

            # Private parameters live here
            import Params

            MAX_WIDTH = 640
            MAX_HEIGHT = 480

            # Build the payload; resize overly large images.
            # The image is base64-encoded.
            img_array = np.asarray(bytearray(file.stream.read()),
                                   dtype=np.uint8)
            img = cv2.imdecode(img_array, 1)

            if (img.shape[0] > MAX_HEIGHT):
                img = cv2.resize(img, (int(
                    img.shape[1] * MAX_WIDTH // img.shape[0]), MAX_HEIGHT))
            if (img.shape[1] > MAX_WIDTH):
                img = cv2.resize(
                    img, (MAX_WIDTH,
                          int(img.shape[0] * MAX_HEIGHT // img.shape[1])))

            encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 85]
            result, encimg = cv2.imencode(".jpeg", img, encode_param)
            imageBin = base64.b64encode(bytes(encimg))
            imageString = imageBin.decode()

            # Classify the image with the model built by AutoKeras
            img = cv2.imdecode(img_array, 1)

            # center cropping
            w = img.shape[1]
            h = img.shape[0]
            edge = np.min(img.shape)
            img = img[(h - edge) // 2:(h + edge) // 2,
                      (w - edge) // 2:(w + edge) // 2, :]
            img = cv2.resize(img, (224, 224))

            img = img[np.newaxis, :, :, :]
            clf = pickle_from_file(Params.model_path)
            result = clf.predict(img)
            result = int(result[0])
            resultString = "ある" if result == 1 else "ない"

            # Embed the image in the result HTML.
            # The image is embedded because uploading from Cloud Functions to
            # Storage is blocked (a guard against rogue uploaders, but an
            # annoying constraint nonetheless...)
            return render_template("result.html",
                                   image_string=imageString,
                                   class_name=f"class_{result}",
                                   result=resultString)

    # Fallback for GET and other unexpected methods
    return render_template("error.html", reason="unexpected request")
Example #29
def run_searcher_once(train_data, test_data, path, timeout):
    if Constant.LIMIT_MEMORY:
        pass
    searcher = pickle_from_file(os.path.join(path, 'searcher'))
    searcher.search(train_data, test_data, timeout)
Example #30
    def load_searcher(self):
        return pickle_from_file(os.path.join(self.path, 'searcher'))
Example #31
    def load_model_by_id(self, model_id):
        return pickle_from_file(os.path.join(self.path, str(model_id) + '.graph'))
Example #34
    def load_model_by_id(self, model_id):
        return pickle_from_file(os.path.join(self.path, str(model_id) + '.h5'))
Example #35
def to_pdf(graph, path):
    dot = Digraph(comment='The Round Table')

    for index, node in enumerate(graph.node_list):
        dot.node(str(index), str(node.shape))

    for u in range(graph.n_nodes):
        for v, layer_id in graph.adj_list[u]:
            dot.edge(str(u), str(v), str(graph.layer_list[layer_id]))

    dot.render(path)


def visualize(path):
    cnn_module = pickle_from_file(os.path.join(path, 'module'))
    #cnn_module.searcher.path = path
    #cnn_module.searcher =
    for item in cnn_module.searcher.history:
        model_id = item['model_id']
        graph = cnn_module.searcher.load_model_by_id(model_id)


if __name__ == '__main__':
    #cnn_module = pickle_from_file(os.path.join("./data_by_liuyang/show_net", 'module'))
    for i in range(0, 59):
        graph = pickle_from_file(
            os.path.join("data_by_liuyang/show_net/",
                         str(i) + '.graph'))
        to_pdf(graph, os.path.join("data_by_liuyang/graph", str(i)))
    #visualize("./data_by_liuyang/show_net/")
    #graph = cnn_module.searcher.load_model_by_id(1)
Example #36
def to_pdf(graph, path):
    dot = Digraph(comment='The Round Table')

    for index, node in enumerate(graph.node_list):
        dot.node(str(index), str(node.shape))

    for u in range(graph.n_nodes):
        for v, layer_id in graph.adj_list[u]:
            dot.edge(str(u), str(v), str(graph.layer_list[layer_id]))

    dot.render(path)


def visualize(path):
    cnn_module = pickle_from_file(os.path.join(path, 'module'))
    #cnn_module.searcher.path = path
    #cnn_module.searcher =
    for item in cnn_module.searcher.history:
        model_id = item['model_id']
        graph = cnn_module.searcher.load_model_by_id(model_id)


if __name__ == '__main__':
    #cnn_module = pickle_from_file(os.path.join("./deal-data/show_net", 'module'))
    for i in range(0, 59):
        graph = pickle_from_file(
            os.path.join("deal-data/show_net/",
                         str(i) + '.graph'))
        to_pdf(graph, os.path.join("deal-data/graph", str(i)))
    #visualize("./deal-data/show_net/")
    #graph = cnn_module.searcher.load_model_by_id(1)
Example #37
from keras.utils import plot_model
from keras.models import load_model
from keras.models import Sequential
from autokeras.utils import pickle_from_file
import sys
import numpy as np
sys.path.append("/home/deep/PycharmProjects/autokeras")
from autokeras.image.image_supervised import load_image_dataset

x_test_keyhole, y_test_keyhole = load_image_dataset(
    csv_file_path="data_by_liuyang/test/test_keyhole.csv",
    images_path="data_by_liuyang/test/the_keyhole")
print("the key hole:", x_test_keyhole.shape)
print(y_test_keyhole.shape)

x_test_no_keyhole, y_test_no_keyhole = load_image_dataset(
    csv_file_path="data_by_liuyang/test/test_no_keyhole.csv",
    images_path="data_by_liuyang/test/no_keyhole")
print(x_test_no_keyhole.shape)
print(y_test_no_keyhole.shape)
x_test, y_test = np.vstack((x_test_keyhole, x_test_no_keyhole)), np.hstack(
    (y_test_keyhole, y_test_no_keyhole))

model_file_name = 'data_by_liuyang/model/autokeras.h5'  # model to load
#model = load_model('data_by_liuyang/model/autokeras.h5')
model = pickle_from_file(model_file_name)
#model = Sequential(model)
#plot_model(model, to_file='my_model.png')
results = model.evaluate(x_test, y_test)  # evaluate on the test data
print(results)  # print the results
Example #38
import os

from keras.datasets import mnist

from autokeras import ImageClassifier
from autokeras.utils import pickle_from_file

# Customize the temp dir for your own setup
TEMP_DIR = '/tmp/autokeras_U8KEOQ'
model_file_name = os.path.join(TEMP_DIR, 'test_autokeras_model.pkl')

if __name__ == '__main__':
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = x_train.reshape(x_train.shape + (1,))
    x_test = x_test.reshape(x_test.shape + (1,))
    clf = ImageClassifier(verbose=True, augment=False, path=TEMP_DIR, resume=True)
    clf.fit(x_train, y_train, time_limit=30 * 60)
    clf.final_fit(x_train, y_train, x_test, y_test)
    clf.export_autokeras_model(model_file_name)
    model = pickle_from_file(model_file_name)
    results = model.evaluate(x_test, y_test)
    print(results)