Example #1
0
def main():
    """Sample architectures from the search space and log each configuration."""
    # Read the experiment configuration.
    config = ut.get_config()
    # Random searcher over the search space.
    searcher = RandomSearcher(ss.search_space_fn)
    # Prepare the folder hierarchy where search logs are stored.
    sl.create_search_folderpath(
        config['folderpath'],
        config['search_name'],
        abort_if_exists=config['abort_if_exists'],
        delete_if_exists=config['delete_if_exists'],
        create_parent_folders=True)
    # Main sampling loop.
    for eval_id in range(config['num_samples']):
        eval_logger = sl.EvaluationLogger(config['folderpath'],
                                          config['search_name'], eval_id)
        if eval_logger.config_exists():
            # Already sampled in a previous run; skip.
            continue
        (inputs, outputs, hyperp_value_lst,
         searcher_eval_token) = searcher.sample()
        # Persist the sampled hyperparameter values and the searcher token.
        eval_logger.log_config(hyperp_value_lst, searcher_eval_token)
        # Render the architecture graph into the evaluation data folder.
        vi.draw_graph(
            outputs,
            draw_module_hyperparameter_info=False,
            print_to_screen=False,
            out_folderpath=eval_logger.get_evaluation_data_folderpath())
Example #2
0
def main():
    """Run a short random architecture search on MNIST."""
    num_classes = 10
    num_samples = 4
    num_training_epochs = 2
    # Flip to True to render each sampled architecture graph.
    show_graph = False

    # Load MNIST and wrap each split as an in-memory dataset.
    (X_train, y_train, X_val, y_val, X_test,
     y_test) = load_mnist('data/mnist', flatten=True)
    train_dataset = InMemoryDataset(X_train, y_train, True)
    val_dataset = InMemoryDataset(X_val, y_val, False)
    test_dataset = InMemoryDataset(X_test, y_test, False)

    # The evaluator trains each sampled model for a few epochs.
    evaluator = SimpleClassifierEvaluator(
        train_dataset,
        val_dataset,
        num_classes,
        num_training_epochs=num_training_epochs,
        log_output_to_terminal=True)
    ssf = mo.SearchSpaceFactory(lambda: dnn_net(num_classes))
    searcher = se.RandomSearcher(ssf.get_search_space)

    for _ in range(num_samples):
        inputs, outputs, _, searcher_eval_token = searcher.sample()
        if show_graph:
            # try setting draw_module_hyperparameter_info=False and
            # draw_hyperparameters=True for a different visualization.
            vi.draw_graph(outputs,
                          draw_module_hyperparameter_info=False,
                          draw_hyperparameters=True)
        results = evaluator.evaluate(inputs, outputs)
        # Feed the score back; this is a no-op for the random searcher.
        searcher.update(results['validation_accuracy'], searcher_eval_token)
Example #3
0
def main():
    """Random search over DNNs on MNIST with full per-evaluation logging."""
    # Read the experiment configuration.
    cfg = ut.get_config()
    num_classes = 10
    num_samples = cfg['num_samples']

    # Load MNIST and wrap each split as an in-memory dataset.
    Xtrain, ytrain, Xval, yval, Xtest, ytest = load_mnist('data/mnist')
    train_dataset = InMemoryDataset(Xtrain, ytrain, True)
    val_dataset = InMemoryDataset(Xval, yval, False)
    test_dataset = InMemoryDataset(Xtest, ytest, False)

    # Evaluator that trains and scores each sampled architecture.
    evaluator = SimpleClassifierEvaluator(
        train_dataset,
        val_dataset,
        num_classes,
        './temp',
        max_eval_time_in_minutes=cfg['max_eval_time_in_minutes'],
        log_output_to_terminal=True,
        test_dataset=test_dataset)

    # Search space over deep feed-forward networks.
    search_space_factory = mo.SearchSpaceFactory(
        lambda: css_dnn.dnn_net(num_classes))

    # Prepare the folder hierarchy where search logs are stored.
    sl.create_search_folderpath(
        cfg['folderpath'],
        cfg['search_name'],
        delete_if_exists=cfg['delete_if_exists'],
        abort_if_exists=False,
        create_parent_folders=True)

    searcher = RandomSearcher(search_space_factory.get_search_space)
    # Search loop: sample, evaluate, log, update.
    for evaluation_id in range(num_samples):
        eval_logger = sl.EvaluationLogger(cfg['folderpath'], cfg['search_name'],
                                          evaluation_id)
        if eval_logger.config_exists():
            # Already evaluated in a previous run; skip.
            continue
        inputs, outputs, hyperp_value_lst, eval_token = searcher.sample()
        results = evaluator.eval(inputs, outputs)
        # Persist the configuration, the results, and the rendered graph.
        eval_logger.log_config(hyperp_value_lst, eval_token)
        eval_logger.log_results(results)
        vi.draw_graph(
            outputs,
            draw_module_hyperparameter_info=True,
            print_to_screen=False,
            out_folderpath=eval_logger.get_evaluation_data_folderpath())
        # Feed the score back; this is a no-op for the random searcher.
        searcher.update(results['validation_accuracy'], eval_token)
Example #4
0
def run_searcher(searcher, evaluator, num_samples, get_evaluation_logger):
    """Sample num_samples architectures, evaluate each one, and log
    configurations, results, and a rendered graph per evaluation."""
    for sample_idx in range(num_samples):
        logger = get_evaluation_logger(sample_idx)
        inputs, outputs, hyperp_value_lst, searcher_eval_token = (
            searcher.sample())
        results = evaluator.eval(inputs, outputs)
        # Record the sampled configuration and its evaluation results.
        logger.log_config(hyperp_value_lst, searcher_eval_token)
        logger.log_results(results)
        # NOTE(review): the two positional True flags presumably map to
        # draw_hyperparameters/draw_module_hyperparameter_info — confirm
        # against vi.draw_graph's signature.
        vi.draw_graph(
            outputs,
            True,
            True,
            print_to_screen=False,
            out_folderpath=logger.get_user_data_folderpath())
        # Feed the validation score back to the searcher.
        searcher.update(results['validation_accuracy'], searcher_eval_token)
Example #5
0
def main():
    """Random architecture search on MNIST loaded via keras-style loader."""
    num_classes = 10
    num_samples = 4
    num_training_epochs = 2
    validation_frac = 0.2
    # Flip to True to render each sampled architecture graph.
    show_graph = False

    # Load MNIST, flatten the images, and scale pixel values to [0, 1].
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    flatten = lambda X: X.reshape((X.shape[0], -1))
    X_train = flatten(X_train) / 255.0
    X_test = flatten(X_test) / 255.0
    # Carve a validation split off the tail of the training set.
    num_train = int((1.0 - validation_frac) * X_train.shape[0])
    X_train, X_val = X_train[:num_train], X_train[num_train:]
    y_train, y_val = y_train[:num_train], y_train[num_train:]

    # The evaluator trains each sampled model for a few epochs.
    evaluator = SimpleClassifierEvaluator(
        X_train,
        y_train,
        X_val,
        y_val,
        num_classes,
        num_training_epochs=num_training_epochs)
    # Random searcher over the DNN search space.
    searcher = se.RandomSearcher(lambda: dnn_net(num_classes))

    for _ in range(num_samples):
        (inputs, outputs, hyperp_value_lst,
         searcher_eval_token) = searcher.sample()
        if show_graph:
            # try setting draw_module_hyperparameter_info=False and
            # draw_hyperparameters=True for a different visualization.
            vi.draw_graph(outputs,
                          draw_module_hyperparameter_info=False,
                          draw_hyperparameters=True)
        results = evaluator.evaluate(inputs, outputs)
        # Feed the score back; this is a no-op for the random searcher.
        searcher.update(results['validation_accuracy'], searcher_eval_token)
Example #6
0
def main():
    """Random search on MNIST with mini-batch training."""
    num_classes = 10
    num_samples = 3
    num_training_epochs = 2
    batch_size = 256
    # Flip to True to render each sampled architecture graph.
    show_graph = False

    # Load MNIST (flattened images, integer labels) and wrap the splits.
    (X_train, y_train, X_val, y_val, X_test,
     y_test) = load_mnist(flatten=True, one_hot=False)
    train_dataset = InMemoryDataset(X_train, y_train, True)
    val_dataset = InMemoryDataset(X_val, y_val, False)
    test_dataset = InMemoryDataset(X_test, y_test, False)

    # The evaluator trains each sampled model for a few epochs.
    evaluator = SimpleClassifierEvaluator(
        train_dataset,
        val_dataset,
        num_classes,
        num_training_epochs=num_training_epochs,
        batch_size=batch_size,
        log_output_to_terminal=True)
    # Random searcher over the DNN search space.
    searcher = se.RandomSearcher(lambda: dnn_net(num_classes))

    for _ in range(num_samples):
        inputs, outputs, _, searcher_eval_token = searcher.sample()
        if show_graph:
            # try setting draw_module_hyperparameter_info=False and
            # draw_hyperparameters=True for a different visualization.
            vi.draw_graph(outputs,
                          draw_module_hyperparameter_info=False,
                          draw_hyperparameters=True)

        results = evaluator.evaluate(inputs, outputs)
        # Feed the score back; this is a no-op for the random searcher.
        searcher.update(results['validation_accuracy'], searcher_eval_token)
Example #7
0
    # Tail of an enclosing function whose definition starts above this view.
    # One binary (D([0, 1])) hyperparameter per candidate edge j -> i between
    # nodes; the dict key is a JSON string encoding both node ids.
    name_to_hyperp = {
        ut.json_object_to_json_string({
            "node_id": i,
            "in_node_id": j
        }): D([0, 1])
        for i in range(num_nodes) for j in range(i)
    }
    # Wrap the stage as a substitution module exposing "in"/"out" ports.
    return mo.substitution_module("ConvStage",
                                  substitution_fn,
                                  name_to_hyperp, ["in"], ["out"],
                                  scope=None)


# Build a search space from conv_stage and randomly specify one architecture.
(inputs, outputs
 ) = mo.SearchSpaceFactory(lambda: conv_stage(32, 3, 8)).get_search_space()
random_specify(outputs)
# Alternative to random_specify: assign every independent hyperparameter to 1.
# for h in co.unassigned_independent_hyperparameter_iterator(outputs):
#     h.assign_value(1)

# Render the specified architecture graph.
vi.draw_graph(outputs, draw_module_hyperparameter_info=False)

# Next step (currently disabled): forward a concrete input through the graph
# and build a model from the resulting tensors — TODO confirm before enabling.
# inputs_val = Input((32, 32, 3))
# co.forward({inputs["in"]: inputs_val})
# outputs_val = outputs["out"].val

# model = Model(inputs=inputs_val, outputs=outputs_val)
# model.summary()

### NOTE: these are done.

# TODO: finish the model