Code Example #1
File: master.py Project: zeta1999/deep_architect
# Imports assumed from the deep_architect package (aliases follow the
# repository's conventions); `ss` stands in for the project-local module
# that defines search_space_fn and is hypothetical here.
import deep_architect.search_logging as sl
import deep_architect.utils as ut
import deep_architect.visualization as vi
from deep_architect.searchers.random import RandomSearcher
import search_space as ss  # hypothetical project-local module


def main():
    # Loading the config file.
    cfg = ut.get_config()
    # Creating the searcher.
    searcher = RandomSearcher(ss.search_space_fn)
    # Creating the search folder for logging information.
    sl.create_search_folderpath(cfg['folderpath'],
                                cfg['search_name'],
                                abort_if_exists=cfg["abort_if_exists"],
                                delete_if_exists=cfg['delete_if_exists'],
                                create_parent_folders=True)
    # Search loop.
    for evaluation_id in range(cfg['num_samples']):
        logger = sl.EvaluationLogger(cfg["folderpath"], cfg["search_name"],
                                     evaluation_id)
        if not logger.config_exists():
            (inputs, outputs, hyperp_value_lst,
             searcher_eval_token) = searcher.sample()
            # Logging results (including graph).
            logger.log_config(hyperp_value_lst, searcher_eval_token)
            vi.draw_graph(
                outputs,
                draw_module_hyperparameter_info=False,
                print_to_screen=False,
                out_folderpath=logger.get_evaluation_data_folderpath())
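ut.get_config() presumably loads a JSON configuration file (Example #3 reads the same kind of file explicitly with ut.read_jsonfile). A minimal config covering the keys this example reads might look like the following; the file name and all values are illustrative:

{
    "folderpath": "logs",
    "search_name": "random_search",
    "num_samples": 16,
    "abort_if_exists": false,
    "delete_if_exists": true
}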
Code Example #2
# Imports assumed from the deep_architect package; the contrib paths follow
# the repository layout but may differ across versions.
import deep_architect.modules as mo
import deep_architect.search_logging as sl
import deep_architect.utils as ut
import deep_architect.visualization as vi
from deep_architect.searchers.random import RandomSearcher
from deep_architect.contrib.misc.datasets.loaders import load_mnist
from deep_architect.contrib.misc.datasets.dataset import InMemoryDataset
from deep_architect.contrib.misc.evaluators.tensorflow.classification import SimpleClassifierEvaluator
import deep_architect.contrib.misc.search_spaces.tensorflow.dnn as css_dnn


def main():
    # Loading the config file.
    cfg = ut.get_config()
    num_classes = 10
    num_samples = cfg['num_samples']
    # Loading the data.
    (Xtrain, ytrain, Xval, yval, Xtest, ytest) = load_mnist('data/mnist')
    train_dataset = InMemoryDataset(Xtrain, ytrain, True)
    val_dataset = InMemoryDataset(Xval, yval, False)
    test_dataset = InMemoryDataset(Xtest, ytest, False)
    # Creating the evaluator.
    evaluator = SimpleClassifierEvaluator(
        train_dataset,
        val_dataset,
        num_classes,
        './temp',
        max_eval_time_in_minutes=cfg['max_eval_time_in_minutes'],
        log_output_to_terminal=True,
        test_dataset=test_dataset)
    # Creating the search space.
    search_space_fn = lambda: css_dnn.dnn_net(num_classes)
    search_space_factory = mo.SearchSpaceFactory(search_space_fn)

    sl.create_search_folderpath(
        cfg["folderpath"],
        cfg["search_name"],
        delete_if_exists=cfg['delete_if_exists'],
        abort_if_exists=False,
        create_parent_folders=True)

    # Creating the searcher.
    searcher = RandomSearcher(search_space_factory.get_search_space)
    # Search loop.
    for evaluation_id in range(num_samples):
        eval_logger = sl.EvaluationLogger(cfg["folderpath"], cfg["search_name"],
                                          evaluation_id)
        if not eval_logger.config_exists():
            inputs, outputs, hyperp_value_lst, eval_token = searcher.sample()
            results = evaluator.eval(inputs, outputs)
            # Logging results (including graph).
            eval_logger.log_config(hyperp_value_lst, eval_token)
            eval_logger.log_results(results)
            vi.draw_graph(
                outputs,
                draw_module_hyperparameter_info=True,
                print_to_screen=False,
                out_folderpath=eval_logger.get_evaluation_data_folderpath())
            # Updating the searcher with the results of the evaluation.
            searcher.update(results['validation_accuracy'], eval_token)
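evaluator.eval returns a dictionary of metrics. The only key these examples rely on is 'validation_accuracy' (Example #4 also checks for an optional 'epoch'); any other keys in this sketch are hypothetical placeholders:

results = {
    'validation_accuracy': 0.97,  # required by searcher.update
    'epoch': 4,                   # optional; Example #4 uses it if present
    'test_accuracy': 0.96,        # hypothetical extra metric
}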
Code Example #3
# Imports assumed from the deep_architect package; `ss` stands in for the
# hypothetical project-local module defining search_space_fn and num_classes.
import deep_architect.search_logging as sl
import deep_architect.utils as ut
from deep_architect.searchers.common import specify
from deep_architect.contrib.misc.datasets.loaders import load_mnist
from deep_architect.contrib.misc.datasets.dataset import InMemoryDataset
from deep_architect.contrib.misc.evaluators.tensorflow.classification import SimpleClassifierEvaluator
import search_space as ss  # hypothetical project-local module


def main():
    cmd = ut.CommandLineArgs()
    cmd.add('config_filepath', 'str')
    cmd.add('worker_id', 'int')
    cmd.add('num_workers', 'int')
    out = cmd.parse()
    cfg = ut.read_jsonfile(out['config_filepath'])

    # Loading the data.
    (Xtrain, ytrain, Xval, yval, Xtest, ytest) = load_mnist('data/mnist')
    train_dataset = InMemoryDataset(Xtrain, ytrain, True)
    val_dataset = InMemoryDataset(Xval, yval, False)
    test_dataset = InMemoryDataset(Xtest, ytest, False)

    # Creating the evaluator.
    evaluator = SimpleClassifierEvaluator(
        train_dataset,
        val_dataset,
        ss.num_classes,
        './temp/worker%d' % out["worker_id"],
        max_eval_time_in_minutes=cfg['max_eval_time_in_minutes'],
        log_output_to_terminal=True,
        test_dataset=test_dataset)

    for evaluation_id in range(out["worker_id"], cfg["num_samples"],
                               out["num_workers"]):
        logger = sl.EvaluationLogger(cfg["folderpath"],
                                     cfg["search_name"],
                                     evaluation_id,
                                     abort_if_notexists=True)
        if not logger.results_exist():
            eval_cfg = logger.read_config()
            inputs, outputs = ss.search_space_fn()
            specify(outputs, eval_cfg["hyperp_value_lst"])
            results = evaluator.eval(inputs, outputs)
            logger.log_results(results)
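Each worker strides over evaluation ids starting at its own worker_id, so the samples are partitioned across workers without any coordination. Assuming ut.CommandLineArgs exposes each added argument as a --flag (an assumption about this helper) and the snippet lives in a hypothetical worker.py, two workers could be launched as:

python worker.py --config_filepath config.json --worker_id 0 --num_workers 2
python worker.py --config_filepath config.json --worker_id 1 --num_workers 2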
Code Example #4
# Imports assumed from the deep_architect package; `comm` is a communicator
# object supplied by the caller, whose API is used but not defined here.
import deep_architect.search_logging as sl
import deep_architect.utils as ut


def start_searcher(comm,
                   searcher,
                   resume_if_exists,
                   folderpath,
                   search_name,
                   searcher_load_path,
                   num_samples=-1,
                   num_epochs=-1,
                   save_every=1):
    assert num_samples != -1 or num_epochs != -1

    print('SEARCHER')

    sl.create_search_folderpath(folderpath, search_name)
    search_data_folder = sl.get_search_data_folderpath(folderpath, search_name)
    save_filepath = ut.join_paths((search_data_folder, searcher_load_path))

    models_sampled = 0
    epochs = 0
    finished = 0
    killed = 0
    best_accuracy = 0.

    # Load the previous searcher state when resuming.
    if resume_if_exists:
        searcher.load(search_data_folder)
        state = ut.read_jsonfile(save_filepath)
        epochs = state['epochs']
        killed = state['killed']
        models_sampled = state['models_finished']
        finished = state['models_finished']

    # Keep running while evaluations are outstanding or workers are alive.
    while finished < models_sampled or killed < comm.num_workers:
        # Search end conditions
        cont = num_samples == -1 or models_sampled < num_samples
        cont = cont and (num_epochs == -1 or epochs < num_epochs)
        if cont:
            # See whether workers are ready to consume architectures
            if comm.is_ready_to_publish_architecture():
                eval_logger = sl.EvaluationLogger(folderpath, search_name,
                                                  models_sampled)
                _, _, vs, searcher_eval_token = searcher.sample()

                eval_logger.log_config(vs, searcher_eval_token)
                comm.publish_architecture_to_worker(vs, models_sampled,
                                                    searcher_eval_token)

                models_sampled += 1
        else:
            if comm.is_ready_to_publish_architecture():
                comm.kill_worker()
                killed += 1

        # See which workers have finished evaluation
        for worker in range(comm.num_workers):
            msg = comm.receive_results_in_master(worker)
            if msg is not None:
                results, model_id, searcher_eval_token = msg
                eval_logger = sl.EvaluationLogger(folderpath, search_name,
                                                  model_id)
                eval_logger.log_results(results)

                if 'epoch' in results:
                    epochs = max(epochs, results['epoch'])

                searcher.update(results['validation_accuracy'],
                                searcher_eval_token)
                best_accuracy = max(best_accuracy,
                                    results['validation_accuracy'])
                finished += 1
                if finished % save_every == 0:
                    # best_accuracy is reset below, so this reports the best
                    # validation accuracy since the previous save point.
                    print('Models finished: %d Best accuracy since last save: %f' %
                          (finished, best_accuracy))
                    best_accuracy = 0.

                    searcher.save_state(search_data_folder)
                    state = {
                        'models_finished': finished,
                        'epochs': epochs,
                        'killed': killed
                    }
                    ut.write_jsonfile(state, save_filepath)
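The state dictionary written by ut.write_jsonfile above is exactly what the resume branch reads back through ut.read_jsonfile, so the saved file is a small JSON object (values illustrative):

{
    "models_finished": 24,
    "epochs": 3,
    "killed": 0
}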