Code example #1
def main():
    # Loading the config file.
    cfg = ut.get_config()
    num_classes = 10
    num_samples = cfg['num_samples']
    # Loading the data.
    (Xtrain, ytrain, Xval, yval, Xtest, ytest) = load_mnist('data/mnist')
    train_dataset = InMemoryDataset(Xtrain, ytrain, True)
    val_dataset = InMemoryDataset(Xval, yval, False)
    test_dataset = InMemoryDataset(Xtest, ytest, False)
    # Creating the evaluator.
    evaluator = SimpleClassifierEvaluator(
        train_dataset,
        val_dataset,
        num_classes,
        './temp',
        max_eval_time_in_minutes=cfg['max_eval_time_in_minutes'],
        log_output_to_terminal=True,
        test_dataset=test_dataset)
    # Creating the search space.
    search_space_fn = lambda: css_dnn.dnn_net(num_classes)
    search_space_factory = mo.SearchSpaceFactory(search_space_fn)

    sl.create_search_folderpath(
        cfg["folderpath"],
        cfg["search_name"],
        delete_if_exists=cfg['delete_if_exists'],
        abort_if_exists=False,
        create_parent_folders=True)

    # Creating the searcher.
    searcher = RandomSearcher(search_space_factory.get_search_space)
    # Search loop.
    for evaluation_id in range(num_samples):
        eval_logger = sl.EvaluationLogger(cfg["folderpath"], cfg["search_name"],
                                          evaluation_id)
        if not eval_logger.config_exists():
            inputs, outputs, hyperp_value_lst, eval_token = searcher.sample()
            results = evaluator.eval(inputs, outputs)
            # Logging results (including graph).
            eval_logger.log_config(hyperp_value_lst, eval_token)
            eval_logger.log_results(results)
            vi.draw_graph(
                outputs,
                draw_module_hyperparameter_info=True,
                print_to_screen=False,
                out_folderpath=eval_logger.get_evaluation_data_folderpath())
            # Updating the searcher given the results of the evaluation.
            searcher.update(results['validation_accuracy'], eval_token)
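Example #1 assumes a set of helper modules that the excerpt does not show. The import preamble below is a plausible reconstruction based on the identifiers used (ut, sl, mo, vi, css_dnn, and the dataset/evaluator classes); the exact module paths are assumptions about the deep_architect package layout and may differ across versions of the library.

# Assumed imports for example #1 (paths are a best guess, not verified):
import deep_architect.utils as ut
import deep_architect.modules as mo
import deep_architect.search_logging as sl
import deep_architect.visualization as vi
from deep_architect.searchers.random import RandomSearcher
from deep_architect.contrib.misc.datasets.loaders import load_mnist
from deep_architect.contrib.misc.datasets.dataset import InMemoryDataset
from deep_architect.contrib.misc.evaluators.tensorflow.classification import SimpleClassifierEvaluator
import deep_architect.contrib.misc.search_spaces.tensorflow.dnn as css_dnn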
Code example #2
def main():
    num_classes = 10
    num_samples = 8
    (Xtrain, ytrain, Xval, yval, Xtest, ytest) = load_mnist('data/mnist')
    train_dataset = InMemoryDataset(Xtrain, ytrain, True)
    val_dataset = InMemoryDataset(Xval, yval, False)
    test_dataset = InMemoryDataset(Xtest, ytest, False)
    evaluator = SimpleClassifierEvaluator(
        train_dataset,
        val_dataset,
        num_classes,
        './temp',
        max_eval_time_in_minutes=1.0,
        log_output_to_terminal=True)

    search_space_fn = lambda: css_dnn.dnn_net(num_classes)
    searcher = RandomSearcher(search_space_fn)
    for _ in range(num_samples):
        inputs, outputs, searcher_eval_token, _ = searcher.sample()
        val_acc = evaluator.eval(inputs, outputs)['validation_accuracy']
        searcher.update(val_acc, searcher_eval_token)
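Example #2 is the same search stripped to its core protocol: sample an architecture, evaluate it, feed the result back. Note that it unpacks searcher.sample() differently from example #1 (the token in third rather than fourth position), which suggests the two snippets target different versions of the library. Below is a minimal, hypothetical stand-in that satisfies the sample/update contract; it only illustrates the interface, not deep_architect's actual base class.

# Hypothetical searcher implementing the sample/eval/update contract
# used by the loop above (returned values ordered as in example #1):
import random

class UniformRandomSearcher:
    def __init__(self, search_space_fn):
        self.search_space_fn = search_space_fn

    def sample(self):
        inputs, outputs = self.search_space_fn()
        # A real searcher would also pick values for the unassigned
        # hyperparameters here; the token lets update() associate the
        # result with this particular sample.
        hyperp_value_lst = []
        eval_token = {'evaluation_id': random.getrandbits(32)}
        return inputs, outputs, hyperp_value_lst, eval_token

    def update(self, val, eval_token):
        # Pure random search ignores feedback; smarter searchers would
        # use (val, eval_token) to bias future samples.
        pass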
Code example #3
def main():
    cmd = ut.CommandLineArgs()
    cmd.add('config_filepath', 'str')
    cmd.add('worker_id', 'int')
    cmd.add('num_workers', 'int')
    out = cmd.parse()
    cfg = ut.read_jsonfile(out['config_filepath'])

    # Loading the data.
    (Xtrain, ytrain, Xval, yval, Xtest, ytest) = load_mnist('data/mnist')
    train_dataset = InMemoryDataset(Xtrain, ytrain, True)
    val_dataset = InMemoryDataset(Xval, yval, False)
    test_dataset = InMemoryDataset(Xtest, ytest, False)

    # Creating the evaluator.
    evaluator = SimpleClassifierEvaluator(
        train_dataset,
        val_dataset,
        ss.num_classes,
        './temp/worker%d' % out["worker_id"],
        max_eval_time_in_minutes=cfg['max_eval_time_in_minutes'],
        log_output_to_terminal=True,
        test_dataset=test_dataset)

    for evaluation_id in range(out["worker_id"], cfg["num_samples"],
                               out["num_workers"]):
        logger = sl.EvaluationLogger(cfg["folderpath"],
                                     cfg["search_name"],
                                     evaluation_id,
                                     abort_if_notexists=True)
        if not logger.results_exist():
            eval_cfg = logger.read_config()
            inputs, outputs = ss.search_space_fn()
            specify(outputs, eval_cfg["hyperp_value_lst"])
            results = evaluator.eval(inputs, outputs)
            logger.log_results(results)
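Example #3 is the worker side of a distributed run: the master logs sampled configurations, and each worker re-creates and evaluates only the architectures whose ids belong to it (ss is presumably a search-space module imported elsewhere in the original file; it supplies num_classes and search_space_fn). The stride range(worker_id, num_samples, num_workers) partitions the evaluation ids across workers without any coordination:

# Illustration of the id partitioning used in the worker loop above:
num_samples, num_workers = 8, 3
for worker_id in range(num_workers):
    print(worker_id, list(range(worker_id, num_samples, num_workers)))
# prints:
# 0 [0, 3, 6]
# 1 [1, 4, 7]
# 2 [2, 5]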
Code example #4
File: main.py Project: dapatil211/deep_architect
def main():
    num_classes = 10
    (Xtrain, ytrain, Xval, yval, Xtest, ytest) = load_mnist('data/mnist')
    train_dataset = InMemoryDataset(Xtrain, ytrain, True)
    val_dataset = InMemoryDataset(Xval, yval, False)
    test_dataset = InMemoryDataset(Xtest, ytest, False)

    evaluator = SimpleClassifierEvaluator(
        train_dataset,
        val_dataset,
        num_classes,
        ut.join_paths(['temp', 'benchmarks', cfg['search_name']]),
        max_eval_time_in_minutes=cfg['max_eval_time_in_minutes'],
        log_output_to_terminal=True,
        test_dataset=test_dataset)

    for rep_i in range(cfg['num_repetitions']):
        for search_space_name in cfg['search_space_name_lst']:
            for searcher_name in cfg['searcher_name_lst']:

                folderpath = ut.join_paths([
                    cfg['logs_folderpath'], cfg['search_name'],
                    search_space_name, searcher_name
                ])

                sl.create_search_folderpath(
                    folderpath,
                    'rep%d' % rep_i,
                    abort_if_exists=True,
                    delete_if_exists=cfg["delete_if_exists"],
                    create_parent_folders=True)

                search_space_fn = local_ss.name_to_search_space_fn[
                    search_space_name](num_classes)
                searcher = local_se.name_to_get_searcher_fn[searcher_name](
                    search_space_fn)
                run_searcher(
                    searcher, evaluator, cfg['num_samples'] -
                    search_logger.get_current_evaluation_id(), search_logger)
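Example #4 runs a grid of (search space, searcher) pairs for several repetitions. As excerpted, cfg, local_ss, local_se, search_logger, and run_searcher are all defined elsewhere in the original main.py; in particular, search_logger is presumably created from the folder set up by create_search_folderpath. A plausible shape for the missing run_searcher helper, mirroring the loop in example #1, is sketched below (hypothetical, not the project's actual code):

# Hypothetical sketch of run_searcher, assuming a SearchLogger that
# hands out per-evaluation loggers:
def run_searcher(searcher, evaluator, num_samples, search_logger):
    for _ in range(num_samples):
        eval_logger = search_logger.get_current_evaluation_logger()
        inputs, outputs, hyperp_value_lst, eval_token = searcher.sample()
        results = evaluator.eval(inputs, outputs)
        eval_logger.log_config(hyperp_value_lst, eval_token)
        eval_logger.log_results(results)
        searcher.update(results['validation_accuracy'], eval_token)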
Code example #5
            if msg is not None:
                results, model_id, searcher_eval_token = msg
                searcher.update(results['validation_accuracy'],
                                searcher_eval_token)
                finished += 1
                print('Model %d accuracy: %f' %
                      (model_id, results['validation_accuracy']))

# At this point, all of the workers should be killed, and the searcher should
# have evaluated all the architectures it needed to finish its search.
# print('Best architecture accuracy: %f' % searcher.best_acc)
# print('Best architecture params: %r' % searcher.best_vs)
else:
    evaluator = SimpleClassifierEvaluator(train_dataset,
                                          val_dataset,
                                          10,
                                          './temp',
                                          max_num_training_epochs=2)

    # This process keeps going until it receives a kill signal from the master
    # process. At that point, it breaks out of its loop and ends.
    while True:
        arch = comm.receive_architecture_in_worker()

        if arch is None:
            break

        vs, evaluation_id, searcher_eval_token = arch

        # In order to evaluate the architecture sent by the searcher, we create
        # a new unspecified search space and recreate the architecture using the
        # values of the hyperparameters received from the searcher.
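The excerpt cuts off inside the worker loop. A plausible continuation, mirroring the pattern of example #3, would rebuild the search space, pin its hyperparameters to the received values, and evaluate (hypothetical sketch; the call that reports results back to the master is not shown in this excerpt):

inputs, outputs = search_space_fn()
specify(outputs, vs)
results = evaluator.eval(inputs, outputs)
# ...results are then sent back to the master over comm.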
Code example #6
def main():
    configs = ut.read_jsonfile(
        "./examples/tensorflow/full_benchmarks/experiment_config.json")

    parser = argparse.ArgumentParser("MPI Job for architecture search")
    parser.add_argument('--config',
                        '-c',
                        action='store',
                        dest='config_name',
                        default='normal')

    # Other arguments
    parser.add_argument('--display-output',
                        '-o',
                        action='store_true',
                        dest='display_output',
                        default=False)
    parser.add_argument('--resume',
                        '-r',
                        action='store_true',
                        dest='resume',
                        default=False)

    options = parser.parse_args()
    config = configs[options.config_name]

    num_procs = config['num_procs'] if 'num_procs' in config else 0
    comm = get_communicator(config['communicator'], num_procs)
    if len(gpu_utils.get_gpu_information()) != 0:
        #https://github.com/tensorflow/tensorflow/issues/1888
        gpu_utils.set_visible_gpus(
            [comm.get_rank() % gpu_utils.get_total_num_gpus()])

    if 'eager' in config and config['eager']:
        import tensorflow as tf
        tf.logging.set_verbosity(tf.logging.ERROR)
        tf.enable_eager_execution()
    datasets = {
        'cifar10': lambda: (load_cifar10('data/cifar10/', one_hot=False), 10),
        'mnist': lambda: (load_mnist('data/mnist/'), 10),
    }

    (Xtrain, ytrain, Xval, yval, Xtest,
     ytest), num_classes = datasets[config['dataset']]()
    search_space_factory = name_to_search_space_factory_fn[
        config['search_space']](num_classes)

    save_every = 1 if 'save_every' not in config else config['save_every']
    if comm.get_rank() == 0:
        searcher = name_to_searcher_fn[config['searcher']](
            search_space_factory.get_search_space)
        num_samples = -1 if 'samples' not in config else config['samples']
        num_epochs = -1 if 'epochs' not in config else config['epochs']
        start_searcher(comm,
                       searcher,
                       options.resume,
                       config['search_folder'],
                       config['search_name'],
                       config['searcher_file_name'],
                       num_samples=num_samples,
                       num_epochs=num_epochs,
                       save_every=save_every)
    else:
        train_dataset = InMemoryDataset(Xtrain, ytrain, True)
        val_dataset = InMemoryDataset(Xval, yval, False)
        test_dataset = InMemoryDataset(Xtest, ytest, False)

        search_path = sl.get_search_folderpath(config['search_folder'],
                                               config['search_name'])
        ut.create_folder(ut.join_paths([search_path, 'scratch_data']),
                         create_parent_folders=True)
        scratch_folder = ut.join_paths(
            [search_path, 'scratch_data', 'eval_' + str(comm.get_rank())])
        ut.create_folder(scratch_folder)

        evaluators = {
            'simple_classification':
            lambda: SimpleClassifierEvaluator(
                train_dataset,
                val_dataset,
                num_classes,
                './temp' + str(comm.get_rank()),
                max_num_training_epochs=config['eval_epochs'],
                log_output_to_terminal=options.display_output,
                test_dataset=test_dataset),
        }

        assert not config['evaluator'].startswith('enas') or hasattr(
            search_space_factory, 'weight_sharer')
        evaluator = evaluators[config['evaluator']]()

        start_worker(comm,
                     evaluator,
                     search_space_factory,
                     config['search_folder'],
                     config['search_name'],
                     resume=options.resume,
                     save_every=save_every)
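Example #6 wires everything into an MPI job: rank 0 runs the searcher (master) and every other rank runs an evaluator (worker), with each rank pinned to GPU rank % num_gpus when GPUs are present. The keys read by the script imply an experiment_config.json of roughly the following shape; the key names are taken from the code above, while the values are placeholders, not the repository's actual settings:

# Plausible entry in experiment_config.json (values are placeholders):
{
    "normal": {
        "communicator": "mpi",
        "dataset": "mnist",
        "search_space": "<a key of name_to_search_space_factory_fn>",
        "searcher": "<a key of name_to_searcher_fn>",
        "evaluator": "simple_classification",
        "eval_epochs": 2,
        "search_folder": "logs",
        "search_name": "mnist_search",
        "searcher_file_name": "searcher_state",
        "save_every": 1
    }
}

Optional keys with defaults in the code: num_procs (0), samples (-1), epochs (-1), and eager (off).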