Example #1
    def sample(self):
        # Replay the next recorded hyperparameter value list from the queue.
        assert self.idx < len(self.queue)
        hyperp_value_lst = self.queue[self.idx]
        # Rebuild a fresh search space and specify it with the recorded values.
        inputs, outputs = self.search_space_fn()
        se.specify(outputs, hyperp_value_lst)
        idx = self.idx
        self.idx += 1
        return inputs, outputs, hyperp_value_lst, {"idx": idx}
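All of these snippets share one replay pattern: a search space function returns a fresh, unspecified (inputs, outputs) graph, and specify walks it, fixing each hyperparameter to the next value in a recorded list. A minimal sketch of the record/replay round trip, assuming deep_architect-style helpers (the import path is an assumption based on the se/seco aliases used in these snippets):

# Sketch of the record/replay round trip; the import path is an assumption.
import deep_architect.searchers.common as se

def record_and_replay(search_space_fn):
    # Sample a random architecture, recording the chosen hyperparameter values.
    _, outputs = search_space_fn()
    hyperp_value_lst = se.random_specify(outputs)

    # Rebuild the identical architecture by replaying the recorded values
    # over a fresh, unspecified copy of the search space.
    inputs, outputs = search_space_fn()
    se.specify(outputs, hyperp_value_lst)
    return inputs, outputs, hyperp_value_lst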
Example #2
    def update(self, val, searcher_eval_token):
        # Rebuild the evaluated architecture from its recorded values and
        # update the surrogate model with the observed score.
        inputs, outputs = self.search_space_fn()
        specify(outputs, searcher_eval_token['vs'])
        feats = extract_features(inputs, outputs)
        self.surr_model.update(val, feats)

        # Periodically rebuild the MCTS tree so it reflects the refit surrogate.
        self.cnt += 1
        if self.cnt % self.tree_refit_interval == 0:
            self.mcts = MCTSSearcher(self.search_space_fn)
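The surrogate model only needs an update method over extracted features (and typically an eval counterpart used when ranking candidates). A hypothetical stand-in that satisfies the interface inferred from the calls above:

class ConstantSurrogateModel:
    """Hypothetical stand-in for surr_model; real models fit on the features."""

    def __init__(self):
        self.vals = []

    def update(self, val, feats):
        # A real surrogate would fit on feats; this stub just tracks scores.
        self.vals.append(val)

    def eval(self, feats):
        # Predict the running mean regardless of the architecture's features.
        return sum(self.vals) / len(self.vals) if self.vals else 0.0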
Example #3
    def __init__(self, search_space_fn, hyperp_value_lst=None):
        inputs, outputs = mo.buffer_io(*search_space_fn())
        # If a value list is given, replay it; otherwise sample a random
        # architecture and record the values that were chosen.
        if hyperp_value_lst is not None:
            seco.specify(outputs, hyperp_value_lst)
            self.hyperp_value_lst = hyperp_value_lst
        else:
            self.hyperp_value_lst = seco.random_specify(outputs)

        self.inputs = inputs
        self.outputs = outputs
        # Precomputed for efficiency; not necessary for static graphs.
        self._module_seq = co.determine_module_eval_seq(inputs)
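A usage sketch for a wrapper like this (the class name and search space function are hypothetical): constructing it once samples and records an architecture, and passing the recorded values back reproduces it exactly.

# Hypothetical usage of the constructor above.
first = SpecifiedSearchSpace(dnn_search_space_fn)

# Replaying the recorded values yields an identical architecture.
second = SpecifiedSearchSpace(dnn_search_space_fn,
                              hyperp_value_lst=first.hyperp_value_lst)
assert second.hyperp_value_lst == first.hyperp_value_lst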
Example #4
def main(args):

    num_classes = 10

    # load and normalize data
    (x_train, y_train), (x_test, y_test) = load_data()
    x_train, x_test = x_train / 255.0, x_test / 255.0

    # define the evaluator
    evaluator = SimpleClassifierEvaluator((x_train, y_train),
                                          num_classes,
                                          max_num_training_epochs=5)
    # hs holds the "extra" hyperparameters returned by the search space
    inputs, outputs, hs = get_search_space(num_classes)()
    h_values = ut.read_jsonfile(args.config)
    specify(outputs, hs, h_values["hyperp_value_lst"])
    results = evaluator.evaluate(inputs, outputs)
    ut.write_jsonfile(results, args.result_fp)
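The config file consumed by main() above is just a JSON dict carrying the recorded values under the key the code reads; a sketch of writing one (ut.write_jsonfile is taken from the snippets, the import path and values are assumptions):

# Hypothetical: generate a config file that main() above can consume.
import deep_architect.utils as ut  # import path is an assumption

ut.write_jsonfile({"hyperp_value_lst": [0, 2, 1, 0, 3]}, "config.json")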
Example #5
def start_worker(comm,
                 evaluator,
                 search_space_factory,
                 folderpath,
                 search_name,
                 resume=True,
                 save_every=1):
    # log this worker's rank (GPU assignment for the process would go here)
    print('WORKER %d' % comm.get_rank())
    step = 0

    sl.create_search_folderpath(folderpath, search_name)
    search_data_folder = sl.get_search_data_folderpath(folderpath, search_name)
    save_filepath = ut.join_paths(
        (search_data_folder, 'worker' + str(comm.get_rank()) + '.json'))

    if resume:
        evaluator.load_state(search_data_folder)
        state = ut.read_jsonfile(save_filepath)
        step = state['step']

    while True:
        arch = comm.receive_architecture_in_worker()

        # if a kill signal is received
        if arch is None:
            break

        vs, evaluation_id, searcher_eval_token = arch

        inputs, outputs = search_space_factory.get_search_space()
        se.specify(outputs, vs)
        results = evaluator.eval(inputs, outputs)
        step += 1
        if step % save_every == 0:
            evaluator.save_state(search_data_folder)
            state = {'step': step}
            ut.write_jsonfile(state, save_filepath)
        comm.publish_results_to_master(results, evaluation_id,
                                       searcher_eval_token)
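start_worker is one half of a master/worker protocol: the master publishes (vs, evaluation_id, searcher_eval_token) tuples and eventually the kill signal (the None the worker checks for). A hedged sketch of the matching master loop; the master-side method names mirror the worker-side calls and are assumptions about the communicator API, as is the results key:

# Hedged sketch of the master half of the protocol implemented by start_worker.
def start_master(comm, searcher, num_samples):
    for evaluation_id in range(num_samples):
        # Sample an architecture and send its hyperparameter values to a worker.
        inputs, outputs, vs, searcher_eval_token = searcher.sample()
        comm.publish_architecture_to_worker(vs, evaluation_id,
                                            searcher_eval_token)
        # Wait for the worker's results and update the searcher.
        results = comm.receive_results_in_master()
        searcher.update(results['validation_accuracy'], searcher_eval_token)
    # Publishing None tells each worker loop above to break.
    comm.kill_worker()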
Example #6
def main():
    cmd = ut.CommandLineArgs()
    cmd.add('config_filepath', 'str')
    cmd.add('worker_id', 'int')
    cmd.add('num_workers', 'int')
    out = cmd.parse()
    cfg = ut.read_jsonfile(out['config_filepath'])

    # Loading the data.
    (Xtrain, ytrain, Xval, yval, Xtest, ytest) = load_mnist('data/mnist')
    train_dataset = InMemoryDataset(Xtrain, ytrain, True)
    val_dataset = InMemoryDataset(Xval, yval, False)
    test_dataset = InMemoryDataset(Xtest, ytest, False)

    # Creating the evaluator.
    evaluator = SimpleClassifierEvaluator(
        train_dataset,
        val_dataset,
        ss.num_classes,
        './temp/worker%d' % out["worker_id"],
        max_eval_time_in_minutes=cfg['max_eval_time_in_minutes'],
        log_output_to_terminal=True,
        test_dataset=test_dataset)

    for evaluation_id in range(out["worker_id"], cfg["num_samples"],
                               out["num_workers"]):
        logger = sl.EvaluationLogger(cfg["folderpath"],
                                     cfg["search_name"],
                                     evaluation_id,
                                     abort_if_notexists=True)
        if not logger.results_exist():
            eval_cfg = logger.read_config()
            inputs, outputs = ss.search_space_fn()
            specify(outputs, eval_cfg["hyperp_value_lst"])
            results = evaluator.eval(inputs, outputs)
            logger.log_results(results)
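The for-loop above shards evaluations across workers by striding: worker i handles evaluation ids i, i + num_workers, i + 2 * num_workers, and so on, so every id is evaluated exactly once. A quick illustration:

# With num_samples = 10 and num_workers = 3:
print([list(range(worker_id, 10, 3)) for worker_id in range(3)])
# [[0, 3, 6, 9], [1, 4, 7], [2, 5, 8]]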
Example #8
# At this point, all of the workers should be killed, and the searcher should
# have evaluated all the architectures it needed to finish its search.
# print('Best architecture accuracy: %f' % searcher.best_acc)
# print('Best architecture params: %r' % searcher.best_vs)
else:
    evaluator = SimpleClassifierEvaluator(train_dataset,
                                          val_dataset,
                                          10,
                                          './temp',
                                          max_num_training_epochs=2)

    # This process keeps going until it receives a kill signal from the master
    # process. At that point, it breaks out of its loop and ends.
    while True:
        arch = comm.receive_architecture_in_worker()

        if arch is None:
            break

        vs, evaluation_id, searcher_eval_token = arch

        # In order to evaluate the architecture sent by the searcher, we create a new
        # unspecified search space, and recreate the architecture using the values of
        # the hyperparameters received by the worker.
        inputs, outputs = ssf.get_search_space()
        se.specify(outputs, vs)
        results = evaluator.eval(inputs, outputs)
        comm.publish_results_to_master(results, evaluation_id,
                                       searcher_eval_token)
Example #9
def main():
    comm, search_space_factory, evaluator_fn, config = process_config_and_args()

    search_data_folder = sl.get_search_data_folderpath(config['search_folder'],
                                                       config['search_name'])
    state = {
        'specified': False,
        'evaluated': False,
        'arch_data': None,
        'started': False
    }

    comm.subscribe(get_topic_name(ARCH_TOPIC, config),
                   callback=lambda message: retrieve_message(
                       message, comm, config, state))
    thread = threading.Thread(target=nudge_master, args=(comm, config, state))
    thread.start()
    step = 0
    while True:
        while not state['specified']:
            time.sleep(5)
        if state['arch_data'] is not None:
            vs = state['arch_data']['vs']
            evaluation_id = state['arch_data']['evaluation_id']
            searcher_eval_token = state['arch_data']['searcher_eval_token']
            eval_hparams = state['arch_data'].get('eval_hparams', {})
            logger.info('Evaluating architecture %d', evaluation_id)
            inputs, outputs = search_space_factory.get_search_space()
            se.specify(outputs, vs)
            eval_state = comm.get_value(get_topic_name(ARCH_TOPIC, config),
                                        'evaluation_id', evaluation_id)
            if (eval_state is not None and 'data' in eval_state and
                    'state' in eval_state['data']):
                logger.info(
                    'Loading previous evaluation state for architecture %d',
                    eval_state['data']['evaluation_id'])
                eval_state = eval_state['data']['state']
            else:
                eval_state = None
            evaluator = evaluator_fn(**eval_hparams)
            results = evaluator.eval(
                inputs,
                outputs,
                save_fn=lambda eval_state: save_state(comm, evaluation_id,
                                                      config, eval_state),
                state=eval_state)
            logger.info('Finished evaluating architecture %d', evaluation_id)
            step += 1
            if step % config['save_every'] == 0:
                logger.info('Saving evaluator state')
                evaluator.save_state(search_data_folder)

            encoded_results = {
                'results': results,
                'vs': vs,
                'evaluation_id': evaluation_id,
                'searcher_eval_token': searcher_eval_token
            }
            comm.publish(get_topic_name(RESULTS_TOPIC, config),
                         encoded_results)
            state['evaluated'] = True
            state['specified'] = False
        else:
            break
    thread.join()
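retrieve_message is not shown in this snippet, but the polling loop above pins down the handshake it must implement: it stores the decoded architecture message (or None as a stop signal) in state['arch_data'] and flips state['specified']. A minimal sketch under those assumptions:

# Hypothetical sketch of the callback subscribed above.
def retrieve_message(message, comm, config, state):
    # message carries {'vs', 'evaluation_id', 'searcher_eval_token', ...}
    # or None as the shutdown signal; either way, wake the polling loop.
    state['arch_data'] = message
    state['specified'] = True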
Example #10
def main():
    global specified
    global evaluated
    global results_topic, arch_subscription
    configs = ut.read_jsonfile(
        "/deep_architect/dev/google_communicator/experiment_config.json")

    parser = argparse.ArgumentParser(description="MPI Job for architecture search")
    parser.add_argument('--config',
                        '-c',
                        action='store',
                        dest='config_name',
                        default='search_evol')

    # Other arguments
    parser.add_argument('--display-output',
                        '-o',
                        action='store_true',
                        dest='display_output',
                        default=False)
    parser.add_argument('--project-id',
                        action='store',
                        dest='project_id',
                        default='deep-architect')
    parser.add_argument('--bucket',
                        '-b',
                        action='store',
                        dest='bucket',
                        default='normal')
    parser.add_argument('--resume',
                        '-r',
                        action='store_true',
                        dest='resume',
                        default=False)

    options = parser.parse_args()
    config = configs[options.config_name]

    PROJECT_ID = options.project_id
    BUCKET_NAME = options.bucket
    results_topic = publisher.topic_path(PROJECT_ID, 'results')
    arch_subscription = subscriber.subscription_path(PROJECT_ID,
                                                     'architectures-sub')

    datasets = {
        'cifar10': ('/data/cifar10/', 10),
    }

    data_dir, num_classes = datasets[config['dataset']]
    search_space_factory = name_to_search_space_factory_fn[
        config['search_space']](num_classes)

    save_every = config.get('save_every', 1)

    evaluators = {
        'tpu_classification':
        lambda: TPUEstimatorEvaluator(
            'gs://' + BUCKET_NAME + data_dir,
            max_num_training_epochs=config['eval_epochs'],
            log_output_to_terminal=options.display_output,
            base_dir='gs://' + BUCKET_NAME + '/scratch_dir'),
    }

    evaluator = evaluators[config['evaluator']]()

    search_data_folder = sl.get_search_data_folderpath(config['search_folder'],
                                                       config['search_name'])
    subscription = subscriber.subscribe(arch_subscription,
                                        callback=retrieve_message)
    thread = threading.Thread(target=nudge_master)
    thread.start()
    step = 0
    while True:
        while not specified:
            time.sleep(5)
        if arch_data:
            vs, evaluation_id, searcher_eval_token = arch_data
            inputs, outputs = search_space_factory.get_search_space()
            se.specify(outputs, vs)
            print('Evaluating architecture')
            results = evaluator.eval(inputs, outputs)
            print('Evaluated architecture')
            step += 1
            if step % save_every == 0:
                evaluator.save_state(search_data_folder)
            encoded_results = json.dumps((results, vs, evaluation_id,
                                          searcher_eval_token)).encode('utf-8')
            future = publisher.publish(results_topic, encoded_results)
            future.result()
            evaluated = True
            specified = False
        else:
            break
    thread.join()
    subscription.cancel()
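publisher and subscriber are used as module-level globals here; with the google-cloud-pubsub client library they would be created roughly as below (standard client classes; treat the surrounding module layout as an assumption):

# Assumed module-level setup for the Pub/Sub globals used in main() above.
from google.cloud import pubsub_v1

publisher = pubsub_v1.PublisherClient()
subscriber = pubsub_v1.SubscriberClient()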