Example #1
def read_search_folder(search_folderpath):
    """Reads all the standard JSON log files associated to a search experiment.

    See also :func:`deep_architect.search_logging.read_evaluation_folder` for the function
    that reads a single evaluation folder. The list of dictionaries is ordered
    in increasing order of evaluation id.

    Args:
        search_folderpath (str): Path to the search folder used for logging.

    Returns:
        list[dict[str,dict[str,object]]]:
            List of nested dictionaries with the logged information. Each
            dictionary in the list corresponds to an evaluation.
    """
    assert ut.folder_exists(search_folderpath)
    all_evaluations_folderpath = ut.join_paths(
        [search_folderpath, 'evaluations'])
    eval_id = 0
    log_lst = []
    while True:
        evaluation_folderpath = ut.join_paths(
            [all_evaluations_folderpath,
             'x%d' % eval_id])
        if ut.folder_exists(evaluation_folderpath):
            name_to_log = read_evaluation_folder(evaluation_folderpath)
            log_lst.append(name_to_log)
            eval_id += 1
        else:
            break
    return log_lst
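
A minimal usage sketch (assuming the folder was produced by the standard search logger, so each evaluation holds the 'config' and 'results' logs read in Example #11; the path and the 'validation_accuracy' key are only illustrations):

log_lst = read_search_folder('logs/cifar10_medium/rep0')
for eval_id, log in enumerate(log_lst):
    # each entry maps a log name ('config', 'results') to its parsed JSON contents
    print(eval_id, log['results'].get('validation_accuracy'))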
Example #2
def draw_graph_evolution(outputs,
                         hyperp_value_lst,
                         out_folderpath,
                         graph_name='graph',
                         draw_hyperparameters=True,
                         draw_io_labels=True,
                         draw_module_hyperparameter_info=True):
    def draw_fn(i):
        return draw_graph(
            outputs,
            draw_hyperparameters=draw_hyperparameters,
            draw_io_labels=draw_io_labels,
            draw_module_hyperparameter_info=draw_module_hyperparameter_info,
            out_folderpath=out_folderpath,
            graph_name=graph_name + '-%d' % i,
            print_to_screen=False)

    draw_fn(0)
    h_iter = co.unassigned_independent_hyperparameter_iterator(outputs)
    for i, v in enumerate(hyperp_value_lst):
        h = next(h_iter)
        h.assign_value(v)
        draw_fn(i + 1)

    filepath_lst = [
        ut.join_paths([out_folderpath, graph_name + '-%d.pdf' % i])
        for i in range(len(hyperp_value_lst) + 1)
    ]
    out_filepath = ut.join_paths([out_folderpath, graph_name + '.pdf'])

    cmd = " ".join(["pdftk"] + filepath_lst + ["cat", "output", out_filepath])
    ut.run_bash_command(cmd)

    for fp in filepath_lst:
        ut.delete_file(fp)
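
Note that the intermediate per-step PDFs are merged with the external pdftk tool and then deleted, so pdftk must be available on the PATH; for a list of two hyperparameter values the assembled command is roughly "pdftk graph-0.pdf graph-1.pdf graph-2.pdf cat output graph.pdf".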
Example #3
    def __init__(self,
                 folderpath,
                 search_name,
                 evaluation_id,
                 abort_if_exists=False,
                 abort_if_notexists=False):

        self.evaluation_folderpath = get_evaluation_folderpath(
            folderpath, search_name, evaluation_id)
        self.evaluation_data_folderpath = get_evaluation_data_folderpath(
            folderpath, search_name, evaluation_id)

        assert (not abort_if_exists) or (not ut.folder_exists(
            self.evaluation_folderpath))
        assert (not abort_if_notexists) or ut.folder_exists(
            self.evaluation_folderpath)
        ut.create_folder(self.evaluation_folderpath,
                         abort_if_exists=abort_if_exists,
                         create_parent_folders=True)
        ut.create_folder(self.evaluation_data_folderpath,
                         abort_if_exists=abort_if_exists,
                         create_parent_folders=True)

        self.config_filepath = ut.join_paths(
            [self.evaluation_folderpath, 'config.json'])
        self.results_filepath = ut.join_paths(
            [self.evaluation_folderpath, 'results.json'])
Example #4
 def save_state(self, folderpath):
     state = {
         'num_evals': len(self.vecs_lst),
         'vals_lst': self.vals_lst,
     }
     ut.write_jsonfile(state,
                       ut.join_paths([folderpath, 'hash_model_state.json']))
     for i, vecs in enumerate(self.vecs_lst):
         sp.save_npz(ut.join_paths([folderpath, str(i) + '.npz']), vecs)
Example #5
 def load_state(self, folderpath):
     state = ut.read_jsonfile(
         ut.join_paths([folderpath, 'hash_model_state.json']))
     self.vals_lst = state['vals_lst']
     num_evals = state['num_evals']
     for i in range(num_evals):
         self.vecs_lst.append(
             sp.load_npz(ut.join_paths([folderpath,
                                        str(i) + '.npz'])))
     if num_evals > 0:
         self._refit()
Example #6
 def load_state(self, folderpath):
     filepath = ut.join_paths([folderpath, 'evolution_searcher.json'])
     state = ut.read_jsonfile(filepath)
     self.P = state["P"]
     self.S = state["S"]
     self.regularized = state['regularized']
     self.population = deque(state['population'])
     self.initializing = state['initializing']
Example #7
 def save_state(self, folderpath):
     filepath = ut.join_paths([folderpath, 'evolution_searcher.json'])
     state = {
         "P": self.P,
         "S": self.S,
         "population": list(self.population),
         "regularized": self.regularized,
         "initializing": self.initializing,
     }
     ut.write_jsonfile(state, filepath)
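
Judging only by the field names, P and S look like the population and sample sizes of a (regularized) evolution searcher, population is the deque of live candidates, and initializing flags the random warm-up phase; the snippet itself does not spell this out.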
Example #8
    def load_state(self, folder_name):
        filepath = join_paths([folder_name, 'evolution_searcher.json'])
        if not file_exists(filepath):
            raise RuntimeError("Load file does not exist")

        state = read_jsonfile(filepath)
        self.P = state["P"]
        self.S = state["S"]
        self.regularized = state['regularized']
        self.population = deque(state['population'])
        self.initializing = state['initializing']
Example #9
def main():
    num_classes = 10
    (Xtrain, ytrain, Xval, yval, Xtest, ytest) = load_mnist('data/mnist')
    train_dataset = InMemoryDataset(Xtrain, ytrain, True)
    val_dataset = InMemoryDataset(Xval, yval, False)
    test_dataset = InMemoryDataset(Xtest, ytest, False)

    evaluator = SimpleClassifierEvaluator(
        train_dataset,
        val_dataset,
        num_classes,
        ut.join_paths(['temp', 'benchmarks', cfg['search_name']]),
        max_eval_time_in_minutes=cfg['max_eval_time_in_minutes'],
        log_output_to_terminal=True,
        test_dataset=test_dataset)

    for rep_i in range(cfg['num_repetitions']):
        for search_space_name in cfg['search_space_name_lst']:
            for searcher_name in cfg['searcher_name_lst']:

                folderpath = ut.join_paths([
                    cfg['logs_folderpath'], cfg['search_name'],
                    search_space_name, searcher_name
                ])

                sl.create_search_folderpath(
                    folderpath,
                    'rep%d' % rep_i,
                    abort_if_exists=True,
                    delete_if_exists=cfg["delete_if_exists"],
                    create_parent_folders=True)

                search_space_fn = local_ss.name_to_search_space_fn[
                    search_space_name](num_classes)
                searcher = local_se.name_to_get_searcher_fn[searcher_name](
                    search_space_fn)
                run_searcher(
                    searcher, evaluator, cfg['num_samples'] -
                    search_logger.get_current_evaluation_id(), search_logger)
Example #10
def start_worker(comm,
                 evaluator,
                 search_space_factory,
                 folderpath,
                 search_name,
                 resume=True,
                 save_every=1):
    # set the available gpu for process
    print('WORKER %d' % comm.get_rank())
    step = 0

    sl.create_search_folderpath(folderpath, search_name)
    search_data_folder = sl.get_search_data_folderpath(folderpath, search_name)
    save_filepath = ut.join_paths(
        (search_data_folder, 'worker' + str(comm.get_rank()) + '.json'))

    if resume:
        evaluator.load_state(search_data_folder)
        state = ut.read_jsonfile(save_filepath)
        step = state['step']

    while True:
        arch = comm.receive_architecture_in_worker()

        # if a kill signal is received
        if arch is None:
            break

        vs, evaluation_id, searcher_eval_token = arch

        inputs, outputs = search_space_factory.get_search_space()
        se.specify(outputs, vs)
        results = evaluator.eval(inputs, outputs)
        step += 1
        if step % save_every == 0:
            evaluator.save_state(search_data_folder)
            state = {'step': step}
            ut.write_jsonfile(state, save_filepath)
        comm.publish_results_to_master(results, evaluation_id,
                                       searcher_eval_token)
Example #11
def read_evaluation_folder(evaluation_folderpath):
    """Reads all the standard JSON log files associated to a single evaluation.

    See also :func:`deep_architect.search_logging.read_search_folder` for the function
    that reads all the evaluations in a search folder.

    Args:
        evaluation_folderpath (str): Path to the folder containing the standard
            JSON logs.

    Returns:
        dict[str,dict[str,object]]:
            Nested dictionaries with the logged information. The first
            dictionary has keys corresponding to the names of the standard
            log files.
    """
    assert ut.folder_exists(evaluation_folderpath)

    name_to_log = {}
    for name in ['config', 'results']:
        log_filepath = ut.join_paths([evaluation_folderpath, name + '.json'])
        name_to_log[name] = ut.read_jsonfile(log_filepath)
    return name_to_log
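
These are exactly the two files written by the evaluation logger in Example #3 (config.json and results.json), so the returned dictionary has the keys 'config' and 'results'.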
Example #12
def start_searcher(comm,
                   searcher,
                   resume_if_exists,
                   folderpath,
                   search_name,
                   searcher_load_path,
                   num_samples=-1,
                   num_epochs=-1,
                   save_every=1):
    assert num_samples != -1 or num_epochs != -1

    print('SEARCHER')

    sl.create_search_folderpath(folderpath, search_name)
    search_data_folder = sl.get_search_data_folderpath(folderpath, search_name)
    save_filepath = ut.join_paths((search_data_folder, searcher_load_path))

    models_sampled = 0
    epochs = 0
    finished = 0
    killed = 0
    best_accuracy = 0.

    # Load previous searcher
    if resume_if_exists:
        searcher.load(search_data_folder)
        state = ut.read_jsonfile(save_filepath)
        epochs = state['epochs']
        killed = state['killed']
        models_sampled = state['models_finished']
        finished = state['models_finished']

    while (finished < models_sampled or killed < comm.num_workers):
        # Search end conditions
        cont = num_samples == -1 or models_sampled < num_samples
        cont = cont and (num_epochs == -1 or epochs < num_epochs)
        if cont:
            # See whether workers are ready to consume architectures
            if comm.is_ready_to_publish_architecture():
                eval_logger = sl.EvaluationLogger(folderpath, search_name,
                                                  models_sampled)
                _, _, vs, searcher_eval_token = searcher.sample()

                eval_logger.log_config(vs, searcher_eval_token)
                comm.publish_architecture_to_worker(vs, models_sampled,
                                                    searcher_eval_token)

                models_sampled += 1
        else:
            if comm.is_ready_to_publish_architecture():
                comm.kill_worker()
                killed += 1

        # See which workers have finished evaluation
        for worker in range(comm.num_workers):
            msg = comm.receive_results_in_master(worker)
            if msg is not None:
                results, model_id, searcher_eval_token = msg
                eval_logger = sl.EvaluationLogger(folderpath, search_name,
                                                  model_id)
                eval_logger.log_results(results)

                if 'epoch' in results:
                    epochs = max(epochs, results['epoch'])

                searcher.update(results['validation_accuracy'],
                                searcher_eval_token)
                best_accuracy = max(best_accuracy,
                                    results['validation_accuracy'])
                finished += 1
                if finished % save_every == 0:
                    print('Models sampled: %d Best Accuracy: %f' %
                          (finished, best_accuracy))
                    best_accuracy = 0.

                    searcher.save_state(search_data_folder)
                    state = {
                        'models_finished': finished,
                        'epochs': epochs,
                        'killed': killed
                    }
                    ut.write_jsonfile(state, save_filepath)
Example #13
def main():
    configs = ut.read_jsonfile(
        "./examples/tensorflow/full_benchmarks/experiment_config.json")

    parser = argparse.ArgumentParser("MPI Job for architecture search")
    parser.add_argument('--config',
                        '-c',
                        action='store',
                        dest='config_name',
                        default='normal')

    # Other arguments
    parser.add_argument('--display-output',
                        '-o',
                        action='store_true',
                        dest='display_output',
                        default=False)
    parser.add_argument('--resume',
                        '-r',
                        action='store_true',
                        dest='resume',
                        default=False)

    options = parser.parse_args()
    config = configs[options.config_name]

    num_procs = config['num_procs'] if 'num_procs' in config else 0
    comm = get_communicator(config['communicator'], num_procs)
    if len(gpu_utils.get_gpu_information()) != 0:
        #https://github.com/tensorflow/tensorflow/issues/1888
        gpu_utils.set_visible_gpus(
            [comm.get_rank() % gpu_utils.get_total_num_gpus()])

    if 'eager' in config and config['eager']:
        import tensorflow as tf
        tf.logging.set_verbosity(tf.logging.ERROR)
        tf.enable_eager_execution()
    datasets = {
        'cifar10': lambda: (load_cifar10('data/cifar10/', one_hot=False), 10),
        'mnist': lambda: (load_mnist('data/mnist/'), 10),
    }

    (Xtrain, ytrain, Xval, yval, Xtest,
     ytest), num_classes = datasets[config['dataset']]()
    search_space_factory = name_to_search_space_factory_fn[
        config['search_space']](num_classes)

    save_every = 1 if 'save_every' not in config else config['save_every']
    if comm.get_rank() == 0:
        searcher = name_to_searcher_fn[config['searcher']](
            search_space_factory.get_search_space)
        num_samples = -1 if 'samples' not in config else config['samples']
        num_epochs = -1 if 'epochs' not in config else config['epochs']
        start_searcher(comm,
                       searcher,
                       options.resume,
                       config['search_folder'],
                       config['search_name'],
                       config['searcher_file_name'],
                       num_samples=num_samples,
                       num_epochs=num_epochs,
                       save_every=save_every)
    else:
        train_dataset = InMemoryDataset(Xtrain, ytrain, True)
        val_dataset = InMemoryDataset(Xval, yval, False)
        test_dataset = InMemoryDataset(Xtest, ytest, False)

        search_path = sl.get_search_folderpath(config['search_folder'],
                                               config['search_name'])
        ut.create_folder(ut.join_paths([search_path, 'scratch_data']),
                         create_parent_folders=True)
        scratch_folder = ut.join_paths(
            [search_path, 'scratch_data', 'eval_' + str(comm.get_rank())])
        ut.create_folder(scratch_folder)

        evaluators = {
            'simple_classification':
            lambda: SimpleClassifierEvaluator(
                train_dataset,
                val_dataset,
                num_classes,
                './temp' + str(comm.get_rank()),
                max_num_training_epochs=config['eval_epochs'],
                log_output_to_terminal=options.display_output,
                test_dataset=test_dataset),
        }

        assert not config['evaluator'].startswith('enas') or hasattr(
            search_space_factory, 'weight_sharer')
        evaluator = evaluators[config['evaluator']]()

        start_worker(comm,
                     evaluator,
                     search_space_factory,
                     config['search_folder'],
                     config['search_name'],
                     resume=options.resume,
                     save_every=save_every)
Example #14
    def eval(self, inputs, outputs):
        tf.reset_default_graph()

        model_dir = ut.join_paths(
            [self.base_dir, 'eval' + str(self.num_archs)])
        ut.create_folder(model_dir, abort_if_exists=False)

        def model_fn(features, labels, mode, params):
            feature_columns = list(get_feature_columns().values())

            images = tf.feature_column.input_layer(
                features=features, feature_columns=feature_columns)

            images = tf.reshape(images,
                                shape=(-1, IMAGE_HEIGHT, IMAGE_WIDTH,
                                       IMAGE_DEPTH))
            set_recompile(outputs, True)
            gc.collect()
            htfe.set_is_training(outputs, mode == tf.estimator.ModeKeys.TRAIN)
            co.forward({inputs['in']: images})
            logits = outputs['out'].val

            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            predicted_classes = tf.argmax(logits, 1)
            if mode == tf.estimator.ModeKeys.PREDICT:
                predictions = {
                    'class_ids': predicted_classes[:, tf.newaxis],
                    'probabilities': tf.nn.softmax(logits),
                    'logits': logits,
                }
                return tf.estimator.EstimatorSpec(mode,
                                                  predictions=predictions)
            # define loss and optimizer
            train_vars = tf.trainable_variables()
            with tf.variable_scope('l2'):
                l2_loss = tf.add_n([
                    tf.nn.l2_loss(v) for v in train_vars if 'kernel' in v.name
                ]) * self.weight_decay
            unreg_loss = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits(logits=logits,
                                                        labels=labels))
            loss = unreg_loss + l2_loss
            # Compute evaluation metrics.
            accuracy = tf.metrics.accuracy(labels=tf.argmax(labels, 1),
                                           predictions=predicted_classes,
                                           name='acc_op')
            metrics = {'accuracy': accuracy}
            if mode == tf.estimator.ModeKeys.EVAL:
                loss = tf.Print(loss, [
                    accuracy, l2_loss, unreg_loss, loss,
                    tf.argmax(labels, 1), predicted_classes
                ],
                                summarize=10)
                return tf.estimator.EstimatorSpec(mode,
                                                  loss=loss,
                                                  eval_metric_ops=metrics)

            # Create training op.
            assert mode == tf.estimator.ModeKeys.TRAIN
            step = tf.train.get_or_create_global_step()
            learning_rate = self.get_learning_rate(step)
            optimizer = tf.train.RMSPropOptimizer(learning_rate,
                                                  .9,
                                                  momentum=.9,
                                                  epsilon=1.0)
            loss = tf.Print(loss, [
                accuracy, l2_loss, unreg_loss, loss, learning_rate,
                tf.argmax(labels, 1), predicted_classes
            ],
                            summarize=10)
            with tf.control_dependencies(update_ops):
                train_op = optimizer.minimize(
                    loss, global_step=tf.train.get_global_step())
            return tf.estimator.EstimatorSpec(mode,
                                              loss=loss,
                                              train_op=train_op)

        # NUM_GPUS = 2
        # strategy = tf.contrib.distribute.MirroredStrategy(num_gpus=NUM_GPUS)
        # config = tf.estimator.RunConfig(train_distribute=strategy)
        gpu_ops = tf.GPUOptions(allow_growth=True)
        config = tf.ConfigProto(gpu_options=gpu_ops)
        run_config = tf.estimator.RunConfig(model_dir=model_dir,
                                            session_config=config)
        estimator = tf.estimator.Estimator(model_fn=model_fn,
                                           config=run_config,
                                           params={})
        seqs = ut.SequenceTracker(abort_if_different_lengths=True)

        best_val_acc = -np.inf
        stop_counter = self.stop_patience
        timer_manager = ut.TimerManager()
        timer_manager.create_timer('eval')

        # getting the gpu_id based on the environment.
        if gpu_utils.is_environment_variable_defined('CUDA_VISIBLE_DEVICES'):
            s = gpu_utils.get_environment_variable('CUDA_VISIBLE_DEVICES')
            s_lst = s.split(',')
            if len(s_lst) == 1 and len(s_lst[0]) > 0:
                gpu_id = int(s_lst[0])
            else:
                gpu_id = None
        else:
            gpus = gpu_utils.get_gpu_information()
            if len(gpus) == 1:
                gpu_id = 0
            else:
                gpu_id = None

        for epoch in range(self.max_num_training_epochs):
            train_fn = lambda: input_fn(self.X_train, self.y_train, train=True)
            val_fn = lambda: input_fn(self.X_val, self.y_val, train=False)
            print('\n\nTraining')
            estimator.train(input_fn=train_fn)
            print('\n\nEvaluating')
            eval_results = estimator.evaluate(input_fn=val_fn)

            # early stopping
            val_acc = eval_results['accuracy']

            # Display logs per epoch step
            if self.log_output_to_terminal and epoch % self.display_step == 0:
                print(
                    "time:", "%7.1f" %
                    timer_manager.get_time_since_event('eval', 'start'),
                    "epoch:", '%04d' % (epoch + 1), "validation loss:",
                    "{:.9f}".format(eval_results['loss']),
                    "validation_accuracy:", "%.5f" % val_acc)

            d = {
                'validation_accuracy':
                val_acc,
                'validation_loss':
                eval_results['loss'],
                'epoch_number':
                epoch + 1,
                'time_in_minutes':
                timer_manager.get_time_since_event('eval',
                                                   'start',
                                                   units='minutes'),
            }
            seqs.append(d)

            # update the patience counters.
            if best_val_acc < val_acc:
                best_val_acc = val_acc
                # reinitialize all the counters.
                stop_counter = self.stop_patience
            else:
                stop_counter -= 1
                if stop_counter == 0:
                    break

        print("Optimization Finished!")

        timer_manager.tick_timer('eval')
        eval_results = estimator.evaluate(
            input_fn=lambda: input_fn(self.X_val, self.y_val))

        val_acc = eval_results['accuracy']
        t_infer = (
            timer_manager.get_time_since_last_tick('eval', 'miliseconds') /
            self.X_val.shape[0])

        print("Validation accuracy: %f" % val_acc)
        seqs_dict = seqs.get_dict()
        results = {
            'validation_accuracy': val_acc,
            'num_parameters': float(htf.get_num_trainable_parameters()),
            'inference_time_per_example_in_miliseconds': t_infer,
            'num_training_epochs': seqs_dict['epoch_number'],
            'sequences': seqs_dict
        }
        if 'gpu_utilization_in_percent' in seqs_dict:
            results['average_gpu_utilization_in_percent'] = np.mean(
                seqs_dict['gpu_utilization_in_percent'])
            results['average_gpu_memory_utilization_in_gigabytes'] = np.mean(
                seqs_dict['gpu_memory_utilization_in_gigabytes'])

        if self.X_test is not None and self.y_test is not None:
            test_results = estimator.evaluate(
                input_fn=lambda: input_fn(self.X_test, self.y_test))
            test_acc = test_results['accuracy']
            print("Test accuracy: %f" % test_acc)
            results['test_accuracy'] = test_acc

        results['training_time_in_hours'] = timer_manager.get_time_since_event(
            'eval', 'start', units='hours')
        self.num_archs += 1
        return results
Example #15
def get_all_evaluations_folderpath(folderpath, search_name):
    return ut.join_paths(
        [get_search_folderpath(folderpath, search_name), 'evaluations'])
Example #16
 def load_state(self, folderpath):
     self.mcts.load_state(folderpath)
     self.surr_model.load_state(folderpath)
     state = ut.load_jsonfile(ut.join_paths([folderpath, "state.json"]))
     self.cnt = state["cnt"]
Example #17
def process_config_and_args():

    parser = argparse.ArgumentParser("MPI Job for architecture search")
    parser.add_argument('--config',
                        '-c',
                        action='store',
                        dest='config_name',
                        default='normal')
    parser.add_argument(
        '--config-file',
        action='store',
        dest='config_file',
        default=
        '/deep_architect/kubernetes/mongo_communicator/train_best_config.json')
    parser.add_argument('--bucket',
                        '-b',
                        action='store',
                        dest='bucket',
                        default=BUCKET_NAME)

    # Other arguments
    parser.add_argument('--resume',
                        '-r',
                        action='store_true',
                        dest='resume',
                        default=False)
    parser.add_argument('--mongo-host',
                        '-m',
                        action='store',
                        dest='mongo_host',
                        default='127.0.0.1')
    parser.add_argument('--mongo-port',
                        '-p',
                        action='store',
                        dest='mongo_port',
                        default=27017)
    parser.add_argument('--repetition', default=0)
    options = parser.parse_args()
    configs = ut.read_jsonfile(options.config_file)
    config = configs[options.config_name]

    config['bucket'] = options.bucket

    comm = MongoCommunicator(host=options.mongo_host,
                             port=options.mongo_port,
                             data_refresher=True)

    # SET UP GOOGLE STORE FOLDER
    config['search_name'] = config['search_name'] + '_' + str(
        options.repetition)
    search_logger = sl.SearchLogger(config['search_folder'],
                                    config['search_name'])
    search_data_folder = search_logger.get_search_data_folderpath()
    config['save_filepath'] = ut.join_paths(
        (search_data_folder, config['searcher_file_name']))
    config['eval_path'] = sl.get_all_evaluations_folderpath(
        config['search_folder'], config['search_name'])
    config['full_search_folder'] = sl.get_search_folderpath(
        config['search_folder'], config['search_name'])
    config['results_file'] = os.path.join(
        config['results_prefix'] + '_' + str(options.repetition),
        config['results_file'])
    state = {'finished': 0, 'best_accuracy': 0.0}
    if options.resume:
        try:
            download_folder(search_data_folder, config['full_search_folder'],
                            config['bucket'])
            searcher.load_state(search_data_folder)
            if ut.file_exists(config['save_filepath']):
                old_state = ut.read_jsonfile(config['save_filepath'])
                state['finished'] = old_state['finished']
                state['best_accuracy'] = old_state['best_accuracy']
        except:
            pass

    return comm, search_logger, state, config
Example #18
def get_search_data_folderpath(folderpath, search_name):
    return ut.join_paths(
        [get_search_folderpath(folderpath, search_name), "search_data"])
Example #19
def get_search_folderpath(folderpath, search_name):
    return ut.join_paths([folderpath, search_name])
Example #20
def is_search_log_folder(folderpath):
    return ut.folder_exists(ut.join_paths([folderpath, 'evaluations', 'x0']))
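
In other words, a folder is treated as a search log folder as soon as it contains at least one logged evaluation (evaluations/x0).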
Example #21
 def save_state(self, folder_name):
     state = self.get_searcher_state_token()
     write_jsonfile(state,
                    join_paths([folder_name, 'evolution_searcher.json']))
Example #22
def get_evaluation_folderpath(folderpath, search_name, evaluation_id):
    return ut.join_paths([
        get_all_evaluations_folderpath(folderpath, search_name),
        'x%d' % evaluation_id
    ])
Example #23
def get_evaluation_data_folderpath(folderpath, search_name, evaluation_id):
    return ut.join_paths([
        get_evaluation_folderpath(folderpath, search_name, evaluation_id),
        "eval_data"
    ])
Example #24
 def save_state(self, folderpath):
     self.mcts.save_state(folderpath)
     self.surr_model.save_state(folderpath)
     ut.write_jsonfile({"cnt": self.cnt},
                       ut.join_paths([folderpath, "state.json"]))
Example #25
def process_logs(log_lst):
    ds = []
    for i, log in enumerate(log_lst):
        d = log['results']
        # d.pop('sequences')
        # d['num_training_epochs'] = len(d['num_training_epochs'])
        d['evaluation_id'] = i
        ds.append(d)
    return ds


### NOTE: change these to the paths of the log folders that you
# wish to consider.
logs_folderpath = "logs"
# path_lst = ['logs/test_cifar10_short', 'logs/test_cifar10_medium', 'logs/test']
path_lst = [
    ut.join_paths([logs_folderpath, p])
    for p in ['cifar10_medium', "cifar10_short"]
]
print(path_lst)
path_to_log = {p: process_logs(sl.read_search_folder(p)) for p in path_lst}
keys = list(path_to_log.values())[0][0].keys()
print(keys)
ds = list(path_to_log.values())[0]


### Components to layout the HTML page.
def full_column(contents):
    return html.Div(contents, className="row")


def one_half_one_half_column(left_contents, right_contents):
Example #26
 def save_state(self, folderpath):
     ut.write_jsonfile(
         {
             'mcts_root_node': MCTSTreeNode.serialize(self.mcts_root_node),
         }, ut.join_paths([folderpath, 'mcts_searcher_state.json']))
Example #27
 def load_state(self, folderpath):
     state = ut.read_jsonfile(
         ut.join_paths([folderpath, 'mcts_searcher_state.json']))
     self.mcts_root_node = MCTSTreeNode.deserialize(state['mcts_root_node'])
Example #28
data_dir, num_classes = datasets[config['dataset']]
search_space_factory = name_to_search_space_factory_fn[config['search_space']](
    num_classes)

save_every = 1 if 'save_every' not in config else config['save_every']
searcher = name_to_searcher_fn[config['searcher']](
    search_space_factory.get_search_space)
num_epochs = -1 if 'epochs' not in config else config['epochs']
num_samples = -1 if 'samples' not in config else config['samples']
eval_epochs = config['eval_epochs']

# SET UP GOOGLE STORE FOLDER
search_logger = sl.SearchLogger(config['search_folder'], config['search_name'])
# sl.create_search_folderpath(config['search_folder'], config['search_name'])
search_data_folder = search_logger.get_search_data_folderpath()
save_filepath = ut.join_paths(
    (search_data_folder, config['searcher_file_name']))
eval_path = sl.get_all_evaluations_folderpath(config['search_folder'],
                                              config['search_name'])
search_folder = sl.get_search_folderpath(config['search_folder'],
                                         config['search_name'])
models_sampled = 0
epochs = 0
finished = 0
killed = 0
best_accuracy = 0.

# Load previous searcher
if options.resume:
    download_folder(search_folder)
    searcher.load(search_data_folder)
    state = ut.read_jsonfile(save_filepath)
Example #29
def process_config_and_args():

    parser = argparse.ArgumentParser("MPI Job for architecture search")
    parser.add_argument('--config',
                        '-c',
                        action='store',
                        dest='config_name',
                        default='normal')
    parser.add_argument(
        '--config-file',
        action='store',
        dest='config_file',
        default=
        '/deep_architect/examples/contrib/kubernetes/experiment_config.json')
    parser.add_argument('--bucket',
                        '-b',
                        action='store',
                        dest='bucket',
                        default=BUCKET_NAME)

    # Other arguments
    parser.add_argument('--resume',
                        '-r',
                        action='store_true',
                        dest='resume',
                        default=False)
    parser.add_argument('--mongo-host',
                        '-m',
                        action='store',
                        dest='mongo_host',
                        default='127.0.0.1')
    parser.add_argument('--mongo-port',
                        '-p',
                        action='store',
                        dest='mongo_port',
                        default=27017)
    parser.add_argument('--log',
                        choices=['debug', 'info', 'warning', 'error'],
                        default='info')
    parser.add_argument('--repetition', default=0)
    options = parser.parse_args()

    numeric_level = getattr(logging, options.log.upper(), None)
    if not isinstance(numeric_level, int):
        raise ValueError('Invalid log level: %s' % options.log)
    logging.getLogger().setLevel(numeric_level)

    configs = ut.read_jsonfile(options.config_file)
    config = configs[options.config_name]

    config['bucket'] = options.bucket

    comm = MongoCommunicator(host=options.mongo_host,
                             port=options.mongo_port,
                             data_refresher=True,
                             refresh_period=10)

    datasets = {
        'cifar10': ('data/cifar10/', 10),
    }

    _, num_classes = datasets[config['dataset']]
    search_space_factory = name_to_search_space_factory_fn[
        config['search_space']](num_classes)

    config['save_every'] = 1 if 'save_every' not in config else config[
        'save_every']
    searcher = name_to_searcher_fn[config['searcher']](
        search_space_factory.get_search_space)
    config['num_epochs'] = -1 if 'epochs' not in config else config['epochs']
    config[
        'num_samples'] = -1 if 'samples' not in config else config['samples']

    # SET UP GOOGLE STORE FOLDER
    config['search_name'] = config['search_name'] + '_' + str(
        options.repetition)
    search_logger = sl.SearchLogger(config['search_folder'],
                                    config['search_name'])
    search_data_folder = search_logger.get_search_data_folderpath()
    config['save_filepath'] = ut.join_paths(
        (search_data_folder, config['searcher_file_name']))
    config['eval_path'] = sl.get_all_evaluations_folderpath(
        config['search_folder'], config['search_name'])
    config['full_search_folder'] = sl.get_search_folderpath(
        config['search_folder'], config['search_name'])
    config['eval_hparams'] = {} if 'eval_hparams' not in config else config[
        'eval_hparams']

    state = {
        'epochs': 0,
        'models_sampled': 0,
        'finished': 0,
        'best_accuracy': 0.0
    }
    if options.resume:
        try:
            download_folder(search_data_folder, config['full_search_folder'],
                            config['bucket'])
            searcher.load_state(search_data_folder)
            if ut.file_exists(config['save_filepath']):
                old_state = ut.read_jsonfile(config['save_filepath'])
                state['epochs'] = old_state['epochs']
                state['models_sampled'] = old_state['models_sampled']
                state['finished'] = old_state['finished']
                state['best_accuracy'] = old_state['best_accuracy']
        except:
            pass

    return comm, search_logger, searcher, state, config