Example #1
0
def main(config):
    """Sample architectures with a random searcher and log each one.

    For every sampled architecture, its hyperparameter configuration and
    features are written via a SearchLogger, and the resulting file paths
    are collected into a dict that is dumped as JSON to config.result_fp
    so a master process can pick them up.
    """
    # Random searcher over the search space for this number of classes.
    searcher = se.RandomSearcher(get_search_space(config.num_classes))
    # Logging folder for per-evaluation configs and features.
    logger = log.SearchLogger('logs',
                              config.exp_name,
                              resume_if_exists=True,
                              create_parent_folders=True)
    # maps sample index -> paths of the logged artifacts
    architectures = {}

    for idx in range(int(config.num_samples)):
        print("Sampling architecture %d" % idx)
        inputs, outputs, h_value_hist, searcher_eval_token = searcher.sample()
        ev_logger = logger.get_current_evaluation_logger()
        ev_logger.log_config(h_value_hist, searcher_eval_token)
        ev_logger.log_features(inputs, outputs)
        architectures[idx] = {
            'config_filepath': ev_logger.config_filepath,
            'evaluation_filepath': ev_logger.get_evaluation_folderpath(),
        }

    # write to a json file to communicate with master
    ut.write_jsonfile(architectures, config.result_fp)
Example #2
0
def main():
    """Randomly sample and evaluate architectures on MNIST.

    Trains each sampled architecture for a few epochs, tracks the best
    validation accuracy seen so far, and reports the index of the best
    architecture at the end.
    """
    num_classes = 10
    num_samples = 3  # number of architectures to sample
    best_val_acc, best_architecture = 0., -1

    # load and normalize data
    mnist = tf.keras.datasets.mnist
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train, x_test = x_train / 255.0, x_test / 255.0

    # defining evaluator and searcher
    evaluator = SimpleClassifierEvaluator((x_train, y_train),
                                          num_classes,
                                          max_num_training_epochs=5)
    searcher = se.RandomSearcher(get_search_space(num_classes))

    # FIX: xrange is Python 2 only; range matches the Python 3 usage
    # elsewhere in this file.
    for i in range(num_samples):
        print("Sampling architecture %d" % i)
        inputs, outputs, _, searcher_eval_token = searcher.sample()
        # evaluate and return validation accuracy
        val_acc = evaluator.evaluate(inputs, outputs)['val_acc']
        print("Finished evaluating architecture %d, validation accuracy is %f" %
              (i, val_acc))
        if val_acc > best_val_acc:
            best_val_acc = val_acc
            best_architecture = i
        # update the searcher (a no-op for random search)
        searcher.update(val_acc, searcher_eval_token)
    print("Best validation accuracy is %f with architecture %d" %
          (best_val_acc, best_architecture))
Example #3
0
def main():
    """Randomly sample and evaluate architectures on MNIST.

    Uses the test split as validation for simplicity, tracks the best
    validation accuracy, and reports the winning architecture index.
    """
    num_classes = 10
    num_samples = 3  # number of architectures to sample
    best_val_acc, best_architecture = 0., -1

    # download and normalize data, using test as val for simplicity
    X_train, y_train, X_val, y_val, _, _ = load_mnist('data/mnist',
                                                      normalize_range=True)

    # defining evaluator
    evaluator = SimpleClassifierEvaluator((X_train, y_train), (X_val, y_val),
                                          num_classes,
                                          max_num_training_epochs=5,
                                          log_output_to_terminal=True)
    searcher = se.RandomSearcher(get_search_space(num_classes))
    # FIX: xrange is Python 2 only; range matches the Python 3 usage
    # elsewhere in this file.
    for i in range(num_samples):
        print("Sampling architecture %d" % i)
        # start a fresh module collection before sampling a new graph
        M.renew_collection()
        inputs, outputs, _, searcher_eval_token = searcher.sample()
        # evaluate and return validation accuracy
        val_acc = evaluator.evaluate(inputs, outputs)['val_acc']
        print("Finished evaluating architecture %d, validation accuracy is %f" %
              (i, val_acc))
        if val_acc > best_val_acc:
            best_val_acc = val_acc
            best_architecture = i
        searcher.update(val_acc, searcher_eval_token)
    print("Best validation accuracy is %f with architecture %d" %
          (best_val_acc, best_architecture))
def main():
    """Sample several DNN architectures on MNIST and evaluate each one."""
    num_classes = 10
    num_samples = 4
    num_training_epochs = 2
    # NOTE: change to True for graph visualization
    show_graph = False

    # load data and wrap each split as an in-memory dataset
    (X_train, y_train, X_val, y_val, X_test, y_test) = load_mnist('data/mnist',
                                                                  flatten=True)
    train_data = InMemoryDataset(X_train, y_train, True)
    val_data = InMemoryDataset(X_val, y_val, False)
    test_data = InMemoryDataset(X_test, y_test, False)

    # the evaluator trains each sampled architecture for a few epochs
    evaluator = SimpleClassifierEvaluator(
        train_data,
        val_data,
        num_classes,
        num_training_epochs=num_training_epochs,
        log_output_to_terminal=True)
    space_factory = mo.SearchSpaceFactory(lambda: dnn_net(num_classes))
    searcher = se.RandomSearcher(space_factory.get_search_space)

    for _ in range(num_samples):
        inputs, outputs, _, token = searcher.sample()
        if show_graph:
            # try setting draw_module_hyperparameter_info=False and
            # draw_hyperparameters=True for a different visualization.
            vi.draw_graph(outputs,
                          draw_module_hyperparameter_info=False,
                          draw_hyperparameters=True)
        results = evaluator.evaluate(inputs, outputs)
        # updating the searcher. no-op for the random searcher.
        searcher.update(results['validation_accuracy'], token)
Example #5
0
def main():
    """Run Hyperband architecture search on MNIST.

    A random searcher proposes architectures and Hyperband allocates
    training epochs among them, then the best configuration found is
    reported.
    """
    num_classes = 10
    metric = 'val_accuracy'  # evaluation metric
    resource_type = 'epoch'
    max_resource = 81  # max resource that a configuration can have

    # load and normalize data
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train, x_test = x_train / 255.0, x_test / 255.0

    # defining searcher and evaluator
    evaluator = SimpleClassifierEvaluator((x_train, y_train), num_classes,
                                          max_num_training_epochs=5)
    searcher = se.RandomSearcher(get_search_space(num_classes))
    # BUG FIX: the original passed the not-yet-defined name `hyperband` to
    # its own constructor (NameError at runtime); the evaluator is the
    # component that runs configurations, matching the sibling examples.
    hyperband = SimpleArchitectureSearchHyperBand(searcher, evaluator, metric,
                                                 resource_type)
    (best_config, best_perf) = hyperband.evaluate(max_resource)
    print("Best %s is %f with architecture %d" % (metric, best_perf[0], best_config[0]))
Example #6
0
def main():
    """Sample and evaluate DNN architectures on MNIST (keras data)."""
    num_classes = 10
    num_samples = 4
    num_training_epochs = 2
    validation_frac = 0.2
    # NOTE: change to True for graph visualization
    show_graph = False

    # load the data; flatten images into vectors scaled to [0, 1].
    (X_train, y_train), (X_test, y_test) = mnist.load_data()

    def flatten(X):
        return X.reshape((X.shape[0], -1))

    X_train = flatten(X_train) / 255.0
    X_test = flatten(X_test) / 255.0
    # carve a validation split off the tail of the training set.
    split = int((1.0 - validation_frac) * X_train.shape[0])
    X_train, X_val = X_train[:split], X_train[split:]
    y_train, y_val = y_train[:split], y_train[split:]

    # define the evaluator and the searcher
    evaluator = SimpleClassifierEvaluator(
        X_train,
        y_train,
        X_val,
        y_val,
        num_classes,
        num_training_epochs=num_training_epochs)
    searcher = se.RandomSearcher(lambda: dnn_net(num_classes))

    for _ in range(num_samples):
        (inputs, outputs, _hyperp_value_lst, token) = searcher.sample()
        if show_graph:
            # try setting draw_module_hyperparameter_info=False and
            # draw_hyperparameters=True for a different visualization.
            vi.draw_graph(outputs,
                          draw_module_hyperparameter_info=False,
                          draw_hyperparameters=True)
        results = evaluator.evaluate(inputs, outputs)
        # updating the searcher. no-op for the random searcher.
        searcher.update(results['validation_accuracy'], token)
Example #7
0
def main():
    """Sample DNN architectures on MNIST with mini-batch training."""
    num_classes = 10
    num_samples = 3
    num_training_epochs = 2
    batch_size = 256
    # NOTE: change to True for graph visualization
    show_graph = False

    # load and normalize data, wrapping each split as a dataset object
    (X_train, y_train, X_val, y_val, X_test,
     y_test) = load_mnist(flatten=True, one_hot=False)
    train_data = InMemoryDataset(X_train, y_train, True)
    val_data = InMemoryDataset(X_val, y_val, False)
    test_data = InMemoryDataset(X_test, y_test, False)

    # evaluator and searcher over the DNN search space
    evaluator = SimpleClassifierEvaluator(
        train_data,
        val_data,
        num_classes,
        num_training_epochs=num_training_epochs,
        batch_size=batch_size,
        log_output_to_terminal=True)
    searcher = se.RandomSearcher(lambda: dnn_net(num_classes))

    for _ in range(num_samples):
        inputs, outputs, _, token = searcher.sample()
        if show_graph:
            # try setting draw_module_hyperparameter_info=False and
            # draw_hyperparameters=True for a different visualization.
            vi.draw_graph(outputs,
                          draw_module_hyperparameter_info=False,
                          draw_hyperparameters=True)

        results = evaluator.evaluate(inputs, outputs)
        # update the searcher; a no-op for random search.
        searcher.update(results['validation_accuracy'], token)
Example #8
0
from __future__ import absolute_import
from deep_architect.searchers.regularized_evolution import EvolutionSearcher, mutatable
import deep_architect.searchers.random as ra
from deep_architect.searchers.smbo_mcts import SMBOSearcherWithMCTSOptimizer
from deep_architect.searchers.mcts import MCTSSearcher
from deep_architect.searchers.smbo_random import SMBOSearcher
from deep_architect.surrogates.hashing import HashingSurrogate

name_to_searcher_fn = {
    'random':
    lambda ssf: ra.RandomSearcher(ssf),
    'evolution_pop=100_samp=25_reg=t':
    lambda ssf: EvolutionSearcher(ssf, mutatable, 100, 25, regularized=True),
    'evolution_pop=64_samp=16_reg=t':
    lambda ssf: EvolutionSearcher(ssf, mutatable, 64, 16, regularized=True),
    'evolution_pop=20_samp=20_reg=t':
    lambda ssf: EvolutionSearcher(ssf, mutatable, 20, 20, regularized=True),
    'evolution_pop=100_samp=50_reg=t':
    lambda ssf: EvolutionSearcher(ssf, mutatable, 100, 50, regularized=True),
    'evolution_pop=100_samp=2_reg=t':
    lambda ssf: EvolutionSearcher(ssf, mutatable, 100, 2, regularized=True),
    'evolution_pop=100_samp=25_reg=f':
    lambda ssf: EvolutionSearcher(ssf, mutatable, 100, 25, regularized=False),
    'evolution_pop=64_samp=16_reg=f':
    lambda ssf: EvolutionSearcher(ssf, mutatable, 64, 16, regularized=False),
    'evolution_pop=20_samp=20_reg=f':
    lambda ssf: EvolutionSearcher(ssf, mutatable, 20, 20, regularized=False),
    'evolution_pop=100_samp=50_reg=f':
    lambda ssf: EvolutionSearcher(ssf, mutatable, 100, 50, regularized=False),
    'evolution_pop=100_samp=2_reg=f':
    lambda ssf: EvolutionSearcher(ssf, mutatable, 100, 2, regularized=False),