def main():
    """Run a small random search over DNN architectures on MNIST."""
    num_classes = 10
    num_samples = 4
    num_training_epochs = 2
    # NOTE: change to True for graph visualization
    show_graph = False

    # Load train/validation/test splits with images flattened to vectors.
    (X_train, y_train, X_val, y_val, X_test, y_test) = load_mnist(
        'data/mnist', flatten=True)
    train_dataset = InMemoryDataset(X_train, y_train, True)
    val_dataset = InMemoryDataset(X_val, y_val, False)
    test_dataset = InMemoryDataset(X_test, y_test, False)

    # The evaluator scores sampled architectures; the searcher proposes them.
    evaluator = SimpleClassifierEvaluator(
        train_dataset,
        val_dataset,
        num_classes,
        num_training_epochs=num_training_epochs,
        log_output_to_terminal=True)
    ssf = mo.SearchSpaceFactory(lambda: dnn_net(num_classes))
    searcher = se.RandomSearcher(ssf.get_search_space)

    for _ in range(num_samples):
        inputs, outputs, _, searcher_eval_token = searcher.sample()
        if show_graph:
            # try setting draw_module_hyperparameter_info=False and
            # draw_hyperparameters=True for a different visualization.
            vi.draw_graph(outputs,
                          draw_module_hyperparameter_info=False,
                          draw_hyperparameters=True)
        results = evaluator.evaluate(inputs, outputs)
        # Updating the searcher is a no-op for the random searcher.
        searcher.update(results['validation_accuracy'], searcher_eval_token)
Esempio n. 2
0
def get_conv_search_space_fn(num_classes):
    """Return a search-space callable for a conv net with `num_classes` outputs."""

    def search_space_fn():
        # Hyperparameters: number of spatial reductions and pooling operator.
        num_reductions = D([2, 3, 4])
        pool_op = D(['max', 'avg'])
        return mo.siso_sequential([
            css_cnn2d.conv_net(num_reductions),
            css_cnn2d.spatial_squeeze(pool_op, D([num_classes]))
        ])

    return mo.SearchSpaceFactory(search_space_fn).get_search_space
Esempio n. 3
0
def main():
    """Run a logged random search over DNN architectures on MNIST."""
    # Loading the config file.
    cfg = ut.get_config()
    num_classes = 10
    num_samples = cfg['num_samples']

    # Loading the data.
    (Xtrain, ytrain, Xval, yval, Xtest, ytest) = load_mnist('data/mnist')
    train_dataset = InMemoryDataset(Xtrain, ytrain, True)
    val_dataset = InMemoryDataset(Xval, yval, False)
    test_dataset = InMemoryDataset(Xtest, ytest, False)

    # Creating the evaluator.
    evaluator = SimpleClassifierEvaluator(
        train_dataset,
        val_dataset,
        num_classes,
        './temp',
        max_eval_time_in_minutes=cfg['max_eval_time_in_minutes'],
        log_output_to_terminal=True,
        test_dataset=test_dataset)

    # Creating the search space.
    def search_space_fn():
        return css_dnn.dnn_net(num_classes)

    search_space_factory = mo.SearchSpaceFactory(search_space_fn)

    sl.create_search_folderpath(
        cfg["folderpath"],
        cfg["search_name"],
        delete_if_exists=cfg['delete_if_exists'],
        abort_if_exists=False,
        create_parent_folders=True)

    # Creating the searcher.
    searcher = RandomSearcher(search_space_factory.get_search_space)

    # Search loop: sample, evaluate, log, and update the searcher.
    for evaluation_id in range(num_samples):
        eval_logger = sl.EvaluationLogger(cfg["folderpath"], cfg["search_name"],
                                          evaluation_id)
        # Skip evaluations that were already logged in a previous run.
        if eval_logger.config_exists():
            continue

        inputs, outputs, hyperp_value_lst, eval_token = searcher.sample()
        results = evaluator.eval(inputs, outputs)
        # Logging results (including graph).
        eval_logger.log_config(hyperp_value_lst, eval_token)
        eval_logger.log_results(results)
        vi.draw_graph(
            outputs,
            draw_module_hyperparameter_info=True,
            print_to_screen=False,
            out_folderpath=eval_logger.get_evaluation_data_folderpath())
        # Updating the searcher given the results of logging.
        searcher.update(results['validation_accuracy'], eval_token)
Esempio n. 4
0
def main():
    """Random search over DNN architectures on Keras-loaded MNIST."""
    num_classes = 10
    num_samples = 4
    num_training_epochs = 2
    validation_frac = 0.2
    # NOTE: change to True for graph visualization
    show_graph = False

    # Load the data and flatten each image into a vector scaled to [0, 1].
    (X_train, y_train), (X_test, y_test) = mnist.load_data()

    def flatten(X):
        return X.reshape((X.shape[0], -1))

    X_train = flatten(X_train) / 255.0
    X_test = flatten(X_test) / 255.0

    # Hold out the last `validation_frac` of the training set for validation.
    num_train = int((1.0 - validation_frac) * X_train.shape[0])
    X_train, X_val = X_train[:num_train], X_train[num_train:]
    y_train, y_val = y_train[:num_train], y_train[num_train:]

    # Define the evaluator and the searcher.
    evaluator = SimpleClassifierEvaluator(
        X_train,
        y_train,
        X_val,
        y_val,
        num_classes,
        num_training_epochs=num_training_epochs)
    ssf = mo.SearchSpaceFactory(lambda: dnn_net(num_classes))
    searcher = se.RandomSearcher(ssf.get_search_space)

    for _ in range(num_samples):
        (inputs, outputs, hyperp_value_lst,
         searcher_eval_token) = searcher.sample()
        if show_graph:
            # try setting draw_module_hyperparameter_info=False and
            # draw_hyperparameters=True for a different visualization.
            vi.draw_graph(outputs,
                          draw_module_hyperparameter_info=False,
                          draw_hyperparameters=True)
        results = evaluator.evaluate(inputs, outputs)
        # Updating the searcher is a no-op for the random searcher.
        searcher.update(results['validation_accuracy'], searcher_eval_token)
Esempio n. 5
0
def main():
    """Random search over DNN architectures on MNIST with a capped eval time.

    Samples `num_samples` architectures, evaluates each one for up to one
    minute, and reports the validation accuracy back to the searcher.
    """
    num_classes = 10
    num_samples = 8
    (Xtrain, ytrain, Xval, yval, Xtest, ytest) = load_mnist('data/mnist')
    train_dataset = InMemoryDataset(Xtrain, ytrain, True)
    val_dataset = InMemoryDataset(Xval, yval, False)
    test_dataset = InMemoryDataset(Xtest, ytest, False)
    evaluator = SimpleClassifierEvaluator(train_dataset,
                                          val_dataset,
                                          num_classes,
                                          './temp',
                                          max_eval_time_in_minutes=1.0,
                                          log_output_to_terminal=True)

    search_space_fn = lambda: css_dnn.dnn_net(num_classes)
    search_space_factory = mo.SearchSpaceFactory(search_space_fn)

    searcher = RandomSearcher(search_space_factory.get_search_space)
    for _ in range(num_samples):
        # BUG FIX: sample() returns (inputs, outputs, hyperp_value_lst,
        # searcher_eval_token) — see the other call sites in this file. The
        # original unpacked the token from the THIRD position, so the
        # hyperparameter value list was passed to searcher.update as the token.
        inputs, outputs, _, searcher_eval_token = searcher.sample()
        val_acc = evaluator.eval(inputs, outputs)['validation_accuracy']
        searcher.update(val_acc, searcher_eval_token)
Esempio n. 6
0
def get_dnn_search_space_fn(num_classes):
    """Return a search-space callable for a DNN with `num_classes` outputs."""
    factory = mo.SearchSpaceFactory(lambda: css_dnn.dnn_net(num_classes))
    return factory.get_search_space
Esempio n. 7
0
# First, create the communicator. The communicator is used by the master to
# send candidate architectures to the workers to evaluate, and by the workers
# to send back the results for the architectures they evaluated. Currently,
# the communicator can be MPI based or file based (file based requires the
# processes to share a filesystem).
comm = get_communicator(args.comm, num_procs=args.num_procs)

# Total number of models to be evaluated during the search.
num_total_models = 25

# Set up the datasets and the search space factory (the test split is unused
# here, hence the discarded values).
X_train, y_train, X_val, y_val, _, _ = load_mnist('data/mnist',
                                                  normalize_range=True)
train_dataset = InMemoryDataset(X_train, y_train, True)
val_dataset = InMemoryDataset(X_val, y_val, False)
ssf = mo.SearchSpaceFactory(lambda: dnn.dnn_net(10))

# Each process should have a unique rank. The process with rank 0 will act as the
# master process that is in charge of the searcher. Every other process acts
# as a worker that evaluates architectures sent to them.
if comm.get_rank() == 0:
    searcher = RandomSearcher(ssf.get_search_space)

    models_sampled = 0
    killed = 0
    finished = 0

    # This process keeps going as long as we have not received results for all sampled
    # models and not all the worker processes have been killed. Kill signals start
    # being sent out once the searcher has finished sampling the number of models
    # specified by the `num_total_models` parameter
Esempio n. 8
0
import deep_architect.modules as mo
import deep_architect.contrib.misc.search_spaces.tensorflow.dnn as css_dnn


def get_dnn_search_space_fn(num_classes):
    """Return a search-space callable producing a DNN for `num_classes`."""

    def build():
        return css_dnn.dnn_net(num_classes)

    return mo.SearchSpaceFactory(build).get_search_space


num_classes = 10
# NOTE(review): get_dnn_search_space_fn already returns a factory's
# get_search_space, which is then wrapped in a second SearchSpaceFactory here.
# The double wrapping looks redundant — confirm it is intentional.
search_space_fn = mo.SearchSpaceFactory(
    get_dnn_search_space_fn(num_classes)).get_search_space
Esempio n. 9
0
    name_to_hyperp = {
        ut.json_object_to_json_string({
            "node_id": i,
            "in_node_id": j
        }): D([0, 1])
        for i in range(num_nodes) for j in range(i)
    }
    return mo.substitution_module("ConvStage",
                                  substitution_fn,
                                  name_to_hyperp, ["in"], ["out"],
                                  scope=None)


# Build a conv-stage search space, randomly assign all of its free
# hyperparameters, and draw the resulting architecture graph.
(inputs, outputs
 ) = mo.SearchSpaceFactory(lambda: conv_stage(32, 3, 8)).get_search_space()
random_specify(outputs)
# Alternative: assign every independent hyperparameter the value 1 instead of
# sampling randomly.
# for h in co.unassigned_independent_hyperparameter_iterator(outputs):
#     h.assign_value(1)

vi.draw_graph(outputs, draw_module_hyperparameter_info=False)

# inputs_val = Input((32, 32, 3))
# co.forward({inputs["in"]: inputs_val})
# outputs_val = outputs["out"].val

# model = Model(inputs=inputs_val, outputs=outputs_val)
# model.summary()

### NOTE: these are done.
Esempio n. 10
0
            node_id_to_outputs.append(c_outputs)

        out_outputs = node_id_to_outputs[-1]
        return in_inputs, out_outputs

    name_to_hyperp = {
        ut.json_object_to_json_string({
            "node_id": i,
            "in_node_id": j
        }): D([0, 1]) for i in range(1, num_nodes) for j in range(i - 1)
    }
    return mo.substitution_module(
        "Motif", substitution_fn, name_to_hyperp, ["in"], ["out"], scope=None)


# Build a two-level motif search space (a motif whose cells are themselves
# motifs), assign every free hyperparameter the value 1, and draw the graph.
(inputs, outputs) = mo.SearchSpaceFactory(
    lambda: motif(lambda: motif(batch_normalization, 4), 4)).get_search_space()
# Alternative: a single-level motif, or random hyperparameter assignment.
# (inputs, outputs) = mo.SearchSpaceFactory(
#     lambda: motif(batch_normalization, 4)).get_search_space()
# random_specify(outputs)
for h in co.unassigned_independent_hyperparameter_iterator(outputs):
    h.assign_value(1)

vi.draw_graph(outputs, draw_module_hyperparameter_info=False)

# inputs_val = Input((32, 32, 3))
# co.forward({inputs["in"]: inputs_val})
# outputs_val = outputs["out"].val

# model = Model(inputs=inputs_val, outputs=outputs_val)
# model.summary()
Esempio n. 11
0
 def __init__(self, search_space_fn, reset_default_scope_upon_sample=True):
     """Wrap `search_space_fn` in a SearchSpaceFactory and expose its getter."""
     factory = mo.SearchSpaceFactory(search_space_fn,
                                     reset_default_scope_upon_sample)
     self.search_space_fn = factory.get_search_space