Example #1
0
File: nas.py  Project: lkampoli/deephyper
def main(**kwargs):
    """Instantiate the search selected on the command line and run it.

    If the search's module defines an ``on_exit`` handler, it is
    registered for SIGINT and SIGTERM before the search starts;
    otherwise the search runs without an exit procedure.
    """
    search_name = sys.argv[2]
    # Resolve the search class from its dotted path and build it.
    search_cls = load_attr_from(HPS_SEARCHES[search_name])
    search_obj = search_cls(**kwargs)
    try:
        on_exit = load_attr_from(f"{search_obj.__module__}.on_exit")
    except AttributeError:  # the module defines no on_exit handler
        print("This search doesn't have an exiting procedure...")
    else:
        for sig in (signal.SIGINT, signal.SIGTERM):
            signal.signal(sig, on_exit)
    search_obj.main()
Example #2
0
def main(**kwargs):
    """Instantiate the search selected on the command line and run it.

    Raises:
        AttributeError: if the search's module does not define an
            ``on_exit`` handler.
    """
    search_name = sys.argv[2]
    search_cls = load_attr_from(HPS_SEARCHES[search_name])
    search_obj = search_cls(**kwargs)

    # The former ``except AttributeError as e: raise e`` handler was a
    # no-op catch-and-reraise; let the AttributeError propagate directly
    # when the search module does not define ``on_exit``.
    on_exit = load_attr_from(f"{search_obj.__module__}.on_exit")
    signal.signal(signal.SIGINT, on_exit)
    signal.signal(signal.SIGTERM, on_exit)

    search_obj.main()
def test_trainer_regressor_train_valid_with_multiple_ndarray_inputs():
    """Train a random architecture on the multi-input linear-regression
    benchmark for two epochs and check training returned a finite result."""
    from deephyper.benchmark.nas.linearRegMultiInputs.problem import Problem

    config = Problem.space
    config["hyperparameters"]["num_epochs"] = 2

    # Resolve dotted-path config entries into callables.
    loader = util.load_attr_from(config["load_data"]["func"])
    config["load_data"]["func"] = loader
    config["create_search_space"]["func"] = util.load_attr_from(
        config["create_search_space"]["func"])

    # Load the training/validation data, forwarding optional kwargs.
    data_kwargs = config["load_data"].get("kwargs")
    if data_kwargs is None:
        (train_X, train_y), (valid_X, valid_y) = loader()
    else:
        (train_X, train_y), (valid_X, valid_y) = loader(**data_kwargs)

    print("[PARAM] Data loaded")
    # Per-sample shapes: drop the leading batch dimension.
    input_shape = [np.shape(x)[1:] for x in train_X]
    output_shape = list(np.shape(train_y))[1:]

    config["data"] = {
        "train_X": train_X,
        "train_Y": train_y,
        "valid_X": valid_X,
        "valid_Y": valid_y
    }

    search_space = config["create_search_space"]["func"](
        input_shape, output_shape, **config["create_search_space"]["kwargs"])
    arch_seq = [random() for _ in range(search_space.num_nodes)]
    print("arch_seq: ", arch_seq)
    search_space.set_ops(arch_seq)
    search_space.draw_graphviz("trainer_keras_regressor_test.dot")

    # Optional preprocessing hook.
    if config.get("preprocessing") is None:
        config["preprocessing"] = None
    else:
        config["preprocessing"]["func"] = util.load_attr_from(
            config["preprocessing"]["func"])

    model = search_space.create_model()
    plot_model(model,
               to_file="trainer_keras_regressor_test.png",
               show_shapes=True)

    trainer = TrainerTrainValid(config=config, model=model)

    res = trainer.train()
    assert res != sys.float_info.max
def test_trainer_regressor_train_valid_with_one_input():
    """Train a random architecture on the single-input linear-regression
    benchmark for two epochs and check training returned a finite result."""
    from deephyper.benchmark.nas.linearReg.problem import Problem

    config = Problem.space
    config['hyperparameters']['num_epochs'] = 2

    # Resolve dotted-path config entries into callables.
    loader = util.load_attr_from(config['load_data']['func'])
    config['load_data']['func'] = loader
    config['create_search_space']['func'] = util.load_attr_from(
        config['create_search_space']['func'])

    # Load the training/validation data, forwarding optional kwargs.
    data_kwargs = config['load_data'].get('kwargs')
    if data_kwargs is None:
        (train_X, train_y), (valid_X, valid_y) = loader()
    else:
        (train_X, train_y), (valid_X, valid_y) = loader(**data_kwargs)

    print('[PARAM] Data loaded')
    # Per-sample shapes: drop the leading batch dimension.
    input_shape = np.shape(train_X)[1:]
    output_shape = np.shape(train_y)[1:]

    config['data'] = {
        'train_X': train_X,
        'train_Y': train_y,
        'valid_X': valid_X,
        'valid_Y': valid_y
    }

    search_space = config['create_search_space']['func'](
        input_shape, output_shape, **config['create_search_space']['kwargs'])
    arch_seq = [random() for _ in range(search_space.num_nodes)]
    print('arch_seq: ', arch_seq)
    search_space.set_ops(arch_seq)
    search_space.draw_graphviz('trainer_keras_regressor_test.dot')

    # Optional preprocessing hook.
    if config.get('preprocessing') is None:
        config['preprocessing'] = None
    else:
        config['preprocessing']['func'] = util.load_attr_from(
            config['preprocessing']['func'])

    model = search_space.create_model()
    plot_model(model,
               to_file='trainer_keras_regressor_test.png',
               show_shapes=True)

    trainer = TrainerTrainValid(config=config, model=model)

    res = trainer.train()
    assert res != sys.float_info.max
def test_trainer_regressor_train_valid_with_multiple_ndarray_inputs():
    """Build a random architecture for the multi-input linear-regression
    benchmark and run a full training pass.

    NOTE(review): unlike the sibling tests in this file, this variant does
    not cap ``num_epochs`` and does not check the value returned by
    ``trainer.train()`` -- confirm whether that is intentional.
    """
    from deephyper.benchmark.nas.linearRegMultiInputs.problem import Problem
    config = Problem.space

    # load functions: resolve dotted-path config entries into callables
    load_data = util.load_attr_from(config['load_data']['func'])
    config['load_data']['func'] = load_data
    config['create_structure']['func'] = util.load_attr_from(
        config['create_structure']['func'])

    # Loading data, forwarding optional kwargs from the problem config
    kwargs = config['load_data'].get('kwargs')
    (tX, ty), (vX, vy) = load_data() if kwargs is None else load_data(**kwargs)

    print('[PARAM] Data loaded')
    # Set data shape: one shape per input array, batch dimension dropped
    input_shape = [np.shape(itX)[1:]
                   for itX in tX]  # interested in shape of data not in length
    output_shape = list(np.shape(ty))[1:]

    config['data'] = {
        'train_X': tX,
        'train_Y': ty,
        'valid_X': vX,
        'valid_Y': vy
    }

    # Build the structure and apply the randomly sampled architecture.
    structure = config['create_structure']['func'](
        input_shape, output_shape, **config['create_structure']['kwargs'])
    arch_seq = [random() for i in range(structure.num_nodes)]
    print('arch_seq: ', arch_seq)
    structure.set_ops(arch_seq)
    structure.draw_graphviz('trainer_keras_regressor_test.dot')

    # Optional preprocessing hook resolved from its dotted path.
    if config.get('preprocessing') is not None:
        preprocessing = util.load_attr_from(config['preprocessing']['func'])
        config['preprocessing']['func'] = preprocessing
    else:
        config['preprocessing'] = None

    model = structure.create_model()
    plot_model(model,
               to_file='trainer_keras_regressor_test.png',
               show_shapes=True)

    trainer = TrainerRegressorTrainValid(config=config, model=model)

    trainer.train()
Example #6
0
    def __init__(self, problem, run, evaluator, **kwargs):
        """Set up the RL search: evaluator, reward rule and MPI topology.

        Args:
            problem: the search problem definition (exposes ``.space``).
            run: the run function evaluated for each candidate.
            evaluator (str): name of the evaluation method to create.
            **kwargs: optional ``num_episodes`` (defaults to unbounded)
                and required ``reward_rule`` (attribute name under
                ``deephyper.search.nas.agent.utils``).
        """
        super().__init__(problem, run, evaluator, **kwargs)
        # set in super : self.problem
        # set in super : self.run_func
        # set in super : self.evaluator
        # NOTE(review): ``key`` is not defined in this method or any
        # visible scope -- this line looks like it raises NameError at
        # runtime; presumably a cache key from kwargs was intended. Confirm.
        self.evaluator = Evaluator.create(self.run_func,
                                          cache_key=key,
                                          method=evaluator)

        # Unbounded episode count unless explicitly configured.
        self.num_episodes = kwargs.get('num_episodes')
        if self.num_episodes is None:
            self.num_episodes = math.inf

        # Resolve the reward rule by name from the agent utilities module.
        self.reward_rule = util.load_attr_from(
            'deephyper.search.nas.agent.utils.' + kwargs['reward_rule'])

        self.space = self.problem.space

        logger.debug(f'evaluator: {type(self.evaluator)}')

        # One MPI rank acts as the parameter server; the rest are agents.
        self.num_agents = MPI.COMM_WORLD.Get_size(
        ) - 1  # one is  the parameter server
        self.rank = MPI.COMM_WORLD.Get_rank()

        logger.debug(f'num_agents: {self.num_agents}')
        logger.debug(f'rank: {self.rank}')
def test_trainer_regressor_train_valid_with_multiple_generator_inputs():
    """Train a random architecture on the generator-based multi-input
    linear-regression benchmark and check the result is finite."""
    from deephyper.benchmark.nas.linearRegMultiInputsGen.problem import Problem

    config = Problem.space
    config['hyperparameters']['num_epochs'] = 2

    # Resolve dotted-path config entries into callables.
    loader = util.load_attr_from(config['load_data']['func'])
    config['load_data']['func'] = loader
    config['create_search_space']['func'] = util.load_attr_from(
        config['create_search_space']['func'])

    # Generator-based loaders return a dict carrying data and 'shapes'.
    data_kwargs = config['load_data'].get('kwargs')
    data = loader() if data_kwargs is None else loader(**data_kwargs)
    print('[PARAM] Data loaded')

    # Shapes are provided by the loader instead of being inferred.
    config['data'] = data
    input_shape = [
        data['shapes'][0][f'input_{i}'] for i in range(len(data['shapes'][0]))
    ]
    output_shape = data['shapes'][1]

    search_space = config['create_search_space']['func'](
        input_shape, output_shape, **config['create_search_space']['kwargs'])
    arch_seq = [random() for _ in range(search_space.num_nodes)]
    print('arch_seq: ', arch_seq)
    search_space.set_ops(arch_seq)
    search_space.draw_graphviz('trainer_keras_regressor_test.dot')

    # Optional preprocessing hook.
    if config.get('preprocessing') is None:
        config['preprocessing'] = None
    else:
        config['preprocessing']['func'] = util.load_attr_from(
            config['preprocessing']['func'])

    model = search_space.create_model()
    plot_model(model,
               to_file='trainer_keras_regressor_test.png',
               show_shapes=True)

    trainer = TrainerTrainValid(config=config, model=model)

    res = trainer.train()
    assert res != sys.float_info.max
Example #8
0
def run_model_posttraining(config):
    """Rebuild the architecture in ``config['arch_seq']``, train it, and
    return the keras history augmented with parameter count and timing."""
    data = load_data_source()

    # Four input arrays plus one target array each for train and val.
    train_inputs = [data[f'x_train_{i}'] for i in range(4)]
    train_target = data['y_train']
    val_inputs = [data[f'x_val_{i}'] for i in range(4)]
    val_target = data['y_val']

    num_epochs = config['hyperparameters']['num_epochs']
    batch_size = 32  # config['hyperparameters']['batch_size']

    config['create_structure']['func'] = util.load_attr_from(
        config['create_structure']['func'])

    # Per-sample input shapes; single scalar output.
    input_shape = [np.shape(arr)[1:] for arr in train_inputs]
    output_shape = (1, )

    cs_kwargs = config['create_structure'].get('kwargs')
    if cs_kwargs is None:
        structure = config['create_structure']['func'](input_shape,
                                                       output_shape)
    else:
        structure = config['create_structure']['func'](input_shape,
                                                       output_shape,
                                                       **cs_kwargs)

    structure.set_ops(config['arch_seq'])
    structure.draw_graphviz('nas_model_uno.dot')

    model = structure.create_model()

    from keras.utils import plot_model
    plot_model(model, 'keras_model_uno.png', show_shapes=True)

    n_params = model.count_params()

    optimizer = optimizers.deserialize({'class_name': 'adam', 'config': {}})
    model.compile(loss='mse', optimizer=optimizer, metrics=[mae, r2])

    # Time the training run.
    start = time.time()
    history = model.fit(train_inputs,
                        train_target,
                        batch_size=batch_size,
                        epochs=num_epochs,
                        validation_data=(val_inputs, val_target))
    elapsed = time.time() - start

    data = history.history
    data['n_parameters'] = n_params
    data['training_time'] = elapsed

    print(data)

    return data
Example #9
0
def run(config):
    """Build the architecture in ``config['arch_seq']``, train it, and
    return the training result (negated for regression problems)."""
    # Resolve dotted-path config entries into callables.
    loader = util.load_attr_from(config['load_data']['func'])
    config['load_data']['func'] = loader
    config['create_structure']['func'] = util.load_attr_from(
        config['create_structure']['func'])

    # Load the training/validation data, forwarding optional kwargs.
    data_kwargs = config['load_data'].get('kwargs')
    if data_kwargs is None:
        (t_X, t_y), (v_X, v_y) = loader()
    else:
        (t_X, t_y), (v_X, v_y) = loader(**data_kwargs)
    print('[PARAM] Data loaded')

    # Per-sample shapes: drop the leading batch dimension.
    input_shape = list(np.shape(t_X))[1:]
    output_shape = list(np.shape(t_y))[1:]

    config['data'] = {
        'train_X': t_X,
        'train_Y': t_y,
        'valid_X': v_X,
        'valid_Y': v_y
    }

    structure = config['create_structure']['func'](
        input_shape, output_shape, **config['create_structure']['kwargs'])
    structure.set_ops(config['arch_seq'])

    if config['regression']:
        # Optional preprocessing hook (regression path only).
        if config.get('preprocessing') is None:
            config['preprocessing'] = None
        else:
            config['preprocessing']['func'] = util.load_attr_from(
                config['preprocessing']['func'])

        model = structure.create_model()
        trainer = TrainerRegressorTrainValid(config=config, model=model)
        # Regression results are negated (lower loss => higher reward).
        return -trainer.train()

    model = structure.create_model(activation='softmax')
    trainer = TrainerClassifierTrainValid(config=config, model=model)
    return trainer.train()
Example #10
0
def load_config(config):
    """Resolve the dotted-path entries of *config* into callables, in place.

    Resolves ``load_data``, optional ``augment``, ``create_search_space``,
    optional ``preprocessing`` (set to ``None`` when absent), and a
    dotted-path string ``objective``.

    Args:
        config (dict): problem configuration, mutated in place.
    """
    # ! load functions
    config["load_data"]["func"] = util.load_attr_from(
        config["load_data"]["func"])

    # load augmentation strategy
    # (idiom fix: ``x is not None`` instead of ``not x is None``)
    if config.get("augment") is not None:
        config["augment"]["func"] = util.load_attr_from(
            config["augment"]["func"])

    # load the function creating the search space
    config["create_search_space"]["func"] = util.load_attr_from(
        config["create_search_space"]["func"])

    if config.get("preprocessing") is not None:
        config["preprocessing"]["func"] = util.load_attr_from(
            config["preprocessing"]["func"])
    else:
        config["preprocessing"] = None

    # A dotted-path objective (a string containing ".") is imported too.
    # (idiom fix: isinstance instead of ``type(...) is str``)
    if isinstance(config["objective"], str) and "." in config["objective"]:
        config["objective"] = util.load_attr_from(config["objective"])
Example #11
0
def add_subparser(parsers):
    """Register the ``nas`` sub-command with one sub-parser per search."""
    parser = parsers.add_parser(
        'nas', help='Command line to run neural architecture search.')

    subparsers = parser.add_subparsers()

    # One sub-command per available search, configured by its class.
    for name, module_attr in HPS_SEARCHES.items():
        search_cls = load_attr_from(module_attr)
        sub = subparsers.add_parser(name=name, conflict_handler='resolve')
        sub = search_cls.get_parser(sub)
        sub.set_defaults(func=main)
Example #12
0
def add_subparser(parsers):
    """Register the ``hps`` sub-command with one sub-parser per search."""
    parser = parsers.add_parser(
        "hps", help="Command line to run hyper-parameter search."
    )

    subparsers = parser.add_subparsers()

    # One sub-command per available search, configured by its class.
    for name, module_attr in HPS_SEARCHES.items():
        search_cls = load_attr_from(module_attr)
        sub = subparsers.add_parser(name=name, conflict_handler="resolve")
        sub = search_cls.get_parser(sub)
        sub.set_defaults(func=main)
Example #13
0
def selectMetric(name: str):
    """Return the metric defined by name.

    Args:
        name (str): a string referenced in DeepHyper, one referenced in keras or an attribute name to import.

    Returns:
        str or callable: a string supposing it is referenced in the keras framework or a callable taking (y_true, y_pred) as inputs and returning a tensor.
    """
    # Identity test (``is None``) instead of ``== None``: avoids invoking
    # a custom __eq__ and is the idiomatic "not registered" check.
    if metrics.get(name) is None:
        try:
            return util.load_attr_from(name)
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt /
            # SystemExit are no longer swallowed; any import failure
            # falls back to the name itself.
            return name  # supposing it is referenced in keras metrics
    else:
        return metrics[name]
Example #14
0
def run_model(config):
    """Build the architecture in ``config['arch_seq']``, train it on the
    combo data set, and return the first-epoch validation r2.

    Args:
        config (dict): holds ``hyperparameters`` (num_epochs, batch_size),
            ``create_structure`` (dotted path + optional kwargs) and
            ``arch_seq``.

    Returns:
        float: ``history.history['val_r2'][0]``, or -1.0 when the metric
        is missing (e.g. training produced no validation results).
    """
    t1 = time.time()
    num_epochs = config['hyperparameters']['num_epochs']
    batch_size = config['hyperparameters']['batch_size']

    config['create_structure']['func'] = util.load_attr_from(
        config['create_structure']['func'])

    # Input shapes are fixed for this benchmark; single scalar output.
    input_shape = [(942, ), (3820, ), (3820, )]
    output_shape = (1, )

    cs_kwargs = config['create_structure'].get('kwargs')
    if cs_kwargs is None:
        structure = config['create_structure']['func'](input_shape, output_shape)
    else:
        structure = config['create_structure']['func'](input_shape, output_shape, **cs_kwargs)

    arch_seq = config['arch_seq']

    print(f'actions list: {arch_seq}')
    structure.set_ops(arch_seq)
    structure.draw_graphviz('model_global_combo.dot')

    model = structure.create_model()

    from keras.utils import plot_model
    plot_model(model, 'model_global_combo.png', show_shapes=True)

    model.summary()

    t2 = time.time()
    t_model_create = t2 - t1
    print('Time model creation: ', t_model_create)

    t1 = time.time()

    # Benchmark-specific setup: seeded run with the configured optimizer.
    params = initialize_parameters()
    args = Struct(**params)
    print(args)
    set_seed(args.rng_seed)

    optimizer = optimizers.deserialize({'class_name': args.optimizer, 'config': {}})
    base_lr = args.base_lr or K.get_value(optimizer.lr)
    if args.learning_rate:
        K.set_value(optimizer.lr, args.learning_rate)

    model.compile(loss=args.loss, optimizer=optimizer, metrics=[mae, r2])

    data = combo_ld_numpy(args)

    x_train_list = [data['x_train_0'], data['x_train_1'], data['x_train_2']]
    y_train = data['y_train']
    x_val_list = [data['x_val_0'], data['x_val_1'], data['x_val_2']]
    y_val = data['y_val']
    t2 = time.time()
    t_data_loading = t2 - t1
    print('Time data loading: ', t_data_loading)

    t1 = time.time()
    history = model.fit(x_train_list, y_train,
                        batch_size=batch_size,
                        shuffle=args.shuffle,
                        epochs=num_epochs,
                        validation_data=(x_val_list, y_val))
    t2 = time.time()
    t_training = t2 - t1
    print('Time training: ', t_training)

    print(history.history)

    try:
        # Narrowed from a bare ``except:``: only a missing 'val_r2' entry
        # (KeyError) or an empty history list (IndexError) means failure
        # here; KeyboardInterrupt and SystemExit should propagate.
        return history.history['val_r2'][0]
    except (KeyError, IndexError):
        return -1.0
Example #15
0
def main(config):
    """Load data, rebuild the architecture ``ARCH_SEQ``, optionally restore
    saved weights, train, and (for regression) report per-dimension Pearson
    correlations between predictions and targets on the validation split.

    Args:
        config (dict): problem configuration; uses the ``load_data``,
            ``create_structure``, ``preprocessing`` and ``regression``
            entries, and receives the loaded data under ``config['data']``.
    """

    num_epochs = NUM_EPOCHS  # module-level constant; 0 disables training

    load_data = config['load_data']['func']

    print('[PARAM] Loading data')
    # Loading data: forward PROP to loaders that accept a 'prop' parameter,
    # otherwise call with the configured kwargs (or none at all).
    kwargs = config['load_data'].get('kwargs')
    sig_load_data = signature(load_data)
    if len(sig_load_data.parameters) == 0:
        data = load_data()
    else:
        if 'prop' in sig_load_data.parameters:
            if kwargs is None:
                data = load_data(prop=PROP)
            else:
                kwargs['prop'] = PROP
                data = load_data(**kwargs)
        else:
            if kwargs is None:
                data = load_data()
            else:
                data = load_data(**kwargs)
    print('[PARAM] Data loaded')

    # Set data shape. Two layouts are supported: a ((train), (valid)) pair
    # of ndarray/list tuples, or a dict carrying generators plus 'shapes'.
    if type(data) is tuple:
        if len(data) != 2:
            raise RuntimeError(
                f'Loaded data are tuple, should ((training_input, training_output), (validation_input, validation_output)) but length=={len(data)}'
            )
        (t_X, t_y), (v_X, v_y) = data
        if type(t_X) is np.ndarray and  type(t_y) is np.ndarray and \
            type(v_X) is np.ndarray and type(v_y) is np.ndarray:
            input_shape = np.shape(t_X)[1:]
        elif type(t_X) is list and type(t_y) is np.ndarray and \
            type(v_X) is list and type(v_y) is np.ndarray:
            input_shape = [np.shape(itX)[1:] for itX in t_X
                           ]  # interested in shape of data not in length
        else:
            raise RuntimeError(
                f'Data returned by load_data function are of a wrong type: type(t_X)=={type(t_X)},  type(t_y)=={type(t_y)}, type(v_X)=={type(v_X)}, type(v_y)=={type(v_y)}'
            )
        output_shape = np.shape(t_y)[1:]
        config['data'] = {
            'train_X': t_X,
            'train_Y': t_y,
            'valid_X': v_X,
            'valid_Y': v_y
        }
    elif type(data) is dict:
        config['data'] = data
        input_shape = [
            data['shapes'][0][f'input_{i}']
            for i in range(len(data['shapes'][0]))
        ]
        output_shape = data['shapes'][1]
    else:
        raise RuntimeError(
            f'Data returned by load_data function are of an unsupported type: {type(data)}'
        )

    # Build the structure, forwarding optional constructor kwargs.
    cs_kwargs = config['create_structure'].get('kwargs')
    if cs_kwargs is None:
        structure = config['create_structure']['func'](input_shape,
                                                       output_shape)
    else:
        structure = config['create_structure']['func'](input_shape,
                                                       output_shape,
                                                       **cs_kwargs)

    arch_seq = ARCH_SEQ  # module-level constant: the architecture to build
    structure.set_ops(arch_seq)
    try:
        structure.draw_graphviz('graph_full.dot')
    except:
        # best effort: the graphviz output is purely informational
        pass

    print('Model operations set.')

    # Optional preprocessing hook resolved from its dotted path.
    if config.get('preprocessing') is not None:
        preprocessing = util.load_attr_from(config['preprocessing']['func'])
        config['preprocessing']['func'] = preprocessing
        print(f"Preprocessing set with: {config['preprocessing']}")
    else:
        print('No preprocessing...')
        config['preprocessing'] = None

    # Build the model and trainer; creation failures are logged, not fatal.
    # NOTE(review): if model creation fails, ``trainer`` is never bound and
    # the ``trainer.add_callback`` call below raises NameError -- confirm
    # whether this path can occur in practice.
    model_created = False
    if config['regression']:
        try:
            model = structure.create_model()
            model_created = True
        except:
            model_created = False
            print('Error: Model creation failed...')
            print('INFO STACKTRACE: ', traceback.format_exc())
        if model_created:
            try:
                plot_model(model, to_file='model.png', show_shapes=True)
                model.summary()
            except Exception as err:
                # NOTE(review): the "\t" in this message renders as a TAB;
                # presumably "can't" was intended.
                print('can\t create model.png file...')
                print('INFO STACKTRACE: ', traceback.format_exc())
            try:
                # Warm-start from previously saved weights when available.
                model.load_weights("model_weights.h5")
                print('model weights loaded!')
            except Exception as err:
                print('failed to load model weights...')
                print('INFO STACKTRACE: ', traceback.format_exc())
            trainer = TrainerRegressorTrainValid(config=config, model=model)
    else:
        try:
            model = structure.create_model(activation='softmax')
            model_created = True
        except Exception as err:
            model_created = False
            print('Error: Model creation failed...')
            print('INFO STACKTRACE: ', traceback.format_exc())
        if model_created:
            try:
                plot_model(model, to_file='model.png', show_shapes=True)
            except Exception as err:
                print('can\t create model.png file...')
                print('INFO STACKTRACE: ', traceback.format_exc())
            try:
                model.load_weights("model_weights.h5")
                print('model weights loaded!')
            except Exception as err:
                print('failed to load model weights...')
                print('INFO STACKTRACE: ', traceback.format_exc())
            trainer = TrainerClassifierTrainValid(config=config, model=model)

    # Log training to TensorBoard.
    tb_cb = keras.callbacks.TensorBoard(histogram_freq=0,
                                        batch_size=256,
                                        write_grads=True)
    trainer.add_callback(tb_cb)

    print('Trainer is ready.')
    print(f'Start training... num_epochs={num_epochs}')

    nparams = number_parameters()
    print('model number of parameters: ', nparams)
    if NUM_EPOCHS > 0:
        trainer.train(num_epochs=num_epochs)

        # serialize weights to HDF5
        model.save_weights("model_weights.h5")
        print("Saved model weight to disk: model_weights.h5")

    # For regression, report Pearson r per output dimension on validation.
    if config['regression']:
        y_orig, y_pred = trainer.predict('valid')
        r_list = list()
        for dim in range(np.shape(y_orig)[1]):
            r, _ = stats.pearsonr(y_orig[:, dim], y_pred[:, dim])
            r_list.append(r)
        print('r_list: ', r_list)
from random import random

import numpy as np
from tensorflow import keras

from deephyper.search.nas.model.keras.trainers.regressor_kfold import KerasTrainerRegressorKfold
from deephyper.search import util
from deephyper.benchmark.nas.toy.pb_keras import Problem

print(Problem)
config = dict(Problem.space)

load_data = util.load_attr_from(config['load_data']['func'])
config['load_data']['func'] = load_data

# config['create_structure']['func'] = util.load_attr_from(
#     config['create_structure']['func'])

print('[PARAM] Loading data')
# Loading data
kwargs = config['load_data'].get('kwargs')
(t_X, t_y), (v_X, v_y) = load_data() if kwargs is None else load_data(**kwargs)
print('[PARAM] Data loaded')

# Set data shape
input_shape = list(np.shape(t_X))[1:]
output_shape = list(np.shape(t_y))[1:]

config['data'] = {
    'train_X': t_X,
    'train_Y': t_y,
Example #17
0
def run(config):
    """Load data, build the architecture in ``config['arch_seq']``, train
    it, and return the training result.

    Args:
        config (dict): problem configuration with ``load_data``,
            ``create_structure``, optional ``preprocessing`` and a boolean
            ``regression`` flag.

    Returns:
        The trainer's result, or -1 to penalise an architecture whose
        model cannot be created.
    """
    # Resolve dotted-path config entries into callables.
    load_data = util.load_attr_from(config['load_data']['func'])
    config['load_data']['func'] = load_data
    config['create_structure']['func'] = util.load_attr_from(
        config['create_structure']['func'])

    # Loading data
    kwargs = config['load_data'].get('kwargs')
    data = load_data() if kwargs is None else load_data(**kwargs)
    logger.info(f'Data loaded with kwargs: {kwargs}')

    # Set data shape. Two layouts are supported: a ((train), (valid)) pair
    # of ndarray/list tuples, or a dict carrying generators plus 'shapes'.
    if type(data) is tuple:
        if len(data) != 2:
            raise RuntimeError(
                f'Loaded data are tuple, should ((training_input, training_output), (validation_input, validation_output)) but length=={len(data)}'
            )
        (t_X, t_y), (v_X, v_y) = data
        if type(t_X) is np.ndarray and type(t_y) is np.ndarray and \
                type(v_X) is np.ndarray and type(v_y) is np.ndarray:
            input_shape = np.shape(t_X)[1:]
        elif type(t_X) is list and type(t_y) is np.ndarray and \
                type(v_X) is list and type(v_y) is np.ndarray:
            # interested in shape of data not in length
            input_shape = [np.shape(itX)[1:] for itX in t_X]
        else:
            raise RuntimeError(
                f'Data returned by load_data function are of a wrong type: type(t_X)=={type(t_X)},  type(t_y)=={type(t_y)}, type(v_X)=={type(v_X)}, type(v_y)=={type(v_y)}'
            )
        output_shape = np.shape(t_y)[1:]
        config['data'] = {
            'train_X': t_X,
            'train_Y': t_y,
            'valid_X': v_X,
            'valid_Y': v_y
        }
    elif type(data) is dict:
        config['data'] = data
        input_shape = [
            data['shapes'][0][f'input_{i}']
            for i in range(len(data['shapes'][0]))
        ]
        output_shape = data['shapes'][1]
    else:
        raise RuntimeError(
            f'Data returned by load_data function are of an unsupported type: {type(data)}'
        )

    logger.info(f'input_shape: {input_shape}')
    logger.info(f'output_shape: {output_shape}')

    # Build the structure, forwarding optional constructor kwargs.
    cs_kwargs = config['create_structure'].get('kwargs')
    if cs_kwargs is None:
        structure = config['create_structure']['func'](input_shape,
                                                       output_shape)
    else:
        structure = config['create_structure']['func'](input_shape,
                                                       output_shape,
                                                       **cs_kwargs)

    arch_seq = config['arch_seq']

    logger.info(f'actions list: {arch_seq}')

    structure.set_ops(arch_seq)

    # Optional preprocessing hook resolved from its dotted path.
    if config.get('preprocessing') is not None:
        preprocessing = util.load_attr_from(config['preprocessing']['func'])
        config['preprocessing']['func'] = preprocessing
    else:
        config['preprocessing'] = None

    # Model creation is best-effort: failures are penalised, not raised.
    # The handlers were narrowed from bare ``except:`` to ``Exception`` so
    # KeyboardInterrupt / SystemExit are no longer swallowed.
    model_created = False
    if config['regression']:
        try:
            model = structure.create_model()
            model_created = True
        except Exception:
            model_created = False
            logger.info('Error: Model creation failed...')
            logger.info(traceback.format_exc())
        if model_created:
            trainer = TrainerRegressorTrainValid(config=config, model=model)
    else:
        try:
            model = structure.create_model(activation='softmax')
            model_created = True
        except Exception:
            model_created = False
            logger.info('Error: Model creation failed...')
            logger.info(traceback.format_exc())
        if model_created:
            trainer = TrainerClassifierTrainValid(config=config, model=model)

    if model_created:
        result = trainer.train()
    else:
        # penalising actions if model cannot be created
        result = -1
    return result
Example #18
0
def run_model(config):
    """Build the architecture in ``config['arch_seq']`` on the uno data
    set, train it under a per-batch time budget, and return the
    first-epoch validation r2.

    Returns:
        float: ``history.history['val_r2'][0]``, or -1.0 when the metric
        is missing (e.g. training was stopped before validation).
    """
    params = initialize_parameters()

    args = Struct(**params)

    data = load_data2()

    # Four input arrays plus one target array each for train and val.
    x_train_list = [data[f'x_train_{i}'] for i in range(4)]
    y_train = data['y_train']
    x_val_list = [data[f'x_val_{i}'] for i in range(4)]
    y_val = data['y_val']

    num_epochs = config['hyperparameters']['num_epochs']
    batch_size = args.batch_size  # config['hyperparameters']['batch_size']

    config['create_structure']['func'] = util.load_attr_from(
        config['create_structure']['func'])

    # Per-sample input shapes; single scalar output.
    input_shape = [np.shape(a)[1:] for a in x_train_list]
    print('input_shape: ', input_shape)
    output_shape = (1, )
    print('output_shape: ', output_shape)

    cs_kwargs = config['create_structure'].get('kwargs')
    if cs_kwargs is None:
        structure = config['create_structure']['func'](input_shape,
                                                       output_shape)
    else:
        structure = config['create_structure']['func'](input_shape,
                                                       output_shape,
                                                       **cs_kwargs)

    arch_seq = config['arch_seq']

    print(f'actions list: {arch_seq}')

    structure.set_ops(arch_seq)

    model = structure.create_model()

    model.summary()

    optimizer = optimizers.deserialize({'class_name': 'adam', 'config': {}})

    model.compile(loss='mse', optimizer=optimizer, metrics=[mae, r2])

    # Abort training when the estimated time exceeds the 900s budget.
    stop_if_unfeasible = StopIfUnfeasible(time_limit=900)

    history = model.fit(x_train_list,
                        y_train,
                        batch_size=batch_size,
                        epochs=num_epochs,
                        callbacks=[stop_if_unfeasible],
                        validation_data=(x_val_list, y_val))

    print('avr_batch_timing :', stop_if_unfeasible.avr_batch_time)
    print('avr_timing: ', stop_if_unfeasible.estimate_training_time)
    print('stopped: ', stop_if_unfeasible.stopped)

    print(history.history)

    try:
        # Narrowed from a bare ``except:``: only a missing 'val_r2' entry
        # (KeyError) or an empty history list (IndexError) means failure
        # here; KeyboardInterrupt and SystemExit should propagate.
        return history.history['val_r2'][0]
    except (KeyError, IndexError):
        return -1.0
    def test_trainer_regressor_train_valid_with_one_input(self):
        """Train a random architecture on the single-input linear
        regression benchmark and check training returned a finite result."""
        import sys
        from random import random

        import numpy as np
        from deephyper.benchmark.nas.linearReg.problem import Problem
        from deephyper.nas.trainer import BaseTrainer
        from deephyper.search import util
        from tensorflow.keras.utils import plot_model

        config = Problem.space
        config["hyperparameters"]["num_epochs"] = 2

        # Resolve dotted-path config entries into callables.
        loader = util.load_attr_from(config["load_data"]["func"])
        config["load_data"]["func"] = loader
        config["create_search_space"]["func"] = util.load_attr_from(
            config["create_search_space"]["func"])

        # Load data, forwarding optional kwargs from the problem config.
        data_kwargs = config["load_data"].get("kwargs")
        if data_kwargs is None:
            (train_X, train_y), (valid_X, valid_y) = loader()
        else:
            (train_X, train_y), (valid_X, valid_y) = loader(**data_kwargs)

        print("[PARAM] Data loaded")
        # Per-sample shapes: drop the leading batch dimension.
        input_shape = np.shape(train_X)[1:]
        output_shape = np.shape(train_y)[1:]

        config["data"] = {
            "train_X": train_X,
            "train_Y": train_y,
            "valid_X": valid_X,
            "valid_Y": valid_y
        }

        search_space = config["create_search_space"]["func"](
            input_shape, output_shape,
            **config["create_search_space"]["kwargs"])
        arch_seq = [random() for _ in range(search_space.num_nodes)]
        print("arch_seq: ", arch_seq)
        search_space.set_ops(arch_seq)
        search_space.plot("trainer_keras_regressor_test.dot")

        # Optional preprocessing hook.
        if config.get("preprocessing") is None:
            config["preprocessing"] = None
        else:
            config["preprocessing"]["func"] = util.load_attr_from(
                config["preprocessing"]["func"])

        model = search_space.create_model()
        plot_model(model,
                   to_file="trainer_keras_regressor_test.png",
                   show_shapes=True)

        trainer = BaseTrainer(config=config, model=model)

        res = trainer.train()
        assert res != sys.float_info.max