Code example #1
0
def test_trainer_regressor_train_valid_with_multiple_ndarray_inputs():
    """Smoke-test TrainerTrainValid on the multi-ndarray-input benchmark.

    Builds a random architecture from the benchmark search space, trains it
    for two epochs, and asserts training did not fail (a failed training
    run returns ``sys.float_info.max``).
    """
    from deephyper.benchmark.nas.linearRegMultiInputs.problem import Problem

    config = Problem.space
    config['hyperparameters']['num_epochs'] = 2

    # Resolve the dotted-path strings stored in the problem into callables.
    load_data = util.load_attr_from(config['load_data']['func'])
    config['load_data']['func'] = load_data
    config['create_search_space']['func'] = util.load_attr_from(
        config['create_search_space']['func'])

    # Load the dataset, forwarding optional keyword arguments when present.
    kwargs = config['load_data'].get('kwargs')
    if kwargs is None:
        (tX, ty), (vX, vy) = load_data()
    else:
        (tX, ty), (vX, vy) = load_data(**kwargs)
    print('[PARAM] Data loaded')

    # Per-input shapes without the leading batch dimension.
    input_shape = [np.shape(arr)[1:] for arr in tX]
    output_shape = list(np.shape(ty))[1:]

    config['data'] = {
        'train_X': tX,
        'train_Y': ty,
        'valid_X': vX,
        'valid_Y': vy
    }

    # Instantiate the search space and apply a random architecture.
    search_space = config['create_search_space']['func'](
        input_shape, output_shape, **config['create_search_space']['kwargs'])
    arch_seq = [random() for _ in range(search_space.num_nodes)]
    print('arch_seq: ', arch_seq)
    search_space.set_ops(arch_seq)
    search_space.draw_graphviz('trainer_keras_regressor_test.dot')

    # Resolve the optional preprocessing callable, if one is configured.
    if config.get('preprocessing') is None:
        config['preprocessing'] = None
    else:
        config['preprocessing']['func'] = util.load_attr_from(
            config['preprocessing']['func'])

    model = search_space.create_model()
    plot_model(model,
               to_file='trainer_keras_regressor_test.png',
               show_shapes=True)

    trainer = TrainerTrainValid(config=config, model=model)
    assert trainer.train() != sys.float_info.max
Code example #2
0
def test_trainer_regressor_train_valid_with_multiple_generator_inputs():
    """Smoke-test TrainerTrainValid on the multi-input generator benchmark.

    Builds a random architecture from the benchmark search space, trains it
    for two epochs, and asserts training did not fail (a failed training
    run returns ``sys.float_info.max``).
    """
    from deephyper.benchmark.nas.linearRegMultiInputsGen.problem import Problem

    config = Problem.space
    config['hyperparameters']['num_epochs'] = 2

    # Resolve the dotted-path strings stored in the problem into callables.
    load_data = util.load_attr_from(config['load_data']['func'])
    config['load_data']['func'] = load_data
    config['create_search_space']['func'] = util.load_attr_from(
        config['create_search_space']['func'])

    # Load the generator-based dataset, forwarding optional kwargs.
    kwargs = config['load_data'].get('kwargs')
    data = load_data() if kwargs is None else load_data(**kwargs)
    print('[PARAM] Data loaded')

    # The generator payload carries its own shape metadata:
    # data['shapes'][0] maps 'input_i' -> shape, data['shapes'][1] is output.
    config['data'] = data
    num_inputs = len(data['shapes'][0])
    input_shape = [data['shapes'][0][f'input_{i}'] for i in range(num_inputs)]
    output_shape = data['shapes'][1]

    # Instantiate the search space and apply a random architecture.
    search_space = config['create_search_space']['func'](
        input_shape, output_shape, **config['create_search_space']['kwargs'])
    arch_seq = [random() for _ in range(search_space.num_nodes)]
    print('arch_seq: ', arch_seq)
    search_space.set_ops(arch_seq)
    search_space.draw_graphviz('trainer_keras_regressor_test.dot')

    # Resolve the optional preprocessing callable, if one is configured.
    if config.get('preprocessing') is None:
        config['preprocessing'] = None
    else:
        config['preprocessing']['func'] = util.load_attr_from(
            config['preprocessing']['func'])

    model = search_space.create_model()
    plot_model(model,
               to_file='trainer_keras_regressor_test.png',
               show_shapes=True)

    trainer = TrainerTrainValid(config=config, model=model)
    assert trainer.train() != sys.float_info.max
Code example #3
0
def test_trainer_regressor_train_valid_with_multiple_generator_inputs():
    """Smoke-test TrainerTrainValid using the run-time setup helpers.

    Uses ``load_config``/``setup_data`` to prepare the multi-input generator
    benchmark, samples a random architecture, trains for two epochs, and
    asserts training did not fail (a failed training run returns
    ``sys.float_info.max``).
    """
    from deephyper.search.nas.model.run.util import compute_objective, load_config, preproc_trainer, setup_data, setup_search_space
    from deephyper.benchmark.nas.linearRegMultiInputsGen.problem import Problem
    config = Problem.space

    load_config(config)

    input_shape, output_shape = setup_data(config)

    # Build the search space with a fixed seed for reproducibility.
    create_search_space = config["create_search_space"]["func"]
    cs_kwargs = config["create_search_space"].get("kwargs")
    if cs_kwargs is None:
        search_space = create_search_space(input_shape, output_shape, seed=42)
    else:
        search_space = create_search_space(input_shape,
                                           output_shape,
                                           seed=42,
                                           **cs_kwargs)

    # Sample a random architecture and apply it once.
    # (The original called ``set_ops(arch_seq)`` a second time after setting
    # num_epochs; that second call was redundant and has been removed.)
    arch_seq = [random() for _ in range(search_space.num_nodes)]
    config["arch_seq"] = arch_seq
    search_space.set_ops(arch_seq)

    config['hyperparameters']['num_epochs'] = 2

    search_space.draw_graphviz('trainer_keras_regressor_test.dot')

    model = search_space.create_model()
    plot_model(model,
               to_file='trainer_keras_regressor_test.png',
               show_shapes=True)

    trainer = TrainerTrainValid(config=config, model=model)

    res = trainer.train()
    assert res != sys.float_info.max
Code example #4
0
def train(config):
    """Run a single post-training session for the architecture in ``config``.

    Seeds numpy/TensorFlow when ``config['seed']`` is set, overrides the
    search hyperparameters with the ``post_train`` ones, builds the model
    from the search space, attaches the configured Keras callbacks, trains,
    and dumps the training history (plus validation-prediction timing) to
    ``post_training_hist_<id>.json``.

    :param config: post-training configuration dict (problem space plus
        'seed', 'id', 'post_train', 'hyperparameters' entries).
    """
    seed = config['seed']
    if seed is not None:
        np.random.seed(seed)
        tf.random.set_random_seed(seed)

    # Pre-settings: particularly important for BeholderCB to work.
    sess = tf.Session()
    K.set_session(sess)

    # Override hyperparameters with post_train hyperparameters.
    keys = filter(lambda k: k in config['hyperparameters'],
                  config['post_train'].keys())
    for k in keys:
        config['hyperparameters'][k] = config['post_train'][k]

    load_config(config)

    input_shape, output_shape = setup_data(config)

    search_space = setup_search_space(config,
                                      input_shape,
                                      output_shape,
                                      seed=seed)
    search_space.draw_graphviz(f'structure_{config["id"]}.dot')
    logger.info('Model operations set.')

    # Model creation is best-effort: log and skip training on failure.
    model_created = False
    try:
        model = search_space.create_model()
        model_created = True
    except Exception:  # was a bare ``except:``; keep best-effort behavior
        model_created = False
        logger.info('Error: Model creation failed...')
        logger.info(traceback.format_exc())

    if model_created:
        # Setup callbacks from the post_train configuration.
        callbacks = []
        callbacks_config = config['post_train'].get('callbacks')
        if callbacks_config is not None:
            for cb_name, cb_conf in callbacks_config.items():
                if cb_name in default_callbacks_config:
                    default_callbacks_config[cb_name].update(cb_conf)

                    if cb_name == 'ModelCheckpoint':
                        default_callbacks_config[cb_name][
                            'filepath'] = f'best_model_{config["id"]}.h5'
                    elif cb_name == 'TensorBoard':
                        if default_callbacks_config[cb_name]['beholder']:
                            callbacks.append(
                                BeholderCB(
                                    logdir=default_callbacks_config[cb_name]
                                    ['log_dir'],
                                    sess=sess))
                        # BUG FIX: 'beholder' is not a keras TensorBoard
                        # kwarg, so it must be removed even when False;
                        # previously it was only popped when truthy, making
                        # Callback(**kwargs) below raise TypeError.
                        default_callbacks_config[cb_name].pop('beholder', None)

                    Callback = getattr(keras.callbacks, cb_name)
                    callbacks.append(
                        Callback(**default_callbacks_config[cb_name]))

                    # BUG FIX: ``Callback`` is a class, so
                    # ``type(Callback).__name__`` always logged 'type'.
                    logger.info(
                        f'Adding new callback {Callback.__name__} with config: {default_callbacks_config[cb_name]}!'
                    )

                else:
                    logger.error(f"'{cb_name}' is not an accepted callback!")

        trainer = TrainerTrainValid(config=config, model=model)
        trainer.callbacks.extend(callbacks)

        json_fname = f'post_training_hist_{config["id"]}.json'
        # Log the number of trainable parameters before running training.
        trainer.init_history()
        with open(json_fname, 'w') as f:
            json.dump(trainer.train_history, f, cls=Encoder)

        hist = trainer.train(with_pred=False, last_only=False)

        # Timing of prediction for validation dataset.
        t = time()  # ! TIMING - START
        trainer.predict(dataset='valid')
        hist['val_predict_time'] = time() - t  # ! TIMING - END

        with open(json_fname, 'w') as f:
            json.dump(hist, f, cls=Encoder)
Code example #5
0
File: pipeline.py  Project: JulianYu123456/Deephyper
def train(config):
    """Post-train the architecture in ``config`` for ``post_train.repeat`` replicas.

    Each replica reseeds, rebuilds and trains the model, then appends its
    history (plus validation-prediction timing) to the shared
    ``post_training_hist_<id>.json`` file.

    :param config: post-training configuration dict (problem space plus
        'seed', 'id', 'post_train', 'hyperparameters' entries).
    :return: the model of the last successfully-built replica, or ``None``
        if no replica could build a model.
    """
    seed = config["seed"]
    repeat = config["post_train"]["repeat"]
    if seed is not None:
        np.random.seed(seed)
        # per-replica seeds must be between (0, 2**32-1)
        seeds = [np.random.randint(0, 2**32 - 1) for _ in range(repeat)]

    # BUG FIX: initialize so the return below cannot raise NameError when
    # every replica fails to build a model.
    model = None

    for rep in range(repeat):
        tf.keras.backend.clear_session()

        default_callbacks_config = copy.deepcopy(CB_CONFIG)
        if seed is not None:
            np.random.seed(seeds[rep])
            tf.random.set_random_seed(seeds[rep])

        logger.info(f"Training replica {rep+1}")
        # Pre-settings: particularly important for BeholderCB to work.
        sess = tf.Session()
        K.set_session(sess)

        # Override hyperparameters with post_train hyperparameters.
        keys = filter(lambda k: k in config["hyperparameters"],
                      config["post_train"].keys())
        for k in keys:
            config["hyperparameters"][k] = config["post_train"][k]

        load_config(config)

        input_shape, output_shape = setup_data(config)

        search_space = setup_search_space(config,
                                          input_shape,
                                          output_shape,
                                          seed=seed)
        search_space.draw_graphviz(f'model_{config["id"]}.dot')
        logger.info("Model operations set.")

        # Model creation is best-effort: log and skip training on failure.
        model_created = False
        try:
            model = search_space.create_model()
            model_created = True
        except Exception:  # was a bare ``except:``; keep best-effort behavior
            model_created = False
            logger.info("Error: Model creation failed...")
            logger.info(traceback.format_exc())

        if model_created:
            # Setup callbacks from the post_train configuration.
            callbacks = []
            callbacks_config = config["post_train"].get("callbacks")
            if callbacks_config is not None:
                for cb_name, cb_conf in callbacks_config.items():
                    if cb_name in default_callbacks_config:
                        default_callbacks_config[cb_name].update(cb_conf)

                        if cb_name == "ModelCheckpoint":
                            default_callbacks_config[cb_name][
                                "filepath"] = f'best_model_id{config["id"]}_r{rep}.h5'
                        elif cb_name == "TensorBoard":
                            if default_callbacks_config[cb_name]["beholder"]:
                                callbacks.append(
                                    BeholderCB(
                                        logdir=default_callbacks_config[
                                            cb_name]["log_dir"],
                                        sess=sess,
                                    ))
                            # 'beholder' is not a keras TensorBoard kwarg;
                            # always remove it before Callback(**kwargs).
                            default_callbacks_config[cb_name].pop("beholder")

                        Callback = getattr(keras.callbacks, cb_name)
                        callbacks.append(
                            Callback(**default_callbacks_config[cb_name]))

                        # BUG FIX: ``Callback`` is a class, so
                        # ``type(Callback).__name__`` always logged 'type'.
                        logger.info(
                            f"Adding new callback {Callback.__name__} with config: {default_callbacks_config[cb_name]}!"
                        )

                    else:
                        logger.error(
                            f"'{cb_name}' is not an accepted callback!")

            trainer = TrainerTrainValid(config=config, model=model)
            trainer.callbacks.extend(callbacks)

            json_fname = f'post_training_hist_{config["id"]}.json'
            # Log the number of trainable parameters before running training.
            trainer.init_history()
            # Accumulate histories across replicas: load the file written by
            # a previous replica, or seed it from this trainer's history.
            try:
                with open(json_fname, "r") as f:
                    fhist = json.load(f)
            except FileNotFoundError:
                fhist = trainer.train_history
                for k, v in fhist.items():
                    fhist[k] = [v]
                with open(json_fname, "w") as f:
                    json.dump(fhist, f, cls=Encoder)

            hist = trainer.train(with_pred=False, last_only=False)

            # Timing of prediction for validation dataset.
            t = time()  # ! TIMING - START
            trainer.predict(dataset="valid")
            hist["val_predict_time"] = time() - t  # ! TIMING - END

            for k, v in hist.items():
                fhist[k] = fhist.get(k, [])
                fhist[k].append(v)

            with open(json_fname, "w") as f:
                json.dump(fhist, f, cls=Encoder)

    # BUG FIX: the original ``return model`` was indented inside the
    # ``for rep`` loop, so only the first replica ever ran despite the
    # cross-replica history accumulation above. Return after all replicas.
    return model