def setUp(self):
    """Configure INFO-level logging, log the Keras version, and create the test fixtures."""
    log_format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s'
    logging.basicConfig(format=log_format, level=logging.INFO)
    log_keras_version_info()
    # Create the scratch directory (idempotent) and write the span-prediction fixture files.
    os.makedirs(TEST_DIR, exist_ok=True)
    write_span_prediction_files()
# --- Example #2 (scraped snippet separator) ---
def prepare_environment(params: Union[Params, dict]):
    """
    Sets random seeds for reproducible experiments. This may not work as expected
    if you use this from within a python project in which you have already imported Keras.
    If you use the scripts/run_model.py entry point to training models with this library,
    your experiments should be reproducible. If you are using this from your own project,
    you will want to call this function before importing Keras.

    Parameters
    ----------
    params: ``Params`` object or dict, required.
        A ``Params`` object or dict holding the json parameters.
    """
    # Defaults are used when the keys are absent; pop() works for both Params and dict.
    seed = params.pop("random_seed", 13370)
    numpy_seed = params.pop("numpy_seed", 1337)
    if "keras" in sys.modules:
        # Fixed message: the original implicit string concatenation was missing the
        # inter-line spaces (producing e.g. "DeepQAfunctionality") and the word "set".
        logger.warning(
            "You have already imported Keras in your code. If you are using DeepQA "
            "functionality to set random seeds, they will have no effect, as code "
            "prior to calling this function will be non-deterministic. We will not "
            "set the random seed here.")
        # Seeding after Keras import cannot make the run deterministic, so skip it.
        seed = None
        numpy_seed = None
    if seed is not None:
        random.seed(seed)
    if numpy_seed is not None:
        numpy.random.seed(numpy_seed)

    # Imported here (not at module top) so this function can run before Keras is imported.
    from deep_qa.common.checks import log_keras_version_info
    log_keras_version_info()
# --- Example #3 (scraped snippet separator) ---
def main():
    """Script entry point: parse a pyhocon config, set up logging, then train or load a model."""
    if len(sys.argv) != 2:
        print('USAGE: run_model.py [param_file]')
        sys.exit(-1)

    log_keras_version_info()
    config_path = sys.argv[1]
    config = replace_none(pyhocon.ConfigFactory.parse_file(config_path))
    serialization_prefix = config.get("model_serialization_prefix", None)  # pylint: disable=no-member
    if serialization_prefix is not None:
        # Mirror stdout/stderr into log files and attach a file handler for the logging module.
        sys.stdout = TeeLogger(serialization_prefix + "_stdout.log", sys.stdout)
        sys.stderr = TeeLogger(serialization_prefix + "_stderr.log", sys.stderr)
        file_handler = logging.FileHandler(serialization_prefix + "_python_logging.log")
        file_handler.setLevel(logging.INFO)
        file_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(name)s - %(message)s'))
        logging.getLogger().addHandler(file_handler)
        # Keep a copy of the parameters alongside the serialized model.
        shutil.copyfile(config_path, serialization_prefix + "_model_params.json")

    chosen_type = get_choice(config, 'model_class', concrete_models.keys())
    model = concrete_models[chosen_type](config)

    if not model.can_train():
        logger.info("Not enough training inputs.  Assuming you wanted to load a model instead.")
        # TODO(matt): figure out a way to specify which epoch you want to load a model from.
        model.load_model()
    else:
        logger.info("Training model")
        model.train()

    # TensorFlow keeps global graph state; clear it so repeated runs start clean.
    if K.backend() == "tensorflow":
        K.clear_session()
# --- Example #4 (scraped snippet separator) ---
def main():
    """Script entry point: parse a pyhocon config file, then train or load the chosen model."""
    if len(sys.argv) != 2:
        print('USAGE: run_model.py [param_file]')
        sys.exit(-1)

    log_keras_version_info()
    config = pyhocon.ConfigFactory.parse_file(sys.argv[1])
    config = replace_none(config)
    selected = get_choice(config, 'model_class', concrete_models.keys())
    model = concrete_models[selected](config)

    if not model.can_train():
        logger.info(
            "Not enough training inputs.  Assuming you wanted to load a model instead."
        )
        # TODO(matt): figure out a way to specify which epoch you want to load a model from.
        model.load_model()
    else:
        logger.info("Training model")
        model.train()
# --- Example #5 (scraped snippet separator) ---
 def setUp(self):
     """Configure DEBUG-level logging, log the Keras version, and create the test directory."""
     fmt = '%(asctime)s - %(levelname)s - %(name)s - %(message)s'
     logging.basicConfig(format=fmt, level=logging.DEBUG)
     log_keras_version_info()
     # exist_ok=True makes repeated setUp calls idempotent.
     os.makedirs(self.TEST_DIR, exist_ok=True)