Example #1
import tensorflow as tf

# FLAGS, get_generator, get_dataset, get_model_fn and architecture_fn
# are defined elsewhere in the source module.


def train():
    """Builds a tf.estimator.Estimator and trains it for max_steps."""
    params = tf.contrib.training.HParams(learning_rate=FLAGS.learning_rate,
                                         batch_size=FLAGS.batch_size,
                                         max_steps=FLAGS.max_steps)

    generator = get_generator()
    dataset = get_dataset(generator, batch_size=params.batch_size)

    def input_fn():
        with tf.name_scope("input"):
            iterator = dataset.make_one_shot_iterator()
            next_items = iterator.get_next()

        return next_items

    config = tf.estimator.RunConfig(model_dir=FLAGS.model_dir,
                                    tf_random_seed=FLAGS.random_seed)

    model_fn = get_model_fn(architecture_fn=architecture_fn)

    estimator = tf.estimator.Estimator(model_fn=model_fn,
                                       config=config,
                                       params=params)

    return estimator.train(input_fn=input_fn, max_steps=FLAGS.max_steps)
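get_model_fn is not shown above; the following is a minimal sketch of what a compatible helper could look like, assuming architecture_fn maps input features to logits and using a classification loss purely for illustration (both are assumptions, not the source implementation):

import tensorflow as tf


def get_model_fn(architecture_fn):
    """Hypothetical wrapper turning architecture_fn into an Estimator model_fn."""

    def model_fn(features, labels, mode, params):
        # architecture_fn is assumed to map input features to logits.
        logits = architecture_fn(features)

        if mode == tf.estimator.ModeKeys.PREDICT:
            return tf.estimator.EstimatorSpec(
                mode, predictions={"logits": logits})

        # A classification loss is assumed purely for illustration.
        loss = tf.losses.sparse_softmax_cross_entropy(
            labels=labels, logits=logits)

        if mode == tf.estimator.ModeKeys.TRAIN:
            optimizer = tf.train.AdamOptimizer(params.learning_rate)
            train_op = optimizer.minimize(
                loss, global_step=tf.train.get_global_step())
            return tf.estimator.EstimatorSpec(
                mode, loss=loss, train_op=train_op)

        return tf.estimator.EstimatorSpec(mode, loss=loss)

    return model_fn

Since the HParams object built in train() is passed to the Estimator as params, params.learning_rate resolves to FLAGS.learning_rate at graph-construction time.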
Example #2
import functools
import os

import scipy.misc
import tensorflow as tf

# input_fn, read_label_file and get_model_fn are defined elsewhere in the
# source module.


def evaluate(model_dir, dataset_dir):
    """
    Runs prediction over the validation split and saves each image
    under a folder named after its predicted class.
    """
    # Session configuration.
    sess_config = tf.ConfigProto(
        allow_soft_placement=True,
        log_device_placement=False,
        intra_op_parallelism_threads=0,  # 0 lets TensorFlow pick the thread count
        gpu_options=tf.GPUOptions(force_gpu_compatible=True))

    config = tf.estimator.RunConfig(session_config=sess_config,
                                    model_dir=model_dir)

    eval_input_fn = functools.partial(input_fn,
                                      dataset_dir=dataset_dir,
                                      split_name='validation',
                                      is_training=False)

    # Get the number of classes from the label file
    labels_to_class_names, num_classes = read_label_file(dataset_dir)

    classifier = tf.estimator.Estimator(model_fn=get_model_fn(num_classes),
                                        config=config)

    # .predict() returns a generator of dicts, one per input example.
    y = classifier.predict(input_fn=eval_input_fn)

    # Per-class counter used to number the saved image files.
    num_food_image = {}

    for pred in y:
        predicted_class = labels_to_class_names[int(pred['classes'])]
        food_dir = '../Validations/%s/%s' % (os.path.basename(model_dir),
                                             predicted_class)

        if not os.path.exists(food_dir):
            os.makedirs(food_dir)

        file_name = os.path.join(
            food_dir, '%s.png' % num_food_image.get(predicted_class, 1))

        num_food_image[predicted_class] = num_food_image.get(
            predicted_class, 1) + 1

        # scipy.misc.imsave is deprecated in modern SciPy;
        # imageio.imwrite is the usual replacement.
        scipy.misc.imsave(file_name, pred['features'])
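read_label_file is not shown here; below is a plausible sketch, assuming a slim-style labels file with one index:class_name pair per line. The labels.txt file name and the returned tuple layout are assumptions inferred from the call site above:

import os


def read_label_file(dataset_dir, filename='labels.txt'):
    """Hypothetical reader for an `index:class_name` labels file."""
    labels_to_class_names = {}
    with open(os.path.join(dataset_dir, filename)) as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            index, class_name = line.split(':', 1)
            labels_to_class_names[int(index)] = class_name
    return labels_to_class_names, len(labels_to_class_names)

Integer keys match the lookup labels_to_class_names[int(pred['classes'])] in the loop above.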
Example #3
import torch.nn as nn
from torch.optim import Adam
from ignite.engine import Engine, Events
from ignite.handlers import EarlyStopping, ModelCheckpoint

# ARGS, load_yaml, get_dataset, restore_model, get_model_fn,
# create_training_function, create_evaluation_function,
# create_supervised_evaluator, SequenceTagAccuracy and the log-handler
# factories are defined elsewhere in the source module.


def run():
    model_config = load_yaml(ARGS.model_config)
    embedding_config = model_config["embedding"]
    model_config = model_config["tagger"]

    # Dataset
    dataset_config = ARGS.dataset_path.split("/")
    if len(dataset_config) != 2:
        raise ValueError(
            "Dataset path should be in `data_folder/dataset_name` format")
    base_path = dataset_config[0]
    dataset_name = dataset_config[1]

    sentence, char_sentence, tags, val_iter, train_iter, _ = \
        get_dataset(base_path, dataset_name, ARGS.batch_size,
                    pretrained_embedding=embedding_config["pretrained"])

    # Net initialization
    # First, try to load existing model if any
    tagger = restore_model(
        ARGS.model_path + "/ner_cnn-bilstm-crf_*",
        restore=ARGS.restore_nth_model)
    # If none are found, fallback to default initialization
    if not tagger:
        model_fn = get_model_fn(embedding_config, model_config)
        tagger = model_fn(sentence, char_sentence, tags)

    # This helps automatically place the model on the GPU(s)
    # if any are available
    tagger = nn.DataParallel(tagger)

    tagger_params = filter(lambda p: p.requires_grad, tagger.parameters())
    opt = Adam(lr=ARGS.learning_rate, params=tagger_params)

    # Trainer initialization
    training_fn = create_training_function(tagger, opt)
    evaluation_fn = create_evaluation_function(tagger.module)

    # Create engines for both trainer and evaluator
    trainer = Engine(training_fn)
    evaluator = create_supervised_evaluator(
        model=tagger,
        inference_fn=evaluation_fn,
        metrics={
            "acc": SequenceTagAccuracy(tags.vocab),
        })

    # Handler Initialization
    checkpoint = ModelCheckpoint(
        ARGS.model_path,
        "ner",
        save_interval=ARGS.save_interval,
        n_saved=ARGS.num_retained_models,
        create_dir=True,
        require_empty=False)

    def score_fn(engine):
        # ignite's EarlyStopping treats higher scores as better,
        # so the loss must be negated.
        return -engine.state.metrics["loss"]

    early_stopper = EarlyStopping(
        trainer=trainer,
        patience=ARGS.early_stopping_patience,
        score_function=score_fn)

    trainer.add_event_handler(Events.ITERATION_COMPLETED, checkpoint,
                              {"cnn-bilstm-crf": tagger.module})
    trainer.add_event_handler(Events.COMPLETED, checkpoint,
                              {"cnn-bilstm-crf": tagger.module})

    trainer.add_event_handler(
        Events.ITERATION_COMPLETED,
        create_log_training_loss_handler(ARGS.log_interval))
    trainer.add_event_handler(
        Events.EPOCH_COMPLETED,
        create_log_validation_handler(evaluator, val_iter))
    trainer.add_event_handler(Events.EPOCH_COMPLETED, early_stopper)

    # Run the whole training process
    trainer.run(train_iter, max_epochs=ARGS.max_epochs)
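create_training_function is defined elsewhere in the source; the following is a minimal sketch of the contract Engine(training_fn) expects, assuming (inputs, targets) batches and a model whose forward pass returns the loss (both the batch layout and the forward signature are assumptions):

def create_training_function(tagger, opt):
    """Hypothetical factory producing an ignite process function."""

    def training_fn(engine, batch):
        # ignite calls this once per batch; the return value becomes
        # engine.state.output.
        tagger.train()
        opt.zero_grad()
        inputs, targets = batch          # assumed batch layout
        loss = tagger(inputs, targets)   # assumed: forward returns the loss
        loss.backward()
        opt.step()
        return loss.item()

    return training_fn

Under this contract, engine.state.output holds the per-batch loss, which a logging handler like the one attached at ITERATION_COMPLETED above can read.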