Exemplo n.º 1
0
def main():
    """Train an IMDB sentiment classifier with tf.estimator and log a
    classification report on the held-out test labels."""
    dl = IMDBDataLoader()
    create_logging()
    estimator = tf.estimator.Estimator(model_fn)
    # BUG FIX: Estimator.train/predict take a *callable* input_fn, not the
    # value it returns. The original passed dl.train_input_fn() (already
    # called), which TF rejects; wrap in a lambda, matching the other
    # examples in this file.
    estimator.train(lambda: dl.train_input_fn())
    y_pred = np.fromiter(
        estimator.predict(lambda: dl.predict_input_fn()), np.int32)
    tf.logging.info('\n' + classification_report(dl.y_test, y_pred))
Exemplo n.º 2
0
def main():
    """Train with per-epoch validation log-loss reporting, then write test
    predictions to SUBMIT_PATH as a one-column ('y_pre') CSV."""
    create_logging()
    tf.logging.info('\n'+pprint.pformat(args.__dict__))
    dl = DataLoader()

    # Start from an empty model dir so no stale checkpoint is restored.
    if not os.path.exists(MODEL_PATH):
        os.makedirs(MODEL_PATH)
    # FIX: os.listdir never returns None -- it returns a (possibly empty)
    # list -- so the original "is not None" guard was dead code. Iterating
    # the list directly handles the empty case for free.
    for f in os.listdir(MODEL_PATH):
        os.remove(os.path.join(MODEL_PATH, f))

    # keep_checkpoint_max=1: only the latest checkpoint is retained.
    estimator = tf.estimator.Estimator(model_fn,
                                       model_dir=MODEL_PATH,
                                       config=tf.estimator.RunConfig(keep_checkpoint_max=1))

    y_true = get_val_labels()
    for _ in range(args.n_epochs):
        estimator.train(lambda: dl.train_input_fn())
        y_pred = list(estimator.predict(lambda: dl.val_input_fn()))
        # labels=[0, 1] keeps log_loss well-defined even if a validation
        # batch happens to contain a single class.
        tf.logging.info('\nVal Log Loss: %.3f\n' % log_loss(
            np.asarray(y_true, np.float64),
            np.asarray(y_pred, np.float64),
            labels=[0, 1]))
    submit_arr = np.asarray(list(estimator.predict(lambda: dl.predict_input_fn())))
    print(submit_arr.shape)

    submit = pd.DataFrame()
    submit['y_pre'] = submit_arr
    submit.to_csv(SUBMIT_PATH, index=False)
Exemplo n.º 3
0
def main():
    """Wire up an IMDB data loader, model, and trainer in one TF session,
    then run the training loop."""
    create_logging()
    session = tf.Session()
    loader = IMDBDataLoader(session)
    net = Model(loader)
    Trainer(session, net, loader).train()
Exemplo n.º 4
0
def main():
    """Build a VAE over the IMDB vocabulary, log its trainable variables,
    and run the VAE training loop."""
    create_logging()
    session = tf.Session()
    words = IMDBVocab()
    loader = VAEDataLoader(session, words)

    net = VAE(loader, words)
    tf.logging.info('\n' + pprint.pformat(tf.trainable_variables()))
    VAETrainer(session, net, loader, words).train()
Exemplo n.º 5
0
def main():
    """Run wake-sleep training, warm-starting the controller from the VAE
    checkpoint given by args.vae_ckpt_dir."""
    create_logging()
    session = tf.Session()
    words = IMDBVocab()
    loader = WakeSleepDataLoader(session, words)

    controller = WakeSleepController(loader, words)
    tf.logging.info('\n' + pprint.pformat(tf.trainable_variables()))
    trainer = WakeSleepTrainer(session, controller, loader, words)
    # Restore pretrained VAE weights before the wake-sleep loop begins.
    controller.load(session, args.vae_ckpt_dir)
    trainer.train()
Exemplo n.º 6
0
def main():
    """Run wake-sleep training with an additional discriminator data
    stream, warm-starting from the VAE checkpoint in args.vae_ckpt_dir."""
    create_logging()
    tf.logging.info('\n' + pprint.pformat(args.__dict__))
    session = tf.Session()
    words = IMDBVocab()
    discriminator_loader = DiscriminatorDataLoader(session, words)
    wake_sleep_loader = WakeSleepDataLoader(session, words)

    controller = WakeSleepController(discriminator_loader, wake_sleep_loader, words)
    tf.logging.info('\n' + pprint.pformat(tf.trainable_variables()))
    trainer = WakeSleepTrainer(session, controller, discriminator_loader,
                               wake_sleep_loader, words)
    # Restore pretrained VAE weights before training starts.
    controller.load(session, args.vae_ckpt_dir)
    trainer.train()
Exemplo n.º 7
0
def main():
    """Train the siamese RNN estimator, logging validation log-loss after
    each epoch, then dump test predictions to ./submit_siamese_rnn.csv
    as a one-column ('y_pre') CSV."""
    create_logging()

    loader = DataLoader()

    estimator = tf.estimator.Estimator(model_fn)

    val_labels = get_val_labels()
    for _ in range(args.n_epochs):
        estimator.train(lambda: loader.train_input_fn())
        val_preds = list(estimator.predict(lambda: loader.val_input_fn()))
        tf.logging.info('\nVal Log Loss: %.3f\n' %
                        log_loss(val_labels, val_preds, eps=1e-15))

    predictions = np.asarray(
        list(estimator.predict(lambda: loader.predict_input_fn())))
    print(predictions.shape)

    frame = pd.DataFrame()
    frame['y_pre'] = predictions
    frame.to_csv('./submit_siamese_rnn.csv', index=False)