def save(ae, step, epoch, batch):
    """Save a training checkpoint named after the current step/epoch/batch."""
    # Relies on FLAGS, tf_util, and log_string from the enclosing module.
    # save_path = os.path.join(FLAGS.CHECKPOINT_DIR, FLAGS.task_name)
    saved_checkpoint = ae.saver.save(
        ae.sess,
        FLAGS.CHECKPOINT_DIR + '/step%d-epoch%d-batch%d.ckpt' % (step, epoch, batch),
        global_step=step)  # the saver appends '-<step>' to the checkpoint filename
    log_string(
        tf_util.toBlue("-----> Model saved to file: %s; step = %d" %
                       (saved_checkpoint, step)))
Example #2
def save_pretrain(ae, step):
    """Save the pretraining checkpoint, creating the log and checkpoint dirs if needed."""
    # Relies on os, FLAGS, tf_util, log_string, and get_restore_path from the enclosing module.
    ckpt_dir = get_restore_path()
    if not os.path.exists(FLAGS.LOG_DIR):
        os.mkdir(FLAGS.LOG_DIR)
    if not os.path.exists(ckpt_dir):
        os.mkdir(ckpt_dir)
    saved_checkpoint = ae.pretrain_saver.save(
        ae.sess,
        os.path.join(ckpt_dir, 'pretrain_model.ckpt'),
        global_step=step)
    log_string(
        tf_util.toBlue("-----> Pretrain Model saved to file: %s; step = %d" %
                       (saved_checkpoint, step)))
Example #3
def save(ae, step, epoch, batch):
    """Save a training checkpoint under LOG_DIR/CHECKPOINT_DIR, creating the dirs if needed."""
    # Relies on os, FLAGS, tf_util, and log_string from the enclosing module.
    # save_path = os.path.join(FLAGS.CHECKPOINT_DIR, FLAGS.task_name)
    log_dir = FLAGS.LOG_DIR
    ckpt_dir = os.path.join(log_dir, FLAGS.CHECKPOINT_DIR)
    if not os.path.exists(log_dir):
        os.mkdir(log_dir)
    if not os.path.exists(ckpt_dir):
        os.mkdir(ckpt_dir)
    saved_checkpoint = ae.saver.save(
        ae.sess,
        os.path.join(ckpt_dir, 'step%d-epoch%d-batch%d.ckpt' % (step, epoch, batch)),
        global_step=step)
    log_string(
        tf_util.toBlue("-----> Model saved to file: %s; step = %d" %
                       (saved_checkpoint, step)))
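For context, a minimal usage sketch (not from the original repository) showing how a helper like save might be called from a training loop; the ae model object, num_epochs, num_batches, and SAVE_EVERY_N_STEPS are illustrative assumptions.

SAVE_EVERY_N_STEPS = 1000  # assumed checkpoint frequency, for illustration only

step = 0
for epoch in range(num_epochs):        # num_epochs: assumed training-loop bound
    for batch in range(num_batches):   # num_batches: assumed batches per epoch
        # ... run one optimization step with ae.sess here ...
        step += 1
        if step % SAVE_EVERY_N_STEPS == 0:
            save(ae, step, epoch, batch)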