def sample(args):
    """Restore the most recently trained checkpoint and sample from the model.

    Expects ``args`` to provide: ``params`` (stem of the pickled model-param
    file under ./save/), ``n`` and ``prime`` -- the latter two are passed
    straight through to ``model.sample()``.  Prints the sampled output, or an
    error message when no checkpoint can be found.
    """
    # Load the hyper-parameters pickled at training time.  Context manager
    # closes the handle; 'rb' is required for pickle portability.
    # NOTE(review): pickle.load on a local trusted file only -- never point
    # this path at untrusted input.
    with open('./save/{0}.model_param'.format(args.params), 'rb') as f:
        params = pickle.load(f)
    model = RegressionModel(params, infer=True)
    model.inference(model.data_placeholder)
    with tf.Session() as sess:
        # TODO: This loads the most recent checkpoint, not necessarily the
        # best-performing one.
        tf.initialize_all_variables().run()
        saver = tf.train.Saver(tf.all_variables())
        checkpoints = glob.glob("./save/model_{0}.ckpt-*".format(params.id))
        if checkpoints:
            # We have worked on training this model before: resume from the
            # checkpoint with the highest iteration count rather than starting
            # from scratch.  rsplit('-', 1) takes the trailing iteration
            # suffix even if the model id itself contains a '-'.
            restore_path = max(
                checkpoints, key=lambda c: int(c.rsplit('-', 1)[-1]))
            saver.restore(sess, restore_path)
            print("restoring {0}".format(restore_path))
            print(model.sample(sess, args.n, args.prime))
        else:
            print("Unable to restore - no checkpoints found")
def sample(args):
    """Sample from a previously trained RegressionModel.

    Restores the newest checkpoint for the model identified by the saved
    params and prints ``model.sample(sess, args.n, args.prime)``; prints an
    error message instead when no checkpoint exists.
    """
    param_path = "./save/{0}.model_param".format(args.params)
    # 'rb' + context manager: pickle needs binary mode and the handle must
    # not be leaked.  NOTE(review): only ever unpickle this trusted local
    # file -- pickle on untrusted data is arbitrary code execution.
    with open(param_path, "rb") as param_file:
        params = pickle.load(param_file)

    model = RegressionModel(params, infer=True)
    model.inference(model.data_placeholder)

    with tf.Session() as sess:
        # TODO: restores the most recent checkpoint, which is not necessarily
        # the best-scoring one.
        tf.initialize_all_variables().run()
        saver = tf.train.Saver(tf.all_variables())

        ckpt_paths = glob.glob("./save/model_{0}.ckpt-*".format(params.id))
        if not ckpt_paths:
            print("Unable to restore - no checkpoints found")
            return

        # Training ran before; resume from the checkpoint whose filename
        # carries the largest iteration suffix.  rsplit("-", 1) keeps this
        # correct even if the model id contains a dash.
        def _iteration(path):
            return int(path.rsplit("-", 1)[-1])

        latest = max(ckpt_paths, key=_iteration)
        saver.restore(sess, latest)
        print("restoring {0}".format(latest))
        print(model.sample(sess, args.n, args.prime))