# Restore a trained model from a checkpoint and run batched inference on x_test.
sess = tf.Session(config=session_conf)

with sess.as_default():
    checkpoint_file = FLAGS.checkpoint_file
    # Rebuild the graph structure from the saved .meta file, then load weights.
    saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
    saver.restore(sess, checkpoint_file)

    # NOTE(review): `graph` must be bound earlier in the file (e.g. via
    # tf.get_default_graph() after import_meta_graph) -- confirm upstream.
    input_x = graph.get_operation_by_name('input_x').outputs[0]
    dropout_keep_prob = graph.get_operation_by_name(
        'dropout_keep_prob').outputs[0]

    predictions = graph.get_operation_by_name('predictions').outputs[0]

    # shuffle=False keeps the prediction order aligned with x_test / y_test.
    batches = batch_iter(list(x_test), FLAGS.batch_size, shuffle=False)

    for x_batch in batches:
        # Dropout disabled (keep_prob=1.0) at inference time.
        cand_predictions = sess.run(predictions, {
            input_x: x_batch,
            dropout_keep_prob: 1.0
        })

        # `all_predictions` is presumably initialized (e.g. an empty array)
        # before this chunk -- verify against the surrounding file.
        all_predictions = np.concatenate(
            (all_predictions, cand_predictions))

# Parenthesized print of a single argument is valid in both Python 2 and 3.
print(y_test[0])
print(all_predictions[0])

print(type(all_predictions))
        def dev_step(x_batch, y_batch):
            """Run one evaluation pass on (x_batch, y_batch); no weight update.

            Prints step, loss and accuracy; returns nothing. Relies on `rnn`
            (the model) and `sess` from the enclosing scope.
            """
            feed_dict = {
                rnn.input_x: x_batch,
                rnn.input_y: y_batch,
                rnn.dropout_keep_prob: 1.0  # dropout disabled during evaluation
            }

            step, loss, accuracy = sess.run(
                [rnn.global_step, rnn.loss_val, rnn.accuracy], feed_dict)

            time_str = datetime.datetime.now().isoformat()
            # Parenthesized print of a single argument works in Python 2 and 3.
            print("dev_result: {}:step {}, loss {:g}, acc {:g}".format(
                time_str, step, loss, accuracy))

        # Main training loop: one full pass over the training data per epoch.
        for epoch_idx in range(FLAGS.num_epochs):
            batches = batch_iter(list(zip(x_train, y_train)), FLAGS.batch_size)

            for batch in batches:
                x_batch, y_batch = zip(*batch)

                train_step(x_batch, y_batch)

                # NOTE(review): this condition depends only on epoch_idx, so in a
                # matching epoch the dev set is evaluated after EVERY batch --
                # confirm whether a once-per-epoch check (outside this inner
                # loop) was intended.
                if epoch_idx % FLAGS.validate_every == 0:
                    # Parenthesized print works in both Python 2 and 3.
                    print('\n')
                    dev_step(x_dev, y_dev)

                # NOTE(review): saving inside the batch loop rewrites the same
                # checkpoint (global_step=epoch_idx) after every batch; moving
                # this below the inner loop would save once per epoch.
                path = saver.save(sess,
                                  checkpoints_prefix,
                                  global_step=epoch_idx)
                print("Saved model checkpoint to {}\n".format(path))