Example #1
def evaluate(hparams, ckpt):
    if hparams.model_architecture == "rnn-model":
        model_creator = model.RNN
    else:
        raise ValueError("Unknown model architecture. Only simple_rnn is supported so far.")

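    # Run evaluation only when target labels are available; prediction runs either way.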
    if hparams.val_target_path:
        eval_model = model_helper.create_eval_model(model_creator, hparams, tf.contrib.learn.ModeKeys.EVAL)
        eval_sess = tf.Session(config=utils.get_config_proto(), graph=eval_model.graph)
        with eval_model.graph.as_default():
            loaded_eval_model = model_helper.load_model(eval_model.model, eval_sess, "evaluation", ckpt)
        iterator_feed_dict = {
            eval_model.input_file_placeholder: hparams.eval_input_path,
            eval_model.output_file_placeholder: hparams.eval_target_path
        }
        eval_loss = eval(loaded_eval_model, eval_sess, eval_model.iterator, iterator_feed_dict)
        print("Eval loss: %.3f"%eval_loss)
    print("Starting predictions:")

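    # Build a separate inference graph and restore the same checkpoint for prediction.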
    prediction_model = model_helper.create_infer_model(model_creator, hparams, tf.contrib.learn.ModeKeys.INFER)
    prediction_sess = tf.Session(config=utils.get_config_proto(), graph=prediction_model.graph)
    with prediction_model.graph.as_default():
        loaded_prediction_model = model_helper.load_model(prediction_model.model, prediction_sess, "prediction", ckpt)
        iterator_feed_dict = {
            prediction_model.input_file_placeholder: hparams.val_input_path,
        }
    predictions = predict(loaded_prediction_model, prediction_sess, prediction_model.iterator, iterator_feed_dict)
    np.savetxt(os.path.join(hparams.eval_output_folder, "classes.txt"), predictions["classes"])
    np.savetxt(os.path.join(hparams.eval_output_folder, "probabilities.txt"), predictions["probabilities"])
Example #2
def infer(ckpt, inference_input_file, inference_output_file, hparams):
    """
    Perform translation.
    """
    model_creator = gnmt_model.GNMTModel
    infer_model = model_helper.create_infer_model(model_creator, hparams)

    # Read data
    infer_data = utils.load_data(inference_input_file)

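    # Let GPU memory grow on demand instead of reserving it all upfront.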
    config_proto = tf.ConfigProto()
    config_proto.gpu_options.allow_growth = True
    with tf.Session(
        graph=infer_model.graph, config=config_proto) as sess:

        loaded_infer_model = model_helper.load_model(
            infer_model.model, ckpt, sess, "infer")
        sess.run(
            infer_model.iterator.initializer,
            feed_dict={
                infer_model.src_placeholder: infer_data,
                infer_model.batch_size_placeholder: hparams.infer_batch_size
            })
        # Decode
        utils.log("Start decoding")
        loaded_infer_model.decode_and_evaluate(
            "infer",
            sess,
            inference_output_file,
            ref_file=None,
            beam_width=hparams.beam_width,
            tgt_eos=hparams.eos,
            num_translations_per_input=hparams.num_translations_per_input)
Example #3
def start_sess_and_load_model(infer_model, ckpt_path):
    """Start session and load model."""
    sess = tf.Session(graph=infer_model.graph, config=utils.get_config_proto())
    with infer_model.graph.as_default():
        loaded_infer_model = model_helper.load_model(infer_model.model,
                                                     ckpt_path, sess, "infer")
    return sess, loaded_infer_model
Example #4
def evaluate(hparams, ckpt):
    if hparams.model_architecture == "simple_rnn":
        model_creator = model.RNN
    else:
        raise ValueError("Unknown model architecture. Only simple_rnn is supported so far.")
    print("Starting evaluation and predictions:")
    # create eval graph.
    eval_model = model_helper.create_eval_model(model_creator, hparams, tf.contrib.learn.ModeKeys.EVAL)
    eval_sess = tf.Session(config=utils.get_config_proto(), graph=eval_model.graph)
    with eval_model.graph.as_default():
        # load pretrained model.
        loaded_eval_model = model_helper.load_model(eval_model.model, eval_sess, "evaluation", ckpt)
    iterator_feed_dict = {
        eval_model.input_file_placeholder: hparams.eval_input_path,
        eval_model.output_file_placeholder: hparams.eval_target_path
    }
    eval_loss, eval_accuracy, predictions = eval_and_precit(loaded_eval_model, eval_sess, eval_model.iterator,
                                                            iterator_feed_dict)
    print("Eval loss: %.3f, Eval accuracy: %.3f" % (eval_loss, eval_accuracy))
    # only models with CRF include trans. params.
    transition_params = eval_sess.run(loaded_eval_model.transition_params)
    if transition_params is not None:
        print("Saving transition parameters:")
        np.savetxt(os.path.join(hparams.eval_output_folder, "transition_params.txt"), transition_params)

    print("Saving predictions:")
    cPickle.dump(predictions,
                 open(os.path.join(hparams.eval_output_folder,
                                   hparams.predictions_filename.split(".")[0] + ".pickle"),
                      "wb"))
Example #5
def determine_acceptance(input_dict):
    '''
    Take a parsed input form (input_dict) and return a binary acceptance decision.
    '''
    global model

    # Parse input: numeric fields become ints, checkbox fields become booleans.
    # input_dict = parse_input(argv[1:])
    for key in input_dict.keys():
        try:
            input_dict[key] = int(input_dict[key])
        except (TypeError, ValueError):
            input_dict[key] = input_dict[key] == u'on'

    # Preprocess
    preprocessed_data = md.predict(input_dict)

    # Load model and predict
    if model is None:
        model = mh.load_model()
    dec_val = model.predict(preprocessed_data)[0]
    print("DECISION VALUE: " + str(dec_val))
    decision = dec_val >= 0.5

    # Output to stdout
    return decision
Example #6
def single_worker_inference(infer_model,
                            ckpt,
                            inference_input_file,
                            inference_output_file,
                            hparams):
    """Inference with a single worker."""
    output_infer = inference_output_file

    # Read data
    infer_data = load_data(inference_input_file, hparams)

    with tf.Session(config=utils.get_config_proto(), graph=infer_model.graph) as sess:
        loaded_infer_model = model_helper.load_model(infer_model.model, ckpt, sess, "infer")
        sess.run(infer_model.iterator.initializer,
                 feed_dict={
                     infer_model.src_placeholder: infer_data,
                     infer_model.batch_size_placeholder: hparams.infer_batch_size
                 })
        # Decode
        utils.print_out("# Start decoding")
        _decode_and_evaluate("infer",
                             loaded_infer_model,
                             sess,
                             output_infer,
                             ref_file=None,
                             subword_option=None,
                             beam_width=hparams.beam_width,
                             tgt_eos=hparams.eos,
                             num_translations_per_input=hparams.num_translations_per_input)
Example #7
def single_worker_inference(  #emb_matrix,
        infer_model, ckpt, inference_input_file, inference_output_file,
        hparams, model_creator):
    """Inference with a single worker."""
    output_infer = inference_output_file

    # Read data
    infer_data = load_data(inference_input_file, hparams)
    #saver = tf.train.Saver()
    with tf.Session(graph=infer_model.graph,
                    config=utils.get_config_proto()) as sess:
        loaded_infer_model = model_helper.load_model(infer_model.model, ckpt,
                                                     sess, "infer")
        sess.run(infer_model.iterator.initializer,
                 feed_dict={
                     infer_model.src_placeholder: infer_data,
                     infer_model.batch_size_placeholder:
                     hparams.infer_batch_size
                 })
        # Decode
        utils.print_out("# Start decoding single_worker_inference")
        if hparams.inference_indices:
            _decode_inference_indices(
                loaded_infer_model,
                sess,
                output_infer=output_infer,
                output_infer_summary_prefix=output_infer,
                inference_indices=hparams.inference_indices,
                tgt_eos=hparams.eos,
                subword_option=hparams.subword_option)

        else:

            nmt_utils.decode_and_evaluate(
                "infer",
                loaded_infer_model,
                sess,
                output_infer,
                ref_file=None,
                metrics=hparams.metrics,
                subword_option=hparams.subword_option,
                beam_width=hparams.beam_width,
                tgt_eos=hparams.eos,
                num_translations_per_input=hparams.num_translations_per_input)
Example #8
def single_worker_inference(infer_model, infer_sess, eval_model, eval_sess,
                            ckpt, summary_writer, global_step, hparams):
  """the actual function for inference."""
  # load datasets
  infer_src_data = load_data(hparams.infer_src_data)
  infer_tar_data = load_data(hparams.infer_tar_data)
  infer_kb = load_data(hparams.infer_kb)

  # load model and session
  start_time = time.time()
  with infer_model.graph.as_default():
    loaded_infer_model = model_helper.load_model(infer_model.model, ckpt,
                                                 infer_sess, "infer")
    infer_sess.run(
        infer_model.infer_iterator.initializer,
        feed_dict={
            infer_model.data_src_placeholder: infer_src_data,
            infer_model.kb_placeholder: infer_kb,
            infer_model.batch_size_placeholder: hparams.infer_batch_size
        })
    infer_handle = infer_sess.run(infer_model.infer_iterator.string_handle())

    # Decode
    utils.print_out("# Start decoding")
    evaluation_scores = dialogue_utils.decode_and_evaluate(
        "infer",
        loaded_infer_model,
        infer_handle,
        infer_sess,
        hparams.inference_output_file,
        ref_file=hparams.infer_tar_data,
        metrics=hparams.metrics,
        hparams=hparams,
        infer_src_data=infer_src_data)
    # summary writer
    for key in evaluation_scores:
      # utils.add_summary(summary_writer,)
      utils.add_summary(summary_writer, global_step, key,
                        evaluation_scores[key])
    # sample some dialogue and decode them for qualitative examination
    _sample_decode(loaded_infer_model, global_step, infer_handle, infer_sess,
                   hparams, infer_model.infer_iterator, infer_src_data,
                   infer_tar_data, infer_kb, infer_model.data_src_placeholder,
                   infer_model.kb_placeholder,
                   infer_model.batch_size_placeholder)
  # run eval model to get perplexity
  eval_handle = eval_sess.run(eval_model.eval_iterator.string_handle())
  dev_ppl, _ = run_internal_eval(eval_model, eval_handle, eval_sess,
                                 hparams.out_dir, hparams, summary_writer)
  utils.add_summary(summary_writer, global_step, "dev_ppl", dev_ppl)
  total_inference_time = time.time() - start_time
  utils.add_summary(summary_writer, global_step, "infer_time",
                    total_inference_time)
Example #9
def start_sess_and_load_model(infer_model, ckpt_path, hparams):
    """Start session and load model."""
    print("num_intra_threads = %d, num_inter_threads = %d \n" %
          (hparams.num_intra_threads, hparams.num_inter_threads))
    sess = tf.Session(graph=infer_model.graph,
                      config=utils.get_config_proto(
                          num_intra_threads=hparams.num_intra_threads,
                          num_inter_threads=hparams.num_inter_threads))
    with infer_model.graph.as_default():
        loaded_infer_model = model_helper.load_model(infer_model.model,
                                                     ckpt_path, sess, "infer")
    return sess, loaded_infer_model
Example #10
def inference(infer_data):
    with tf.Session(graph=infer_model.graph,
                    config=utils.get_config_proto()) as sess:
        loaded_infer_model = model_helper.load_model(infer_model.model, ckpt,
                                                     sess, "infer")

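        # Feed the single source sentence and initialize the input iterator.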
        sess.run(infer_model.iterator.initializer,
                 feed_dict={
                     infer_model.src_placeholder: [infer_data],
                     infer_model.batch_size_placeholder:
                     hparams.infer_batch_size
                 })

        translation = decode_inference_indices(loaded_infer_model, sess)

    return translation
Example #11
def main(argv):
    try:
        opts, args = getopt.getopt(argv, 'hf:m:d:',
                                   ['help', 'file=', 'dataset=', 'mode='])
    except getopt.GetoptError:
        print('\nUSAGE:')
        console.success('process_data.py --dataset <dataset_name>')
        sys.exit(2)

    for opt, arg in opts:
        if opt in ('-h', '--help'):
            print('\nUSAGE:')
            print(
                'process_data.py --dataset <dataset_name> --file <file_name> --mode <mode>'
            )
            sys.exit()
        elif opt in ('-f', '--file'):
            file_name = arg
        elif opt in ('-m', '--mode'):
            if arg in ['train', 'predict']:
                mode = arg
            else:
                mode = None
        elif opt in ('-d', '--dataset'):
            dataset_name = arg
        else:
            print('Invalid options. Run with --help to see usage')

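    # Load the series from CSV, using the first column as the index.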
    series = pd.read_csv(file_name,
                         sep=',',
                         header=0,
                         index_col=0,
                         squeeze=True)
    data = series.values

    model_file = 'models/' + dataset_name + '.json'
    model_weights_file = 'models/' + dataset_name + '.h5'

    if mode == 'train':
        train(data, model_file, model_weights_file)
    elif mode == 'predict':
        model = model_helper.load_model(model_file, model_weights_file)
        predict(data, model, 3)
    else:
        print('Invalid options. Run with --help to see usage')
Example #12
def inference(infer_data):
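    # Cap this process at half of the GPU memory and allow the allocation to grow.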
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5,
                                allow_growth=True)
    with tf.Session(graph=infer_model.graph,
                    config=tf.ConfigProto(gpu_options=gpu_options,
                                          allow_soft_placement=True,
                                          log_device_placement=True)) as sess:
        loaded_infer_model = model_helper.load_model(infer_model.model, ckpt,
                                                     sess, "infer")

        sess.run(infer_model.iterator.initializer,
                 feed_dict={
                     infer_model.src_placeholder: [infer_data],
                     infer_model.batch_size_placeholder:
                     hparams.infer_batch_size
                 })

        translation = decode_inference_indices(loaded_infer_model, sess)

    return translation
Example #13
def inference(ckpt, inference_output_folder, hparams):
    output_infer = inference_output_folder

    # Read data
    mnist = input_data.read_data_sets("data/", one_hot=True)
    if hparams.infer_source == "test":
        infer_data = mnist.test.images
        infer_labels = mnist.test.labels
    elif hparams.infer_source == "validation":
        infer_data = mnist.validation.images
        infer_labels = mnist.validation.labels
    elif hparams.infer_source == "train":
        infer_data = mnist.train.images
        infer_labels = mnist.train.labels
    infer_sample_idxs = random.sample(range(infer_data.shape[0]),
                                      hparams.infer_sample)
    infer_sample = infer_data[infer_sample_idxs, :]
    infer_sample_labels = infer_labels[infer_sample_idxs, :]

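    # Build the input pipeline and VAE graph, then restore weights from the checkpoint.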
    dataset, x_placeholder, batch_placeholder = model_helper.create_batched_dataset(
        hparams.x_dim)
    iterator = tf.contrib.data.Iterator.from_structure(
        mnist.train.images.dtype, tf.TensorShape([None, hparams.x_dim]))
    init_op = iterator.make_initializer(dataset)
    next_x = iterator.get_next()
    dropout = tf.placeholder(tf.float32, name='dropout')
    model = vae_model.FullyConnectedVAE(hparams, next_x, dropout)

    with tf.Session() as session:
        model = model_helper.load_model(model, ckpt, session)
        session.run(init_op,
                    feed_dict={
                        x_placeholder: infer_sample,
                        batch_placeholder: infer_sample.shape[0],
                        dropout: 0.0
                    })
        # Decode
        outputs, zs, xs = model.infer(session)
        plot_images(outputs, xs, hparams, output_infer)
        if zs.shape[1] == 2:
            plot_zs(zs, infer_sample_labels, output_infer)
Example #14
def single_worker_inference(infer_model, ckpt, inference_input_file,
                            inference_output_file, hparams):
    """Inference with a single worker."""
    output_infer = inference_output_file

    # Read data
    infer_data = load_data(inference_input_file, hparams)

    with tf.Session(graph=infer_model.graph,
                    config=utils.get_config_proto()) as sess:
        loaded_infer_model = model_helper.load_model(infer_model.model, ckpt,
                                                     sess, "infer")
        sess.run(infer_model.iterator.initializer,
                 feed_dict={
                     infer_model.src_placeholder: infer_data,
                     infer_model.batch_size_placeholder:
                     hparams.infer_batch_size
                 })
        # Decode
        utils.print_out("# Start decoding")
        if hparams.inference_indices:
            _decode_inference_indices(
                loaded_infer_model,
                sess,
                output_infer=output_infer,
                output_infer_summary_prefix=output_infer,
                inference_indices=hparams.inference_indices,
                tgt_sos=hparams.sos,
                tgt_eos=hparams.eos,
                bpe_delimiter=hparams.bpe_delimiter)
        else:
            nmt_utils.decode_and_evaluate("infer",
                                          loaded_infer_model,
                                          sess,
                                          output_infer,
                                          ref_file=None,
                                          metrics=hparams.metrics,
                                          bpe_delimiter=hparams.bpe_delimiter,
                                          beam_width=hparams.beam_width,
                                          tgt_sos=hparams.sos,
                                          tgt_eos=hparams.eos)
Example #15
    def _load_model(self, checkpoint_path, default_hparams_path,
                    model_hparams_path, source_vocab_path, target_vocab_path):
        hparams = self._create_hparams(default_hparams_path,
                                       model_hparams_path, source_vocab_path,
                                       target_vocab_path)

        model_creator = gnmt_model.GNMTModel
        infer_model = model_helper.create_infer_model(model_creator,
                                                      hparams,
                                                      scope=None)

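        # Reserve only a fraction (GPU_MEM_FRAC) of GPU memory for this session.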
        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=GPU_MEM_FRAC)
        sess = tf.Session(graph=infer_model.graph,
                          config=tf.ConfigProto(gpu_options=gpu_options,
                                                allow_soft_placement=True))
        with infer_model.graph.as_default():
            nmt_model = model_helper.load_model(infer_model.model,
                                                checkpoint_path, sess, "infer")

        return sess, nmt_model, infer_model, hparams
Example #16
def train(data, model_file, model_weights_file):
    parameter_file = 'training_config.json'

    params = json.loads(open(parameter_file).read())

    x_train, y_train, x_test, y_test, x_test_raw, y_test_raw, last_window_raw, last_window = data_helper.load_timeseries(
        data, params)

    model_file_path = Path(model_file)
    if model_file_path.is_file():
        print('Existing trained model found.')
        model = model_helper.load_model(model_file, model_weights_file)
    else:
        print('Creating a new model.')
        lstm_layer = [1, params['window_size'], params['hidden_unit'], 1]
        model = model_helper.rnn_lstm(lstm_layer, params)

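    # Fit (or fine-tune) the LSTM with the settings from training_config.json.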
    model.fit(x_train,
              y_train,
              batch_size=params['batch_size'],
              epochs=params['epochs'],
              validation_split=params['validation_split'],
              verbose=1)

    predicted = model_helper.predict_next_timestamp(model, x_test)
    predicted_raw = []
    for i in range(len(x_test_raw)):
        predicted_raw.append((predicted[i] + 1) * x_test_raw[i][0])

    # serialize model to JSON
    model_json = model.to_json()
    with open(model_file, 'w') as json_file:
        json_file.write(model_json)
    # serialize weights to HDF5
    model.save_weights(model_weights_file)
    print("Saved model to disk")
Example #17
def multi_worker_inference(infer_model, ckpt, inference_input_file,
                           inference_output_file, hparams, num_workers, jobid):
    """Inference using multiple workers."""
    assert num_workers > 1

    final_output_infer = inference_output_file
    output_infer = "%s_%d" % (inference_output_file, jobid)
    output_infer_done = "%s_done_%d" % (inference_output_file, jobid)

    # Read data
    infer_data = load_data(inference_input_file, hparams)

    # Split data to multiple workers
    total_load = len(infer_data)
    load_per_worker = int((total_load - 1) / num_workers) + 1
    start_position = jobid * load_per_worker
    end_position = min(start_position + load_per_worker, total_load)
    infer_data = infer_data[start_position:end_position]

    with tf.Session(graph=infer_model.graph,
                    config=utils.get_config_proto()) as sess:

        loaded_infer_model = model_helper.load_model(infer_model.model, ckpt,
                                                     sess, "infer")
        sess.run(infer_model.iterator.initializer,
                 {infer_model.src_placeholder: infer_data})

        # Decode
        utils.print_out("# Start decoding")
        nmt_utils.decode_and_evaluate("infer",
                                      loaded_infer_model,
                                      sess,
                                      output_infer,
                                      ref_file=None,
                                      metrics=hparams.metrics,
                                      bpe_delimiter=hparams.bpe_delimiter,
                                      beam_width=hparams.beam_width,
                                      tgt_eos=hparams.eos)

        # Change file name to indicate the file writing is completed.
        tf.gfile.Rename(output_infer, output_infer_done, overwrite=True)

        # Job 0 is responsible for the clean up.
        if jobid != 0: return

        # Now write all translations
        with codecs.getwriter("utf-8")(tf.gfile.GFile(final_output_infer,
                                                      mode="wb")) as final_f:
            for worker_id in range(num_workers):
                worker_infer_done = "%s_done_%d" % (inference_output_file,
                                                    worker_id)
                while not tf.gfile.Exists(worker_infer_done):
                    utils.print_out("  waitting job %d to complete." %
                                    worker_id)
                    time.sleep(10)

                with codecs.getreader("utf-8")(tf.gfile.GFile(
                        worker_infer_done, mode="rb")) as f:
                    for translation in f:
                        final_f.write("%s" % translation)
                tf.gfile.Remove(worker_infer_done)
Example #18
import sys
from model_helper import load_model, save_weights, train_mnist

epoch = 5
mini_batch = 200

model_file = sys.argv[1]
weights_file = sys.argv[2]
weights_dest_file = sys.argv[3]

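# Restore the model architecture and weights, then continue training on MNIST.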
model = load_model(model_file, weights_file)

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

(x_train, y_train) = train_mnist()

model.fit(x_train, y_train, nb_epoch=epoch, batch_size=mini_batch)

save_weights(model.get_weights(), weights_dest_file)
Example #19
from flask import Flask
from flask import request
from flask import jsonify
from flask import render_template
from datetime import datetime
import hashlib


import model_helper as mh
import cassandra_helper as ch


MODEL_FILE_PATH = r"./model_saved"
DATABASE_FILE_PATH = ""
OUTPUT_PATH = ""

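# Load the trained model once at startup so every request reuses it.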
app = Flask(__name__)
model = mh.load_model(MODEL_FILE_PATH)


@app.route("/")
def index():
    return render_template('index.html', title='Sumbit Page')


@app.route("/view_classifier")
def classifier():
    comments = ['''Every once in a while a movie comes, that truly makes an impact. Joaquin's performance and scenography in all it's brilliance. Grotesque, 
    haunting and cringy. Hard to watch at times,... but so mesmerizing, you won't blink an eye watching it. Tragic, but with seriously funny moments. 
    Emotional rollercoaster - sometimes, with multiple emotions popping-up at the same time. 
    this is far from a typical action-riddled predictable super-hero movie - it's a proper psychological thriller/drama, with the single best character development I have ever seen.''']
    input_tensor = mh.Text_to_Tensor_converter(comments)
    predict_result = model.predict(input_tensor)
Example #20
import KIPlayer
import HumanPlayer
import TicTacToe as ttt
import model_helper
import tensorflow as tf
import random
import config


continue_playing = True

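# Restore one checkpoint per player symbol (X and O), each in its own session.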
print('loading AI...')
with tf.Session() as sessX:
    x_tensor, pred_tensor = model_helper.load_model(sessX, 'tf-model/X')
    with tf.Session() as sessO:
        model_helper.load_model(sessO, 'tf-model/O')
        while continue_playing:
            game = ttt.TicTacToe(config.board_size, config.board_size, config.runlength)
            # choose starter
            if random.choice([True, False]):
                fun_to_play_X = KIPlayer.next_move
                fun_to_visualize_X = KIPlayer.visualize
                fun_to_play_O = HumanPlayer.next_move
                fun_to_visualize_O = None
                print('X: Computer, O: You')
            else:
                fun_to_play_X = HumanPlayer.next_move
                fun_to_visualize_X = None
                fun_to_play_O = KIPlayer.next_move
                fun_to_visualize_O = KIPlayer.visualize
                print('X: You, O: Computer')