def __predict__(args, params):
    path = os.path.join(args.model_dir, "centroids_embeddings.npy")
    centroids = np.load(path)
    # Shape is (num_labels, n_clusters, embedding_size); record n_clusters
    # before flattening.
    n_clusters = centroids.shape[1]
    centroids = centroids.reshape(-1, centroids.shape[2])  # flatten for kNN

    # Define the model
    tf.logging.info("Creating the model...")
    config = tf.estimator.RunConfig(
        tf_random_seed=230,
        model_dir=args.model_dir,
        save_summary_steps=params.save_summary_steps)
    estimator = tf.estimator.Estimator(model_fn, params=params, config=config)

    # Compute embeddings on the test set
    tf.logging.info("Predicting")
    predictions = estimator.predict(
        lambda: test_input_fn(args.data_dir, params))

    embeddings = np.zeros((params.eval_size, params.embedding_size))
    for i, p in enumerate(predictions):
        # Each prediction is a dict: {'embeddings': array(embedding_size)}
        embeddings[i] = p['embeddings']

    # Each centroid inherits the label of the class it was computed from.
    labels = np.repeat(np.arange(params.num_labels), n_clusters)

    knn = KNeighborsClassifier(n_neighbors=3)

    knn.fit(centroids, labels)
    y_predicted = knn.predict(embeddings)

    with tf.Session() as sess:
        dataset = mnist_dataset.test(args.data_dir)
        dataset = dataset.map(lambda img, lab: lab)
        dataset = dataset.batch(params.eval_size)
        labels_tensor = dataset.make_one_shot_iterator().get_next()
        y_true = sess.run(labels_tensor)

    print("Accuracy: " +
          str(metrics.accuracy_score(y_true, y_predicted) * 100) + "%")
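For context, a minimal driver for __predict__, following the argparse/Params pattern used in the other examples on this page (the parser and Params helpers are assumptions here, defined as in Example No. 3), might look like:

if __name__ == '__main__':
    tf.reset_default_graph()
    tf.logging.set_verbosity(tf.logging.INFO)

    # 'parser' and 'Params' are assumed helpers, as in Example No. 3.
    args = parser.parse_args()
    params = Params(os.path.join(args.model_dir, 'params.json'))
    __predict__(args, params)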
Example No. 2
def test(params):
    x, embeddings, labels, accuracy = model_fn(params, mode='test')

    sess = tf.Session()
    try:
        tf.train.Saver().restore(
            sess, os.path.join(os.getcwd(), params.model_file))
    except Exception:
        print("Please create a model before using it for prediction.")
        print("Run the following command: python my_model.py train")
        exit(1)
    
    xdata, labeldata = test_input_fn()

    print ("Model restored!")

    start_time = time.time()
    out, acc = sess.run([embeddings, accuracy],
                        feed_dict={x: xdata, labels: labeldata})
    test_time = time.time() - start_time
    print("Time taken for prediction:", test_time, "seconds")

    print("Prediction accuracy:", acc)

    sess.close()
    return out
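A sketch of a command-line driver for this routine, matching the usage hinted at in the error message above; the train function and the params file path are assumptions:

if __name__ == '__main__':
    import sys

    params = Params('params.json')  # hypothetical config path
    if len(sys.argv) > 1 and sys.argv[1] == 'train':
        train(params)  # train() is assumed to be defined alongside test()
    else:
        embeddings = test(params)
        print("Embeddings shape:", embeddings.shape)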
Example No. 3
parser.add_argument('--data_dir',
                    default='data/mnist',
                    help="Directory containing the dataset")

if __name__ == '__main__':
    tf.reset_default_graph()
    tf.logging.set_verbosity(tf.logging.INFO)

    # Load the parameters
    args = parser.parse_args()
    json_path = os.path.join(args.model_dir, 'params.json')
    assert os.path.isfile(
        json_path), "No json configuration file found at {}".format(json_path)
    params = Params(json_path)

    # Define the model
    tf.logging.info("Creating the model...")
    estimator = tf.estimator.Estimator(model_fn,
                                       params=params,
                                       model_dir=args.model_dir)

    # Predict on the test set
    tf.logging.info("Predicting on the test set.")

    print("dataset,", test_input_fn(args.data_dir, params))

    res = estimator.predict(lambda: test_input_fn(args.data_dir, params))

    print(next(res)['embeddings'].shape)
Example No. 4
    config = tf.estimator.RunConfig(tf_random_seed=230,
                                    model_dir=args.model_dir,
                                    save_summary_steps=params.save_summary_steps)
    estimator = tf.estimator.Estimator(model_fn, params=params, config=config)

    # Train the model
    tf.logging.info("Starting training for {} epoch(s).".format(params.num_epochs))
    estimator.train(lambda: train_input_fn(args.data_dir, params))
    
    '''
    path = args.model_save 
    
    try:
        os.makedirs(path)

    except OSError:
        print ("Creation of the directory %s failed" % path)
    else:
        print ("Successfully created the directory %s " % path)

    estimator.export_saved_model(path, serving_input_receiver_fn)

    print("Model saved!")
    '''

    # Evaluate the model on the test set
    tf.logging.info("Evaluation on test set.")
    res = estimator.evaluate(lambda: test_input_fn(args.data_dir, params))

    for key in res:
        print("{}: {}".format(key, res[key]))
Example No. 5
    params = Params(json_path)

    # Define the model
    tf.logging.info("Creating the model...")
    config = tf.estimator.RunConfig(
        tf_random_seed=230,
        model_dir=args.model_dir,
        save_summary_steps=params.save_summary_steps)
    estimator = tf.estimator.Estimator(model_fn, params=params, config=config)

    # EMBEDDINGS VISUALIZATION

    # Compute embeddings on the test set
    tf.logging.info("Predicting")
    predictions = estimator.predict(
        lambda: test_input_fn(args.data_dir, params))

    # TODO (@omoindrot): remove the hard-coded 10000
    # embeddings = np.zeros((10000, params.embedding_size))
    embeddings = np.zeros((params.eval_size, params.embedding_size))
    for i, p in enumerate(predictions):
        # Each prediction is a dict: {'embeddings': array(embedding_size)}
        embeddings[i] = p['embeddings']

    tf.logging.info("Embeddings shape: {}".format(
        embeddings.shape))  # (10000, 64)

    # Visualize test embeddings
    embedding_var = tf.Variable(embeddings, name='mnist_embedding')

    eval_dir = os.path.join(args.model_dir, "eval")
Example No. 6
parser.add_argument('--model_dir',
                    default='experiments/base_model_v2',
                    help="Experiment directory containing params.json")
parser.add_argument('--data_dir',
                    default='data_for_model_resized_448*448/',
                    help="Directory containing the dataset")

if __name__ == '__main__':
    tf.reset_default_graph()
    tf.logging.set_verbosity(tf.logging.INFO)

    # Load the parameters
    args = parser.parse_args()
    json_path = os.path.join(args.model_dir, 'params.json')
    assert os.path.isfile(
        json_path), "No json configuration file found at {}".format(json_path)
    params = Params(json_path)

    # Define the model
    tf.logging.info("Creating the model...")
    estimator = tf.estimator.Estimator(model_fn,
                                       params=params,
                                       model_dir=args.model_dir)

    # Evaluate the model on the test set
    tf.logging.info("Evaluation on the test set.")
    eval_dir = os.path.join(args.model_dir, "eval")
    res = estimator.evaluate(
        lambda: test_input_fn(args.data_dir, params, eval_dir))
    for key in res:
        print("{}: {}".format(key, res[key]))
Example No. 7
    params.vocab_fname, default_value='<unk>')
vocab_size = char_vocab.size()

params.pad_token_id = char_vocab.lookup(tf.constant(params.pad_token))
params.start_token_id = char_vocab.lookup(tf.constant(params.start_token))
params.end_token_id = char_vocab.lookup(tf.constant(params.end_token))

with tf.Session() as sess:
    sess.run(tf.tables_initializer())
    params.vocab_size_val = vocab_size.eval()

train_dataset = load_dataset_from_file(params.train_fname, char_vocab, params)
train_inputs = input_fn('train', train_dataset, params)
train_model_spec = model_fn('train', train_inputs, params, reuse=False)

test_inputs = test_input_fn('test', char_vocab, params)
test_model_spec = model_fn('test', test_inputs, params, reuse=True)

train_test(train_model_spec, test_model_spec, params, char_vocab_rev)

texts = ['ff', 'av']
preds, pred_logits = test_sess(texts,
                               test_model_spec,
                               params,
                               char_vocab_rev,
                               restore_from=os.path.join(
                                   params.model_dir, 'last_weights'))

for text, t_preds, t_pred_logits in zip(texts, preds, pred_logits):
    print('Input: {}'.format(text))
    print('Top {} predictions:'.format(params.beam_width))
Example No. 8
    assert os.path.isfile(json_path), "No json configuration file found at {}".format(json_path)
    params = Params(json_path)

    # Define the model
    tf.logging.info("Creating the model...")
    config = tf.estimator.RunConfig(tf_random_seed=230,
                                    model_dir=args.model_dir,
                                    save_summary_steps=params.save_summary_steps)
    estimator = tf.estimator.Estimator(model_fn, params=params, config=config)


    # EMBEDDINGS VISUALIZATION

    # Compute embeddings on the test set
    tf.logging.info("Predicting")
    predictions = estimator.predict(lambda: test_input_fn(args.data_dir, params))

    # TODO (@omoindrot): remove the hard-coded 10000
    embeddings = np.zeros((10000, params.embedding_size))
    for i, p in enumerate(predictions):
        embeddings[i] = p['embeddings']

    tf.logging.info("Embeddings shape: {}".format(embeddings.shape))

    # Visualize test embeddings
    embedding_var = tf.Variable(embeddings, name='mnist_embedding')

    eval_dir = os.path.join(args.model_dir, "eval")
    summary_writer = tf.summary.FileWriter(eval_dir)

    config = projector.ProjectorConfig()
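The excerpt ends here. For reference, a typical completion of this TF 1.x projector setup (the metadata filename and checkpoint name below are assumptions) is:

# Continuation sketch (not from the original excerpt): register the embedding
# variable with the projector and save it so TensorBoard can display it.
embedding = config.embeddings.add()
embedding.tensor_name = embedding_var.name
embedding.metadata_path = "metadata.tsv"  # hypothetical label metadata file
projector.visualize_embeddings(summary_writer, config)

# Save the embedding variable; TensorBoard loads it from this checkpoint.
with tf.Session() as sess:
    sess.run(embedding_var.initializer)
    saver = tf.train.Saver([embedding_var])
    saver.save(sess, os.path.join(eval_dir, "embeddings.ckpt"))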