import numpy as np
import tensorflow as tf
from tqdm import tqdm

from python.dataset import AutoComplete
from python.model import PureGRU
from python.summary import ContextAccuracy

# Evaluate the 600-unit autocomplete GRU on the test split, accumulating
# context-sensitive accuracy over predictions sorted by probability.
tf.logging.set_verbosity(tf.logging.INFO)

# The training dataset is only needed to build the model graph; evaluation
# batches come from the test split.
train_dataset = AutoComplete(repeat=True, batch_size=64)
test_dataset = AutoComplete(dataset='test', repeat=False, batch_size=16)
model = PureGRU(train_dataset,
                name='autocomplete_gru_600',
                embedding_size=600,
                verbose=True)

context_accuracy = ContextAccuracy()

# Fix: the original wrapped this loop in enumerate() but never used the
# index, so the useless counter is removed.
# NOTE(review): `total=test_dataset.observations` assumes predict() yields
# one output per observation — confirm against predict()'s batching.
for output in tqdm(model.predict(dataset=test_dataset),
                   total=test_dataset.observations):
    probabilities = output['probabilities']
    # argsort ascending, then reverse columns -> class indices by
    # descending probability per row.
    predict_sorted = np.argsort(probabilities, axis=1)[:, ::-1]

    source = test_dataset.decode_source(output['source'])
    target = test_dataset.decode_target(output['target'])
    predict = test_dataset.decode_target(predict_sorted)

    context_accuracy.add(source, predict, target)
# ---- Example 2 (listing-site artifact: "Exemple #2" marker and stray vote count) ----
from python.model import PureGRU

# NOTE(review): this fragment appears truncated by extraction — `path`,
# `tf`, `np`, `AutoComplete`, and `AutoCompleteFixed` are used below but
# their imports are not visible here, and the trailing loop body is cut
# off mid-way. Code left byte-identical; comments only.
dirname = path.dirname(path.realpath(__file__))
article_dir = path.join(dirname, '..', '..', 'public')  # NOTE(review): unused in the visible code

tf.logging.set_verbosity(tf.logging.INFO)

# The training dataset is only needed to build the model graph; the
# fixed-sentence dataset feeds one hand-written context through predict().
train_dataset = AutoComplete(repeat=True)
test_dataset = AutoCompleteFixed(
    "context the formal study of grammar is an important part of education "
    "from a young age through advanced learning though the rules taught in "
    "schools are not a grammar in the sense most linguists use",
    batch_size=1,
    use_offsets=True)
model = PureGRU(train_dataset,
                name='autocomplete_gru_600',
                embedding_size=600,
                verbose=True)

data = []  # presumably collects per-step records — the append is outside this view

print(f'sequence:')
for output_i, output in enumerate(model.predict(dataset=test_dataset)):
    probabilities = output['probabilities']
    # Prediction indices and their probabilities, both sorted by
    # descending probability per row.
    predict_index_sorted = np.argsort(probabilities, axis=1)[:, ::-1]
    predict_value_sorted = np.sort(probabilities, axis=1)[:, ::-1]

    source = test_dataset.decode_source(output['source'])
    target = test_dataset.decode_target(output['target'])
    predict = test_dataset.decode_target(predict_index_sorted)
    connectivity = output['connectivity']
import numpy as np
import tensorflow as tf

from python.dataset import Generate
from python.model import PureGRU

# Evaluate the 1200-unit generation GRU on the held-out test split and
# print the resulting metrics.
tf.logging.set_verbosity(tf.logging.INFO)

training_data = Generate(repeat=True, batch_size=64)
evaluation_data = Generate(dataset='test', repeat=False, batch_size=16)

gru_model = PureGRU(training_data,
                    name='generate_gru_1200',
                    embedding_size=1200,
                    verbose=True)

metrics = gru_model.evaluate(dataset=evaluation_data)
print(metrics)
# ---- Example 4 (listing-site artifact: "Exemple #4" marker and stray vote count) ----
import tensorflow as tf

from python.dataset import Generate
from python.model import PureGRU

# Train the 1200-unit generation GRU, validating against the 'valid'
# split as training proceeds.
tf.random.set_random_seed(450849059)  # fixed seed from random.org for reproducibility
tf.logging.set_verbosity(tf.logging.INFO)

training_data = Generate(repeat=True, batch_size=64)
validation_data = Generate(repeat=True, dataset='valid', batch_size=64)

gru_model = PureGRU(training_data,
                    name='generate_gru_1200',
                    embedding_size=1200,
                    verbose=True)

# max_steps equals the training set's batch count — presumably one epoch.
gru_model.train(max_steps=training_data.batches, valid_dataset=validation_data)
import numpy as np
import tensorflow as tf

from python.dataset import AutoComplete
from python.model import PureGRU

# Evaluate the 600-unit autocomplete GRU on the test split, one
# observation per batch, and print the resulting metrics.
tf.logging.set_verbosity(tf.logging.INFO)

training_data = AutoComplete(repeat=True, batch_size=64)
evaluation_data = AutoComplete(dataset='test', repeat=False, batch_size=1)

gru_model = PureGRU(training_data,
                    name='autocomplete_gru_600',
                    embedding_size=600,
                    verbose=True)

metrics = gru_model.evaluate(dataset=evaluation_data)
print(metrics)
# ---- Example 6 (listing-site artifact: "Exemple #6" marker and stray vote count) ----
import tensorflow as tf

from python.dataset import AutoComplete
from python.model import PureGRU

# Train the 600-unit autocomplete GRU, validating against the 'valid'
# split as training proceeds.
tf.random.set_random_seed(450849059)  # fixed seed from random.org for reproducibility
tf.logging.set_verbosity(tf.logging.INFO)

training_data = AutoComplete(repeat=True, batch_size=64)
validation_data = AutoComplete(repeat=True, dataset='valid', batch_size=64)
# NOTE(review): constructed but never used below — kept to preserve any
# construction side effects; consider removing if none exist.
evaluation_data = AutoComplete(repeat=False, dataset='test', batch_size=64)

gru_model = PureGRU(training_data,
                    name='autocomplete_gru_600',
                    embedding_size=600,
                    verbose=True)

# max_steps is twice the training batch count — presumably two epochs.
gru_model.train(max_steps=training_data.batches * 2, valid_dataset=validation_data)