Example 1
def main(_):
    PS = U.Params(params).init_comps()
    model = model_for(PS)

    @Q.function
    def train_step(x, y):
        # forward pass under the tape, then backprop and one optimizer step
        with Q.GradientTape() as tape:
            logits = model(x)
            loss = PS.losses(y, logits)
            acc = PS.metrics(y, logits)
        grads = tape.gradient(loss, model.trainable_variables)
        PS.optimizer.apply_gradients(zip(grads, model.trainable_variables))
        return loss, acc

    def train():
        step, loss, acc = 0, 0.0, 0.0
        for x, y in dset_for(PS, 'train'):
            step += 1
            loss, acc = train_step(x, y)
            if step % 10 == 0:  # report running metrics every 10 steps
                m = PS.metrics.result()
                Q.print('Step:', step, ', loss:', loss, ', acc:', m)
        return step, loss, acc

    step, loss, acc = train()
    print('Final step:', step, ', loss:', loss, ', acc:', PS.metrics.result())
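
Q appears to alias the TensorFlow package here. For reference, a self-contained sketch of the same tape-based train step, with plain tf.keras objects standing in for model_for(PS) and the PS components (the model, optimizer, loss, and metric below are all stand-ins, not qnarre's):

import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(10)])
model.build((None, 4))
opt = tf.keras.optimizers.Adam()
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
metric = tf.keras.metrics.SparseCategoricalAccuracy()

@tf.function
def train_step(x, y):
    with tf.GradientTape() as tape:
        logits = model(x, training=True)
        loss = loss_fn(y, logits)
    metric.update_state(y, logits)
    grads = tape.gradient(loss, model.trainable_variables)
    opt.apply_gradients(zip(grads, model.trainable_variables))
    return loss
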
Example 2
def __init__(self):
    self.ps = U.Params(params).init_comps()
    self.pre = None
    self.post = None
    # zero-initialized (4, 10) state buffers; the same initial value
    # can seed both variables, since tf.Variable copies it
    i = tf.constant([0.] * (4 * 10), shape=(4, 10))
    self.src_b = tf.Variable(initial_value=i)
    self.mem_b = tf.Variable(initial_value=i)
Example 3
def main(_):
    ps = utils.Params(params).init_comps()
    ds = dset_for(ps, TRAIN)
    # with T.distribute.MirroredStrategy().scope():
    mdl = model_for(ps, compiled=True)
    mdl.train_on_batch(ds)
    mp = pth.Path.cwd() / ps.dir_model / ps.model
    assert tf.train.get_checkpoint_state(str(mp))
    mdl.load_weights(str(mp / TRAIN))
    c = tf.train.Checkpoint(model=mdl, optimizer=ps.optimizer)
    c.restore(str(mp / TRAIN)).expect_partial()  # .assert_consumed()
    for n, _ in tf.train.list_variables(str(mp)):
        print(n)
    mp2 = pth.Path.cwd() / ps.dir_model / 'mnist_2'
    print('saving...')
    c.save(str(mp2 / TRAIN))
    for n, _ in tf.train.list_variables(str(mp2)):
        print(n)
    assert tf.train.get_checkpoint_state(str(mp2))
    mdl.load_weights(str(mp2 / 'train-1'))  # Checkpoint.save numbered the file 'train-1'
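
For orientation, a self-contained save/restore round trip with the standard tf.train.Checkpoint API; the model is a stand-in and the path is illustrative:

import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(10)])
model.build((None, 4))
ckpt = tf.train.Checkpoint(model=model,
                           optimizer=tf.keras.optimizers.Adam())

path = ckpt.save('.model/demo/train')  # writes 'train-1.index' etc.
ckpt.restore(path).expect_partial()    # partial is fine: optimizer slots
                                       # only exist after a training step
for name, shape in tf.train.list_variables('.model/demo'):
    print(name, shape)
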
Example 4
params = dict(
    PAD=0,
    brackets=None,
    dim_embed=4,
    dim_hidden=8,
    emb_one_hot=None,
    num_toks=16,
    num_types=4,
    len_src=3,
    len_tgt=3,
    pos_max_len=None,
    pos_max=1.0e4,
    pos_min=1.0,
    pos_start=0,
)

ps = U.Params(params).init_comps()


def test_tokembed():
    e = TokEmbed(ps)
    e.build((1, 5))
    src = tf.constant([1, 2, 0, 3, 0], shape=(1, 5))
    e.call(src)
    ps.emb_one_hot = True
    e = TokEmbed(ps)
    e.build((1, 5))
    e.call(src)


def test_w_grad():
    e = TokEmbed(ps)
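
A hypothetical sketch of what a TokEmbed-like layer could look like, consistent with the params above (num_toks, dim_embed, and the emb_one_hot toggle); this is an assumption, not qnarre's implementation:

import tensorflow as tf

class TokEmbedSketch(tf.keras.layers.Layer):
    def __init__(self, ps):
        super().__init__()
        self.ps = ps

    def build(self, input_shape):
        # (num_toks, dim_embed) embedding table
        self.emb = self.add_weight(
            name='emb', shape=(self.ps.num_toks, self.ps.dim_embed))

    def call(self, x):
        if self.ps.emb_one_hot:
            # one-hot matmul path; gradients flow to the whole table
            return tf.einsum('...v,vd->...d',
                             tf.one_hot(x, self.ps.num_toks), self.emb)
        # sparse lookup path; gradients flow only to the gathered rows
        return tf.gather(self.emb, x)
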
Example 5
def load_params():
    return utils.Params(params).init_comps()
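
Params/init_comps is qnarre's hyper-parameter container; a rough, hypothetical stand-in with the same surface (attribute access over a dict, plus a component-setup step) might be:

class Params:
    def __init__(self, d, **kw):
        self.__dict__.update(d, **kw)

    def init_comps(self):
        # in qnarre this step builds the configured components
        # (optimizer, losses, metrics); here it is a no-op
        return self
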
Example 6
def main(_):
    ps = utils.Params(params).init_comps()
    # tf.autograph.set_verbosity(1)
    # print(tf.autograph.to_code(Trafo.embed.python_function))
    session_for(ps)(dset_for, model_for)
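
session_for(ps) evidently returns a callable that receives the dataset and model builders; one hypothetical reading of that contract (not qnarre's actual code):

def session_for(ps):
    def run(dset_for, model_for):
        # build both from the same params, then train
        mdl = model_for(ps, compiled=True)
        mdl.fit(dset_for(ps, 'train'))
    return run
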
Example 7
def load_params():
    f = 'channels_first' if T.test.is_built_with_cuda() else 'channels_last'
    return U.Params(_params, data_format=f)
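
The branch picks the tensor layout the convolution kernels prefer: cuDNN runs fastest with NCHW ('channels_first'), while CPU kernels expect NHWC ('channels_last'). A minimal illustration of where the flag ends up (the layer choice is arbitrary):

import tensorflow as tf

fmt = 'channels_first' if tf.test.is_built_with_cuda() else 'channels_last'
conv = tf.keras.layers.Conv2D(32, 3, data_format=fmt)
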
Example 8
import zipfile

import qnarre.neura.utils as U

from qnarre.feeds.prep import utils
from qnarre.feeds.prep import encoder

ps = dict(
    lower_case=True,
    model='',
    tok_max_chars=None,
    vocab_pairs=None,
    bert_vocab='.model/bert/uncased_L-12_H-768_A-12/vocab.txt',
    gpt_2_vocab='.model/gpt_2/117M/encoder.json',
    gpt_2_pairs='.model/gpt_2/117M/vocab.bpe',
)

ps = U.Params(ps)


def test_encoders():
    txt = "sf!fg dfg'sdf?dfg xcxb'sdfg!sdg 324sdf.sdfa"
    ce = encoder.CharE(ps)
    ts, os, _ = zip(*ce(txt))
    d = ce.decode(ts, os)
    assert d == txt
    we = encoder.WordE(ps)
    ts, os, _ = zip(*we(txt))
    d = we.decode(ts, os)
    assert d == txt
    be = encoder.BertE(ps)
    ge = encoder.Gpt2E(ps)
    with zipfile.ZipFile('.data/text8/text8.zip') as z:
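
Each assertion checks a lossless round trip: decoding the (token, offset) pairs reproduces the original text. A toy encoder with the same call/decode surface makes the property concrete (CharToy is hypothetical, not qnarre's encoder):

class CharToy:
    def __call__(self, txt):
        # one (token, offset, extra) triple per character, like CharE above
        return [(c, i, None) for i, c in enumerate(txt)]

    def decode(self, ts, os):
        return ''.join(ts)

t = "sf!fg dfg'sdf"
ts, os, _ = zip(*CharToy()(t))
assert CharToy().decode(ts, os) == t
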