def ngram_asg():
    N = 81   # number of output tokens
    T = 250  # input length (time steps)
    L = 44   # target length
    B = 1    # batch size, overridable from the command line
    if len(sys.argv) > 1:
        B = int(sys.argv[1])

    tokens = [(i, ) for i in range(N)]
    graphemes_to_index = {i: i for i in range(N)}

    ITERATIONS = 20
    inputs = torch.randn(B, T, N, dtype=torch.float, requires_grad=True)

    targets = [tgt.squeeze() for tgt in torch.randint(N, size=(B, L)).split(1)]

    for ngram in [0, 1, 2]:
        crit = transducer.Transducer(tokens,
                                     graphemes_to_index,
                                     ngram=ngram,
                                     reduction="mean")

        def fwd_bwd():
            loss = crit(inputs, targets)
            loss.backward()

        time_func(fwd_bwd, iterations=ITERATIONS, name=f"asg fwd + bwd, ngram={ngram}")

        def viterbi():
            crit.viterbi(inputs)

        time_func(viterbi, iterations=ITERATIONS, name=f"asg viterbi, ngram={ngram}")
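time_func is imported from a local time_utils module that is not shown in these examples. A minimal sketch of what such a helper could look like, matching the call sites above (the exact signature and output format are assumptions, not the real module):

import time

def time_func(func, iterations=100, name=""):
    # Warm up once so one-time setup costs are not measured.
    func()
    start = time.perf_counter()
    for _ in range(iterations):
        func()
    elapsed_ms = (time.perf_counter() - start) * 1000 / iterations
    print(f"Timing {name} ... {elapsed_ms:.2f} ms per iteration")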
Example #2
def time_forward_score():
    graphs = [gtn.linear_graph(1000, 100) for _ in range(B)]

    def fwd():
        gtn.forward_score(graphs)

    time_func(fwd, 100, "parallel forward_score Fwd")

    out = gtn.forward_score(graphs)

    def bwd():
        gtn.backward(out, [True])

    time_func(bwd, 100, "parallel forward_score bwd")
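    # A hedged addition (not in the original benchmark): a serial baseline
    # that scores the same graphs one at a time, for comparison with the
    # batched forward_score call above.
    def fwd_serial():
        for g in graphs:
            gtn.forward_score(g)

    time_func(fwd_serial, 100, "serial forward_score fwd")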
Example #3
def time_indexed_func():
    N1 = 100
    N2 = 50
    A1 = 20
    A2 = 500
    graphs1 = [gtn.linear_graph(N1, A1) for _ in range(B)]
    graphs2 = [gtn.linear_graph(N2, A2) for _ in range(B)]
    for g in graphs2:
        # Add a self-loop at every node for every label so graphs2 can match
        # arbitrarily long runs of labels during composition.
        for i in range(N2):
            for j in range(A2):
                g.add_arc(i, i, j)

    out = [None] * B

    def process(b):
        out[b] = gtn.forward_score(gtn.compose(graphs1[b], graphs2[b]))

    def indexed_func():
        gtn.parallel_for(process, range(B))

    time_func(indexed_func, 100, "parallel indexed python func")
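    # A hedged extension (not in the original benchmark): time the backward
    # pass of the composed pipeline too, reusing the per-item results in out.
    # Assumes gtn.backward(graph, retain_graph) also accepts a single graph,
    # as the list form in the other examples suggests.
    def process_bwd(b):
        # retain_graph=True so the autograd tape survives repeated runs.
        gtn.backward(out[b], True)

    def indexed_bwd():
        gtn.parallel_for(process_bwd, range(B))

    time_func(indexed_bwd, 100, "parallel indexed python func bwd")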
Example #4
def word_decompositions():
    tokens_path = "word_pieces_tokens_1000.txt"
    with open(tokens_path, "r") as fid:
        tokens = sorted([l.strip() for l in fid])
    graphemes = sorted(set(c for t in tokens for c in t))
    graphemes_to_index = {t: i for i, t in enumerate(graphemes)}

    N = len(tokens) + 1  # +1 output dimension for the optional blank
    T = 100
    L = 15
    B = 1
    if len(sys.argv) > 1:
        B = int(sys.argv[1])

    # Create the inputs directly on the target device so the tensor stays a
    # leaf and inputs.grad is populated by backward().
    device = "cuda" if torch.cuda.is_available() else "cpu"
    inputs = torch.randn(B, T, N, dtype=torch.float, device=device,
                         requires_grad=True)

    targets = []
    for b in range(B):
        # Draw L random word pieces and flatten them into grapheme indices.
        pieces = (random.choice(tokens) for _ in range(L))
        target = [graphemes_to_index[c] for wp in pieces for c in wp]
        targets.append(torch.tensor(target))

    crit = transducer.Transducer(tokens,
                                 graphemes_to_index,
                                 blank="optional",
                                 allow_repeats=False,
                                 reduction="mean")

    def fwd_bwd():
        loss = crit(inputs, targets)
        loss.backward()

    time_func(fwd_bwd, 20, "word decomps fwd + bwd")

    def viterbi():
        crit.viterbi(inputs)

    time_func(viterbi, 20, "word decomps viterbi")
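The word_decompositions benchmark expects word_pieces_tokens_1000.txt to contain one word piece per line. If that file is not at hand, a synthetic stand-in can be generated; this is purely illustrative and not the real token set:

import random
import string

# Hypothetical stand-in: 1000 random lowercase word pieces, one per line.
random.seed(0)
with open("word_pieces_tokens_1000.txt", "w") as fid:
    for _ in range(1000):
        k = random.randint(1, 5)
        fid.write("".join(random.choices(string.ascii_lowercase, k=k)) + "\n")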
Example #5
def time_compose():
    N1 = 100
    N2 = 50
    A1 = 20
    A2 = 500
    graphs1 = [gtn.linear_graph(N1, A1) for _ in range(B)]
    graphs2 = [gtn.linear_graph(N2, A2) for _ in range(B)]
    for g in graphs2:
        for i in range(N2):
            for j in range(A2):
                g.add_arc(i, i, j)

    def fwd():
        gtn.compose(graphs1, graphs2)

    time_func(fwd, 20, "parallel compose Fwd")

    out = gtn.compose(graphs1, graphs2)

    def bwd():
        gtn.backward(out, [True])

    time_func(bwd, 20, "parallel compose bwd")
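All of these benchmarks read the batch size from a module-level B. A minimal driver sketch, assuming the functions above live together in one module (this layout is an assumption, not shown in the source):

import sys

if __name__ == "__main__":
    # Batch size from the command line, defaulting to 1 as the examples do.
    B = int(sys.argv[1]) if len(sys.argv) > 1 else 1
    time_compose()
    time_forward_score()
    time_indexed_func()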
Example #6
"""
Copyright (c) Facebook, Inc. and its affiliates.

This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""

import random
import sys
import torch

sys.path.append("..")
from utils import CTCLoss

from time_utils import time_func

T = 250
L = 44
N = 80
B = int(sys.argv[1]) if len(sys.argv) > 1 else 1
ITERATIONS = 100
# Create the tensor directly on the device so it stays a leaf and backward()
# populates inputs.grad; calling .cuda() on a leaf returns a non-leaf copy.
device = "cuda" if torch.cuda.is_available() else "cpu"
inputs = torch.randn(B, T, N, dtype=torch.float, device=device,
                     requires_grad=True)
tgt = torch.randint(N - 2, (B, L)).split(1)
tgt = [t.tolist()[0] for t in tgt]

def func():
    inputs.grad = None
    op = CTCLoss(inputs, tgt, N - 1)
    op.backward()

time_func(func, name="ctc fwd + bwd")
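With inputs now a leaf tensor, its gradient can be sanity-checked after a timed run; a small illustrative check, not part of the original benchmark:

func()
assert inputs.grad is not None and inputs.grad.shape == (B, T, N)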
"""

import random
import sys
import torch

sys.path.append("..")
from utils import ASGLoss

from time_utils import time_func

T = 250
L = 44
N = 80
B = int(sys.argv[1]) if len(sys.argv) > 1 else 1
# As in the CTC benchmark, create leaf tensors directly on the device so both
# .grad fields are populated by backward().
device = "cuda" if torch.cuda.is_available() else "cpu"
inputs = torch.randn(B, T, N, dtype=torch.float, device=device,
                     requires_grad=True)
transitions = torch.randn(N + 1, N, dtype=torch.float, device=device,
                          requires_grad=True)
tgt = torch.randint(N - 2, (B, L)).split(1)
tgt = [t.tolist()[0] for t in tgt]


def func():
    inputs.grad = None
    transitions.grad = None
    op = ASGLoss(inputs, transitions, tgt)
    op.backward()


time_func(func, name="asg fwd + bwd")