from tmlf.python import workspace, model_builder
import numpy as np

# fwd
workspace.feed_tensor("xent", np.array(
    [0.2, 0.3, 0.4],
    dtype=np.float32,
))
net = model_builder.Net()
net.averaged_loss("xent", "loss")
model_builder.run_net(net)
loss = workspace.fetch_tensor("loss").reshape([-1])
print(loss)
np.testing.assert_almost_equal(loss, np.array([0.3], dtype=np.float32))

# bwd
net.add_backward_ops()
workspace.feed_tensor("loss_grad", np.array(
    [1.0],
    dtype=np.float32,
))
model_builder.run_net(net)
xent_grad = workspace.fetch_tensor("xent_grad").reshape([-1])
np.testing.assert_almost_equal(xent_grad, np.full(3, 1.0 / 3.0, dtype=np.float32))
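
# A minimal NumPy sketch (not tmlf) of what averaged_loss is checked to do
# above: the forward pass is the mean of the input, and the backward pass
# spreads the upstream gradient evenly, i.e. loss_grad / N per element.
import numpy as np

xent = np.array([0.2, 0.3, 0.4], dtype=np.float32)
loss_ref = xent.mean(keepdims=True)                  # ~[0.3]
xent_grad_ref = np.full_like(xent, 1.0 / xent.size)  # loss_grad = 1.0 -> [1/3, 1/3, 1/3]
np.testing.assert_almost_equal(xent_grad_ref, np.full(3, 1.0 / 3.0, dtype=np.float32))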
    ],
    [
        7, 5,
    ],
], dtype=np.float32)
workspace.feed_tensor("feat", feat)
workspace.feed_tensor("w", w)
workspace.feed_tensor("b", b)
net = model_builder.Net()
net.fc(["feat", "w", "b"], ["out"])
model_builder.run_net(net)
out = workspace.fetch_tensor('out')
np.testing.assert_array_equal(out, expected)

# backward
net.add_backward_ops()
workspace.feed_tensor("out_grad", np.array(
    [
        [
            2, 4,
        ],
        [
            2, 8,
        ],
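
# A minimal NumPy sketch (not tmlf) of the fully connected op exercised above,
# assuming the convention out = feat @ w.T + b with w of shape (out, in); the
# shapes and values below are illustrative, not the truncated test data.
import numpy as np

feat = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)  # (batch, in)
w = np.array([[1, 0, 1], [0, 1, 0]], dtype=np.float32)     # (out, in)
b = np.array([1, 2], dtype=np.float32)                     # (out,)
out = feat @ w.T + b

out_grad = np.ones_like(out)
feat_grad = out_grad @ w        # gradient w.r.t. the features
w_grad = out_grad.T @ feat      # gradient w.r.t. the weights
b_grad = out_grad.sum(axis=0)   # gradient w.r.t. the bias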
from tmlf.python import workspace, model_builder
import numpy as np
import math

workspace.feed_tensor("pred", np.array(
    [
        [0.1, 0.2, 0.7],
        [0.2, 0.1, 0.7],
        [0.3, 0.4, 0.3],
    ],
    dtype=np.float32,
))
workspace.feed_tensor("label", np.array(
    [
        2, 0, 1,
    ],
    dtype=np.float32,
))
net = model_builder.Net()
net.label_cross_entropy(['pred', 'label'], 'xent')
model_builder.run_net(net)
xent = workspace.fetch_tensor('xent').reshape(-1)
xent_expected = np.array(
    [
        -math.log(0.7),
        -math.log(0.2),
        -math.log(0.4),
    ],
    dtype=np.float32,
)
np.testing.assert_almost_equal(xent_expected, xent)

# backward
net.add_backward_ops()
workspace.feed_tensor("xent_grad", np.array(
    [
        0.2, 0.3, 0.4,
    ],
    dtype=np.float32,
))
model_builder.run_net(net)
pred_grad = workspace.fetch_tensor("pred_grad")
pred_grad_expected = np.array(
    [
        [0, 0, -0.2 / 0.7],
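
# A minimal NumPy sketch (not tmlf) of label_cross_entropy as checked above:
# forward xent[i] = -log(pred[i, label[i]]); backward places
# -xent_grad[i] / pred[i, label[i]] at the labelled column and 0 elsewhere,
# which also gives the rows cut off from pred_grad_expected above.
import numpy as np

pred = np.array([[0.1, 0.2, 0.7],
                 [0.2, 0.1, 0.7],
                 [0.3, 0.4, 0.3]], dtype=np.float32)
label = np.array([2, 0, 1])
rows = np.arange(pred.shape[0])

xent_ref = -np.log(pred[rows, label])   # [-log(0.7), -log(0.2), -log(0.4)]

xent_grad = np.array([0.2, 0.3, 0.4], dtype=np.float32)
pred_grad_ref = np.zeros_like(pred)
pred_grad_ref[rows, label] = -xent_grad / pred[rows, label]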
from tmlf.python import workspace, model_builder
import numpy as np
import math

workspace.feed_tensor("in", np.array(
    [-0.25, 0, 0.25],
    np.float32,
))
net = model_builder.Net()
net.sigmoid("in", "out")
model_builder.run_net(net)
out = workspace.fetch_tensor("out").reshape(-1)
aux = 1.0 / (1 + math.exp(0.25))  # sigmoid(-0.25)
np.testing.assert_array_equal(out, np.array([aux, 0.5, 1 - aux], np.float32))

# backward
net.add_backward_ops()
workspace.feed_tensor("out_grad", np.array([2, 3, 4], dtype=np.float32))
model_builder.run_net(net)
in_grad = workspace.fetch_tensor("in_grad").reshape(-1)
expected_grad = np.array(
    [2 * aux * (1 - aux), 3 * 0.25, 4 * aux * (1 - aux)],
    dtype=np.float32,
)
np.testing.assert_almost_equal(expected_grad, in_grad)
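
# A minimal NumPy sketch (not tmlf) of the sigmoid gradient verified above:
# out = 1 / (1 + exp(-in)) and in_grad = out_grad * out * (1 - out), which at
# in = 0 reduces to out_grad * 0.25.
import numpy as np

x = np.array([-0.25, 0.0, 0.25], dtype=np.float32)
out = 1.0 / (1.0 + np.exp(-x))
out_grad = np.array([2, 3, 4], dtype=np.float32)
in_grad_ref = out_grad * out * (1 - out)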
from tmlf.python import workspace, model_builder
import numpy as np

workspace.feed_tensor("v0", np.array(
    [1, 2],
    dtype=np.float32,
))
workspace.feed_tensor("w0", np.array(
    [0.2],
    dtype=np.float32,
))
workspace.feed_tensor("v1", np.array(
    [
        3, 4,
    ],
    dtype=np.float32,
))
workspace.feed_tensor("w1", np.array(
    [0.4],
    dtype=np.float32,
))
net = model_builder.Net()
net.weighted_sum(
    ["v0", "w0", "v1", "w1"],
    ["v0"],
)
model_builder.run_net(net)
expected = np.array(
    [1.4, 2.0],
    dtype=np.float32,
)
np.testing.assert_almost_equal(expected, workspace.fetch_tensor("v0").reshape(-1))
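
# A minimal NumPy sketch (not tmlf) of weighted_sum as used above: each input
# tensor is scaled by its scalar weight and summed, out = w0 * v0 + w1 * v1
# (written back into v0 in the test).
import numpy as np

v0 = np.array([1, 2], dtype=np.float32)
v1 = np.array([3, 4], dtype=np.float32)
w0, w1 = np.float32(0.2), np.float32(0.4)
ref = w0 * v0 + w1 * v1   # [1.4, 2.0]
np.testing.assert_almost_equal(ref, np.array([1.4, 2.0], dtype=np.float32), decimal=6)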
from tmlf.python import workspace, model_builder
import numpy as np
import math

workspace.feed_tensor("sm_in", np.array(
    [
        [3, -5, 8],
        [-6, 4, 7],
    ],
    dtype=np.float32))
net = model_builder.Net()
net.softmax("sm_in", "sm_out")
model_builder.run_net(net)
sm_out = workspace.fetch_tensor("sm_out")
exp_sum1 = math.exp(3) + math.exp(-5) + math.exp(8)
exp_sum2 = math.exp(-6) + math.exp(4) + math.exp(7)
sm_out_expected = np.array(
    [
        [math.exp(3) / exp_sum1, math.exp(-5) / exp_sum1, math.exp(8) / exp_sum1],
        [math.exp(-6) / exp_sum2, math.exp(4) / exp_sum2, math.exp(7) / exp_sum2],
    ],
    dtype=np.float32)
np.testing.assert_almost_equal(sm_out_expected, sm_out)

# backward
net.add_backward_ops()
sm_out_grad = np.array(
    [
        [0.2, -0.2, 0.3],
        [-0.5, 0.4, 0.7],
    ],
    dtype=np.float32,
)
workspace.feed_tensor("sm_out_grad", sm_out_grad)
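
# A minimal NumPy sketch (not tmlf) of row-wise softmax and its gradient,
# since the backward check is cut off above: for each row,
# sm_in_grad = (sm_out_grad - sum(sm_out_grad * sm_out)) * sm_out.
import numpy as np

sm_in = np.array([[3, -5, 8], [-6, 4, 7]], dtype=np.float32)
e = np.exp(sm_in - sm_in.max(axis=1, keepdims=True))  # shift for numerical stability
sm_out = e / e.sum(axis=1, keepdims=True)

sm_out_grad = np.array([[0.2, -0.2, 0.3], [-0.5, 0.4, 0.7]], dtype=np.float32)
dot = (sm_out_grad * sm_out).sum(axis=1, keepdims=True)
sm_in_grad_ref = (sm_out_grad - dot) * sm_out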
from tmlf.python import model_builder
from tmlf.python import tmlf_pybind, workspace
import numpy as np

net = model_builder.Net()
net.constant_fill([], ["out"], shape=(3, 5), value=3.7)
model_builder.run_net(net)
out = tmlf_pybind.fetch_tensor('out')
out_np = np.array(out)
np.testing.assert_array_equal(out_np, np.full([3, 5], 3.7, dtype=np.float32))

# fill getting shape from another tensor
net.constant_fill(["out"], ["out2"], value=8.8)
model_builder.run_net(net)
out2 = workspace.fetch_tensor('out2')
np.testing.assert_array_equal(out2, np.full([3, 5], 8.8, dtype=np.float32))
from tmlf.python import workspace, model_builder
import numpy as np

workspace.feed_tensor("fc", np.array(
    [-1, -0.5, 0.5, 1],
    np.float32,
))
net = model_builder.Net()
net.relu("fc", "relu")
model_builder.run_net(net)
out = workspace.fetch_tensor("relu").reshape(-1)
np.testing.assert_array_equal(out, np.array([0, 0, 0.5, 1], np.float32))

# backward
net.add_backward_ops()
workspace.feed_tensor("relu_grad", np.array([2, 3, 4, 5], dtype=np.float32))
model_builder.run_net(net)
fc_grad = workspace.fetch_tensor("fc_grad").reshape(-1)
fc_grad_expected = np.array([0, 0, 4, 5], dtype=np.float32)
np.testing.assert_almost_equal(fc_grad_expected, fc_grad)
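
# A minimal NumPy sketch (not tmlf) of the ReLU gradient checked above: the
# upstream gradient passes through only where the input was positive.
import numpy as np

fc = np.array([-1, -0.5, 0.5, 1], dtype=np.float32)
relu_grad = np.array([2, 3, 4, 5], dtype=np.float32)
fc_grad_ref = relu_grad * (fc > 0)   # [0, 0, 4, 5]
np.testing.assert_array_equal(fc_grad_ref, np.array([0, 0, 4, 5], dtype=np.float32))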
        0.2, 0.1, 0.7,
    ],
    [
        0.3, 0.4, 0.3,
    ],
],
    dtype=np.float32,
))
workspace.feed_tensor("label", np.array(
    [
        2, 0, 1,
    ],
    dtype=np.float32,
))
net = model_builder.Net()
net.accuracy(['pred', 'label'], 'acc')
model_builder.run_net(net)
acc = workspace.fetch_tensor('acc').reshape(-1)
acc_expected = np.array(
    [2.0 / 3.0],
    dtype=np.float32,
)
np.testing.assert_almost_equal(acc_expected, acc)
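
# A minimal NumPy sketch (not tmlf) of the accuracy computed above: the
# fraction of rows whose argmax over pred equals the label (2 of 3 here).
# The first pred row is truncated in the source, so an assumed row is used.
import numpy as np

pred = np.array([[0.1, 0.2, 0.7],   # assumed first row (argmax 2 matches label 2)
                 [0.2, 0.1, 0.7],
                 [0.3, 0.4, 0.3]], dtype=np.float32)
label = np.array([2, 0, 1])
acc_ref = (pred.argmax(axis=1) == label).mean()   # 2/3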
from tmlf.python import workspace, model_builder
import numpy as np

workspace.feed_tensor('X', np.array([
    [2, 3],
    [4, 5],
    [6, 7],
], dtype=np.float32))
workspace.feed_tensor('y', np.array([0, 1, 0], dtype=np.float32))
workspace.feed_tensor('cursor', np.array([0], dtype=np.float32))
net = model_builder.Net()
net.circular_batch(['X', 'y', 'cursor'], ["X_sub", "y_sub", 'cursor'], batch_size=2)
model_builder.run_net(net)
np.testing.assert_almost_equal(workspace.fetch_tensor("X_sub"), np.array([
    [2, 3],
    [4, 5],
], dtype=np.float32))
np.testing.assert_almost_equal(
    workspace.fetch_tensor("y_sub").reshape(-1),
    np.array(
        [0, 1],
        dtype=np.float32,
    ))
np.testing.assert_almost_equal(
    workspace.fetch_tensor("cursor").reshape(-1),
    np.array(
        [2],
        dtype=np.float32,
    ))
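
# A minimal NumPy sketch (not tmlf) of the circular batching behaviour tested
# above, assuming the op takes batch_size rows starting at cursor, wraps
# around the end of the data, and advances the cursor.
import numpy as np

X = np.array([[2, 3], [4, 5], [6, 7]], dtype=np.float32)
y = np.array([0, 1, 0], dtype=np.float32)
cursor, batch_size = 0, 2

idx = (cursor + np.arange(batch_size)) % len(X)
X_sub, y_sub = X[idx], y[idx]            # rows 0 and 1 on the first call
cursor = (cursor + batch_size) % len(X)  # advances to 2, matching the test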
from tmlf.python import workspace, model_builder
import numpy as np
import math

workspace.feed_tensor("pred", np.array([0.25, 0.85], dtype=np.float32))
workspace.feed_tensor("label", np.array([0, 1], dtype=np.float32))
net = model_builder.Net()
net.cross_entropy(["pred", "label"], "loss")
model_builder.run_net(net)
loss = workspace.fetch_tensor("loss")
expected = np.array([
    [-math.log(0.75)],
    [-math.log(0.85)],
], dtype=np.float32)
print(f"expect: {expected}")
print(f"actual: {loss}")
np.testing.assert_almost_equal(expected, loss)

# bwd
net.add_backward_ops()
workspace.feed_tensor("loss_grad", np.array([2.0, 4.0], dtype=np.float32))
model_builder.run_net(net)
pred_grad = workspace.fetch_tensor("pred_grad").reshape(-1)
expected_grad = np.array(
    [8.0 / 3, -4 / 0.85],
    dtype=np.float32,
)
np.testing.assert_almost_equal(expected_grad, pred_grad, decimal=6)
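
# A minimal NumPy sketch (not tmlf) of the binary cross entropy checked above:
# loss = -(label * log(pred) + (1 - label) * log(1 - pred)), with gradient
# pred_grad = loss_grad * (pred - label) / (pred * (1 - pred)).
import numpy as np

pred = np.array([0.25, 0.85], dtype=np.float32)
label = np.array([0, 1], dtype=np.float32)
loss_ref = -(label * np.log(pred) + (1 - label) * np.log(1 - pred))

loss_grad = np.array([2.0, 4.0], dtype=np.float32)
pred_grad_ref = loss_grad * (pred - label) / (pred * (1 - pred))
np.testing.assert_almost_equal(
    pred_grad_ref, np.array([8.0 / 3, -4 / 0.85], dtype=np.float32), decimal=5)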