Example #1
def test_multidevice_op(self):
    # Place the two operands on different devices ("gpu" and "gpu:1")
    # before exercising a multi-device op.
    a = np.asarray(np.random.uniform(0, 1, size=[1, 10000]),
                   dtype=np.float32)
    b = np.asarray(np.random.uniform(0, 1, size=[1, 10000]),
                   dtype=np.float32)
    tensora = dp.Tensor(a)
    tensorb = dp.Tensor(b)
    tensora.device("gpu")
    tensorb.device("gpu:1")
Example #2
def test_host_device(self):
    a = np.asarray(np.random.uniform(0, 1, size=[1, 10000]),
                   dtype=np.float32)
    tensor = dp.Tensor(a)
    tensor.device("gpu")
    # TODO: we can do better here; for now just check the data survived the move.
    self.assertIsNotNone(tensor.data)
Example #3
import numpy as np
import tensorflow as tf

import deepops as dp  # assumed import path for the `dp` alias


def _helper_test_backward(shps,
                          lmts,
                          tf_fxn,
                          deepops_fxn,
                          atol=1e-6,
                          rtol=1e-6,
                          pass_list=False):
    # Build matching random inputs: TensorFlow variables and dp.Tensor copies.
    tf.random.set_seed(111)
    tf_arrays = [
        tf.Variable(tf.random.uniform(s, l[0], l[-1], dtype=tf.float32))
        for s, l in zip(shps, lmts)
    ]
    dp_tensors = [dp.Tensor(na.numpy()) for na in tf_arrays]

    # Run the deepops op and backpropagate through it.
    if pass_list:
        out = deepops_fxn(dp_tensors)
    else:
        out = deepops_fxn(*dp_tensors)
    out.backward()

    # Run the TensorFlow reference under a gradient tape.
    with tf.GradientTape() as gt:
        if pass_list:
            out_tf = tf_fxn(tf_arrays)
        else:
            out_tf = tf_fxn(*tf_arrays)
    tf_grads = gt.gradient(out_tf, tf_arrays)

    # Gradients from both frameworks must agree within tolerance.
    for dp_tensor, tf_grad in zip(dp_tensors, tf_grads):
        np.testing.assert_allclose(dp_tensor.grad,
                                   tf_grad.numpy(),
                                   atol=atol,
                                   rtol=rtol)
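
A hypothetical call site for `_helper_test_backward` (not part of the source), shown as a minimal sketch: it checks an elementwise multiply against TensorFlow's gradient. The shapes and limits are arbitrary, and the assumption that `dp.Tensor` supports `*` comes from the loss example further down.

# Sketch only: exercises the helper on y = a * b with scalar-shaped inputs.
_helper_test_backward(
    shps=[(1,), (1,)],
    lmts=[(-1.0, 1.0), (-1.0, 1.0)],
    tf_fxn=lambda a, b: a * b,
    deepops_fxn=lambda a, b: a * b,
)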
Example #4
def test_repr(self):
    a = np.asarray(np.random.uniform(0, 1, size=[1, 10000]),
                   dtype=np.float32)
    dp_a = dp.Tensor(a)
    repr_a = repr(dp_a)
    # The repr is expected to embed the shape as "shape: (...)".
    shape = re.search(r"shape: \((.*?)\)", repr_a).groups()[0]
    self.assertEqual("(" + shape + ")", str(a.shape))
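
To make the regex's contract concrete, here is a stand-alone illustration against a hypothetical repr string; the exact `dp.Tensor` repr layout is an assumption, and the test only relies on it containing a `shape: (...)` fragment.

import re

sample = "Tensor(shape: (1, 10000), dtype: float32)"  # hypothetical repr layout
print(re.search(r"shape: \((.*?)\)", sample).groups()[0])  # -> 1, 10000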
Example #5
import functools
import logging
import timeit

import numpy as np

import deepops as dp  # assumed import path for the `dp` alias


def _helper_test(shps, lmts, np_fxn, deepops_fxn, atol=1e-6, rtol=1e-6):
    # Build matching random inputs as NumPy arrays and dp.Tensor copies.
    np.random.seed(111)
    numpy_arrays = [
        np.asarray(np.random.uniform(l[0], l[-1], size=s), dtype=np.float32)
        for s, l in zip(shps, lmts)
    ]
    dp_tensors = [dp.Tensor(na) for na in numpy_arrays]

    # Time both implementations: average of 5 runs, reported in ms.
    deepop_fp = (
        timeit.Timer(functools.partial(deepops_fxn, *dp_tensors)).timeit(5) *
        1000 / 5)
    np_fp = (
        timeit.Timer(functools.partial(np_fxn, *numpy_arrays)).timeit(5) *
        1000 / 5)

    # Results from both implementations must agree within tolerance.
    out = deepops_fxn(*dp_tensors).data
    np_out = np_fxn(*numpy_arrays)
    np.testing.assert_allclose(out, np_out, atol=atol, rtol=rtol)

    logging.info(
        "Testing the speed || DeepOps %s took %.2f ms and NumPy took %.2f ms",
        deepops_fxn.__name__, deepop_fp, np_fp)
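
A hypothetical call site for `_helper_test` (not part of the source), comparing elementwise addition against NumPy; that `dp.Tensor` supports `+` is an assumption consistent with `sum(_loss)` in the next example.

# Sketch only: np.add is the NumPy reference; the dp lambda assumes
# Tensor + Tensor is defined.
_helper_test(
    shps=[(1, 10000), (1, 10000)],
    lmts=[(0, 1), (0, 1)],
    np_fxn=np.add,
    deepops_fxn=lambda a, b: a + b,
)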
Example #6
def loss(y, ypred):
    # Mean squared error: sum of squared residuals scaled by 1/len(y).
    # (The original divided by len(yb), which only worked by accident
    # through the training loop's global yb.)
    _loss = [(yb - ypb) * (yb - ypb) for yb, ypb in zip(y, ypred)]
    return sum(_loss) * dp.Tensor(1 / len(y))


sequential = StupidLittleModel()
batch_size = 20


for steps in range(1000):
    ri = np.random.permutation(X.shape[0])[:batch_size]
    xb = X[ri]
    yb = y[ri]
    xb = [list(map(dp.Tensor, x)) for x in xb]
    # forward
    y_pred_b = list(map(sequential.forward, xb))
    yb = [dp.Tensor(y) for y in yb]
    total_loss = loss(yb, y_pred_b)
    # backward
    sequential.init_backward()
    total_loss.backward()
    # mini optimizer
    learning_rate = 0.1
    for p in sequential.parameters():
        p._data -= learning_rate * p.grad
    # `steps % 1 == 0` is always true, so log every step.
    print(f"step {steps} loss {total_loss.data}")

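As a numeric sanity check on the scalar MSE loop above, the same loss can be written vectorised in plain NumPy; this mirrors the arithmetic only, not dp's autograd graph.

import numpy as np


def loss_np(y, ypred):
    # Vectorised MSE over float arrays; should match loss() numerically.
    y, ypred = np.asarray(y), np.asarray(ypred)
    return float(np.mean((y - ypred) ** 2))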