Example #1
def _test_gather_backward(test_case, device):
    input = np.array([[1, 2], [3, 4]])
    index = np.array([[0, 0], [1, 0]])
    np_out = np.take_along_axis(input, index, 0)
    np_grad = _scatter_add_numpy(np.ones_like(np_out), 0, index, input.shape)
    of_input = flow.tensor(input,
                           dtype=flow.float32,
                           requires_grad=True,
                           device=flow.device(device))
    output = flow.gather(
        of_input,
        0,
        flow.tensor(index, dtype=flow.int, device=flow.device(device)),
    )
    out_sum = output.sum()
    out_sum.backward()
    test_case.assertTrue(np.array_equal(output.numpy(), np_out))
    test_case.assertTrue(np.array_equal(of_input.grad.numpy(), np_grad))
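The `_scatter_add_numpy` helper is defined elsewhere in the test file. A minimal sketch consistent with its use above (the backward of gather scatter-adds the upstream gradient back into the input's shape), assuming `numpy as np` is imported as in the examples; the original may differ:

def _scatter_add_numpy(src, dim, index, out_shape):
    # Hypothetical reconstruction: accumulate each src value into a zero
    # array of out_shape at the position selected by index along `dim`.
    out = np.zeros(out_shape, dtype=src.dtype)
    for idx in np.ndindex(*index.shape):
        pos = list(idx)
        pos[dim] = index[idx]
        out[tuple(pos)] += src[idx]
    return out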
Example #2
def _test_mean_negative_dim(test_case, shape, device):
    if len(shape) < 4:
        shape = (2, 3, 4, 5)
    input = flow.tensor(
        np.random.randn(*shape), dtype=flow.float32, device=flow.device(device)
    )
    of_out = flow.mean(input, dim=(-2, -1, -3))
    np_out = np.mean(input.numpy(), axis=(-2, -1, -3))
    test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 0.0001, 0.0001))
Example #3
def _test_scatter_nd_t(test_case, device):
    indices = flow.tensor(np.array([[0], [4], [2]]),
                          dtype=flow.int,
                          device=flow.device(device))
    update = flow.tensor(
        np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3]]),
        dtype=flow.float,
        device=flow.device(device),
    )
    np_out = np.array([
        [1.0, 1.0, 1.0],
        [0.0, 0.0, 0.0],
        [3.0, 3.0, 3.0],
        [0.0, 0.0, 0.0],
        [2.0, 2.0, 2.0],
    ])
    output = flow.scatter_nd(indices, update, [5, 3])
    test_case.assertTrue(np.allclose(output.numpy(), np_out, 0.0001, 0.0001))
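`flow.scatter_nd(indices, update, shape)` writes each row of `update` into a zero tensor of the given shape at the row positions named by `indices`. For non-overlapping indices, the expected output can be reproduced in plain NumPy (a reference sketch, not part of the original test):

np_ref = np.zeros((5, 3), dtype=np.float32)
np_ref[[0, 4, 2]] = np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3]], dtype=np.float32)
# np_ref now equals np_out above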
Example #4
def _test_argmax_axis_positive(test_case, device):
    input = flow.tensor(np.random.randn(2, 6, 5, 3),
                        dtype=flow.float32,
                        device=flow.device(device))
    axis = 1
    of_out = flow.argmax(input, dim=axis)
    np_out = np.argmax(input.numpy(), axis=axis)
    test_case.assertTrue(
        np.array_equal(of_out.numpy().flatten(), np_out.flatten()))
Example #5
def _test_argwhere(test_case, shape, device):
    np_input = np.random.randn(*shape)
    input = flow.tensor(np_input,
                        dtype=flow.float32,
                        device=flow.device(device))
    of_out = flow.argwhere(input)
    np_out = np.argwhere(np_input)
    test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 0.0001, 0.0001))
    test_case.assertTrue(np.array_equal(of_out.numpy().shape, np_out.shape))
Example #6
def test_glu_scalar_tensor_runtime_error(test_case):
    with test_case.assertRaises(Exception) as context:
        x = flow.tensor(1.0)
        m = flow.nn.GLU()
        y = m(x)
    test_case.assertTrue(
        "glu does not support scalars because halving size must be even"
        in str(context.exception)
    )
Example #7
def inplace_mul_tensors_helper(test_case, device, arr_0, arr_y):
    of_x = flow.tensor(
        arr_0, dtype=flow.float32, device=flow.device(device), requires_grad=True,
    )
    of_inplace_x = of_x + 1
    of_y = flow.tensor(
        arr_y, dtype=flow.float32, device=flow.device(device), requires_grad=True,
    )
    id_inplace_x = id(of_inplace_x)
    of_inplace_x.mul_(of_y)
    test_case.assertTrue(
        np.allclose(of_inplace_x.numpy(), np.multiply(arr_0 + 1, arr_y), 1e-05, 1e-05)
    )
    test_case.assertTrue(id_inplace_x == id(of_inplace_x))
    of_inplace_x = of_inplace_x.sum()
    of_inplace_x.backward()
    test_case.assertTrue(np.allclose(arr_y, of_x.grad.numpy(), 1e-05, 1e-05))
    test_case.assertTrue(np.allclose(arr_0 + 1, of_y.grad.numpy(), 1e-05, 1e-05))
Example #8
def _test_type_tensortype(test_case, tensortype_dict, shape, device, dtype,
                          tgt_tensortype):
    # test tensor.type(x: tensortype) rather than tensor.type_tensortype
    np_input = np.random.rand(*shape)
    input = flow.tensor(np_input, dtype=dtype, device=device)
    input = input.type(tgt_tensortype)
    tgt_dtype, tgt_device = tensortype_dict[tgt_tensortype]
    test_case.assertEqual(input.dtype, tgt_dtype)
    test_case.assertEqual(input.device, tgt_device)
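The `tensortype_dict` fixture is passed in by the caller and is not shown. A plausible shape for it, offered here as an assumption (OneFlow mirrors PyTorch's legacy tensor-type names), maps each tensor type to its expected (dtype, device) pair:

# Hypothetical fixture; the original dictionary may differ.
tensortype_dict = {
    flow.FloatTensor: (flow.float32, flow.device("cpu")),
    flow.DoubleTensor: (flow.float64, flow.device("cpu")),
    flow.cuda.FloatTensor: (flow.float32, flow.device("cuda:0")),
    flow.cuda.DoubleTensor: (flow.float64, flow.device("cuda:0")),
}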
Example #9
def __init__(self, num_patches, emb_dim, dropout_rate=0.1):
    super(PositionEmbs, self).__init__()
    self.pos_embedding = nn.Parameter(
        flow.tensor(np.random.randn(1, num_patches + 1, emb_dim),
                    dtype=flow.float32))
    if dropout_rate > 0:
        self.dropout = nn.Dropout(dropout_rate)
    else:
        self.dropout = None
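Only `__init__` is shown here. A matching `forward`, sketched under the usual ViT convention that the module broadcast-adds the learned embedding and then applies dropout (not taken from the original source):

def forward(self, x):
    # Hypothetical forward pass for PositionEmbs.
    out = x + self.pos_embedding
    if self.dropout is not None:
        out = self.dropout(out)
    return out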
Example #10
def _test_softplus_threshold(test_case, device):
    m = flow.nn.Softplus(beta=1.11, threshold=1.55)
    arr = np.random.randn(2, 3, 4, 5)
    np_out = numpy_softplus(arr, 1.11, 1.55)
    x = flow.tensor(arr, device=flow.device(device))
    of_out = m(x)
    test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-05, 1e-05))
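`numpy_softplus` is defined elsewhere in the test file. Consistent with `nn.Softplus(beta, threshold)` semantics, which revert to the identity where `beta * x` exceeds `threshold` for numerical stability, it presumably reads:

def numpy_softplus(x, beta, threshold):
    # Reference softplus with the same threshold short-circuit as nn.Softplus.
    return np.where(x * beta > threshold, x,
                    1.0 / beta * np.log(1.0 + np.exp(beta * x)))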
Example #11
def test_tensor_detach(test_case):
    shape = (2, 3, 4, 5)
    x = flow.tensor(np.random.randn(*shape), dtype=flow.float32, requires_grad=True)
    test_case.assertTrue(np.allclose(x.detach().numpy(), x.numpy(), 0.0001, 0.0001))
    test_case.assertEqual(x.detach().requires_grad, False)
    y = x * 2
    z = y.detach()
    test_case.assertEqual(z.is_leaf, True)
    test_case.assertEqual(z.grad_fn, None)
Example #12
def _test_nms(test_case, placement, sbp):
    iou = 0.5
    boxes, scores = create_tensors_with_iou(800, iou)

    global_boxes = flow.tensor(boxes, dtype=flow.float32).to_global(
        placement=flow.env.all_device_placement("cpu"), sbp=flow.sbp.broadcast)
    np_boxes = global_boxes.numpy()
    global_boxes = global_boxes.to_global(placement=placement, sbp=sbp)

    global_scores = flow.tensor(scores, dtype=flow.float32).to_global(
        placement=flow.env.all_device_placement("cpu"), sbp=flow.sbp.broadcast)
    np_scores = global_scores.numpy()
    global_scores = global_scores.to_global(placement=placement, sbp=sbp)

    keep_np = nms_np(np_boxes, np_scores, iou)

    keep = flow.nms(global_boxes, global_scores, iou)
    test_case.assertTrue(np.allclose(keep.numpy(), keep_np))
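Neither `create_tensors_with_iou` nor `nms_np` is shown here. The latter is a standard greedy NMS reference; a NumPy sketch consistent with the call signature above, assuming boxes in (x1, y1, x2, y2) format:

def nms_np(boxes, scores, iou_threshold):
    # Hypothetical reference: repeatedly keep the highest-scoring box and
    # drop the remaining boxes whose IoU with it exceeds the threshold.
    x1, y1, x2, y2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
    areas = (x2 - x1) * (y2 - y1)
    order = scores.argsort()[::-1]
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        inter = np.maximum(0.0, xx2 - xx1) * np.maximum(0.0, yy2 - yy1)
        iou = inter / (areas[i] + areas[order[1:]] - inter)
        order = order[1:][iou <= iou_threshold]
    return np.array(keep)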
Example #13
def test_construct_from_numpy_or_list(test_case):
    shape = (2, 3, 4, 5)
    np_arr = np.random.rand(*shape).astype(np.float32)
    tensor = flow.tensor(np_arr)
    test_case.assertTrue(np.allclose(tensor.numpy(), np_arr))
    np_int_arr = np.random.randint(-100,
                                   high=100,
                                   size=shape,
                                   dtype=np.int32)
    tensor = flow.tensor(np_int_arr, dtype=flow.int32)
    test_case.assertEqual(tensor.dtype, flow.int32)
    test_case.assertTrue(np_arr.flags["C_CONTIGUOUS"])
    test_case.assertTrue(np.allclose(tensor.numpy(), np_int_arr))
    np_arr = np.random.random((1, 256, 256, 3)).astype(np.float32)
    np_arr = np_arr.transpose(0, 3, 1, 2)
    tensor = flow.tensor(np_arr)
    test_case.assertFalse(np_arr.flags["C_CONTIGUOUS"])
    test_case.assertTrue(np.allclose(tensor.numpy(), np_arr))
Example #14
def test_tensor_slice(test_case):
    x = np.random.randn(2, 3, 4, 5).astype(np.float32)
    input = flow.tensor(x)
    test_case.assertTrue(np.allclose(input[0].numpy(), x[0], 1e-05, 1e-05))
    test_case.assertTrue(np.allclose(input[1].numpy(), x[1], 1e-05, 1e-05))
    test_case.assertTrue(
        np.allclose(input[0, :].numpy(), x[0, :], 1e-05, 1e-05))
    test_case.assertTrue(
        np.allclose(input[0, :, 0:2].numpy(), x[0, :, 0:2], 1e-05, 1e-05))
Example #15
def _test_tensor_argmin(test_case, device):
    input = flow.tensor(
        np.random.randn(2, 6, 5, 3), dtype=flow.float32, device=flow.device(device)
    )
    axis = 0
    of_out = input.argmin(dim=axis)
    np_out = np.argmin(input.numpy(), axis=axis)
    test_case.assertTrue(np.array_equal(of_out.numpy().shape, np_out.shape))
    test_case.assertTrue(np.array_equal(of_out.numpy().flatten(), np_out.flatten()))
Example #16
def _LoadSingleVariable(
        path: Optional[str],
        consistent_src_rank: Optional[int] = None) -> "flow.Tensor":
    if consistent_src_rank is not None:
        rank = flow.env.get_rank()
        if rank == consistent_src_rank:
            assert isinstance(path, str)
            file_backed_blob = FileBackendVariableBlob(path)
            loaded = flow.tensor(file_backed_blob.numpy(),
                                 dtype=file_backed_blob.dtype).to("cuda")
        else:
            loaded = flow.tensor([]).to("cuda")
        loaded = loaded.to_consistent(
            flow.placement("cuda", [consistent_src_rank]), flow.sbp.broadcast)
        return loaded

    assert isinstance(path, str)
    return flow.tensor(FileBackendVariableBlob(path).numpy())
Example #17
def _test_less_equal_int_scalar(test_case, device):
    np_arr = np.random.randn(2, 3, 4, 5)
    input1 = flow.tensor(np_arr,
                         dtype=flow.float32,
                         device=flow.device(device))
    input2 = 1
    of_out = input1 <= input2
    np_out = np.less_equal(np_arr, input2)
    test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))
Example #18
def _test_greater_equal_float_scalar(test_case, device):
    np_arr = np.random.randn(3, 2, 5, 7)
    input1 = flow.tensor(np_arr,
                         dtype=flow.float32,
                         device=flow.device(device))
    input2 = 2.3
    of_out = input1 >= input2
    np_out = np.greater_equal(np_arr, input2)
    test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))
Example #19
def _test_squeeze_backward(test_case, device):
    np_arr = np.random.rand(1, 1, 1, 3)
    input = flow.tensor(
        np_arr, dtype=flow.float32, device=flow.device(device), requires_grad=True
    )
    y = flow.squeeze(input, dim=1).sum()
    y.backward()
    np_grad = np.ones((1, 1, 1, 3))
    test_case.assertTrue(np.array_equal(input.grad.numpy(), np_grad))
Example #20
def test_stateful_local_kernel_in_consistent_mode(test_case):
    rank = int(os.getenv("RANK"))

    x = flow.tensor(np.array([1, 2]) * (rank + 1)).to("cuda")
    x = x.to_consistent(flow.placement("cuda", {0: range(2)}),
                        flow.sbp.split(0))

    y = flow.tensor([3, 4, 5]).to("cuda")
    y = y.to_consistent(flow.placement("cuda", {0: range(2)}),
                        flow.sbp.broadcast)

    # logical slice assign op needs sbp and logical shape from stateful local opkernel
    x[:3] = y

    x = x.to_consistent(sbp=flow.sbp.broadcast)

    test_case.assertTrue(
        np.array_equal(x.to_local().numpy(), np.array([3, 4, 5, 4])))
Example #21
def _test_expand_flow_size(test_case, device):
    input_shape = (2, 4, 1, 32)
    expand_dim = flow.Size([2, 4, 2, 32])
    input, gout, out_np, gin_np = _np_get_expand(input_shape, expand_dim)
    of_input = flow.tensor(input, dtype=flow.int, device=flow.device(device))
    of_out = of_input.expand(expand_dim)

    test_case.assertTrue(
        np.array_equal(of_out.numpy(), out_np.astype(np.int32)))
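`_np_get_expand` (also used by Examples #29 and #30 below) is not shown. A sketch consistent with its four outputs, assuming input and target shapes have equal rank as in these tests:

def _np_get_expand(input_shape, expand_dim):
    # Hypothetical reconstruction: random input, its broadcast to expand_dim
    # (-1 keeps the input size), a random upstream gradient, and the input
    # gradient obtained by summing that gradient over the expanded axes.
    input = np.random.random(size=input_shape).astype(np.float32)
    out_shape = [s if d == -1 else d for s, d in zip(input_shape, expand_dim)]
    out_np = np.broadcast_to(input, out_shape)
    gout = np.random.random(size=out_shape).astype(np.float32)
    gin_np = gout
    for axis in range(len(input_shape)):
        if input_shape[axis] == 1 and out_shape[axis] != 1:
            gin_np = gin_np.sum(axis=axis, keepdims=True)
    return input, gout, out_np, gin_np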
Example #22
def train_one_iter(grad):
    grad_tensor = flow.tensor(grad,
                              requires_grad=False,
                              device=flow.device(device))
    loss = flow.sum(x * grad_tensor)
    loss.backward()
    adagrad.clip_grad()
    adagrad.step()
    adagrad.zero_grad()
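`x`, `adagrad`, and `device` are closed over from the enclosing test function, which is not shown. A hypothetical setup consistent with the calls above (parameter shape, learning rate, and clip settings are assumptions, not the original values):

device = "cpu"
x = flow.nn.Parameter(flow.tensor(np.random.randn(10),
                                  dtype=flow.float32,
                                  device=flow.device(device)))
adagrad = flow.optim.Adagrad(
    [{"params": [x], "lr": 0.1,
      "clip_grad_max_norm": 1.0, "clip_grad_norm_type": 2.0}])
train_one_iter(np.ones(10))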
Example #23
def predict(model, text):
    model.eval()
    text = flow.tensor(text).to("cuda")
    text = text.unsqueeze(0)  # unsqueeze is not in-place; keep the batched result
    logits = model(text)
    logits = flow.softmax(logits)
    label = flow.argmax(logits)

    return label.numpy(), logits.numpy()
Example #24
def _test_sum_impl(test_case, device, data_type):
    if device == "cpu" and data_type == flow.float16:
        return
    input = flow.tensor(np.random.randn(2, 3) - 0.5,
                        dtype=data_type,
                        device=flow.device(device))
    of_out = flow.sum(input, dim=0)
    np_out = np.sum(input.numpy(), axis=0)
    test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-05, 1e-05))
    input = flow.tensor(np.random.randn(2, 3),
                        dtype=data_type,
                        device=flow.device(device))
    of_out = flow.sum(input, dim=0)
    np_out = np.sum(input.numpy(), axis=0)
    test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-05, 1e-05))
    input = flow.tensor(np.random.randn(2, 3),
                        dtype=data_type,
                        device=flow.device(device))
    of_out = flow.sum(input, dim=1)
    of_out2 = input.sum(dim=1)
    np_out = np.sum(input.numpy(), axis=1)
    test_case.assertTrue(
        np.allclose(of_out2.numpy(), of_out.numpy(), 1e-05, 1e-05))
    test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-05, 1e-05))
    input = flow.tensor(
        np.random.randn(4, 5, 6) - 0.5,
        dtype=data_type,
        device=flow.device(device),
        requires_grad=True,
    )
    of_out = flow.sum(input, dim=(2, 1))
    np_out = np.sum(input.numpy(), axis=(2, 1))
    test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-05, 1e-05))
    of_out = of_out.sum()
    of_out.backward()
    np_grad = np.ones((4, 5, 6))
    test_case.assertTrue(np.allclose(input.grad.numpy(), np_grad, 1e-05,
                                     1e-05))

    # For 0-dim tensor test
    input = flow.tensor(1.0)
    of_out = input.sum()
    test_case.assertTrue(
        np.allclose(input.numpy(), of_out.numpy(), 1e-05, 1e-05))
Example #25
def _test_convtranspose1d_group_large_in_channel(test_case, device):
    np_arr = np.array([
        [
            [-0.3939792, -0.34989742, 0.15775536],
            [0.927185, 0.25040535, -1.22738067],
            [-0.2187831, -0.24346108, -0.07109655],
            [-1.55353756, -0.37241986, 0.59579139],
        ],
        [
            [-0.01818884, -1.34408642, 1.31260516],
            [0.52124192, 0.52142919, 1.40499944],
            [0.7410308, 1.93069512, 0.25694943],
            [-0.30531658, 0.24990326, -0.9493729],
        ],
    ])
    weight = np.ones((4, 1, 3))
    test_out_data = np.array([
        [
            [0.5332058, 0.43371373, -0.6359115, -1.1691173, -1.0696253],
            [-1.7723207, -2.3882017, -1.8635068, -0.09118611, 0.52469486],
        ],
        [
            [0.50305307, -0.31960416, 2.3980005, 1.8949474, 2.7176046],
            [0.43571424, 2.6163127, 1.9238893, 1.488175, -0.69242346],
        ],
    ])
    test_out_grad = np.array([
        [[3.0, 3.0, 3.0], [3.0, 3.0, 3.0], [3.0, 3.0, 3.0], [3.0, 3.0, 3.0]],
        [[3.0, 3.0, 3.0], [3.0, 3.0, 3.0], [3.0, 3.0, 3.0], [3.0, 3.0, 3.0]],
    ])
    input_flow = flow.tensor(np_arr,
                             dtype=flow.float32,
                             device=flow.device(device),
                             requires_grad=True)
    m_f = nn.ConvTranspose1d(4, 2, 3, stride=1, groups=2, bias=False)
    m_f.weight.data = flow.tensor(weight, dtype=flow.float32)
    m_f = m_f.to(device)
    out_flow = m_f(input_flow)
    test_case.assertTrue(
        np.allclose(out_flow.numpy(), test_out_data, 1e-06, 1e-06))
    out_flow = out_flow.sum()
    out_flow.backward()
    test_case.assertTrue(
        np.allclose(input_flow.grad.numpy(), test_out_grad, 1e-06, 1e-06))
Example #26
def test_slice_update_graph(test_case):
    x = np.array([1, 1, 1, 1, 1]).astype(np.float32)
    input = flow.tensor(x, requires_grad=True)
    update = flow.tensor(np.array([2, 3, 4]).astype(np.float32),
                         requires_grad=True)
    output = np.array([1.0, 2.0, 3.0, 4.0, 1.0])

    class TestModule(flow.nn.Module):
        def __init__(self):
            super().__init__()
            self.weight = flow.nn.Parameter(flow.Tensor(x))

        def forward(self, x, update):
            flow._C.slice_update(x, update, [1], [4], [1], inplace=True)
            y = x + self.weight
            return x, y

    test_m = TestModule()
    of_sgd = flow.optim.SGD(test_m.parameters(), lr=0.001, momentum=0.9)

    class TestSliceUpdateGraph(flow.nn.Graph):
        def __init__(self):
            super().__init__()
            self.m = test_m
            self.add_optimizer(of_sgd)

        def build(self, x, update):
            x, y = self.m(x, update)
            z = y.sum()
            z.backward()
            return x

    slice_update_g = TestSliceUpdateGraph()

    y = slice_update_g(input, update)
    test_case.assertTrue(np.array_equal(y.numpy(), output))
Example #27
def _test_convtranspose1d_group_bias_true(test_case, device):
    np_arr = np.array(
        [
            [
                [-0.77808793, 0.99824008, 0.57340066],
                [1.46278707, -0.65234252, -1.13087643],
            ],
            [
                [0.76053973, 0.62332447, -1.17157106],
                [0.60291466, -0.0472167, 0.89986403],
            ],
        ]
    )
    weight = np.ones((2, 1, 3))
    bias = np.array([0.32546719, 0.14995032])
    test_out_data = np.array(
        [
            [
                [-0.45262071, 0.54561937, 1.11902, 1.897108, 0.89886785],
                [1.6127374, 0.96039486, -0.1704815, -1.6332686, -0.9809261],
            ],
            [
                [1.0860069, 1.7093314, 0.5377604, -0.22277936, -0.8461038],
                [0.75286496, 0.70564824, 1.6055121, 1.0025976, 1.0498143],
            ],
        ]
    )
    test_out_grad = np.array(
        [[[3.0, 3.0, 3.0], [3.0, 3.0, 3.0]], [[3.0, 3.0, 3.0], [3.0, 3.0, 3.0]]]
    )
    input_flow = flow.tensor(
        np_arr, dtype=flow.float32, device=flow.device(device), requires_grad=True
    )
    m_f = nn.ConvTranspose1d(2, 2, 3, stride=1, groups=2, bias=True)
    m_f.weight.data = flow.tensor(weight, dtype=flow.float32)
    m_f.bias = nn.Parameter(flow.Tensor(bias))
    m_f = m_f.to(device)
    out_flow = m_f(input_flow)
    test_case.assertTrue(np.allclose(out_flow.numpy(), test_out_data, 1e-06, 1e-06))
    out_flow = out_flow.sum()
    out_flow.backward()
    test_case.assertTrue(
        np.allclose(input_flow.grad.numpy(), test_out_grad, 1e-06, 1e-06)
    )
Example #28
def _test_basic_slice(test_case, numpy_x):
    x = flow.tensor(numpy_x)

    test_case.assertTrue(np.allclose(numpy_x[1], x[1].numpy()))
    test_case.assertTrue(np.allclose(numpy_x[-2], x[-2].numpy()))

    test_case.assertTrue(np.allclose(numpy_x[0, 1], x[0, 1].numpy()))
    test_case.assertTrue(np.allclose(numpy_x[(0, 1)], x[(0, 1)].numpy()))
    test_case.assertTrue(np.allclose(numpy_x[((0, 1))], x[((0, 1))].numpy()))

    test_case.assertTrue(np.allclose(numpy_x[None], x[None].numpy()))
    test_case.assertTrue(np.allclose(numpy_x[True], x[True].numpy()))
    test_case.assertTrue(np.allclose(numpy_x[1, None], x[1, None].numpy()))
    test_case.assertTrue(np.allclose(numpy_x[1, None, 1], x[1, None, 1].numpy()))
    test_case.assertTrue(
        np.allclose(numpy_x[1, None, None, 1], x[1, None, None, 1].numpy())
    )

    test_case.assertTrue(np.allclose(numpy_x[:], x[:].numpy()))
    test_case.assertTrue(np.allclose(numpy_x[:1], x[:1].numpy()))
    test_case.assertTrue(np.allclose(numpy_x[0:1], x[0:1].numpy()))
    test_case.assertTrue(np.allclose(numpy_x[-2:-1], x[-2:-1].numpy()))
    test_case.assertTrue(np.allclose(numpy_x[2:100:200], x[2:100:200].numpy()))

    test_case.assertTrue(np.allclose(numpy_x[0:2, ...], x[0:2, ...].numpy()))
    test_case.assertTrue(np.allclose(numpy_x[0:2, ..., 1], x[0:2, ..., 1].numpy()))
    test_case.assertTrue(
        np.allclose(numpy_x[0:2, ..., 1, 1], x[0:2, ..., 1, 1].numpy())
    )

    test_case.assertTrue(np.allclose(numpy_x[0:4:2, ...], x[0:4:2, ...].numpy()))
    test_case.assertTrue(
        np.allclose(numpy_x[0:2, None, ..., True], x[0:2, None, ..., True].numpy())
    )
    test_case.assertTrue(
        np.allclose(numpy_x[None, ..., 0:4:2, True], x[None, ..., 0:4:2, True].numpy())
    )

    test_case.assertTrue(np.allclose(numpy_x[False, ...], x[False, ...].numpy()))
    test_case.assertTrue(
        np.allclose(numpy_x[False, True, ...], x[False, True, ...].numpy())
    )
    test_case.assertTrue(
        np.allclose(numpy_x[True, ..., False, True], x[True, ..., False, True].numpy())
    )
    test_case.assertTrue(
        np.allclose(
            numpy_x[True, None, ..., False, True],
            x[True, None, ..., False, True].numpy(),
        )
    )
    test_case.assertTrue(
        np.allclose(
            numpy_x[True, 1, ..., False, True], x[True, 1, ..., False, True].numpy()
        )
    )
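No driver for this helper appears in the snippet; it would typically be exercised with a random multi-dimensional array, e.g.:

# Hypothetical call site (not in the original file):
_test_basic_slice(test_case, np.random.randn(4, 5, 3, 2).astype(np.float32))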
Example #29
def _test_expand_same_dim(test_case, device):
    input_shape = (2, 4, 1, 32)
    expand_dim = [2, 4, 2, 32]
    input, gout, out_np, gin_np = _np_get_expand(input_shape, expand_dim)
    of_input = flow.tensor(input,
                           dtype=flow.float32,
                           device=flow.device(device))
    of_out = of_input.expand(2, 4, 2, 32)

    test_case.assertTrue(np.array_equal(of_out.numpy(), out_np))
Example #30
def _test_expand_same_dim_negative(test_case, device):
    input_shape = (1, 6, 5, 3)
    expand_dim = [4, -1, 5, 3]
    input, gout, out_np, gin_np = _np_get_expand(input_shape, expand_dim)
    of_input = flow.tensor(input,
                           dtype=flow.float32,
                           device=flow.device(device))
    of_out = of_input.expand(4, -1, 5, 3)

    test_case.assertTrue(np.array_equal(of_out.numpy(), out_np))