Example No. 1
# All of the examples on this page assume these imports.
import numpy as np
import oneflow as flow


def _test_scatter_nd(test_case, placement, sbp):
    indices = (
        flow.tensor(np.array([[1], [6], [4]]), dtype=flow.int)
        .to_global(flow.env.all_device_placement("cpu"), [flow.sbp.broadcast])
        .to_global(placement, sbp)
    )
    update = (
        flow.tensor(np.array([10.2, 5.1, 12.7]), dtype=flow.float)
        .to_global(flow.env.all_device_placement("cpu"), [flow.sbp.broadcast])
        .to_global(placement, sbp)
        .requires_grad_()
    )
    output = flow.scatter_nd(indices, update, [8])

    # forward: each update value lands at the slot named by its index row
    of_local = output.to_global(
        flow.env.all_device_placement("cpu"), [flow.sbp.broadcast]
    ).to_local()
    np_out = np.array([0.0, 10.2, 0.0, 0.0, 12.7, 0.0, 5.1, 0.0])
    test_case.assertTrue(np.allclose(of_local.numpy(), np_out, 1e-4, 1e-4))

    # backward: summing the output sends a gradient of 1 to every update element
    output.sum().backward()
    of_grad_local = update.grad.to_global(
        flow.env.all_device_placement("cpu"), [flow.sbp.broadcast]
    ).to_local()
    test_case.assertTrue(
        np.allclose(of_grad_local.numpy(), np.ones(3), 1e-4, 1e-4))
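This test checks that `flow.scatter_nd` produces the same forward values and the same all-ones gradient on `update` for whichever `placement`/`sbp` combination the caller passes in, by pulling both results back to a local broadcast view before comparing against NumPy.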
Example No. 2
def scatter_nd_fn(
    indices_def: oft.ListNumpy.Placeholder(indices_static_shape, dtype=flow.int32),
    updates_def: oft.ListNumpy.Placeholder(updates_static_shape, dtype=flow.float),
):
    # `indices_static_shape`, `updates_static_shape` and `params_shape` come
    # from the enclosing test's scope; `oft` is `oneflow.typing`.
    with flow.scope.placement("gpu", "0:0"):
        return flow.scatter_nd(indices_def, updates_def, params_shape)
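The function above is only a fragment; a minimal sketch of the legacy OneFlow 0.x lazy-API context it would typically run in might look like the following. The static shapes, the `func_config` settings, and the driver call at the end are illustrative assumptions, not taken from the source, and exact signatures may differ across 0.x versions:

import numpy as np
import oneflow as flow
import oneflow.typing as oft

flow.config.gpu_device_num(1)  # assumption: a single GPU

# ListNumpy placeholders require the mirrored logical view.
func_config = flow.FunctionConfig()
func_config.default_logical_view(flow.scope.mirrored_view())

# Hypothetical static shapes, chosen to match the other examples on this page.
indices_static_shape = (3, 1)
updates_static_shape = (3,)
params_shape = (8,)

@flow.global_function(function_config=func_config)
def scatter_nd_fn(
    indices_def: oft.ListNumpy.Placeholder(indices_static_shape, dtype=flow.int32),
    updates_def: oft.ListNumpy.Placeholder(updates_static_shape, dtype=flow.float),
):
    with flow.scope.placement("gpu", "0:0"):
        return flow.scatter_nd(indices_def, updates_def, params_shape)

# ListNumpy arguments are passed as one numpy array per device.
indices = np.array([[1], [6], [4]], dtype=np.int32)
updates = np.array([10.2, 5.1, 12.7], dtype=np.float32)
out = scatter_nd_fn([indices], [updates]).get().numpy_list()[0]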
Example No. 3
def _test_scatter_nd(test_case, device):
    indices = flow.tensor(np.array([[1], [6], [4]]),
                          dtype=flow.int,
                          device=flow.device(device))
    update = flow.tensor(np.array([10.2, 5.1, 12.7]),
                         dtype=flow.float,
                         device=flow.device(device))
    np_out = np.array([0.0, 10.2, 0.0, 0.0, 12.7, 0.0, 5.1, 0.0])
    output = flow.scatter_nd(indices, update, [8])
    test_case.assertTrue(np.allclose(output.numpy(), np_out, 0.0001, 0.0001))
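For reference, a plain NumPy sketch of what this `scatter_nd` call computes (an illustration, not OneFlow API): the output starts as zeros of the requested shape, and each update is written at the position named by the matching index row.

import numpy as np

out = np.zeros(8, dtype=np.float32)             # shape argument [8] -> zeros
out[np.array([1, 6, 4])] = [10.2, 5.1, 12.7]    # index row i places update i
# out -> [0.0, 10.2, 0.0, 0.0, 12.7, 0.0, 5.1, 0.0]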
Example No. 4
def do_scatter_nd(indices_blob, updates_blob):
    # `updates`, `shape`, `device_type` and `compare_fn` come from the
    # enclosing test's scope.
    with flow.scope.placement(device_type, "0:0"):
        x = flow.get_variable(
            "updates",
            shape=updates.shape,
            dtype=flow.float32,
            initializer=flow.constant_initializer(0),
        )
        x = flow.cast_to_current_logical_view(x)
        x = x + updates_blob
        y = flow.scatter_nd(indices_blob, x, shape)
        flow.losses.add_loss(y)
    flow.watch_diff(x, compare_fn)
    return y
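In OneFlow's legacy lazy API, `flow.losses.add_loss(y)` marks `y` as a loss so a backward pass is generated, and `flow.watch_diff(x, compare_fn)` hands the resulting gradient of `x` to the test's `compare_fn` callback for verification.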
Example No. 5
def do_scatter_nd(indices_blob, updates_blob):
    # `updates`, `shape`, `device_type` and `compare_fn` come from the
    # enclosing test's scope.
    with flow.scope.placement(device_type, "0:0"):
        x = flow.get_variable(
            "updates",
            shape=updates.shape,
            dtype=flow.float32,
            initializer=flow.constant_initializer(0),
        )
        x = flow.cast_to_current_logical_view(x)
        x = x + updates_blob
        y = flow.scatter_nd(indices_blob, x, shape)
        flow.optimizer.SGD(
            flow.optimizer.PiecewiseConstantScheduler([], [1e-3]), momentum=0
        ).minimize(y)
    flow.watch_diff(x, compare_fn)
    return y
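This is the same test body as Example No. 4, except that the `flow.losses.add_loss(y)` call is replaced by an explicit SGD optimizer (constant 1e-3 learning rate, zero momentum) minimizing `y`, which is the later, optimizer-based way to trigger the backward pass in the legacy API.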
Example No. 6
def _test_scatter_nd_backward(test_case, device):
    indices = flow.tensor(np.array([[1], [6], [4]]),
                          dtype=flow.int,
                          device=flow.device(device))
    of_update = flow.tensor(
        np.array([10.2, 5.1, 12.7]),
        requires_grad=True,
        dtype=flow.float,
        device=flow.device(device),
    )
    np_out = np.array([0.0, 10.2, 0.0, 0.0, 12.7, 0.0, 5.1, 0.0])
    np_grad = np.array([1.0, 1.0, 1.0])
    output = flow.scatter_nd(indices, of_update, [8])
    out_sum = output.sum()
    out_sum.backward()
    test_case.assertTrue(np.allclose(output.numpy(), np_out, 0.0001, 0.0001))
    test_case.assertTrue(np.array_equal(of_update.grad.numpy(), np_grad))
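Because `out_sum` is the sum of all output entries and each element of `of_update` is copied into exactly one output slot, the gradient flowing back to every update element is 1, matching `np_grad`.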
Example No. 7
def _test_scatter_nd_t(test_case, device):
    indices = flow.tensor(np.array([[0], [4], [2]]),
                          dtype=flow.int,
                          device=flow.device(device))
    update = flow.tensor(
        np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3]]),
        dtype=flow.float,
        device=flow.device(device),
    )
    np_out = np.array([
        [1.0, 1.0, 1.0],
        [0.0, 0.0, 0.0],
        [3.0, 3.0, 3.0],
        [0.0, 0.0, 0.0],
        [2.0, 2.0, 2.0],
    ])
    output = flow.scatter_nd(indices, update, [5, 3])
    test_case.assertTrue(np.allclose(output.numpy(), np_out, 0.0001, 0.0001))
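With output shape `[5, 3]` and index rows of length 1, each index selects an entire output row, so the three update rows are copied to rows 0, 4 and 2. An illustrative NumPy equivalent (not OneFlow API):

import numpy as np

out = np.zeros((5, 3), dtype=np.float32)
out[np.array([0, 4, 2])] = np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3]], dtype=np.float32)
# rows 1 and 3 remain zero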
Example No. 8
    def __call__(self, x, padding=None):
        # Retrieve dynamically known shapes
        batch_size = x.shape[0]
        length = x.shape[1]

        if padding is not None:
            with flow.scope.namespace("remove_padding"):
                # Flatten padding to [batch_size*length]
                pad_mask = flow.reshape(padding, [-1])

                # Indices of the non-padded positions
                # (TF equivalent: tf.to_int32(tf.where(pad_mask < 1e-9))).
                nonpad_ids = flow.cast(flow.where(pad_mask < 1e-9), dtype=flow.int32)

                # Reshape x to [batch_size*length, hidden_size] to remove padding
                x = flow.reshape(x, [-1, self.hidden_size])
                x = flow.gather_nd(x, indices=nonpad_ids)

                # Reshape x from 2 dimensions back to 3 dimensions.
                # TODO: there may be a batch-axis error here.
                x = flow.expand_dims(x, axis=0)

        output = self._build_dense(x, self.filter_size, name="filter_layer")
        if self.train:
            # In TensorFlow the parameter is `keep_prob`, so TF passes `1 - dropout`;
            # here `relu_dropout` is the drop rate itself, so it is passed directly.
            output = flow.nn.dropout(output, self.relu_dropout)
        if padding is not None:
            with flow.scope.namespace("re_add_padding"):
                output = flow.squeeze(output, axis=[0])
                output = flow.scatter_nd(
                    indices=nonpad_ids,
                    updates=output,
                    shape=[batch_size * length, self.hidden_size]
                )
                output = flow.reshape(output, [batch_size, length, self.hidden_size])
        return output
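Here `gather_nd` and `scatter_nd` implement a padding round trip: `gather_nd` drops the padded positions so the dense layers only process real tokens, and `scatter_nd` writes the results back to their original flattened positions (padded slots stay zero) before the final reshape restores `[batch_size, length, hidden_size]`.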