Example 1
def parameter(input_var,
              length,
              initializer=tf.zeros_initializer(),
              dtype=tf.float32,
              trainable=True,
              name="parameter"):
    """
    Parameter function that creates variables that can be
    broadcast to a certain shape to match the input variable.

    Args:
        input_var: Input tf.Tensor.
        length: Integer dimension of the variables.
        initializer: Initializer of the variables.
        dtype: Data type of the variables.
        trainable: Whether these variables are trainable.
        name: Variable scope of the variables.
    Return:
        A tensor of the broadcasted variables.
    """
    with tf.variable_scope(name):
        p = tf.get_variable("parameter",
                            shape=(length, ),
                            dtype=dtype,
                            initializer=initializer,
                            trainable=trainable)

        # Broadcast shape: the leading dimensions of input_var followed by
        # the variable's own dimension.
        ndim = input_var.get_shape().ndims
        broadcast_shape = tf.concat(
            axis=0, values=[tf.shape(input_var)[:ndim - 1], [length]])
        p_broadcast = broadcast_to(p, shape=broadcast_shape)
        return p_broadcast
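
A minimal usage sketch (graph-mode TF1; the placeholder and the assumption that broadcast_to is tf.broadcast_to are illustrative, not part of the original snippet):

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

broadcast_to = tf.broadcast_to  # helper assumed by the snippet above

obs = tf.placeholder(tf.float32, shape=(None, 4))    # a batch of inputs
log_std = parameter(obs, length=2, name="log_std")   # shape (batch, 2)
# Every row of log_std is the same length-2 variable, broadcast to the batch.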
Example 2
    def testMatMulBroadcast(self):
        with self.session() as sess:
            with ops.device("/device:IPU:0"):
                in0 = array_ops.placeholder(np.float16, shape=[1024])
                in0_bcast = gen_array_ops.broadcast_to(in0, shape=[1024, 1024])
                in1 = array_ops.placeholder(np.float16, shape=[1024, 1024])

                with variable_scope.variable_scope("vs", use_resource=True):
                    weights = variable_scope.get_variable(
                        "x",
                        dtype=np.float16,
                        shape=[1024, 1024],
                        initializer=init_ops.constant_initializer(0.0))

                mm1 = math_ops.matmul(in0_bcast, weights, name="mm1")
                mm2 = math_ops.matmul(in1, mm1, name="mm2")

            report = ReportJSON(self, sess)
            tu.move_variable_initialization_to_cpu()

            sess.run(variables.global_variables_initializer())

            report.reset()

            sess.run(mm2, {in0: np.zeros(in0.shape), in1: np.zeros(in1.shape)})

            report.parse_log()

            report.assert_total_tile_memory(112509300)
            report.assert_max_tile_memory(100438)

            ok = ['__seed*', 'host-exchange-local-copy-', 'mm1/dot*', 'Copy_']
            report.assert_all_compute_sets_and_list(ok)
Example 3
 def model(device):
     with ops.device(device):
         x = array_ops.placeholder(np.float32, shape=[2])
         x_bcast = gen_array_ops.broadcast_to(
             x, shape=[2, 256, 256, 2])
         w_bcast = gen_array_ops.broadcast_to(x, shape=[2, 2, 2, 2])
         y = nn.conv2d(x_bcast,
                       w_bcast,
                       strides=1,
                       padding="SAME",
                       name="a")
         y = nn.conv2d(y,
                       w_bcast,
                       strides=1,
                       padding="SAME",
                       name="b")
         # `sess` is assumed to come from the enclosing test scope.
         return sess.run(y, {x: np.ones(x.shape)})
Example 4
 def _get_one_input(data):
   # `shapes` and `dtypes` are lists provided by the enclosing scope.
   result = []
   for i in range(len(shapes)):
     result.append(
         math_ops.cast(
             gen_array_ops.broadcast_to(data, shape=shapes[i]),
             dtype=dtypes[i]))
   return result
Example 5
def gru(name,
        gru_cell,
        all_input_var,
        step_input_var,
        step_hidden_var,
        output_nonlinearity_layer,
        hidden_state_init=tf.zeros_initializer(),
        hidden_state_init_trainable=False):
    """
    Gated Recurrent Unit (GRU).

    Args:
        name (str): Name of the variable scope.
        gru_cell (tf.keras.layers.Layer): GRU cell used to generate
            outputs.
        all_input_var (tf.Tensor): Placeholder for entire time-series inputs.
        step_input_var (tf.Tensor): Placeholder for step inputs.
        step_hidden_var (tf.Tensor): Placeholder for step hidden state.
        output_nonlinearity_layer (callable): Activation function for the
            output dense layer. It should return a tf.Tensor. Set it to None
            to maintain a linear activation.
        hidden_state_init (callable): Initializer function for the
            initial hidden state. The function should return a tf.Tensor.
        hidden_state_init_trainable (bool): Bool for whether the initial
            hidden state is trainable.

    Return:
        outputs (tf.Tensor): Entire time-series outputs.
        output (tf.Tensor): Step output.
        hidden (tf.Tensor): Step hidden state.
        hidden_init_var (tf.Tensor): Initial hidden state.
    """
    with tf.variable_scope(name):
        hidden_dim = gru_cell.units
        output, [hidden] = gru_cell(step_input_var, states=[step_hidden_var])
        output = output_nonlinearity_layer(output)

        hidden_init_var = tf.get_variable(
            name='initial_hidden',
            shape=(hidden_dim, ),
            initializer=hidden_state_init,
            trainable=hidden_state_init_trainable,
            dtype=tf.float32)

        hidden_init_var_b = broadcast_to(
            hidden_init_var, shape=[tf.shape(all_input_var)[0], hidden_dim])

        def step(hprev, x):
            _, [h] = gru_cell(x, states=[hprev])
            return h

        # tf.scan iterates over the leading axis, so transpose the input to
        # (time, batch, input_dim) before unrolling, then transpose back.
        shuffled_input = tf.transpose(all_input_var, (1, 0, 2))
        hs = tf.scan(step, elems=shuffled_input, initializer=hidden_init_var_b)
        hs = tf.transpose(hs, (1, 0, 2))
        outputs = output_nonlinearity_layer(hs)

    return outputs, output, hidden, hidden_init_var
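
A minimal usage sketch (graph-mode TF1; the dimensions, the Dense output layer, and the assumption that gru_cell is a tf.keras.layers.GRUCell are illustrative):

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

steps, input_dim, hidden_dim = 8, 5, 32
cell = tf.keras.layers.GRUCell(hidden_dim)
all_inputs = tf.placeholder(tf.float32, (None, steps, input_dim))
step_input = tf.placeholder(tf.float32, (None, input_dim))
step_hidden = tf.placeholder(tf.float32, (None, hidden_dim))
out_layer = tf.keras.layers.Dense(4)

outputs, output, hidden, h0 = gru(
    "gru", cell, all_inputs, step_input, step_hidden, out_layer)
# outputs: (batch, steps, 4); output/hidden: one-step results for the step
# placeholders; h0: the (optionally trainable) initial hidden state.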
Example 6
  def _createSimpleDataset(self, num_elems, tmp_dir=None):
    if not tmp_dir:
      tmp_dir = self._makeSnapshotDirectory()

    dataset = dataset_ops.Dataset.from_tensor_slices([1.0])
    dataset = dataset.map(
        lambda x: gen_array_ops.broadcast_to(x, [50, 50, 3]))
    dataset = dataset.repeat(num_elems)
    dataset = dataset.apply(snapshot.snapshot(tmp_dir))

    return dataset
Example 7
  def testSpecifyShardSize(self):
    tmpdir = self.makeSnapshotDirectory()

    dataset = dataset_ops.Dataset.from_tensor_slices([1.0])
    dataset = dataset.map(lambda x: gen_array_ops.broadcast_to(x, [1024, 1024]))
    dataset = dataset.repeat(10)
    dataset = dataset.apply(
        snapshot.snapshot(tmpdir, shard_size_bytes=10 * 1024 * 1024))
    next_fn = self.getNext(dataset)

    for _ in range(10):
      self.evaluate(next_fn())

    # 10 elements x 4 MiB each (1024 * 1024 float32) = 40 MiB, which at a
    # 10 MiB shard size yields 4 shard files.
    self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 4)
Example 8
    def _createSimpleDataset(self,
                             num_elements,
                             tmp_dir=None,
                             compression=snapshot.COMPRESSION_NONE):
        if not tmp_dir:
            tmp_dir = self._makeSnapshotDirectory()

        dataset = dataset_ops.Dataset.from_tensor_slices([1.0])
        dataset = dataset.map(
            lambda x: gen_array_ops.broadcast_to(x, [50, 50, 3]))
        dataset = dataset.repeat(num_elements)
        dataset = dataset.apply(
            snapshot.legacy_snapshot(tmp_dir, compression=compression))

        return dataset
Example 9
  def testSpecifyShardSize(self, compression):
    tmpdir = self.snapshot_dir

    dataset = dataset_ops.Dataset.from_tensor_slices([1.0])
    dataset = dataset.map(lambda x: gen_array_ops.broadcast_to(x, [1024, 1024]))
    dataset = dataset.repeat(10)
    dataset = dataset.apply(
        snapshot.snapshot(
            tmpdir, shard_size_bytes=10 * 1024 * 1024, compression=compression))
    next_fn = self.getNext(dataset)

    for _ in range(10):
      self.evaluate(next_fn())

    num_files = 1
    if compression == snapshot.COMPRESSION_NONE:
      num_files = 3
    self.assertSnapshotDirectoryContains(tmpdir, 1, 1, num_files)
Example 10
            def model(x, y, z):
                # Broadcast the scalar z across all 65536 channels and reuse
                # the same tensor as the offset.
                scale = gen_array_ops.broadcast_to(z, shape=[65536])
                offset = scale
                b_mean, b_var = nn.moments(x, [0, 1, 2], name='moments')
                a = nn.fused_batch_norm(x,
                                        scale,
                                        offset,
                                        b_mean,
                                        b_var,
                                        1e-3,
                                        is_training=False,
                                        name="a")
                b = nn.fused_batch_norm(y,
                                        scale,
                                        offset,
                                        b_mean,
                                        b_var,
                                        1e-3,
                                        is_training=False,
                                        name="b")

                return a[0] + b[0]
Example 11
def parameter(input_var,
              length,
              initializer=tf.zeros_initializer(),
              dtype=tf.float32,
              trainable=True,
              name='parameter'):
    """
    Parameter layer.

    Used as a layer that can be broadcast to a certain shape to match
    the input variable during training.
    For example, a trainable parameter variable with shape (2,) needs to
    be broadcast to (32, 2) when applied to a batch of size 32.

    Args:
        input_var (tf.Tensor): Input tf.Tensor.
        length (int): Integer dimension of the variables.
        initializer (callable): Initializer of the variables. The function
            should return a tf.Tensor.
        dtype: Data type of the variables (default is tf.float32).
        trainable (bool): Whether these variables are trainable.
        name (str): Variable scope of the variables.

    Return:
        A tensor of the broadcasted variables.
    """
    with tf.variable_scope(name):
        p = tf.get_variable(
            'parameter',
            shape=(length, ),
            dtype=dtype,
            initializer=initializer,
            trainable=trainable)

        broadcast_shape = tf.concat(
            axis=0, values=[tf.shape(input_var)[:-1], [length]])
        p_broadcast = broadcast_to(p, shape=broadcast_shape)
        return p_broadcast
Example 12
def parameter(input_var,
              length,
              initializer=tf.zeros_initializer(),
              dtype=tf.float32,
              trainable=True,
              name="parameter"):
    """
    Parameter layer.

    Used as a layer that can be broadcast to a certain shape to match
    the input variable during training.
    For example, a trainable parameter variable with shape (2,) needs to
    be broadcast to (32, 2) when applied to a batch of size 32.

    Args:
        input_var: Input tf.Tensor.
        length: Integer dimension of the variables.
        initializer: Initializer of the variables.
        dtype: Data type of the variables.
        trainable: Whether these variables are trainable.
        name: Variable scope of the variables.

    Return:
        A tensor of the broadcasted variables.
    """
    with tf.variable_scope(name):
        p = tf.get_variable("parameter",
                            shape=(length, ),
                            dtype=dtype,
                            initializer=initializer,
                            trainable=trainable)

        ndim = input_var.get_shape().ndims
        broadcast_shape = tf.concat(
            axis=0, values=[tf.shape(input_var)[:ndim - 1], [length]])
        p_broadcast = broadcast_to(p, shape=broadcast_shape)
        return p_broadcast
Example 13
 def func(x):
     y_const = constant_op.constant([1., 2., 3.])
     y_broadcast = gen_array_ops.broadcast_to(y_const, [3, 3])
     return math_ops.matmul(x, y_broadcast)
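
A quick check of what the broadcast feeds into the matmul (eager-mode TF2 shown for brevity; the input values are illustrative):

import tensorflow as tf

y = tf.broadcast_to(tf.constant([1., 2., 3.]), [3, 3])
# y == [[1., 2., 3.], [1., 2., 3.], [1., 2., 3.]]
x = tf.ones([2, 3])
print(tf.matmul(x, y))  # every row is [3., 6., 9.]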
Example 14
def lstm(name,
         lstm_cell,
         all_input_var,
         step_input_var,
         step_hidden_var,
         step_cell_var,
         output_nonlinearity_layer,
         hidden_state_init=tf.zeros_initializer(),
         hidden_state_init_trainable=False,
         cell_state_init=tf.zeros_initializer(),
         cell_state_init_trainable=False):
    """
    Long Short-Term Memory (LSTM).

    Args:
        name (str): Name of the variable scope.
        lstm_cell (tf.keras.layers.Layer): LSTM cell used to generate
            outputs.
        all_input_var (tf.Tensor): Placeholder for entire time-series inputs.
        step_input_var (tf.Tensor): Placeholder for step inputs.
        step_hidden_var (tf.Tensor): Placeholder for step hidden state.
        step_cell_var (tf.Tensor): Placeholder for step cell state.
        output_nonlinearity_layer (callable): Activation function for the
            output dense layer. It should return a tf.Tensor. Set it to None
            to maintain a linear activation.
        hidden_state_init (callable): Initializer function for the
            initial hidden state. The function should return a tf.Tensor.
        hidden_state_init_trainable (bool): Bool for whether the initial
            hidden state is trainable.
        cell_state_init (callable): Initializer function for the
            initial cell state. The function should return a tf.Tensor.
        cell_state_init_trainable (bool): Bool for whether the initial
            cell state is trainable.

    Return:
        outputs (tf.Tensor): Entire time-series outputs.
        output (tf.Tensor): Step output.
        hidden (tf.Tensor): Step hidden state.
        cell (tf.Tensor): Step cell state.
        hidden_init_var (tf.Tensor): Initial hidden state.
        cell_init_var (tf.Tensor): Initial cell state.
    """
    with tf.variable_scope(name):
        hidden_dim = lstm_cell.units
        output, [hidden,
                 cell] = lstm_cell(step_input_var,
                                   states=(step_hidden_var, step_cell_var))
        output = output_nonlinearity_layer(output)

        hidden_init_var = tf.get_variable(
            name='initial_hidden',
            shape=(hidden_dim, ),
            initializer=hidden_state_init,
            trainable=hidden_state_init_trainable,
            dtype=tf.float32)
        cell_init_var = tf.get_variable(name='initial_cell',
                                        shape=(hidden_dim, ),
                                        initializer=cell_state_init,
                                        trainable=cell_state_init_trainable,
                                        dtype=tf.float32)

        hidden_init_var_b = broadcast_to(
            hidden_init_var, shape=[tf.shape(all_input_var)[0], hidden_dim])
        cell_init_var_b = broadcast_to(
            cell_init_var, shape=[tf.shape(all_input_var)[0], hidden_dim])

        def step(hcprev, x):
            # The scan state packs [h | c] along axis 1: split it, advance
            # the cell one step, then re-pack.
            hprev = hcprev[:, :hidden_dim]
            cprev = hcprev[:, hidden_dim:]
            h, c = lstm_cell(x, states=(hprev, cprev))[1]
            return tf.concat(axis=1, values=[h, c])

        shuffled_input = tf.transpose(all_input_var, (1, 0, 2))
        hcs = tf.scan(
            step,
            elems=shuffled_input,
            initializer=tf.concat(axis=1,
                                  values=[hidden_init_var_b, cell_init_var_b]),
        )
        hcs = tf.transpose(hcs, (1, 0, 2))
        hs = hcs[:, :, :hidden_dim]
        outputs = output_nonlinearity_layer(hs)

    return outputs, output, hidden, cell, hidden_init_var, cell_init_var
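
A quick sanity sketch of the state packing used in step above (plain TF ops; the shapes are illustrative):

import tensorflow as tf

hidden_dim = 4
h = tf.ones([2, hidden_dim])
c = tf.zeros([2, hidden_dim])
hc = tf.concat(axis=1, values=[h, c])            # shape (2, 8)
h2, c2 = hc[:, :hidden_dim], hc[:, hidden_dim:]  # recover the pair
# h2 == h and c2 == c: concat/slice round-trips the state losslessly.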
Example 15
def repeat_with_axis(data, repeats, axis, name=None):
  """Repeats elements of `data`.
  Args:
    data: An `N`-dimensional tensor.
    repeats: A 1-D integer tensor specifying how many times each element in
      `axis` should be repeated.  `len(repeats)` must equal `data.shape[axis]`.
      Supports broadcasting from a scalar value.
    axis: `int`.  The axis along which to repeat values.  Must be less than
      `max(N, 1)`.
    name: A name for the operation.
  Returns:
    A tensor with `max(N, 1)` dimensions.  Has the same shape as `data`,
    except that dimension `axis` has size `sum(repeats)`.
  Example usage:
  >>> repeat(['a', 'b', 'c'], repeats=[3, 0, 2], axis=0)
  <tf.Tensor: shape=(5,), dtype=string,
  numpy=array([b'a', b'a', b'a', b'c', b'c'], dtype=object)>
  >>> repeat([[1, 2], [3, 4]], repeats=[2, 3], axis=0)
  <tf.Tensor: shape=(5, 2), dtype=int32, numpy=
  array([[1, 2],
         [1, 2],
         [3, 4],
         [3, 4],
         [3, 4]], dtype=int32)>
  >>> repeat([[1, 2], [3, 4]], repeats=[2, 3], axis=1)
  <tf.Tensor: shape=(2, 5), dtype=int32, numpy=
  array([[1, 1, 2, 2, 2],
         [3, 3, 4, 4, 4]], dtype=int32)>
  """
  if not isinstance(axis, int):
    raise TypeError("axis must be an int; got %s" % type(axis).__name__)

  with ops.name_scope(name, "Repeat", [data, repeats]):
    data = ops.convert_to_tensor(data, name="data")
    repeats = tf.cast(repeats, tf.int32)  # assumes `import tensorflow as tf`
    repeats.shape.with_rank_at_most(1)

    # If `data` is a scalar, then upgrade it to a vector.
    data = _with_nonzero_rank(data)
    data_shape = array_ops.shape(data)

    # If `axis` is negative, then convert it to a positive value.
    axis = get_positive_axis(
        axis, len(data.shape.as_list()), ndims_name="rank(data)")
    # Check data Tensor shapes.
    if repeats.shape.ndims == 1:
      data.shape.dims[axis].assert_is_compatible_with(repeats.shape[0])

    # If we know that `repeats` is a scalar, then we can just tile & reshape.
    if repeats.shape.ndims == 0:
      expanded = array_ops.expand_dims(data, axis + 1)
      tiled = tile_one_dimension(expanded, axis + 1, repeats)
      result_shape = array_ops.concat(
          [data_shape[:axis], [-1], data_shape[axis + 1:]], axis=0)
      return array_ops.reshape(tiled, result_shape)

    # Broadcast the `repeats` tensor so rank(repeats) == axis + 1.
    if repeats.shape.ndims != axis + 1:
      repeats_shape = array_ops.shape(repeats)
      repeats_ndims = array_ops.rank(repeats)
      broadcast_shape = array_ops.concat(
          [data_shape[:axis + 1 - repeats_ndims], repeats_shape], axis=0)
      repeats = gen_array_ops.broadcast_to(repeats, broadcast_shape)
      repeats.set_shape([None] * (axis + 1))

    # Create a "sequence mask" based on `repeats`, where slices across `axis`
    # contain one `True` value for each repetition.  E.g., if
    # `repeats = [3, 1, 2]`, then `mask = [[1, 1, 1], [1, 0, 0], [1, 1, 0]]`.
    max_repeat = gen_math_ops.maximum(
        0, gen_math_ops._max(repeats, _all_dimensions(repeats)))
    mask = array_ops.sequence_mask(repeats, max_repeat)

    # Add a new dimension around each value that needs to be repeated, and
    # then tile that new dimension to match the maximum number of repetitions.
    expanded = array_ops.expand_dims(data, axis + 1)
    tiled = tile_one_dimension(expanded, axis + 1, max_repeat)

    # Use `boolean_mask` to discard the extra repeated values.  This also
    # flattens all dimensions up through `axis`.
    masked = array_ops.boolean_mask(tiled, mask)

    # Reshape the output tensor to add the outer dimensions back.
    if axis == 0:
      result = masked
    else:
      result_shape = array_ops.concat(
          [data_shape[:axis], [-1], data_shape[axis + 1:]], axis=0)
      result = array_ops.reshape(masked, result_shape)

    # Preserve shape information.
    if data.shape.ndims is not None:
      new_axis_size = 0 if repeats.shape[0] == 0 else None
      result.set_shape(data.shape[:axis].concatenate(
          [new_axis_size]).concatenate(data.shape[axis + 1:]))

    return result
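
A small illustration of the sequence-mask trick the comments above describe (eager-mode TF2 for brevity; the values mirror the `repeats = [3, 1, 2]` comment):

import tensorflow as tf

repeats = tf.constant([3, 1, 2])
mask = tf.sequence_mask(repeats, tf.reduce_max(repeats))
# mask == [[True, True, True], [True, False, False], [True, True, False]]
data = tf.constant([10, 20, 30])
tiled = tf.tile(tf.expand_dims(data, 1), [1, 3])
print(tf.boolean_mask(tiled, mask))  # [10 10 10 20 30 30]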