Example 1
def padding(input_tensor, padding_size, name="padding"):
    """Construct a tensor by padding a given tensor.

  Args:
    input_tensor: Input tensor.
    padding_size: A list in the format [dim0_begin, dim0_end, dim1_begin,
                  dim1_end, ...] that represents the number of values padded
                  at the beginning and end of each dimension. Note that the
                  order of dimensions must align with the data layout of
                  input_tensor.
    name: Name of the operator.

  Returns:
    A padded version of the input tensor.
  """
    src_dims = input_tensor.shape.dims
    if len(padding_size) != 2 * len(src_dims):
        raise ValueError(
            "len(padding_size) should be 2x input_tensor.shape.dims")
    output_tensor_dims = [0] * len(src_dims)
    for i in range(len(src_dims)):
        output_tensor_dims[i] = src_dims[i] + padding_size[
            2 * i] + padding_size[2 * i + 1]
    params = node_pb2.Params()
    params.padding_params.padding_size.extend(padding_size)
    return common.add_node(name=name,
                           op=types_pb2.Padding,
                           input_tensors=[input_tensor],
                           output_tensors_dims=[output_tensor_dims],
                           output_tensor_layout=input_tensor.shape.layout,
                           params=params)[0]
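
A minimal NumPy sketch of the shape arithmetic above; the interleaved begin/end list maps directly onto np.pad's pair-per-dimension format (the input shape and padding values here are illustrative only).

import numpy as np

# Hypothetical 4D NHWC input padded by one row and two columns on each side.
x = np.zeros((1, 4, 4, 3), dtype=np.float32)
padding_size = [0, 0, 1, 1, 2, 2, 0, 0]
# Same arithmetic as the loop above: dim_i + begin_i + end_i.
out_dims = [d + padding_size[2 * i] + padding_size[2 * i + 1]
            for i, d in enumerate(x.shape)]
# np.pad takes [(begin, end), ...] pairs, one per dimension.
padded = np.pad(x, list(zip(padding_size[0::2], padding_size[1::2])))
assert list(padded.shape) == out_dims  # [1, 6, 8, 3]
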
Example 2
def concat(input_tensors, axis=0, name="concat"):
    """Concatenate tensors into one.

  Args:
    input_tensors: Input tensors to be concatenated.
    axis: The dimension along which to concatenate.
    name: Name of the operator.

  Returns:
    A concatenated tensor.
  """
    dims = np.delete(input_tensors[0].shape.dims, axis)
    if not all([
            np.array_equal(np.delete(x.shape.dims, axis), dims)
            for x in input_tensors
    ]):
        raise ValueError(
            "Tensors must have the same shape, except in axis %d along which to "
            "concatenate." % axis)
    output_tensor_dims = list(input_tensors[0].shape.dims)
    output_tensor_dims[axis] = sum(x.shape.dims[axis] for x in input_tensors)
    params = node_pb2.Params()
    params.concat_params.concat_axis = axis
    return common.add_node(name=name,
                           op=types_pb2.Concat,
                           input_tensors=input_tensors,
                           output_tensors_dims=[output_tensor_dims],
                           output_tensor_layout=input_tensors[0].shape.layout,
                           params=params)[0]
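
For reference, the shape check and the resulting output dimensions can be reproduced with plain NumPy arrays standing in for the Tensor objects:

import numpy as np

a = np.ones((2, 3, 4), dtype=np.float32)
b = np.ones((2, 5, 4), dtype=np.float32)
axis = 1
# All non-concat dimensions must match, mirroring the check above.
assert np.array_equal(np.delete(a.shape, axis), np.delete(b.shape, axis))
out = np.concatenate([a, b], axis=axis)
assert out.shape == (2, 3 + 5, 4)
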
Example 3
def mat_mul(
    input_tensor, weight_tensor, activation=None, activation_params=None,
    name="mat_mul"):
  """Compute a matrix multiplication for `input_tensor` and `weight_tensor`.

  Args:
    input_tensor: A 2D `Tensor`. Shaped as `NC`, where `N` is batch size and `C`
      is number of channels.
    weight_tensor: A 2D `Tensor`. Shaped as `NC` or `CN`, where `N` is number of
      neurons and `C` is the same as in `input_tensor`.
    activation/activation_params: Activation function to use (optional).
    name: Operator name (optional).
  """
  input_tensor, weight_tensor = array_ops.check_and_add_layout_transform(
      name=name, op=types_pb2.InnerProduct,
      input_tensors=[input_tensor, weight_tensor])

  weight_layout = weight_tensor.shape.layout
  actIdx = 1 if weight_layout == types_pb2.NC else 0
  neuronIdx = 0 if weight_layout == types_pb2.NC else 1
  assert (len(input_tensor.shape.dims) == 2
          and len(weight_tensor.shape.dims) == 2
          and input_tensor.shape.dims[1] == weight_tensor.shape.dims[actIdx])
  output_tensor_dims = [
      input_tensor.shape.dims[0], weight_tensor.shape.dims[neuronIdx]
  ]
  params = node_pb2.Params()
  if activation is not None:
    params.act_params.CopyFrom(
        activation_ops.to_proto(activation, activation_params))
  return common.add_node(
      name=name, op=types_pb2.InnerProduct,
      input_tensors=[input_tensor, weight_tensor],
      output_tensors_dims=[output_tensor_dims],
      output_tensor_layout=types_pb2.NC, params=params)[0]
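
The actIdx/neuronIdx bookkeeping simply accounts for whether the weights are stored as (neurons, channels) or (channels, neurons); a NumPy sketch of the resulting output shape (random data, purely illustrative):

import numpy as np

batch, channels, neurons = 8, 16, 32
x = np.random.rand(batch, channels).astype(np.float32)       # NC input
w_nc = np.random.rand(neurons, channels).astype(np.float32)  # NC weights
w_cn = w_nc.T                                                 # CN weights
# NC weights: activation dim is index 1, neuron dim is index 0; CN swaps them.
y_nc = x @ w_nc.T
y_cn = x @ w_cn
assert y_nc.shape == y_cn.shape == (batch, neurons)
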
Example 4
def relu(input_tensor, name="relu"):
    """Rectified linear unit operator."""
    return common.add_node(name=name,
                           op=types_pb2.ReLU,
                           input_tensors=[input_tensor],
                           output_tensors_dims=[input_tensor.shape.dims],
                           output_tensor_layout=input_tensor.shape.layout)[0]
Example 5
def tanh(input_tensor, name="tanh"):
    """Tanh operator."""
    return common.add_node(name=name,
                           op=types_pb2.Tanh,
                           input_tensors=[input_tensor],
                           output_tensors_dims=[input_tensor.shape.dims],
                           output_tensor_layout=input_tensor.shape.layout)[0]
Example 6
def convolution(
    input_tensor, filter_tensor, stride, padding, activation=None,
    activation_params=None, name="conv"):
  """Compute a 3D Convolution given 4D `input_tensor` and `filter_tensor`.

  Args:
    input_tensor: A 4D `Tensor`.
    filter_tensor: A 4D `Tensor`.
    stride: A list of two integers: [row_stride, col_stride].
    padding: A string, either `same` or `valid`, selecting the zero padding
      scheme.
    activation: A string representing the activation function (optional).
    activation_params: kwargs for the activation function (optional).
    name: Operator name (optional).
  """
  def compute_output_dim(input_dim, weight_dim, stride, padding):
    pad = 0
    if to_padding_type(padding) == types_pb2.SamePadding:
      pad = weight_dim - 1
    return (input_dim - weight_dim + pad) // stride + 1

  input_tensor, filter_tensor = array_ops.check_and_add_layout_transform(
      name=name, op=types_pb2.Convolution3d,
      input_tensors=[input_tensor, filter_tensor])

  row_idx = 2 if input_tensor.shape.layout == types_pb2.NCHW else 1
  col_idx = 3 if input_tensor.shape.layout == types_pb2.NCHW else 2
  chan_idx = 1 if input_tensor.shape.layout == types_pb2.NCHW else 3
  assert input_tensor.dims(chan_idx) == filter_tensor.dims(chan_idx), (
      "The weights must have the same number of channels as the inputs.")
  output_rows = compute_output_dim(input_tensor.shape.dims[row_idx],
                                   filter_tensor.shape.dims[row_idx], stride[0],
                                   padding)
  output_cols = compute_output_dim(input_tensor.shape.dims[col_idx],
                                   filter_tensor.shape.dims[col_idx], stride[1],
                                   padding)
  output_layout = input_tensor.shape.layout
  if output_layout == types_pb2.NCHW:
    output_tensor_dims = [
        input_tensor.shape.dims[0], filter_tensor.shape.dims[0], output_rows,
        output_cols
    ]
  elif output_layout == types_pb2.NHWC:
    output_tensor_dims = [
        input_tensor.shape.dims[0], output_rows, output_cols,
        filter_tensor.shape.dims[0]
    ]
  else:
    assert False, "Unsupported output layout!"
  params = node_pb2.Params()
  params.conv_params.padding = to_padding_type(padding)
  params.conv_params.stride.extend(stride)
  if activation is not None:
    params.act_params.CopyFrom(
        activation_ops.to_proto(activation, activation_params))
  return common.add_node(
      name=name, op=types_pb2.Convolution3d,
      input_tensors=[input_tensor, filter_tensor],
      output_tensors_dims=[output_tensor_dims],
      output_tensor_layout=output_layout, params=params)[0]
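
The output spatial size follows standard convolution arithmetic; a standalone sketch of the nested helper, with padding passed as a plain string instead of the proto enum:

def compute_output_dim(input_dim, weight_dim, stride, padding):
  # "same" pads by weight_dim - 1 in total, "valid" does not pad.
  pad = weight_dim - 1 if padding == "same" else 0
  return (input_dim - weight_dim + pad) // stride + 1

assert compute_output_dim(32, 3, 1, "same") == 32
assert compute_output_dim(32, 3, 1, "valid") == 30
assert compute_output_dim(32, 3, 2, "same") == 16
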
Example 7
def softmax(input_tensor, name=None):
  """Softmax operator."""
  input_tensor = array_ops.check_and_add_layout_transform(
      name=name, op=types_pb2.Softmax, input_tensors=[input_tensor])[0]
  return common.add_node(
      name=name, op=types_pb2.Softmax, input_tensors=[input_tensor],
      output_tensors_dims=[input_tensor.shape.dims],
      output_tensor_layout=input_tensor.shape.layout)[0]
Example 8
def lrelu(input_tensor, slope=0.2, name="lrelu"):
  """Leaky rectified linear unit operator: max(slope * x, 0)."""
  params = node_pb2.Params()
  params.act_params.lrelu_params.slope = slope
  return common.add_node(
      name=name, op=types_pb2.LReLU, input_tensors=[input_tensor],
      output_tensors_dims=[input_tensor.shape.dims],
      output_tensor_layout=input_tensor.shape.layout, params=params)[0]
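
A NumPy reference of the element-wise function, equivalent to max(slope * x, x) for slopes in (0, 1):

import numpy as np

def lrelu_ref(x, slope=0.2):
  # x for positive inputs, slope * x otherwise.
  return np.where(x > 0, x, slope * x)

assert np.allclose(lrelu_ref(np.array([-2.0, 0.0, 3.0])),
                   np.array([-0.4, 0.0, 3.0]))
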
Example 9
def sigmoid(input_tensor, name="sigmoid"):
  """Sigmoid operator.

  Defined as 1/(1 + exp(-input_tensor)).
  """
  return common.add_node(
      name=name, op=types_pb2.Sigmoid, input_tensors=[input_tensor],
      output_tensors_dims=[input_tensor.shape.dims],
      output_tensor_layout=input_tensor.shape.layout)[0]
Example 10
def _math_op_common(tensor_a, tensor_b, op, name, output_tensor_dtype=None):
  if tensor_a.shape.dims != tensor_b.shape.dims:
    tensor_a, tensor_b = common.broadcast_inputs(tensor_a, tensor_b, name)
  if output_tensor_dtype is None:
    output_tensor_dtype = tensor_a.data_type
  return common.add_node(
      name=name, op=op, input_tensors=[tensor_a, tensor_b],
      output_tensors_dims=[tensor_a.shape.dims],
      output_tensor_layout=tensor_a.shape.layout,
      output_tensor_dtype=output_tensor_dtype)[0]
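
This helper relies on common.broadcast_inputs leaving both operands with identical dims, so the output shape can simply be read off tensor_a. Assuming NumPy-style broadcasting semantics (an assumption about broadcast_inputs, not stated in this excerpt), the precondition looks like:

import numpy as np

a = np.ones((4, 1, 3), dtype=np.float32)
b = np.ones((1, 5, 3), dtype=np.float32)
# After broadcasting, both operands share one shape.
a_b, b_b = np.broadcast_arrays(a, b)
assert a_b.shape == b_b.shape == (4, 5, 3)
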
Example 11
def input_data(input_tensor, name="data"):
    """Create an data node for the given tensor.

  This effectively turns a `Tensor` into a `Node`.
  """
    return common.add_node(name=name,
                           op=types_pb2.Data,
                           input_tensors=[input_tensor],
                           output_tensors_dims=[input_tensor.shape.dims],
                           output_tensor_layout=input_tensor.shape.layout)[0]
Example 12
def selu(input_tensor, alpha=1.6733, lambda_param=1.0507, name="selu"):
  """Scaled exponential linear unit function.

  Defined as: lambda_param * elu(input_tensor, alpha).
  """
  params = node_pb2.Params()
  params.act_params.elu_params.alpha = alpha
  params.act_params.elu_params.lambda_param = lambda_param
  return common.add_node(
      name=name, op=types_pb2.SELU, input_tensors=[input_tensor],
      output_tensors_dims=[input_tensor.shape.dims],
      output_tensor_layout=input_tensor.shape.layout, params=params)[0]
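
A NumPy reference of the scaled ELU formula named in the docstring (default alpha and lambda_param as above):

import numpy as np

def selu_ref(x, alpha=1.6733, lambda_param=1.0507):
  # lambda_param * elu(x, alpha): x for x > 0, alpha * (exp(x) - 1) otherwise.
  return lambda_param * np.where(x > 0, x, alpha * (np.exp(x) - 1))

assert np.isclose(selu_ref(np.array([1.0]))[0], 1.0507)
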
Example 13
def hard_tanh(input_tensor, min=-1, max=1, name="hard_tanh"):
  """Hard tanh operator.

  This bounds the min and max values of the tanh operator.
  """
  params = node_pb2.Params()
  params.act_params.hard_tanh_params.min = min
  params.act_params.hard_tanh_params.max = max
  return common.add_node(
      name=name, op=types_pb2.HardTanh, input_tensors=[input_tensor],
      output_tensors_dims=[input_tensor.shape.dims],
      output_tensor_layout=input_tensor.shape.layout, params=params)[0]
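
Element-wise, hard tanh is simply a clamp to [min, max]; a NumPy reference:

import numpy as np

def hard_tanh_ref(x, min=-1, max=1):
  # Values below min saturate at min, values above max saturate at max.
  return np.clip(x, min, max)

assert np.array_equal(hard_tanh_ref(np.array([-3.0, 0.5, 2.0])),
                      np.array([-1.0, 0.5, 1.0]))
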
Example 14
def elu(input_tensor, alpha=0.1, name="elu"):
  """Exponential linear unit function.

  Defined as:
    input_tensor if input_tensor > 0, else alpha * (exp(input_tensor) - 1).
  """
  params = node_pb2.Params()
  params.act_params.elu_params.alpha = alpha
  return common.add_node(
      name=name, op=types_pb2.ELU, input_tensors=[input_tensor],
      output_tensors_dims=[input_tensor.shape.dims],
      output_tensor_layout=input_tensor.shape.layout, params=params)[0]
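
A NumPy reference of the element-wise definition in the docstring:

import numpy as np

def elu_ref(x, alpha=0.1):
  # x for positive inputs, alpha * (exp(x) - 1) for non-positive ones.
  return np.where(x > 0, x, alpha * (np.exp(x) - 1))

assert elu_ref(np.array([2.0]))[0] == 2.0
assert np.isclose(elu_ref(np.array([-1.0]))[0], 0.1 * (np.exp(-1.0) - 1))
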
Example 15
def merge(input_tensors, name="merge"):
    """Forward the value of an available tensor from inputs to output.

  Args:
    input_tensors: Input tensors. All of them are dead tensors except one.

  Returns:
    A tensor carrying the value of the single available (non-dead) input tensor.
  """
    return common.add_node(
        name=name,
        op=types_pb2.Merge,
        input_tensors=input_tensors,
        output_tensors_dims=[input_tensors[0].shape.dims],
        output_tensor_layout=input_tensors[0].shape.layout)[0]
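
A tiny pure-Python sketch of the forwarding semantics, using None to stand in for a dead tensor (the None marker is purely illustrative):

def merge_ref(values):
  # Exactly one input is live; forward its value.
  live = [v for v in values if v is not None]
  assert len(live) == 1, "merge expects exactly one live input"
  return live[0]

assert merge_ref([None, 7, None]) == 7
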
Example 16
def reorder(input_tensor, target_layout, name="reorder"):
    """Reorder the data of a given `Tensor` with the target layout.

  Args:
    input_tensor: A `Tensor`.
    target_layout: The target layout.
    name: Operator name (optional).

  Returns:
    A new `Tensor` with the layout as `target_layout`.
  """
    src_layout = input_tensor.shape.layout
    src_dims = input_tensor.shape.dims
    if src_layout == types_pb2.NCHW:
        assert (target_layout == types_pb2.NHWC
                or target_layout == types_pb2.NC)
        if target_layout == types_pb2.NC:
            output_tensor_dims = [src_dims[0], np.prod(src_dims[1:])]
        else:
            output_tensor_dims = [
                src_dims[0], src_dims[2], src_dims[3], src_dims[1]
            ]
    elif src_layout == types_pb2.NHWC:
        assert (target_layout == types_pb2.NCHW
                or target_layout == types_pb2.NC)
        if target_layout == types_pb2.NC:
            output_tensor_dims = [src_dims[0], np.prod(src_dims[1:])]
        else:
            output_tensor_dims = [
                src_dims[0], src_dims[3], src_dims[1], src_dims[2]
            ]
    elif (src_layout == types_pb2.NTC and target_layout == types_pb2.NCT) or (
            src_layout == types_pb2.NCT and target_layout == types_pb2.NTC):
        output_tensor_dims = [src_dims[0], src_dims[2], src_dims[1]]
    elif (src_layout == types_pb2.NC and target_layout == types_pb2.CN) or (
            src_layout == types_pb2.CN and target_layout == types_pb2.NC):
        # 2D tensor transposition.
        output_tensor_dims = [src_dims[1], src_dims[0]]
    else:
        raise ValueError(
            "Unsupported reordering %s->%s!" %
            (DataLayout.Name(src_layout), DataLayout.Name(target_layout)))

    return common.add_node(name=name,
                           op=types_pb2.Reorder,
                           input_tensors=[input_tensor],
                           output_tensors_dims=[output_tensor_dims],
                           output_tensor_layout=target_layout)[0]
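
The data movement implied by each branch can be expressed with NumPy transposes and reshapes; a sketch for an NCHW source (illustrative shapes):

import numpy as np

nchw = np.random.rand(2, 3, 8, 8).astype(np.float32)
# NCHW -> NHWC moves channels last, matching the [0, 2, 3, 1] ordering above.
nhwc = np.transpose(nchw, (0, 2, 3, 1))
assert nhwc.shape == (2, 8, 8, 3)
# NCHW or NHWC -> NC flattens everything after the batch dimension.
nc = nchw.reshape(nchw.shape[0], -1)
assert nc.shape == (2, 3 * 8 * 8)
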
Example 17
def split(input_tensor, num_or_size_splits, axis=0, name="split"):
    """Split a tensor into sub tensors.

  Args:
    input_tensor: Input tensor.
    num_or_size_splits: Either an integer indicating the number of splits along
      axis, or a 1D list containing the sizes of each output tensor along axis.
      If an integer, it must evenly divide input_tensor.shape.dims[axis];
      otherwise the sum of the sizes must equal the size of input_tensor along
      the split axis.
    axis: The dimension to split.
    name: Name of the operator.

  Returns:
    A list of sub tensors.
  """
    splits = num_or_size_splits
    dim = input_tensor.shape.dims[axis]
    if not isinstance(num_or_size_splits, list):
        if dim % num_or_size_splits != 0:
            raise ValueError(
                "The size (%d) of the split axis must be evenly divisible by "
                "the number of splits (%d)!" % (dim, num_or_size_splits))
        splits = [dim // num_or_size_splits] * num_or_size_splits
    if sum(splits) != input_tensor.shape.dims[axis]:
        raise ValueError(
            "the sum (%d) of sizes along the split axis must match that of the "
            "input (%d)!" % (sum(splits), input_tensor.shape.dims[axis]))
    if len(splits) == 1:
        warnings.warn(
            "Number of splits is 1 for the split operator, thus this operator is "
            "optimized out.")
        return [input_tensor]
    output_tensors_dims = []
    for s in splits:
        dims = list(input_tensor.shape.dims)
        dims[axis] = s
        output_tensors_dims.append(dims)
    params = node_pb2.Params()
    params.split_params.split_axis = axis
    return common.add_node(name=name,
                           op=types_pb2.Split,
                           input_tensors=[input_tensor],
                           output_tensors_dims=output_tensors_dims,
                           output_tensor_layout=input_tensor.shape.layout,
                           params=params)
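
The two accepted forms of num_or_size_splits mirror NumPy's split behavior; a sketch of the resulting output shapes:

import numpy as np

x = np.arange(24, dtype=np.float32).reshape(2, 12)
# An integer must evenly divide the split axis, as checked above.
even = np.split(x, 3, axis=1)
assert [t.shape for t in even] == [(2, 4)] * 3
# A size list must sum to the axis length; np.split takes cut points, so
# cumulative sums convert sizes [2, 4, 6] into the indices [2, 6].
sizes = [2, 4, 6]
uneven = np.split(x, np.cumsum(sizes)[:-1], axis=1)
assert [t.shape[1] for t in uneven] == sizes
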
Example 18
def switch(input_tensor, pred, name="switch"):
    """Forward the input to output port determined by the given predication.

  Args:
    input_tensor: Input tensor.
    pred: Predicate tensor, which should contain a single boolean value.

  Returns:
    output_false, output_true: Two tensors representing the two branches of the
      switch. Input will only be forwarded to the taken branch.
  """
    return common.add_node(name=name,
                           op=types_pb2.Switch,
                           input_tensors=[input_tensor, pred],
                           output_tensors_dims=[input_tensor.shape.dims] * 2,
                           output_tensor_layout=input_tensor.shape.layout)
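
A pure-Python sketch of the switch semantics, again with None standing in for a dead output (illustrative only):

def switch_ref(value, pred):
  # The value is forwarded only to the taken branch; the other stays dead.
  return (None, value) if pred else (value, None)

output_false, output_true = switch_ref(42, True)
assert output_true == 42 and output_false is None
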
Example 19
def reshape(input_tensor, shape, layout, name="reshape"):
    """ Reshape the given tensor in the same order.

  Args:
    input_tensor: Input tensor.
    shape: New shape.
    layout: New layout.
    name: Name of the operator.

  Returns:
    Tensor with the new shape.
  """
    assert np.prod(input_tensor.shape.dims) == np.prod(shape)
    return common.add_node(name=name,
                           op=types_pb2.Reshape,
                           input_tensors=[input_tensor],
                           output_tensors_dims=[shape],
                           output_tensor_layout=layout)[0]
Example 20
def batch_norm(
    input_tensor, mean_tensor, var_tensor, gamma_tensor, beta_tensor,
    activation=None, activation_params=None, name="batch_norm"):
  """Perform batch normalization.

  Args:
    input_tensor: A 2D or 4D `Tensor`.
    mean_tensor: Mean parameter.
    var_tensor: Variance parameter. For performance reasons, this is
      precomputed as 1/sqrt(variance + eps).
    gamma_tensor: Gamma parameter.
    beta_tensor: Beta parameter.
    activation/activation_params: Activation function to use (optional).
    name: Operator name (optional).
  """
  assert (len(mean_tensor.shape.dims) == 2 and len(var_tensor.shape.dims) == 2
          and len(gamma_tensor.shape.dims) == 2
          and len(beta_tensor.shape.dims) == 2)
  # If the batch norm is after a FC layer, then the input/output tensors should
  # be in NC. Otherwise, the batch norm is after a convolution layer, and we
  # check backend_layouts for expected input/output layouts and do layout
  # transformation if needed.
  post_fc = False
  if len(input_tensor.shape.dims) == 2:
    post_fc = True

  if not post_fc:
    input_tensor = array_ops.check_and_add_layout_transform(
        name=name, op=types_pb2.BatchNorm, input_tensors=[input_tensor])[0]

  output_layout = types_pb2.NC if post_fc else input_tensor.shape.layout
  params = node_pb2.Params()
  if activation is not None:
    params.act_params.CopyFrom(
        activation_ops.to_proto(activation, activation_params))
  return common.add_node(
      name=name, op=types_pb2.BatchNorm, input_tensors=[
          input_tensor, mean_tensor, var_tensor, gamma_tensor, beta_tensor
      ], output_tensors_dims=[input_tensor.shape.dims],
      output_tensor_layout=output_layout, params=params)[0]
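
A NumPy reference of the per-channel transform for the post-FC (NC) case, written with the precomputed inverse standard deviation that var_tensor is expected to hold (the eps value is illustrative):

import numpy as np

def batch_norm_ref(x, mean, inv_std, gamma, beta):
  # inv_std holds 1/sqrt(variance + eps), so normalization is a multiply.
  return gamma * (x - mean) * inv_std + beta

x = np.random.rand(4, 8).astype(np.float32)  # NC input after an FC layer
mean = x.mean(axis=0)
inv_std = 1.0 / np.sqrt(x.var(axis=0) + 1e-5)
out = batch_norm_ref(x, mean, inv_std, np.ones(8), np.zeros(8))
assert out.shape == x.shape
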
Example 21
def max_pool(input_tensor, pool_size, stride, name="max_pool"):
  """Compute max pooling.

  Args:
    input_tensor: A 4D `Tensor`.
    pool_size: A list of two integers: [pool_rows, pool_cols].
    stride: A list of two integers: [row_stride, col_stride].
    name: Operator name (optional).
  """
  def compute_output_dim(input_dim, pool_size, stride):
    return (input_dim - pool_size) // stride + 1

  input_tensor = array_ops.check_and_add_layout_transform(
      name=name, op=types_pb2.MaxPooling, input_tensors=[input_tensor])[0]

  row_idx = 2 if input_tensor.shape.layout == types_pb2.NCHW else 1
  col_idx = 3 if input_tensor.shape.layout == types_pb2.NCHW else 2
  output_rows = compute_output_dim(input_tensor.shape.dims[row_idx],
                                   pool_size[0], stride[0])
  output_cols = compute_output_dim(input_tensor.shape.dims[col_idx],
                                   pool_size[1], stride[1])
  output_layout = input_tensor.shape.layout
  if output_layout == types_pb2.NCHW:
    output_tensor_dims = [
        input_tensor.shape.dims[0], input_tensor.shape.dims[1], output_rows,
        output_cols
    ]
  else:
    output_tensor_dims = [
        input_tensor.shape.dims[0], output_rows, output_cols,
        input_tensor.shape.dims[3]
    ]
  params = node_pb2.Params()
  params.pool_params.stride.extend(stride)
  params.pool_params.pool_size.extend(pool_size)
  return common.add_node(
      name=name, op=types_pb2.MaxPooling, input_tensors=[input_tensor],
      output_tensors_dims=[output_tensor_dims],
      output_tensor_layout=output_layout, params=params)[0]
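
The pooled spatial size uses the same integer arithmetic as the nested helper; a standalone check:

def pool_output_dim(input_dim, pool_size, stride):
  # Pooling here never pads, so the formula is the "valid" case.
  return (input_dim - pool_size) // stride + 1

# A 2x2 pool with stride 2 halves a 28x28 input; a 3x3 pool with stride 2
# gives (28 - 3) // 2 + 1 = 13.
assert pool_output_dim(28, 2, 2) == 14
assert pool_output_dim(28, 3, 2) == 13
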
Example 22
def repeat(input_tensor, multiples, name="repeat"):
    """Construct a tensor by repeating a given tensor.

  Args:
    input_tensor: Input tensor.
    multiples: A list giving the number of times the input tensor is repeated
      along each dimension.
    name: Name of the operator.

  Returns:
    A repeated version of the input tensor.
  """
    if len(input_tensor.shape.dims) != len(multiples):
        raise ValueError(
            "The multiples of the repeat operator must have the same number of "
            "dims as the input tensor.")
    output_tensor_dims = np.multiply(input_tensor.shape.dims, multiples)
    return common.add_node(name=name,
                           op=types_pb2.Repeat,
                           input_tensors=[input_tensor],
                           output_tensors_dims=[output_tensor_dims],
                           output_tensor_layout=input_tensor.shape.layout)[0]
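
The output shape is the element-wise product of the input dims and the multiples, which is exactly what np.tile produces:

import numpy as np

x = np.arange(6, dtype=np.float32).reshape(2, 3)
multiples = [2, 3]
tiled = np.tile(x, multiples)
# Output dims are dims * multiples, as computed above: [4, 9].
assert list(tiled.shape) == list(np.multiply(x.shape, multiples))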