Code example #1: diagonal matrix multiply (init/stddev API)
  def __call__(self, input_layer, init=None, stddev=None, l2loss=None):
    """Performs a diagonal matrix multiplication with a learned vector.

    This creates the parameter vector.

    Args:
      input_layer: The input_layer.
      init: An optional initialization. If not specified, uses Xavier
        initialization.
      stddev: A standard deviation to use in parameter initialization.
      l2loss: An l2 weight decay to apply.
    Returns:
      A Pretty Tensor handle to the layer.
    Raises:
      ValueError: if the head_shape is not rank 2 or the number of input nodes
      (second dim) is not known.
    """
    size = input_layer.shape[-1]
    if init is None:
      if stddev is None:
        init = layers.xavier_init(size, 0)
      elif stddev:
        init = tf.truncated_normal_initializer(stddev=stddev)
      else:
        init = tf.zeros_initializer
    param = self.variable('weights', [size], init)
    layers.add_l2loss(input_layer.bookkeeper, param, l2loss)

    return input_layer.with_tensor(input_layer * param, parameters=self.vars)
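A minimal usage sketch for the method above, assuming it is registered under the Pretty Tensor method name `diagonal_matrix_mul`; the placeholder shape and parameter values are illustrative (TF 0.x-era API):

import prettytensor as pt
import tensorflow as tf

# Apply a learned per-feature scale to a rank-2 input. Passing stddev
# selects truncated-normal initialization instead of the default Xavier
# scheme; l2loss adds weight decay on the parameter vector.
features = tf.placeholder(tf.float32, [None, 128])
scaled = pt.wrap(features).diagonal_matrix_mul(stddev=0.02, l2loss=1e-5)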
Code example #2: diagonal matrix multiply (weights/parameter_modifier API)
    def __call__(self,
                 input_layer,
                 weights=None,
                 l2loss=None,
                 phase=prettytensor.Phase.train,
                 parameter_modifier=parameters.identity):
        """Performs a diagonal matrix multiplication with a learned vector.

    This creates the parameter vector.

    Args:
      input_layer: The input_layer.
      weights:  An initializer for weights or a Tensor. If not specified,
        uses Xavier initialization.
      l2loss: An l2 weight decay to apply.
      phase: The phase of graph construction.  See `pt.Phase`.
      parameter_modifier: A function to modify parameters that is applied after
        creation and before use.
    Returns:
      A Pretty Tensor handle to the layer.
    Raises:
      ValueError: if this is not rank 2 or the number of input nodes
      (second dim) is not known.
    """
        size = input_layer.shape[-1]
        if weights is None:
            weights = layers.xavier_init(size, 0)

        param = parameter_modifier('weights',
                                   self.variable('weights', [size], weights),
                                   phase)
        layers.add_l2loss(input_layer.bookkeeper, param, l2loss)

        return input_layer.with_tensor(input_layer * param,
                                       parameters=self.vars)
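The `parameter_modifier` hook receives each parameter after creation and before use, with the call signature shown above (`modifier(name, variable, phase)`). A sketch of a custom modifier, here one that stops gradients outside of training; the modifier itself is illustrative, not part of the library:

import prettytensor as pt
import tensorflow as tf

def freeze_outside_train(name, variable, phase):
  # Invoked as parameter_modifier('weights', <variable>, phase) above.
  if phase is pt.Phase.train:
    return variable
  return tf.stop_gradient(variable)

x = tf.placeholder(tf.float32, [None, 64])
y = pt.wrap(x).diagonal_matrix_mul(parameter_modifier=freeze_outside_train)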
Code example #3: deconv2d (transposed convolution)
    def __call__(self,
                 input_layer,
                 kernel,
                 depth,
                 name=PROVIDED,
                 stride=None,
                 activation_fn=None,
                 l2loss=None,
                 init=None,
                 stddev=None,
                 bias=True,
                 edges=PAD_SAME,
                 batch_normalize=False,
                 phase=Phase.train):
        """Adds a convolution to the stack of operations.

        The current head must be a rank 4 Tensor.

        Args:
          input_layer: The chainable object, supplied.
          kernel: The size of the patch for the convolution, either an int or
            a length 1 or 2 sequence (if length 1 or int, it is expanded).
          depth: The depth of the new Tensor.
          name: The name for this operation is also used to create/find the
            parameter variables.
          stride: The strides as a length 1, 2 or 4 sequence or an integer. If an
            int, length 1 or 2, the stride in the first and last dimensions are 1.
          activation_fn: A tuple of (activation_function, extra_parameters). Any
            function that takes a tensor as its first argument can be used. More
            common functions will have summaries added (e.g. relu).
          l2loss: Set to a value greater than 0 to use L2 regularization to decay
            the weights.
          init: An optional initialization. If not specified, uses Xavier
            initialization.
          stddev: A standard deviation to use in parameter initialization.
          bias: Set to False to not have a bias.
          edges: Either SAME to use 0s for the out of bounds area or VALID to
            shrink the output size and use only valid input pixels.
          batch_normalize: Set to True to batch_normalize this layer.
          phase: The phase of graph construction. See `pt.Phase`.
        Returns:
          Handle to the generated layer.
        Raises:
          ValueError: If head is not a rank 4 tensor or the depth of the input
            (4th dim) is not known.
        """
        if len(input_layer.shape) != 4:
            raise ValueError('Cannot perform deconv2d on tensor with shape %s' %
                             input_layer.shape)
        if input_layer.shape[3] is None:
            raise ValueError('Input depth must be known')
        kernel = _kernel(kernel)
        stride = _stride(stride)
        size = [kernel[0], kernel[1], depth, input_layer.shape[3]]

        books = input_layer.bookkeeper
        if init is None:
            if stddev is None:
                patch_size = size[0] * size[1]
                init = layers.xavier_init(size[2] * patch_size,
                                          size[3] * patch_size)
            elif stddev:
                init = tf.truncated_normal_initializer(stddev=stddev)
            else:
                init = tf.zeros_initializer
        elif stddev is not None:
            raise ValueError('Do not set both init and stddev.')
        dtype = input_layer.tensor.dtype
        params = self.variable('weights', size, init, dt=dtype)

        input_height = input_layer.shape[1]
        input_width = input_layer.shape[2]

        filter_height = kernel[0]
        filter_width = kernel[1]

        row_stride = stride[1]
        col_stride = stride[2]

        out_rows, out_cols = get2d_deconv_output_size(input_height,
                                                      input_width,
                                                      filter_height,
                                                      filter_width, row_stride,
                                                      col_stride, edges)

        output_shape = [input_layer.shape[0], out_rows, out_cols, depth]
        y = tf.nn.conv2d_transpose(input_layer, params, output_shape, stride,
                                   edges)
        layers.add_l2loss(books, params, l2loss)
        if bias:
            y += self.variable('bias', [size[-2]],
                               tf.zeros_initializer,
                               dt=dtype)
        books.add_scalar_summary(tf.reduce_mean(layers.spatial_slice_zeros(y)),
                                 '%s/zeros_spatial' % y.op.name)
        if batch_normalize:
            y = input_layer.with_tensor(y).batch_normalize(phase=phase)
        if activation_fn is not None:
            if not isinstance(activation_fn, collections.Sequence):
                activation_fn = (activation_fn, )
            y = layers.apply_activation(books,
                                        y,
                                        activation_fn[0],
                                        activation_args=activation_fn[1:])
        return input_layer.with_tensor(y)
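A usage sketch, assuming the method above is registered as `deconv2d`. Because `output_shape` is built from `input_layer.shape[0]`, the batch size must be statically known; the shapes, kernel, and stride below are illustrative:

import prettytensor as pt
import tensorflow as tf

# Upsample 7x7x32 feature maps to 14x14x16: a 5x5 kernel, output depth
# 16, and stride 2 in both spatial dimensions with SAME padding.
codes = tf.placeholder(tf.float32, [32, 7, 7, 32])
upsampled = pt.wrap(codes).deconv2d(5, 16, stride=2,
                                    activation_fn=tf.nn.relu)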
Code example #4: fully_connected with transpose_weights
  def __call__(self,
               input_layer,
               size,
               activation_fn=None,
               l2loss=None,
               init=None,
               stddev=None,
               bias=True,
               bias_init=0.,
               transpose_weights=False,
               name=PROVIDED):
    """Adds the parameters for a fully connected layer and returns a tensor.

    The current head must be a rank 2 Tensor.

    Args:
      input_layer: The Pretty Tensor object, supplied.
      size: The number of neurons.
      activation_fn: A tuple of (activation_function, extra_parameters). Any
        function that takes a tensor as its first argument can be used. More
        common functions will have summaries added (e.g. relu).
      l2loss: Set to a value greater than 0 to use L2 regularization to decay
        the weights.
      init: An optional initialization. If not specified, uses Xavier
        initialization.
      stddev: A standard deviation to use in parameter initialization.
      bias: Set to False to not have a bias.
      bias_init: The initial value for the bias.
      transpose_weights: Flag indicating if weights should be transposed;
        this is useful for loading models with a different shape.
      name: The name for this operation is also used to create/find the
        parameter variables.
    Returns:
      A Pretty Tensor handle to the layer.
    Raises:
      ValueError: if the head_shape is not rank 2 or the number of input nodes
      (second dim) is not known.
    """
    if input_layer.get_shape().ndims != 2:
      raise ValueError(
          'fully_connected requires a rank 2 Tensor with known second '
          'dimension: %s'
          % input_layer.get_shape())
    in_size = input_layer.shape[1]
    if input_layer.shape[1] is None:
      raise ValueError('Number of input nodes must be known.')
    books = input_layer.bookkeeper
    if init is None:
      if stddev is None:
        init = layers.xavier_init(in_size, size)
      elif stddev:
        init = tf.truncated_normal_initializer(stddev=stddev)
      else:
        init = tf.zeros_initializer
    elif stddev is not None:
      raise ValueError('Do not set both init and stddev.')
    dtype = input_layer.tensor.dtype
    weight_shape = [size, in_size] if transpose_weights else [in_size, size]

    params = self.variable(
        'weights',
        weight_shape,
        init,
        dt=dtype)
    y = tf.matmul(input_layer, params, transpose_b=transpose_weights)
    layers.add_l2loss(books, params, l2loss)
    if bias:
      y += self.variable(
          'bias',
          [size],
          tf.constant_initializer(bias_init),
          dt=dtype)

    if activation_fn is not None:
      if not isinstance(activation_fn, collections.Sequence):
        activation_fn = (activation_fn,)
      y = layers.apply_activation(
          books,
          y,
          activation_fn[0],
          activation_args=activation_fn[1:])
    books.add_histogram_summary(y, '%s/activations' % y.op.name)
    return input_layer.with_tensor(y, parameters=self.vars)
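A sketch of what `transpose_weights` changes: the stored variable's shape flips, while the math stays the same because `tf.matmul` runs with `transpose_b=True`. This lets a checkpoint written with one weight layout load into the other; the shapes below are illustrative:

import prettytensor as pt
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 784])
# With transpose_weights=True the 'weights' variable is created as
# [256, 784] instead of [784, 256]; the output is identical either way.
h = pt.wrap(x).fully_connected(256, transpose_weights=True)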
Code example #5: conv2d
  def __call__(self,
               input_layer,
               kernel,
               depth,
               activation_fn=None,
               stride=None,
               l2loss=None,
               init=None,
               stddev=None,
               bias=True,
               edges=PAD_SAME,
               batch_normalize=False,
               name=PROVIDED):
    """Adds a convolution to the stack of operations.

    The current head must be a rank 4 Tensor.

    Args:
      input_layer: The chainable object, supplied.
      kernel: The size of the patch for the convolution, either an int or a
        length 1 or 2 sequence (if length 1 or int, it is expanded).
      depth: The depth of the new Tensor.
      activation_fn: A tuple of (activation_function, extra_parameters). Any
        function that takes a tensor as its first argument can be used. More
        common functions will have summaries added (e.g. relu).
      stride: The strides as a length 1, 2 or 4 sequence or an integer. If an
        int, length 1 or 2, the stride in the first and last dimensions are 1.
      l2loss: Set to a value greater than 0 to use L2 regularization to decay
        the weights.
      init: An optional initialization. If not specified, uses Xavier
        initialization.
      stddev: A standard deviation to use in parameter initialization.
      bias: Set to False to not have a bias.
      edges: Either SAME to use 0s for the out of bounds area or VALID to
        shrink the output size and use only valid input pixels.
      batch_normalize: Set to True to batch_normalize this layer.
      name: The name for this operation is also used to create/find the
        parameter variables.
    Returns:
      Handle to the generated layer.
    Raises:
      ValueError: If head is not a rank 4 tensor or the depth of the input
        (4th dim) is not known.
    """
    if len(input_layer.shape) != 4:
      raise ValueError(
          'Cannot perform conv2d on tensor with shape %s' % input_layer.shape)
    if input_layer.shape[3] is None:
      raise ValueError('Input depth must be known')
    kernel = _kernel(kernel)
    stride = _stride(stride)
    size = [kernel[0], kernel[1], input_layer.shape[3], depth]

    books = input_layer.bookkeeper
    if init is None:
      if stddev is None:
        patch_size = size[0] * size[1]
        init = layers.xavier_init(size[2] * patch_size, size[3] * patch_size)
      elif stddev:
        init = tf.truncated_normal_initializer(stddev=stddev)
      else:
        init = tf.zeros_initializer
    elif stddev is not None:
      raise ValueError('Do not set both init and stddev.')
    dtype = input_layer.tensor.dtype
    params = self.variable('weights', size, init, dt=dtype)
    y = tf.nn.conv2d(input_layer, params, stride, edges)
    layers.add_l2loss(books, params, l2loss)
    if bias:
      y += self.variable(
          'bias',
          [size[-1]],
          tf.zeros_initializer,
          dt=dtype)
    books.add_scalar_summary(
        tf.reduce_mean(
            layers.spatial_slice_zeros(y)), '%s/zeros_spatial' % y.op.name)
    if batch_normalize:
      y = input_layer.with_tensor(y).batch_normalize()
    if activation_fn is not None:
      if not isinstance(activation_fn, collections.Sequence):
        activation_fn = (activation_fn,)
      y = layers.apply_activation(
          books,
          y,
          activation_fn[0],
          activation_args=activation_fn[1:])
    books.add_histogram_summary(y, '%s/activations' % y.op.name)
    return input_layer.with_tensor(y, parameters=self.vars)
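A usage sketch chaining this method in the style of the Pretty Tensor README; the layer sizes are illustrative:

import prettytensor as pt
import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 28, 28, 1])
logits = (pt.wrap(images)
          .conv2d(5, 20, activation_fn=tf.nn.relu)
          .max_pool(2, 2)
          .conv2d(5, 50, activation_fn=tf.nn.relu)
          .max_pool(2, 2)
          .flatten()
          .fully_connected(10))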
Code example #6: fully_connected (returns a raw tensor)
  def __call__(self,
               input_layer,
               size,
               name=PROVIDED,
               activation_fn=None,
               l2loss=None,
               init=None,
               stddev=None,
               bias=True,
               bias_init=0.):
    """Adds the parameters for a fully connected layer and returns a tensor.

    The current head must be a rank 2 Tensor.

    Args:
      input_layer: The Pretty Tensor object, supplied.
      size: The number of neurons.
      name: The name for this operation is also used to create/find the
        parameter variables.
      activation_fn: A tuple of (activation_function, extra_parameters). Any
        function that takes a tensor as its first argument can be used. More
        common functions will have summaries added (e.g. relu).
      l2loss: Set to a value greater than 0 to use L2 regularization to decay
        the weights.
      init: An optional initialization. If not specified, uses Xavier
        initialization.
      stddev: A standard deviation to use in parameter initialization.
      bias: Set to False to not have a bias.
      bias_init: The initial value for the bias.
    Returns:
      A Pretty Tensor handle to the layer.
    Raises:
      ValueError: if the head_shape is not rank 2 or the number of input nodes
      (second dim) is not known.
    """
    if len(input_layer.shape) != 2:
      raise ValueError(
          'Cannot perform fully connected on tensor with shape %s' %
          input_layer.shape)
    in_size = input_layer.shape[1]
    if input_layer.shape[1] is None:
      raise ValueError('Number of input nodes must be known.')
    books = input_layer.bookkeeper
    if init is None:
      if stddev is None:
        init = layers.xavier_init(in_size, size)
      elif stddev:
        init = tf.truncated_normal_initializer(stddev=stddev)
      else:
        init = tf.zeros_initializer
    elif stddev is not None:
      raise ValueError('Do not set both init and stddev.')
    dtype = input_layer.tensor.dtype
    params = self.variable(
        'weights',
        [in_size, size],
        init,
        dt=dtype)
    y = tf.matmul(input_layer, params)
    layers.add_l2loss(books, params, l2loss)
    if bias:
      y += self.variable(
          'bias',
          [size],
          tf.constant_initializer(bias_init),
          dt=dtype)

    if activation_fn is not None:
      if not isinstance(activation_fn, collections.Sequence):
        activation_fn = (activation_fn,)
      return layers.apply_activation(
          books,
          y,
          activation_fn[0],
          activation_args=activation_fn[1:])
    else:
      return y
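The tuple form of `activation_fn` forwards extra positional arguments to the activation via `activation_args`. A sketch using `tf.nn.l2_normalize`, whose second argument is the dimension to normalize over; the choice of function is illustrative:

import prettytensor as pt
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 100])
# apply_activation ends up calling tf.nn.l2_normalize(y, 1) on the
# layer's pre-activation output y.
normalized = pt.wrap(x).fully_connected(
    64, activation_fn=(tf.nn.l2_normalize, 1))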
Code example #7: fully_connected (returns a Pretty Tensor handle)
    def __call__(self,
                 input_layer,
                 size,
                 activation_fn=None,
                 l2loss=None,
                 init=None,
                 stddev=None,
                 bias=True,
                 bias_init=0.,
                 name=PROVIDED):
        """Adds the parameters for a fully connected layer and returns a tensor.

    The current head must be a rank 2 Tensor.

    Args:
      input_layer: The Pretty Tensor object, supplied.
      size: The number of neurons
      activation_fn: A tuple of (activation_function, extra_parameters). Any
        function that takes a tensor as its first argument can be used. More
        common functions will have summaries added (e.g. relu).
      l2loss: Set to a value greater than 0 to use L2 regularization to decay
        the weights.
      init: An optional initialization. If not specified, uses Xavier
        initialization.
      stddev: A standard deviation to use in parameter initialization.
      bias: Set to False to not have a bias.
      bias_init: The initial value for the bias.
      name: The name for this operation is also used to create/find the
        parameter variables.
    Returns:
      A Pretty Tensor handle to the layer.
    Raises:
      ValueError: if the head_shape is not rank 2  or the number of input nodes
      (second dim) is not known.
    """
        if len(input_layer.shape) != 2:
            raise ValueError(
                'Cannot perform fully connected on tensor with shape %s' %
                input_layer.shape)
        in_size = input_layer.shape[1]
        if input_layer.shape[1] is None:
            raise ValueError('Number of input nodes must be known.')
        books = input_layer.bookkeeper
        if init is None:
            if stddev is None:
                init = layers.xavier_init(in_size, size)
            elif stddev:
                init = tf.truncated_normal_initializer(stddev=stddev)
            else:
                init = tf.zeros_initializer
        elif stddev is not None:
            raise ValueError('Do not set both init and stddev.')
        dtype = input_layer.tensor.dtype
        params = self.variable('weights', [in_size, size], init, dt=dtype)
        y = tf.matmul(input_layer, params)
        layers.add_l2loss(books, params, l2loss)
        if bias:
            y += self.variable('bias', [size],
                               tf.constant_initializer(bias_init),
                               dt=dtype)

        if activation_fn is not None:
            if not isinstance(activation_fn, collections.Sequence):
                activation_fn = (activation_fn, )
            y = layers.apply_activation(books,
                                        y,
                                        activation_fn[0],
                                        activation_args=activation_fn[1:])
        books.add_histogram_summary(y, '%s/activations' % y.op.name)
        return input_layer.with_tensor(y, parameters=self.vars)
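A sketch of the initialization paths encoded by the `init`/`stddev` logic above: omit both for Xavier initialization, pass a positive `stddev` for a truncated normal (a `stddev` of 0 falls through to a zeros initializer), or pass an explicit initializer as `init`; setting both raises ValueError. Names and values below are illustrative:

import prettytensor as pt
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 50])

h1 = pt.wrap(x).fully_connected(32, name='fc_xavier')   # default: Xavier
h2 = pt.wrap(x).fully_connected(32, stddev=0.01, name='fc_normal')
h3 = pt.wrap(x).fully_connected(
    32, init=tf.random_uniform_initializer(-0.05, 0.05), name='fc_uniform')
# fully_connected(32, init=..., stddev=0.01) would raise ValueError.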