Example #1
def update_target_variables(target_variables,
                            source_variables,
                            tau=1.0,
                            use_locking=False,
                            name="update_target_variables"):
    """Returns an op to update a list of target variables from source variables.

    The update rule is:
    `target_variable = (1 - tau) * target_variable + tau * source_variable`.

    Args:
      target_variables: a list of the variables to be updated.
      source_variables: a list of the variables used for the update.
      tau: weight used to gate the update. The permitted range is 0 < tau <= 1,
        with small tau representing an incremental update, and tau == 1
        representing a full update (that is, a straight copy).
      use_locking: use `tf.Variable.assign`'s locking option when assigning
        source variable values to target variables.
      name: sets the `name_scope` for this op.

    Raises:
      TypeError: when tau is not a Python float.
      ValueError: when tau is out of range, or the source and target variables
        have different numbers or shapes.

    Returns:
      An op that executes all the variable updates.
    """
    if not isinstance(tau, float) and not tf.is_tensor(tau):
        raise TypeError("Tau has wrong type (should be float) {}".format(tau))
    if not tf.is_tensor(tau) and not 0.0 < tau <= 1.0:
        raise ValueError("Invalid parameter tau {}".format(tau))
    if len(target_variables) != len(source_variables):
        raise ValueError("Number of target variables {} is not the same as "
                         "number of source variables {}".format(
                             len(target_variables), len(source_variables)))

    same_shape = all(trg.get_shape() == src.get_shape()
                     for trg, src in zip(target_variables, source_variables))
    if not same_shape:
        raise ValueError(
            "Target variables don't have the same shape as source "
            "variables.")

    def update_op(target_variable, source_variable, tau):
        if tau == 1.0:
            return target_variable.assign(source_variable, use_locking)
        else:
            return target_variable.assign(
                tau * source_variable + (1.0 - tau) * target_variable,
                use_locking)

    with tf.name_scope(name, values=target_variables + source_variables):
        update_ops = [
            update_op(target_var, source_var,
                      tau) for target_var, source_var in zip(
                          target_variables, source_variables)
        ]
        return tf.group(*update_ops, name="update_all_variables")
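
A minimal usage sketch with the function above in scope. It assumes TF1 graph mode, since the snippet relies on the TF1 `tf.name_scope(name, values=...)` signature; the variable names and the tau value are illustrative only:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

# Illustrative stand-ins for the weights of an online and a target network.
online_vars = [
    tf.get_variable("w_online", shape=[4], initializer=tf.ones_initializer())
]
target_vars = [
    tf.get_variable("w_target", shape=[4], initializer=tf.zeros_initializer())
]

# Soft (Polyak) update: target <- 0.995 * target + 0.005 * online.
soft_update = update_target_variables(target_vars, online_vars, tau=0.005)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(soft_update)
    print(sess.run(target_vars[0]))  # approx. [0.005 0.005 0.005 0.005]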
Example #2
    def __init__(self,
                 tensor_dict=None,
                 every_secs=None,
                 every_steps=None,
                 stats_client=None):
        """
        Args:
            tensor_dict: `dict` mapping metric name to metric tensor.
            every_secs: seconds between reports, passed to
                `tf.train.SecondOrStepTimer`.
            every_steps: steps between reports, passed to
                `tf.train.SecondOrStepTimer`.
            stats_client: client used to report the metrics; defaults to the
                global context's stats client.
        """
        if not isinstance(tensor_dict, dict):
            raise TypeError("tensor_dict is not a dict")
        if not tensor_dict:
            raise ValueError("tensor_dict is empty")
        self._metric_names = []
        for name, tensor in tensor_dict.items():
            if not isinstance(name, str) or not name:
                raise ValueError(
                    "invalid metric name, name: %s, type: %s" %
                    (name, type(name)))
            if not tf.is_tensor(tensor) or not (
                    tensor.dtype.is_floating or tensor.dtype.is_integer):
                raise ValueError(
                    "invalid metric tensor, name: %s, tensor: %s" %
                    (name, tensor))
            self._metric_names.append(name)
        self._tensor_dict = tensor_dict
        self._stats_client = stats_client or _gctx.stats_client
        self._timer = tf.train.SecondOrStepTimer(
            every_secs=every_secs, every_steps=every_steps)
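
The class that owns this constructor is not named in the snippet, and `_gctx` is not shown; purely to illustrate the expected `tensor_dict` shape, a hypothetical construction could look like this (`MetricReportingHook` is a placeholder name, and the TF1 `tf.train` API is assumed because of `SecondOrStepTimer`):

import tensorflow.compat.v1 as tf

loss = tf.reduce_mean(tf.square(tf.random.normal([8])))  # scalar float tensor
examples_seen = tf.constant(1024)                         # scalar integer tensor

# "MetricReportingHook" stands in for the unnamed class above; the timer it
# builds is then configured to trigger at most once every 30 seconds.
hook = MetricReportingHook(
    tensor_dict={"loss": loss, "examples_seen": examples_seen},
    every_secs=30)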
Example #3
    def _register_axis_sizes(self, tensor, axes):
        """Update internal axis_sizes dict given a tensor and axes specification.

        Args:
          tensor: tf.Tensor to check the shape of.
          axes: List of strings and/or tuples of strings specifying assumed
            axes.

        Raises:
          ValueError: If the numerical size of a tensor's dim doesn't match the
            expected size for the axis name.
          ValueError: If any axis name in a packed dim is not stored in
            axis_sizes.
        """
        for dim, axis in enumerate(axes):
            if isinstance(axis, tuple):
                # Multiple axes packed together into one dim.
                for name in axis:
                    if name not in self.axis_sizes:
                        raise ValueError(
                            'Axis "{}" not known, set its size first.'.format(
                                name))
            else:
                # This dim is not packed.
                if axis in self.axis_sizes:
                    size = signal_util.static_or_dynamic_dim_size(tensor, dim)
                    if not tf.is_tensor(size) and size != self.axis_sizes[axis]:
                        raise ValueError(
                            'Incorrect axis name "{axis}" for dim {dim}: got '
                            '{size} but expected {expected_size}.'.format(
                                axis=axis,
                                dim=dim,
                                size=size,
                                expected_size=self.axis_sizes[axis]))
                else:
                    self.axis_sizes[axis] = signal_util.static_or_dynamic_dim_size(
                        tensor, dim)
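
The helper `signal_util.static_or_dynamic_dim_size` is not shown in the snippet; from the way its result is used (a `tf.is_tensor` check followed by comparison against a stored Python size), a plausible sketch of such a helper, stated as an assumption, is:

import tensorflow as tf

def static_or_dynamic_dim_size(tensor, dim):
    # Assumed behaviour: return a Python int when the dim is statically known,
    # otherwise a scalar tensor with the runtime size.
    static_size = tensor.shape.as_list()[dim]  # int, or None if unknown
    if static_size is not None:
        return static_size
    return tf.shape(tensor)[dim]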
Example #4
def as_tensor(data, dtype=None):
    if tf.is_tensor(data):
        if dtype is None or data.dtype == dtype:
            return data
        return tf.cast(data, dtype)
    return tf.convert_to_tensor(data, dtype=dtype)
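
A few illustrative calls (the values are arbitrary):

import numpy as np
import tensorflow as tf

as_tensor(np.array([1, 2, 3]))                        # converted via tf.convert_to_tensor
as_tensor(tf.constant([1, 2, 3]), dtype=tf.float32)   # existing tensor, cast to float32
as_tensor(tf.constant([1.0, 2.0]), dtype=tf.float32)  # dtype already matches: returned as-is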
Example #5
def is_tensor(obj):
    return tf.is_tensor(obj)
Example #6
    def _generate(self, feature_map_shape_list, im_height=1, im_width=1):
        """Generates a collection of bounding boxes to be used as anchors.

        For training, we require the input image shape to be statically defined.
        That is, im_height and im_width should be integers rather than tensors.
        For inference, im_height and im_width can be either integers (for fixed
        image size), or tensors (for arbitrary image size).

        Args:
          feature_map_shape_list: list of pairs of convnet layer resolutions in the
            format [(height_0, width_0), (height_1, width_1), ...]. For example,
            setting feature_map_shape_list=[(8, 8), (7, 7)] asks for anchors that
            correspond to an 8x8 layer followed by a 7x7 layer.
          im_height: the height of the image to generate the grid for. If both
            im_height and im_width are 1, anchors can only be generated in
            absolute coordinates.
          im_width: the width of the image to generate the grid for. If both
            im_height and im_width are 1, anchors can only be generated in
            absolute coordinates.

        Returns:
          boxes_list: a list of BoxLists each holding anchor boxes corresponding to
            the input feature map shapes.
        Raises:
          ValueError: if im_height and im_width are not integers.
          ValueError: if im_height and im_width are 1, but normalized coordinates
            were requested.
        """
        anchor_grid_list = []
        for feat_shape, grid_info in zip(feature_map_shape_list,
                                         self._anchor_grid_info):
            level = grid_info['level']
            stride = 2**level
            scales, aspect_ratios, base_anchor_size, anchor_stride = (
                grid_info['info'])
            feat_h = feat_shape[0]
            feat_w = feat_shape[1]
            anchor_offset = [0, 0]
            if isinstance(im_height, int) and isinstance(im_width, int):
                if im_height % 2.0**level == 0 or im_height == 1:
                    anchor_offset[0] = stride / 2.0
                if im_width % 2.0**level == 0 or im_width == 1:
                    anchor_offset[1] = stride / 2.0
            if tf.is_tensor(im_height) and tf.is_tensor(im_width):
                anchor_offset[0] = stride / 2.0
                anchor_offset[1] = stride / 2.0
            ag = grid_anchor_generator.GridAnchorGenerator(
                scales,
                aspect_ratios,
                base_anchor_size=base_anchor_size,
                anchor_stride=anchor_stride,
                anchor_offset=anchor_offset)
            (anchor_grid,) = ag.generate(
                feature_map_shape_list=[(feat_h, feat_w)])

            if self._normalize_coordinates:
                if im_height == 1 or im_width == 1:
                    raise ValueError(
                        'Normalized coordinates were requested upon construction of the '
                        'MultiscaleGridAnchorGenerator, but a subsequent call to '
                        'generate did not supply dimension information.')
                anchor_grid = box_list_ops.to_normalized_coordinates(
                    anchor_grid, im_height, im_width, check_range=False)
            anchor_grid_list.append(anchor_grid)

        return anchor_grid_list
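
To make the offset logic above concrete, here is the arithmetic for one assumed configuration: a feature map at level 3 (stride 8) and a statically known 256x256 image:

level = 3                     # grid_info['level'] for this feature map
stride = 2 ** level           # 8 pixels between anchor centres
im_height = im_width = 256    # Python ints, so the isinstance branch is taken
# 256 % 2.0**3 == 0, hence anchors are centred within each stride-8 cell:
anchor_offset = [stride / 2.0, stride / 2.0]  # [4.0, 4.0]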
Example #7
    def _generate(self, feature_map_shape_list, im_height=1, im_width=1):
        """Generates a collection of bounding boxes to be used as anchors.

        The number of anchors generated for a single grid with shape MxM where
        we place k boxes over each grid center is k*M^2 and thus the total
        number of anchors is the sum over all grids. In our box_specs_list
        example (see the constructor docstring), we would place two boxes over
        each grid point on an 8x8 grid and three boxes over each grid point on
        a 4x4 grid and thus end up with 2*8^2 + 3*4^2 = 176 anchors in total.
        The layout of the output anchors follows the order of how the grid
        sizes and box_specs are specified (with box_spec index varying the
        fastest, followed by width index, then height index, then grid index).

        Args:
          feature_map_shape_list: list of pairs of convnet layer resolutions in
            the format [(height_0, width_0), (height_1, width_1), ...]. For
            example, setting feature_map_shape_list=[(8, 8), (7, 7)] asks for
            anchors that correspond to an 8x8 layer followed by a 7x7 layer.
          im_height: the height of the image to generate the grid for. If both
            im_height and im_width are 1, the generated anchors default to
            absolute coordinates, otherwise normalized coordinates are produced.
          im_width: the width of the image to generate the grid for. If both
            im_height and im_width are 1, the generated anchors default to
            absolute coordinates, otherwise normalized coordinates are produced.

        Returns:
          boxes_list: a list of BoxLists each holding anchor boxes corresponding
            to the input feature map shapes.

        Raises:
          ValueError: if feature_map_shape_list, box_specs_list do not have the
            same length.
          ValueError: if feature_map_shape_list does not consist of pairs of
            integers.
        """
        if not (isinstance(feature_map_shape_list, list)
                and len(feature_map_shape_list) == len(self._box_specs)):
            raise ValueError(
                'feature_map_shape_list must be a list with the same '
                'length as self._box_specs')
        if not all([
                isinstance(list_item, tuple) and len(list_item) == 2
                for list_item in feature_map_shape_list
        ]):
            raise ValueError('feature_map_shape_list must be a list of pairs.')

        im_height = tf.cast(im_height, dtype=tf.float32)
        im_width = tf.cast(im_width, dtype=tf.float32)

        if not self._anchor_strides:
            anchor_strides = [(1.0 / tf.cast(pair[0], dtype=tf.float32),
                               1.0 / tf.cast(pair[1], dtype=tf.float32))
                              for pair in feature_map_shape_list]
        else:
            anchor_strides = [
                (tf.cast(stride[0], dtype=tf.float32) / im_height,
                 tf.cast(stride[1], dtype=tf.float32) / im_width)
                for stride in self._anchor_strides
            ]
        if not self._anchor_offsets:
            anchor_offsets = [(0.5 * stride[0], 0.5 * stride[1])
                              for stride in anchor_strides]
        else:
            anchor_offsets = [
                (tf.cast(offset[0], dtype=tf.float32) / im_height,
                 tf.cast(offset[1], dtype=tf.float32) / im_width)
                for offset in self._anchor_offsets
            ]

        for arg, arg_name in zip([anchor_strides, anchor_offsets],
                                 ['anchor_strides', 'anchor_offsets']):
            if not (isinstance(arg, list)
                    and len(arg) == len(self._box_specs)):
                raise ValueError('%s must be a list with the same length '
                                 'as self._box_specs' % arg_name)
            if not all([
                    isinstance(list_item, tuple) and len(list_item) == 2
                    for list_item in arg
            ]):
                raise ValueError('%s must be a list of pairs.' % arg_name)

        anchor_grid_list = []
        min_im_shape = tf.minimum(im_height, im_width)
        scale_height = min_im_shape / im_height
        scale_width = min_im_shape / im_width
        if not tf.is_tensor(self._base_anchor_size):
            base_anchor_size = [
                scale_height *
                tf.constant(self._base_anchor_size[0], dtype=tf.float32),
                scale_width *
                tf.constant(self._base_anchor_size[1], dtype=tf.float32)
            ]
        else:
            base_anchor_size = [
                scale_height * self._base_anchor_size[0],
                scale_width * self._base_anchor_size[1]
            ]
        for feature_map_index, (grid_size, scales, aspect_ratios, stride,
                                offset) in enumerate(
                                    zip(feature_map_shape_list, self._scales,
                                        self._aspect_ratios, anchor_strides,
                                        anchor_offsets)):
            tiled_anchors = grid_anchor_generator.tile_anchors(
                grid_height=grid_size[0],
                grid_width=grid_size[1],
                scales=scales,
                aspect_ratios=aspect_ratios,
                base_anchor_size=base_anchor_size,
                anchor_stride=stride,
                anchor_offset=offset)
            if self._clip_window is not None:
                tiled_anchors = box_list_ops.clip_to_window(
                    tiled_anchors,
                    self._clip_window,
                    filter_nonoverlapping=False)
            num_anchors_in_layer = tiled_anchors.num_boxes_static()
            if num_anchors_in_layer is None:
                num_anchors_in_layer = tiled_anchors.num_boxes()
            anchor_indices = feature_map_index * tf.ones(
                [num_anchors_in_layer])
            tiled_anchors.add_field('feature_map_index', anchor_indices)
            anchor_grid_list.append(tiled_anchors)

        return anchor_grid_list
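
For the default stride/offset path above (no explicit anchor_strides or anchor_offsets supplied), the arithmetic for a single 8x8 feature map works out as follows; plain Python numbers are used here purely for illustration, whereas the snippet builds float32 tensors:

feat_h, feat_w = 8, 8
anchor_stride = (1.0 / feat_h, 1.0 / feat_w)  # (0.125, 0.125) in normalized units
anchor_offset = (0.5 * anchor_stride[0],
                 0.5 * anchor_stride[1])      # (0.0625, 0.0625): centre of each cell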
def to_array(values):
    """Converts input values to a np.ndarray."""
    if tf.executing_eagerly() and tf.is_tensor(values):
        return values.numpy()
    else:
        return np.asarray(values)
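
A couple of illustrative calls, assuming eager execution is enabled:

import numpy as np
import tensorflow as tf

to_array(tf.constant([1.0, 2.0]))  # eager tensor -> ndarray via .numpy()
to_array([1, 2, 3])                # plain list -> ndarray via np.asarray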