Example #1
def serialize_tensor_value(value, type_spec=None):
  """Serializes a tensor value into `executor_pb2.Value`.

  Args:
    value: A Numpy array or other object understood by `tf.make_tensor_proto`.
    type_spec: An optional type spec, a `tff.TensorType` or something
      convertible to it.

  Returns:
    A tuple `(value_proto, ret_type_spec)` in which `value_proto` is an instance
    of `executor_pb2.Value` with the serialized content of `value`, and
    `ret_type_spec` is the type of the serialized value. The `ret_type_spec` is
    the same as the argument `type_spec` if that argument was not `None`. If
    the argument was `None`, `ret_type_spec` is a type determined from `value`.

  Raises:
    TypeError: If the arguments are of the wrong types.
    ValueError: If the value is malformed.
  """
  if isinstance(value, tf.Tensor):
    if type_spec is None:
      type_spec = computation_types.TensorType(
          dtype=tf.DType(value.dtype), shape=tf.TensorShape(value.shape))
    value = value.numpy()
  if type_spec is not None:
    type_spec = computation_types.to_type(type_spec)
    py_typecheck.check_type(type_spec, computation_types.TensorType)
    if isinstance(value, np.ndarray):
      tensor_proto = tf.make_tensor_proto(
          value, dtype=type_spec.dtype, verify_shape=False)
      type_utils.check_assignable_from(
          type_spec,
          computation_types.TensorType(
              dtype=tf.DType(tensor_proto.dtype),
              shape=tf.TensorShape(tensor_proto.tensor_shape)))
    else:
      tensor_proto = tf.make_tensor_proto(
          value,
          dtype=type_spec.dtype,
          shape=type_spec.shape,
          verify_shape=True)
  else:
    tensor_proto = tf.make_tensor_proto(value)
    type_spec = computation_types.TensorType(
        dtype=tf.DType(tensor_proto.dtype),
        shape=tf.TensorShape(tensor_proto.tensor_shape))
  any_pb = any_pb2.Any()
  any_pb.Pack(tensor_proto)
  return executor_pb2.Value(tensor=any_pb), type_spec
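The packing step above can be exercised in isolation. The following is a minimal sketch using only TensorFlow and protobuf; the TFF-specific `executor_pb2` and `computation_types` wrappers are omitted, so only the `Any`-packing shown here is taken from the function above.

import numpy as np
import tensorflow as tf
from google.protobuf import any_pb2

value = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
tensor_proto = tf.make_tensor_proto(value)        # TensorProto holding the data
any_pb = any_pb2.Any()
any_pb.Pack(tensor_proto)                         # wrap in google.protobuf.Any
print(tf.DType(tensor_proto.dtype).name)          # float32
print(tf.TensorShape(tensor_proto.tensor_shape))  # (2, 2)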
Example #2
def summarize_graph(model_path, output_nodes_for_freeze=None, reshape_net=None):
    placeholders = dict()
    variables = list()
    outputs = list()
    graph = load_graph(model_path, output_nodes_for_freeze)
    unlikely_output_types = ['Const', 'Assign', 'NoOp', 'Placeholder', 'Assert', 'switch_t', 'switch_f']
    for node in graph.as_graph_def().node:
        if node.op == 'Placeholder':
            node_dict = dict()
            node_dict['type'] = tf.DType(node.attr['dtype'].type).name
            node_dict['shape'] = str(node.attr['shape'].shape.dim).replace('\n', '').replace(' ', '').replace(
                'size:', '').replace('[', '').replace(']', '')
            node_dict['shape'] = tuple(map(lambda x: int(x), node_dict['shape'].split(',')))
            placeholders[node.name] = node_dict
        if node.op == "Variable" or node.op == "VariableV2":
            variables.append(node.name)
        if len(children(node.name, graph)) == 0:
            if node.op not in unlikely_output_types and node.name.split('/')[-1] not in unlikely_output_types:
                outputs.append(node.name)
    result = dict()
    result['inputs'] = placeholders
    result['outputs'] = outputs

    if reshape_net:
        out_layer = list(result['inputs'].keys()) + result['outputs']
        feed_dict = {}
        for inputl in reshape_net:
            feed_dict.update({inputl: np.ones(shape=reshape_net[inputl])})
        scoring_res = collect_tf_references(model_path=model_path, feed_dict=feed_dict, out_layer=out_layer)
        for layer in scoring_res:
            if layer in result['inputs']:
                result['inputs'][layer]['shape'] = scoring_res[layer].shape

    return result
Example #3
def testRepr(self):
    for enum, name in dtypes._TYPE_TO_STRING.items():
        dtype = tf.DType(enum)
        self.assertEqual(repr(dtype), 'tf.' + name)
        dtype2 = eval(repr(dtype))
        self.assertEqual(type(dtype2), tf.DType)
        self.assertEqual(dtype, dtype2)
Example #4
def deserialize_tensor_value(value_proto):
  """Deserializes a tensor value from `executor_pb2.Value`.

  Args:
    value_proto: An instance of `executor_pb2.Value`.

  Returns:
    A tuple `(value, type_spec)`, where `value` is a Numpy array that represents
    the deserialized value, and `type_spec` is an instance of `tff.TensorType`
    that represents its type.

  Raises:
    TypeError: If the arguments are of the wrong types.
    ValueError: If the value is malformed.
  """
  py_typecheck.check_type(value_proto, executor_pb2.Value)
  which_value = value_proto.WhichOneof('value')
  if which_value != 'tensor':
    raise ValueError('Not a tensor value: {}'.format(which_value))

  # TODO(b/134543154): Find some way of creating the `TensorProto` using a
  # proper public interface rather than creating a dummy value that we will
  # overwrite right away.
  tensor_proto = tf.make_tensor_proto(values=0)
  if not value_proto.tensor.Unpack(tensor_proto):
    raise ValueError('Unable to unpack the received tensor value.')

  tensor_value = tf.make_ndarray(tensor_proto)
  value_type = computation_types.TensorType(
      dtype=tf.DType(tensor_proto.dtype),
      shape=tf.TensorShape(tensor_proto.tensor_shape))

  return tensor_value, value_type
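A round trip through both helpers reduces to `Any.Pack`/`Any.Unpack` plus `tf.make_ndarray`. A minimal sketch, assuming the generated `tensor_pb2` module is an acceptable dependency (constructing `TensorProto` directly sidesteps the dummy-value workaround flagged in the TODO above, though it is not a public interface either):

import numpy as np
import tensorflow as tf
from google.protobuf import any_pb2
from tensorflow.core.framework import tensor_pb2

original = np.arange(6, dtype=np.int64).reshape(2, 3)
any_pb = any_pb2.Any()
any_pb.Pack(tf.make_tensor_proto(original))

tensor_proto = tensor_pb2.TensorProto()  # empty proto instead of a dummy value
assert any_pb.Unpack(tensor_proto)
roundtripped = tf.make_ndarray(tensor_proto)
assert np.array_equal(original, roundtripped)
print(tf.DType(tensor_proto.dtype))  # <dtype: 'int64'>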
Example #5
def _parse_tensor_info_proto(tensor_info):
  """Returns a ParsedTensorInfo instance from a TensorInfo proto."""
  encoding = tensor_info.WhichOneof("encoding")
  if encoding == "name":
    dtype = tf.DType(tensor_info.dtype)
    shape = tf.TensorShape(tensor_info.tensor_shape)
    return ParsedTensorInfo(dtype=dtype, shape=shape, is_sparse=False)
  elif encoding == "coo_sparse":
    dtype = tf.DType(tensor_info.dtype)
    shape = tf.TensorShape(tensor_info.tensor_shape)
    return ParsedTensorInfo(dtype=dtype, shape=shape, is_sparse=True)
  elif encoding == "composite_tensor":
    spec = tf_utils.composite_tensor_info_to_type_spec(tensor_info)
    return ParsedTensorInfo.from_type_spec(spec)
  else:
    raise ValueError("Unsupported TensorInfo encoding %r" % encoding)
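A hypothetical usage sketch with a hand-built `TensorInfo` (the `ParsedTensorInfo` and `tf_utils` names above are module-specific and omitted here); it shows the dtype/shape fields the `name` branch reads:

import tensorflow as tf
from tensorflow.core.framework import types_pb2
from tensorflow.core.protobuf import meta_graph_pb2

tensor_info = meta_graph_pb2.TensorInfo(name='images:0', dtype=types_pb2.DT_FLOAT)
tensor_info.tensor_shape.dim.add(size=-1)   # unknown batch dimension
tensor_info.tensor_shape.dim.add(size=224)

print(tensor_info.WhichOneof('encoding'))        # name
print(tf.DType(tensor_info.dtype))               # <dtype: 'float32'>
print(tf.TensorShape(tensor_info.tensor_shape))  # (None, 224)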
Example #6
    def device_function(self, var):
        """Choose a device for the input variable.

    Args:
      var: an Variable.

    Returns:
      The device for placing the var.
    """
        if var.type not in ('Variable', 'VariableV2', 'VarHandleOp'):
            tf.logging.debug('Place {} on last device: {}.'.format(
                var.name, self._last_device))
            return self._last_device

        shape = tf.TensorShape(var.get_attr('shape'))
        assert shape.num_elements() is not None

        size = tf.DType(var.get_attr('dtype')).size
        mem, device = heapq.heappop(self._mem_device_heap)
        mem += shape.num_elements() * size
        heapq.heappush(self._mem_device_heap, (mem, device))
        tf.logging.debug(
            'Place variable {} on {} and consumes {} Bytes.'.format(
                var.name, device, mem))
        self._last_device = device

        return device
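The balancing strategy itself is plain greedy placement over a min-heap of `(bytes_allocated, device)` pairs. A self-contained sketch with made-up sizes (the device names and variable sizes here are illustrative, not from the source):

import heapq

devices = ['/job:ps/task:0', '/job:ps/task:1']
mem_device_heap = [(0, d) for d in devices]
heapq.heapify(mem_device_heap)

# Always place the next variable on the currently least-loaded device.
for name, num_elements, dtype_size in [('w1', 1024, 4), ('w2', 4096, 4), ('b1', 32, 4)]:
    mem, device = heapq.heappop(mem_device_heap)
    mem += num_elements * dtype_size
    heapq.heappush(mem_device_heap, (mem, device))
    print('{} -> {} ({} bytes now on that device)'.format(name, device, mem))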
Example #7
def deserialize_type(type_proto):
    """Deserializes 'type_proto' as a computation_types.Type.

    NOTE: Currently only deserialization for tensor, named tuple, sequence, and
    function types is implemented.

    Args:
      type_proto: An instance of pb.Type or None.

    Returns:
      The corresponding instance of computation_types.Type (or None if the
      argument was None).

    Raises:
      TypeError: if the argument is of the wrong type.
      NotImplementedError: for type variants for which deserialization is not
        implemented.
    """
    # TODO(b/113112885): Implement deserialization of the remaining types.
    if type_proto is None:
        return None
    py_typecheck.check_type(type_proto, pb.Type)
    type_variant = type_proto.WhichOneof('type')
    if type_variant is None:
        return None
    elif type_variant == 'tensor':
        return computation_types.TensorType(
            dtype=tf.DType(type_proto.tensor.dtype),
            shape=tf.TensorShape(type_proto.tensor.shape))
    elif type_variant == 'sequence':
        return computation_types.SequenceType(
            deserialize_type(type_proto.sequence.element))
    elif type_variant == 'tuple':
        return computation_types.NamedTupleType([
            (lambda k, v: (k, v)
             if k else v)(e.name, deserialize_type(e.value))
            for e in type_proto.tuple.element
        ])
    elif type_variant == 'function':
        return computation_types.FunctionType(
            parameter=deserialize_type(type_proto.function.parameter),
            result=deserialize_type(type_proto.function.result))
    elif type_variant == 'placement':
        return computation_types.PlacementType()
    elif type_variant == 'federated':
        placement_oneof = type_proto.federated.placement.WhichOneof(
            'placement')
        if placement_oneof == 'value':
            return computation_types.FederatedType(
                member=deserialize_type(type_proto.federated.member),
                placement=placement_literals.uri_to_placement_literal(
                    type_proto.federated.placement.value.uri),
                all_equal=type_proto.federated.all_equal)
        else:
            raise NotImplementedError(
                'Deserialization of federated types with placement spec as {} '
                'is not currently implemented.'.format(placement_oneof))
    else:
        raise NotImplementedError(
            'Unknown type variant {}.'.format(type_variant))
Example #8
def attr_value_to_python_type(attr_value: tf.AttrValue) -> Any:
    """
  Inverse of python_type_to_attr_value().

  Args:
    attr_value: Protocol buffer version of a node's attribute value

  Returns:
    A Python object or built-in type corresponding to the field in
    `attr_value` that is in use.
  """
    # TODO(frreiss): Handle AttrValues that are lists
    if attr_value.HasField("s"):  # str
        # TODO(frreiss): Should we return the binary value here?
        return tf.compat.as_str(attr_value.s)
    elif attr_value.HasField("i"):  # int
        return attr_value.i
    elif attr_value.HasField("f"):  # float
        return attr_value.f
    elif attr_value.HasField("b"):  # bool
        return attr_value.b
    elif attr_value.HasField("type"):  # DType
        return tf.DType(attr_value.type)
    elif attr_value.HasField("shape"):  # TensorShape
        # Undocumented behavior of public API: tf.TensorShape constructor accepts
        # a TensorShapeProto.
        return tf.TensorShape(attr_value.shape)
    elif attr_value.HasField("tensor"):  # TensorProto
        return tf.make_ndarray(attr_value.tensor)
    # TODO(frreiss): Convert the "func" and "placeholder" fields of the union
    #  here
    else:
        raise ValueError("Don't know how to convert AttrValue {} to "
                         "a Python object".format(attr_value))
Example #9
def summarize_graph(graph_def):
    unlikely_output_types = [
        'Const', 'Assign',
        'NoOp', 'Placeholder',
        'Assert', 'switch_t', 'switch_f'
    ]
    placeholders = dict()
    outputs = list()
    graph = tf.Graph()
    with graph.as_default():  # pylint: disable=not-context-manager
        tf.import_graph_def(graph_def, name='')
    for node in graph.as_graph_def().node:  # pylint: disable=no-member
        if node.op == 'Placeholder':
            node_dict = dict()
            node_dict['type'] = tf.DType(node.attr['dtype'].type).name
            new_shape = tf.TensorShape(node.attr['shape'].shape)
            node_dict['shape'] = str(new_shape).replace(' ', '').replace('?', '-1')
            placeholders[node.name] = node_dict
        if len(children(node.name, graph)) == 0:
            if node.op not in unlikely_output_types and \
                node.name.split('/')[-1] not in unlikely_output_types:
                outputs.append(node.name)
    result = dict()
    result['inputs'] = placeholders
    result['outputs'] = outputs
    return result
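A hypothetical usage sketch on a tiny TF1-style graph (note that `children()` is a helper assumed by the function above and not shown, so only the placeholder-reading part is reproduced):

import tensorflow as tf

graph = tf.Graph()
with graph.as_default():
    tf.compat.v1.placeholder(tf.float32, shape=[None, 28, 28], name='image')

for node in graph.as_graph_def().node:
    if node.op == 'Placeholder':
        print(tf.DType(node.attr['dtype'].type).name)    # float32
        print(tf.TensorShape(node.attr['shape'].shape))  # (None, 28, 28)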
Example #10
def PrintNodeInfo(self, node):
    get_real_shape = lambda dims: [dim.size for dim in dims]
    shape, dtype = None, None  # avoid NameError when the attrs are absent
    if "shape" in node.attr:
        shape = get_real_shape(node.attr["shape"].shape.dim)
    if "dtype" in node.attr:
        dtype = tf.DType(node.attr["dtype"].type)
    print("    <<=== (name={}, type={}, shape={} )".format(node.name, dtype, shape))
    return (node.name, dtype, shape)
Example #11
def get_dtype(self):
  if 'T' in self.attr:        ## if op
    dtype_enum = self.attr['T'].type
  elif 'dtype' in self.attr:  ## if tensors
    dtype_enum = self.attr['dtype'].type
  elif 'output_types' in self.attr:       ## if IteratorGetNext
    dtype_enum = self.attr['output_types'].list.type[0]
  else:
    raise AttributeError('NodeDef not supported')
  return tf.DType(dtype_enum)
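A hand-built `NodeDef` showing the `dtype` branch above (the `T` and `output_types` branches follow the same attr-map pattern):

import tensorflow as tf
from tensorflow.core.framework import node_def_pb2, types_pb2

node = node_def_pb2.NodeDef(name='x', op='Placeholder')
node.attr['dtype'].type = types_pb2.DT_INT32
print(tf.DType(node.attr['dtype'].type))  # <dtype: 'int32'>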
Example #12
def main(_):
    is_saved_model = tf.saved_model.loader.maybe_saved_model_directory(
        FLAGS.model_dir)
    if not is_saved_model:
        print('"{0}" is not a saved_model directory'.format(FLAGS.model_dir))
        sys.exit(-1)

    sess = tf.Session()
    mymodel = tf.saved_model.loader.load(
        sess, [tf.saved_model.tag_constants.SERVING], FLAGS.model_dir)
    serving_sig = mymodel.signature_def[
        tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]

    print('Model Inputs:')
    ind = 1
    for key in serving_sig.inputs:
        print('  Input {0}:'.format(ind))
        print('    Name: {0}'.format(key))
        input_tmp = serving_sig.inputs[key]
        print('    Dtype: {0}'.format(tf.DType(input_tmp.dtype).name))

        ts = tf.TensorShape(input_tmp.tensor_shape)
        if ts.dims is None:
            print('    Shape: {0}'.format(ts.dims))
        else:
            print('    Shape: {0}'.format(ts.as_list()))
        ind = ind + 1

    print('Model Outputs:')
    ind = 1
    for key in serving_sig.outputs:
        print('  Output {0}:'.format(ind))
        print('    Name: {0}'.format(key))
        output_tmp = serving_sig.outputs[key]
        print('    Dtype: {0}'.format(tf.DType(output_tmp.dtype).name))

        ts = tf.TensorShape(output_tmp.tensor_shape)
        if ts.dims is None:
            print('    Shape: {0}'.format(ts.dims))
        else:
            print('    Shape: {0}'.format(ts.as_list()))
        ind = ind + 1
Example #13
def generic_input(processor, *args, **kwargs):
  # pylint: disable=protected-access
  if not isinstance(processor, function._DefinedFunction):
    # Helper if processor is a python callable.
    processor = function.Defun(tf.string)(processor)
  out_types = [
      tf.DType(a.type) for a in processor.definition.signature.output_arg
  ]
  assert out_types[-1] == tf.int32, ('%s is not expected.' % out_types[-1])
  return gen_x_ops.generic_input(
      processor=processor, out_types=out_types[:-1], *args, **kwargs)
Example #14
def _convert_dtype(arg_type):
    """Get a DLHub type for a TensorFlow type

    Args:
        arg_type (int): Enumeration number of a Tensorflow Type
    Returns:
        (string) DLHub-schema-compatible name of the type
    """
    # Get the name of the type
    dtype = tf.DType(arg_type)
    return simplify_numpy_dtype(np.dtype(dtype.as_numpy_dtype))
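The enum-to-NumPy hop in isolation (`simplify_numpy_dtype` is DLHub-specific and omitted); `DT_FLOAT` is used as a stand-in for the enumeration argument:

import numpy as np
import tensorflow as tf
from tensorflow.core.framework import types_pb2

dtype = tf.DType(types_pb2.DT_FLOAT)
assert np.dtype(dtype.as_numpy_dtype) == np.float32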
Example #15
def _get_values(self, data_blob, dtype_enum, shape_string):
  """Obtains values for histogram data given blob and dtype enum.

  Args:
    data_blob: The blob obtained from the database.
    dtype_enum: The enum representing the dtype.
    shape_string: A comma-separated string of numbers denoting shape.

  Returns:
    The histogram values as a list served to the frontend.
  """
  buf = np.frombuffer(data_blob, dtype=tf.DType(dtype_enum).as_numpy_dtype)
  return buf.reshape([int(i) for i in shape_string.split(',')]).tolist()
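The blob decoding above round-trips with `numpy.tobytes`. A minimal sketch of the same reconstruction outside the database context:

import numpy as np
import tensorflow as tf

arr = np.arange(6, dtype=np.float64).reshape(2, 3)
# What the database would hand back: raw bytes, a dtype enum, a shape string.
blob, dtype_enum, shape_string = arr.tobytes(), tf.float64.as_datatype_enum, '2,3'

buf = np.frombuffer(blob, dtype=tf.DType(dtype_enum).as_numpy_dtype)
restored = buf.reshape([int(i) for i in shape_string.split(',')])
assert np.array_equal(arr, restored)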
Example #16
def _extract_inputs(self):
    inputs = []
    for node in self.graph_def.node:
        if node.op == 'Placeholder':
            origin_inputs = {}
            origin_inputs['name'] = node.name
            origin_inputs['dType'] = tf.DType(
                node.attr['dtype'].type).as_numpy_dtype.__name__
            dim = node.attr['shape'].shape.dim
            origin_inputs['size'] = [dim[i].size for i in range(len(dim))]
            inputs.append(origin_inputs)
    return inputs
Example #17
  def _get_value(self, scalar_data_blob, dtype_enum):
    """Obtains value for scalar event given blob and dtype enum.

    Args:
      scalar_data_blob: The blob obtained from the database.
      dtype_enum: The enum representing the dtype.

    Returns:
      The scalar value.
    """
    tensorflow_dtype = tf.DType(dtype_enum)
    buf = np.frombuffer(scalar_data_blob, dtype=tensorflow_dtype.as_numpy_dtype)
    return buf.item()  # np.asscalar was removed in modern NumPy
Example #18
def _extract_outputs(self):
    outputs = []
    v = self.signature
    for key in v.outputs:
        origin_outputs = {}
        origin_outputs['signatureConst'] = key
        origin_outputs['name'] = v.outputs[key].name
        origin_outputs['dtype'] = tf.DType(
            v.outputs[key].dtype).as_numpy_dtype.__name__
        dim = v.outputs[key].tensor_shape.dim
        origin_outputs['size'] = [dim[i].size for i in range(len(dim))]
        outputs.append(origin_outputs)
    return outputs
Example #19
def __setitem__(self, key, new_value):
  name = key[0] if isinstance(key, tuple) else key
  tensor = self.nodes[name].attr["value"].tensor
  node_shape = tuple([int(d.size) for d in tensor.tensor_shape.dim])
  if isinstance(key, tuple):
    array = np.frombuffer(tensor.tensor_content, dtype="float32")
    array = array.reshape(node_shape).copy()
    array[key[1:]] = new_value
    tensor.tensor_content = array.tobytes()
  else:
    assert new_value.shape == node_shape
    dtype = tf.DType(tensor.dtype).as_numpy_dtype
    tensor.tensor_content = new_value.astype(dtype).tobytes()
Example #20
def test_get_op():
    for np_type in types:
        for kDLContext, th_device in devices.items():
            np_array = np.array([1, 2, 3], dtype=np_type)
            th_tensor = th_device(th.tensor(np_array))
            dl_cap = to_dlpack(th_tensor)
            tf_device_and_dtype = tfdlpack.get_device_and_dtype(dl_cap)
            device_id = th_tensor.device.index
            device_id = 0 if device_id is None else device_id
            assert kDLContext == tf_device_and_dtype[0].item()
            assert device_id == tf_device_and_dtype[1].item()
            assert tf.DType(
                tf_device_and_dtype[2].item()).as_numpy_dtype == np_type
Example #21
def _AssignVar(self, var_op):
    size = tf.DType(var_op.get_attr('dtype')).size
    shape = tf.TensorShape(var_op.get_attr('shape'))
    assert self._var_space_pq, ('No ps devices to use.')
    allocated, device = heapq.heappop(self._var_space_pq)
    if shape.num_elements() is None:
        assert var_op.name.endswith(
            'wb/var'), 'Unexpected name pattern: %s' % var_op.name
        # CuDNN RNN var shapes aren't known statically; use a constant
        # estimate to avoid introducing more complexity.
        allocated += 10 * 1024**2 * size
    else:
        allocated += shape.num_elements() * size
    heapq.heappush(self._var_space_pq, (allocated, device))
    tf.logging.info('Place variable %s on %s %d', var_op.name, device,
                    allocated)
    return device
Example #22
    def _split_in_quantiles(self, data):
        """Given data, finds quantiles of it along zeroth axis. Each quantile is assigned to a
        component. Taken from "Sum-Product Networks: A New Deep Architecture"
        (Poon and Domingos 2012), https://arxiv.org/abs/1202.3732.

        Params:
            data (numpy.ndarray): Numpy array containing data to split into quantiles.

        Returns:
            Data per quantile: a list of numpy.ndarray corresponding to quantiles.
        """
        batch_size = data.shape[0]
        quantile_sections = np.arange(
            batch_size // self._num_components, batch_size,
            int(np.ceil(batch_size / self._num_components)))
        sorted_features = np.sort(data, axis=0).astype(
            tf.DType(conf.dtype).as_numpy_dtype())
        values_per_quantile = np.split(sorted_features,
                                       indices_or_sections=quantile_sections,
                                       axis=0)
        return values_per_quantile
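The quantile split in isolation, with the SPN machinery stripped out (`num_components` and the data here are illustrative):

import numpy as np

num_components = 4
data = np.random.default_rng(0).normal(size=(100, 3))

batch_size = data.shape[0]
quantile_sections = np.arange(
    batch_size // num_components, batch_size,
    int(np.ceil(batch_size / num_components)))
values_per_quantile = np.split(
    np.sort(data, axis=0), indices_or_sections=quantile_sections, axis=0)
print([q.shape for q in values_per_quantile])  # four (25, 3) chunks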
Example #23
def from_dlpack(dl_capsule):
    """Convert capsule to tf tensor"""
    device_and_dtype = get_device_and_dtype(dl_capsule)
    device = device_and_dtype[:2]
    dtype = device_and_dtype[2]
    ptr = get_capsule_address(dl_capsule, consume=True)
    if device[0] == 1:
        tf_device_type = "cpu"
        tf_device_id = int(device[1])
    elif device[0] == 2:
        tf_device_type = "gpu"
        tf_device_id = int(device[1])
    else:
        raise RuntimeError("Unsupported Device")
    tf_device = "/{}:{}".format(tf_device_type, tf_device_id)
    with tf.device("cpu:0"):
        ad_tensor = tf.constant([ptr], dtype=tf.uint64)
    with tf.device(tf_device):
        tf_tensor = _from_dlpack(ad_tensor, T=tf.DType(dtype))

    return tf_tensor
Example #24
def testInvalid(self):
    with self.assertRaises(TypeError):
        tf.DType(types_pb2.DT_INVALID)
    with self.assertRaises(TypeError):
        tf.as_dtype(types_pb2.DT_INVALID)
Example #25
def testAllTypesConstructible(self):
    for datatype_enum in types_pb2.DataType.values():
        if datatype_enum == types_pb2.DT_INVALID:
            continue
        self.assertEqual(datatype_enum,
                         tf.DType(datatype_enum).as_datatype_enum)
Example #26
    def create_hist(self, image, nbins, source_range, normalize):
        """Creates a histogram of a given image.

        The histogram is computed on the flattened image, so for colour images
        the function should be used separately on each channel to obtain a
        histogram for each colour channel.

        Parameters
        ----------
        image : array
            An array representation of the image.
        nbins : int, optional
            Number of bins used to calculate the histogram.
        source_range : string, optional
            'image' (default) gets the range from the input image; 'dtype'
            determines the range from the expected range of the images of that
            data type.
        normalize : bool, optional
            If True, the histogram will be normalized by the sum of its values.

        Returns
        -------
        hist : array
            The values of the histogram.
        bin_centers : array
            The values of the centers of the bins.

        Example
        -------
        See main.py for an example script.
        """
        # check the shape of image
        shape = tf.shape(image)

        if (tf.size(shape) == 3):
            print("If this is a colour image, the histogram will be computed "
                  "on the flattened image. You can instead apply this function "
                  "to each color channel.")

        # setup
        sess = tf.InteractiveSession()
        image = tf.constant(image)

        # flatten image
        image_flatten = tf.reshape(image, [-1])

        # specify the source range
        if (source_range == 'image'):

            # general range
            min_val = tf.reduce_min(image_flatten).eval()
            max_val = tf.reduce_max(image_flatten).eval()
            hist_range = tf.constant([min_val, max_val], dtype=tf.float64)

        elif (source_range == 'dtype'):

            # get the limits of the type
            hist_range = tf.DType(image_flatten.dtype).limits
            hist_range = tf.constant(hist_range, dtype=tf.float64)

        else:
            print('Wrong value for `source_range` parameter')

        # cast
        image_flatten = tf.dtypes.cast(image_flatten, tf.float64)

        # get values and bin edges of the histogram
        hist = tf.histogram_fixed_width(image_flatten, hist_range, nbins=nbins)
        bins = tf.histogram_fixed_width_bins(image_flatten,
                                             hist_range,
                                             nbins=nbins)

        bin_centres = (bins[:-1] + bins[1:]) / 2

        # normalize if specified
        if (normalize):
            hist = hist / tf.reduce_sum(hist)

        return hist.eval(), bin_centres.eval()
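The 'dtype' branch above leans on `tf.DType` exposing value bounds. A small sketch of those properties (using `uint8` as an example):

import tensorflow as tf

print(tf.dtypes.uint8.min, tf.dtypes.uint8.max)  # 0 255
print(tf.dtypes.uint8.limits)                    # (0, 255), as used in the snippet above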
Example #27
def GenericInput(processor, *args, **kwargs):
    """Builds a generic input pipeline.

  Example usage::

    def ParseRecord(record):
      # Given a tf.string record, return a (NestedMap, bucketing key) pair.
      feature_map = ...
      features = tf.parse_single_example(record, feature_map)
      # Each example is represented by a NestedMap of tensors (without a
      # batch dimension).
      example = py_utils.NestedMap(field1=..., field2=...)
      # bucketing_key is an int scalar tensor.
      # Use 1 if all examples are of the same size.
      bucketing_key = tf.to_int32(1)
      return example, bucketing_key

    input_batch = GenericInput(ParseRecord, file_pattern=..., ...)
    # input_batch is a NestedMap of tensors, where dim 0 of each tensor
    # represents the batch dimension.
    input_batch.field1 = ...

  Args:
    processor: a function that takes a string record as input and returns a list
      of tensors or NestedMaps representing one example. The last return value
      of processor must be an int32 scalar tensor that represents the bucketing
      key (e.g., sequence length for sequence inputs).
    *args: additional args for x_ops.generic_input.
    **kwargs: additional keyword args for x_ops.generic_input.

  Returns:
    A list of tensors or NestedMaps, similar `processor`'s return, except:
      * The bucket key is not included in the output.
      * Every tensor will have an additional dimension 0 that represents the
        batch dimension.
  """
    output_tmpl = py_utils.NestedMap()

    def _FlatOutputProcessor(inputs):
        """Returns a flattened list of 'processor(inputs)'."""
        outputs = processor(inputs)
        tf.logging.debug('Processor outputs=%s', outputs)
        assert len(outputs) > 1, outputs
        # Add 'outputs' as a list so that each element will be flattened.
        output_tmpl.values = list(outputs)
        flat_outputs = output_tmpl.Flatten()
        tf.logging.debug('Processor flat outputs=%s', flat_outputs)
        tf.logging.debug('extra_inputs=%s extra_args=%s extra_vars=%s',
                         function.get_extra_inputs(),
                         function.get_extra_args(), function.get_extra_vars())
        assert not function.get_extra_args(), (
            'fns {} is not pure: extra_args={}'.format(
                processor, function.get_extra_args()))
        return flat_outputs

    proc_fn = function.Defun(tf.string)(_FlatOutputProcessor)

    out_types = [
        tf.DType(a.type) for a in proc_fn.definition.signature.output_arg
    ]
    assert out_types[-1] == tf.int32, ('%s is not expected.' % out_types[-1])
    flat_outputs = py_x_ops.gen_x_ops.generic_input(processor=proc_fn,
                                                    out_types=out_types[:-1],
                                                    *args,
                                                    **kwargs)
    tf.logging.debug('x_ops.generic_input flat_outputs=%s', flat_outputs)
    if not output_tmpl:
        return flat_outputs
    # Pack flat_outputs to outputs.
    output_tmpl.values.pop(-1)
    outputs = output_tmpl.Pack(flat_outputs).values
    tf.logging.debug('x_ops.generic_input outputs=%s', outputs)
    return outputs
Example #28
    def Summary(self):
        placeholders = []
        variables = []
        print("Graph Version: {}.{}".format(self.graph.versions.producer, self.graph.versions.min_consumer))
        for node in self.graph.node:
            if node.op == "Placeholder":
                placeholders.append(node)
            if node.op == "Variable" or node.op == "VariableV2":
                variables.append(node)
        if len(placeholders) == 0:
            print("No inputs spotted")
        else:
            print("Found {} possible inputs: ".format(len(placeholders)))
            self.summray_dict["inputs"] = []
            for node in placeholders:
                in_info = self.PrintNodeInfo(node)
                self.summray_dict["inputs"].append(in_info)
        if len(variables) == 0:
            print("No variables spotted")
        else:
            print("Found {} variables".format(len(variables)))
            self.summray_dict["variables"] = []
            for node in variables:
                var_info = self.PrintNodeInfo(node)
                self.summray_dict["variables"].append(var_info)

        output_map = {}
        self.MapNodesToOutputs(output_map)
        outputs = []
        unlikely_output_types = ["Const", "Assign", "NoOp", "Placeholder", "VarIsInitializedOp"]
        for node in self.graph.node:
            if (node.name not in output_map) and (node.op not in unlikely_output_types):
                outputs.append(node)
        if len(outputs) == 0:
            print("No outputs spotted")
        else:
            print("Found {} possible outputs:".format(len(outputs)))
            self.summray_dict["outputs"] = []
            for node in outputs:
                print("    ===>> (name={}, op={})".format(node.name, node.op))
                self.summray_dict["outputs"].append(node.name)

        const_parameter_count = 0
        variable_parameter_count = 0
        control_edge_count = 0
        device_counts = {}
        for node in self.graph.node:
            for input_name in node.input:
                if input_name[0] == "^":
                    control_edge_count += 1
            if len(node.device) != 0:
                device_counts[node.device] = device_counts.get(node.device, 0) + 1
            if node.op in ["Const", "Variable", "VariableV2"]:
                if "value" in node.attr:
                    tensor = tf.io.parse_tensor(node.attr["value"].tensor.SerializeToString(), tf.DType(node.attr["value"].tensor.dtype))
                    num_elements = tensor.shape.num_elements()
                    if node.op == "Const":
                        const_parameter_count += num_elements if num_elements is not None else 0
                    else:
                        variable_parameter_count += num_elements
        self.summray_dict["const_parameter_count"] = const_parameter_count
        self.summray_dict["variable_parameter_count"] = variable_parameter_count
        self.summray_dict["control_edge_count"] = control_edge_count
        print("Found {} const parameters, {} variable parameters, and {} control_edges".format(
            const_parameter_count, variable_parameter_count, control_edge_count))
        if len(device_counts.keys()) != 0:
            str_dev_info = ""
            for device_info, count in device_counts.items():
                str_dev_info += "%s nodes assigned to device %s, " % (str(count), str(device_info))
            print(str_dev_info)
Example #29
  def pr_curves_impl(self, runs, tag):
    """Creates the JSON object for the PR curves response for a run-tag combo.

    Arguments:
      runs: A list of runs to fetch the curves for.
      tag: The tag to fetch the curves for.

    Raises:
      ValueError: If no PR curves could be fetched for a run and tag.

    Returns:
      The JSON object for the PR curves route response.
    """
    if self._db_connection_provider:
      # Serve data from the database.
      db = self._db_connection_provider()

      # We select for steps greater than -1 because the writer inserts
      # placeholder rows en masse. The check for step filters out those rows.
      cursor = db.execute('''
        SELECT
          Runs.run_name,
          Tensors.step,
          Tensors.computed_time,
          Tensors.data,
          Tensors.dtype,
          Tensors.shape,
          Tags.plugin_data
        FROM Tensors
        JOIN Tags
          ON Tensors.series = Tags.tag_id
        JOIN Runs
          ON Tags.run_id = Runs.run_id
        WHERE
          Runs.run_name IN (%s)
          AND Tags.tag_name = ?
          AND Tags.plugin_name = ?
          AND Tensors.step > -1
        ORDER BY Tensors.step
      ''' % ','.join(['?'] * len(runs)), runs + [tag, metadata.PLUGIN_NAME])
      response_mapping = {}
      for (run, step, wall_time, data, dtype, shape, plugin_data) in cursor:
        if run not in response_mapping:
          response_mapping[run] = []
        dtype_for_buf = tf.DType(dtype) if USE_TF else tf.dtypes.DType(dtype)
        buf = np.frombuffer(data, dtype=dtype_for_buf.as_numpy_dtype)
        data_array = buf.reshape([int(i) for i in shape.split(',')])
        plugin_data_proto = plugin_data_pb2.PrCurvePluginData()
        string_buffer = np.frombuffer(plugin_data, dtype=np.dtype('b'))
        plugin_data_proto.ParseFromString(tf.compat.as_bytes(
            string_buffer.tostring()))
        thresholds = self._compute_thresholds(plugin_data_proto.num_thresholds)
        entry = self._make_pr_entry(step, wall_time, data_array, thresholds)
        response_mapping[run].append(entry)
    else:
      # Serve data from events files.
      response_mapping = {}
      for run in runs:
        try:
          tensor_events = self._multiplexer.Tensors(run, tag)
        except KeyError:
          raise ValueError(
              'No PR curves could be found for run %r and tag %r' % (run, tag))

        content = self._multiplexer.SummaryMetadata(
            run, tag).plugin_data.content
        pr_curve_data = metadata.parse_plugin_metadata(content)
        thresholds = self._compute_thresholds(pr_curve_data.num_thresholds)
        response_mapping[run] = [
            self._process_tensor_event(e, thresholds) for e in tensor_events]
    return response_mapping
Example #30
def infer_type(arg):
    """Infers the TFF type of the argument (a `computation_types.Type` instance).

  WARNING: This function is only partially implemented.

  The kinds of arguments that are currently correctly recognized:
  - tensors, variables, and data sets,
  - things that are convertible to tensors (including numpy arrays, builtin
    types, as well as lists and tuples of any of the above, etc.),
  - nested lists, tuples, namedtuples, anonymous tuples, dict, and OrderedDicts.

  Args:
    arg: The argument, the TFF type of which to infer.

  Returns:
    Either an instance of `computation_types.Type`, or `None` if the argument is
    `None`.
  """
    # TODO(b/113112885): Implement the remaining cases here on an as-needed basis.
    if arg is None:
        return None
    elif isinstance(arg, typed_object.TypedObject):
        return arg.type_signature
    elif tf.contrib.framework.is_tensor(arg):
        return computation_types.TensorType(arg.dtype.base_dtype, arg.shape)
    elif isinstance(arg, (tf.data.Dataset, tf.compat.v1.data.Dataset,
                          tf.compat.v2.data.Dataset)):
        return computation_types.SequenceType(
            tf_dtypes_and_shapes_to_type(
                tf.compat.v1.data.get_output_types(arg),
                tf.compat.v1.data.get_output_shapes(arg)))
    elif isinstance(arg, anonymous_tuple.AnonymousTuple):
        return computation_types.NamedTupleType([
            (k, infer_type(v)) if k else infer_type(v)
            for k, v in anonymous_tuple.to_elements(arg)
        ])
    elif py_typecheck.is_attrs(arg):
        items = attr.asdict(arg,
                            dict_factory=collections.OrderedDict,
                            recurse=False)
        return computation_types.NamedTupleTypeWithPyContainerType(
            [(k, infer_type(v)) for k, v in six.iteritems(items)], type(arg))
    elif py_typecheck.is_named_tuple(arg):
        items = arg._asdict()
        return computation_types.NamedTupleTypeWithPyContainerType(
            [(k, infer_type(v)) for k, v in six.iteritems(items)], type(arg))
    elif isinstance(arg, dict):
        if isinstance(arg, collections.OrderedDict):
            items = six.iteritems(arg)
        else:
            items = sorted(six.iteritems(arg))
        return computation_types.NamedTupleTypeWithPyContainerType(
            [(k, infer_type(v)) for k, v in items], type(arg))
    elif isinstance(arg, (tuple, list)):
        elements = []
        all_elements_named = True
        for element in arg:
            all_elements_named &= py_typecheck.is_name_value_pair(element)
            elements.append(infer_type(element))
        # If this is a tuple of (name, value) pairs, the caller most likely intended
        # this to be a NamedTupleType, so we avoid storing the Python container.
        if all_elements_named:
            return computation_types.NamedTupleType(elements)
        else:
            return computation_types.NamedTupleTypeWithPyContainerType(
                elements, type(arg))
    elif isinstance(arg, six.string_types):
        return computation_types.TensorType(tf.string)
    elif isinstance(arg, (np.generic, np.ndarray)):
        return computation_types.TensorType(tf.as_dtype(arg.dtype), arg.shape)
    else:
        dtype = {
            bool: tf.bool,
            int: tf.int32,
            float: tf.float32
        }.get(type(arg))
        if dtype:
            return computation_types.TensorType(dtype)
        else:
            # Now fall back onto the heavier-weight processing, as all else failed.
            # Use make_tensor_proto() to make sure to handle it consistently with
            # how TensorFlow is handling values (e.g., recognizing int as int32, as
            # opposed to int64 as in NumPy).
            try:
                # TODO(b/113112885): Find something more lightweight we could use here.
                tensor_proto = tf.make_tensor_proto(arg)
                return computation_types.TensorType(
                    tf.DType(tensor_proto.dtype),
                    tf.TensorShape(tensor_proto.tensor_shape))
            except TypeError as err:
                raise TypeError(
                    'Could not infer the TFF type of {}: {}.'.format(
                        py_typecheck.type_string(type(arg)), str(err)))
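The fallback path's key point, in isolation: `tf.make_tensor_proto` maps a bare Python `int` to `DT_INT32`, where NumPy would typically default to `int64`:

import numpy as np
import tensorflow as tf
from tensorflow.core.framework import types_pb2

tensor_proto = tf.make_tensor_proto(5)
assert tensor_proto.dtype == types_pb2.DT_INT32
print(tf.DType(tensor_proto.dtype))  # <dtype: 'int32'>
print(np.array(5).dtype)             # int64 on most platforms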