Example #1
0
 def test_with_int_list(self):
   t = type_conversions.infer_type([1, 2, 3])
   self.assertEqual(str(t), '<int32,int32,int32>')
   self.assertIsInstance(t, computation_types.StructWithPythonType)
   self.assertIs(t.python_container, list)
Example #2
0
def create_constant(value, type_spec: computation_types.Type) -> ProtoAndType:
    """Returns a tensorflow computation returning a constant `value`.

  The returned computation has the type signature `( -> T)`, where `T` is
  `type_spec`.

  `value` must be a value convertible to a tensor or a structure of values, such
  that the dtype and shapes match `type_spec`. `type_spec` must contain only
  named tuples and tensor types, but these can be arbitrarily nested.

  Args:
    value: A value to embed as a constant in the tensorflow graph.
    type_spec: A `computation_types.Type` to use as the argument to the
      constructed binary operator; must contain only named tuples and tensor
      types.

  Raises:
    TypeError: If the constraints of `type_spec` are violated.
  """
    if not type_analysis.is_generic_op_compatible_type(type_spec):
        raise TypeError(
            'Type spec {} cannot be constructed as a TensorFlow constant in TFF; '
            'only nested tuples and tensors are permitted.'.format(type_spec))
    inferred_value_type = type_conversions.infer_type(value)
    if (inferred_value_type.is_struct()
            and not type_spec.is_assignable_from(inferred_value_type)):
        raise TypeError(
            'Must pass only a tensor or a structure of tensor values to '
            '`create_tensorflow_constant`; encountered a value {v} with inferred '
            'type {t!r}, but needed {s!r}'.format(v=value,
                                                  t=inferred_value_type,
                                                  s=type_spec))
    if inferred_value_type.is_struct():
        value = structure.from_container(value, recursive=True)
    tensor_dtypes_in_type_spec = []

    def _pack_dtypes(type_signature):
        """Appends dtype of `type_signature` to nonlocal variable."""
        if type_signature.is_tensor():
            tensor_dtypes_in_type_spec.append(type_signature.dtype)
        return type_signature, False

    type_transformations.transform_type_postorder(type_spec, _pack_dtypes)

    if (any(x.is_integer for x in tensor_dtypes_in_type_spec)
            and (inferred_value_type.is_tensor()
                 and not inferred_value_type.dtype.is_integer)):
        raise TypeError(
            'Only integers can be used as scalar values if our desired constant '
            'type spec contains any integer tensors; passed scalar {} of dtype {} '
            'for type spec {}.'.format(value, inferred_value_type.dtype,
                                       type_spec))

    result_type = type_spec

    def _create_result_tensor(type_spec, value):
        """Packs `value` into `type_spec` recursively."""
        if type_spec.is_tensor():
            type_spec.shape.assert_is_fully_defined()
            result = tf.constant(value,
                                 dtype=type_spec.dtype,
                                 shape=type_spec.shape)
        else:
            elements = []
            if inferred_value_type.is_struct():
                # Copy the leaf values according to the type_spec structure.
                for (name, elem_type), elem_value in zip(
                        structure.iter_elements(type_spec), value):
                    elements.append(
                        (name, _create_result_tensor(elem_type, elem_value)))
            else:
                # "Broadcast" the value to each level of the type_spec structure.
                for _, elem_type in structure.iter_elements(type_spec):
                    elements.append(
                        (None, _create_result_tensor(elem_type, value)))
            result = structure.Struct(elements)
        return result

    with tf.Graph().as_default() as graph:
        result = _create_result_tensor(result_type, value)
        _, result_binding = tensorflow_utils.capture_result_from_graph(
            result, graph)

    type_signature = computation_types.FunctionType(None, result_type)
    tensorflow = pb.TensorFlow(graph_def=serialization_utils.pack_graph_def(
        graph.as_graph_def()),
                               parameter=None,
                               result=result_binding)
    return _tensorflow_comp(tensorflow, type_signature)
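
A minimal usage sketch for `create_constant` above. The import path for `computation_types` and the exact shape of `ProtoAndType` vary across TFF releases, so treat them as assumptions rather than the canonical API:

import tensorflow as tf

from tensorflow_federated.python.core.impl.types import computation_types  # path is an assumption

# Assumes `create_constant` (defined above) is in scope, e.g. imported from the
# tensorflow-computation factory module it lives in.
type_spec = computation_types.StructType([
    computation_types.TensorType(tf.int32),
    computation_types.TensorType(tf.float32),
])
proto, type_signature = create_constant([1, 2.0], type_spec)
# `type_signature` is expected to print as `( -> <int32,float32>)`, and `proto`
# to be the serialized TensorFlow computation producing that constant.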
Example #3
0
 def test_with_numpy_float64_scalar(self):
   self.assertEqual(str(type_conversions.infer_type(np.float64(1))), 'float64')
Example #4
0
 def test_with_none(self):
   self.assertIsNone(type_conversions.infer_type(None))
Example #5
0
 def test_with_scalar_bool_tensor(self):
   self.assertEqual(
       str(type_conversions.infer_type(tf.constant(False))), 'bool')
Example #6
0
 def test_with_ordered_dict(self):
   t = type_conversions.infer_type(
       collections.OrderedDict([('b', 2.0), ('a', 1)]))
   self.assertEqual(str(t), '<b=float32,a=int32>')
   self.assertIsInstance(t, computation_types.StructWithPythonType)
   self.assertIs(t.python_container, collections.OrderedDict)
Example #7
0
 def test_with_nested_dataset_list_tuple(self):
   t = type_conversions.infer_type(
       tuple([(tf.data.Dataset.from_tensors(x),) for x in [1, True, [0.5]]]))
   self.assertEqual(str(t), '<<int32*>,<bool*>,<float32[1]*>>')
   self.assertIsInstance(t, computation_types.StructWithPythonType)
   self.assertIs(t.python_container, tuple)
Example #8
0
 def test_with_int(self):
     self.assertEqual(str(type_conversions.infer_type(10)), 'int32')
Example #9
0
def zero_or_one_arg_fn_to_building_block(
    fn,
    parameter_name: Optional[str],
    parameter_type: Optional[computation_types.Type],
    context_stack: context_stack_base.ContextStack,
    suggested_name: Optional[str] = None,
) -> Tuple[building_blocks.ComputationBuildingBlock, computation_types.Type]:
    """Converts a zero- or one-argument `fn` into a computation building block.

  Args:
    fn: A function with 0 or 1 arguments that contains orchestration logic,
      i.e., that expects zero or one `values_base.Value` and returns a result
      convertible to the same.
    parameter_name: The name of the parameter, or `None` if there is't any.
    parameter_type: The `tff.Type` of the parameter, or `None` if there's none.
    context_stack: The context stack to use.
    suggested_name: The optional suggested name to use for the federated context
      that will be used to serialize this function's body (ideally the name of
      the underlying Python function). It might be modified to avoid conflicts.

  Returns:
    A tuple of `(building_blocks.ComputationBuildingBlock,
    computation_types.Type)`, where the first element contains the logic from
    `fn`, and the second element contains potentially annotated type information
    for the result of `fn`.

  Raises:
    ValueError: if `fn` is incompatible with `parameter_type`.
  """
    py_typecheck.check_callable(fn)
    py_typecheck.check_type(context_stack, context_stack_base.ContextStack)
    if suggested_name is not None:
        py_typecheck.check_type(suggested_name, str)
    if isinstance(context_stack.current,
                  federated_computation_context.FederatedComputationContext):
        parent_context = context_stack.current
    else:
        parent_context = None
    context = federated_computation_context.FederatedComputationContext(
        context_stack, suggested_name=suggested_name, parent=parent_context)
    if parameter_name is not None:
        py_typecheck.check_type(parameter_name, str)
        parameter_name = '{}_{}'.format(context.name, str(parameter_name))
    with context_stack.install(context):
        if parameter_type is not None:
            result = fn(
                value_impl.ValueImpl(
                    building_blocks.Reference(parameter_name, parameter_type),
                    context_stack))
        else:
            result = fn()
        if result is None:
            raise ValueError(
                'The function defined on line {} of file {} has returned a '
                '`NoneType`, but all TFF functions must return some non-`None` '
                'value.'.format(fn.__code__.co_firstlineno,
                                fn.__code__.co_filename))
        annotated_result_type = type_conversions.infer_type(result)
        result = value_impl.to_value(result, annotated_result_type,
                                     context_stack)
        result_comp = value_impl.ValueImpl.get_comp(result)
        symbols_bound_in_context = context_stack.current.symbol_bindings
        if symbols_bound_in_context:
            result_comp = building_blocks.Block(
                local_symbols=symbols_bound_in_context, result=result_comp)
        annotated_type = computation_types.FunctionType(
            parameter_type, annotated_result_type)
        return building_blocks.Lambda(parameter_name, parameter_type,
                                      result_comp), annotated_type
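
A hedged sketch of how this helper might be driven. The import paths below follow TFF's internal layout at one point in time and are assumptions, as is the availability of the default `context_stack`:

import tensorflow as tf

from tensorflow_federated.python.core.impl.context_stack import context_stack_impl  # path is an assumption
from tensorflow_federated.python.core.impl.types import computation_types  # path is an assumption

def identity_fn(x):
  # Orchestration logic over a single `values_base.Value`; here just the
  # identity on the parameter.
  return x

# Assumes `zero_or_one_arg_fn_to_building_block` (defined above) is in scope.
building_block, annotated_type = zero_or_one_arg_fn_to_building_block(
    identity_fn,
    parameter_name='x',
    parameter_type=computation_types.TensorType(tf.int32),
    context_stack=context_stack_impl.context_stack,
    suggested_name='identity_fn')
# `annotated_type` is expected to be the `FunctionType` `(int32 -> int32)`, and
# `building_block` a `building_blocks.Lambda` wrapping the traced body.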
Example #10
0
 def test_with_scalar_int_array_variable_tensor(self):
     self.assertEqual(str(type_conversions.infer_type(tf.Variable([10]))),
                      'int32[1]')
Example #11
0
 def test_with_int_dataset(self):
     self.assertEqual(
         str(type_conversions.infer_type(tf.data.Dataset.from_tensors(10))),
         'int32*')
Example #12
0
 def test_with_scalar_float_variable_tensor(self):
     self.assertEqual(str(type_conversions.infer_type(tf.Variable(0.5))),
                      'float32')
Example #13
0
 def test_with_scalar_bool_variable_tensor(self):
     self.assertEqual(str(type_conversions.infer_type(tf.Variable(True))),
                      'bool')
Example #14
0
 def test_with_float(self):
     self.assertEqual(str(type_conversions.infer_type(0.5)), 'float32')
Example #15
0
 def test_with_nested_float_list(self):
   t = type_conversions.infer_type([[0.1], [0.2], [0.3]])
   self.assertEqual(str(t), '<<float32>,<float32>,<float32>>')
   self.assertIsInstance(t, computation_types.StructWithPythonType)
   self.assertIs(t.python_container, list)
Example #16
0
 def test_with_np_float64(self):
   self.assertEqual(
       str(type_conversions.infer_type(np.float64(10))), 'float64')
Example #17
0
 def test_with_namedtuple(self):
   test_named_tuple = collections.namedtuple('TestNamedTuple', 'y x')
   t = type_conversions.infer_type(test_named_tuple(1, True))
   self.assertEqual(str(t), '<y=int32,x=bool>')
   self.assertIsInstance(t, computation_types.StructWithPythonType)
   self.assertIs(t.python_container, test_named_tuple)
Example #18
0
 def test_with_np_bool(self):
   self.assertEqual(str(type_conversions.infer_type(np.bool(True))), 'bool')
Example #19
0
 def test_with_dataset_list(self):
   t = type_conversions.infer_type(
       [tf.data.Dataset.from_tensors(x) for x in [1, True, [0.5]]])
   self.assertEqual(str(t), '<int32*,bool*,float32[1]*>')
   self.assertIsInstance(t, computation_types.StructWithPythonType)
   self.assertIs(t.python_container, list)
Example #20
0
 def test_with_unicode_string(self):
   self.assertEqual(str(type_conversions.infer_type(u'abc')), 'string')
Example #21
0
 def test_with_empty_tuple(self):
   t = type_conversions.infer_type(())
   self.assertEqual(t, computation_types.StructWithPythonType([], tuple))
Example #22
0
 def test_with_numpy_int_array(self):
   self.assertEqual(
       str(type_conversions.infer_type(np.array([10, 20]))), 'int64[2]')
Example #23
0
 def test_with_scalar_int_tensor(self):
   self.assertEqual(str(type_conversions.infer_type(tf.constant(1))), 'int32')
Example #24
0
 def test_with_numpy_nested_int_array(self):
   self.assertEqual(
       str(type_conversions.infer_type(np.array([[10], [20]]))), 'int64[2,1]')
Example #25
0
 def test_with_int_array_tensor(self):
   self.assertEqual(
       str(type_conversions.infer_type(tf.constant([10, 20]))), 'int32[2]')
Example #26
0
def create_constant(scalar_value, type_spec) -> pb.Computation:
  """Returns a tensorflow computation returning a constant `scalar_value`.

  The returned computation has the type signature `( -> T)`, where `T` is
  `type_spec`.

  `scalar_value` must be a scalar, and cannot be a float if any of the tensor
  leaves of `type_spec` contain an integer data type. `type_spec` must contain
  only named tuples and tensor types, but these can be arbitrarily nested.

  Args:
    scalar_value: A scalar value to place in all the tensor leaves of
      `type_spec`.
    type_spec: A type convertible to an instance of `computation_types.Type`
      via `computation_types.to_type` and whose resulting type tree can only
      contain named tuples and tensors.

  Returns:
    A `pb.Computation` which, when invoked, returns the constant `scalar_value`
    packed into the structure described by `type_spec`.

  Raises:
    TypeError: If the constraints of `type_spec` are violated.
  """
  type_spec = computation_types.to_type(type_spec)

  if not type_analysis.is_generic_op_compatible_type(type_spec):
    raise TypeError(
        'Type spec {} cannot be constructed as a TensorFlow constant in TFF; '
        'only nested tuples and tensors are permitted.'.format(type_spec))
  inferred_scalar_value_type = type_conversions.infer_type(scalar_value)
  if (not isinstance(inferred_scalar_value_type, computation_types.TensorType)
      or inferred_scalar_value_type.shape != tf.TensorShape(())):
    raise TypeError(
        'Must pass a scalar value to `create_tensorflow_constant`; encountered '
        'a value {}'.format(scalar_value))
  tensor_dtypes_in_type_spec = []

  def _pack_dtypes(type_signature):
    """Appends dtype of `type_signature` to nonlocal variable."""
    if isinstance(type_signature, computation_types.TensorType):
      tensor_dtypes_in_type_spec.append(type_signature.dtype)
    return type_signature, False

  type_transformations.transform_type_postorder(type_spec, _pack_dtypes)

  if (any(x.is_integer for x in tensor_dtypes_in_type_spec) and
      not inferred_scalar_value_type.dtype.is_integer):
    raise TypeError(
        'Only integers can be used as scalar values if our desired constant '
        'type spec contains any integer tensors; passed scalar {} of dtype {} '
        'for type spec {}.'.format(scalar_value,
                                   inferred_scalar_value_type.dtype, type_spec))

  def _create_result_tensor(type_spec, scalar_value):
    """Packs `scalar_value` into `type_spec` recursively."""
    if isinstance(type_spec, computation_types.TensorType):
      type_spec.shape.assert_is_fully_defined()
      result = tf.constant(
          scalar_value, dtype=type_spec.dtype, shape=type_spec.shape)
    else:
      elements = []
      for _, type_element in anonymous_tuple.iter_elements(type_spec):
        elements.append(_create_result_tensor(type_element, scalar_value))
      result = elements
    return result

  with tf.Graph().as_default() as graph:
    result = _create_result_tensor(type_spec, scalar_value)
    _, result_binding = tensorflow_utils.capture_result_from_graph(
        result, graph)

  type_signature = computation_types.FunctionType(None, type_spec)
  tensorflow = pb.TensorFlow(
      graph_def=serialization_utils.pack_graph_def(graph.as_graph_def()),
      parameter=None,
      result=result_binding)
  return pb.Computation(
      type=type_serialization.serialize_type(type_signature),
      tensorflow=tensorflow)
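
A hedged usage sketch for this scalar-broadcasting variant. It assumes `computation_types.to_type` accepts the shorthand shown for the type spec, and that `create_constant` above is in scope; both are assumptions about the TFF version in use:

import tensorflow as tf

# Broadcast the scalar 1 into every tensor leaf of a nested type spec. The
# shorthand `[('a', tf.int32), ('b', (tf.float32, [2]))]` is assumed to be
# convertible via `computation_types.to_type`.
comp_proto = create_constant(1, [('a', tf.int32), ('b', (tf.float32, [2]))])
# `comp_proto` should be a `pb.Computation` whose type signature serializes as
# `( -> <a=int32,b=float32[2]>)`.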