Example #1
  def _compute_is_bus_day_table(self):
    """Computes and caches "is business day" table."""
    if self._table_cache.is_bus_day is not None:
      return self._table_cache.is_bus_day

    with tf.init_scope():
      ordinals = tf.range(self._ordinal_offset,
                          self._ordinal_offset + self._calendar_size)
      # Apply weekend mask
      week_days = (ordinals - 1) % 7
      is_holiday = tf.gather(self._weekend_mask, week_days)

      # Apply holidays
      if self._holidays is not None:
        indices = self._holidays.ordinal() - self._ordinal_offset
        ones_at_indices = tf.scatter_nd(
            tf.expand_dims(indices, axis=-1), tf.ones_like(indices),
            is_holiday.shape)
        is_holiday = tf.bitwise.bitwise_or(is_holiday, ones_at_indices)

      # Add a business day at the beginning and at the end, i.e. at 31 Dec of
      # start_year-1 and at 1 Jan of end_year+1. This trick is to avoid dealing
      # with special cases on boundaries.
      # For example, for Following and Preceding conventions we'd need a special
      # value that means "unknown" in the tables. More complicated conventions
      # then combine the Following and Preceding tables, and would need special
      # treatment of the "unknown" values.
      # With these "fake" business days, all computations are automatically
      # correct, unless we land on those extra days - for this reason we add
      # assertions in all API calls before returning.
      is_bus_day_table = tf.concat([[1], 1 - is_holiday, [1]], axis=0)
      self._table_cache.is_bus_day = is_bus_day_table
    return is_bus_day_table
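
A minimal standalone sketch of the same caching idea (the `_cache` dict and `_get_table` name below are illustrative, not part of the class above): `tf.init_scope` lifts the tensor creation out of whatever `tf.function` first reaches it, so the cached tensor can be reused safely from other `tf.function`s.

import tensorflow as tf

_cache = {}

def _get_table():
    # Build the table once, lifted out of any enclosing tf.function.
    if "table" not in _cache:
        with tf.init_scope():
            _cache["table"] = tf.range(10, name="table")
    return _cache["table"]

@tf.function
def lookup(i):
    return tf.gather(_get_table(), i)

print(lookup(tf.constant(3)).numpy())  # 3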
Example #2
  def apply_gradients(self, grads_and_vars):
    """Apply gradients to variables.

    Args:
      grads_and_vars: List of (gradient, variable) pairs.

    Returns:
      None

    Raises:
      TypeError: If `grads_and_vars` is malformed.
    """
    if isinstance(self._learning_rate,
                  learning_rate_schedule.LearningRateSchedule):
      # Compute the current learning rate at the beginning of variable update.
      self._current_learning_rate.assign(self._learning_rate(self.iterations))
    grads_and_vars = optimizer_utils.filter_empty_gradients(grads_and_vars)
    grads, trainable_variables = zip(*grads_and_vars)
    scope_name = self._name or "optimizer"
    with tf.name_scope(scope_name):
      with tf.init_scope():
        # Lift variable creation to init scope to avoid environment issues.
        self.build(trainable_variables)
    grads = self._clip_gradients(grads)
    grads_and_vars = list(zip(grads, trainable_variables))
    self._internal_apply_gradients(grads_and_vars)
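
The nested `tf.name_scope`/`tf.init_scope` idiom above can be shown in isolation. A minimal sketch (the `Counter` class is hypothetical, not Keras API): creating the variable under `tf.init_scope` makes it an eager variable created exactly once, even when the creating code is first reached while tracing a `tf.function`.

import tensorflow as tf

class Counter:
    def __init__(self):
        self._step = None

    def _build(self):
        if self._step is None:
            with tf.init_scope():
                # Lifted out of the enclosing tf.function: created eagerly, once.
                self._step = tf.Variable(0, dtype=tf.int64, name="step")

    @tf.function
    def increment(self):
        self._build()
        self._step.assign_add(1)
        return self._step.read_value()

c = Counter()
print(c.increment().numpy())  # 1
print(c.increment().numpy())  # 2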
Example #3
    def __init__(self,
                 name,
                 clipnorm=None,
                 clipvalue=None,
                 global_clipnorm=None):
        """Create a new Optimizer.

    Args:
      name: String. The name to use for momentum accumulator weights created by
        the optimizer.
      clipnorm: float. If set, the gradient of each weight is individually
        clipped so that its norm is no higher than this value.
      clipvalue: float. If set, the gradient of each weight is clipped to be
        no higher than this value.
      global_clipnorm: float. If set, the gradient of all weights is clipped
        so that their global norm is no higher than this value.
    """
        self._name = name
        self._clipnorm = clipnorm
        self._global_clipnorm = global_clipnorm
        if self._clipnorm is not None and self._global_clipnorm is not None:
            raise ValueError(
                f"At most one of `clipnorm` and `global_clipnorm` can "
                f"be set. Received: clipnorm={self._clipnorm}, "
                f"global_clipnorm={self._global_clipnorm}.")
        self._clipvalue = clipvalue
        with tf.init_scope():
            # Lift the variable creation to init scope to avoid environment issues.
            self._iterations = tf.Variable(0, name="iteration", dtype=tf.int64)
Example #4
    def _compute_rolled_dates_table(self, roll_convention):
        """Computes and caches rolled dates table."""
        already_computed = self._table_cache.rolled_dates.get(
            roll_convention, None)
        if already_computed is not None:
            return already_computed

        roll_convention_np = _to_np_roll_convention(roll_convention)
        holidays_arg = self._holidays_np
        if holidays_arg is None:
            holidays_arg = []  # np.busday_offset doesn't accept None
        adjusted_np = np.busday_offset(dates=self._dates_np,
                                       offsets=0,
                                       roll=roll_convention_np,
                                       weekmask=1 - self._weekend_mask,
                                       holidays=holidays_arg)
        rolled_date_table = adjusted_np.astype(np.int32) + _ORDINAL_OF_1_1_1970

        # To make tensor caching safe, lift the ops out of the current scope using
        # tf.init_scope(). This allows e.g. to cache these tensors in one
        # tf.function and reuse them in another tf.function.
        with tf.init_scope():
            rolled_date_table = tf.convert_to_tensor(rolled_date_table,
                                                     name="rolled_date_table")
        self._table_cache.rolled_dates[roll_convention] = rolled_date_table
        return rolled_date_table
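
The rolling itself is delegated to NumPy. A short standalone sketch of the call (the dates below are illustrative): with `offsets=0`, `np.busday_offset` simply moves a non-business day onto a business day according to the roll convention.

import numpy as np

d = np.datetime64("2023-07-01")  # a Saturday
print(np.busday_offset(d, 0, roll="following"))  # 2023-07-03 (Monday)
print(np.busday_offset(d, 0, roll="preceding"))  # 2023-06-30 (Friday)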
Example #5
    def load_dataset():
        """Function that actually loads the dataset."""
        if load_dataset.dataset is not None:
            return load_dataset.dataset

        with tf.name_scope('german_credit_numeric'), tf.init_scope():
            dataset = _tfds().load('german_credit_numeric:1.*.*')
            features = []
            labels = []
            for entry in _tfds().as_numpy(dataset)['train']:
                features.append(entry['features'])
                # We're reversing the labels to match what's in the original dataset,
                # rather than the TFDS encoding.
                labels.append(1 - entry['label'])
            features = np.stack(features, axis=0)
            labels = np.stack(labels, axis=0)

            train_features = features[:num_train]
            test_features = features[num_train:]

            if normalize_fn is not None:
                train_features, test_features = normalize_fn(
                    train_features, test_features)

            load_dataset.dataset = dict(
                train_features=train_features,
                train_labels=labels[:num_train].astype(np.int32),
                test_features=test_features,
                test_labels=labels[num_train:].astype(np.int32),
            )

        return load_dataset.dataset
Example #6
 def _create_iteration_variable(self):
     """Create the iterations counter variable."""
     with tf.init_scope():
         # Lift the variable creation to init scope to avoid environment issues.
         self._iterations = tf.Variable(
             0, name="iteration", dtype=tf.int64, trainable=False
         )
Example #7
    def preprocessing_fn(inputs):
        """Preprocess input columns into transformed columns."""
        # Since we are modifying some features and leaving others unchanged, we
        # start by setting `outputs` to a copy of `inputs`.
        outputs = inputs.copy()

        # Scale numeric columns to have range [0, 1].
        for key in NUMERIC_FEATURE_KEYS:
            outputs[key] = tft.scale_to_0_1(inputs[key])

        for key in OPTIONAL_NUMERIC_FEATURE_KEYS:
            # This is a SparseTensor because it is optional. Here we fill in a default
            # value when it is missing.
            sparse = tf.sparse.SparseTensor(inputs[key].indices,
                                            inputs[key].values,
                                            [inputs[key].dense_shape[0], 1])
            dense = tf.sparse.to_dense(sp_input=sparse, default_value=0.)
            # Reshaping from a batch of vectors of size 1 to a batch of scalars.
            dense = tf.squeeze(dense, axis=1)
            outputs[key] = tft.scale_to_0_1(dense)

        # For all categorical columns except the label column, we generate a
        # vocabulary, and convert the string feature to a one-hot encoding.
        for key in CATEGORICAL_FEATURE_KEYS:
            integerized = tft.compute_and_apply_vocabulary(
                tf.strings.strip(inputs[key]),
                num_oov_buckets=NUM_OOV_BUCKETS,
                vocab_filename=key)
            depth = (tft.experimental.get_vocabulary_size_by_name(key) +
                     NUM_OOV_BUCKETS)
            one_hot_encoded = tf.one_hot(integerized,
                                         depth=tf.cast(depth, tf.int32),
                                         on_value=1.0,
                                         off_value=0.0)
            # This output is now one-hot encoded. If saving transformed data to disk,
            # this can incur significant memory cost.
            outputs[key] = tf.reshape(one_hot_encoded, [-1, depth])

        # For the label column we provide the mapping from string to index.
        table_keys = ['>50K', '<=50K']
        with tf.init_scope():
            initializer = tf.lookup.KeyValueTensorInitializer(
                keys=table_keys,
                values=tf.cast(tf.range(len(table_keys)), tf.int64),
                key_dtype=tf.string,
                value_dtype=tf.int64)
            table = tf.lookup.StaticHashTable(initializer, default_value=-1)
        # Remove trailing periods for test data when the data is read with tf.data.
        label_str = tf.strings.regex_replace(inputs[LABEL_KEY], r'\.', '')
        label_str = tf.strings.strip(label_str)
        data_labels = table.lookup(label_str)
        transformed_label = tf.one_hot(indices=data_labels,
                                       depth=len(table_keys),
                                       on_value=1.0,
                                       off_value=0.0)
        outputs[LABEL_KEY] = tf.reshape(transformed_label,
                                        [-1, len(table_keys)])

        return outputs
Example #8
 def _create_iteration_variable(self):
     init_val = tf.constant(0, dtype=tf.int64)
     if self._mesh:
         init_val = dtensor.copy_to_mesh(
             init_val, dtensor.Layout.replicated(self._mesh, rank=0))
     with tf.init_scope():
         # Lift the variable creation to init scope to avoid environment issues.
         self._iterations = dtensor.DVariable(init_val, name='iteration')
Example #9
 def _lookup_table_from_tokens(self, tokens):
     with tf.init_scope():
         token_start = self._token_start_index()
         token_end = token_start + tf.size(tokens)
         indices = tf.range(token_start, token_end, dtype=tf.int64)
         keys, values = (indices, tokens) if self.invert else (tokens,
                                                               indices)
         initializer = tf.lookup.KeyValueTensorInitializer(
             keys, values, self._key_dtype, self._value_dtype)
         return tf.lookup.StaticHashTable(initializer, self._default_value)
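
A self-contained sketch of the same construction (the token values and the `make_table`/`encode` names are illustrative): the table is created once, eagerly, under `tf.init_scope`, and can then be looked up from inside a `tf.function`.

import tensorflow as tf

def make_table(tokens):
    with tf.init_scope():
        init = tf.lookup.KeyValueTensorInitializer(
            keys=tokens,
            values=tf.range(tf.size(tokens, out_type=tf.int64), dtype=tf.int64))
        return tf.lookup.StaticHashTable(init, default_value=-1)

table = make_table(tf.constant(["a", "b", "c"]))

@tf.function
def encode(words):
    return table.lookup(words)

print(encode(tf.constant(["b", "x"])).numpy())  # [ 1 -1]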
Example #10
  def _compute_cumul_bus_days_table(self):
    """Computes and caches cumulative business days table."""
    if self._table_cache.cumul_bus_days is not None:
      return self._table_cache.cumul_bus_days

    is_bus_day_table = self._compute_is_bus_day_table()
    with tf.init_scope():
      cumul_bus_days_table = tf.math.cumsum(is_bus_day_table, exclusive=True,
                                            name="cumul_bus_days_table")
      self._table_cache.cumul_bus_days = cumul_bus_days_table
    return cumul_bus_days_table
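
A tiny sketch of why the table is built with `exclusive=True` (the values below are illustrative): the number of business days in a half-open range of ordinal offsets [a, b) is then simply `cumul[b] - cumul[a]`.

import tensorflow as tf

is_bus_day = tf.constant([1, 1, 0, 0, 1, 1, 1])
cumul = tf.math.cumsum(is_bus_day, exclusive=True)
print(cumul.numpy())                  # [0 1 2 2 2 3 4]
print((cumul[6] - cumul[1]).numpy())  # 3 business days at offsets 1..5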
Example #11
  def _compute_bus_day_ordinals_table(self):
    """Computes and caches rolled business day ordinals table."""
    if self._table_cache.bus_day_ordinals is not None:
      return self._table_cache.bus_day_ordinals

    is_bus_day_table = self._compute_is_bus_day_table()
    with tf.init_scope():
      bus_day_ordinals_table = (
          tf.cast(tf.where(is_bus_day_table)[:, 0], tf.int32) +
          self._ordinal_offset - 1)
      self._table_cache.bus_day_ordinals = bus_day_ordinals_table
    return bus_day_ordinals_table
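
A quick sketch of the indexing trick above (the mask is illustrative): `tf.where` on a 1-D mask returns the positions of the set entries as a column of int64 indices, and `[:, 0]` flattens that column back into a vector.

import tensorflow as tf

mask = tf.constant([True, False, True, True, False])
print(tf.where(mask)[:, 0].numpy())  # [0 2 3]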
Example #12
 def _build(self, shape):
   self._shape = tf.TensorShape(shape)
   self._build_input_shape = self._shape
   # Create new state variables
   self._total = self.add_weight(
       name='total', shape=shape, initializer='zeros')
   self._count = self.add_weight(
       name='count', shape=shape, initializer='zeros')
   with tf.init_scope():
     if not tf.executing_eagerly():
       backend._initialize_variables(backend._get_session())  # pylint: disable=protected-access
   self._built = True
Example #13
 def patched_call(self, shape, dtype):
     """Monkey-patched verison of `Initializer.__call__`."""
     cls = type(self)
     orig_call = all_initializers[cls]
     try:
         with tf.init_scope():
             return orig_call(self, shape, dtype)
     except:  # pylint: disable=bare-except
         if not tf.executing_eagerly():
             logging.exception(
                 "Failed to create initial value eagerly for %s shape=%s dtype=%s",
                 type(self).__name__, shape, dtype)
         return orig_call(self, shape, dtype)
Example #14
  def _compute_rolled_dates_table(self, convention):
    """Computes and caches rolled dates table."""
    already_computed = self._table_cache.rolled_dates.get(convention, None)
    if already_computed is not None:
      return already_computed

    # To make tensor caching safe, lift the ops out of the current scope using
    # tf.init_scope(). This allows e.g. to cache these tensors in one
    # tf.function and reuse them in another tf.function.
    with tf.init_scope():
      rolled_date_table = (
          self._compute_rolled_dates_table_without_cache(convention))
      self._table_cache.rolled_dates[convention] = rolled_date_table
    return rolled_date_table
Example #15
 def _build(self, shape):
     self._shape = tf.TensorShape(shape)
     self._build_input_shape = self._shape
     # Create new state variables
     self._total = self.add_weight(
         name="total", shape=shape, initializer="zeros"
     )
     self._count = self.add_weight(
         name="count", shape=shape, initializer="zeros"
     )
     with tf.init_scope():
         if not tf.executing_eagerly():
             backend._initialize_variables(backend._get_session())
     self._built = True
Example #16
def _eager_variable_creator(getter, initial_value, **kwargs):
    """Attempts to force variable creation to be eager."""
    eager_initial_value = None

    if isinstance(initial_value, tf.Tensor):
        eager_initial_value = tf.get_static_value(initial_value)

    if eager_initial_value is not None:
        # If we have an eager initial value we can create variables in eager mode.
        with tf.init_scope():
            return getter(initial_value=eager_initial_value, **kwargs)

    else:
        # Fall back to creating in whatever context we're in with user input.
        return getter(initial_value=initial_value, **kwargs)
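
A short usage sketch, assuming the creator is installed with `tf.variable_creator_scope` (the public API such creators are written for); the scope passes the next creator positionally and the remaining arguments, including `initial_value`, as keyword arguments.

import tensorflow as tf

with tf.variable_creator_scope(_eager_variable_creator):
    # The constant has a recoverable static value, so the variable is created
    # eagerly even if this runs while a tf.function is being traced.
    v = tf.Variable(tf.constant([1.0, 2.0]))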
Example #17
 def _lookup_table_from_file(self, filename):
     if self.invert:
         key_index = tf.lookup.TextFileIndex.LINE_NUMBER
         value_index = tf.lookup.TextFileIndex.WHOLE_LINE
     else:
         key_index = tf.lookup.TextFileIndex.WHOLE_LINE
         value_index = tf.lookup.TextFileIndex.LINE_NUMBER
     with tf.init_scope():
         initializer = tf.lookup.TextFileInitializer(
             filename=filename,
             key_dtype=self._key_dtype,
             key_index=key_index,
             value_dtype=self._value_dtype,
             value_index=value_index,
             value_index_offset=self._token_start_index())
         return tf.lookup.StaticHashTable(initializer, self._default_value)
Example #18
def maybe_init_scope(layer):
    """Open an `init_scope` if in V2 mode and using the keras graph.

  Args:
    layer: The Layer/Model that is currently active.

  Yields:
    None
  """
    # Don't open an init_scope in V1 mode or when using legacy tf.layers.
    if (tf.compat.v1.executing_eagerly_outside_functions()
            and getattr(layer, '_keras_style', True)):
        with tf.init_scope():
            yield
    else:
        yield
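
Since this helper yields, it is presumably wrapped in a context-manager decorator in its original module (that decorator is not shown above); a minimal sketch of the same pattern using the standard library:

import contextlib
import tensorflow as tf

@contextlib.contextmanager
def maybe_init_scope_sketch(use_init_scope):
    # Open tf.init_scope only when requested, otherwise yield unchanged.
    if use_init_scope:
        with tf.init_scope():
            yield
    else:
        yield

with maybe_init_scope_sketch(True):
    table = tf.range(5)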
Example #19
 def _maybe_freeze_vocab_size(self):
     if self.output_mode == INT or self.pad_to_max_tokens:
         return
     with tf.init_scope():
         new_vocab_size = self.vocabulary_size()
     if new_vocab_size == self._token_start_index():
         raise RuntimeError(
             "When using `output_mode={}` and `pad_to_max_tokens=False`, you "
             "must set the layer's vocabulary before calling it. Either pass "
             "a `vocabulary` argument to the layer, or call `adapt` with some "
             "sample data.".format(self.output_mode))
     elif (self._frozen_vocab_size is not None
           and new_vocab_size != self._frozen_vocab_size):
         raise RuntimeError(
             "When using `output_mode={}` and `pad_to_max_tokens=False`, the "
             "vocabulary size cannot be changed after the layer is called. "
             "Vocab size is {}, new vocab size is {}".format(
                 self.output_mode, self._frozen_vocab_size, new_vocab_size))
     self._frozen_vocab_size = new_vocab_size
Example #20
  def _set_state_variables(self, updates):
    """Directly update the internal state of this Layer.

    This method expects a string-keyed dict of {state_variable_name: state}. The
    precise nature of the state, and the names associated, are described by
    the subclasses of CombinerPreprocessingLayer.

    Args:
      updates: A string keyed dict of weights to update.

    Raises:
      RuntimeError: if 'build()' was not called before 'set_processing_state'.
    """
    # TODO(momernick): Do we need to do any more input sanitization?
    if not self.built:
      raise RuntimeError('_set_state_variables() must be called after build().')

    with tf.init_scope():
      for var_name, value in updates.items():
        self.state_variables[var_name].assign(value)
Example #21
    def add_weight(
        self,
        name,
        shape=(),
        aggregation=tf.VariableAggregation.SUM,
        synchronization=tf.VariableSynchronization.ON_READ,
        initializer=None,
        dtype=None,
    ):
        """Adds state variable. Only for use by subclasses."""
        if tf.distribute.has_strategy():
            strategy = tf.distribute.get_strategy()
        else:
            strategy = None

        # TODO(b/120571621): Make `ON_READ` work with Keras metrics on TPU.
        if backend.is_tpu_strategy(strategy):
            synchronization = tf.VariableSynchronization.ON_WRITE
        if getattr(self, "_mesh", None) is not None:
            # When self._mesh is set, it means this metric is used for DTensor.
            additional_kwargs = {
                "layout":
                dtensor.Layout.replicated(self._mesh,
                                          tf.TensorShape(shape).rank)
            }
        else:
            additional_kwargs = {}

        with tf.init_scope():
            return super().add_weight(
                name=name,
                shape=shape,
                dtype=self._dtype if dtype is None else dtype,
                trainable=False,
                initializer=initializer,
                collections=[],
                synchronization=synchronization,
                aggregation=aggregation,
                **additional_kwargs,
            )
Example #22
    def apply_gradients(self, grads_and_vars):
        """Apply gradients to variables.

    Args:
      grads_and_vars: List of (gradient, variable) pairs.

    Returns:
      None

    Raises:
      TypeError: If `grads_and_vars` is malformed.
    """
        grads_and_vars = optimizer_utils.filter_empty_gradients(grads_and_vars)
        grads, trainable_variables = zip(*grads_and_vars)
        scope_name = self._name or "optimizer"
        with tf.name_scope(scope_name):
            with tf.init_scope():
                # Lift variable creation to init scope to avoid environment issues.
                self.build(trainable_variables)
        grads = self._clip_gradients(grads)
        grads_and_vars = list(zip(grads, trainable_variables))
        self._internal_apply_gradients(grads_and_vars)
Example #23
 def _maybe_freeze_vocab_size(self):
     if self.output_mode == INT or self.pad_to_max_tokens:
         return
     with tf.init_scope():
         if not tf.executing_eagerly():
             raise RuntimeError(
                 "When using `output_mode={}` eager execution must "
                 "be enabled.".format(self.output_mode))
         new_vocab_size = self.vocabulary_size()
     if new_vocab_size == self._token_start_index():
         raise RuntimeError(
             "When using `output_mode={}` and `pad_to_max_tokens=False`, "
             "you must set the layer's vocabulary before calling it. Either "
             "pass a `vocabulary` argument to the layer, or call `adapt` "
             "with some sample data.".format(self.output_mode))
     elif (self._frozen_vocab_size is not None
           and new_vocab_size != self._frozen_vocab_size):
         raise RuntimeError(
             "When using `output_mode={}` and `pad_to_max_tokens=False`, "
             "the vocabulary size cannot be changed after the layer is "
             "called. Vocab size is {}, new vocab size is {}".format(
                 self.output_mode, self._frozen_vocab_size, new_vocab_size))
     self._frozen_vocab_size = new_vocab_size
Example #24
    def _compute_is_bus_day_table(self):
        """Computes and caches "is business day" table."""
        if self._table_cache.is_bus_day is not None:
            return self._table_cache.is_bus_day

        is_bus_day_table = np.ones_like(self._dates_np, dtype=np.int32)

        ordinals = np.arange(self._ordinal_offset,
                             self._ordinal_offset + len(is_bus_day_table))
        # Apply weekend mask
        week_days = (ordinals - 1) % 7
        is_bus_day_table[self._weekend_mask[week_days] == 1] = 0

        # Apply holidays
        if self._holidays_np is not None:
            holiday_ordinals = (np.array(self._holidays_np, dtype=np.int32) +
                                _ORDINAL_OF_1_1_1970)
            is_bus_day_table[holiday_ordinals - self._ordinal_offset] = 0

        with tf.init_scope():
            is_bus_day_table = tf.convert_to_tensor(is_bus_day_table,
                                                    name="is_bus_day_table")
        self._table_cache.is_bus_day = is_bus_day_table
        return is_bus_day_table
Example #25
  def __init__(self,
               name,
               gradients_clip_option=None,
               ema_option=None,
               jit_compile=False,
               **kwargs):
    """Create a new Optimizer.

    Args:
      name: String. The name to use for momentum accumulator weights created by
        the optimizer.
      gradients_clip_option: an instance of
        `optimizer_experimental.GradientsClipOption`, for attributes related to
        gradient clipping, such as clipnorm and clipvalue. Defaults to None
        (no gradient clipping is applied).
      ema_option: an instance of `optimizer_experimental.EMAOption`, for
        attributes related to exponential moving average, such as use_ema (a
        boolean field indicating whether EMA is used) and EMA momentum.
        Defaults to None (EMA is not applied).
      jit_compile: Bool, defaults to False. If True, the optimizer will use XLA
        acceleration. `jit_compile` can only be False when using Parameter
        Server Strategy.
      **kwargs: keyword arguments only used for backward compatibility with
        `optimizer_v2.OptimizerV2`. Any new code using
        `optimizer_experimental.Optimizer` should leave this parameter empty.
    """
    self._name = name
    self._gradients_clip_option = gradients_clip_option
    self._ema_option = ema_option
    self._jit_compile = jit_compile

    with tf.init_scope():
      # Lift the variable creation to init scope to avoid environment issues.
      self._iterations = tf.Variable(0, name="iteration", dtype=tf.int64)

    self._process_kwargs(kwargs)
Example #26
 def _uninitialized_lookup_table(self):
     with tf.init_scope():
         initializer = NullInitializer(self._key_dtype, self._value_dtype)
         return tf.lookup.StaticHashTable(initializer, self._default_value)
Example #27
    def add_weight(self,
                   name,
                   shape,
                   dtype=None,
                   initializer=None,
                   regularizer=None,
                   trainable=None,
                   constraint=None,
                   use_resource=None,
                   synchronization=tf.VariableSynchronization.AUTO,
                   aggregation=tf.compat.v1.VariableAggregation.NONE,
                   partitioner=None,
                   **kwargs):
        """Adds a new variable to the layer, or gets an existing one; returns it.

    Args:
      name: variable name.
      shape: variable shape.
      dtype: The type of the variable. Defaults to `self.dtype` or `float32`.
      initializer: initializer instance (callable).
      regularizer: regularizer instance (callable).
      trainable: whether the variable should be part of the layer's
        "trainable_variables" (e.g. variables, biases)
        or "non_trainable_variables" (e.g. BatchNorm mean, stddev).
        Note, if the current variable scope is marked as non-trainable
        then this parameter is ignored and any added variables are also
        marked as non-trainable. `trainable` defaults to `True` unless
        `synchronization` is set to `ON_READ`.
      constraint: constraint instance (callable).
      use_resource: Whether to use `ResourceVariable`.
      synchronization: Indicates when a distributed variable will be
        aggregated. Accepted values are constants defined in the class
        `tf.VariableSynchronization`. By default the synchronization is set to
        `AUTO` and the current `DistributionStrategy` chooses
        when to synchronize. If `synchronization` is set to `ON_READ`,
        `trainable` must not be set to `True`.
      aggregation: Indicates how a distributed variable will be aggregated.
        Accepted values are constants defined in the class
        `tf.VariableAggregation`.
      partitioner: (optional) partitioner instance (callable).  If
        provided, when the requested variable is created it will be split
        into multiple partitions according to `partitioner`.  In this case,
        an instance of `PartitionedVariable` is returned.  Available
        partitioners include `tf.compat.v1.fixed_size_partitioner` and
        `tf.compat.v1.variable_axis_size_partitioner`.  For more details, see
        the documentation of `tf.compat.v1.get_variable` and the  "Variable
        Partitioners and Sharding" section of the API guide.
      **kwargs: Additional keyword arguments.

    Returns:
      The created variable.  Usually either a `Variable` or `ResourceVariable`
      instance.  If `partitioner` is not `None`, a `PartitionedVariable`
      instance is returned.

    Raises:
      RuntimeError: If called with partitioned variable regularization and
        eager execution is enabled.
      ValueError: When trainable has been set to True with synchronization
        set as `ON_READ`.
    """
        for kwarg in kwargs:
            if kwarg != 'experimental_autocast':
                raise TypeError('Unknown keyword argument:', kwarg)
        if self._keras_style:
            return super(Layer, self).add_weight(
                name=name,
                shape=shape,
                dtype=dtype,
                initializer=initializer,
                regularizer=regularizer,
                trainable=trainable and self.trainable,
                constraint=constraint,
                use_resource=use_resource,
                synchronization=tf.VariableSynchronization.AUTO,
                aggregation=tf.compat.v1.VariableAggregation.NONE,
                partitioner=partitioner,
                **kwargs)

        if synchronization == tf.VariableSynchronization.ON_READ:
            if trainable:
                raise ValueError(
                    'Synchronization value can be set to '
                    'VariableSynchronization.ON_READ only for non-trainable variables. '
                    'You have specified trainable=True and '
                    'synchronization=VariableSynchronization.ON_READ.')
            else:
                # Set trainable to be false when variable is to be synced on read.
                trainable = False
        elif trainable is None:
            trainable = True

        def _should_add_regularizer(variable, existing_variable_set):
            if base_layer_utils.is_split_variable(variable):
                for var in variable:
                    if var in existing_variable_set:
                        return False
                return True
            else:
                return variable not in existing_variable_set

        init_graph = None
        if not tf.executing_eagerly():
            default_graph = tf.compat.v1.get_default_graph()
            if default_graph.building_function:
                with tf.init_scope():
                    # Retrieve the variables from the graph into which variables
                    # will be lifted; if initialization ops will be lifted into
                    # the eager context, then there is nothing to retrieve, since variable
                    # collections are not supported when eager execution is enabled.
                    if not tf.executing_eagerly():
                        init_graph = tf.compat.v1.get_default_graph()
                        existing_variables = set(
                            tf.compat.v1.global_variables())
            else:
                # Initialization ops will not be lifted out of the default graph.
                init_graph = default_graph
                existing_variables = set(tf.compat.v1.global_variables())

        if dtype is None:
            dtype = self.dtype or tf.float32

        self._set_scope(None)
        reuse = self.built or self._reuse
        prev_len_trainable = len(self._trainable_weights)
        with tf.compat.v1.variable_scope(self._scope,
                                         reuse=reuse,
                                         auxiliary_name_scope=False) as scope:
            self._current_scope = scope
            with backend.name_scope(self._name_scope()):
                use_resource = (use_resource or self._use_resource_variables
                                or scope.use_resource)
                if initializer is None:
                    initializer = scope.initializer
                variable = super(Layer, self).add_weight(
                    name,
                    shape,
                    dtype=tf.as_dtype(dtype),
                    initializer=initializer,
                    trainable=trainable and self.trainable,
                    constraint=constraint,
                    partitioner=partitioner,
                    use_resource=use_resource,
                    synchronization=synchronization,
                    aggregation=aggregation,
                    getter=tf.compat.v1.get_variable,
                    **kwargs)

                if regularizer:
                    if (tf.compat.v1.executing_eagerly_outside_functions()
                            or _should_add_regularizer(variable,
                                                       existing_variables)):
                        self._handle_weight_regularization(
                            name, variable, regularizer)

                if init_graph is not None:
                    # Handle edge case where a custom getter has overridden `trainable`.
                    # There is one known occurrence of this, in unit test
                    # testBasicRNNCellNotTrainable in
                    # contrib.rnn.python.kernel_tests.core_rnn_cell_test
                    with init_graph.as_default():
                        trainable_variables = tf.compat.v1.trainable_variables()
                    if (trainable and self.trainable
                            and variable not in trainable_variables):
                        # A custom getter / variable scope overrode the trainable flag.
                        extra_trainable_vars = self._trainable_weights[
                            prev_len_trainable:]
                        self._trainable_weights = (
                            self._trainable_weights[:prev_len_trainable])
                        self._non_trainable_weights += extra_trainable_vars
        return variable
Example #28
def _create_keras_history_helper(tensors, processed_ops, created_layers):
    """Helper method for `create_keras_history`.

  Args:
    tensors: A structure of Tensors for which to create Keras metadata.
    processed_ops: Set. TensorFlow operations that have already been wrapped in
      `TensorFlowOpLayer` instances.
    created_layers: List. The `TensorFlowOpLayer` instances created.

  Returns:
    Tuple. First element is the updated set of TensorFlow Operations that
    have been wrapped in `TensorFlowOpLayer` instances. Second element is
    a list of the `TensorFlowOpLayer` instances created.
  """
    if tf.compat.v1.executing_eagerly_outside_functions():
        raise ValueError(
            '`create_keras_history` should only be called if eager is disabled!'
        )
    # Import of `base_layer` needed in order to create `TensorFlowOpLayer`.
    # Cannot be imported at top because of circular dependencies.
    # TODO(omalleyt): Resolve circular dependency.
    from keras.engine import base_layer  # pylint: disable=g-import-not-at-top
    tensor_list = tf.nest.flatten(tensors)
    sparse_ops = []
    ragged_tensors = []
    for tensor in tensor_list:
        if getattr(tensor, '_keras_history', None) is not None:
            continue
        if isinstance(tensor,
                      (tf.SparseTensor, tf.compat.v1.SparseTensorValue)):
            sparse_ops.append(tensor.op)
            continue
        if tf_utils.is_ragged(tensor):
            # Ragged tensors don't have an op property
            ragged_tensors.append(tensor)
            continue
        op = tensor.op  # The Op that created this Tensor.
        if op not in processed_ops:
            # Recursively set `_keras_history`.
            op_inputs = list(op.inputs)
            constants = {}
            layer_inputs = []
            for i, op_input in enumerate(op_inputs):
                if uses_keras_history(op_input):
                    layer_inputs.append(op_input)
                else:
                    # Treat any value not originating from a `keras.Input` as
                    # a constant. Variables cannot be supported.
                    ds_with_session = (
                        tf.distribute.in_cross_replica_context() and
                        not tf.compat.v1.executing_eagerly_outside_functions())
                    using_xla = control_flow_util.GraphOrParentsInXlaContext(
                        tf.compat.v1.get_default_graph())
                    if ds_with_session or using_xla or _UNSAFE_GRAPH_OP_LAYER_CREATION:
                        # In Legacy Graph mode, evaluating here causes the Session to be
                        # configured improperly. The downside of this is that saving
                        # via `get_config` breaks, but SavedModel still works.
                        constants[i] = op_input
                    else:
                        with tf.init_scope():
                            constants[i] = backend.function([], op_input)([])
            layer_inputs = unnest_if_single_tensor(layer_inputs)
            processed_ops, created_layers = _create_keras_history_helper(
                layer_inputs, processed_ops, created_layers)
            name = op.name
            node_def = op.node_def.SerializeToString()
            op_layer = base_layer.TensorFlowOpLayer(node_def,
                                                    constants=constants,
                                                    name=name)
            created_layers.append(op_layer)
            op_layer._set_connectivity_metadata(  # pylint: disable=protected-access
                args=(layer_inputs, ),
                kwargs={},
                outputs=op.outputs)
            processed_ops.update([op])
    if sparse_ops or ragged_tensors:
        lambda_example = """
    weights_mult = lambda x: tf.sparse.sparse_dense_matmul(x, weights)
    output = tf.keras.layers.Lambda(weights_mult)(input)
    """
        raise ValueError(
            'Tensorflow ops that generate ragged or sparse tensor '
            'outputs are currently not supported by Keras automatic '
            'op wrapping. Please wrap these ops in a Lambda layer: '
            '\n\n```\n{example}\n```\n'
            'Sparse ops encountered: {sparse_ops}\n'
            'Ragged tensors encountered: {ragged_tensors}\n'.format(
                example=lambda_example,
                sparse_ops=str(sparse_ops),
                ragged_tensors=str(ragged_tensors)))
    return processed_ops, created_layers
Example #29
 def _build_graph_network_for_inferred_shape(self,
                                             input_shape,
                                             input_dtype=None):
     if input_shape is None or not self.layers:
         return
     if (not tf.__internal__.tf2.enabled()
             or not tf.compat.v1.executing_eagerly_outside_functions()):
         # This behavior is disabled in V1 or when eager execution is
         # disabled.
         return
     if (not self._has_explicit_input_shape
             and not self._use_legacy_deferred_behavior):
         # Determine whether the input shape is novel, i.e. whether the model
         # should be rebuilt.
         input_shape = tuple(input_shape)
         if self._inferred_input_shape is None:
             new_shape = input_shape
         else:
             new_shape = relax_input_shape(self._inferred_input_shape,
                                           input_shape)
         if (new_shape is not None
                 and new_shape != self._inferred_input_shape):
             # A novel shape has been received: we need to rebuild the model.
             # In case we are inside a graph function, we step out of it.
             with tf.init_scope():
                 inputs = input_layer.Input(
                     batch_shape=new_shape,
                     dtype=input_dtype,
                     name=self.layers[0].name + "_input",
                 )
                 layer_input = inputs
                 created_nodes = set()
                 for layer in self.layers:
                     # Clear nodes previously created via this method. This
                     # prevents node accumulation and ensures that e.g.
                     # `layer.output` is always connected to `model.inputs`
                     # (this is important e.g. for the feature extraction use
                     # case).  We don't just do `layer._inbound_nodes = []`
                     # in order not to break shared layers added to
                     # Sequential models (which is technically illegal as per
                     # the `add()` docstring, but wasn't previously
                     # disabled).
                     clear_previously_created_nodes(layer,
                                                    self._created_nodes)
                     try:
                         # Create Functional API connection by calling the
                         # current layer
                         layer_output = layer(layer_input)
                     except:  # noqa: E722
                         # Functional API calls may fail for a number of
                         # reasons: 1) The layer may be buggy. In this case
                         # it will be easier for the user to debug if we fail
                         # on the first call on concrete data, instead of our
                         # own call on a symbolic input. 2) The layer is
                         # dynamic (graph-incompatible) and hasn't overridden
                         # `compute_output_shape`. In this case, it is
                         # impossible to build a graph network. 3) The layer
                         # is otherwise incompatible with the Functional API
                         # (e.g. this is the case for some probabilistic
                         # layers that rely on hacks and that do not return
                         # tensors). In all these cases, we should avoid
                         # creating a graph network (or we simply can't).
                         self._use_legacy_deferred_behavior = True
                         return
                     if len(tf.nest.flatten(layer_output)) != 1:
                         raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)
                     # Keep track of nodes just created above
                     track_nodes_created_by_last_call(layer, created_nodes)
                     layer_input = layer_output
                     outputs = layer_output
                 self._created_nodes = created_nodes
                 try:
                     # Initialize a graph Network. This call will never fail
                     # for a stack of valid Keras layers. However some users
                     # have layers that are fundamentally incompatible with
                     # the Functional API, which do not return tensors. In
                     # this case, we fall back to the legacy deferred
                     # behavior.
                     # TODO(fchollet): consider raising here, as we should
                     # not be supporting such layers.
                     self._init_graph_network(inputs, outputs)
                     self._graph_initialized = True
                 except:  # noqa: E722
                     self._use_legacy_deferred_behavior = True
             self._inferred_input_shape = new_shape
Example #30
    def load_dataset():
        """Function that actually loads the dataset."""
        if load_dataset.dataset is not None:
            return load_dataset.dataset

        with tf.name_scope('radon'), tf.init_scope():

            dataset = _tfds().load(name='radon:1.*.*',
                                   split='train',
                                   batch_size=-1)
            dataset = _tfds().as_numpy(dataset)

            states = dataset['features']['state'].astype('U13')
            floor = dataset['features']['floor'].astype(np.int32)
            radon_val = dataset['activity'].astype(np.float64)
            county_strings = dataset['features']['county'].astype('U13')
            uranium = dataset['features']['Uppm'].astype(np.float64)

            if state is not None:
                floor = floor[states == state]
                radon_val = radon_val[states == state]
                county_strings = county_strings[states == state]
                uranium = uranium[states == state]

            radon_val[radon_val <= 0.] = 0.1
            log_radon = np.log(radon_val)
            log_uranium = np.log(uranium)
            unique_counties, county = np.unique(county_strings,
                                                return_inverse=True)
            county = county.astype(np.int32)

            if log_radon.size != num_examples:
                raise ValueError(
                    'The size of the filtered dataset must equal the input '
                    '`num_examples`. Saw dataset size = {}, `num_examples` = {}'
                    ''.format(log_radon.size, num_examples))
            if unique_counties.size != num_counties:
                raise ValueError(
                    'The number of counties present in the filtered dataset must equal '
                    'the input `num_counties`. Saw {} counties but `num_counties` = {}'
                    ''.format(unique_counties.size, num_counties))

            if shuffle:
                shuffle_idxs = np.arange(num_examples)
                np.random.RandomState(shuffle_seed).shuffle(shuffle_idxs)
                log_uranium = log_uranium[shuffle_idxs]
                floor = floor[shuffle_idxs]
                county = county[shuffle_idxs]
                log_radon = log_radon[shuffle_idxs]

            train_floor = floor[:num_train]
            train_county = county[:num_train]
            test_floor = floor[num_train:]
            test_county = county[num_train:]

            # Create a new feature: the mean floor value within each county.
            xbar = []
            for i in range(num_counties):
                xbar.append(train_floor[train_county == i].mean())
            floor_by_county = np.array(xbar, dtype=log_radon.dtype)

            load_dataset.dataset = dict(
                train_log_uranium=log_uranium[:num_train],
                train_floor=train_floor,
                train_county=train_county,
                train_floor_by_county=floor_by_county[train_county],
                train_log_radon=log_radon[:num_train],
                test_log_uranium=log_uranium[num_train:],
                test_floor=test_floor,
                test_county=test_county,
                test_floor_by_county=floor_by_county[test_county],
                test_log_radon=log_radon[num_train:],
            )

        return load_dataset.dataset