Example #1 (SegEmbedding.__init__)
    def __init__(self,
                 seg_nums,
                 feat_idx_list,
                 embed_trainable_list=None,
                 input_val_range=(0., 1.),
                 out_dim_calcor=log_out_dim_calcor,
                 max_param_num=int(1e6),
                 period=100,
                 pave_momentum=0.5,
                 index_learnable=False,
                 embeddings_initializer='uniform',
                 embeddings_regularizer=None,
                 embeddings_constraint=None,
                 **kwargs):
        """Configure the segmented-embedding layer.

        Stores the raw hyper-parameters, resolves the Keras initializer/
        regularizer/constraint specs, pre-allocates one bookkeeping slot per
        phase, and converts the input value range into constant tensors.
        """
        super(SegEmbedding, self).__init__(**kwargs)
        self.supports_masking = True

        # Raw configuration, kept on the instance.
        self.seg_nums = np.array(seg_nums)
        self.feat_idx_list = feat_idx_list
        self.embed_trainable_list = embed_trainable_list
        self.input_val_range = input_val_range
        self.out_dim_calcor = out_dim_calcor
        self.max_param_num = max_param_num
        self.period = period
        self.pave_momentum = pave_momentum
        self.index_learnable = index_learnable

        # Resolve string/None specs into concrete Keras objects.
        self.embeddings_initializer = initializers.get(embeddings_initializer)
        self.embeddings_regularizer = regularizers.get(embeddings_regularizer)
        self.embeddings_constraint = constraints.get(embeddings_constraint)

        # One bookkeeping slot per phase (one phase per feature-index group).
        self.phase = len(self.feat_idx_list)
        if not self.embed_trainable_list:
            self.embed_trainable_list = [True] * self.phase
        self.embedding_list = [[] for _ in range(self.phase)]
        self.update_cnt_list = [[] for _ in range(self.phase)]
        self.seg_num_list = [[] for _ in range(self.phase)]
        self.seg_num_mul_list = [[] for _ in range(self.phase)]

        # Broadcast scalar range bounds to one value per feature; sequences
        # are passed through to bk.constant unchanged.
        feats_num = self.seg_nums.shape[0]
        min_bound, max_bound = self.input_val_range
        if isinstance(min_bound, (int, float)):
            min_bound = [min_bound] * feats_num
        self.min_vals = bk.constant(min_bound)
        if isinstance(max_bound, (int, float)):
            max_bound = [max_bound] * feats_num
        self.max_vals = bk.constant(max_bound)
        self.min_val_list = [[] for _ in range(self.phase)]
        self.max_val_list = [[] for _ in range(self.phase)]

        self.call_cnt = None

        # Detect whether the active distribution strategy is a TPU strategy
        # (when the TPU module is importable); otherwise assume unique ops
        # are supported.
        self.unique_supported = True
        try:
            from tensorflow.python.tpu import tpu
            self.unique_supported = not tpu.is_tpu_strategy(
                tf.distribute.get_strategy())
        except ModuleNotFoundError:
            self.unique_supported = True
Example #2 (SegDropout.__init__)
    def __init__(self,
                 rate,
                 anneal=0.1,
                 agg_method='mean',
                 smooth_rate=0.,
                 noise_type='gaussian',
                 keep_amp_type='abs',
                 epsilon=1e-6,
                 period=None,
                 axis=None,
                 seed=None,
                 **kwargs):
        """Configure the segmented dropout layer.

        ``rate`` and ``seed`` are forwarded to the parent dropout layer; the
        remaining arguments are stored as hyper-parameters.
        """
        super(SegDropout, self).__init__(rate, seed=seed, **kwargs)
        self.supports_masking = True

        # Offset the anneal factor by 0.5 before storing it.
        self.anneal = 0.5 + anneal
        self.agg_method = agg_method
        # Clamp the smoothing rate into [0, 1].
        self.smooth_rate = min(max(smooth_rate, 0.), 1.)
        self.noise_type = noise_type
        self.keep_amp_type = keep_amp_type
        self.epsilon = epsilon

        self.period = period
        self.axis = axis

        self.call_cnt = 0

        # Detect whether the active distribution strategy is a TPU strategy
        # (when the TPU module is importable); otherwise assume unique ops
        # are supported.
        self.unique_supported = True
        try:
            from tensorflow.python.tpu import tpu
            self.unique_supported = not tpu.is_tpu_strategy(
                tf.distribute.get_strategy())
        except ModuleNotFoundError:
            self.unique_supported = True
Example #3 (decorated — metric update_state wrapper)
  def decorated(metric_obj, *args, **kwargs):
    """Decorated function with `add_update()`.

    Runs `update_state_fn(*args, **kwargs)` inside a symbolic-tensor graph
    context and registers the resulting op on `metric_obj` via
    `add_update()`. Returns the update op (None in eager execution).

    Raises:
      ValueError: when running under a TPU strategy in replica context and
        any of the metric's weights was not created inside that strategy's
        scope.
    """
    strategy = distribution_strategy_context.get_strategy()
    # TODO(b/142574744): Remove this check if a better solution is found for
    # declaring keras Metric outside of TPUStrategy and then updating it per
    # replica.

    # The strategy-type and replica-context checks do not depend on the
    # individual weight, so evaluate them once instead of once per weight.
    if (tpu.is_tpu_strategy(strategy) and
        not distribution_strategy_context.in_cross_replica_context()):
      for weight in metric_obj.weights:
        if not strategy.extended.variable_created_in_scope(weight):
          raise ValueError(
              'Trying to run metric.update_state in replica context when '
              'the metric was not created in TPUStrategy scope. '
              'Make sure the keras Metric is created in TPUstrategy scope. ')

    with tf_utils.graph_context_for_symbolic_tensors(*args, **kwargs):
      update_op = update_state_fn(*args, **kwargs)
    if update_op is not None:  # update_op will be None in eager execution.
      metric_obj.add_update(update_op)
    return update_op