def compute_surprisal_loss(model, loss, updated_states, sample_probabilities,
                           surprisal_influence, mask):
    """
    Compute a penalty term based on the average surprisal of the unread samples.
    """
    if using_skip_rnn(model):
        neg_updated_states = tf.subtract(
            tf.ones(updated_states.get_shape(), dtype=tf.dtypes.float32),
            updated_states)
        surprisal_values = tf.multiply(
            tf.multiply(tf.constant(-1.0), (tf.log(sample_probabilities))),
            mask)
        # printer_0 = tf.Print(neg_updated_states, [neg_updated_states], "Inverse of the updated states is ")
        surprisals = tf.multiply(
            neg_updated_states,
            tf.where(tf.is_nan(surprisal_values),
                     tf.zeros_like(surprisal_values), surprisal_values))
        tot_surprisal = tf.reduce_sum(surprisals)
        # printer_1 = tf.Print(tot_surprisal, [tot_surprisal], "Total surprisal is ")
        non_read_samples = tf.reduce_sum(tf.multiply(neg_updated_states, mask))
        # printer_2 = tf.Print(non_read_samples, [non_read_samples], "Non read samples is ")
        # with tf.control_dependencies([printer_0, printer_1, printer_2]):
        average_surprisal = tf.div_no_nan(tot_surprisal, non_read_samples)
        surprisal_loss = surprisal_influence * average_surprisal
        # print = tf.cond(tf.math.is_nan(surprisal_loss),
        #                 true_fn=lambda: tf.Print(surprisal_loss, [surprisal_loss], "Surprisal loss is null "),
        #                 false_fn=lambda: tf.no_op())
        # with tf.control_dependencies([print]):
        return surprisal_loss
    else:
        return tf.zeros(loss.get_shape())
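As a rough, self-contained sketch of the arithmetic above (values and names are made up, not taken from the original model): the penalty is the mean of -log(p) over the skipped samples, and tf.div_no_nan keeps it at 0 when nothing was skipped.

import tensorflow as tf

# Hypothetical batch of 4 samples; the 2nd and 4th were skipped (updated_state == 0).
updated_states = tf.constant([1.0, 0.0, 1.0, 0.0])
sample_probabilities = tf.constant([0.9, 0.25, 0.8, 0.5])
mask = tf.ones_like(updated_states)

neg_updated = 1.0 - updated_states                   # 1 where the sample was skipped
surprisal = -tf.log(sample_probabilities) * mask     # per-sample surprisal
avg_surprisal = tf.div_no_nan(tf.reduce_sum(neg_updated * surprisal),
                              tf.reduce_sum(neg_updated * mask))

with tf.Session() as sess:
    print(sess.run(avg_surprisal))  # mean of -log(0.25) and -log(0.5) ~= 1.04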
Example #2
        def metric_fn(per_example_loss, label_ids, logits, is_real_example):
          """Compute the Matthews correlation coefficient (MCC)."""
          predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
          # https://en.wikipedia.org/wiki/Matthews_correlation_coefficient
          tp, tp_op = tf.metrics.true_positives(
              predictions, label_ids, weights=is_real_example)
          tn, tn_op = tf.metrics.true_negatives(
              predictions, label_ids, weights=is_real_example)
          fp, fp_op = tf.metrics.false_positives(
              predictions, label_ids, weights=is_real_example)
          fn, fn_op = tf.metrics.false_negatives(
              predictions, label_ids, weights=is_real_example)

          # Compute the Matthews correlation coefficient.
          mcc = tf.div_no_nan(
              tp * tn - fp * fn,
              tf.pow((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn), 0.5))
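          # (Illustrative check, not part of the original: with tp=6, tn=8, fp=2, fn=1
          #  this evaluates to (48 - 2) / sqrt(8 * 7 * 10 * 9) ~= 0.65, and tf.div_no_nan
          #  returns 0.0 instead of NaN when any of the four marginal sums is zero.)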

          # Compute accuracy
          accuracy = tf.metrics.accuracy(
              labels=label_ids, predictions=predictions,
              weights=is_real_example)

          loss = tf.metrics.mean(
              values=per_example_loss,
              weights=is_real_example)

          return {"matthew_corr": (mcc, tf.group(tp_op, tn_op, fp_op, fn_op)),
                  "eval_accuracy": accuracy, "eval_loss": loss,}
Example #3
def embedding_lookup_sparse(embedding_params,
                            sparse_indices,
                            weights,
                            combiner,
                            name=None):
    bs = tf.cast(sparse_indices.dense_shape[0], dtype=tf.int32)
    if combiner != 'mean':
        embedding_tensor_reduce = tf.nn.embedding_lookup_sparse(
            embedding_params,
            sparse_indices,
            weights,
            combiner=combiner,
            name=name)
        embedding_tensor_scatter = tf.pad(
            embedding_tensor_reduce,
            [[0, bs - tf.shape(embedding_tensor_reduce)[0]], [0, 0]],
            'CONSTANT',
            constant_values=0)
    else:
        embedding_tensor_reduce = tf.nn.embedding_lookup_sparse(
            embedding_params, sparse_indices, weights, combiner='sum')
        embedding_tensor_scatter = tf.pad(
            embedding_tensor_reduce,
            [[0, bs - tf.shape(embedding_tensor_reduce)[0]], [0, 0]],
            'CONSTANT',
            constant_values=0)
        line_number = tf.cast(sparse_indices.indices[:, 0], tf.int32)
        line_count = tf.expand_dims(
            tf.math.bincount(line_number, minlength=bs, dtype=tf.float32), 1)
        embedding_tensor_scatter = tf.div_no_nan(embedding_tensor_scatter,
                                                 line_count,
                                                 name=name)
    return embedding_tensor_scatter
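A usage sketch with an assumed toy embedding table and ids: the 'mean' branch divides the summed embeddings by the per-row id count, and a row with no ids at all comes back as zeros instead of NaN (the trailing zero-padding handles rows that tf.nn.embedding_lookup_sparse drops when they have no ids).

import tensorflow as tf

params = tf.constant([[1.0, 1.0], [2.0, 2.0], [4.0, 4.0]])
ids = tf.SparseTensor(indices=[[0, 0], [0, 1], [1, 0]],
                      values=tf.constant([0, 2, 1], dtype=tf.int64),
                      dense_shape=[3, 2])  # 3 rows, the last one is empty

emb = embedding_lookup_sparse(params, ids, None, combiner='mean')
with tf.Session() as sess:
    print(sess.run(emb))  # [[2.5 2.5] [2.  2. ] [0.  0. ]]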
Example #4
    def encode(self, x, encode_params):
        """See base class."""
        x = self._validate_and_expand_encode_input(x)
        dims = x.shape.as_list()
        # Get the static leading dimension, or the dynamic one if it is not known.
        dim_0 = dims[0] if dims[0] else tf.shape(x)[0]
        dim_1 = dims[1]
        kashin_coefficients = tf.zeros([dim_0, self._get_pad_dim(dim_1)],
                                       dtype=x.dtype)
        clip_level = tf.norm(x, axis=1, keepdims=True) / tf.math.sqrt(
            tf.cast(encode_params[self.DELTA_PARAMS_KEY], x.dtype) * dim_1)
        last_iter_clip = self._last_iter_clip
        residual = x
        signs = self._random_signs(dim_1, encode_params[self.SEED_PARAMS_KEY],
                                   x.dtype)

        # Compute the Kashin coefficients.
        for _ in range(self._num_iters - 1):
            residual, kashin_coefficients = self._kashin_iter(
                residual, kashin_coefficients, signs, clip_level)
            clip_level *= tf.cast(encode_params[self.ETA_PARAMS_KEY], x.dtype)
        # The last iteration can be with or without clipping.
        kashin_coefficients += self._kashin_forward(residual, signs,
                                                    clip_level, last_iter_clip)
        if last_iter_clip:
            # If there is clipping in the last iteration, this can result in
            # biased representation of smaller magnitude. We compensate for this
            # by scaling such that the norm is preserved.
            kashin_coefficients *= tf.div_no_nan(
                tf.norm(x, axis=1, keepdims=True),
                tf.norm(kashin_coefficients, axis=1, keepdims=True))

        return {self.ENCODED_VALUES_KEY: kashin_coefficients}
Example #5
def sparse_reduce(sparse_tensor, reduce_type):
    bs = tf.cast(sparse_tensor.dense_shape[0], dtype=tf.int32)
    if reduce_type == 'sum':
        sparse_tensor_reduce = tf.sparse.reduce_sum(sparse_tensor,
                                                    axis=1,
                                                    keepdims=True)
    elif reduce_type == 'max':
        sparse_tensor_reduce = tf.sparse.reduce_max(sparse_tensor,
                                                    axis=1,
                                                    keepdims=True)
    elif reduce_type == 'mean':
        sparse_tensor_reduce = tf.sparse.reduce_sum(sparse_tensor,
                                                    axis=1,
                                                    keepdims=True)
        indices = tf.cast(sparse_tensor.indices, tf.int32)
        line_number_indices = indices[:, 0]
        line_count = tf.expand_dims(
            tf.math.bincount(line_number_indices,
                             minlength=bs,
                             dtype=tf.float32), 1)
        sparse_tensor_reduce = tf.div_no_nan(sparse_tensor_reduce, line_count)
    else:
        raise ValueError("Unsupported reduce_type: {}".format(reduce_type))
    sparse_tensor_scatter = tf.reshape(sparse_tensor_reduce, shape=[bs, 1])
    return sparse_tensor_scatter
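For reference, a small assumed input exercising the 'mean' branch; the empty third row yields 0 rather than NaN:

import tensorflow as tf

sp = tf.SparseTensor(indices=[[0, 0], [0, 1], [1, 0]],
                     values=[2.0, 4.0, 5.0],
                     dense_shape=[3, 2])
with tf.Session() as sess:
    print(sess.run(sparse_reduce(sp, 'mean')))  # [[3.] [5.] [0.]]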
Example #6
    def forward(self, tensors, mode: str = None):
        """Computes Triplet Precision

        Parameters
        ----------
        tensors : Tuple[tf.Tensor]
            - positives : shape = (batch, num_events)
            - negatives : shape = (batch, num_events, num_negatives)
            - mask : shape = (batch, num_events, num_negatives)
            - weights : shape = (batch, num_events)

        Returns
        -------
        tf.Tensor
            Triplet precision
        """
        # Retrieve positives and negatives logits
        positives, negatives, mask, weights = tensors
        positives, negatives = make_same_shape([positives, negatives],
                                               broadcast=False)

        # One triplet precision per event
        event_triplet = WeightedAverage()((tf.cast(
            positives > negatives, tf.float32), tf.cast(mask, tf.float32)),
                                          mode)

        # Each event contributes according to its weight
        event_weights = weights * tf.to_float(tf.reduce_any(mask, axis=-1))
        return tf.div_no_nan(tf.reduce_sum(event_triplet * event_weights),
                             tf.reduce_sum(event_weights))
Example #7
    def __call__(self, tensors: Dict[str, tf.Tensor]) -> Dict[str, Tuple]:
        # Retrieve tensors
        logits = tensors[self.logits]
        targets = tensors[self.targets]
        ndims = len(targets.shape) - 1

        # Set logits of inputs to -inf
        if self.inputs is not None:
            inputs = tensors[self.inputs]
            logits = logits + tf.cast(inputs, tf.float32) * tf.float32.min

        # Retrieve top k predictions
        _, indices = tf.math.top_k(logits, k=self.k, sorted=True)
        for _ in range(ndims):
            indices = tf.expand_dims(indices, axis=-2)
        equal_topk = tf.equal(tf.cast(indices, tf.int64), tf.expand_dims(targets, axis=-1))

        # Discounted cumulative gain
        pos_in_target = tf.reduce_sum(tf.cast(equal_topk, tf.float32), axis=-2)
        discount = tf.math.log(2.0) / tf.math.log(tf.range(2, self.k + 2, dtype=tf.float32))
        for _ in range(ndims + 1):
            discount = tf.expand_dims(discount, axis=0)
        dcg = tf.reduce_sum(discount * pos_in_target, axis=-1)

        # Ideal discounted cumulative gain
        num_targets = tf.reduce_sum(tf.cast(tf.not_equal(targets, -1), tf.int64), axis=-1)
        num_targets = tf.math.minimum(num_targets, self.k)
        all_in_target = tf.cast(tf.sequence_mask(num_targets, maxlen=self.k), tf.float32)
        idcg = tf.reduce_sum(discount * all_in_target, axis=-1)

        # Normalized DCG
        ndcg = tf.div_no_nan(dcg, idcg)
        return {self.name: tf.metrics.mean(ndcg)}
Example #8
def get_IOU(y_true, y_pred):

    # true box
    true_halfwidth = y_true[..., 3] / 2.
    true_halfheight = y_true[..., 4] / 2.
    true_x1 = y_true[..., 1] - true_halfwidth
    true_y1 = y_true[..., 2] - true_halfheight
    true_x2 = y_true[..., 1] + true_halfwidth
    true_y2 = y_true[..., 2] + true_halfheight

    # pred box
    pred_halfwidth = y_pred[..., 3] / 2.
    pred_halfheight = y_pred[..., 4] / 2.
    pred_x1 = y_pred[..., 1] - pred_halfwidth
    pred_y1 = y_pred[..., 2] - pred_halfheight
    pred_x2 = y_pred[..., 1] + pred_halfwidth
    pred_y2 = y_pred[..., 2] + pred_halfheight

    xA = tf.maximum(pred_x1, true_x1)
    yA = tf.maximum(pred_y1, true_y1)
    xB = tf.minimum(pred_x2, true_x2)
    yB = tf.minimum(pred_y2, true_y2)

    intersect = (tf.maximum(xB - xA, tf.zeros(tf.shape(xB))) *
                 tf.maximum(yB - yA, tf.zeros(tf.shape(yB))))

    trueArea = y_true[..., 3] * y_true[..., 4]
    predArea = y_pred[..., 3] * y_pred[..., 4]

    union = trueArea + predArea - intersect

    return tf.div_no_nan(intersect, union)
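From the indexing above, the assumed box layout is [..., 1:3] = centre (x, y) and [..., 3:5] = (width, height), with channel 0 unused here. A toy check with two 4x4 boxes offset by one unit:

import tensorflow as tf

y_true = tf.constant([[0.0, 5.0, 5.0, 4.0, 4.0]])
y_pred = tf.constant([[0.0, 6.0, 5.0, 4.0, 4.0]])
with tf.Session() as sess:
    print(sess.run(get_IOU(y_true, y_pred)))  # [0.6] = 12 / (16 + 16 - 12)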
Example #9
    def forward(self, tensors, mode: str = None):
        """Forward method of the layer

        Parameters
        ----------
        tensors : Tuple[tf.Tensor]
            - positives : shape = (batch, num_events)
            - negatives : shape = (batch, num_events, num_negatives)
            - mask : shape = (batch, num_events, num_negatives)
            - weights : shape = (batch, num_events)

        Returns
        -------
        tf.Tensor
            BPR loss
        """
        positives, negatives, mask, weights = tensors
        positives, negatives = make_same_shape([positives, negatives],
                                               broadcast=False)
        # One score per negative : (batch, num_events, num_negatives)
        scores = -tf.log_sigmoid(positives - negatives)
        # One loss per event, average of scores : (batch, num_events)
        event_scores = WeightedAverage()((scores, tf.to_float(mask)))
        # Each event contributes according to its weight
        event_weights = weights * tf.to_float(tf.reduce_any(mask, axis=-1))
        event_losses = event_scores * event_weights
        return tf.div_no_nan(tf.reduce_sum(event_losses),
                             tf.reduce_sum(event_weights))
Example #10
    def forward(self, tensors, mode: str = None):
        """Forward method of the layer
        (details: https://arxiv.org/pdf/1706.03847.pdf)

        Parameters
        ----------
        tensors : Tuple[tf.Tensor]
            - positives : shape = (batch, num_events)
            - negatives : shape = (batch, num_events, num_negatives)
            - mask : shape = (batch, num_events, num_negatives)
            - weights : shape = (batch, num_events)

        Returns
        -------
        tf.Tensor
            TopOne Max loss
        """
        positives, negatives, mask, weights = tensors
        positives, negatives = make_same_shape([positives, negatives], broadcast=False)
        softmax_scores = Softmax()((negatives, tf.to_float(mask)))
        losses = tf.multiply(softmax_scores, tf.nn.sigmoid(negatives - positives) + tf.nn.sigmoid(tf.square(negatives)))
        # One loss per event, average of scores : (batch, num_events)
        event_scores = WeightedAverage()((losses, tf.to_float(mask)))
        # Each event contributes according to its weight
        event_weights = weights * tf.to_float(tf.reduce_any(mask, axis=-1))
        event_losses = event_scores * event_weights
        return tf.div_no_nan(tf.reduce_sum(event_losses), tf.reduce_sum(event_weights))
Example #11
  def encode(self, x, encode_params):
    """See base class."""
    min_x = tf.reduce_min(x)
    max_x = tf.reduce_max(x)

    max_value = tf.cast(encode_params[self.MAX_INT_VALUE_PARAMS_KEY], x.dtype)
    # Shift the values to range [0, max_value].
    # In the case of min_x == max_x, this will return all zeros.
    x = tf.div_no_nan(x - min_x, max_x - min_x) * max_value

    # Randomized rounding.
    floored_x = tf.floor(x)
    random_seed = tf.random.uniform((2,), maxval=tf.int64.max, dtype=tf.int64)
    num_elements = tf.reduce_prod(tf.shape(x))
    rounding_floats = tf.reshape(
        self._random_floats(num_elements, random_seed, x.dtype), tf.shape(x))

    bernoulli = rounding_floats < (x - floored_x)
    quantized_x = floored_x + tf.cast(bernoulli, x.dtype)

    # Include the random seed in the encoded tensors so that it can be used to
    # generate the same random sequence in the decode method.
    encoded_tensors = {
        self.ENCODED_VALUES_KEY: quantized_x,
        self.SEED_PARAMS_KEY: random_seed,
        self.MIN_MAX_VALUES_KEY: tf.stack([min_x, max_x])
    }

    return encoded_tensors
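A standalone sketch of the scale-to-[0, max_value] step above with illustrative values (deterministic rounding here for brevity; the encoder above uses randomized rounding). tf.div_no_nan maps everything to zero when min_x == max_x, as the comment notes.

import tensorflow as tf

x = tf.constant([1.0, 1.5, 2.0])
min_x, max_x, max_value = tf.reduce_min(x), tf.reduce_max(x), 3.0
scaled = tf.div_no_nan(x - min_x, max_x - min_x) * max_value  # [0., 1.5, 3.]
with tf.Session() as sess:
    print(sess.run(tf.round(scaled)))  # [0. 2. 3.]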
Example #12
def UserEmbedding(tensors: tf.Tensor,
                  mode: str,
                  keep_prob: float,
                  reduce_mode: str = "average"):
    """Compute Weighted Sum (randomly masking inputs in TRAIN mode)."""
    embeddings, mask = tensors

    # Drop entries without re-scaling (not classical dropout)
    if mode == deepr.TRAIN:
        LOGGER.info("Applying random mask to inputs (TRAIN only)")
        mask_random = tf.random.uniform(tf.shape(mask)) <= keep_prob
        mask = tf.logical_and(mask, mask_random)

    weights = tf.cast(mask, tf.float32)

    # Scale the weights depending on the reduce mode
    if reduce_mode == "l2":
        weights = tf.nn.l2_normalize(weights, axis=-1)
    elif reduce_mode == "average":
        weights = tf.div_no_nan(weights,
                                tf.reduce_sum(weights, axis=-1, keepdims=True))
    elif reduce_mode == "sum":
        pass
    else:
        raise ValueError(
            f"Reduce mode {reduce_mode} unknown (must be 'l2', 'average' or 'sum')"
        )

    return tf.reduce_sum(embeddings * tf.expand_dims(weights, axis=-1),
                         axis=-2)
Example #13
  def encode(self, x, encode_params):
    """See base class."""
    dim = tf.shape(x)[-1]
    x = tf.reshape(x, [-1, dim])

    # Per-channel min and max.
    min_x = tf.reduce_min(x, axis=0)
    max_x = tf.reduce_max(x, axis=0)

    max_value = tf.cast(encode_params[self.MAX_INT_VALUE_PARAMS_KEY], x.dtype)
    # Shift the values to range [0, max_value].
    # In the case of min_x == max_x, this will return all zeros.
    x = tf.div_no_nan(x - min_x, max_x - min_x) * max_value
    if self._stochastic:  # Randomized rounding.
      floored_x = tf.floor(x)
      bernoulli = tf.random_uniform(tf.shape(x), dtype=x.dtype)
      bernoulli = bernoulli < (x - floored_x)
      quantized_x = floored_x + tf.cast(bernoulli, x.dtype)
    else:  # Deterministic rounding.
      quantized_x = tf.round(x)

    encoded_tensors = {
        self.ENCODED_VALUES_KEY: quantized_x,
        self.MIN_MAX_VALUES_KEY: tf.stack([min_x, max_x])
    }

    return encoded_tensors
Example #14
def masked_softmax_cross_entropy_with_logit(y_true, y_pred):
    """
    Masks out elements equal to -1 in y_true and averages the loss over
    the remaining (valid) elements.

    Sample weights and the standard layer mask should not be enabled;
    if they are, the averaging would be applied twice.

    The standard Keras masking mechanism does not seem to work correctly
    on TPU, so this loss is a temporary workaround.
    """
        
    if len(y_true.shape)+1 != len(y_pred.shape):
        y_true = tf.squeeze(y_true, axis=[-1])
           
    mask = tf.not_equal(y_true, -1)
    safe_y_true = tf.where(mask, y_true, tf.zeros_like(y_true))
    safe_y_true = tf.cast(safe_y_true, dtype=tf.int32)
           
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=safe_y_true,
            logits=y_pred,
        )  
           
    mask = tf.cast(mask, tf.float32)
           
    sum_cross_entropy = tf.reduce_sum(cross_entropy*mask)
    sum_mask = tf.reduce_sum(mask)
           
    averaged_loss = tf.div_no_nan(sum_cross_entropy, sum_mask)
           
    return averaged_loss
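A brief assumed example: the second label is the padding value -1, so only the first row contributes to the averaged loss.

import tensorflow as tf

y_true = tf.constant([[1], [-1]])
y_pred = tf.constant([[0.1, 2.0, 0.3], [1.0, 0.0, 0.0]])
loss = masked_softmax_cross_entropy_with_logit(y_true, y_pred)
with tf.Session() as sess:
    print(sess.run(loss))  # softmax cross-entropy of the first row only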
Example #15
def height_normal_consistency_loss(depth, normal, mask3,
                                   consistency_loss_factor: float,
                                   imgSize: int, max_outputs: int):
    near = uncompressDepth(1)
    far = uncompressDepth(0)
    d = uncompressDepth(depth)
    h = tf.div_no_nan(d - near, far - near)

    sobel = tf.image.sobel_edges(
        h)  # b,h,w,1,[dy,dx] - 1 because height has 1 channel
    dx = sobel[:, :, :, :, 1]  # b,h,w,1
    dy = -sobel[:, :, :, :, 0]
    # We're using a depth map instead of a height map, which means bright
    # values are at a greater depth, so the gradient has to be inverted.
    texelSize = 1 / imgSize
    dz = tf.ones_like(dx) * texelSize * 2

    cn = tf.nn.l2_normalize(tf.concat([dx, dy, dz], axis=-1), axis=-1)

    cn = cn * 0.5 + 0.5
    cl = masked_loss(l1_loss(cn, normal), mask3)

    clr = tf.reduce_mean(cl, name="consistency_loss") * consistency_loss_factor
    add_moving_summary(clr)
    tf.losses.add_loss(clr, tf.GraphKeys.LOSSES)

    repeat = [1 for _ in range(len(depth.shape))]
    repeat[-1] = 3
    tbutil.four_side_by_side(tf.tile(depth, repeat), cn, normal, cl,
                             "consistency", max_outputs)
Example #16
    def __call__(self, tensors: Dict[str, tf.Tensor]) -> Dict[str, Tuple]:
        # Retrieve tensors
        logits = tensors[self.logits]  # (batch, num_classes)
        targets = tensors[self.targets]  # (batch, ..., num_targets)
        ndims = len(targets.shape) - 1

        # Set logits of inputs to -inf
        if self.inputs is not None:
            inputs = tensors[self.inputs]  # (batch, num_classes)
            logits = logits + tf.cast(inputs, tf.float32) * tf.float32.min

        # Retrieve top k predictions, shape = (batch, k)
        _, indices = tf.math.top_k(logits, k=self.k, sorted=True)
        # shape = (batch, ..., 1, k)
        for _ in range(ndims):
            indices = tf.expand_dims(indices, axis=-2)
        # shape = (batch, ..., num_targets, k)
        equal_topk = tf.equal(tf.cast(indices, tf.int64), tf.expand_dims(targets, axis=-1))

        # Compute number of items in top k
        num_in_topk = tf.reduce_sum(tf.reduce_sum(tf.cast(equal_topk, tf.int64), axis=-1), axis=-1)
        num_targets = tf.reduce_sum(tf.cast(tf.not_equal(targets, -1), tf.int64), axis=-1)
        num_targets = tf.math.minimum(num_targets, self.k)
        recall_at_k = tf.div_no_nan(tf.cast(num_in_topk, tf.float32), tf.cast(num_targets, tf.float32))
        return {self.name: tf.metrics.mean(recall_at_k)}
Example #17
def reduce_mean_masked(input_tensor,
                       is_valid,
                       axis=None,
                       keepdims=False,
                       try_fast=True):
    """Compute the mean of elements across dimensions of a tensor, ignoring elements if
    the corresponding element in `mask` is False.

    In general, `K = dim(mask) <= dim(input_tensor) = L`, and `mask`'s shape must match
    the first K dimensions of `tensor`'s shape. Then `input_tensor[i1,...,iK,...iL]` is
    ignored iff `mask[i1,...,iK]` is False.
    """
    if try_fast:
        return tf.cond(
            tf.reduce_all(is_valid),
            lambda: tf.reduce_mean(input_tensor, axis=axis, keepdims=keepdims),
            lambda: reduce_mean_masked(
                input_tensor, is_valid, axis, keepdims, try_fast=False))

    if axis is None and not keepdims:
        return tf.reduce_mean(tf.boolean_mask(input_tensor, is_valid))

    n_new_dims = input_tensor.get_shape().ndims - is_valid.get_shape().ndims
    is_valid = expand_dims(is_valid, [-1] * n_new_dims)
    is_valid = broadcast_like(is_valid, input_tensor)
    replaced = tf.where(is_valid, input_tensor, tf.zeros_like(input_tensor))
    sum_valid = tf.reduce_sum(replaced, axis=axis, keepdims=keepdims)
    n_valid = tf.count_nonzero(is_valid,
                               axis=axis,
                               keepdims=keepdims,
                               dtype=input_tensor.dtype)
    return tf.div_no_nan(sum_valid, n_valid)
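Example usage with assumed values; a global reduction (axis=None) is shown, so the helper-free tf.boolean_mask path handles the masked entries and the invalid one does not affect the mean.

import tensorflow as tf

values = tf.constant([1.0, 2.0, 100.0])
is_valid = tf.constant([True, True, False])
with tf.Session() as sess:
    print(sess.run(reduce_mean_masked(values, is_valid)))  # 1.5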
Example #18
    def forward(self, tensors, mode: str = None):
        """Forward method of the layer

        Parameters
        ----------
        tensors : Tuple[tf.Tensor]
            - positives : shape = (batch, num_events)
            - negatives : shape = (batch, num_events, num_negatives)
            - mask : shape = (batch, num_events, num_negatives)
            - weights : shape = (batch, num_events)

        Returns
        -------
        tf.Tensor
            Negative Sampling loss
        """
        positives, negatives, mask, weights = tensors
        true_losses = tf.nn.sigmoid_cross_entropy_with_logits(
            labels=tf.ones_like(positives), logits=positives)
        sampled_losses = tf.nn.sigmoid_cross_entropy_with_logits(
            labels=tf.zeros_like(negatives), logits=negatives)
        event_scores = true_losses + WeightedAverage()(
            (sampled_losses, tf.to_float(mask)))
        event_weights = weights * tf.to_float(tf.reduce_any(mask, axis=-1))
        return tf.div_no_nan(tf.reduce_sum(event_scores * event_weights),
                             tf.reduce_sum(event_weights))
Example #19
    def encode(self, x, encode_params):
        """See base class."""
        if self.MIN_MAX_VALUES_KEY in encode_params:
            min_max = tf.cast(encode_params[self.MIN_MAX_VALUES_KEY], x.dtype)
            min_x, max_x = min_max[0], min_max[1]
            x = tf.clip_by_value(x, min_x, max_x)
        else:
            min_x = tf.reduce_min(x)
            max_x = tf.reduce_max(x)

        max_value = tf.cast(encode_params[self.MAX_INT_VALUE_PARAMS_KEY],
                            x.dtype)
        # Shift the values to range [0, max_value].
        # In the case of min_x == max_x, this will return all zeros.
        x = tf.div_no_nan(x - min_x, max_x - min_x) * max_value
        if self._stochastic:  # Randomized rounding.
            floored_x = tf.floor(x)
            bernoulli = tf.random_uniform(tf.shape(x), dtype=x.dtype)
            bernoulli = bernoulli < (x - floored_x)
            quantized_x = floored_x + tf.cast(bernoulli, x.dtype)
        else:  # Deterministic rounding.
            quantized_x = tf.round(x)

        encoded_tensors = {self.ENCODED_VALUES_KEY: quantized_x}
        if self.MIN_MAX_VALUES_KEY not in encode_params:
            encoded_tensors[self.MIN_MAX_VALUES_KEY] = tf.stack([min_x, max_x])
        return encoded_tensors
Example #20
    def apply_embed_on_doc_id(self):
        """Applies embedding lookup and averaging for doc id features

        :return Tensor Shape=[batch_size, max_group_size, num_doc_id_fields, num_units_for_id_ftr]
        """
        hparams = self._hparams

        doc_ftrs = []
        for i, doc_field in enumerate(self._doc_id_fields):
            seq_mask = tf.cast(
                tf.not_equal(doc_field, hparams.pad_id_for_id_ftr),
                dtype=tf.float32)  # [batch_size, max_group_size, num_doc_id]
            seq_mask = tf.expand_dims(
                seq_mask,
                axis=-1)  # [batch_size, max_group_size, num_doc_id, 1]
            seq_length = tf.reduce_sum(
                seq_mask,
                axis=-2,
            )  # [batch_size, max_group_size, 1]

            doc_id_embeddings = tf.nn.embedding_lookup(
                self.embedding,
                doc_field,
            )  # [batch_size, max_group_size, num_doc_id, num_units_for_id_ftr]
            sum_doc_id_embedding = tf.reduce_sum(
                doc_id_embeddings * seq_mask,
                axis=2)  # [batch_size, max_group_size, num_units_for_id_ftr]
            doc_id_avg_embedding = tf.div_no_nan(
                sum_doc_id_embedding, seq_length
            )  # [batch_size, max_group_size, num_units_for_id_ftr]
            doc_ftrs.append(doc_id_avg_embedding)
        return tf.stack(doc_ftrs, axis=2)
Example #21
    def spec(
        self,
        ndl: tf.Tensor,
        ndv: tf.Tensor,
        ndh: tf.Tensor,
        ldh: tf.Tensor,
        vdh: tf.Tensor,
        F0: tf.Tensor,
        roughness: tf.Tensor,
    ) -> tf.Tensor:
        with tf.variable_scope("Specular"):
            alpha = saturate(roughness * roughness, 1e-3)

            F = self.F(F0, ldh)
            G = self.G(alpha, ndl, ndv)
            D = self.D(alpha, ndh)

            ret = tf.div_no_nan(F * G * D, 4.0 * ndl)

            ret = tf.where(tf.math.less(self._to_vec3(ndh), EPS),
                           tf.zeros_like(ret), ret)
            ret = tf.where(tf.math.less(self._to_vec3(ldh * ndl), EPS),
                           tf.zeros_like(ret), ret)
            ret = tf.where(tf.math.less(self._to_vec3(vdh * ndv), EPS),
                           tf.zeros_like(ret), ret)
            return ret
Example #22
    def divide_no_nan(self, x, y):
        if version.parse(tf.__version__) >= version.parse('1.11.0'):
            return tf.div_no_nan(x, y)
        else:
            result = x / y
            return tf.where(tf.is_finite(result), result,
                            tf.zeros_like(result))
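Both branches above agree on the division-by-zero case; a small assumed check of the fallback expression against tf.div_no_nan:

import tensorflow as tf

x, y = tf.constant([1.0, 2.0]), tf.constant([0.0, 4.0])
fallback = tf.where(tf.is_finite(x / y), x / y, tf.zeros_like(x / y))
with tf.Session() as sess:
    print(sess.run(tf.div_no_nan(x, y)))  # [0.  0.5]
    print(sess.run(fallback))             # [0.  0.5]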
Example #23
    def forward(self, tensors, mode: str = None):
        """Forward method of the layer"""
        values, weights = tensors

        # Values and weights need to have the same shape up to axis
        # and compatible after axis
        axis = len(weights.shape) - 1
        weights = tf.broadcast_to(
            weights,
            tf.maximum(tf.shape(weights),
                       tf.shape(values)[:len(weights.shape)]))
        values, weights = make_same_shape([values, weights], broadcast=False)

        # Reduce weighted values and weights
        weighted_values = tf.reduce_sum(values * weights, axis=axis)
        sum_weights = tf.reduce_sum(weights, axis=axis)

        # Average values and weights, take care of all weights zeros
        if self.default is None:
            return weighted_values / sum_weights
        elif self.default == 0:
            weighted_average = tf.div_no_nan(weighted_values, sum_weights)
        else:
            weighted_average = tf.where(
                tf.equal(sum_weights, 0),
                self.default * tf.ones_like(weighted_values),
                weighted_values / sum_weights)
        return weighted_average
Example #24
    def D(self, alpha: tf.Tensor, ndh: tf.Tensor) -> tf.Tensor:
        with tf.variable_scope("Distribution"):
            a2 = alpha * alpha

            denom = (ndh * ndh) * (a2 - 1) + 1.0
            denom2 = denom * denom

            return tf.div_no_nan(a2, np.pi * denom2)
Example #25
def uncompressDepth(d: tf.Tensor,
                    sigma: float = 2.5,
                    epsilon: float = 0.7) -> tf.Tensor:
    """From 0-1 values to full depth range. The possible depth range
        is modelled by sigma and epsilon and with sigma=2.5 and epsilon=0.7
        it is between 0.17 and 1.4.
        """
    return tf.div_no_nan(1.0, 2.0 * sigma * d + epsilon)
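A quick check of the docstring's range claim with the default sigma and epsilon:

import tensorflow as tf

with tf.Session() as sess:
    print(sess.run(uncompressDepth(tf.constant(1.0))))  # 1 / 5.7 ~= 0.175 (near)
    print(sess.run(uncompressDepth(tf.constant(0.0))))  # 1 / 0.7 ~= 1.43  (far)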
Example #26
    def bbox_iou_circle(self, boxes1, boxes2):

        boxes1_area = boxes1[..., 2] * boxes1[..., 3]
        boxes2_area = boxes2[..., 2] * boxes2[..., 3]

        # boxes [...,0:2] x0,y0; [...,2:4] x1,y1
        boxes1 = tf.concat([boxes1[..., :2] - boxes1[..., 2:] * 0.5,
                            boxes1[..., :2] + boxes1[..., 2:] * 0.5], axis=-1)
        boxes2 = tf.concat([boxes2[..., :2] - boxes2[..., 2:] * 0.5,
                            boxes2[..., :2] + boxes2[..., 2:] * 0.5], axis=-1)

        # constant pi / 4
        C = tf.constant(math.pi/4.)

        # three cases: pred enclose GT; GT enclose pred; neither
        pred_enc_GT_mask = tf.cast(boxes1[...,0]<boxes2[...,0], tf.float32) *\
                           tf.cast(boxes1[...,1]<boxes2[...,1], tf.float32) *\
                           tf.cast(boxes1[...,2]>boxes2[...,2], tf.float32) *\
                           tf.cast(boxes1[...,3]>boxes2[...,3], tf.float32)

        GT_enc_pred_mask = tf.cast(boxes1[...,0]>boxes2[...,0], tf.float32) *\
                           tf.cast(boxes1[...,1]>boxes2[...,1], tf.float32) *\
                           tf.cast(boxes1[...,2]<boxes2[...,2], tf.float32) *\
                           tf.cast(boxes1[...,3]<boxes2[...,3], tf.float32)

        part_intersect_mask = 1.0 - GT_enc_pred_mask - pred_enc_GT_mask

        left_up = tf.maximum(boxes1[..., :2], boxes2[..., :2])
        right_down = tf.minimum(boxes1[..., 2:], boxes2[..., 2:])

        inter_section = tf.maximum(right_down - left_up, 0.0)
        inter_area = inter_section[..., 0] * inter_section[..., 1]
        union_area = boxes1_area + boxes2_area - inter_area

        # case 1 IOU:
        pred_enc_GT_iou = pred_enc_GT_mask * tf.div_no_nan(inter_area, C * union_area)
        # case 2 IOU:
        GT_enc_pred_iou = GT_enc_pred_mask * tf.div_no_nan(C * inter_area, union_area)
        # case 3 IOU
        part_intersect_iou = part_intersect_mask * tf.div_no_nan(C * inter_area, \
            (C * boxes1_area + boxes2_area - C * inter_area))

        #iou = 1.0 * tf.div_no_nan(inter_area, union_area)
        iou_circle = pred_enc_GT_iou + GT_enc_pred_iou + part_intersect_iou

        return iou_circle
Example #27
    def loss(self, y_true, y_pred):
        ndims = len(y_pred.get_shape().as_list()) - 2
        vol_axes = list(range(1, ndims+1))

        top = 2 * tf.reduce_sum(y_true * y_pred, vol_axes)
        bottom = tf.reduce_sum(y_true + y_pred, vol_axes)
        dice = tf.reduce_mean(tf.div_no_nan(top, bottom))
        return -dice
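For reference, the same soft-Dice arithmetic on a tiny assumed 1-D example outside the class; perfect overlap gives a Dice score of 1, so the method above would return -1.

import tensorflow as tf

y_true = tf.constant([[1.0, 0.0, 1.0, 1.0]])
y_pred = tf.constant([[1.0, 0.0, 1.0, 1.0]])
top = 2 * tf.reduce_sum(y_true * y_pred, axis=[1])
bottom = tf.reduce_sum(y_true + y_pred, axis=[1])
with tf.Session() as sess:
    print(sess.run(tf.reduce_mean(tf.div_no_nan(top, bottom))))  # 1.0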
Example #28
    def __call__(self) -> Image:
        declare_eager_execution()
        if self.emap is None:
            self.generate_emap()

        if self.atten_corr is not None:
            listmode_ = self.atten_corr(self.listmode)
        else:
            listmode_ = self.listmode

        x_tf = Image(data=tf.Variable(
            np.ones(self.image_config.shape, dtype=np.float32)),
                     center=self.image_config.center,
                     size=self.image_config.size)
        emap_data_n0_zero = copy(self.emap.data)
        emap_data_n0_zero[emap_data_n0_zero == 0.0] = 1e8
        emap_tf = self.emap.update(data=tf.constant(emap_data_n0_zero))
        lors_tf = self.listmode.lors.update(
            data=tf.constant(self.listmode.lors.data))
        listmode_tf = self.listmode.update(data=tf.constant(listmode_.data),
                                           lors=lors_tf)

        for _ in tqdm(range(self.n_iter)):
            _listmode_tf = Project('tf-eager')(x_tf, lors_tf)
            listmode_div = tf.div_no_nan(listmode_tf.data, _listmode_tf.data)
            _bp = BackProject('tf-eager')(
                listmode_tf.update(data=listmode_div), emap_tf)
            x_tf = x_tf * _bp / emap_tf

        x = x_tf.update(data=x_tf.data.numpy())

        # if self.scatter_corr is not None:
        #     if self.atten_corr is not None:
        #         listmode_ = self.scatter_corr(x, self.atten_corr.u_map, self.scanner, self.listmode)

        #         x_tf = Image(data = tf.Variable(np.ones(self.image_config.shape, dtype = np.float32)),
        #                  center = self.image_config.center,
        #                  size = self.image_config.size)
        #         emap_data_n0_zero = copy(self.emap.data)
        #         emap_data_n0_zero[emap_data_n0_zero == 0.0] = 1e8
        #         emap_tf = self.emap.update(data = tf.constant(emap_data_n0_zero))
        #         lors_tf = self.listmode.lors.update(data = tf.constant(self.listmode.lors.data))
        #         listmode_tf = self.listmode.update(data = tf.constant(listmode_.data), lors = lors_tf)

        #         for _ in tqdm(range(self.n_iter)):
        #             _listmode_tf = Project('tf-eager')(x_tf, lors_tf)
        #             listmode_div = tf.div_no_nan(listmode_tf.data, _listmode_tf.data)
        #             _bp = BackProject('tf-eager')(listmode_tf.update(data = listmode_div), emap_tf)
        #             x_tf = x_tf * _bp / emap_tf

        #         x = x_tf.update(data = x_tf.data.numpy())

        if self.psf_corr is not None:
            image_ = self.psf_corr(x)
        else:
            image_ = x
        return image_
Example #29
def nonzero_reduce_mean(emb):  # nonzero-mean-pooling
    axis_2_sum = tf.reduce_sum(emb, axis=2)
    multi_cate_nonzero = tf.count_nonzero(axis_2_sum,
                                          1,
                                          keepdims=True,
                                          dtype=tf.float32)
    multi_cate_sum = tf.reduce_sum(emb, axis=1)
    reduce_mean_emb = tf.div_no_nan(multi_cate_sum, multi_cate_nonzero)
    return reduce_mean_emb
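Assumed usage with one all-zero (padding) embedding slot; the mean is taken over the two non-zero slots only, and an all-zero input would give zeros rather than NaN.

import tensorflow as tf

emb = tf.constant([[[1.0, 3.0], [3.0, 5.0], [0.0, 0.0]]])  # (batch=1, slots=3, dim=2)
with tf.Session() as sess:
    print(sess.run(nonzero_reduce_mean(emb)))  # [[2. 4.]]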
Example #30
    def batch_semi_hard(labels, embeddings):
        """Computes the triplet loss with semi-hard negative mining.
        The loss encourages the positive distances (between a pair of embeddings with
        the same labels) to be smaller than the minimum negative distance among
        those negatives that are at least greater than the positive distance plus
        the margin constant (semi-hard negatives) in the mini-batch. If no such negative exists,
        uses the largest negative distance instead.
        See: https://arxiv.org/abs/1503.03832.

        :param labels: 1-D tf.int32 `Tensor` with shape [batch_size] of multiclass integer labels.
        :param embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should be l2 normalized.
        :return loss: tf.float32 scalar.
        """
        labels = tf.reshape(labels, [-1, 1])
        batch_size = tf.size(labels)
        # Build pairwise squared distance matrix.
        dist = euclidean_distance(embeddings, squared=True)
        # Build pairwise binary adjacency matrix (equal label mask).
        adjacency = tf.equal(labels, tf.transpose(labels))
        # Invert so we can select negatives only.
        adjacency_not = tf.logical_not(adjacency)

        # Compute the mask.
        dist_tile = tf.tile(
            dist,
            [batch_size, 1])  # stack dist matrix batch_size times, axis=0
        mask = tf.logical_and(tf.tile(adjacency_not, [batch_size, 1]),
                              tf.greater(dist_tile, tf.reshape(dist, [-1, 1])))
        mask = tf.cast(mask, dtype=tf.float32)
        is_negatives_outside = tf.reshape(
            tf.greater(tf.reduce_sum(mask, axis=1, keepdims=True), 0.0),
            [batch_size, batch_size])
        is_negatives_outside = tf.transpose(is_negatives_outside)

        # negatives_outside: smallest D_an where D_an > D_ap.
        negatives_outside = tf.reshape(masked_minimum(dist_tile, mask),
                                       [batch_size, batch_size])
        negatives_outside = tf.transpose(negatives_outside)

        # negatives_inside: largest D_an.
        adjacency_not = tf.cast(adjacency_not, dtype=tf.float32)
        negatives_inside = tf.tile(masked_maximum(dist, adjacency_not),
                                   [1, batch_size])

        semi_hard_negatives = tf.where(is_negatives_outside, negatives_outside,
                                       negatives_inside)

        # In lifted-struct, the authors multiply 0.5 for upper triangular
        #   in semihard, they take all positive pairs except the diagonal.
        mask_positives = tf.cast(adjacency, dtype=tf.float32) - tf.diag(
            tf.ones([batch_size]))
        n_positives = tf.reduce_sum(mask_positives)

        loss_mat = get_loss_tensor(dist, semi_hard_negatives)
        loss = tf.div_no_nan(
            tf.reduce_sum(tf.multiply(loss_mat, mask_positives)), n_positives)
        return loss