def create_target_mapping(example, is_target, seq_len, num_predict, **kwargs):
    """Create target mapping and retrieve the corresponding kwargs."""
    if num_predict is not None:
        # Get masked indices
        indices = tf.range(seq_len, dtype=tf.int64)
        indices = tf.boolean_mask(indices, is_target)

        # Handle the case that actual_num_predict < num_predict
        actual_num_predict = tf.shape(indices)[0]
        pad_len = num_predict - actual_num_predict

        # Create target mapping
        target_mapping = tf.one_hot(indices, seq_len, dtype=tf.float32)
        paddings = tf.zeros([pad_len, seq_len], dtype=target_mapping.dtype)
        target_mapping = tf.concat([target_mapping, paddings], axis=0)
        example["target_mapping"] = tf.reshape(target_mapping,
                                               [num_predict, seq_len])

        # Handle fields in kwargs
        for k, v in kwargs.items():
            pad_shape = [pad_len] + v.shape.as_list()[1:]
            tgt_shape = [num_predict] + v.shape.as_list()[1:]
            example[k] = tf.concat([
                tf.boolean_mask(v, is_target),
                tf.zeros(shape=pad_shape, dtype=v.dtype)
            ], 0)
            example[k].set_shape(tgt_shape)
    else:
        for k, v in kwargs.items():
            example[k] = v
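
A minimal usage sketch (the `example` dict, the mask, and the `target` kwarg below are made up for illustration; TF 2.x eager execution is assumed):

import tensorflow as tf

example = {}
is_target = tf.constant([False, True, False, True, False])
target = tf.constant([[0], [2], [0], [4], [0]], dtype=tf.int64)
create_target_mapping(example, is_target, seq_len=5, num_predict=3, target=target)
# example["target_mapping"] is [3, 5]: one-hot rows for positions 1 and 3,
# plus one all-zero padding row; example["target"] becomes [[2], [4], [0]].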
Example #2
def roc_auc_score(y_pred, y_true):
    """ ROC AUC Score.

    Approximates the Area Under the Curve (AUC) score, using an approximation
    based on the Wilcoxon-Mann-Whitney U statistic.

    Yan, L., Dodier, R., Mozer, M. C., & Wolniewicz, R. (2003).
    Optimizing Classifier Performance via an Approximation to the Wilcoxon-Mann-Whitney Statistic.

    Measures overall performance for a full range of threshold levels.

    Arguments:
        y_pred: `Tensor`. Predicted values.
        y_true: `Tensor`. Targets (labels), a probability distribution.

    """
    with tf.name_scope("RocAucScore"):

        pos = tf.boolean_mask(y_pred, tf.cast(y_true, tf.bool))
        neg = tf.boolean_mask(y_pred, ~tf.cast(y_true, tf.bool))

        pos = tf.expand_dims(pos, 0)
        neg = tf.expand_dims(neg, 1)

        # original paper suggests performance is robust to exact parameter choice
        gamma = 0.2
        p     = 3

        difference = tf.zeros_like(pos * neg) + pos - neg - gamma

        masked = tf.boolean_mask(difference, difference < 0.0)

        return tf.reduce_sum(tf.pow(-masked, p))
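
A quick sanity check on toy data (a sketch; the values are made up and TF 2.x eager execution is assumed):

import tensorflow as tf

y_true = tf.constant([1., 0., 1., 0.])
y_pred = tf.constant([0.9, 0.2, 0.4, 0.6])
loss = roc_auc_score(y_pred, y_true)
# Only (pos, neg) score pairs with pos - neg < gamma contribute, so the loss
# shrinks as positives are ranked further above negatives.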
Example #3
def boolean_mask(boxlist, indicator, fields=None, scope=None,
                 use_static_shapes=False, indicator_sum=None):
  """Select boxes from BoxList according to indicator and return new BoxList.

  `boolean_mask` returns the subset of boxes that are marked as "True" by the
  indicator tensor. By default, `boolean_mask` returns boxes corresponding to
  the input index list, as well as all additional fields stored in the boxlist
  (indexing into the first dimension). However, one can optionally draw from
  only a subset of fields.

  Args:
    boxlist: BoxList holding N boxes
    indicator: a rank-1 boolean tensor
    fields: (optional) list of fields to also gather from.  If None (default),
      all fields are gathered from.  Pass an empty fields list to only gather
      the box coordinates.
    scope: name scope.
    use_static_shapes: Whether to use an implementation with static shape
      guarantees.
    indicator_sum: An integer containing the sum of `indicator` vector. Only
      required if `use_static_shapes` is True.

  Returns:
    subboxlist: a BoxList corresponding to the subset of the input BoxList
      specified by indicator
  Raises:
    ValueError: if `indicator` is not a rank-1 boolean tensor.
  """
  with tf.name_scope(scope, 'BooleanMask'):
    if indicator.shape.ndims != 1:
      raise ValueError('indicator should have rank 1')
    if indicator.dtype != tf.bool:
      raise ValueError('indicator should be a boolean tensor')
    if use_static_shapes:
      if not (indicator_sum and isinstance(indicator_sum, int)):
        raise ValueError('`indicator_sum` must be of type int')
      selected_positions = tf.cast(indicator, dtype=tf.float32)
      indexed_positions = tf.cast(
          tf.multiply(
              tf.cumsum(selected_positions), selected_positions),
          dtype=tf.int32)
      one_hot_selector = tf.one_hot(
          indexed_positions - 1, indicator_sum, dtype=tf.float32)
      sampled_indices = tf.cast(
          tf.tensordot(
              tf.cast(tf.range(tf.shape(indicator)[0]), dtype=tf.float32),
              one_hot_selector,
              axes=[0, 0]),
          dtype=tf.int32)
      return gather(boxlist, sampled_indices, use_static_shapes=True)
    else:
      subboxlist = box_list.BoxList(tf.boolean_mask(boxlist.get(), indicator))
      if fields is None:
        fields = boxlist.get_extra_fields()
      for field in fields:
        if not boxlist.has_field(field):
          raise ValueError('boxlist must contain all specified fields')
        subfieldlist = tf.boolean_mask(boxlist.get_field(field), indicator)
        subboxlist.add_field(field, subfieldlist)
      return subboxlist
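
An illustrative call (a sketch: assumes the TF Object Detection API, where this helper and `box_list` live, and TF 1.x graph mode, since `tf.name_scope(scope, 'BooleanMask')` is the two-argument TF1 form):

import tensorflow as tf
from object_detection.core import box_list

boxes = box_list.BoxList(tf.constant([[0., 0., 1., 1.], [0., 0., .5, .5]]))
boxes.add_field('scores', tf.constant([0.9, 0.1]))
keep = tf.constant([True, False])
subset = boolean_mask(boxes, keep)  # BoxList holding the first box and its score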
Example #4
    def add_loss(self, separated_waveforms):
        """Add loss for given separated_waveforms."""
        # Permute separated to match references through self.loss_fns.
        _, separated_waveforms = groupwise.apply(self.loss_fns,
                                                 self.signal_types,
                                                 self.source_waveforms,
                                                 separated_waveforms,
                                                 self.unique_signal_types)
        separated_waveforms_nonzero = tf.boolean_mask(
            separated_waveforms, self.source_is_nonzero)[:, tf.newaxis]
        separated_waveforms_zero = tf.boolean_mask(
            separated_waveforms, self.source_is_zero)[:, tf.newaxis]
        # Use eventual loss function as log_mse_loss.
        # Loss for zero references only if self.loss_zero_ref_weight provided.
        if self.loss_zero_ref_weight:
            loss = tf.reduce_sum(
                log_mse_loss(self.source_waveforms_zero,
                             separated_waveforms_zero,
                             max_snr=self.max_snr_for_zero_sources,
                             bias_ref_signal=self.mixture_waveforms_zero))
            loss_zero = tf.identity(self.loss_zero_ref_weight * self.weight *
                                    loss,
                                    name='loss_ref_zero')
            tf.losses.add_loss(loss_zero)

        # Loss for nonzero references.
        loss = tf.reduce_sum(
            log_mse_loss(self.source_waveforms_nonzero,
                         separated_waveforms_nonzero,
                         max_snr=self.max_snr))
        loss_nonzero = tf.identity(self.weight * loss, name='loss_ref_nonzero')
        tf.losses.add_loss(loss_nonzero)
        return separated_waveforms
Example #5
    def single_image_nms(b, batch_boxes, batch_scores, batch_classes):
        boxes_ = []
        scores_ = []
        classes_ = []
        for c in range(num_classes):
            # TODO: use keras backend instead of tf.
            class_boxes = tf.boolean_mask(boxes[b], mask[b, :, c])
            class_box_scores = tf.boolean_mask(box_scores[b, :, c],
                                               mask[b, :, c])
            nms_index = tf.image.non_max_suppression(
                class_boxes,
                class_box_scores,
                max_boxes_tensor,
                iou_threshold=iou_threshold)
            class_boxes = K.gather(class_boxes, nms_index)
            class_box_scores = K.gather(class_box_scores, nms_index)
            classes = K.ones_like(class_box_scores, 'int32') * c
            boxes_.append(class_boxes)
            scores_.append(class_box_scores)
            classes_.append(classes)

        boxes_ = K.concatenate(boxes_, axis=0)
        scores_ = K.concatenate(scores_, axis=0)
        classes_ = K.concatenate(classes_, axis=0)

        batch_boxes = batch_boxes.write(b, boxes_)
        batch_scores = batch_scores.write(b, scores_)
        batch_classes = batch_classes.write(b, classes_)

        return b + 1, batch_boxes, batch_scores, batch_classes
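
`single_image_nms` is a `tf.while_loop` body; a hedged sketch of how it might be driven (the `boxes`, `mask`, `box_scores`, and `max_boxes_tensor` names are closed over, as in the original, and `batch_size` is assumed known):

batch_boxes = tf.TensorArray(tf.float32, size=batch_size, infer_shape=False)
batch_scores = tf.TensorArray(tf.float32, size=batch_size, infer_shape=False)
batch_classes = tf.TensorArray(tf.int32, size=batch_size, infer_shape=False)
_, batch_boxes, batch_scores, batch_classes = tf.while_loop(
    lambda b, *args: b < batch_size, single_image_nms,
    [0, batch_boxes, batch_scores, batch_classes])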
Example #6
    def fn(x):
        encoded_boxes, scores = x

        is_confident = scores >= score_threshold  # shape [N]
        encoded_boxes = tf.boolean_mask(
            encoded_boxes, is_confident)  # shape [num_confident, 4]
        scores = tf.boolean_mask(scores, is_confident)  # shape [num_confident]
        chosen_anchors = tf.boolean_mask(
            anchors, is_confident)  # shape [num_confident, 4]

        boxes = decode(encoded_boxes,
                       chosen_anchors)  # shape [num_confident, 4]
        boxes = tf.clip_by_value(boxes, 0.0, 1.0)

        selected_indices = tf.image.non_max_suppression(
            boxes,
            scores,
            max_output_size=max_detections,
            iou_threshold=iou_threshold,
            score_threshold=score_threshold)

        boxes = tf.gather(boxes, selected_indices)
        scores = tf.gather(scores, selected_indices)
        num_boxes = tf.to_int32(tf.size(selected_indices))

        zero_padding = max_detections - num_boxes
        boxes = tf.pad(boxes, [[0, zero_padding], [0, 0]])
        scores = tf.pad(scores, [[0, zero_padding]])

        boxes.set_shape([max_detections, 4])
        scores.set_shape([max_detections])
        return boxes, scores, num_boxes
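
`fn` maps one image's predictions to fixed-size outputs, so it is presumably applied across the batch with `tf.map_fn` (a sketch; the batched tensor names are hypothetical):

boxes, scores, num_boxes = tf.map_fn(
    fn, (batched_encoded_boxes, batched_scores),
    dtype=(tf.float32, tf.float32, tf.int32))
# boxes: [batch, max_detections, 4]; scores: [batch, max_detections];
# num_boxes: [batch] counts of valid detections per image.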
Example #7
def non_max_suppression(inputs, n_classes, max_output_size, iou_threshold,
                        confidence_threshold):
    batch = tf.unstack(inputs)
    boxes_dicts = []
    for boxes in batch:
        boxes = tf.boolean_mask(boxes, boxes[:, 4] > confidence_threshold)
        classes = tf.argmax(boxes[:, 5:], axis=-1)
        classes = tf.expand_dims(tf.to_float(classes), axis=-1)
        boxes = tf.concat([boxes[:, :5], classes], axis=-1)

        boxes_dict = dict()
        for cls in range(n_classes):
            mask = tf.equal(boxes[:, 5], cls)
            mask_shape = mask.get_shape()
            if mask_shape.ndims != 0:
                class_boxes = tf.boolean_mask(boxes, mask)
                boxes_coords, boxes_conf_scores, _ = tf.split(class_boxes,
                                                              [4, 1, -1],
                                                              axis=-1)
                boxes_conf_scores = tf.reshape(boxes_conf_scores, [-1])
                indices = tf.image.non_max_suppression(boxes_coords,
                                                       boxes_conf_scores,
                                                       max_output_size,
                                                       iou_threshold)
                class_boxes = tf.gather(class_boxes, indices)
                boxes_dict[cls] = class_boxes[:, :5]

        boxes_dicts.append(boxes_dict)

    return boxes_dicts
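
An example invocation (a sketch with made-up shapes; TF 1.x is assumed because of `tf.to_float`). Each input row follows the YOLO layout [x, y, w, h, confidence, per-class scores...]:

import tensorflow as tf

detections = tf.random.uniform([2, 100, 5 + 80])  # batch of 2, 80 classes
per_image = non_max_suppression(detections, n_classes=80, max_output_size=20,
                                iou_threshold=0.5, confidence_threshold=0.5)
# per_image is a list of dicts mapping class index -> [num_kept, 5] boxes.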
Example #8
    def calc_reconstruction(self, z, y):
        with tf.name_scope("VICI_decoder"):

            # Reshape input to a 3D tensor - single channel
            if self.n_conv is not None:
                if self.by_channel == True:
                    conv_pool = tf.reshape(y, shape=[-1, 1, y.shape[1], self.num_det])
                    for i in range(self.n_conv):
                        weight_name = 'w_conv_' + str(i)
                        bias_name = 'b_conv_' + str(i)
                        conv_pre = tf.add(
                            tf.nn.conv2d(conv_pool, self.weights['VICI_decoder'][weight_name],
                                         strides=[1, 1, self.conv_strides[i], 1], padding='SAME'),
                            self.weights['VICI_decoder'][bias_name])
                        conv_post = self.nonlinearity(conv_pre)
                        if self.batch_norm == True:
                            conv_batchNorm = tf.nn.batch_normalization(
                                conv_post,
                                tf.Variable(tf.zeros([1, conv_post.shape[2], conv_post.shape[3]], dtype=tf.float32)),
                                tf.Variable(tf.ones([1, conv_post.shape[2], conv_post.shape[3]], dtype=tf.float32)),
                                None, None, 0.000001)
                            conv_dropout = tf.layers.dropout(conv_batchNorm, rate=self.drate)
                        else:
                            conv_dropout = tf.layers.dropout(conv_post, rate=self.drate)
                        conv_pool = tf.nn.max_pool(conv_dropout, ksize=[1, 1, self.maxpool[i], 1],
                                                   strides=[1, 1, self.pool_strides[i], 1], padding='SAME')

                    fc = tf.concat([z, tf.reshape(conv_pool, [-1, int(conv_pool.shape[2] * conv_pool.shape[3])])],
                                   axis=1)
                if self.by_channel == False:
                    conv_pool = tf.reshape(y, shape=[-1, y.shape[1], y.shape[2], 1])
                    for i in range(self.n_conv):
                        weight_name = 'w_conv_' + str(i)
                        bias_name = 'b_conv_' + str(i)
                        conv_pre = tf.add(
                            tf.nn.conv2d(conv_pool, self.weights['VICI_decoder'][weight_name],
                                         strides=[1, self.conv_strides[i], self.conv_strides[i], 1], padding='SAME'),
                            self.weights['VICI_decoder'][bias_name])
                        conv_post = self.nonlinearity(conv_pre)
                        if self.batch_norm == True:
                            conv_batchNorm = tf.nn.batch_normalization(
                                conv_post,
                                tf.Variable(tf.zeros([conv_post.shape[1], conv_post.shape[2], conv_post.shape[3]], dtype=tf.float32)),
                                tf.Variable(tf.ones([conv_post.shape[1], conv_post.shape[2], conv_post.shape[3]], dtype=tf.float32)),
                                None, None, 0.000001)
                        else:
                            # Avoid an undefined name when batch norm is off
                            # (the original only defined conv_batchNorm above).
                            conv_batchNorm = conv_post
                        conv_pool = tf.nn.max_pool(conv_batchNorm, ksize=[1, self.maxpool[i], self.maxpool[i], 1],
                                                   strides=[1, self.pool_strides[i], self.pool_strides[i], 1],
                                                   padding='SAME')

                    fc = tf.concat([z, tf.reshape(conv_pool, [-1, int(conv_pool.shape[1] * conv_pool.shape[2] * conv_pool.shape[3])])],
                                   axis=1)
            else:
                fc = tf.concat([z, y], axis=1)

            hidden_dropout = fc
            for i in range(self.n_hlayers):
                weight_name = 'w_hidden_' + str(i)
                bias_name = 'b_hidden' + str(i)
                hidden_pre = tf.add(tf.matmul(hidden_dropout, self.weights['VICI_decoder'][weight_name]),
                                    self.weights['VICI_decoder'][bias_name])
                hidden_post = self.nonlinearity(hidden_pre)
                if self.batch_norm == True:
                    hidden_batchNorm = tf.nn.batch_normalization(
                        hidden_post,
                        tf.Variable(tf.zeros([hidden_post.shape[1]], dtype=tf.float32)),
                        tf.Variable(tf.ones([hidden_post.shape[1]], dtype=tf.float32)),
                        None, None, 0.000001)
                    hidden_dropout = tf.layers.dropout(hidden_batchNorm, rate=self.drate)
                else:
                    hidden_dropout = tf.layers.dropout(hidden_post, rate=self.drate)
            loc_all = tf.add(tf.matmul(hidden_dropout, self.weights['VICI_decoder']['w_loc']),
                             self.weights['VICI_decoder']['b_loc'])
            scale_all = tf.add(tf.matmul(hidden_dropout, self.weights['VICI_decoder']['w_scale']),
                               self.weights['VICI_decoder']['b_scale'])

            # Split the output into non-wrapped and wrapped params and apply
            # the appropriate activation to each group.
            loc_nowrap = self.nonlinear_loc_nowrap(tf.boolean_mask(loc_all, self.nowrap_mask, axis=1))
            scale_nowrap = self.nonlinear_scale_nowrap(tf.boolean_mask(scale_all, self.nowrap_mask, axis=1))
            if np.sum(self.wrap_mask) > 0:
                loc_wrap = self.nonlinear_loc_wrap(tf.boolean_mask(loc_all, self.wrap_mask, axis=1))
                scale_wrap = -1.0 * self.nonlinear_scale_wrap(tf.boolean_mask(scale_all, self.wrap_mask, axis=1))
                return loc_nowrap, scale_nowrap, loc_wrap, scale_wrap
            else:
                return loc_nowrap, scale_nowrap
Example #9
def yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold=0.6):
    box_scores = box_confidence * box_class_probs

    box_classes = k.argmax(box_scores, axis=-1)
    box_class_scores = k.max(box_scores, axis=-1)

    filtering_mask = box_class_scores >= threshold
    scores = tf.boolean_mask(box_class_scores, filtering_mask)
    boxes = tf.boolean_mask(boxes, filtering_mask)
    classes = tf.boolean_mask(box_classes, filtering_mask)

    return scores, boxes, classes
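
A toy call (a sketch; the shapes mimic a 19x19 YOLO grid with 5 anchors and 80 classes, and the lowercase `k` above is assumed to be `keras.backend`):

box_confidence = tf.random.uniform([19, 19, 5, 1])
boxes_in = tf.random.uniform([19, 19, 5, 4])
box_class_probs = tf.random.uniform([19, 19, 5, 80])
scores, boxes_out, classes = yolo_filter_boxes(box_confidence, boxes_in,
                                               box_class_probs, threshold=0.5)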
Example #10
def yolo_eval(
    yolo_outputs,
    anchors,
    num_classes,
    image_shape,
    max_boxes=20,
    score_threshold=0.6,
    iou_threshold=0.5,
):
    """Evaluate YOLO model on given input and return filtered boxes."""
    num_layers = len(yolo_outputs)
    anchor_mask = (
        [[6, 7, 8], [3, 4, 5], [0, 1, 2]] if num_layers == 3 else [[3, 4, 5], [1, 2, 3]]
    )  # default setting
    input_shape = K.shape(yolo_outputs[0])[1:3] * 32
    boxes = []
    box_scores = []
    for l in range(num_layers):
        _boxes, _box_scores = yolo_boxes_and_scores(
            yolo_outputs[l],
            anchors[anchor_mask[l]],
            num_classes,
            input_shape,
            image_shape,
        )
        boxes.append(_boxes)
        box_scores.append(_box_scores)
    boxes = K.concatenate(boxes, axis=0)
    box_scores = K.concatenate(box_scores, axis=0)

    mask = box_scores >= score_threshold
    max_boxes_tensor = K.constant(max_boxes, dtype="int32")
    boxes_ = []
    scores_ = []
    classes_ = []
    for c in range(num_classes):
        # TODO: use keras backend instead of tf.
        class_boxes = tf.boolean_mask(boxes, mask[:, c])
        class_box_scores = tf.boolean_mask(box_scores[:, c], mask[:, c])
        nms_index = tf.image.non_max_suppression(
            class_boxes, class_box_scores, max_boxes_tensor, iou_threshold=iou_threshold
        )
        class_boxes = K.gather(class_boxes, nms_index)
        class_box_scores = K.gather(class_box_scores, nms_index)
        classes = K.ones_like(class_box_scores, "int32") * c
        boxes_.append(class_boxes)
        scores_.append(class_box_scores)
        classes_.append(classes)
    boxes_ = K.concatenate(boxes_, axis=0)
    scores_ = K.concatenate(scores_, axis=0)
    classes_ = K.concatenate(classes_, axis=0)

    return boxes_, scores_, classes_
Example #11
def yolo2_filter_boxes(boxes, box_confidence, box_class_probs, threshold=.6):
    """Filter YOLOv2 boxes based on object and class confidence."""
    box_scores = box_confidence * box_class_probs
    box_classes = K.argmax(box_scores, axis=-1)
    box_class_scores = K.max(box_scores, axis=-1)
    prediction_mask = box_class_scores >= threshold

    # TODO: Expose tf.boolean_mask to Keras backend?
    boxes = tf.boolean_mask(boxes, prediction_mask)
    scores = tf.boolean_mask(box_class_scores, prediction_mask)
    classes = tf.boolean_mask(box_classes, prediction_mask)
    return boxes, scores, classes
Example #12
def sequence_accuracy(gt_seqs,
                      decode_seqs,
                      gt_seq_lengths,
                      pr_seq_lengths,
                      debug=False,
                      name=""):
    """Computes the complete and the partial sequence accuracy."""
    gt_shape = common_layers.shape_list(gt_seqs)
    pr_shape = common_layers.shape_list(decode_seqs)
    batch_size = gt_shape[0]
    depth = gt_shape[-1]
    gt_len = gt_shape[1]
    pr_len = pr_shape[1]
    max_len = tf.maximum(gt_len, pr_len)
    gt_seqs = tf.pad(gt_seqs, [[0, 0], [0, max_len - gt_len], [0, 0]])
    decode_seqs = tf.pad(decode_seqs, [[0, 0], [0, max_len - pr_len], [0, 0]])
    gt_seqs = tf.where(
        tf.tile(
            tf.expand_dims(tf.sequence_mask(gt_seq_lengths, maxlen=max_len),
                           2), [1, 1, depth]), gt_seqs,
        tf.fill(tf.shape(gt_seqs), -1))
    decode_seqs = tf.where(
        tf.tile(
            tf.expand_dims(tf.sequence_mask(pr_seq_lengths, maxlen=max_len),
                           2), [1, 1, depth]), decode_seqs,
        tf.fill(tf.shape(decode_seqs), -1))
    # [batch_size, decode_length]
    corrects = tf.reduce_all(tf.equal(gt_seqs, decode_seqs), -1)
    correct_mask = tf.reduce_all(corrects, -1)
    # [batch_size]
    if debug:
        incorrect_mask = tf.logical_not(correct_mask)
        incorrect_gt = tf.boolean_mask(gt_seqs, incorrect_mask)
        incorrect_pr = tf.boolean_mask(decode_seqs, incorrect_mask)
        with tf.control_dependencies([
                tf.print(name + "_mismatch",
                         incorrect_gt,
                         incorrect_pr,
                         summarize=1000)
        ]):
            correct_mask = tf.identity(correct_mask)
    correct_seqs = tf.to_float(correct_mask)
    total_correct_seqs = tf.reduce_sum(correct_seqs)
    mean_complete_accuracy = total_correct_seqs / tf.to_float(batch_size)
    # Compute partial accuracy
    errors = tf.logical_not(corrects)
    errors = tf.cast(tf.cumsum(tf.to_float(errors), axis=-1), tf.bool)
    # [batch_size]
    correct_steps = tf.reduce_sum(tf.to_float(tf.logical_not(errors)), axis=-1)
    mean_partial_accuracy = tf.reduce_mean(
        tf.div(tf.minimum(correct_steps, gt_seq_lengths), gt_seq_lengths))
    return mean_complete_accuracy, mean_partial_accuracy
Example #13
 def map_box_encodings(i):
   """Produces box K-hot and score encodings for each class index."""
   box_mask = tf.equal(
       unique_indices, i * tf.ones(num_boxes, dtype=tf.int64))
   box_mask = tf.reshape(box_mask, [-1])
   box_indices = tf.boolean_mask(classes, box_mask)
   box_confidences = tf.boolean_mask(confidences, box_mask)
   box_class_encodings = tf.sparse_to_dense(
       box_indices, [num_classes], tf.constant(1, dtype=tf.int64),
       validate_indices=False)
   box_confidence_encodings = tf.sparse_to_dense(
       box_indices, [num_classes], box_confidences, validate_indices=False)
   return box_class_encodings, box_confidence_encodings
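
The helper is shaped for `tf.map_fn` over box-group indices; a hedged sketch of the driving call (the `num_unique_boxes` count is hypothetical, and the output dtypes follow the `sparse_to_dense` calls above):

box_class_encodings, box_confidence_encodings = tf.map_fn(
    map_box_encodings,
    tf.range(num_unique_boxes, dtype=tf.int64),
    dtype=(tf.int64, tf.float32))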
Example #14
def _batch_stitch(features, mean_length=4.0, stddev=2.0):
    """Stitches a batch of single-step data to a batch of multi-step data."""
    batch_size = common_layers.shape_list(features['task'])[0]
    num_sequences = tf.maximum(
        tf.to_int32(tf.to_float(batch_size) / mean_length), 1)
    lengths = tf.random.truncated_normal(shape=[num_sequences],
                                         mean=mean_length,
                                         stddev=stddev)
    max_length = tf.reduce_max(lengths) * (tf.to_float(batch_size) /
                                           tf.reduce_sum(lengths))
    max_length = tf.to_int32(tf.ceil(max_length))
    total_items = max_length * num_sequences
    num_paddings = total_items - batch_size
    indices = tf.random.shuffle(tf.range(total_items))
    for key in features:
        shape_list = common_layers.shape_list(features[key])
        assert len(shape_list) >= 1
        with tf.control_dependencies([
                tf.assert_greater_equal(num_paddings,
                                        0,
                                        name='num_paddings_positive')
        ]):
            paddings = [[0, num_paddings]] + [[0, 0]] * (len(shape_list) - 1)
        features[key] = tf.pad(features[key],
                               paddings,
                               constant_values=-1 if key == 'obj_type' else 0)
        features[key] = tf.gather(features[key], indices)
        shape = [num_sequences, max_length]
        if len(shape_list) >= 2:
            shape += shape_list[1:]
        features[key] = tf.reshape(features[key], shape)
    # Remove all-padding seqs
    step_mask = tf.reduce_any(tf.greater(features['task'], 1), axis=-1)
    mask = tf.reduce_any(step_mask, axis=-1)
    step_mask = tf.boolean_mask(step_mask, mask)
    for key in features:
        features[key] = tf.boolean_mask(features[key], mask=mask)
    num_sequences = tf.shape(features['task'])[0]
    # Sort steps within each seq
    _, step_indices = tf.math.top_k(tf.to_int32(step_mask), k=max_length)
    step_indices = step_indices + tf.expand_dims(
        tf.range(num_sequences) * max_length, 1)
    step_indices = tf.reshape(step_indices, [-1])
    for key in features:
        shape_list = common_layers.shape_list(features[key])
        features[key] = tf.gather(
            tf.reshape(features[key], [-1] + shape_list[2:]), step_indices)
        features[key] = tf.reshape(features[key], shape_list)
    features = _stitch(features)
    return features
Example #15
def _word_span_mask(inputs, tgt_len, num_predict, boundary, stride=1):
    """Sample whole word spans as prediction targets."""
    # Note: 1.2 is roughly the token-to-word ratio
    non_pad_len = tgt_len + 1 - stride
    chunk_len_fp = non_pad_len / num_predict / 1.2
    round_to_int = lambda x: tf.cast(tf.round(x), tf.int64)

    # Sample span lengths from a zipf distribution
    span_len_seq = np.arange(FLAGS.min_word, FLAGS.max_word + 1)
    probs = np.array([1.0 / (i + 1) for i in span_len_seq])
    probs /= np.sum(probs)
    logits = tf.constant(np.log(probs), dtype=tf.float32)

    # Sample `num_predict` words here: note that this is over sampling
    span_lens = tf.random.categorical(
        logits=logits[None],
        num_samples=num_predict,
        dtype=tf.int64,
    )[0] + FLAGS.min_word

    # Sample the ratio [0.0, 1.0) of left context lengths
    span_lens_fp = tf.cast(span_lens, tf.float32)
    left_ratio = tf.random.uniform(shape=[num_predict], minval=0.0, maxval=1.0)
    left_ctx_len = left_ratio * span_lens_fp * (chunk_len_fp - 1)

    left_ctx_len = round_to_int(left_ctx_len)
    right_offset = round_to_int(span_lens_fp * chunk_len_fp) - left_ctx_len

    beg_indices = (tf.cumsum(left_ctx_len) +
                   tf.cumsum(right_offset, exclusive=True))
    end_indices = beg_indices + span_lens

    # Remove out of range `boundary` indices
    max_boundary_index = tf.cast(tf.shape(boundary)[0] - 1, tf.int64)
    valid_idx_mask = end_indices < max_boundary_index
    beg_indices = tf.boolean_mask(beg_indices, valid_idx_mask)
    end_indices = tf.boolean_mask(end_indices, valid_idx_mask)

    beg_indices = tf.gather(boundary, beg_indices)
    end_indices = tf.gather(boundary, end_indices)

    # Shuffle valid `position` indices
    num_valid = tf.cast(tf.shape(beg_indices)[0], tf.int64)
    order = tf.random.shuffle(tf.range(num_valid, dtype=tf.int64))
    beg_indices = tf.gather(beg_indices, order)
    end_indices = tf.gather(end_indices, order)

    return _idx_pair_to_mask(beg_indices, end_indices, inputs, tgt_len,
                             num_predict)
Example #16
def I_sgaba(G, V):
    # Slow-GABA synaptic current; n_n, K_sgaba, sgaba_mat, E_sgaba and
    # G_sgaba are module-level globals in the original script.
    G4 = tf.pow(G, 4) / (tf.pow(G, 4) + K_sgaba)  # Hill-type gating activation
    G_ = tf.Variable([0.0] * n_n**2, dtype=tf.float64)
    # Scatter active gating values into the flattened n_n x n_n connectivity
    ind = tf.boolean_mask(tf.range(n_n**2), sgaba_mat.reshape(-1) == 1)
    G_ = tf.scatter_update(G_, ind, G4)
    G_ = tf.transpose(tf.reshape(G_, (n_n, n_n)))
    # Sum per-synapse currents onto each postsynaptic neuron
    return tf.reduce_sum(tf.transpose((G_ * (V - E_sgaba)) * G_sgaba), 1)
Example #17
    def _build_target_q_op(self):
        """Build an op used as a target for the Q-value.

        Returns:
          target_q_op: An op calculating the Q-value.
        """
        # TODO: include actual trajectory length in the transition so we
        # don't use the wrong cumulative_gamma.
        if self.sarsa:
            flat_next_a = tf.reshape(self._replay.transition['next_action'],
                                     (-1, ))
            next_a_mask = tf.reshape(
                tf.one_hot(flat_next_a,
                           depth=self.num_actions,
                           axis=-1,
                           on_value=True,
                           off_value=False), (-1, ))
            flat_next_q = tf.reshape(
                self._replay_next_target_net_outputs.q_values, (-1, ))
            flat_target = tf.boolean_mask(flat_next_q, next_a_mask)
            replay_next_qt_max = tf.reshape(flat_target, (-1, 1))
        else:
            replay_next_qt_max = tf.reduce_max(
                self._replay_next_target_net_outputs.q_values, 1)

        # Calculate the Bellman target value.
        #   Q_t = R_t + \gamma^N * Q'_t+1
        # where,
        #   Q'_t+1 = \max_a Q(S_t+1, a)
        #          (or) 0 if S_t is a terminal state,
        # and
        #   N is the update horizon (by default, N=1).

        r = self._build_reward_op()
        return r + self.cumulative_gamma * replay_next_qt_max * (
            1. - tf.cast(self._replay.terminals, tf.float32))
Example #18
 def print_vars(self, var_list, show_values=False, extra=False):
     for num, var in enumerate(var_list):
         print_strs = []
         if show_values:
             if 'sess' in self.objs:
                 red_sum = self.objs['sess'].run(tf.reduce_sum(var))
                 print_strs += ["mag %f" % (red_sum)]
             else:
                 print_strs += ["init"]
         if extra:
             if 'sess' in self.objs:
                 nonzerovar = tf.boolean_mask(var,
                                              tf.greater(var, 0.000001))
                 tmax = self.objs['sess'].run(
                     tf.math.reduce_max(nonzerovar))
                 print_strs += ["tmax %f" % tmax]
                 nz = self.objs['sess'].run(tf.math.count_nonzero(var))
                 print_strs += ["nonzero %d" % nz]
                 num_elements = self.objs['sess'].run(
                     tf.reduce_sum(tf.ones_like(var)))
                 print_strs += ["total %d" % num_elements]
         print_str = "\t(%d) %s" % (num + 1, var.name)
         if len(print_strs) > 0:
             print_str += " => %s" % " , ".join(print_strs)
         print(print_str)
     print("Number of vars: %d" % len(var_list))
Example #19
def mask_by_partial_sequence_length(tensors,
                                    partial_sequence_lengths=None,
                                    target_length=None):
    """Selects examples with partial sequence length equal to target_length.

  Args:
    tensors: Tuple of tensors to mask.
    partial_sequence_lengths: Integer tensor with shape [batch_size].
        Default None.
    target_length: Integer. Only examples with partial sequence length equal to
        target_length will be used. If None (the default), all examples in
        batch will be used.

  Returns:
    A tuple of masked tensors.

  Raises:
    ValueError: if partial_sequence_lengths is None when target_length is not
        None.
  """
    if target_length is not None:
        if partial_sequence_lengths is None:
            raise ValueError('partial_sequence_lengths is expected '
                             'when target_length is not None.')
        # A mask on batch_size dimension.
        partial_sequence_length_mask = tf.equal(partial_sequence_lengths,
                                                target_length)
        masked_tensors = []
        for tensor in tensors:
            masked_tensors.append(
                tf.boolean_mask(tensor, partial_sequence_length_mask))
        return tuple(masked_tensors)
    else:
        return tensors
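
A usage sketch (a hypothetical batch of four examples):

logits = tf.random.uniform([4, 10])
values = tf.random.uniform([4])
lengths = tf.constant([2, 3, 2, 5])
masked_logits, masked_values = mask_by_partial_sequence_length(
    (logits, values), partial_sequence_lengths=lengths, target_length=2)
# Keeps rows 0 and 2, whose partial sequence length equals 2.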
Example #20
    def my_fn(x):
        """Helper function to transform e-Snli dataset to inputs/targets."""
        labels = ['entailment', 'neutral', 'contradiction']
        inputs = tf.strings.join(
            [prefix, 'hypothesis:', x['hypothesis'], 'premise:', x['premise']],
            separator=' ')
        if add_choices:
            inputs = tf.strings.join([
                inputs,
                'choice: entailment choice: neutral choice: contradiction'
            ],
                                     separator=' ')

        class_label = tf.gather(labels, x['label'])

        if drop_explanations:
            targets = class_label
        else:
            explanations = [
                x.get('explanation_%d' % i, '') for i in range(1, 4)
            ]
            explanations = tf.boolean_mask(explanations,
                                           tf.not_equal(explanations, ''))
            targets = _explanation_targets(class_label, explanations)

        return {'inputs': inputs, 'targets': targets}
Example #21
def calc_neighbor_embed(elements_neighbors, elements_enc, elements_mask):
    """Calculates the sum of the embeddings of neighboring elements."""
    with tf.variable_scope('calc_neighbor_embed'):
        elements_enc_orig_shape = elements_enc.get_shape().as_list()

        elements_enc = undo_mask(elements_enc, elements_mask)

        elements_enc_shape = tf.shape(elements_enc)
        elements_enc_expand = tf.tile(elements_enc,
                                      [1, elements_enc_shape[1], 1])
        elements_enc_expand = tf.reshape(elements_enc_expand, [
            -1, elements_enc_shape[1], elements_enc_shape[1],
            elements_enc_shape[2]
        ])

        elements_neighbors = tf.cast(tf.expand_dims(elements_neighbors, 3),
                                     tf.float32)

        neighbor_embed = elements_enc_expand * elements_neighbors
        neighbor_embed = tf.reduce_mean(neighbor_embed, axis=2)

        neighbor_embed = tf.boolean_mask(neighbor_embed, elements_mask)

        neighbor_embed.set_shape(elements_enc_orig_shape)

        return neighbor_embed
Example #22
def _slice_with_actions(embeddings, actions):
    """Slice a Tensor.

  Take embeddings of the form [batch_size, num_actions, embed_dim]
  and actions of the form [batch_size, 1], and return the sliced embeddings
  like embeddings[:, actions, :].

  Args:
    embeddings: Tensor of embeddings to index.
    actions: int Tensor to use as index into embeddings

  Returns:
    Tensor of embeddings indexed by actions
  """
    batch_size, num_actions = embeddings.get_shape()[:2]

    # Values are the 'values' in a sparse tensor we will be setting
    act_indx = tf.cast(actions, tf.int64)[:, None]
    values = tf.reshape(tf.cast(tf.ones(tf.shape(actions)), tf.bool), [-1])

    # Create a range for each index into the batch
    act_range = tf.range(0, batch_size, dtype=tf.int64)[:, None]
    # Combine this into coordinates with the action indices
    indices = tf.concat([act_range, act_indx], 1)

    actions_mask = tf.SparseTensor(indices, values, [batch_size, num_actions])
    actions_mask = tf.stop_gradient(
        tf.sparse_tensor_to_dense(actions_mask, default_value=False))
    sliced_emb = tf.boolean_mask(embeddings, actions_mask)
    return sliced_emb
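
A quick shape check (a sketch with made-up tensors; TF 1.x is assumed because of `tf.sparse_tensor_to_dense`). Note the code indexes `actions[:, None]`, so it expects a rank-1 `actions` despite the docstring's [batch_size, 1]:

embeddings = tf.random.uniform([32, 4, 64])  # [batch, num_actions, embed_dim]
actions = tf.random.uniform([32], maxval=4, dtype=tf.int32)
sliced = _slice_with_actions(embeddings, actions)  # -> [32, 64]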
Example #23
def reduce_median_masked(tensor, mask, axis=None, keepdims=False):
    tensor_masked = tf.boolean_mask(tensor, mask)
    #print('shapes', tensor.shape, mask.shape, tensor_masked.shape)
    return tf.contrib.distributions.percentile(tensor_masked,
                                               50.,
                                               axis=axis,
                                               keep_dims=keepdims)
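
For instance (a small sketch; TF 1.x with `tf.contrib` available is assumed):

x = tf.constant([1., -2., 3., 4.])
median_pos = reduce_median_masked(x, x > 0)  # median of {1., 3., 4.} -> 3.0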
Example #24
    def stn_diffeo(self):
        with tf.variable_scope("atn"):
            x_tensor = tf.reshape(self.X,[-1,self.img_sz[0],self.img_sz[1],self.num_channels])
            # x_tensor = tf.Print(x_tensor,[x_tensor],message="x_tensor: ",summarize=100)
            c = tf.reduce_mean(tf.boolean_mask(x_tensor, tf.is_finite(x_tensor)), 0)
            # c = tf.Print(c,[c],message="c: ",summarize=100)

            # self.theta, self.affine_maps, d2 = transfromation_parameters_regressor(self.requested_transforms,self.X,
            #                                                                  self.keep_prob,self.img_sz,self.weight_stddev,self.num_channels,self.activation_func)
            #
            # self.theta = tf.Print(self.theta,[self.theta],message="self.theta: ",summarize=100)
            # out_size = (self.img_sz[0], self.img_sz[1])
            # self.theta_exp = expm(-self.theta)  # compute matrix exponential on {-theta}
            # # self.theta_exp = tf.Print(self.theta_exp,[self.theta_exp],message="theta_exp: ", summarize=100)
            # x_theta, d = transformer(x_tensor, self.theta_exp, out_size)
            # #to avoid the sparse indexing warning, comment the next line, and uncomment the one after it.
            # self.x_theta = tf.reshape(x_theta,shape=[-1,self.img_sz[0],self.img_sz[1],self.num_channels])
            # d.update({'params':d2['params']})

            # Working with recurrent STN: get self.theta and self.theta_exp in shape: [num_STN, batch_sz, 6]
            d = c
            self.x_theta, self.theta, self.theta_exp = transfromation_parameters_regressor(self.requested_transforms, self.X,
                                                                                 self.keep_prob,self.img_sz,
                                                                                 self.weight_stddev,self.num_channels,
                                                                                 self.activation_func, self.num_stn)

            return self.x_theta, d, c
Example #25
    def _build_reward_op(self):
        off = self.epsilon_eval / self.num_actions
        on = (1 - self.epsilon_eval) + off
        s = self._replay.transition['traj_state']
        a = self._replay.transition['traj_action']
        r = self._replay.transition['traj_reward']
        p = self._replay.transition['traj_prob']
        gamma = self._replay.transition['traj_discount']

        state_shape = self.observation_shape + (self.stack_size, )
        flat_s = tf.reshape(s, shape=(-1, ) + state_shape)  # b*h x 84 x 84 x 4
        flat_qs = tf.stop_gradient(
            self.target_convnet(flat_s).q_values)  # b*h x num_actions
        flat_qmax = tf.argmax(flat_qs, axis=1)  # b*h
        flat_pi = tf.one_hot(flat_qmax,
                             depth=self.num_actions,
                             axis=-1,
                             on_value=on,
                             off_value=off)  # b*h x num_actions

        flat_a = tf.reshape(a, (-1, ))
        action_mask = tf.one_hot(flat_a,
                                 depth=self.num_actions,
                                 dtype=tf.bool,
                                 on_value=True,
                                 off_value=False)

        flat_behavior_probs = tf.boolean_mask(flat_pi, action_mask)  #b*h
        behavior_probs = tf.reshape(flat_behavior_probs,
                                    (-1, self.update_horizon))  #b x h
        importance_weights = behavior_probs / p  #b x h
        importance_weights = tf.clip_by_value(importance_weights, 0.99, 1.01)
        w = tf.math.cumprod(importance_weights, axis=1)  #b x h
        #q = tf.numpy_func(...)
        return tf.reduce_sum(gamma * w * r, axis=1)  #b
Example #26
def _lovasz_softmax_flat(probas, labels, only_present=True):
    """
    Multi-class Lovasz-Softmax loss
      probas: [P, C] Variable, class probabilities at each prediction (between 0 and 1)
      labels: [P] Tensor, ground truth labels (between 0 and C - 1)
      only_present: average only on classes present in ground truth
    """
    C = probas.shape[1]
    losses = []
    present = []
    for c in range(C):
        fg = tf.cast(tf.equal(labels, c),
                     probas.dtype)  # foreground for class c
        if only_present:
            present.append(tf.reduce_sum(fg) > 0)
        errors = tf.abs(fg - probas[:, c])
        errors_sorted, perm = tf.nn.top_k(errors,
                                          k=tf.shape(errors)[0],
                                          name="descending_sort_{}".format(c))
        fg_sorted = tf.gather(fg, perm)
        grad = _lovasz_grad(fg_sorted)
        losses.append(
            tf.tensordot(errors_sorted,
                         tf.stop_gradient(grad),
                         1,
                         name="loss_class_{}".format(c)))
    losses_tensor = tf.stack(losses)
    if only_present:
        present = tf.stack(present)
        losses_tensor = tf.boolean_mask(losses_tensor, present)
    return losses_tensor
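
A toy check (a sketch; assumes `_lovasz_grad` from the same module is in scope, plus eager execution or a session to evaluate the result):

probas = tf.constant([[0.7, 0.2, 0.1],
                      [0.1, 0.8, 0.1],
                      [0.3, 0.3, 0.4],
                      [0.9, 0.05, 0.05]])
labels = tf.constant([0, 1, 2, 0])
loss = tf.reduce_mean(_lovasz_softmax_flat(probas, labels))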
Example #27
def get_bag_vectors(model):
    """
    Represents snapshots as a bag of clinical observations. Specifically, returns a V-length
    binary vector such that the v-th index is 1 iff the v-th observation occurs in the given snapshot.
    :param model: CANTRIP model
    :type model: modeling.CANTRIPModel
    :return: clinical snapshot encoding
    """
    # 1. Evaluate which entries in model.observations are non-zero
    mask = tf.not_equal(model.observations, 0)
    where = tf.where(mask)

    # 2. Get the vocabulary indices for non-zero observations
    vocab_indices = tf.boolean_mask(model.observations, mask)
    vocab_indices = tf.expand_dims(vocab_indices[:], axis=-1)
    vocab_indices = tf.cast(vocab_indices, dtype=tf.int64)

    # 3. Get batch and sequence indices for non-zero observations
    tensor_indices = where[:, :-1]

    # Concat batch, sequence, and vocabulary indices
    indices = tf.concat([tensor_indices, vocab_indices], axis=-1)

    # Our sparse tensor will be 1 for observed observations, 0 otherwise
    ones = tf.ones_like(indices[:, 0], dtype=tf.float32)

    # The dense shape will be the same as model.observations, but using the entire vocabulary as the final dimension
    dense_shape = model.observations.get_shape().as_list()
    dense_shape[2] = model.vocabulary_size

    # Store as a sparse tensor because they're neat
    st = tf.SparseTensor(indices=indices, values=ones, dense_shape=dense_shape)
    return tf.sparse.reorder(st)
Example #28
def yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold=0.6):
    # Step 1: compute the box scores
    box_scores = box_confidence * box_class_probs
    # Step 2: find the index of the highest-scoring class and that class score
    box_classes = K.argmax(box_scores, axis=-1)  # axis=-1 selects the last dimension
    box_class_scores = K.max(box_scores, axis=-1)
    # Step 3: build a mask from the threshold
    filtering_mask = (box_class_scores >= threshold)
    # print(filtering_mask)
    # Apply the mask to scores, boxes, and classes
    scores = tf.boolean_mask(box_class_scores, filtering_mask)
    boxes = tf.boolean_mask(boxes, filtering_mask)
    classes = tf.boolean_mask(box_classes, filtering_mask)

    return scores, boxes, classes
Example #29
def _ref_accuracy(features,
                  pred_dict,
                  nonpadding,
                  name,
                  metrics,
                  decode_refs=None,
                  measure_beginning_eos=False,
                  debug=False):
    """Computes the accuracy of reference prediction.

  Args:
    features: the feature dict.
    pred_dict: the dictionary to hold the prediction results.
    nonpadding: a 2D boolean tensor for masking out paddings.
    name: the name of the feature to be predicted.
    metrics: the eval metrics.
    decode_refs: decoded references.
    measure_beginning_eos: whether to measure the beginning and the end.
    debug: whether to output mismatches.
  """
    if decode_refs is not None:
        gt_seq_lengths = decode_utils.verb_refs_to_lengths(
            features["task"], features["verb_refs"])
        pr_seq_lengths = decode_utils.verb_refs_to_lengths(
            decode_refs["task"], decode_refs["verb_refs"])
        full_acc, partial_acc = decode_utils.sequence_accuracy(
            features[name],
            decode_refs[name],
            gt_seq_lengths,
            pr_seq_lengths,
            debug=debug,
            name=name)
        metrics[name + "_full_accuracy"] = tf.metrics.mean(full_acc)
        metrics[name + "_partial_accuracy"] = tf.metrics.mean(partial_acc)
    if measure_beginning_eos:
        nonpadding = tf.reshape(nonpadding, [-1])
        refs = tf.reshape(features[name], [-1, 2])
        predict_refs = tf.reshape(pred_dict[name], [-1, 2])
        metrics[name + "_start"] = tf.metrics.accuracy(
            labels=tf.boolean_mask(refs[:, 0], nonpadding),
            predictions=tf.boolean_mask(predict_refs[:, 0], nonpadding),
            name=name + "_start_accuracy")
        metrics[name + "_end"] = tf.metrics.accuracy(
            labels=tf.boolean_mask(refs[:, 1], nonpadding),
            predictions=tf.boolean_mask(predict_refs[:, 1], nonpadding),
            name=name + "_end_accuracy")
Example #30
 def loop_body(b, ignore_mask):
     true_box = tf.boolean_mask(y_true[l][b, ..., 0:4],
                                object_mask_bool[b, ..., 0])
     iou = box_iou(pred_box[b], true_box)
     best_iou = K.max(iou, axis=-1)
     ignore_mask = ignore_mask.write(
         b, K.cast(best_iou < ignore_thresh, K.dtype(true_box)))
     return b + 1, ignore_mask
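
In the Keras YOLOv3 loss this body is typically driven with `tf.while_loop` and a dynamically sized `TensorArray` (a sketch; `m` is the batch-size tensor and the remaining names are closed over as above):

ignore_mask = tf.TensorArray(K.dtype(y_true[0]), size=1, dynamic_size=True)
_, ignore_mask = tf.while_loop(lambda b, *args: b < m, loop_body,
                               [0, ignore_mask])
ignore_mask = ignore_mask.stack()
ignore_mask = K.expand_dims(ignore_mask, -1)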