Example #1
def greater(f, other):
    """Element-wise comparison applied to the `Functional` objects.

    # Arguments
        f: Functional object.
        other: A python number or a tensor or a functional object.

    # Returns
        A Functional.
    """
    validate_functional(f)

    inputs = f.inputs.copy()
    if is_functional(other):
        inputs += to_list(other.inputs)
        # one comparison Lambda per output tensor of `f`
        lmbd = [
            Lambda(lambda x: K.cast_to_floatx(K.greater(x[0], x[1])),
                   name=graph_unique_name("greater")) for _ in f.outputs
        ]
    else:
        _warn_for_ndarray(other)
        lmbd = [
            Lambda(lambda x: K.cast_to_floatx(K.greater(x, other)),
                   name=graph_unique_name("greater")) for _ in f.outputs
        ]

    Functional = f.get_class()
    res = Functional(inputs=unique_tensors(inputs),
                     outputs=_apply_operation(lmbd, f, other),
                     layers=lmbd)
    return res
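A minimal usage sketch, assuming this helper comes from SciANN's functional math utilities (where `Variable` builds elementary `Functional` objects):

# Hedged usage sketch; `greater` is the helper defined above.
import sciann as sn

x = sn.Variable('x')
y = sn.Variable('y')
g = greater(x, y)  # a Functional emitting 1.0 where x > y, else 0.0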
Example #2
    def call(self, inputs, **kwargs):
        input_shape = K.int_shape(inputs)
        sequence_length, d_model = input_shape[-2:]
        # output of the "sigmoid halting unit" (not the probability yet)
        halting = K.sigmoid(
            K.reshape(
                K.bias_add(K.dot(K.reshape(inputs, [-1, d_model]),
                                 self.act_weights['halting_kernel']),
                           self.act_weights['halting_biases'],
                           data_format='channels_last'),
                [-1, sequence_length]))
        if self.zeros_like_halting is None:
            self.initialize_control_tensors(halting)
        # useful flags
        step_is_active = K.greater(self.halt_budget, 0)
        no_further_steps = K.less_equal(self.halt_budget - halting, 0)
        # halting probability is equal to
        # a. halting output if this isn't the last step (we have some budget)
        # b. to remainder if it is,
        # c. and zero for the steps that shouldn't be executed at all
        #    (out of budget for them)
        halting_prob = K.switch(
            step_is_active, K.switch(no_further_steps, self.remainder,
                                     halting), self.zeros_like_halting)
        self.active_steps += K.switch(step_is_active, self.ones_like_halting,
                                      self.zeros_like_halting)
        # We don't know which step is the last, so we keep updating
        # expression for the loss with each call of the layer
        self.ponder_cost = (self.act_weights['time_penalty_t'] *
                            K.mean(self.remainder + self.active_steps))
        # Updating "the remaining probability" and the halt budget
        self.remainder = K.switch(no_further_steps, self.remainder,
                                  self.remainder - halting)
        self.halt_budget -= halting  # OK to become negative

        # If none of the inputs are active at this step, then instead
        # of zeroing them out by multiplying to all-zeroes halting_prob,
        # we can simply use a constant tensor of zeroes, which means that
        # we won't even calculate the output of those steps, saving
        # some real computational time.
        if self.zeros_like_input is None:
            self.zeros_like_input = K.zeros_like(inputs,
                                                 name='zeros_like_input')
        # just because K.any(step_is_active) doesn't work in PlaidML
        any_step_is_active = K.greater(K.sum(K.cast(step_is_active, 'int32')),
                                       0)
        step_weighted_output = K.switch(
            any_step_is_active,
            K.expand_dims(halting_prob, -1) * inputs, self.zeros_like_input)
        if self.weighted_output is None:
            self.weighted_output = step_weighted_output
        else:
            self.weighted_output += step_weighted_output
        return [inputs, self.weighted_output]
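A toy illustration (hedged sketch) of the halting-probability selection used above, with hand-picked values standing in for the layer's control tensors:

from tensorflow.keras import backend as K

halt_budget = K.constant([0.6, 0.1, -0.2])
halting     = K.constant([0.3, 0.4, 0.5])
remainder   = K.constant([0.7, 0.7, 0.7])

step_is_active   = K.greater(halt_budget, 0)
no_further_steps = K.less_equal(halt_budget - halting, 0)
# active steps emit the sigmoid output, the final step emits the
# remainder, and exhausted steps emit zero
halting_prob = K.switch(step_is_active,
                        K.switch(no_further_steps, remainder, halting),
                        K.zeros_like(halting))
print(K.eval(halting_prob))  # -> [0.3 0.7 0. ]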
Example #3
    def call(self, y):
        # Sanity Check
        if isinstance(y, list):
            raise ValueError('TSG layer has only 1 input')
        # y = tf_print(y, [y], message='{}: The unconstrained action is:'.format(y.name.split('/')[0]), summarize=-1)
        y = check_numerics(y, 'Problem with input y')

        # Calculate A.c
        Ac = tensordot(self.A_graph, self.c_graph, 1)

        # Calculate b - Ac
        bMinusAc = self.b_graph - Ac

        # Calculate y - c
        yMinusc = y - self.c_graph

        # Calculate A.(y - c)
        ADotyMinusc = K.sum((self.A_graph * expand_dims(yMinusc, -2)), axis=2)

        # Do element-wise division; K.epsilon() guards against division
        # by zero when a constraint row is parallel to (y - c)
        intersection_points = bMinusAc / (ADotyMinusc + K.epsilon())

        # Discard intersection points outside [0, 1] (the projection must
        # lie between c and y) by overwriting them with the sentinel value 2
        greater_1 = K.greater(intersection_points,
                              K.ones_like(intersection_points))
        candidate_alpha = K.switch(greater_1,
                                   K.ones_like(intersection_points) + 1,
                                   intersection_points)

        less_0 = K.less(candidate_alpha, K.zeros_like(intersection_points))
        candidate_alpha = K.switch(less_0,
                                   K.ones_like(intersection_points) + 1,
                                   candidate_alpha)

        # Find farthest intersection point from y to get projection point
        alpha = K.min(candidate_alpha, axis=-1, keepdims=True)

        # If it is an interior point, y itself is the projection point
        interior_point = K.greater(alpha, K.ones_like(alpha))
        alpha = K.switch(interior_point, K.ones_like(alpha), alpha)
        # alpha = tf_print(alpha, [alpha], message="{}: The value of alpha is: ".format(alpha.name.split('/')[0]))

        # Return \alpha.y + (1 - \alpha).c
        z = alpha * y + ((1 - alpha) * self.c_graph)
        # z = tf_print(z, [z], message='{}: The constrained action is:'.format(z.name.split('/')[0]), summarize=-1)

        return z
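A hedged standalone re-expression of the ray-clipping math above, for the box 0 <= x <= 1 written as A.x <= b with the interior point c at its centre (A, b, c, y mirror the layer's self.A_graph, self.b_graph, self.c_graph, and input):

from tensorflow.keras import backend as K

A = K.constant([[1., 0.], [-1., 0.], [0., 1.], [0., -1.]])
b = K.constant([1., 0., 1., 0.])
c = K.constant([0.5, 0.5])
y = K.constant([2.0, 0.5])                       # outside the box

# per-facet intersection parameters along the ray c -> y
t = (b - K.sum(A * c, axis=-1)) / (K.sum(A * (y - c), axis=-1) + K.epsilon())
# mark intersections outside [0, 1] as invalid (sentinel value 2)
t = K.switch(K.greater(t, K.ones_like(t)), K.ones_like(t) + 1, t)
t = K.switch(K.less(t, K.zeros_like(t)), K.ones_like(t) + 1, t)
alpha = K.minimum(K.min(t), K.constant(1.0))     # interior point -> alpha = 1
z = alpha * y + (1 - alpha) * c
print(K.eval(z))                                 # -> [1.  0.5]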
Example #4
def step(x):
    """Computes step (Heaviside) of x element-wise.
       H(x) = 0 if x<=0
       H(x) = 1 if x>0

    # Arguments
        x: Functional object.

    # Returns
        A new functional object.
    """
    validate_functional(x)

    lmbd = []
    for _ in x.outputs:
        lmbd.append(
            Lambda(
                lambda t: K.cast(K.greater(t, 0.0), t.dtype),
                name=graph_unique_name("step")
            )
        )

    Functional = x.get_class()
    res = Functional(
        inputs=x.inputs.copy(),
        outputs=_apply_operation(lmbd, x),
        layers=lmbd
    )
    return res
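A minimal hedged check of the Heaviside behavior, using the Keras backend directly on a plain tensor (no Functional wrapper):

from tensorflow.keras import backend as K

x = K.constant([-2.0, 0.0, 0.5, 3.0])
h = K.cast(K.greater(x, 0.0), x.dtype)  # H(x) = 1 where x > 0, else 0
print(K.eval(h))  # -> [0. 0. 1. 1.]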
Example #5
def weighted_bce_dice_loss(y_true, y_pred):
    y_true = K.cast(y_true, 'float32')
    y_pred = K.cast(y_pred, 'float32')
    # to keep the output the same size, the kernel size must be an odd number
    if K.int_shape(y_pred)[1] == 128:
        kernel_size = 11
    elif K.int_shape(y_pred)[1] == 256:
        kernel_size = 21
    elif K.int_shape(y_pred)[1] == 512:
        kernel_size = 21
    elif K.int_shape(y_pred)[1] == 1024:
        kernel_size = 41
    else:
        raise ValueError('Unexpected image size')
    averaged_mask = K.pool2d(y_true,
                             pool_size=(kernel_size, kernel_size),
                             strides=(1, 1),
                             padding='same',
                             pool_mode='avg')
    border = K.cast(K.greater(averaged_mask, 0.005), 'float32') * K.cast(
        K.less(averaged_mask, 0.995), 'float32')
    weight = K.ones_like(averaged_mask)
    w0 = K.sum(weight)
    weight += border * 2
    w1 = K.sum(weight)
    weight *= (w0 / w1)
    loss = weighted_bce_loss(y_true, y_pred, weight) + (
        1 - weighted_dice_coeff(y_true, y_pred, weight))
    return loss
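A hedged sketch of the border-detection trick above: average-pooling a binary mask blurs its edges, so pixels whose local average lies strictly between 0 and 1 sit on the object boundary.

import numpy as np
from tensorflow.keras import backend as K

mask = np.zeros((1, 8, 8, 1), dtype='float32')
mask[0, 2:6, 2:6, 0] = 1.0                      # a 4x4 square object
averaged = K.pool2d(K.constant(mask), pool_size=(3, 3), strides=(1, 1),
                    padding='same', pool_mode='avg')
border = (K.cast(K.greater(averaged, 0.005), 'float32') *
          K.cast(K.less(averaged, 0.995), 'float32'))
print(K.eval(K.sum(border)))  # positive count: only boundary pixels flagged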
Example #6
def sparse():
    # the size of the last input dimension might be smaller than k;
    # account for that
    actual_k = tf.minimum(K.shape(inputs)[-1] - 1, self.k)
    # keep the k largest values per row; zero out the rest
    kth_smallest = tf.sort(inputs)[...,
                                   K.shape(inputs)[-1] - 1 - actual_k]
    return inputs * K.cast(K.greater(inputs, kth_smallest[:, None]),
                           K.floatx())
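A hedged sketch of the same top-k masking trick on a concrete batch, assuming k = 2; values not among the two largest per row are zeroed.

import tensorflow as tf
from tensorflow.keras import backend as K

inputs = tf.constant([[0.1, 0.7, 0.3, 0.9],
                      [0.5, 0.2, 0.8, 0.4]])
top_k = 2
actual_k = tf.minimum(tf.shape(inputs)[-1] - 1, top_k)
kth = tf.sort(inputs)[..., tf.shape(inputs)[-1] - 1 - actual_k]
out = inputs * K.cast(K.greater(inputs, kth[:, None]), K.floatx())
print(out.numpy())  # each row keeps only its two largest entries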
Example #7
def my_entropy(y_true, y_pred):
    """Cross-entropy loss for yaw angle prediction.

    Uses the global variables `network_output_size` and
    `min_overlap_for_angle`.
    """
    # Binarize the targets: positive where the overlap exceeds the threshold
    y_true = K.greater(y_true, min_overlap_for_angle)
    y_true = K.cast(y_true, dtype='float32')
    # The third positional argument of weighted_cross_entropy_with_logits
    # is pos_weight, so positives are up-weighted by network_output_size
    return tf.nn.weighted_cross_entropy_with_logits(y_true,
                                                    y_pred,
                                                    network_output_size,
                                                    name='loss')
Example #8
def not_equal(f, other, tol=None):
    """Element-wise comparison applied to the `Functional` objects.

    # Arguments
        f: Functional object.
        other: A python number or a tensor or a functional object.
        tol: (float) Optional tolerance for the comparison.

    # Returns
        A Functional.
    """
    validate_functional(f)
    assert isinstance(
        tol, (type(None), float)), 'Expected a floating value for `tol`.'

    inputs = f.inputs.copy()
    if is_functional(other):
        inputs += to_list(other.inputs)
        if tol is None:
            lambda_opr = lambda x: K.cast_to_floatx(K.not_equal(x[0], x[1]))
        else:
            lambda_opr = lambda x: K.cast_to_floatx(
                K.greater(K.abs(x[0] - x[1]), tol))
    else:
        _warn_for_ndarray(other)
        if tol is None:
            lambda_opr = lambda x: K.cast_to_floatx(K.not_equal(x, other))
        else:
            lambda_opr = lambda x: K.cast_to_floatx(
                K.greater(K.abs(x - other), tol))

    lmbd = [
        Lambda(lambda_opr, name=graph_unique_name("not_equal"))
        for _ in f.outputs
    ]

    Functional = f.get_class()
    res = Functional(inputs=unique_tensors(inputs),
                     outputs=_apply_operation(lmbd, f, other),
                     layers=lmbd)
    return res
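A hedged numeric check of the tolerance branch, using the backend directly: with tol set, "not equal" means the absolute difference exceeds tol.

from tensorflow.keras import backend as K

a = K.constant([1.0, 1.05, 2.0])
b = K.constant([1.0, 1.00, 1.0])
print(K.eval(K.cast(K.greater(K.abs(a - b), 0.1), K.floatx())))  # -> [0. 0. 1.]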
Example #9
def fulldiaggreater3(x):
    # check the four space diagonals: after masking, a sum greater than 3
    # means every cell on that diagonal is set (a line of four)
    xs = []
    for filt in [FULLDIAG1, FULLDIAG2, FULLDIAG3, FULLDIAG4]:
        x1 = x * tf.constant(filt)
        x1 = K.sum(x1, axis=1)
        x1 = K.sum(x1, axis=1)
        x1 = K.sum(x1, axis=1)
        x1 = K.greater(x1, 3)
        xs.append(x1)

    x = K.stack(xs)
    return K.any(x)
Example #10
def antidiaggreater3(x):
    # check the anti-diagonals in each of the three axis-aligned planes;
    # a masked sum greater than 3 means a completed line of four
    x1 = x * tf.constant(ANTIDIAG23)
    x1 = K.sum(x1, axis=2)
    x1 = K.sum(x1, axis=2)
    x1 = K.greater(x1, 3)
    x1 = K.any(x1)

    x2 = x * tf.constant(ANTIDIAG13)
    x2 = K.sum(x2, axis=2)
    x2 = K.sum(x2, axis=2)
    x2 = K.greater(x2, 3)
    x2 = K.any(x2)

    x3 = x * tf.constant(ANTIDIAG12)
    x3 = K.sum(x3, axis=2)
    x3 = K.sum(x3, axis=2)
    x3 = K.greater(x3, 3)
    x3 = K.any(x3)

    x = K.stack([x1, x2, x3])
    return K.any(x)
Example #11
def tp_score(y_true, y_pred, threshold=0.1):
    # one boolean row per pixel: [ground truth, prediction > threshold, True];
    # K.all over a row is True exactly for the true positives (the constant
    # True column never changes the result)
    tp_3d = K.concatenate([
        K.cast(K.expand_dims(K.flatten(y_true)), 'bool'),
        K.cast(
            K.expand_dims(K.flatten(K.greater(y_pred, K.constant(threshold)))),
            'bool'),
        K.cast(K.ones_like(K.expand_dims(K.flatten(y_pred))), 'bool')
    ], axis=1)

    tp = K.sum(K.cast(K.all(tp_3d, axis=1), 'int32'))

    return tp
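A hedged numeric check of tp_score with four toy pixels:

from tensorflow.keras import backend as K

y_true = K.constant([1., 1., 0., 0.])
y_pred = K.constant([0.9, 0.05, 0.8, 0.0])
print(K.eval(tp_score(y_true, y_pred)))  # -> 1; only the first pixel is a TP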
Example #12
def recall(y_true, y_pred):
    """Recall metric.

    Computes the recall over the whole batch using threshold_value.
    """
    threshold_value = threshold
    # Threshold the predictions, clipping first to make sure the raw
    # values lie between 0 and 1 (an adaptation of the round() used before)
    y_pred = K.cast(K.greater(K.clip(y_pred, 0, 1), threshold_value),
                    K.floatx())
    # Number of true positives, rounded to be sure we get an integer
    true_positives = K.round(K.sum(K.clip(y_true * y_pred, 0, 1)))
    # Number of positive targets
    possible_positives = K.sum(K.clip(y_true, 0, 1))
    recall_ratio = true_positives / (possible_positives + K.epsilon())
    return recall_ratio
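Since `recall` closes over an outer `threshold`, it is presumably defined inside a factory. A hedged sketch of that wrapper (the name `recall_at` is hypothetical):

from tensorflow.keras import backend as K

def recall_at(threshold=0.5):
    # hypothetical factory closing over `threshold`, matching the
    # pattern the inner `recall` above relies on
    def recall(y_true, y_pred):
        y_pred = K.cast(K.greater(K.clip(y_pred, 0, 1), threshold),
                        K.floatx())
        true_positives = K.round(K.sum(K.clip(y_true * y_pred, 0, 1)))
        possible_positives = K.sum(K.clip(y_true, 0, 1))
        return true_positives / (possible_positives + K.epsilon())
    return recall

# model.compile(optimizer='adam', loss='binary_crossentropy',
#               metrics=[recall_at(0.4)])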
Example #13
def diaggreater3_on_axis(x, i, j, k):
    assert j < k
    # x = tf.Print(x, [x], summarize=64, message='initial x:          ')
    # tf.diag is the TF1 name (tf.linalg.tensor_diag in TF2)
    diag = tf.diag(np.array([1, 1, 1, 1], dtype=np.int32))
    # x = tf.Print(x, [diag], summarize=64, message='diagonal:           ')
    diag = K.stack([diag, diag, diag, diag], axis=i)
    # x = tf.Print(x, [diag], summarize=64, message='diagonal:           ')
    x = x * diag
    # x = tf.Print(x, [x], summarize=64, message='x * diag:           ')
    # sum the higher axis first so the lower axis index stays valid
    x = K.sum(x, axis=k + 1)
    # x = tf.Print(x, [x], summarize=64, message='x after first sum:  ')
    x = K.sum(x, axis=j + 1)
    # x = tf.Print(x, [x], summarize=64, message='x after second sum: ')
    x = K.greater(x, 3)
    x = K.any(x)
    return x
Example #14
def fn_score(y_true, y_pred, threshold=0.1):
    # one boolean row per pixel: [ground truth, NOT(prediction > threshold),
    # True]; the middle column inverts the thresholded prediction via
    # |p - 1|, so K.all over a row marks the false negatives
    fn_3d = K.concatenate([
        K.cast(K.expand_dims(K.flatten(y_true)), 'bool'),
        K.cast(
            K.expand_dims(
                K.flatten(
                    K.abs(
                        K.cast(K.greater(y_pred, K.constant(threshold)),
                               'float') - K.ones_like(y_pred)))), 'bool'),
        K.cast(K.ones_like(K.expand_dims(K.flatten(y_pred))), 'bool')
    ], axis=1)

    fn = K.sum(K.cast(K.all(fn_3d, axis=1), 'int32'))

    return fn
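The matching hedged check for fn_score with the same toy tensors:

from tensorflow.keras import backend as K

y_true = K.constant([1., 1., 0., 0.])
y_pred = K.constant([0.9, 0.05, 0.8, 0.0])
print(K.eval(fn_score(y_true, y_pred)))  # -> 1; the second pixel is missed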
Example #15
    def _filter_detections(scores, labels):
        # threshold based on score
        indices = tf.where(K.greater(scores, score_threshold))

        if nms:
            filtered_boxes = tf.gather_nd(boxes, indices)
            filtered_scores = K.gather(scores, indices)[:, 0]

            # perform NMS
            nms_indices = tf.image.non_max_suppression(
                filtered_boxes,
                filtered_scores,
                max_output_size=max_detections,
                iou_threshold=nms_threshold)

            # filter indices based on NMS
            indices = K.gather(indices, nms_indices)

        # add indices to list of all indices
        labels = tf.gather_nd(labels, indices)
        indices = K.stack([indices[:, 0], labels], axis=1)

        return indices
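A hedged standalone sketch of the score-threshold + NMS filtering above, with concrete boxes and scores in place of the closed-over variables:

import tensorflow as tf
from tensorflow.keras import backend as K

boxes = tf.constant([[0., 0., 10., 10.],
                     [1., 1., 11., 11.],
                     [20., 20., 30., 30.]])
scores = tf.constant([0.9, 0.8, 0.05])
score_threshold, nms_threshold, max_detections = 0.5, 0.5, 10

indices = tf.where(K.greater(scores, score_threshold))   # -> [[0], [1]]
filtered_boxes = tf.gather_nd(boxes, indices)
filtered_scores = K.gather(scores, indices)[:, 0]
nms_indices = tf.image.non_max_suppression(
    filtered_boxes, filtered_scores,
    max_output_size=max_detections, iou_threshold=nms_threshold)
indices = K.gather(indices, nms_indices)
print(indices.numpy())  # -> [[0]]; the overlapping lower-scoring box is dropped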
Example #16
def discriminator_loss(y_true, y_pred):
    loss = mean_squared_error(y_true, y_pred)
    is_large = k.greater(loss, k.constant(_disc_train_thresh))  # threshold
    is_large = k.cast(is_large, k.floatx())
    return loss * is_large  # binary threshold the loss to prevent overtraining the discriminator
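A quick hedged numeric check of the gating idea: losses below the threshold are zeroed so easy batches stop training the discriminator (`_disc_train_thresh` is assumed to be 0.1 here).

import tensorflow as tf
from tensorflow.keras import backend as k
from tensorflow.keras.losses import mean_squared_error

_disc_train_thresh = 0.1
y_true = tf.constant([[1.0], [1.0]])
y_pred = tf.constant([[0.9], [0.1]])   # first is close, second is far off
loss = mean_squared_error(y_true, y_pred)
gated = loss * k.cast(k.greater(loss, k.constant(_disc_train_thresh)),
                      k.floatx())
print(gated.numpy())  # -> [0.   0.81]; only the large loss survives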
Example #17
def greater3_on_axis(x, axis):
    # sum along `axis`; any sum greater than 3 means a full line of four
    x = K.sum(x, axis=axis)
    x = K.greater(x, 3)
    x = K.any(x)
    return x
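A hedged toy check of the "sum > 3" idea these board helpers share: masking a 4x4 plane with its diagonal and summing detects four in a row.

import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K

board = tf.constant(np.eye(4, dtype=np.int32))   # four marks on the diagonal
masked = board * tf.constant(np.eye(4, dtype=np.int32))
line = K.sum(K.sum(masked, axis=1), axis=0)      # marks on the diagonal
print(K.eval(K.greater(line, 3)))                # -> True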
Example #18
def hamming_loss(y_true, y_pred, tval=0.4):
    # a prediction counts as wrong when it differs from the target by
    # more than tval
    tmp = K.abs(y_true - y_pred)
    return K.mean(K.cast(K.greater(tmp, tval), dtype=K.floatx()))
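A hedged numeric check of the tolerance-based Hamming loss above:

from tensorflow.keras import backend as K

y_true = K.constant([1.0, 0.0, 1.0, 0.0])
y_pred = K.constant([0.9, 0.6, 0.2, 0.1])
print(K.eval(hamming_loss(y_true, y_pred)))  # -> 0.5; two of four differ by more than 0.4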