    def __init__(self,
                 max_value=None,
                 negative_slope=0,
                 threshold=0,
                 **kwargs):
        # Accept values passed in as dicts of the form {'value': ...}.
        if isinstance(max_value, dict):
            max_value = max_value['value']
        if isinstance(negative_slope, dict):
            negative_slope = negative_slope['value']
        if isinstance(threshold, dict):
            threshold = threshold['value']

        super(ReLU, self).__init__(**kwargs)
        if max_value is not None and max_value < 0.:
            raise ValueError('max_value of Relu layer '
                             'cannot be negative value: ' + str(max_value))
        if negative_slope < 0.:
            raise ValueError('negative_slope of Relu layer '
                             'cannot be negative value: ' +
                             str(negative_slope))

        self.supports_masking = True
        if max_value is not None:
            max_value = K.cast_to_floatx(max_value)
        self.max_value = max_value
        self.negative_slope = K.cast_to_floatx(negative_slope)
        self.threshold = K.cast_to_floatx(threshold)
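A minimal standalone sketch of what K.cast_to_floatx does in the snippet above (assuming a TensorFlow 2.x / tf.keras environment; the import path and sample values are illustrative, not part of the example): it converts Python scalars and NumPy arrays to the configured default float dtype, which is why None has to be guarded before the cast.

# Hypothetical usage, separate from the ReLU snippet above.
from tensorflow.keras import backend as K

print(K.floatx())                  # default float type, e.g. 'float32'
print(K.cast_to_floatx(6))         # 6.0 as a float32 scalar
print(K.cast_to_floatx([0.1, 2]))  # NumPy array with dtype float32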
Example #2
def less(f, other):
    """Element-wise comparison applied to the `Functional` objects.

    # Arguments
        f: Functional object.
        other: A Python number, a tensor, or a Functional object.

    # Returns
        A Functional.
    """
    validate_functional(f)

    inputs = f.inputs.copy()
    if is_functional(other):
        inputs += to_list(other.inputs)
        lmbd = [
            Lambda(lambda x: K.cast_to_floatx(K.less(x[0], x[1])),
                   name=graph_unique_name("less")) for X in f.outputs
        ]
    else:
        _warn_for_ndarray(other)
        lmbd = [
            Lambda(lambda x: K.cast_to_floatx(K.less(x, other)),
                   name=graph_unique_name("less")) for X in f.outputs
        ]

    Functional = f.get_class()
    res = Functional(inputs=unique_tensors(inputs),
                     outputs=_apply_operation(lmbd, f, other),
                     layers=lmbd)
    return res
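The Lambda bodies above follow a common pattern: a backend comparison returns a boolean tensor, and cast_to_floatx turns it into 0./1. values in the default float dtype so the result can keep flowing through the graph. A small illustration of just that pattern, assuming TensorFlow 2.x eager execution (the tensors are made up for the example):

import tensorflow as tf
from tensorflow.keras import backend as K

a = tf.constant([1.0, 2.0, 3.0])
b = tf.constant([2.0, 2.0, 2.0])

mask = K.cast_to_floatx(K.less(a, b))  # boolean a < b, cast to floats
print(mask)                            # [1. 0. 0.], dtype float32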
Example #3
def tol_equal(f, other, tol=1e-8):
    """Element-wise comparison applied to the `Functional` objects.

    # Arguments
        f: Functional object.
        other: A Python number, a tensor, or a Functional object.
        tol: Float tolerance, defaults to 1e-8.

    # Returns
        A Functional.
    """
    validate_functional(f)
    assert isinstance(tol, float), "Expected a float for tolerance. "

    inputs = f.inputs.copy()
    if is_functional(other):
        inputs += to_list(other.inputs)
        lmbd = [
            Lambda(lambda x: K.cast_to_floatx(
                K.less_equal(K.abs(x[0] - x[1]), tol)),
                   name=graph_unique_name("tol_equal")) for X in f.outputs
        ]
    else:
        _warn_for_ndarray(other)
        lmbd = [
            Lambda(lambda x: K.cast_to_floatx(
                K.less_equal(K.abs(x - other), tol)),
                   name=graph_unique_name("tol_equal")) for X in f.outputs
        ]

    Functional = f.get_class()
    res = Functional(inputs=unique_tensors(inputs),
                     outputs=_apply_operation(lmbd, f, other),
                     layers=lmbd)
    return res
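Why a tolerance helps: exact float equality is brittle, so tol_equal compares |x - y| against a threshold instead. A tiny standalone illustration of the same idea, assuming TensorFlow 2.x eager execution (the values and the 1e-6 threshold are made up):

import tensorflow as tf
from tensorflow.keras import backend as K

x = tf.constant([1.0, 1.0000001, 1.5])
y = tf.constant([1.0, 1.0,       1.0])

print(K.cast_to_floatx(K.equal(x, y)))                     # [1. 0. 0.]
print(K.cast_to_floatx(K.less_equal(K.abs(x - y), 1e-6)))  # [1. 1. 0.]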
Example #4
    def get_updates(self, loss, params):
        grads = self.get_gradients(loss, params)
        self.updates = [state_ops.assign_add(self.iterations, 1)]

        lr = self.lr
        if self.initial_decay > 0:
            lr = lr * (1. / (1. + self.decay *
                             math_ops.cast(self.iterations, K.dtype(self.decay))))

        t = math_ops.cast(self.iterations, K.floatx()) + 1

        # Due to the recommendations in [2], i.e. warming momentum schedule
        momentum_cache_t = self.beta_1 * (
            1. - 0.5 *
            (math_ops.pow(K.cast_to_floatx(0.96), t * self.schedule_decay)))
        momentum_cache_t_1 = self.beta_1 * (
            1. - 0.5 *
            (math_ops.pow(K.cast_to_floatx(0.96), (t + 1) * self.schedule_decay)))
        m_schedule_new = self.m_schedule * momentum_cache_t
        m_schedule_next = self.m_schedule * momentum_cache_t * momentum_cache_t_1
        self.updates.append((self.m_schedule, m_schedule_new))

        ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
        vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
        if self.amsgrad:
            vhats = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
        else:
            vhats = [K.zeros(1) for _ in params]

        self.weights = [self.iterations] + ms + vs + vhats

        for p, g, m, v, vhat in zip(params, grads, ms, vs, vhats):
            # the following equations given in [1]
            g_prime = g / (1. - m_schedule_new)
            m_t = self.beta_1 * m + (1. - self.beta_1) * g
            m_t_prime = m_t / (1. - m_schedule_next)
            v_t = self.beta_2 * v + (1. - self.beta_2) * math_ops.square(g)
            if self.amsgrad:
                vhat_t = math_ops.maximum(vhat, v_t)
                self.updates.append(state_ops.assign(vhat, vhat_t))
                v_t_prime = vhat_t / (1. - math_ops.pow(self.beta_2, t))
            else:
                v_t_prime = v_t / (1. - math_ops.pow(self.beta_2, t))
            m_t_bar = (1. - momentum_cache_t) * g_prime + momentum_cache_t_1 * m_t_prime

            self.updates.append(state_ops.assign(m, m_t))
            self.updates.append(state_ops.assign(v, v_t))

            p_t = p - lr * m_t_bar / (gen_math_ops.sqrt(v_t_prime) + self.epsilon)

            new_p = p_t

            # Apply constraints.
            if getattr(p, 'constraint', None) is not None:
                new_p = p.constraint(new_p)

            self.updates.append(state_ops.assign(p, new_p))
        return self.updates
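The momentum_cache_t terms above implement the warming momentum schedule used by Nadam-style optimizers, beta_1 * (1 - 0.5 * 0.96**(t * schedule_decay)). A short NumPy sketch of how that schedule ramps up over training steps (beta_1=0.9 and schedule_decay=0.004 are the usual defaults, chosen here as an assumption):

import numpy as np

beta_1, schedule_decay = 0.9, 0.004

def momentum_cache(t):
    # Warming schedule: starts near 0.5 * beta_1 and approaches beta_1.
    return beta_1 * (1. - 0.5 * np.power(0.96, t * schedule_decay))

for t in (1, 10, 100, 1000):
    print(t, momentum_cache(t))  # ~0.450, ~0.451, ~0.457, ~0.518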
  def __init__(self, l1=0., l2=0.):  # pylint: disable=redefined-outer-name
    # The default value for l1 and l2 are different from the value in l1_l2
    # for backward compatibility reason. Eg, L1L2(l2=0.1) will only have l2
    # and no l1 penalty.
    l1 = 0. if l1 is None else l1
    l2 = 0. if l2 is None else l2
    _check_penalty_number(l1)
    _check_penalty_number(l2)

    self.l1 = backend.cast_to_floatx(l1)
    self.l2 = backend.cast_to_floatx(l2)
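For context, the cast l1/l2 floats are what later scale the penalty when the regularizer is invoked on a weight tensor. A minimal sketch of that use, assuming a Keras-style __call__ (the class below is illustrative, not the snippet's actual implementation):

import tensorflow as tf
from tensorflow.keras import backend

class L1L2Sketch:
    """Toy regularizer showing how the cast penalties are applied."""

    def __init__(self, l1=0., l2=0.):
        self.l1 = backend.cast_to_floatx(l1)
        self.l2 = backend.cast_to_floatx(l2)

    def __call__(self, x):
        regularization = tf.constant(0., dtype=x.dtype)
        if self.l1:
            regularization += self.l1 * tf.reduce_sum(tf.abs(x))
        if self.l2:
            regularization += self.l2 * tf.reduce_sum(tf.square(x))
        return regularization

print(L1L2Sketch(l1=0.01)(tf.constant([[1.0, -2.0]])))  # 0.01 * (1 + 2) = 0.03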
  def __init__(self, max_value=None, negative_slope=0, threshold=0, **kwargs):
    super(ReLU, self).__init__(**kwargs)
    if max_value is not None and max_value < 0.:
      raise ValueError('max_value of Relu layer '
                       'cannot be negative value: ' + str(max_value))
    if negative_slope < 0.:
      raise ValueError('negative_slope of Relu layer '
                       'cannot be negative value: ' + str(negative_slope))

    self.supports_masking = True
    if max_value is not None:
      max_value = K.cast_to_floatx(max_value)
    self.max_value = max_value
    self.negative_slope = K.cast_to_floatx(negative_slope)
    self.threshold = K.cast_to_floatx(threshold)
Example #7
  def __init__(self,
               kernel_size,
               strides,
               depthwise_initializer,
               padding,
               use_bias,
               runtimes=None,
               dropout_rate=None,
               **kwargs):

    super(DepthwiseConv2DMasked, self).__init__(
            kernel_size=kernel_size,
            strides=strides,
            depthwise_initializer=depthwise_initializer,
            padding=padding,
            use_bias=use_bias,
            **kwargs)

    self.runtimes = runtimes
    self.dropout_rate = tf.stop_gradient(dropout_rate)

    if kernel_size[0] != 5: # normal Depthwise type
      self.custom = False
    else:
      self.custom = True 
      if self.runtimes is not None:
        self.R50c = K.cast_to_floatx(self.runtimes[2]) # 50% of the 5x5
        self.R100c = K.cast_to_floatx(self.runtimes[3]) # 100% of the 5x5
        self.R5x5 = K.cast_to_floatx(self.runtimes[3]) # 5x5 for 100%
        self.R3x3 = K.cast_to_floatx(self.runtimes[1]) # 3x3 for 100%
      else:
        self.R50c = K.cast_to_floatx(0.0)
        self.R100c = K.cast_to_floatx(0.0)
        self.R5x5 = K.cast_to_floatx(0.0)
        self.R3x3 = K.cast_to_floatx(0.0)
Example #8
 def __init__(self, max_value=None, **kwargs):
   super(ReLU, self).__init__(**kwargs)
   self.supports_masking = True
   if max_value is not None and max_value < 0.:
     raise ValueError('max_value of Relu layer '
                      'cannot be negative value: ' + str(max_value))
   if max_value is not None:
     max_value = K.cast_to_floatx(max_value)
   self.max_value = max_value
  def __init__(self, max_value=None, negative_slope=0, threshold=0, **kwargs):
    super(ReLU, self).__init__(**kwargs)
    if max_value is not None and max_value < 0.:
      raise ValueError('max_value of Relu layer '
                       'cannot be negative value: ' + str(max_value))
    if negative_slope < 0.:
      raise ValueError('negative_slope of Relu layer '
                       'cannot be negative value: ' + str(negative_slope))

    self.supports_masking = True
    if max_value is not None:
      max_value = K.cast_to_floatx(max_value)
    self.max_value = max_value
    self.negative_slope = K.cast_to_floatx(negative_slope)
    self.threshold = K.cast_to_floatx(threshold)
    self._can_use_graph_functions = True
Example #10
    def __init__(self,
                 deviceName,
                 biasValue=None,
                 fractionZero=0.9,
                 min=-1,
                 max=1,
                 **kwargs):
        """

            :param deviceName: device to use for computation
            :param biasValue: either None(random but constant through layer) or 1d-array of size units (nb of output neurons).
            :param fractionZero: fraction of weight that should remain at 0
            :param min: min value for weights
            :param max: max value for weights
            :param kwargs:
        """
        super(clippedSparseBioDenseLayer, self).__init__(**kwargs)
        self.supports_masking = True
        if biasValue is not None:
            self.hasPredefinedBias = True
            self.theta = K.cast_to_floatx(biasValue)
        else:
            self.hasPredefinedBias = False
        self.sparseInitializer = sparseInitializer(fractionZero,
                                                   minval=min,
                                                   maxval=max)
        self.deviceName = deviceName  # the device on which the main operations will be conducted (forward and backward propagations)
Example #11
def weighted_categorical_crossentropy(y_true, y_pred,
                                      n_classes=3, axis=None,
                                      from_logits=False):
    """Categorical crossentropy between an output tensor and a target tensor.
    Automatically computes the class weights from the target image and uses
    them to weight the cross entropy

    Args:
        y_true: A tensor of the same shape as y_pred.
        y_pred: A tensor resulting from a softmax
            (unless from_logits is True, in which
            case y_pred is expected to be the logits).
        n_classes: Integer, the number of classes; used to normalize the
            computed class weights.
        axis: Integer, the class/channel axis. If None, it is inferred
            from the backend image data format.
        from_logits: Boolean, whether y_pred is the
            result of a softmax, or is a tensor of logits.

    Returns:
        tensor: Output tensor.
    """
    if from_logits:
        raise Exception('weighted_categorical_crossentropy cannot take logits')
    if axis is None:
        axis = 1 if K.image_data_format() == 'channels_first' else K.ndim(y_pred) - 1
    reduce_axis = [x for x in list(range(K.ndim(y_pred))) if x != axis]
    # scale preds so that the class probas of each sample sum to 1
    y_pred = y_pred / K.sum(y_pred, axis=axis, keepdims=True)
    # manual computation of crossentropy
    _epsilon = tf.convert_to_tensor(K.epsilon(), y_pred.dtype.base_dtype)
    y_pred = tf.clip_by_value(y_pred, _epsilon, 1. - _epsilon)
    y_true_cast = K.cast(y_true, K.floatx())
    total_sum = K.sum(y_true_cast)
    class_sum = K.sum(y_true_cast, axis=reduce_axis, keepdims=True)
    class_weights = 1.0 / K.cast_to_floatx(n_classes) * tf.divide(total_sum, class_sum + 1.)
    return - K.sum((y_true * K.log(y_pred) * class_weights), axis=axis)
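A small NumPy sketch of the class-weight computation above, on a made-up 3-class one-hot target (the array is purely illustrative; it just shows how under-represented classes receive larger weights):

import numpy as np

n_classes = 3
# Made-up one-hot targets for 8 pixels: 5 background, 2 of class 1, 1 of class 2.
y_true = np.zeros((1, 8, n_classes), dtype=np.float32)
y_true[0, :5, 0] = 1.
y_true[0, 5:7, 1] = 1.
y_true[0, 7, 2] = 1.

axis = 2                                  # channel axis (channels_last)
reduce_axis = tuple(a for a in range(y_true.ndim) if a != axis)

total_sum = y_true.sum()                              # 8 labelled pixels
class_sum = y_true.sum(axis=reduce_axis, keepdims=True)
class_weights = (1.0 / n_classes) * total_sum / (class_sum + 1.)
print(class_weights.ravel())  # [0.444 0.889 1.333]: rarer classes weigh more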
 def __init__(self, alpha=0.3, **kwargs):
   super(LeakyReLU, self).__init__(**kwargs)
   if alpha is None:
     raise ValueError('alpha of leaky Relu layer '
                      'cannot be None. Required a float')
   self.supports_masking = True
   self.alpha = K.cast_to_floatx(alpha)
Example #13
    def __init__(self, biasValue=[1.0], fractionZero=0.9, min=-1, max=1,
                 rate=10., rateInhib=10., use_bias=True, gpuName=None,
                 **kwargs):
        """

            :param biasValue: either None(random but constant through layer) or 1d-array of size units (nb of output neurons).
            :param fractionZero: fraction of weight that should remain at 0
            :param min: min value for weights
            :param max: max value for weights
            :param kwargs:
            :param rate: float constant, the rate of production
            :param gpuName: for compatibility reasons, not used
        """
        super(autoRegulLayer, self).__init__(**kwargs)
        self.supports_masking = True
        if biasValue is not None:
            self.hasPredefinedBias = True
            self.theta = K.cast_to_floatx(biasValue)
        else:
            self.hasPredefinedBias = False
        assert 0 <= fractionZero <= 1
        self.sparseInitializer = sparseInitializer(fractionZero, minval=min, maxval=max)
        assert isinstance(rate, float)
        self.rate = rate
        assert isinstance(rateInhib, float)
        self.rateInhib = rateInhib
        self.use_bias = use_bias
Example #14
 def __init__(self, alpha=1.0, **kwargs):
     super(ELU, self).__init__(**kwargs)
     if alpha is None:
         raise ValueError('Alpha of an ELU layer cannot be None, '
                          'requires a float. Got %s' % alpha)
     self.supports_masking = True
     self.alpha = backend.cast_to_floatx(alpha)
Example #15
    def get_updates(self, loss, params):
        grads = self.get_gradients(loss, params)
        self.updates = []

        with ops.control_dependencies(
            [state_ops.assign_add(self.iterations, 1)]):
            t = math_ops.cast(self.iterations, K.floatx())

        # Due to the recommendations in [2], i.e. warming momentum schedule
        momentum_cache_t = self.beta_1 * (
            1. - 0.5 *
            (math_ops.pow(K.cast_to_floatx(0.96), t * self.schedule_decay)))
        momentum_cache_t_1 = self.beta_1 * (
            1. - 0.5 * (math_ops.pow(K.cast_to_floatx(0.96),
                                     (t + 1) * self.schedule_decay)))
        m_schedule_new = self.m_schedule * momentum_cache_t
        m_schedule_next = self.m_schedule * momentum_cache_t * momentum_cache_t_1
        self.updates.append((self.m_schedule, m_schedule_new))

        shapes = [K.int_shape(p) for p in params]
        ms = [K.zeros(shape) for shape in shapes]
        vs = [K.zeros(shape) for shape in shapes]

        self.weights = [self.iterations, self.m_schedule] + ms + vs

        for p, g, m, v in zip(params, grads, ms, vs):
            # the following equations given in [1]
            g_prime = g / (1. - m_schedule_new)
            m_t = self.beta_1 * m + (1. - self.beta_1) * g
            m_t_prime = m_t / (1. - m_schedule_next)
            v_t = self.beta_2 * v + (1. - self.beta_2) * math_ops.square(g)
            v_t_prime = v_t / (1. - math_ops.pow(self.beta_2, t))
            m_t_bar = (1. - momentum_cache_t
                       ) * g_prime + momentum_cache_t_1 * m_t_prime

            self.updates.append(state_ops.assign(m, m_t))
            self.updates.append(state_ops.assign(v, v_t))

            p_t = p - self.lr * m_t_bar / (K.sqrt(v_t_prime) + self.epsilon)
            new_p = p_t

            # Apply constraints.
            if getattr(p, 'constraint', None) is not None:
                new_p = p.constraint(new_p)

            self.updates.append(state_ops.assign(p, new_p))
        return self.updates
Example #16
 def _fd_conditional(y_true, y_pred):
     # if there are no masks annotations, return 0; else, compute fdl loss
     return tf.cond(
         K.any(K.equal(K.shape(y_true), 0)),
         lambda: K.cast_to_floatx(0.0),
         lambda: _fd_batch(y_true, y_pred,
                           iou_threshold=self.fdl_iou_threshold,
                           parallel_iterations=self.parallel_iterations))
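Both branches of a tf.cond must produce values with matching dtype, which is why the zero fallback above goes through cast_to_floatx instead of being a bare Python 0. A standalone illustration of the pattern, assuming TensorFlow 2.x (the loss_or_zero helper and the sample tensors are made up; _fd_batch and the thresholds from the snippet are not reproduced):

import tensorflow as tf
from tensorflow.keras import backend as K

def loss_or_zero(y_true, compute_loss):
    # Return 0. in the default float dtype when y_true is empty,
    # otherwise evaluate the real loss branch.
    return tf.cond(K.any(K.equal(K.shape(y_true), 0)),
                   lambda: K.cast_to_floatx(0.0),
                   lambda: compute_loss(y_true))

print(loss_or_zero(tf.zeros((0, 4)), tf.reduce_sum))  # 0.0
print(loss_or_zero(tf.ones((3, 4)), tf.reduce_sum))   # 12.0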
Example #17
 def __init__(self, alpha=0.3, **kwargs):
     super(LeakyReLU, self).__init__(**kwargs)
     if alpha is None:
         raise ValueError('The alpha value of a Leaky ReLU layer '
                          'cannot be None, needs a float. '
                          'Got %s' % alpha)
     self.supports_masking = True
     self.alpha = backend.cast_to_floatx(alpha)
Example #18
  def get_updates(self, loss, params):
    grads = self.get_gradients(loss, params)
    self.updates = []

    with ops.control_dependencies([state_ops.assign_add(self.iterations, 1)]):
      t = math_ops.cast(self.iterations, K.floatx())

    # Due to the recommendations in [2], i.e. warming momentum schedule
    momentum_cache_t = self.beta_1 * (
        1. - 0.5 *
        (math_ops.pow(K.cast_to_floatx(0.96), t * self.schedule_decay)))
    momentum_cache_t_1 = self.beta_1 * (
        1. - 0.5 *
        (math_ops.pow(K.cast_to_floatx(0.96), (t + 1) * self.schedule_decay)))
    m_schedule_new = self.m_schedule * momentum_cache_t
    m_schedule_next = self.m_schedule * momentum_cache_t * momentum_cache_t_1
    self.updates.append((self.m_schedule, m_schedule_new))

    shapes = [K.int_shape(p) for p in params]
    ms = [K.zeros(shape) for shape in shapes]
    vs = [K.zeros(shape) for shape in shapes]

    self.weights = [self.iterations, self.m_schedule] + ms + vs

    for p, g, m, v in zip(params, grads, ms, vs):
      # the following equations given in [1]
      g_prime = g / (1. - m_schedule_new)
      m_t = self.beta_1 * m + (1. - self.beta_1) * g
      m_t_prime = m_t / (1. - m_schedule_next)
      v_t = self.beta_2 * v + (1. - self.beta_2) * math_ops.square(g)
      v_t_prime = v_t / (1. - math_ops.pow(self.beta_2, t))
      m_t_bar = (1. -
                 momentum_cache_t) * g_prime + momentum_cache_t_1 * m_t_prime

      self.updates.append(state_ops.assign(m, m_t))
      self.updates.append(state_ops.assign(v, v_t))

      p_t = p - self.lr * m_t_bar / (K.sqrt(v_t_prime) + self.epsilon)
      new_p = p_t

      # Apply constraints.
      if getattr(p, 'constraint', None) is not None:
        new_p = p.constraint(new_p)

      self.updates.append(state_ops.assign(p, new_p))
    return self.updates
Example #19
    def __init__(self, l2=0.01, **kwargs):  # pylint: disable=redefined-outer-name
        l2 = kwargs.pop('l', l2)  # Backwards compatibility
        if kwargs:
            raise TypeError('Argument(s) not recognized: %s' % (kwargs, ))

        _check_penalty_number(l2)

        self.l2 = backend.cast_to_floatx(l2)
  def __init__(self, max_value=None, negative_slope=0, threshold=0, **kwargs):
    super(ReLU, self).__init__(**kwargs)
    if max_value is not None and max_value < 0.:
      raise ValueError('max_value of Relu layer '
                       'cannot be negative value: ' + str(max_value))
    if negative_slope < 0.:
      raise ValueError('negative_slope of Relu layer '
                       'cannot be negative value: ' + str(negative_slope))
    if threshold is None:
      raise ValueError('threshold of Relu layer '
                       'cannot be None. Required a float')

    self.supports_masking = True
    if max_value is not None:
      max_value = K.cast_to_floatx(max_value)
    self.max_value = max_value
    self.negative_slope = K.cast_to_floatx(negative_slope)
    self.threshold = K.cast_to_floatx(threshold)
Example #21
 def __init__(self, theta=1.0, **kwargs):
     super(ThresholdedReLU, self).__init__(**kwargs)
     if theta is None:
         raise ValueError('Theta of a Thresholded ReLU layer cannot be '
                          'None, requires a float. Got %s' % theta)
     if theta < 0:
         raise ValueError('The theta value of a Thresholded ReLU layer '
                          'should be >=0, got %s' % theta)
     self.supports_masking = True
     self.theta = backend.cast_to_floatx(theta)
Example #22
def not_equal(f, other, tol=None):
    """Element-wise comparison applied to the `Functional` objects.

    # Arguments
        f: Functional object.
        other: A Python number, a tensor, or a Functional object.
        tol: (float) Optional tolerance for the comparison.

    # Returns
        A Functional.
    """
    validate_functional(f)
    assert isinstance(
        tol, (type(None), float)), 'Expected a floating value for `tol`.'

    inputs = f.inputs.copy()
    if is_functional(other):
        inputs += to_list(other.inputs)
        if tol is None:
            lambda_opr = lambda x: K.cast_to_floatx(K.not_equal(x[0], x[1]))
        else:
            lambda_opr = lambda x: K.cast_to_floatx(
                K.greater(K.abs(x[0] - x[1]), tol))
    else:
        _warn_for_ndarray(other)
        if tol is None:
            lambda_opr = lambda x: K.cast_to_floatx(K.not_equal(x, other))
        else:
            lambda_opr = lambda x: K.cast_to_floatx(
                K.greater(K.abs(x - other), tol))

    lmbd = [
        Lambda(lambda_opr, name=graph_unique_name("not_equal"))
        for X in f.outputs
    ]

    Functional = f.get_class()
    res = Functional(inputs=unique_tensors(inputs),
                     outputs=_apply_operation(lmbd, f, other),
                     layers=lmbd)
    return res
Example #23
  def get_constants(self, inputs, training=None):
    constants = []
    if self.implementation == 0 and 0 < self.dropout < 1:
      ones = K.zeros_like(inputs)
      ones = K.sum(ones, axis=1)
      ones += 1

      def dropped_inputs():
        return K.dropout(ones, self.dropout)

      dp_mask = [
          K.in_train_phase(dropped_inputs, ones, training=training)
          for _ in range(4)
      ]
      constants.append(dp_mask)
    else:
      constants.append([K.cast_to_floatx(1.) for _ in range(4)])

    if 0 < self.recurrent_dropout < 1:
      depthwise_shape = list(self.depthwise_kernel_shape)
      pointwise_shape = list(self.pointwise_kernel_shape)
      ones = K.zeros_like(inputs)
      ones = K.sum(ones, axis=1)
      ones = self.input_conv(ones, K.zeros(depthwise_shape),
                             K.zeros(pointwise_shape), padding=self.padding)
      ones += 1.

      def dropped_inputs():  # pylint: disable=function-redefined
        return K.dropout(ones, self.recurrent_dropout)

      rec_dp_mask = [
          K.in_train_phase(dropped_inputs, ones, training=training)
          for _ in range(4)
      ]
      constants.append(rec_dp_mask)
    else:
      constants.append([K.cast_to_floatx(1.) for _ in range(4)])
    return constants
    def __init__(self,
                 max_value=None,
                 negative_slope=0,
                 threshold=0,
                 **kwargs):
        super(ReLU, self).__init__(**kwargs)
        if max_value is not None and max_value < 0.:
            raise ValueError('max_value of a ReLU layer cannot be a negative '
                             'value. Got: %s' % max_value)
        if negative_slope is None or negative_slope < 0.:
            raise ValueError(
                'negative_slope of a ReLU layer cannot be a negative '
                'value. Got: %s' % negative_slope)
        if threshold is None or threshold < 0.:
            raise ValueError('threshold of a ReLU layer cannot be a negative '
                             'value. Got: %s' % threshold)

        self.supports_masking = True
        if max_value is not None:
            max_value = backend.cast_to_floatx(max_value)
        self.max_value = max_value
        self.negative_slope = backend.cast_to_floatx(negative_slope)
        self.threshold = backend.cast_to_floatx(threshold)
Example #25
def compute_fd_loss(boxes, scores, annotations, iou_threshold=0.75):
    """compute the overlap of boxes with annotations"""
    iou = overlap(boxes, annotations)

    max_iou = K.max(iou, axis=1, keepdims=True)
    targets = K.cast(K.greater_equal(max_iou, iou_threshold), K.floatx())

    # compute the loss
    loss = focal(targets, scores)  # alpha=self.alpha, gamma=self.gamma)

    # compute the normalizer: the number of cells present in the image
    normalizer = K.cast(K.shape(annotations)[0], K.floatx())
    normalizer = K.maximum(K.cast_to_floatx(1.0), normalizer)

    return K.sum(loss) / normalizer
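A quick standalone look at the normalizer clamp above, assuming TensorFlow 2.x (the empty annotations tensor is made up): taking K.maximum with a cast_to_floatx constant keeps the division well defined even when an image has no annotations.

import tensorflow as tf
from tensorflow.keras import backend as K

annotations = tf.zeros((0, 5))                            # no annotations
normalizer = K.cast(K.shape(annotations)[0], K.floatx())  # 0.0
normalizer = K.maximum(K.cast_to_floatx(1.0), normalizer)
print(normalizer)                                         # 1.0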
Example #26
    def classification_loss(self, y_true, y_pred):
        # TODO: try weighted_categorical_crossentropy
        labels = y_true[..., :-1]
        # -1 for ignore, 0 for background, 1 for object
        anchor_state = y_true[..., -1]

        classification = y_pred
        # filter out "ignore" anchors
        indices = tf.where(K.not_equal(anchor_state, -1))
        labels = tf.gather_nd(labels, indices)
        classification = tf.gather_nd(classification, indices)

        # compute the loss
        loss = focal(labels, classification, alpha=self.alpha, gamma=self.gamma)

        # compute the normalizer: the number of positive anchors
        normalizer = tf.where(K.equal(anchor_state, 1))
        normalizer = K.cast(K.shape(normalizer)[0], K.floatx())
        normalizer = K.maximum(K.cast_to_floatx(1.0), normalizer)

        return K.sum(loss) / normalizer
Example #27
 def __init__(self, l1=0., l2=0.):  # pylint: disable=redefined-outer-name
     self.l1 = K.cast_to_floatx(l1)
     self.l2 = K.cast_to_floatx(l2)
 def __init__(self, alpha=0.3, **kwargs):
     super(LeakyReLU, self).__init__(**kwargs)
     self.supports_masking = True
     self.alpha = K.cast_to_floatx(alpha)
 def __init__(self, theta=1.0, **kwargs):
     super(ThresholdedReLU, self).__init__(**kwargs)
     self.supports_masking = True
     self.theta = K.cast_to_floatx(theta)
Example #30
 def __init__(self, l1=0.0, l2=0.0, tv=0.0):
     self.l1 = K.cast_to_floatx(l1)
     self.l2 = K.cast_to_floatx(l2)
     self.tv = K.cast_to_floatx(tv)
 def __init__(self, alpha=0.3, **kwargs):
   super(LeakyReLU, self).__init__(**kwargs)
   self.supports_masking = True
   self.alpha = K.cast_to_floatx(alpha)
 def __init__(self, theta=1.0, **kwargs):
   super(ThresholdedReLU, self).__init__(**kwargs)
   self.supports_masking = True
   self.theta = K.cast_to_floatx(theta)
Example #33
    def mask_loss(self, y_true, y_pred):
        def _mask(y_true, y_pred, iou_threshold=0.5, mask_size=(28, 28)):
            # split up the different predicted blobs
            boxes = y_pred[:, :, :4]
            masks = y_pred[:, :, 4:]

            # split up the different blobs
            annotations = y_true[:, :, :5]
            width = K.cast(y_true[0, 0, 5], dtype='int32')
            height = K.cast(y_true[0, 0, 6], dtype='int32')
            masks_target = y_true[:, :, 7:]

            # reshape the masks back to their original size
            masks_target = K.reshape(masks_target,
                                     (K.shape(masks_target)[0] *
                                      K.shape(masks_target)[1], height, width))
            masks = K.reshape(masks, (K.shape(masks)[0] * K.shape(masks)[1],
                                      mask_size[0], mask_size[1], -1))

            # batch size > 1 fix
            boxes = K.reshape(boxes, (-1, K.shape(boxes)[2]))
            annotations = K.reshape(annotations, (-1, K.shape(annotations)[2]))

            # compute overlap of boxes with annotations
            iou = overlap(boxes, annotations)
            argmax_overlaps_inds = K.argmax(iou, axis=1)
            max_iou = K.max(iou, axis=1)

            # filter those with IoU > 0.5
            indices = tf.where(K.greater_equal(max_iou, iou_threshold))
            boxes = tf.gather_nd(boxes, indices)
            masks = tf.gather_nd(masks, indices)
            argmax_overlaps_inds = tf.gather_nd(argmax_overlaps_inds, indices)
            argmax_overlaps_inds = K.cast(argmax_overlaps_inds, 'int32')
            labels = K.gather(annotations[:, 4], argmax_overlaps_inds)
            labels = K.cast(labels, 'int32')

            # make normalized boxes
            x1 = boxes[:, 0]
            y1 = boxes[:, 1]
            x2 = boxes[:, 2]
            y2 = boxes[:, 3]
            boxes = K.stack([
                y1 / (K.cast(height, dtype=K.floatx()) - 1),
                x1 / (K.cast(width, dtype=K.floatx()) - 1),
                (y2 - 1) / (K.cast(height, dtype=K.floatx()) - 1),
                (x2 - 1) / (K.cast(width, dtype=K.floatx()) - 1),
            ],
                            axis=1)

            # crop and resize masks_target
            # append a fake channel dimension
            masks_target = K.expand_dims(masks_target, axis=3)
            masks_target = tf.image.crop_and_resize(masks_target, boxes,
                                                    argmax_overlaps_inds,
                                                    mask_size)

            # remove fake channel dimension
            masks_target = masks_target[:, :, :, 0]

            # gather the predicted masks using the annotation label
            masks = tf.transpose(masks, (0, 3, 1, 2))
            label_indices = K.stack([tf.range(K.shape(labels)[0]), labels],
                                    axis=1)
            masks = tf.gather_nd(masks, label_indices)

            # compute mask loss
            mask_loss = K.binary_crossentropy(masks_target, masks)
            normalizer = K.shape(masks)[0] * K.shape(masks)[1] * K.shape(
                masks)[2]
            normalizer = K.maximum(K.cast(normalizer, K.floatx()), 1)
            mask_loss = K.sum(mask_loss) / normalizer

            return mask_loss

        # if there are no masks annotations, return 0; else, compute the masks loss
        return tf.cond(
            K.any(K.equal(K.shape(y_true), 0)), lambda: K.cast_to_floatx(0.0),
            lambda: _mask(y_true,
                          y_pred,
                          iou_threshold=self.iou_threshold,
                          mask_size=self.mask_size))
Example #34
 def __init__(self, l1=0., l2=0.):  # pylint: disable=redefined-outer-name
   self.l1 = K.cast_to_floatx(l1)
   self.l2 = K.cast_to_floatx(l2)
Example #35
 def __init__(self, l1=0.01, **kwargs):  # pylint: disable=redefined-outer-name
     l1 = kwargs.pop('l', l1)  # Backwards compatibility
     if kwargs:
         raise TypeError('Argument(s) not recognized: %s' % (kwargs, ))
     self.l1 = backend.cast_to_floatx(l1)