Example #1
def triplet_loss(y_true, y_pred, alpha=0.6):
    """
    Implementation of the triplet loss function
    Arguments:
    y_true -- true labels; required by the Keras loss signature but not used in this function.
    y_pred -- tensor containing the concatenation of three encodings:
            anchor -- the encodings for the anchor data
            positive -- the encodings for the positive data (similar to anchor)
            negative -- the encodings for the negative data (different from anchor)
    Returns:
    loss -- real number, value of the loss
    """
    print('y_pred.shape =', y_pred.shape)

    total_length = y_pred.shape.as_list()[-1]
    # print('total_length=',  total_length)
    #     total_length =12

    anchor = y_pred[:, 0:int(total_length * 1 / 3)]
    positive = y_pred[:, int(total_length * 1 / 3):int(total_length * 2 / 3)]
    negative = y_pred[:, int(total_length * 2 / 3):int(total_length * 3 / 3)]

    # distance between the anchor and the positive
    pos_dist = K.abs(K.sum(K.square(anchor - positive), axis=1))
    print(pos_dist)
    # distance between the anchor and the negative
    neg_dist = K.abs(K.sum(K.square(anchor - negative), axis=1))
    print(neg_dist)
    # compute loss
    basic_loss = pos_dist - neg_dist + alpha  # add the margin alpha before clamping at zero
    loss = K.maximum(basic_loss, 0.0)

    return loss
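A minimal sketch of wiring this loss into a model, assuming the network's single output is the concatenation of the anchor, positive and negative encodings (so the slicing into thirds above applies); the shared encoder, layer sizes and names below are illustrative, not from the original project.

# Hedged usage sketch: embedding_dim = 4 gives total_length == 12, matching the comment above.
from keras.layers import Input, Dense, concatenate
from keras.models import Model

embedding_dim = 4
anchor_in = Input(shape=(16,))
positive_in = Input(shape=(16,))
negative_in = Input(shape=(16,))
encoder = Dense(embedding_dim, activation='relu')      # shared embedding network (toy)
merged = concatenate([encoder(anchor_in), encoder(positive_in), encoder(negative_in)])
siamese = Model([anchor_in, positive_in, negative_in], merged)
siamese.compile(optimizer='adam', loss=triplet_loss)    # y_true is ignored by the loss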
Example #2
    def loss_functions(method="mse"):
        """ loss_function returns the callable object to evaluate the loss.

        # Arguments
            method: String.
            - "mse" for `Mean Squared Error` or
            - "mae" for `Mean Absolute Error` or
            - "se" for `Squared Error` or
            - "ae" for `Absolute Error`.

        # Returns
            Callable function that gets (y_true, y_pred) as the input and
                returns the loss value as the output.

        # Raises
            ValueError if the method is neither one of "mse", "mae", "se", "ae"
                (or their long names) nor the name of a Keras loss.
        """
        if method in ("mse", "mean_squared_error"):
            return lambda y_true, y_pred: K.mean(K.square(y_true - y_pred),
                                                 axis=-1)
        elif method in ("mae", "mean_absolute_error"):
            return lambda y_true, y_pred: K.mean(K.abs(y_true - y_pred),
                                                 axis=-1)
        elif method in ("se", "squared_error"):
            return lambda y_true, y_pred: K.sum(K.square(y_true - y_pred),
                                                axis=-1)
        elif method in ("ae", "absolute_error"):
            return lambda y_true, y_pred: K.sum(K.abs(y_true - y_pred),
                                                axis=-1)
        elif hasattr(k.losses, method):
            # fall back to a built-in Keras loss looked up by name
            # (`k` here is assumed to be the keras package, distinct from the backend alias `K`)
            return getattr(k.losses, method)
        else:
            raise ValueError(
                'Supported losses: Keras loss function or (mse, mae, se, ae)')
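A brief usage sketch, assuming the helper is reachable at module level (it is shown above without its enclosing class); the toy model is illustrative.

# Hedged usage sketch: select a loss callable by name and pass it to compile().
from keras.models import Sequential
from keras.layers import Dense

model = Sequential([Dense(1, input_shape=(8,))])
model.compile(optimizer="adam", loss=loss_functions("mae"))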
Example #3
def tol_equal(f, other, tol=1e-8):
    """Element-wise comparison applied to the `Functional` objects.

    # Arguments
        f: Functional object.
        other: A python number or a tensor or a functional object.
        tol: float, defaults to 1e-8.

    # Returns
        A Functional.
    """
    validate_functional(f)
    assert isinstance(tol, float), "Expected a float for tolerance. "

    inputs = f.inputs.copy()
    if is_functional(other):
        inputs += to_list(other.inputs)
        lmbd = [
            Lambda(lambda x: K.cast_to_floatx(
                K.less_equal(K.abs(x[0] - x[1]), tol)),
                   name=graph_unique_name("tol_equal")) for X in f.outputs
        ]
    else:
        _warn_for_ndarray(other)
        lmbd = [
            Lambda(lambda x: K.cast_to_floatx(
                K.less_equal(K.abs(x - other), tol)),
                   name=graph_unique_name("tol_equal")) for X in f.outputs
        ]

    Functional = f.get_class()
    res = Functional(inputs=unique_tensors(inputs),
                     outputs=_apply_operation(lmbd, f, other),
                     layers=lmbd)
    return res
Example #4
    def _huber_loss(self, y_true, y_pred, clip_delta=1.0):
        error = y_true - y_pred
        cond = K.abs(error) <= clip_delta

        squared_loss = 0.5 * K.square(error)
        # beyond clip_delta the Huber loss grows linearly in |error|
        linear_loss = 0.5 * K.square(clip_delta) + clip_delta * (
            K.abs(error) - clip_delta)

        return K.mean(tf.where(cond, squared_loss, linear_loss))
Example #5
def huber_loss(y_true, y_pred):
    err = y_true - y_pred

    cond = K.abs(err) < HUBER_LOSS_DELTA
    L2 = 0.5 * K.square(err)
    L1 = HUBER_LOSS_DELTA * (K.abs(err) - 0.5 * HUBER_LOSS_DELTA)

    loss = tf.compat.v1.where(cond, L2, L1)

    return K.mean(loss)
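A minimal usage sketch, assuming the module-level constant HUBER_LOSS_DELTA and the usual `import tensorflow as tf` / `from keras import backend as K` imports; the toy regression model and the delta value are illustrative.

# Hedged usage sketch.
from keras.models import Sequential
from keras.layers import Dense

HUBER_LOSS_DELTA = 1.0                 # illustrative; defined elsewhere in the original module
model = Sequential([Dense(1, input_shape=(4,))])
model.compile(optimizer='adam', loss=huber_loss)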
Example #6
def DeltaLayer(encoded_l, encoded_r, negateDiffs=False):
    """
  A Layer which computes all possible absolute differences of
  all pixels. Input are two feature volumes, e.g. result of a conv layer
  Hints:
  - The Reshape reshapes a matrix row-wise, that means,

    Reshape( (6,1) ) ([ 1 2 3
                      4 5 6]) is

                      1
                      2
                      3
                      4
                      5
                      6
  - Algorithm:
    - The left  leg is reshaped to a w*h x 1  column vector (for each channel)
    - The right leg is reshaped to a  1 x w*h row vector (for each channel)
    - The left is tiled along the column axis, so from w*h x 1 to w*h x w*h (per channel)
    - The right is tiled along row axis, so from 1 x w*h to w*h x w*h
    - The absolute difference is calculated
  Args:
      encoded_l, encoded_r : left and right image tensor (batchsize,w,h,channels)
                             must have same size
      negateDiffs: if True then not abs(diffs), but -abs(diffs) is returned.
                   Default: False
  Returns:
      difference tensor, has size (batchsize, w*h, w*h, channels)
  """
    w = encoded_l.shape[1]
    h = encoded_l.shape[2]
    chan = encoded_l.shape[3]
    reshapel = Reshape((w * h, 1, chan))  # reshape layer
    reshaped_l = reshapel(encoded_l)
    reshaper = Reshape((1, w * h, chan))
    reshaped_r = reshaper(encoded_r)

    # Four dimensions are used because the first dimension is the batch axis,
    # i.e. Reshape already outputs a 4-D tensor.
    tiled_l = Lambda(lambda x: K.tile(x, [1, 1, w * h, 1]))(reshaped_l)
    tiled_r = Lambda(lambda x: K.tile(x, [1, w * h, 1, 1]))(reshaped_r)

    if negateDiffs:
        diff = Lambda(lambda x: -K.abs(x[0] - x[1]))([tiled_l, tiled_r])
    else:
        diff = Lambda(lambda x: K.abs(x[0] - x[1]))([tiled_l, tiled_r])

    # print("diff类型+++++++++++++", diff)

    return diff
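A short usage sketch for DeltaLayer, assuming the same Reshape/Lambda/K imports used above; the shared conv leg and all sizes are illustrative.

# Hedged usage sketch: 16x16 single-channel inputs give a (batch, 256, 256, 8) difference volume.
from keras.layers import Input, Conv2D
from keras.models import Model

left_in = Input(shape=(16, 16, 1))
right_in = Input(shape=(16, 16, 1))
conv = Conv2D(8, (3, 3), padding='same', activation='relu')   # shared conv leg
diff = DeltaLayer(conv(left_in), conv(right_in))
model = Model([left_in, right_in], diff)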
Example #7
    def call(self, inputs):
        _, kernel_b = xnorize(self.kernel, self.H)
        _, inputs_b = xnorize(inputs)
        outputs = K.conv2d(inputs_b,
                           kernel_b,
                           strides=self.strides,
                           padding=self.padding,
                           data_format=self.data_format,
                           dilation_rate=self.dilation_rate)

        # calculate Wa and xa

        # kernel_a
        mask = K.reshape(
            self.kernel,
            (-1,
             self.filters))  # self.nb_row * self.nb_col * channels, filters
        kernel_a = K.stop_gradient(K.mean(K.abs(mask), axis=0))  # filters

        # inputs_a
        if self.data_format == 'channels_first':
            channel_axis = 1
        else:
            channel_axis = -1
        mask = K.mean(K.abs(inputs), axis=channel_axis, keepdims=True)
        ones = K.ones(self.kernel_size + (1, 1))
        inputs_a = K.conv2d(mask,
                            ones,
                            strides=self.strides,
                            padding=self.padding,
                            data_format=self.data_format,
                            dilation_rate=self.dilation_rate
                            )  # nb_sample, 1, new_nb_row, new_nb_col
        if self.data_format == 'channels_first':
            outputs = outputs * K.stop_gradient(inputs_a) * K.expand_dims(
                K.expand_dims(K.expand_dims(kernel_a, 0), -1), -1)
        else:
            outputs = outputs * K.stop_gradient(inputs_a) * K.expand_dims(
                K.expand_dims(K.expand_dims(kernel_a, 0), 0), 0)

        if self.use_bias:
            outputs = K.bias_add(outputs,
                                 self.bias,
                                 data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)
        return outputs
Example #8
    def call(self, x):
        if len(x) != 2:
            raise Exception('input layers must be a list: mean and stddev')
        if len(x[0].shape) != 2 or len(x[1].shape) != 2:
            raise Exception('input shape is not a vector [batchSize, latentSize]')

        mean = x[0]
        stddev = x[1]

        if self.reg == 'bvae':
            # kl divergence:
            latent_loss = -0.5 * K.mean(1 + stddev
                                - K.square(mean)
                                - K.exp(stddev), axis=-1)
            # use beta to force less usage of vector space:
            # also try to use <capacity> dimensions of the space:
            latent_loss = self.beta * K.abs(latent_loss - self.capacity/self.shape.as_list()[1])
            self.add_loss(latent_loss, x)
        elif self.reg == 'vae':
            # kl divergence:
            latent_loss = -0.5 * K.mean(1 + stddev
                                - K.square(mean)
                                - K.exp(stddev), axis=-1)
            self.add_loss(latent_loss, x)

        epsilon = K.random_normal(shape=self.shape,
                              mean=0., stddev=1.)
        if self.random:
            # 'reparameterization trick':
            return mean + K.exp(stddev) * epsilon
        else: # do not perform random sampling, simply grab the impulse value
            return mean + 0*stddev # Keras needs the *0 so the gradient is not None
Example #9
    def call(self, x):
        if len(x) != 2:
            raise Exception('input layers must be a list: mean and stddev')
        if len(x[0].shape) != 2 or len(x[1].shape) != 2:
            raise Exception(
                'input shape is not a vector [batchSize, latentSize]')

        mean = x[0]
        stddev = x[1]

        if self.reg == 'bvae':
            # kl divergence:
            latent_loss = -0.5 * K.mean(
                1 + stddev - K.square(mean) - K.exp(stddev), axis=-1)
            # use beta to force less usage of vector space:
            # also try to use <capacity> dimensions of the space:
            latent_loss = self.beta * K.abs(latent_loss - self.capacity /
                                            self.shape.as_list()[1])
            self.add_loss(latent_loss, x)
        elif self.reg == 'vae':
            # kl divergence:
            latent_loss = -0.5 * K.mean(
                1 + stddev - K.square(mean) - K.exp(stddev), axis=-1)
            self.add_loss(latent_loss, x)
        if self.random:
            # 'reparameterization trick':
            epsilon = K.random_normal(shape=(self.batchSize, self.latentSize),
                                      mean=0.,
                                      stddev=1.)
            return mean + K.exp(stddev) * epsilon
        else:  # do not perform random sampling, simply grab the impulse value
            return mean + 0 * stddev  # Keras needs the *0 so the gradient is not None
Example #10
def define_deepDream_model_layerBased(model):
    dream = model.input
    print('Model loaded.')

    # Get the symbolic outputs of each "key" layer (we gave them unique names).
    layer_dict = dict([(layer.name, layer) for layer in model.layers])

    # Define the loss.
    loss = K.variable(0.)
    for layer_name in settings['features']:
        # Add the L2 norm of the features of a layer to the loss.
        if layer_name not in layer_dict:
            raise ValueError('Layer ' + layer_name + ' not found in model.')
        coeff = settings['features'][layer_name]
        x = layer_dict[layer_name].output
        # We avoid border artifacts by only involving non-border pixels in the loss.
        scaling = K.prod(K.cast(K.shape(x), 'float32'))
        if K.image_data_format() == 'channels_first':
            loss = loss + coeff * K.sum(K.square(x[:, :, 2:-2,
                                                   2:-2])) / scaling
        else:
            loss = loss + coeff * K.sum(K.square(x[:, 2:-2,
                                                   2:-2, :])) / scaling

    # Compute the gradients of the dream wrt the loss.
    grads = K.gradients(loss, dream)[0]
    # Normalize gradients.
    grads /= K.maximum(K.mean(K.abs(grads)), K.epsilon())

    # Set up function to retrieve the value
    # of the loss and gradients given an input image.
    outputs = [loss, grads]
    fetch_loss_and_grads = K.function([dream], outputs)
Example #11
    def __init__(self, model, layer_name):
        self.model = model
        self.layer_name = layer_name

        dream = model.input
        # Get the symbolic outputs of each "key" layer (we gave them unique names).
        layers_all = [layer.name for layer in model.layers]
        if layer_name not in layers_all:
            raise ValueError('Layer ' + layer_name + ' not found in model.')

        # Define the loss.
        loss = K.variable(0.)
        for layer_local in model.layers:
            if layer_local.name == layer_name:
                x = layer_local.output

                # We avoid border artifacts by only involving non-border pixels in the loss.
                if K.image_data_format() == 'channels_first':
                    scaling = K.prod(K.cast(K.shape(x), 'float32'))
                    loss = loss + K.sum(K.square(x[:, :, 2:-2,
                                                   2:-2])) / scaling
                else:
                    scaling = K.prod(K.cast(K.shape(x), 'float32'))
                    loss = loss + K.sum(K.square(x[:, 2:-2,
                                                   2:-2, :])) / scaling

        # Compute the gradients of the dream wrt the loss.
        grads = K.gradients(loss, dream)[0]
        # Normalize gradients.
        grads /= K.maximum(K.mean(K.abs(grads)), K.epsilon())

        # Set up function to retrieve the value
        # of the loss and gradients given an input image.
        outputs = [loss, grads]
        self.fetch_loss_and_grads = K.function([dream], outputs)
Example #12
    def __init__(self):

        tf.logging.set_verbosity(tf.logging.ERROR)

        self.__DIMEN = 48

        input_shape = ((self.__DIMEN**2) * 3, )
        convolution_shape = (self.__DIMEN, self.__DIMEN, 3)

        kernel_size_1 = (8, 8)
        kernel_size_2 = (6, 6)
        kernel_size_3 = (4, 4)

        pool_size_1 = (6, 6)
        pool_size_2 = (4, 4)

        strides = 1

        seq_conv_model = [
            tf.keras.layers.Reshape(input_shape=input_shape,
                                    target_shape=convolution_shape),
            tf.keras.layers.Conv2D(32,
                                   kernel_size=kernel_size_1,
                                   strides=strides,
                                   activation='relu'),
            tf.keras.layers.MaxPooling2D(pool_size=pool_size_1,
                                         strides=strides),
            tf.keras.layers.Conv2D(64,
                                   kernel_size=kernel_size_2,
                                   strides=strides,
                                   activation='relu'),
            tf.keras.layers.MaxPooling2D(pool_size=pool_size_2,
                                         strides=strides),
            tf.keras.layers.Conv2D(128,
                                   kernel_size=kernel_size_3,
                                   strides=strides,
                                   activation='relu'),
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(3076,
                                  activation=tf.keras.activations.sigmoid)
        ]

        seq_model = tf.keras.Sequential(seq_conv_model)

        input_x1 = tf.keras.layers.Input(shape=input_shape)
        input_x2 = tf.keras.layers.Input(shape=input_shape)

        output_x1 = seq_model(input_x1)
        output_x2 = seq_model(input_x2)

        distance_euclid = tf.keras.layers.Lambda(
            lambda tensors: K.abs(tensors[0] - tensors[1]))(
                [output_x1, output_x2])
        outputs = tf.keras.layers.Dense(
            1, activation=tf.keras.activations.sigmoid)(distance_euclid)
        self.__model = tf.keras.models.Model([input_x1, input_x2], outputs)

        self.__model.compile(loss=tf.keras.losses.binary_crossentropy,
                             optimizer=tf.keras.optimizers.Adam(lr=0.0001),
                             metrics=['accuracy'])
Example #13
 def create_model(self):
     K.clear_session()
     input0 = Input(shape=(self.c['sentencepad'], self.c['wordvectdim']))
     input1 = Input(shape=(self.c['sentencepad'], self.c['wordvectdim']))
     Convolt_Layer = []
     MaxPool_Layer = []
     Flatten_Layer = []
     for kernel_size, filters in self.c['cnnfilters'].items():
         Convolt_Layer.append(
             Convolution1D(filters=filters,
                           kernel_size=kernel_size,
                           padding='valid',
                           activation=self.c['cnnactivate'],
                           kernel_initializer=self.c['cnninitial']))
         MaxPool_Layer.append(
             MaxPooling1D(pool_size=int(self.c['sentencepad'] -
                                        kernel_size + 1)))
         Flatten_Layer.append(Flatten())
     Convolted_tensor0 = []
     Convolted_tensor1 = []
     for channel in range(len(self.c['cnnfilters'])):
         Convolted_tensor0.append(Convolt_Layer[channel](input0))
         Convolted_tensor1.append(Convolt_Layer[channel](input1))
     MaxPooled_tensor0 = []
     MaxPooled_tensor1 = []
     for channel in range(len(self.c['cnnfilters'])):
         MaxPooled_tensor0.append(MaxPool_Layer[channel](
             Convolted_tensor0[channel]))
         MaxPooled_tensor1.append(MaxPool_Layer[channel](
             Convolted_tensor1[channel]))
     Flattened_tensor0 = []
     Flattened_tensor1 = []
     for channel in range(len(self.c['cnnfilters'])):
         Flattened_tensor0.append(Flatten_Layer[channel](
             MaxPooled_tensor0[channel]))
         Flattened_tensor1.append(Flatten_Layer[channel](
             MaxPooled_tensor1[channel]))
     if len(self.c['cnnfilters']) > 1:
         Flattened_tensor0 = concatenate(Flattened_tensor0)
         Flattened_tensor1 = concatenate(Flattened_tensor1)
     else:
         Flattened_tensor0 = Flattened_tensor0[0]
         Flattened_tensor1 = Flattened_tensor1[0]
     absDifference = Lambda(lambda X: K.abs(X[0] - X[1]))(
         [Flattened_tensor0, Flattened_tensor1])
     mulDifference = multiply([Flattened_tensor0, Flattened_tensor1])
     allDifference = concatenate([absDifference, mulDifference])
     for ilayer, densedimension in enumerate(self.c['densedimension']):
         allDifference = Dense(
             units=int(densedimension),
             activation=self.c['denseactivate'],
             kernel_initializer=self.c['denseinitial'])(allDifference)
     output = Dense(
         name='output',
         units=self.c['num_classes'],
         activation='softmax',
         kernel_initializer=self.c['denseinitial'])(allDifference)
     self.model = Model(inputs=[input0, input1], outputs=output)
     self.model.compile(loss='mean_squared_error',
                        optimizer=self.c['optimizer'])
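The builder above is driven entirely by the `self.c` configuration dict; below is a hedged sketch of what such a dict might look like. Only the keys are inferred from the code; every value is purely illustrative.

# Hypothetical configuration for create_model().
config = {
    'sentencepad': 60,                       # padded sentence length
    'wordvectdim': 300,                      # word-vector dimensionality
    'cnnfilters': {2: 100, 3: 100, 4: 100},  # kernel_size -> number of filters
    'cnnactivate': 'relu',
    'cnninitial': 'glorot_uniform',
    'densedimension': [128, 64],
    'denseactivate': 'relu',
    'denseinitial': 'glorot_uniform',
    'num_classes': 2,
    'optimizer': 'adam',
}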
Example #14
 def additional_generator_losses(self):
     loss_list = super(CGAN, self).additional_generator_losses()
     l1_loss = self.l1_weight_penalty * K.mean(
         K.abs(self.gt_image_placeholder - self.generator_output[0]))
     loss_list.append(l1_loss)
     self.generator_metric_names.append('l1')
     return loss_list
Example #15
def smooth_l1(y_true, y_pred, sigma=3.0, axis=None):
    """Compute the smooth L1 loss of y_pred w.r.t. y_true.

    Args:
        y_true: Tensor from the generator of shape (B, N, 5).
            The last value for each box is the state of the anchor
            (ignore, negative, positive).
        y_pred: Tensor from the network of shape (B, N, 4).
        sigma: Controls where the loss switches from L2 to L1 (at |x| = 1 / sigma^2).

    Returns:
        The smooth L1 loss of y_pred w.r.t. y_true.
    """
    if axis is None:
        axis = 1 if K.image_data_format(
        ) == 'channels_first' else K.ndim(y_pred) - 1

    sigma_squared = sigma**2

    # compute smooth L1 loss
    # f(x) = 0.5 * (sigma * x)^2          if |x| < 1 / sigma / sigma
    #        |x| - 0.5 / sigma / sigma    otherwise
    regression_diff = K.abs(y_true - y_pred)  # |y - f(x)|

    regression_loss = tf.where(K.less(regression_diff, 1.0 / sigma_squared),
                               0.5 * sigma_squared * K.pow(regression_diff, 2),
                               regression_diff - 0.5 / sigma_squared)
    return K.sum(regression_loss, axis=axis)
Example #16
    def build_loss(self):
        # Infinity norm
        if np.isinf(self.p):
            value = K.max(self.img)
        else:
            value = K.pow(K.sum(K.pow(K.abs(self.img), self.p)), 1. / self.p)

        return normalize(self.img, value)
Example #17
def smooth_l1_loss(y_true, y_pred):
    """Implements Smooth-L1 loss.
    y_true and y_pred are typically: [N, 4], but could be any shape.
    """
    diff = K.abs(y_true - y_pred)
    less_than_one = K.cast(K.less(diff, 1.0), "float32")
    loss = (less_than_one * 0.5 * diff**2) + (1 - less_than_one) * (diff - 0.5)
    return loss
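A quick sanity check of the piecewise behaviour (quadratic below |diff| = 1, linear above), assuming a TensorFlow backend.

# Hedged check: expected output is [[0.125 1.5 0.02 2.5]].
import numpy as np
from keras import backend as K

y_true = K.constant(np.zeros((1, 4), dtype='float32'))
y_pred = K.constant(np.array([[0.5, 2.0, -0.2, 3.0]], dtype='float32'))
print(K.eval(smooth_l1_loss(y_true, y_pred)))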
Example #18
    def call(self, x):
        y_pred = x[0]
        y_recont_gt = x[1]
        y_prob_pred = tf.squeeze(x[2], axis=3)
        y_prob_gt = x[3]
        visible = tf.cast(y_prob_gt > 0.5, y_pred.dtype)
        visible = tf.squeeze(visible, axis=3)
        #generate transformed values using sym
        if (len(self.sym) > 1):
            #if(True):
            for sym_id, transform in enumerate(self.sym):  #3x3 matrix
                tf_mat = tf.convert_to_tensor(transform, y_recont_gt.dtype)
                y_gt_transformed = tf.transpose(
                    tf.matmul(tf_mat,
                              tf.transpose(tf.reshape(y_recont_gt, [-1, 3]))))
                y_gt_transformed = tf.reshape(y_gt_transformed,
                                              [-1, 128, 128, 3])
                loss_xyz_temp = K.sum(K.abs(y_gt_transformed - y_pred),
                                      axis=3) / 3
                loss_sum = K.sum(loss_xyz_temp, axis=[1, 2])
                if (sym_id > 0):
                    loss_sums = tf.concat(
                        [loss_sums,
                         tf.expand_dims(loss_sum, axis=0)], axis=0)
                    loss_xyzs = tf.concat(
                        [loss_xyzs,
                         tf.expand_dims(loss_xyz_temp, axis=0)],
                        axis=0)
                else:
                    loss_sums = tf.expand_dims(loss_sum, axis=0)
                    loss_xyzs = tf.expand_dims(loss_xyz_temp, axis=0)

            min_values = tf.reduce_min(loss_sums, axis=0, keepdims=True)
            loss_switch = tf.cast(tf.equal(loss_sums, min_values),
                                  y_pred.dtype)
            loss_xyz = tf.expand_dims(tf.expand_dims(loss_switch, axis=2),
                                      axis=3) * loss_xyzs
            loss_xyz = K.sum(loss_xyz, axis=0)
        else:
            loss_xyz = K.sum(K.abs(y_recont_gt - y_pred), axis=3) / 3
        prob_loss = K.square(y_prob_pred - K.minimum(loss_xyz, 1))
        loss_invisible = (1 - visible) * loss_xyz
        loss_visible = visible * loss_xyz
        loss = loss_visible * 3 + loss_invisible + 0.5 * prob_loss
        loss = K.mean(loss, axis=[1, 2])
        return loss
Example #19
 def __call__(self, x):
     regularization = 0
     if self.l1:
         regularization += K.sum(self.l1 * K.abs(x))
     if self.l2:
         regularization += K.sum(self.l2 * K.square(x))
     if self.tv:
         regularization += K.sum(self.tv * K.square(x[:, 1:] - x[:, :-1]))
     return regularization
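The `__call__` above belongs to a custom regularizer; here is a minimal self-contained sketch of such a class and how it would be attached to a layer. The class name and the coefficient values are assumptions.

# Hedged sketch of a minimal regularizer wrapping the __call__ logic above.
from keras import backend as K
from keras.layers import Dense

class L1L2TVRegularizer:
    def __init__(self, l1=0., l2=0., tv=0.):
        self.l1, self.l2, self.tv = l1, l2, tv

    def __call__(self, x):
        regularization = 0
        if self.l1:
            regularization += K.sum(self.l1 * K.abs(x))
        if self.l2:
            regularization += K.sum(self.l2 * K.square(x))
        if self.tv:
            regularization += K.sum(self.tv * K.square(x[:, 1:] - x[:, :-1]))
        return regularization

dense = Dense(32, kernel_regularizer=L1L2TVRegularizer(l1=1e-5, tv=1e-4))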
Example #20
def define_rnn_network(hidden_state_dim, timepoints, timepoint_step):
    rnn_input = layers.Input(shape=(40, ), name="g_input")
    x = layers.Dense(64, activation="relu")(rnn_input)
    x = layers.Dense(64, activation="relu")(x)
    # The last output predicts the initial state for the RNN.
    gru_state = layers.Dense(hidden_state_dim, activation="relu")(x)

    # The RNN state is passed through a separate regressor to obtain the (mean,
    # scale) for each of the 4 dimensions.
    gru = layers.GRU(hidden_state_dim)
    intermediate_regressor = layers.Dense(64, activation="relu")
    mean_and_scale_regressor = layers.Dense(8,
                                            activation="linear",
                                            name="mean_and_scale_regressor")
    # Ensure the regressed scale is positive; also reshape to Bx8 -> Bx1x8.
    kSigmaMin = 1e-3  # avoid poor conditioning
    absolute_scale_op = layers.Lambda(lambda x: K.concatenate(
        (x[:, :4], K.abs(x[:, 4:]) + kSigmaMin), axis=1)[:, tf.newaxis, :])
    output_regressor = lambda x: \
        absolute_scale_op(mean_and_scale_regressor(intermediate_regressor(x)))

    current_output = output_regressor(gru_state)  # predict the first output
    t = timepoint_step
    next_timepoint_idx = 0

    outputs = []

    while t <= timepoints[-1] or np.isclose(t, timepoints[-1]):
        if np.isclose(t, timepoints[next_timepoint_idx]):
            outputs.append(current_output)
            t = timepoints[next_timepoint_idx]  # avoid any accumulative drift
            next_timepoint_idx += 1
            if next_timepoint_idx == len(timepoints):
                break
        #current_output_with_time = layers.Lambda(
        #    lambda x: K.concatenate((x, K.zeros_like(x)[:,:,:1] + t)))(
        #    current_output)
        gru_state = gru(current_output, initial_state=gru_state)
        current_output = output_regressor(gru_state)
        t += timepoint_step

    assert (len(outputs) == len(timepoints))

    # Join the T [Bx1x8] outputs into a Bx8xT tensor, then split in half down
    # the first axis and reform into a Bx4xTx2 tensor.
    def rejoin_op(x):
        x = K.stack(x, axis=-1)
        return K.stack((x[:, 0, :4, :], x[:, 0, 4:, :]), axis=-1)

    output = layers.Lambda(rejoin_op, name="transforms")(outputs)

    M = models.Model(inputs=[rnn_input],
                     outputs=[output],
                     name='rnn_regressor')

    return M
Example #21
def _signed_sqrt(x):
    '''Calculate element-wise signed square-root.

    Args:
        x: input tensor.

    Returns:
        Element-wise signed square-root tensor.
    '''
    return keras_backend.sign(x) * keras_backend.sqrt(keras_backend.abs(x) + 1e-9)
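A brief usage sketch, e.g. as a normalisation step inside a Lambda layer; it assumes `keras_backend` is the Keras backend module imported by the original module, and the feature size is illustrative.

# Hedged usage sketch: element-wise signed square-root applied to a feature vector.
from keras.layers import Input, Lambda
from keras.models import Model

features = Input(shape=(256,))
normalised = Lambda(_signed_sqrt)(features)
model = Model(features, normalised)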
Example #22
def weighted_bce_loss(y_true, y_pred, weight):
    # avoiding overflow
    epsilon = 1e-7
    y_pred = K.clip(y_pred, epsilon, 1. - epsilon)
    logit_y_pred = K.log(y_pred / (1. - y_pred))

    # https://www.tensorflow.org/api_docs/python/tf/nn/weighted_cross_entropy_with_logits
    loss = (1. - y_true) * logit_y_pred + (1. + (weight - 1.) * y_true) * \
                                          (K.log(1. + K.exp(-K.abs(logit_y_pred))) + K.maximum(-logit_y_pred, 0.))
    return K.sum(loss) / K.sum(weight)
Example #23
 def _compare(row1, row2):
     ### compare two rows
     #   shape (5),(5)
     #   comparison: by type + count
     _sum = tf.constant(0., dtype=tf.float32)
     _cnt = tf.constant(0., dtype=tf.float32)
     #   type: pairwise terms / total count
     r = 0
     while (r < board_size):
         c = 0
         vr = tf.slice(row1, [r], [1])
         vr = K.squeeze(vr, -1)
         while (c < board_size):
             vc = tf.slice(row2, [c], [1])
             vc = K.squeeze(vc, -1)
             calc = K.abs(vr + vc) / (K.abs(vr - vc) + K.epsilon())
             calc = K.clip(calc, 1, 9)  # clip to [1, 9]
             calc = calc - 0.5 + K.abs(vr + vc) * 0.001
             _sum = tf.cond(
                 tf.logical_or(tf.not_equal(vc, 0), tf.not_equal(vr, 0)),
                 lambda: _sum + calc, lambda: _sum)
             _cnt = tf.cond(
                 tf.logical_or(tf.not_equal(vc, 0), tf.not_equal(vr, 0)),
                 lambda: _cnt + 1, lambda: _cnt)
             c = c + 1
         r = r + 1
     cnt3 = _compare_cnt(row1, row2)
     cnt3 = tf.cond(tf.equal(cnt3, 0), lambda: cnt3 + 1, lambda: cnt3)
     output = tf.cond(tf.equal(_cnt, 0), lambda: _sum,
                      lambda: tf.squeeze(_sum * cnt3 / _cnt))
     ### inspect the row-vs-row comparison result
     #        if EmbeddingsLayer.debug:
     #            logging.getLogger().info("\n--row1  %s" % (row1))
     #            logging.getLogger().info("--row2  %s" % (row2))
     #            logging.getLogger().info("--sum  %s" % (_sum))
     #            logging.getLogger().info("--cnt  %s" % (_cnt))
     #            logging.getLogger().info("--compare  %s" % (output))
     ###
     ### output the probability directly:
     ###   0.7 worked best in manual testing
     output = output * 0.7
     return output
Example #24
    def __init__(self):

        tf.logging.set_verbosity(tf.logging.ERROR)

        self.__DIMEN = 128

        input_shape = ((self.__DIMEN**2) * 3, )
        convolution_shape = (self.__DIMEN, self.__DIMEN, 3)
        kernel_size_1 = (4, 4)
        kernel_size_2 = (3, 3)
        pool_size_1 = (3, 3)
        pool_size_2 = (2, 2)
        strides = 1

        seq_conv_model = [
            Reshape(input_shape=input_shape, target_shape=convolution_shape),
            Conv2D(32,
                   kernel_size=kernel_size_1,
                   strides=strides,
                   activation=activations.leaky_relu),
            Conv2D(32,
                   kernel_size=kernel_size_1,
                   strides=strides,
                   activation=activations.leaky_relu),
            MaxPooling2D(pool_size=pool_size_1, strides=strides),
            Conv2D(64,
                   kernel_size=kernel_size_2,
                   strides=strides,
                   activation=activations.leaky_relu),
            Conv2D(64,
                   kernel_size=kernel_size_2,
                   strides=strides,
                   activation=activations.leaky_relu),
            MaxPooling2D(pool_size=pool_size_2, strides=strides),
            Flatten(),
            Dense(64, activation=activations.sigmoid)
        ]

        seq_model = tf.keras.Sequential(seq_conv_model)

        input_x1 = Input(shape=input_shape)
        input_x2 = Input(shape=input_shape)

        output_x1 = seq_model(input_x1)
        output_x2 = seq_model(input_x2)

        distance_euclid = Lambda(
            lambda tensors: K.abs(tensors[0] - tensors[1]))(
                [output_x1, output_x2])
        outputs = Dense(1, activation=activations.sigmoid)(distance_euclid)
        self.__model = models.Model([input_x1, input_x2], outputs)

        self.__model.compile(loss=losses.binary_crossentropy,
                             optimizer=optimizers.Adam(lr=0.0001))
Example #25
def cycle_loss(y_true, y_pred):
    if k.image_data_format() == 'channels_first':
        x_w = 2
        x_h = 3
    else:
        x_w = 1
        x_h = 2
    loss = k.abs(y_true - y_pred)
    loss = k.sum(loss, axis=x_h)
    loss = k.sum(loss, axis=x_w)
    return loss
Example #26
def not_equal(f, other, tol=None):
    """Element-wise comparison applied to the `Functional` objects.

    # Arguments
        f: Functional object.
        other: A python number or a tensor or a functional object.
        tol: (float) If you need a tolerance measure.

    # Returns
        A Functional.
    """
    validate_functional(f)
    assert isinstance(
        tol, (type(None), float)), 'Expected a floating value for `tol`.'

    inputs = f.inputs.copy()
    if is_functional(other):
        inputs += to_list(other.inputs)
        if tol is None:
            lambda_opr = lambda x: K.cast_to_floatx(K.not_equal(x[0], x[1]))
        else:
            lambda_opr = lambda x: K.cast_to_floatx(
                K.greater(K.abs(x[0] - x[1]), tol))
    else:
        _warn_for_ndarray(other)
        if tol is None:
            lambda_opr = lambda x: K.cast_to_floatx(K.not_equal(x, other))
        else:
            lambda_opr = lambda x: K.cast_to_floatx(
                K.greater(K.abs(x - other), tol))

    lmbd = [
        Lambda(lambda_opr, name=graph_unique_name("not_equal"))
        for X in f.outputs
    ]

    Functional = f.get_class()
    res = Functional(inputs=unique_tensors(inputs),
                     outputs=_apply_operation(lmbd, f, other),
                     layers=lmbd)
    return res
Example #27
 def custom_loss(y_true, y_pred, loss_weights = loss_weights): # Verified
     
     zero_index = K.zeros_like(y_true[:, 0]) 
     ones_index = K.ones_like(y_true[:, 0]) 
     
     # Classifier
     labels = y_true[:, 0] 
     class_preds = y_pred[:, 0] 
     bi_crossentropy_loss = -labels * K.log(class_preds) - (1 - labels) * K.log(1 - class_preds) 
     
     classify_valid_index = tf.where(K.less(y_true[:, 0], 0), zero_index, ones_index) 
     classify_keep_num = K.cast(tf.cast(tf.reduce_sum(classify_valid_index), tf.float32) * SAMPLE_KEEP_RATIO, dtype = tf.int32) 
     # For the classification loss, keep only the hardest SAMPLE_KEEP_RATIO fraction
     # (70%) of the valid samples, i.e. online hard example mining via top_k.
     
     classify_loss_sum = bi_crossentropy_loss * tf.cast(classify_valid_index, bi_crossentropy_loss.dtype) 
     classify_loss_sum_filtered, _ = tf.nn.top_k(classify_loss_sum, k = classify_keep_num) 
     classify_loss = tf.where(K.equal(classify_keep_num, 0), tf.constant(0, dtype = tf.float32), K.mean(classify_loss_sum_filtered)) 
     
     # Bounding box regressor
     rois = y_true[:, 1: 5] 
     roi_preds = y_pred[:, 1: 5] 
     roi_raw_mean_square_error = K.sum(K.square(rois - roi_preds), axis = 1) # mse
     # roi_raw_smooth_l1_loss = K.mean(tf.where(K.abs(rois - roi_preds) < 1, 0.5 * K.square(rois - roi_preds), K.abs(rois - roi_preds) - 0.5)) # L1 Smooth Loss 
     
     roi_valid_index = tf.where(K.equal(K.abs(y_true[:, 0]), 1), ones_index, zero_index) 
     roi_keep_num = K.cast(tf.reduce_sum(roi_valid_index), dtype = tf.int32) 
     
     roi_valid_mean_square_error = roi_raw_mean_square_error * tf.cast(roi_valid_index, roi_raw_mean_square_error.dtype)
     roi_filtered_mean_square_error, _ = tf.nn.top_k(roi_valid_mean_square_error, k = roi_keep_num) 
     roi_loss = tf.where(K.equal(roi_keep_num, 0), tf.constant(0, dtype = tf.float32), K.mean(roi_filtered_mean_square_error)) 
     # roi_valid_smooth_l1_loss = roi_raw_smooth_l1_loss * roi_valid_index
     # roi_filtered_smooth_l1_loss, _ = tf.nn.top_k(roi_valid_smooth_l1_loss, k = roi_keep_num) 
     # roi_loss = K.mean(roi_filtered_smooth_l1_loss) 
     
     # Landmark regressor
     pts = y_true[:, 5: 17] 
     pt_preds = y_pred[:, 5: 17] 
     pts_raw_mean_square_error  = K.sum(K.square(pts - pt_preds), axis = 1) # mse 
     # pts_raw_smooth_l1_loss = K.mean(tf.where(K.abs(pts - pt_preds) < 1, 0.5 * K.square(pts - pt_preds), K.abs(pts - pt_preds) - 0.5)) # L1 Smooth Loss 
     
     pts_valid_index = tf.where(K.equal(y_true[:, 0], -2), ones_index, zero_index) 
     pts_keep_num = K.cast(tf.reduce_sum(pts_valid_index), dtype = tf.int32) 
     
     pts_valid_mean_square_error = pts_raw_mean_square_error * tf.cast(pts_valid_index, tf.float32) 
     pts_filtered_mean_square_error, _ = tf.nn.top_k(pts_valid_mean_square_error, k = pts_keep_num) 
     pts_loss = tf.where(K.equal(pts_keep_num, 0), tf.constant(0, dtype = tf.float32), K.mean(pts_filtered_mean_square_error)) 
     # pts_valid_smooth_l1_loss = pts_raw_smooth_l1_loss * pts_valid_index
     # pts_filtered_smooth_l1_loss, _ = tf.nn.top_k(pts_valid_smooth_l1_loss, k = pts_keep_num) 
     # pts_loss = K.mean(pts_filtered_smooth_l1_loss)
     
     loss = classify_loss * loss_weights[0] + roi_loss * loss_weights[1] + pts_loss * loss_weights[2]
     
     return loss 
Example #28
def dice_coef_loss(y_true, y_pred, smooth=1e-07, label_smoothing=0):
    y_pred = ops.convert_to_tensor(y_pred)
    y_true = math_ops.cast(y_true, y_pred.dtype)
    label_smoothing = ops.convert_to_tensor(label_smoothing, dtype=K.floatx())

    def _smooth_labels():
        return y_true * (1.0 - label_smoothing) + 0.5 * label_smoothing

    y_true = smart_cond.smart_cond(label_smoothing, _smooth_labels,
                                   lambda: y_true)
    return (2. * K.sum(K.abs(y_true * y_pred), axis=-1) + smooth) / (
        K.sum(K.square(y_true), -1) + K.sum(K.square(y_pred), -1) + smooth)
Example #29
    def call(self, inputs, **kwargs):
        W = K.tanh(self.W_hat) * K.sigmoid(self.M_hat)
        a = K.dot(inputs, W)

        if self.nac_only:
            outputs = a
        else:
            m = K.exp(K.dot(K.log(K.abs(inputs) + self.epsilon), W))
            g = K.sigmoid(K.dot(inputs, self.G))
            outputs = g * a + (1. - g) * m

        return outputs
Example #30
def my_sigmoid_loss(y_true, y_pred):
    """ Loss function in form of a mean sigmoid. Used for overlap.
      This is an alternative for mean squard error where
      - the loss for small differences is smaller than squared diff
      - the loss for large errors is kind of equal

   In Matlab:   1./(1+exp(-((diff+0.25)*24-12))), diff is absolute difference
  """

    diff = K.abs(y_pred - y_true)
    sigmoidx = (diff + 0.25) * 24 - 12
    loss = K.mean(1 / (1 + K.exp(-sigmoidx)))

    return loss
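A quick numeric check, assuming a TensorFlow backend: for absolute differences 0, 0.5 and 1.0 the per-element sigmoids are roughly 0.0025, 0.9975 and 1.0, so the mean comes out near 0.667.

# Hedged check of the saturation behaviour described in the docstring.
from keras import backend as K

y_true = K.constant([0.0, 0.0, 0.0])
y_pred = K.constant([0.0, 0.5, 1.0])
print(K.eval(my_sigmoid_loss(y_true, y_pred)))   # ~0.667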
Example #31
def define_poly_network(poly_order, timepoints, past_frames):
    poly_input = layers.Input(shape=(past_frames * 4, ), name="g_input")
    x = layers.Dense(64, activation="relu")(poly_input)
    x = layers.Dense(64, activation="relu")(x)
    x = layers.Dense(64, activation="relu")(x)
    #x = layers.Dense(64, activation="relu")(x)

    # coeffs: for each output dimension, (a, b, c, ..., sigma), where sigma is
    #   the confidence value
    coeffs = layers.Dense(4 * (poly_order + 2),
                          activation="linear",
                          name="coeffs")(x)
    coeffs = layers.Reshape((4, poly_order + 2))(coeffs)

    # timepoints: PxT for P polynomial coefficients, i.e.
    # [  t_0   t_1  ... ]
    # [ t_0^2 t_1^2 ... ]
    # [ ...    ...  ... ]
    timepoints = K.constant([[pow(t, i) for t in timepoints]
                             for i in range(1, poly_order + 1)])

    # generate distribution mean and standard deviation
    # the mean is computed as c_1 * t^P + c_2 * t^{P-1} + ... + c_P * t
    # the std. dev. is computed as |d_0 + d_1 * t| + eps
    kSigmaMin = 1e-3  # avoid poor conditioning
    mu = layers.Lambda(lambda x: K.dot(x[..., :-2], timepoints))(coeffs)
    sigma = layers.Lambda(lambda x: K.abs(x[..., -1, tf.newaxis] * timepoints[
        0, :]) + K.abs(x[..., -2, tf.newaxis]) + kSigmaMin)(coeffs)

    output = layers.Lambda(lambda x: K.stack(x, axis=-1),
                           name="transforms")([mu, sigma])

    M = models.Model(inputs=[poly_input],
                     outputs=[output, coeffs],
                     name='poly_regressor')

    return M