Example #1
    def define_loss(self, netD, real, fake_argb, fake_sz64, distorted, vggface_feat=None):
        alpha = Lambda(lambda x: x[:,:,:, :1])(fake_argb)
        fake_rgb = Lambda(lambda x: x[:,:,:, 1:])(fake_argb)
        fake = alpha * fake_rgb + (1-alpha) * distorted

        if self.use_mixup:
            dist = Beta(self.mixup_alpha, self.mixup_alpha)
            lam = dist.sample()
            # ==========
            mixup = lam * concatenate([real, distorted]) + (1 - lam) * concatenate([fake, distorted])
            # ==========
            output_mixup = netD(mixup)
            loss_D = self.loss_fn(output_mixup, lam * K.ones_like(output_mixup))
            #output_fake = netD(concatenate([fake, distorted])) # dummy
            loss_G = 1 * self.loss_fn(output_mixup, (1 - lam) * K.ones_like(output_mixup))
        else:
            output_real = netD(concatenate([real, distorted])) # positive sample
            output_fake = netD(concatenate([fake, distorted])) # negative sample
            loss_D_real = self.loss_fn(output_real, K.ones_like(output_real))
            loss_D_fake = self.loss_fn(output_fake, K.zeros_like(output_fake))
            loss_D = loss_D_real + loss_D_fake
            loss_G = 1 * self.loss_fn(output_fake, K.ones_like(output_fake))
        # ==========
        if self.use_mask_refinement:
            loss_G += K.mean(K.abs(fake - real))
        else:
            loss_G += K.mean(K.abs(fake_rgb - real))
        loss_G += K.mean(K.abs(fake_sz64 - tf.image.resize_images(real, [64, 64])))
        # ==========

        # Perceptual Loss
        if vggface_feat is not None:
            def preprocess_vggface(x):
                x = (x + 1)/2 * 255 # channel order: BGR
                x -= [93.5940, 104.7624, 129.]
                return x
            pl_params = (0.02, 0.3, 0.5)
            real_sz224 = tf.image.resize_images(real, [224, 224])
            real_sz224 = Lambda(preprocess_vggface)(real_sz224)
            # ==========
            if self.use_mask_refinement:
                fake_sz224 = tf.image.resize_images(fake, [224, 224])
            else:
                fake_sz224 = tf.image.resize_images(fake_rgb, [224, 224])
            fake_sz224 = Lambda(preprocess_vggface)(fake_sz224)
            # ==========
            real_feat55, real_feat28, real_feat7 = vggface_feat(real_sz224)
            fake_feat55, fake_feat28, fake_feat7  = vggface_feat(fake_sz224)
            loss_G += pl_params[0] * K.mean(K.abs(fake_feat7 - real_feat7))
            loss_G += pl_params[1] * K.mean(K.abs(fake_feat28 - real_feat28))
            loss_G += pl_params[2] * K.mean(K.abs(fake_feat55 - real_feat55))

        return loss_D, loss_G
Example #2
def netG_loss(G_tensors, loss_weight=10):
    netD_A_predict_fake, rec_A, G_A_input, netD_B_predict_fake, rec_B, G_B_input = G_tensors

    loss_G_B = criterion_GAN(netD_A_predict_fake, K.ones_like(netD_A_predict_fake))
    loss_cyc_A = criterion_cycle(rec_A, G_A_input)

    loss_G_A = criterion_GAN(netD_B_predict_fake, K.ones_like(netD_B_predict_fake))
    loss_cyc_B = criterion_cycle(rec_B, G_B_input)

    loss_G = loss_G_A + loss_G_B + loss_weight * (loss_cyc_A + loss_cyc_B)

    return loss_G
Example #3
def iou(x_true, y_true, w_true, h_true, x_pred, y_pred, w_pred, h_pred, t, pred_confid_tf):
    x_true = K.expand_dims(x_true, 2)
    y_true = K.expand_dims(y_true, 2)
    w_true = K.expand_dims(w_true, 2)
    h_true = K.expand_dims(h_true, 2)
    x_pred = K.expand_dims(x_pred, 2)
    y_pred = K.expand_dims(y_pred, 2)
    w_pred = K.expand_dims(w_pred, 2)
    h_pred = K.expand_dims(h_pred, 2)

    xoffset = K.expand_dims(tf.convert_to_tensor(np.asarray([0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7], dtype=np.float32)),1)
    yoffset = K.expand_dims(tf.convert_to_tensor(np.asarray([0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4], dtype=np.float32)),1)


    # xoffset = K.cast_to_floatx((np.tile(np.arange(side),side)))
    # yoffset = K.cast_to_floatx((np.repeat(np.arange(side),side)))
    x = tf.where(t, x_pred, K.zeros_like(x_pred))
    y = tf.where(t, y_pred, K.zeros_like(y_pred))
    w = tf.where(t, w_pred, K.zeros_like(w_pred))
    h = tf.where(t, h_pred, K.zeros_like(h_pred))

    ow = overlap(x + xoffset, w * 256., x_true + xoffset, w_true * 256.)
    oh = overlap(y + yoffset, h * 160., y_true + yoffset, h_true * 160.)  # heights use the 160-pixel scale (256. was a typo), matching the union term below

    ow = tf.where(K.greater(ow, 0), ow, K.zeros_like(ow))
    oh = tf.where(K.greater(oh, 0), oh, K.zeros_like(oh))
    intersection = ow * oh
    union = w * 256. * h * 160. + w_true * 256. * h_true * 160.  - intersection + K.epsilon()  # prevent div 0

    # find the best IoU among bboxes
    # iouall shape = (-1, bnum * gridcells)
    iouall = intersection / union
    obj_count = K.sum(tf.where(t, K.ones_like(x_true), K.zeros_like(x_true)))

    ave_iou = K.sum(iouall) / (obj_count + 0.0000001)
    recall_t = K.greater(iouall, 0.5)
    # recall_count = K.sum(tf.select(recall_t, K.ones_like(iouall), K.zeros_like(iouall)))

    fid_t = K.greater(pred_confid_tf, 0.3)
    recall_count_all = K.sum(tf.where(fid_t, K.ones_like(iouall), K.zeros_like(iouall)))

    # confident predictions that also clear the IoU threshold
    obj_fid_t = tf.logical_and(fid_t, recall_t)
    effective_iou_count = K.sum(tf.where(obj_fid_t, K.ones_like(iouall), K.zeros_like(iouall)))

    recall = effective_iou_count / (obj_count + 0.00000001)
    precision = effective_iou_count / (recall_count_all + 0.0000001)
    return ave_iou, recall, precision, obj_count, intersection, union, ow, oh, x, y, w, h
Example #4
    def _build(self):
        fake, _, _, g_additional_losses = self.g.run_internal_graph(self.g.inputs)

        real = self.d.inputs[0]
        data = concat([fake, real], axis=0)

        realness, _, _, d_additional_losses = self.d.run_internal_graph(
            [data] + self.d.inputs[1:])

        nb_fakes = fake.shape[0]
        fake_realness = realness[:nb_fakes]
        real_realness = realness[nb_fakes:]
        split = 2*nb_fakes // 3
        g_fake_realness = fake_realness[:split]
        d_fake_realness = fake_realness[split:]

        outputs = OrderedDict()
        g_loss = K.mean(K.binary_crossentropy(g_fake_realness, K.ones_like(real_realness)))
        outputs['g_loss'] = g_loss
        g_reg_loss = sum([v for v in g_additional_losses.values()])
        if g_reg_loss != 0:
            outputs['g_reg_loss'] = g_reg_loss
        g_total_loss = g_loss + g_reg_loss

        d_loss = K.mean(K.binary_crossentropy(real_realness, K.ones_like(real_realness)))
        d_loss += K.mean(K.binary_crossentropy(d_fake_realness, K.zeros_like(real_realness)))
        outputs['d_loss'] = d_loss
        d_reg_loss = sum([v for v in d_additional_losses.values()])
        if d_reg_loss != 0:
            outputs['d_reg_loss'] = d_reg_loss
        d_total_loss = d_loss + d_reg_loss

        inputs = {i.name: i for i in self.g.inputs + self.d.inputs}
        inputs_list = []
        for name in self.input_names:
            inputs_list.append(inputs[name])

        g_updates = self.g_optimizer.get_updates(
            collect_trainable_weights(self.g), self.g.constraints, g_total_loss)
        d_updates = self.d_optimizer.get_updates(
            collect_trainable_weights(self.d), self.d.constraints, d_total_loss)

        if self.uses_learning_phase:
            lr_phase = [K.learning_phase()]
        else:
            lr_phase = []
        self.metrics_names = list(outputs.keys())
        self._train_function = K.function(inputs_list + lr_phase, list(outputs.values()),
                                          updates=g_updates + d_updates)
Example #5
def time_distributed_dense(x, w, b=None, dropout=None,
                           input_dim=None, output_dim=None, timesteps=None, activation='linear'):
    '''Apply y.w + b for every temporal slice y of x.
    '''
    activation = activations.get(activation)

    if not input_dim:
        # won't work with TensorFlow
        input_dim = K.shape(x)[2]
    if not timesteps:
        # won't work with TensorFlow
        timesteps = K.shape(x)[1]
    if not output_dim:
        # won't work with TensorFlow
        output_dim = K.shape(w)[1]

    if dropout is not None and 0. < dropout < 1.:
        # apply the same dropout pattern at every timestep
        ones = K.ones_like(K.reshape(x[:, 0, :], (-1, input_dim)))
        dropout_matrix = K.dropout(ones, dropout)
        expanded_dropout_matrix = K.repeat(dropout_matrix, timesteps)
        x = K.in_train_phase(x * expanded_dropout_matrix, x)

    # collapse time dimension and batch dimension together
    x = K.reshape(x, (-1, input_dim))

    x = K.dot(x, w)
    if b is not None:
        x = x + b
    # reshape to 3D tensor
    x = K.reshape(activation(x), (-1, timesteps, output_dim))
    return x
Example #6
def step(x):
    """Element-wise step function: 1 where x > 0, else 0."""
    if _BACKEND == 'tensorflow':
        import tensorflow as tf
        # tf.select / tf.python.math_ops were removed; tf.where and tf.greater are the public equivalents
        return tf.where(tf.greater(x, 0), K.ones_like(x), K.zeros_like(x))
    else:
        return K.switch(x > 0, 1, 0)
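A quick sanity check of the step function (my sketch, assuming a TF 1.x-style Keras backend where K.eval can evaluate constant tensors):

from keras import backend as K

print(K.eval(step(K.constant([-2., 0., 3.]))))  # -> [0. 0. 1.]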
Example #7
def _sample_weights(y, mask=None):
    """Compute sample weights."""
    if mask is None:
        weights = K.ones_like(y)
    else:
        weights = 1 - K.cast(K.equal(y, mask), K.floatx())
    return weights
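For illustration, a hedged usage sketch (same K.eval-on-constants assumption): positions equal to the mask value get weight 0, everything else weight 1.

from keras import backend as K

y = K.constant([[3., 0., 5., 0.]])
print(K.eval(_sample_weights(y, mask=0)))  # -> [[1. 0. 1. 0.]]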
Example #8
def residual_drop(x, input_shape, output_shape, strides=(1, 1)):
    global add_tables

    nb_filter = output_shape[0]
    conv = Convolution2D(nb_filter, 3, 3, subsample=strides, border_mode="same")(x)
    conv = BatchNormalization(axis=1)(conv)
    conv = Activation("relu")(conv)
    conv = Convolution2D(nb_filter, 3, 3, border_mode="same")(conv)
    conv = BatchNormalization(axis=1)(conv)

    if strides[0] >= 2:
        x = AveragePooling2D(strides)(x)

    if (output_shape[0] - input_shape[0]) > 0:
        pad_shape = (1,
                     output_shape[0] - input_shape[0],
                     output_shape[1],
                     output_shape[2])
        padding = K.ones(pad_shape)
        padding = K.repeat_elements(padding, K.shape(x)[0], axis=0)
        x = Lambda(lambda y: K.concatenate([y, padding], axis=1),
                   output_shape=output_shape)(x)

    _death_rate = K.variable(death_rate)
    scale = K.ones_like(conv) - _death_rate
    conv = Lambda(lambda c: K.in_test_phase(scale * c, c),
                  output_shape=output_shape)(conv)

    out = merge([conv, x], mode="sum")
    out = Activation("relu")(out)

    gate = K.variable(1, dtype="uint8")
    add_tables += [{"death_rate": _death_rate, "gate": gate}]
    return Lambda(lambda tensors: K.switch(gate, tensors[0], tensors[1]),
                  output_shape=output_shape)([out, x])
Example #9
    def compute_mask(self, inputs, mask=None):

        if mask is None or not any([m is not None for m in mask]):
            return None

        assert hasattr(mask, '__len__') and len(mask) == len(inputs)

        if self.mode in ['sum', 'mul', 'ave']:
            bool_type = 'bool' if K._BACKEND == 'tensorflow' else 'int32'
            masks = [K.cast(m, bool_type) for m in mask if m is not None]
            mask = masks[0]
            for m in masks[1:]:
                mask = mask & m
            return mask
        elif self.mode in ['concat']:
            masks = [K.ones_like(inputs[i][:-1]) if m is None else m for i, m in enumerate(mask)]
            expanded_dims = [K.expand_dims(m) for m in masks]
            concatenated = K.concatenate(expanded_dims, axis=self.concat_axis)
            return K.all(concatenated, axis=-1, keepdims=False)
        elif self.mode in ['cos', 'dot']:
            return None
        elif hasattr(self.mode, '__call__'):
            if hasattr(self._output_mask, '__call__'):
                return self._output_mask(mask)
            else:
                return self._output_mask
        else:
            # this should have been caught earlier
            raise Exception('Invalid merge mode: {}'.format(self.mode))
Example #10
def netD_loss(netD_predict):
    netD_predict_real, netD_predict_fake = netD_predict

    netD_loss_real = criterion_GAN(netD_predict_real, K.ones_like(netD_predict_real))
    netD_loss_fake = criterion_GAN(netD_predict_fake, K.zeros_like(netD_predict_fake))

    loss_netD = (1 / 2) * (netD_loss_real + netD_loss_fake)
    return loss_netD
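netD_loss (and netG_loss in Example #2) lean on external criterion_GAN / criterion_cycle helpers; a minimal stand-in, assuming the LSGAN-style mean squared error and L1 cycle term common in Keras CycleGAN ports, could look like this:

from keras import backend as K

def criterion_GAN(pred, target):
    # least-squares GAN objective: push predictions toward the target label
    return K.mean(K.square(pred - target))

def criterion_cycle(rec, real):
    # L1 cycle-consistency term
    return K.mean(K.abs(rec - real))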
Example #11
def positions_func(inputs, pad=0):
    """
    A layer filling i-th column of a 2D tensor with
    1+ln(1+i) when it contains a meaningful symbol
    and with 0 when it contains PAD
    """
    position_inputs = kb.cumsum(kb.ones_like(inputs, dtype="float32"), axis=1)
    position_inputs *= kb.cast(kb.not_equal(inputs, pad), "float32")
    return kb.log(1.0 + position_inputs)
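A worked example (my addition, again assuming K.eval on constants works as in TF 1.x Keras): with PAD id 0, the three meaningful positions map to ln(2), ln(3), ln(4) and the padding stays 0.

import keras.backend as kb

inputs = kb.constant([[7., 4., 9., 0., 0.]])
print(kb.eval(positions_func(inputs, pad=0)))
# -> [[0.693 1.099 1.386 0.    0.   ]]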
Example #12
 def get_constants(self, x):
     constants = []
     if 0 < self.dropout_U < 1:
         ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
         ones = K.concatenate([ones] * self.output_dim, 1)
         B_U = K.in_train_phase(K.dropout(ones, self.dropout_U), ones)
         constants.append(B_U)
     else:
         constants.append(K.cast_to_floatx(1.))
     if self.consume_less == 'cpu' and 0 < self.dropout_W < 1:
         input_shape = self.input_spec[0].shape
         input_dim = input_shape[-1]
         ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
         ones = K.concatenate([ones] * input_dim, 1)
         B_W = K.in_train_phase(K.dropout(ones, self.dropout_W), ones)
         constants.append(B_W)
     else:
         constants.append(K.cast_to_floatx(1.))
     return constants
Example #13
    def get_constants(self, x):
        constants = []
        if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.concatenate([ones] * self.output_dim, 1)
            B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(3)]
            constants.append(B_U)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(3)])

        if 0 < self.dropout_W < 1:
            input_shape = self.input_spec[0].shape
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.concatenate([ones] * input_dim, 1)
            B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(3)]
            constants.append(B_W)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(3)])
        return constants
Example #14
    def call(self, x, mask=None):
        if mask is None:
            return super(GlobalAveragePooling1D, self).call(x)

        mask = K.expand_dims(mask)
        mask = K.tile(mask, [1, 1, K.shape(x)[2]])
        mask = K.cast(mask, K.dtype(x))

        safe_mask_sum = K.sum(mask, axis=1)
        safe_mask_sum = K.maximum(safe_mask_sum, K.ones_like(safe_mask_sum))

        return K.sum(mask * x, axis=1) / safe_mask_sum
Example #15
    def _make_regular_grids(self, batch_size, height, width):
        # making a single regular grid
        x_linspace = K_linspace(-1., 1., width)
        y_linspace = K_linspace(-1., 1., height)
        x_coordinates, y_coordinates = K_meshgrid(x_linspace, y_linspace)
        x_coordinates = K.flatten(x_coordinates)
        y_coordinates = K.flatten(y_coordinates)
        ones = K.ones_like(x_coordinates)
        grid = K.concatenate([x_coordinates, y_coordinates, ones], 0)

        # repeating grids for each batch
        grid = K.flatten(grid)
        grids = K.tile(grid, K.stack([batch_size]))
        return K.reshape(grids, (batch_size, 3, height * width))
Example #16
 def call(self, x, mask=None):
     if (self.size is None) or (self.mode == 'sum'):
         self.size = int(x.shape[-1])
     batch_size, seq_len = K.shape(x)[0], K.shape(x)[1]
     position_j = 1. / K.pow(10000., 2 * K.arange(self.size / 2, dtype='float32') / self.size)
     position_j = K.expand_dims(position_j, 0)
     position_i = K.cumsum(K.ones_like(x[:, :, 0]), 1) - 1  # K.arange does not support variable length, so positions are generated this way
     position_i = K.expand_dims(position_i, 2)
     position_ij = K.dot(position_i, position_j)
     position_ij = K.concatenate([K.cos(position_ij), K.sin(position_ij)], 2)
     if self.mode == 'sum':
         return position_ij + x
     elif self.mode == 'concat':
         return K.concatenate([position_ij, x], 2)
Example #17
def weighted_bce_dice_loss(y_true, y_pred):
    y_true = K.cast(y_true, 'float32')
    y_pred = K.cast(y_pred, 'float32')
    # if we want to get same size of output, kernel size must be odd number
    averaged_mask = K.pool2d(
            y_true, pool_size=(11, 11), strides=(1, 1), padding='same', pool_mode='avg')
    border = K.cast(K.greater(averaged_mask, 0.005), 'float32') * K.cast(K.less(averaged_mask, 0.995), 'float32')
    weight = K.ones_like(averaged_mask)
    w0 = K.sum(weight)
    weight += border * 2
    w1 = K.sum(weight)
    weight *= (w0 / w1)
    loss = weighted_bce_loss(y_true, y_pred, weight) + \
           weighted_dice_loss(y_true, y_pred, weight)
    return loss
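Like any custom Keras loss, this plugs straight into compile; a hedged sketch (weighted_bce_loss and weighted_dice_loss are assumed defined alongside, as in the original module):

from keras.models import Sequential
from keras.layers import Conv2D

model = Sequential([Conv2D(1, (3, 3), padding='same', activation='sigmoid',
                           input_shape=(128, 128, 1))])
model.compile(optimizer='adam', loss=weighted_bce_dice_loss)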
Example #18
def TemporalDropout(inputs, dropout=0.0):
    """
    Drops with :dropout probability temporal steps of input 3D tensor
    """
    # TO DO: adapt for >3D tensors
    if dropout == 0.0:
        return inputs
    inputs_func = lambda x: kb.ones_like(x[:, :, 0:1])
    inputs_mask = kl.Lambda(inputs_func)(inputs)
    inputs_mask = kl.Dropout(dropout)(inputs_mask)
    tiling_shape = [1, 1, kb.shape(inputs)[2]] + [1] * (kb.ndim(inputs) - 3)
    inputs_mask = kl.Lambda(kb.tile, arguments={"n": tiling_shape},
                            output_shape=inputs._keras_shape[1:])(inputs_mask)
    answer = kl.Multiply()([inputs, inputs_mask])
    return answer
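Usage sketch (my addition; assumes the kl/kb aliases used above, i.e. import keras.layers as kl and import keras.backend as kb):

import keras.layers as kl

inp = kl.Input(shape=(20, 64))
out = TemporalDropout(inp, dropout=0.1)  # zeroes whole timesteps at train time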
Example #19
    def get_constants(self, inputs, training=None):
        constants = []
        if 0. < self.recurrent_dropout < 1.:
            ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.units))

            def dropped_inputs():
                return K.dropout(ones, self.recurrent_dropout)

            rec_dp_mask = [K.in_train_phase(dropped_inputs,
                                            ones,
                                            training=training) for _ in range(3)]
            constants.append(rec_dp_mask)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(3)])
        return constants
Example #20
def _meshgrid(height, width):
    # This should be equivalent to:
    #  x_t, y_t = np.meshgrid(np.linspace(-1, 1, width),
    #                         np.linspace(-1, 1, height))
    #  ones = np.ones(np.prod(x_t.shape))
    #  grid = np.vstack([x_t.flatten(), y_t.flatten(), ones])
    x_t = K.dot(T.ones((height, 1)),
                _linspace(-1.0, 1.0, width).dimshuffle('x', 0))
    y_t = K.dot(_linspace(-1.0, 1.0, height).dimshuffle(0, 'x'),
                T.ones((1, width)))

    x_t_flat = x_t.reshape((1, -1))
    y_t_flat = y_t.reshape((1, -1))
    ones = K.ones_like(x_t_flat)
    grid = K.concatenate([x_t_flat, y_t_flat, ones], axis=0)
    return grid
Example #21
def eval(outputs, anchors, num_classes, image_shape,
         max_boxes=20, score_threshold=.6, iou_threshold=.5):
    '''Evaluate the YOLO model on given input and return filtered boxes'''

    num_layers = len(outputs)
    anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]] if num_layers == 3 else [
        [3, 4, 5], [1, 2, 3]]
    input_shape = K.shape(outputs[0])[1:3] * 32
    boxes = []
    box_scores = []

    for l in range(num_layers):
        _boxes, _box_scores = boxes_and_scores(outputs[l],
                                               anchors[anchor_mask[l]],
                                               num_classes, input_shape,
                                               image_shape)
        boxes.append(_boxes)
        box_scores.append(_box_scores)

    boxes = K.concatenate(boxes, axis=0)
    box_scores = K.concatenate(box_scores, axis=0)

    mask = box_scores >= score_threshold
    max_boxes_tensor = K.constant(max_boxes, dtype='int32')
    boxes_ = []
    scores_ = []
    classes_ = []

    for c in range(num_classes):
        # TODO: use Keras backend instead of tf.
        class_boxes = tf.boolean_mask(boxes, mask[:, c])
        class_box_scores = tf.boolean_mask(box_scores[:, c], mask[:, c])
        nms_index = tf.image.non_max_suppression(
            class_boxes, class_box_scores, max_boxes_tensor,
            iou_threshold=iou_threshold)
        class_boxes = K.gather(class_boxes, nms_index)
        class_box_scores = K.gather(class_box_scores, nms_index)
        classes = K.ones_like(class_box_scores, 'int32') * c
        boxes_.append(class_boxes)
        scores_.append(class_box_scores)
        classes_.append(classes)

    boxes_ = K.concatenate(boxes_, axis=0)
    scores_ = K.concatenate(scores_, axis=0)
    classes_ = K.concatenate(classes_, axis=0)

    return boxes_, scores_, classes_
Example #22
    def call(self, inputs, mask=None):
        if mask is None:
            mask = K.zeros_like(inputs)
            mask = K.sum(mask, axis=-1)
            mask = 1 + mask

        else:
            mask = K.cast(mask, K.dtype(inputs))

        safe_n1 = K.sum(mask, axis=1) - 1
        safe_n1 = K.maximum(safe_n1, K.ones_like(safe_n1))
        safe_n1 = K.expand_dims(safe_n1)

        r = tf.cumsum(mask, axis=1) - 1
        r = self.start + (self.stop - self.start) * r / safe_n1
        r = mask * r
        r = K.expand_dims(r)
        return r
Example #23
def time_distributed_dense(x, w, b=None, dropout=None,
                            input_dim=None, output_dim=None,
                            timesteps=None, training=None):
    """Apply `y . w + b` for every temporal slice y of x.
    # Arguments
        x: input tensor.
        w: weight matrix.
        b: optional bias vector.
        dropout: wether to apply dropout (same dropout mask
            for every temporal slice of the input).
        input_dim: integer; optional dimensionality of the input.
        output_dim: integer; optional dimensionality of the output.
        timesteps: integer; optional number of timesteps.
        training: training phase tensor or boolean.
    # Returns
        Output tensor.
    """
    if not input_dim:
        input_dim = K.shape(x)[2]
    if not timesteps:
        timesteps = K.shape(x)[1]
    if not output_dim:
        output_dim = K.shape(w)[1]

    if dropout is not None and 0. < dropout < 1.:
        # apply the same dropout pattern at every timestep
        ones = K.ones_like(K.reshape(x[:, 0, :], (-1, input_dim)))
        dropout_matrix = K.dropout(ones, dropout)
        expanded_dropout_matrix = K.repeat(dropout_matrix, timesteps)
        x = K.in_train_phase(x * expanded_dropout_matrix, x, training=training)

    # collapse time dimension and batch dimension together
    x = K.reshape(x, (-1, input_dim))
    x = K.dot(x, w)
    if b is not None:
        x = K.bias_add(x, b)
    # reshape to 3D tensor
    if K.backend() == 'tensorflow':
        x = K.reshape(x, K.stack([-1, timesteps, output_dim]))
        x.set_shape([None, None, output_dim])
    else:
        x = K.reshape(x, (-1, timesteps, output_dim))
    return x
Example #24
def adversarial_loss(net_d, real, fake_abgr, distorted, gan_training="mixup_LSGAN", **weights):
    """ Adversarial Loss Function from Shoanlu GAN """
    alpha = Lambda(lambda x: x[:, :, :, :1])(fake_abgr)
    fake_bgr = Lambda(lambda x: x[:, :, :, 1:])(fake_abgr)
    fake = alpha * fake_bgr + (1-alpha) * distorted

    if gan_training == "mixup_LSGAN":
        dist = Beta(0.2, 0.2)
        lam = dist.sample()
        mixup = lam * concatenate([real, distorted]) + (1 - lam) * concatenate([fake, distorted])
        pred_fake = net_d(concatenate([fake, distorted]))
        pred_mixup = net_d(mixup)
        loss_d = calc_loss(pred_mixup, lam * K.ones_like(pred_mixup), "l2")
        loss_g = weights['w_D'] * calc_loss(pred_fake, K.ones_like(pred_fake), "l2")
        mixup2 = lam * concatenate([real,
                                    distorted]) + (1 - lam) * concatenate([fake_bgr,
                                                                           distorted])
        pred_fake_bgr = net_d(concatenate([fake_bgr, distorted]))
        pred_mixup2 = net_d(mixup2)
        loss_d += calc_loss(pred_mixup2, lam * K.ones_like(pred_mixup2), "l2")
        loss_g += weights['w_D'] * calc_loss(pred_fake_bgr, K.ones_like(pred_fake_bgr), "l2")
    elif gan_training == "relativistic_avg_LSGAN":
        real_pred = net_d(concatenate([real, distorted]))
        fake_pred = net_d(concatenate([fake, distorted]))
        loss_d = K.mean(K.square(real_pred - K.ones_like(fake_pred)))/2
        loss_d += K.mean(K.square(fake_pred - K.zeros_like(fake_pred)))/2
        loss_g = weights['w_D'] * K.mean(K.square(fake_pred - K.ones_like(fake_pred)))

        fake_pred2 = net_d(concatenate([fake_bgr, distorted]))
        loss_d += K.mean(K.square(real_pred - K.mean(fake_pred2, axis=0) -
                                  K.ones_like(fake_pred2)))/2
        loss_d += K.mean(K.square(fake_pred2 - K.mean(real_pred, axis=0) -
                                  K.zeros_like(fake_pred2)))/2
        loss_g += weights['w_D'] * K.mean(K.square(real_pred - K.mean(fake_pred2, axis=0) -
                                                   K.zeros_like(fake_pred2)))/2
        loss_g += weights['w_D'] * K.mean(K.square(fake_pred2 - K.mean(real_pred, axis=0) -
                                                   K.ones_like(fake_pred2)))/2
    else:
        raise ValueError(f"Received an unknown GAN training method: {gan_training}")
    return loss_d, loss_g
Example #25
 def call(self, x, mask=None):
     mean = super(IntraAttention, self).call(x, mask)
     # x: (batch_size, input_length, input_dim)
     # mean: (batch_size, input_dim)
     ones = K.expand_dims(K.mean(K.ones_like(x), axis=(0, 2)), dim=0)  # (1, input_length)
     # (batch_size, input_length, input_dim)
     tiled_mean = K.permute_dimensions(K.dot(K.expand_dims(mean), ones), (0, 2, 1))
     if mask is not None:
         if K.ndim(mask) > K.ndim(x):
             # Assuming this is because of the bug in Bidirectional. Temporary fix follows.
             # TODO: Fix Bidirectional.
             mask = K.any(mask, axis=(-2, -1))
         if K.ndim(mask) < K.ndim(x):
             mask = K.expand_dims(mask)
         x = switch(mask, x, K.zeros_like(x))
     # (batch_size, input_length, proj_dim)
     projected_combination = K.tanh(K.dot(x, self.vector_projector) + K.dot(tiled_mean, self.mean_projector))
     scores = K.dot(projected_combination, self.scorer)  # (batch_size, input_length)
     weights = K.softmax(scores)  # (batch_size, input_length)
     attended_x = K.sum(K.expand_dims(weights) * x, axis=1)  # (batch_size, input_dim)
     return attended_x
Example #26
def contingency_table(y, z):
    """Compute contingency table."""
    y = K.round(y)
    z = K.round(z)

    def count_matches(a, b):
        tmp = K.concatenate([a, b])
        return K.sum(K.cast(K.all(tmp, -1), K.floatx()))

    ones = K.ones_like(y)
    zeros = K.zeros_like(y)
    y_ones = K.equal(y, ones)
    y_zeros = K.equal(y, zeros)
    z_ones = K.equal(z, ones)
    z_zeros = K.equal(z, zeros)

    tp = count_matches(y_ones, z_ones)
    tn = count_matches(y_zeros, z_zeros)
    fp = count_matches(y_zeros, z_ones)
    fn = count_matches(y_ones, z_zeros)

    return (tp, tn, fp, fn)
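The usual metrics fall out of the table directly; for instance (my sketch, same backend assumptions):

from keras import backend as K

def precision(y, z):
    tp, tn, fp, fn = contingency_table(y, z)
    return tp / (tp + fp + K.epsilon())

def recall(y, z):
    tp, tn, fp, fn = contingency_table(y, z)
    return tp / (tp + fn + K.epsilon())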
Example #27
 def func(x, drop_rate=drop_rate):
     scale = K.ones_like(x) - drop_rate
     return K.in_test_phase(scale * x, x)
Example #28
    def define_loss(self, netD, real, fake_argb, distorted, vggface_feat=None):
        alpha = Lambda(lambda x: x[:, :, :, :1])(fake_argb)
        fake_rgb = Lambda(lambda x: x[:, :, :, 1:])(fake_argb)
        fake = alpha * fake_rgb + (1 - alpha) * distorted

        if self.use_mixup:
            dist = Beta(self.mixup_alpha, self.mixup_alpha)
            lam = dist.sample()
            # ==========
            mixup = lam * concatenate(
                [real, distorted]) + (1 - lam) * concatenate([fake, distorted])
            # ==========
            output_mixup = netD(mixup)
            loss_D = self.loss_fn(output_mixup,
                                  lam * K.ones_like(output_mixup))
            output_fake = netD(concatenate([fake, distorted]))  # dummy
            loss_G = .5 * self.loss_fn(output_mixup,
                                       (1 - lam) * K.ones_like(output_mixup))
        else:
            output_real = netD(concatenate([real,
                                            distorted]))  # positive sample
            output_fake = netD(concatenate([fake,
                                            distorted]))  # negative sample
            loss_D_real = self.loss_fn(output_real, K.ones_like(output_real))
            loss_D_fake = self.loss_fn(output_fake, K.zeros_like(output_fake))
            loss_D = loss_D_real + loss_D_fake
            loss_G = .5 * self.loss_fn(output_fake, K.ones_like(output_fake))
        # ==========
        loss_G += K.mean(K.abs(fake_rgb - real))
        # ==========

        # Edge loss (similar with total variation loss)
        loss_G += 1 * K.mean(
            K.abs(
                self.first_order(fake_rgb, axis=1) -
                self.first_order(real, axis=1)))
        loss_G += 1 * K.mean(
            K.abs(
                self.first_order(fake_rgb, axis=2) -
                self.first_order(real, axis=2)))

        # Perceptual Loss
        if vggface_feat is not None:

            def preprocess_vggface(x):
                x = (x + 1) / 2 * 255  # channel order: BGR
                #x[..., 0] -= 93.5940
                #x[..., 1] -= 104.7624
                #x[..., 2] -= 129.
                x -= [91.4953, 103.8827, 131.0912]
                return x

            pl_params = (0.011, 0.11, 0.1919)
            real_sz224 = tf.image.resize_images(real, [224, 224])
            real_sz224 = Lambda(preprocess_vggface)(real_sz224)
            # ==========
            fake_sz224 = tf.image.resize_images(fake_rgb, [224, 224])
            fake_sz224 = Lambda(preprocess_vggface)(fake_sz224)
            # ==========
            real_feat55, real_feat28, real_feat7 = vggface_feat(real_sz224)
            fake_feat55, fake_feat28, fake_feat7 = vggface_feat(fake_sz224)
            loss_G += pl_params[0] * K.mean(K.abs(fake_feat7 - real_feat7))
            loss_G += pl_params[1] * K.mean(K.abs(fake_feat28 - real_feat28))
            loss_G += pl_params[2] * K.mean(K.abs(fake_feat55 - real_feat55))

        return loss_D, loss_G
Example #29
def generator_accuracy(y_real, y_fake):
    y_pos = K.ones_like(y_fake)
    y_neg = K.zeros_like(y_real)
    acc_fake = keras.metrics.binary_accuracy(y_pos, y_fake)
    acc_real = keras.metrics.binary_accuracy(y_neg, y_real)
    return 0.5 * K.mean(acc_real + acc_fake)
Example #30
def yolo_eval(yolo_outputs,
              anchors,
              num_classes,
              image_shape,
              max_boxes=20,
              score_threshold=.6,
              iou_threshold=.5):
    """
    Evaluate YOLO model on given input and return filtered boxes.
    :param yolo_outputs: list of tensors,
            [(batch_size, grid_h, gird_w, anchors_per_scale * (5 + num_classes)), ...]
            where 5=[d_cx d_cy d_w d_h confidence_score]
    :param anchors: ndarray, shape=(clusters, 2) where 2=[width height]
    :param num_classes: int, number of classes
    :param image_shape: tensor, (original_image_height, original_image_width)
    :param max_boxes: int, maximum number of class boxes to be selected by NMS
    :param score_threshold: float, class_confidence_score threshold of box
    :param iou_threshold: float, IoU threshold of NMS
    :return: tuple of tensors
    """
    num_out_layers = len(yolo_outputs)
    anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]
                   ] if num_out_layers == 3 else [[3, 4, 5], [0, 1, 2]]
    input_shape = K.shape(yolo_outputs[0])[1:3] * 32
    boxes = []
    box_scores = []
    for l in range(num_out_layers):
        _boxes, _box_scores = yolo_boxes_and_scores(yolo_outputs[l],
                                                    anchors[anchor_mask[l]],
                                                    num_classes, input_shape,
                                                    image_shape)
        boxes.append(_boxes)
        box_scores.append(_box_scores)
    boxes = K.concatenate(boxes, axis=0)  # shape = (batch_total, 4)
    box_scores = K.concatenate(box_scores,
                               axis=0)  # shape = (batch_total, num_classes)

    mask = box_scores >= score_threshold
    max_boxes_tensor = K.constant(max_boxes, dtype='int32')
    boxes_ = []
    scores_ = []
    classes_ = []
    for c in range(num_classes):
        # TODO: use keras backend instead of tf.
        class_boxes = tf.boolean_mask(
            boxes, mask[:, c])  # shape=(class_num_boxes, 4), rank=2
        class_box_scores = tf.boolean_mask(
            box_scores[:, c], mask[:, c])  # shape=(class_num_boxes, ), rank=1
        nms_index = tf.image.non_max_suppression(class_boxes, class_box_scores,
                                                 max_boxes_tensor,
                                                 iou_threshold)
        class_boxes = K.gather(class_boxes, nms_index)
        class_box_scores = K.gather(class_box_scores, nms_index)
        classes = K.ones_like(class_box_scores, 'int32') * c
        boxes_.append(class_boxes)
        scores_.append(class_box_scores)
        classes_.append(classes)
    boxes_ = K.concatenate(boxes_, axis=0)  # rank=2
    scores_ = K.concatenate(scores_, axis=0)  # rank=1
    classes_ = K.concatenate(classes_, axis=0)  # rank=1

    return boxes_, scores_, classes_
Example #31
    def call(self, inputs, states, training=None):
        x = inputs[:, :self.x_dim]
        interval = inputs[:, self.x_dim:(self.x_dim + self.interval_dim)]
        weight = inputs[:, (self.x_dim + self.interval_dim):(self.x_dim + self.interval_dim+self.weight_dim)]

        if 0 < self.dropout < 1 and self._dropout_mask is None:
            self._dropout_mask = _generate_dropout_mask(
                K.ones_like(x),
                self.dropout,
                training=training,
                count=6)
        if (0 < self.recurrent_dropout < 1 and
                self._recurrent_dropout_mask is None):
            self._recurrent_dropout_mask = _generate_dropout_mask(
                K.ones_like(states[0]),
                self.recurrent_dropout,
                training=training,
                count=4)

        # dropout matrices for input units
        dp_mask = self._dropout_mask
        # dropout matrices for recurrent units
        rec_dp_mask = self._recurrent_dropout_mask

        h_tm1 = states[0]  # previous memory state
        c_tm1 = states[1]  # previous carry state

        self.implementation = 1
        if self.implementation == 1:
            if 0 < self.dropout < 1.:
                inputs_i = x * dp_mask[0]
                inputs_f = x * dp_mask[1]
                inputs_c = x * dp_mask[2]
                inputs_o = x * dp_mask[3]
                inputs_T = x * dp_mask[4]
                inputs_W = x * dp_mask[5]
            else:
                inputs_i = x
                inputs_f = x
                inputs_c = x
                inputs_o = x
                inputs_T = x
                inputs_W = x
            x_i = K.dot(inputs_i, self.kernel_i)
            x_f = K.dot(inputs_f, self.kernel_f)
            x_c = K.dot(inputs_c, self.kernel_c)
            x_o = K.dot(inputs_o, self.kernel_o)
            x_T = K.dot(inputs_T, self.kernel_Tx)
            x_W = K.dot(inputs_W, self.kernel_Wx)
            if self.use_bias:
                x_i = K.bias_add(x_i, self.bias_i)
                x_f = K.bias_add(x_f, self.bias_f)
                x_c = K.bias_add(x_c, self.bias_c)
                x_o = K.bias_add(x_o, self.bias_o)
                x_T = K.bias_add(x_T, self.bias_T)
                x_W = K.bias_add(x_W, self.bias_W)

            if 0 < self.recurrent_dropout < 1.:
                h_tm1_i = h_tm1 * rec_dp_mask[0]
                h_tm1_f = h_tm1 * rec_dp_mask[1]
                h_tm1_c = h_tm1 * rec_dp_mask[2]
                h_tm1_o = h_tm1 * rec_dp_mask[3]

            else:
                h_tm1_i = h_tm1
                h_tm1_f = h_tm1
                h_tm1_c = h_tm1
                h_tm1_o = h_tm1
            i = self.recurrent_activation(x_i + K.dot(h_tm1_i, self.recurrent_kernel_i) + K.dot(weight, self.kernel_W_i))
            f = self.recurrent_activation(x_f + K.dot(h_tm1_f, self.recurrent_kernel_f) + K.dot(weight, self.kernel_W_f))


            c_hat = self.activation(x_c + K.dot(h_tm1_c,
                                                    self.recurrent_kernel_c))

            if(self.interval_dim > 0):
                T = self.recurrent_activation(x_T + K.dot(interval,
                                                      self.kernel_interval))
                c_hat = c_hat * T

            if(self.weight_dim > 0):
                W = self.recurrent_activation(x_W + K.dot(weight, self.kernel_W_W))
                c_hat = c_hat * W

            c = f * c_tm1 + i * c_hat
            o = self.recurrent_activation(x_o + K.dot(h_tm1_o, self.recurrent_kernel_o) + K.dot(weight, self.kernel_W_o))
        else:
            pass

        h = o * self.activation(c)
        if 0 < self.dropout + self.recurrent_dropout:
            if training is None:
                h._uses_learning_phase = True
        return h, [h, c]
Example #32
def mse_onehot(y_true, y_pred):
    return mse_loss(K.ones_like(y_pred)/nb_classes, y_pred)
Example #33
    def accfun(y0, y1):
        y_pos = K.ones_like(y_fake)

        acc_fake = K.mean(keras.metrics.binary_accuracy(y_pos, y_fake))

        return acc_fake
Example #34
    def lossfun(self, y_fake):
        y_pos = K.ones_like(y_fake)

        loss_fake = K.mean(keras.metrics.binary_crossentropy(y_pos, y_fake))

        return loss_fake
Example #35
def complementary_mask(x):
    return K.ones_like(x) - x
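Sanity check (my addition): for values in [0, 1] this is simply 1 - x.

from keras import backend as K

print(K.eval(complementary_mask(K.constant([[0., 1., 0.5]]))))  # -> [[1. 0. 0.5]]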
Example #36
def yolo_eval(yolo_outputs,
              anchors,
              num_classes,
              image_shape,
              max_boxes=20,
              score_threshold=.6,
              iou_threshold=.5):
    """ yolo evaluate

    Args:
        yolo_outputs: [batch, 13, 13, 3*85]
        anchors: [9, 2]
        num_classes: number of classes in your own task
        image_shape: the shape of the original image

        max_boxes: needs to be bigger when the task contains many targets

        score_threshold: anchors with score > score_threshold are treated as positive
        iou_threshold: the threshold used in non max suppression

    Returns:
        boxes_, scores_, classes_

    """
    num_layers = len(yolo_outputs)

    # yolo4_tiny outputs are [C4, C5]
    anchor_mask = [[0, 1, 2], [3, 4, 5]]
    input_shape = K.shape(yolo_outputs[0])[1:3] * 16
    boxes = []
    box_scores = []

    for i in range(num_layers):
        _boxes, _box_scores = yolo_boxes_and_scores(yolo_outputs[i],
                                                    anchors[anchor_mask[i]],
                                                    num_classes, input_shape,
                                                    image_shape, i)
        boxes.append(_boxes)
        box_scores.append(_box_scores)

    boxes = K.concatenate(boxes, axis=0)
    box_scores = K.concatenate(box_scores, axis=0)

    mask = box_scores >= score_threshold
    max_boxes_tensor = K.constant(max_boxes, dtype='int32')
    boxes_ = []
    scores_ = []
    classes_ = []
    for c in range(num_classes):
        # get positive anchors by using box_scores >= score_threshold
        class_boxes = tf.boolean_mask(boxes, mask[:, c])
        class_box_scores = tf.boolean_mask(box_scores[:, c], mask[:, c])

        # NMS
        nms_index = tf.image.non_max_suppression(class_boxes,
                                                 class_box_scores,
                                                 max_boxes_tensor,
                                                 iou_threshold=iou_threshold)

        class_boxes = K.gather(class_boxes, nms_index)
        class_box_scores = K.gather(class_box_scores, nms_index)
        classes = K.ones_like(class_box_scores, 'int32') * c
        boxes_.append(class_boxes)
        scores_.append(class_box_scores)
        classes_.append(classes)
    boxes_ = K.concatenate(boxes_, axis=0)
    scores_ = K.concatenate(scores_, axis=0)
    classes_ = K.concatenate(classes_, axis=0)

    return boxes_, scores_, classes_
Example #37
 def call(self, x, mask=None):
     return K.in_test_phase((K.ones_like(x) - self.death_rate) * x, x)
Example #38
def myloss(y_true, y_pred, e=0.15):
    loss1 = mse_loss(y_true, y_pred)
    #one_hot
    loss2 = mse_loss(K.ones_like(y_pred)/nb_classes, y_pred)
    loss3 = loss(y_true, y_pred)
    return (1-2*e)*loss1 + e*loss2 + e*loss3
Example #39
def discriminator_on_generator_loss(y_true, y_pred):
    return K.mean(K.binary_crossentropy(K.flatten(y_pred), K.ones_like(K.flatten(y_pred))), axis=-1)
Example #40
#%%

NET_D = BASIC_D(ISIZE, NC_IN, NC_OUT, MAX_LAYERS)
NET_G = UNET_G (ISIZE, NC_IN, NC_OUT)

REAL_A = NET_G.input
FAKE_B = NET_G.output
REAL_B = NET_D.inputs[1]

OUTPUT_D_REAL = NET_D([REAL_A, REAL_B])
OUTPUT_D_FAKE = NET_D([REAL_A, FAKE_B])

LOSS_FN = lambda OUTPUT, TARGET : -K.mean(K.log(OUTPUT+1e-12)*TARGET+K.log(1-OUTPUT+1e-12)*(1-TARGET))

LOSS_D_REAL = LOSS_FN(OUTPUT_D_REAL, K.ones_like(OUTPUT_D_REAL))
LOSS_D_FAKE = LOSS_FN(OUTPUT_D_FAKE, K.zeros_like(OUTPUT_D_FAKE))
LOSS_G_FAKE = LOSS_FN(OUTPUT_D_FAKE, K.ones_like(OUTPUT_D_FAKE))

LOSS_L = K.mean(K.abs(FAKE_B-REAL_B))


LOSS_D = LOSS_D_REAL + LOSS_D_FAKE
TRAINING_UPDATES_D = Adam(lr = 2e-4, beta_1 = 0.5).get_updates(NET_D.trainable_weights, [], LOSS_D)
NET_D_TRAIN = K.function([REAL_A, REAL_B], [LOSS_D/2.0], TRAINING_UPDATES_D)

LOSS_G = LOSS_G_FAKE + 100 * LOSS_L
TRAINING_UPDATES_G = Adam(lr = 2e-4, beta_1 = 0.5).get_updates(NET_G.trainable_weights, [], LOSS_G)
NET_G_TRAIN = K.function([REAL_A, REAL_B], [LOSS_G_FAKE, LOSS_L], TRAINING_UPDATES_G)

#%%
Example #41
def sigmoid_steep(x):
    base = K.ones_like(x) * pow(10, 20)
    return 1. / (1. + K.pow(base, -x))
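Since base**(-x) equals exp(-x * ln(base)), this is a logistic sigmoid sharpened by a factor ln(1e20) ≈ 46, i.e. nearly a hard step around x = 0 while staying differentiable. A quick check (my addition):

from keras import backend as K

print(K.eval(sigmoid_steep(K.constant([-0.1, 0.1]))))  # -> close to [0. 1.]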
Example #42
def R2(y_true, y_pred):
    SS_res = K.sum(K.square(y_true - y_pred), axis=-1)
    SS_tot = K.sum(K.square(y_true - K.mean(y_true, axis=-1)), axis=-1)
    return K.ones_like(SS_tot) - SS_res/(SS_tot + K.epsilon())
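Two quick checks of the R2 metric (my sketch, same K.eval assumptions): perfect predictions score ~1, predicting the mean scores ~0.

from keras import backend as K

y_true = K.constant([[1., 2., 3., 4.]])
print(K.eval(R2(y_true, y_true)))                      # -> ~1.0
print(K.eval(R2(y_true, K.constant([[2.5] * 4]))))     # -> ~0.0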
Example #43
    def build_model(self, compute_output=True):
        """ 
        """

        # TODO: Move this entire section to cost_functions.py

        if self.input_tensor is None:

            if self.cost_function == 'mse':

                if compute_output:
                    self.model = Model(inputs=self.inputs,
                                       outputs=self.output_layer)

                self.model.compile(
                    optimizer=self.keras_optimizer_dict[self.optimizer](
                        lr=self.initial_learning_rate),
                    loss='mean_squared_error',
                    metrics=['mean_squared_error'])

            elif self.cost_function == 'dice':

                if compute_output:
                    if self.output_activation:
                        self.model = Model(inputs=self.inputs,
                                           outputs=Activation('sigmoid')(
                                               self.output_layer))
                    else:
                        self.model = Model(inputs=self.inputs,
                                           outputs=self.output_layer)

                self.model.compile(
                    optimizer=self.keras_optimizer_dict[self.optimizer](
                        lr=self.initial_learning_rate),
                    loss=dice_coef_loss,
                    metrics=[dice_coef])

            # Not Implemented
            elif self.cost_function == 'multi_dice':

                raise NotImplementedError(
                    'Multi-dice coefficient not yet implemented.')

                if compute_output:
                    if self.output_activation:
                        self.model = Model(inputs=self.inputs,
                                           outputs=Activation('sigmoid')(
                                               self.output_layer))
                    else:
                        self.model = Model(inputs=self.inputs,
                                           outputs=self.output_layer)

                self.model.compile(
                    optimizer=self.keras_optimizer_dict[self.optimizer](
                        lr=self.initial_learning_rate),
                    loss=dice_coef_loss,
                    metrics=[dice_coef])

            elif self.cost_function == 'binary_crossentropy':

                if compute_output:
                    if self.output_activation:
                        self.model = Model(inputs=self.inputs,
                                           outputs=Activation('sigmoid')(
                                               self.output_layer))
                    else:
                        self.model = Model(inputs=self.inputs,
                                           outputs=self.output_layer)

                self.model.compile(
                    optimizer=self.keras_optimizer_dict[self.optimizer](
                        lr=self.initial_learning_rate),
                    loss='binary_crossentropy',
                    metrics=['binary_accuracy'])

            elif self.cost_function == 'categorical_crossentropy':

                if compute_output:
                    if self.output_activation:
                        self.model = Model(inputs=self.inputs,
                                           outputs=Activation('softmax')(
                                               self.output_layer))
                    else:
                        self.model = Model(inputs=self.inputs,
                                           outputs=self.output_layer)

                self.model.compile(
                    optimizer=self.keras_optimizer_dict[self.optimizer](
                        lr=self.initial_learning_rate),
                    loss='categorical_crossentropy',
                    metrics=['categorical_accuracy'])

            elif self.cost_function == 'weighted_categorical_crossentropy':

                if compute_output:
                    activation = Activation('sigmoid')(self.output_layer)
                    activation_categorical = Lambda(
                        lambda arg: K.ones_like(arg) - arg)(activation)
                    predictions = concatenate(
                        [activation, activation_categorical], axis=-1)

                    if self.output_activation:
                        self.model = Model(inputs=self.inputs,
                                           outputs=predictions)
                    else:
                        self.model = Model(inputs=self.inputs,
                                           outputs=self.output_layer)

                lossFunc = WeightedCategoricalCrossEntropy(
                    self.categorical_weighting)
                self.model.compile(
                    self.keras_optimizer_dict[self.optimizer](
                        lr=self.initial_learning_rate),
                    loss=lossFunc.loss_wcc_dist,
                    metrics=[lossFunc.metric_dice_dist, lossFunc.metric_acc])

            else:
                raise NotImplementedError(
                    'Cost function {} not implemented.'.format(
                        self.cost_function))

            self.model_input_shape = self.model.layers[0].input_shape
            self.model_output_shape = self.model.layers[-1].output_shape

            return self.model

        else:

            self.model_input_shape = self.model.layers[0].input_shape
            self.model_output_shape = self.model.layers[-1].output_shape

            return self.output_layer
Example #44
    def call(self, inputs, states, training=None):
        if 0 < self.dropout < 1 and self._dropout_mask is None:
            self._dropout_mask = _generate_dropout_mask(
                K.ones_like(inputs),
                self.dropout,
                training=training,
                count=4)
        if (0 < self.recurrent_dropout < 1 and
                self._recurrent_dropout_mask is None):
            self._recurrent_dropout_mask = _generate_dropout_mask(
                K.ones_like(states[0]),
                self.recurrent_dropout,
                training=training,
                count=4)

        # dropout matrices for input units
        dp_mask = self._dropout_mask
        # dropout matrices for recurrent units
        rec_dp_mask = self._recurrent_dropout_mask

        h_tm1 = states[0]  # previous memory state
        c_tm1 = states[1]  # previous carry state

        # repeat the hidden state to the length of the sequence
        _htm1 = K.repeat(h_tm1, self.seq_len)

        _Whtm1 = K.dot(_htm1, self.W_a)
        _Uinpt = K.dot(self._seq_input, self.U_a)

        # calculate the attention probabilities
        et = K.dot(activations.tanh(_Whtm1 + _Uinpt), K.expand_dims(self.V_a))
        at = K.exp(et)
        at_sum = K.sum(at, axis=1)
        at_sum_repeated = K.repeat(at_sum, self.seq_len)
        at /= at_sum_repeated  # (batch_size, seq_len, 1)

        # calculate the context vector
        context = K.squeeze(K.batch_dot(at, self._seq_input, axes=1), axis=1)

        if self.implementation == 1:
            if 0 < self.dropout < 1.:
                inputs_i = context * dp_mask[0]
                inputs_f = context * dp_mask[1]
                inputs_c = context * dp_mask[2]
                inputs_o = context * dp_mask[3]
            else:
                inputs_i = context
                inputs_f = context
                inputs_c = context
                inputs_o = context
            x_i = K.dot(inputs_i, self.kernel_i)
            x_f = K.dot(inputs_f, self.kernel_f)
            x_c = K.dot(inputs_c, self.kernel_c)
            x_o = K.dot(inputs_o, self.kernel_o)
            if self.use_bias:
                x_i = K.bias_add(x_i, self.bias_i)
                x_f = K.bias_add(x_f, self.bias_f)
                x_c = K.bias_add(x_c, self.bias_c)
                x_o = K.bias_add(x_o, self.bias_o)

            if 0 < self.recurrent_dropout < 1.:
                h_tm1_i = h_tm1 * rec_dp_mask[0]
                h_tm1_f = h_tm1 * rec_dp_mask[1]
                h_tm1_c = h_tm1 * rec_dp_mask[2]
                h_tm1_o = h_tm1 * rec_dp_mask[3]
            else:
                h_tm1_i = h_tm1
                h_tm1_f = h_tm1
                h_tm1_c = h_tm1
                h_tm1_o = h_tm1
            i = self.recurrent_activation(x_i + K.dot(h_tm1_i,
                                                      self.recurrent_kernel_i))
            f = self.recurrent_activation(x_f + K.dot(h_tm1_f,
                                                      self.recurrent_kernel_f))
            c = f * c_tm1 + i * self.activation(x_c + K.dot(h_tm1_c,
                                                            self.recurrent_kernel_c))
            o = self.recurrent_activation(x_o + K.dot(h_tm1_o,
                                                      self.recurrent_kernel_o))
        else:
            if 0. < self.dropout < 1.:
                inputs *= dp_mask[0]
            z = K.dot(inputs, self.kernel)
            if 0. < self.recurrent_dropout < 1.:
                h_tm1 *= rec_dp_mask[0]
            z += K.dot(h_tm1, self.recurrent_kernel)
            if self.use_bias:
                z = K.bias_add(z, self.bias)

            z0 = z[:, :self.units]
            z1 = z[:, self.units: 2 * self.units]
            z2 = z[:, 2 * self.units: 3 * self.units]
            z3 = z[:, 3 * self.units:]

            i = self.recurrent_activation(z0)
            f = self.recurrent_activation(z1)
            c = f * c_tm1 + i * self.activation(z2)
            o = self.recurrent_activation(z3)

        h = o * self.activation(c)
        if 0 < self.dropout + self.recurrent_dropout:
            if training is None:
                h._uses_learning_phase = True
        self.attention_weight.append(at)

        return h, [h, c]
Example #45
def _get_report(y_true, y_pred):
    TP = K.sum(y_true * y_pred)
    FP = K.sum((K.ones_like(y_true) - y_true) * y_pred)
    FN = K.sum(y_true * (K.ones_like(y_true) - y_pred))
    return TP, FP, FN
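A possible follow-up (my sketch): an F1 metric built on top of the report.

from keras import backend as K

def f1_score(y_true, y_pred):
    TP, FP, FN = _get_report(y_true, y_pred)
    precision = TP / (TP + FP + K.epsilon())
    recall = TP / (TP + FN + K.epsilon())
    return 2 * precision * recall / (precision + recall + K.epsilon())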
Example #46
    def call(self, inputs, states, training=None):
        h_tm1 = states[0]  # previous memory

        if 0 < self.dropout < 1 and self._dropout_mask is None:
            self._dropout_mask = _generate_dropout_mask(K.ones_like(inputs),
                                                        self.dropout,
                                                        training=training,
                                                        count=3)
        if (0 < self.recurrent_dropout < 1
                and self._recurrent_dropout_mask is None):
            self._recurrent_dropout_mask = _generate_dropout_mask(
                K.ones_like(h_tm1),
                self.recurrent_dropout,
                training=training,
                count=3)

        # dropout matrices for input units
        dp_mask = self._dropout_mask
        # dropout matrices for recurrent units
        rec_dp_mask = self._recurrent_dropout_mask

        if self.implementation == 1:
            if 0. < self.dropout < 1.:
                inputs_z = inputs * dp_mask[0]
                inputs_r = inputs * dp_mask[1]
                inputs_h = inputs * dp_mask[2]
            else:
                inputs_z = inputs
                inputs_r = inputs
                inputs_h = inputs

            x_z = K.dot(inputs_z, self.kernel_z)
            x_r = K.dot(inputs_r, self.kernel_r)
            x_h = K.dot(inputs_h, self.kernel_h)
            if self.use_bias:
                x_z = K.bias_add(x_z, self.input_bias_z)
                x_r = K.bias_add(x_r, self.input_bias_r)
                x_h = K.bias_add(x_h, self.input_bias_h)

            if 0. < self.recurrent_dropout < 1.:
                h_tm1_z = h_tm1 * rec_dp_mask[0]
                h_tm1_r = h_tm1 * rec_dp_mask[1]
                h_tm1_h = h_tm1 * rec_dp_mask[2]
            else:
                h_tm1_z = h_tm1
                h_tm1_r = h_tm1
                h_tm1_h = h_tm1

            recurrent_z = K.dot(h_tm1_z, self.recurrent_kernel_z)
            recurrent_r = K.dot(h_tm1_r, self.recurrent_kernel_r)
            if self.reset_after and self.use_bias:
                recurrent_z = K.bias_add(recurrent_z, self.recurrent_bias_z)
                recurrent_r = K.bias_add(recurrent_r, self.recurrent_bias_r)

            z = self.recurrent_activation(x_z + recurrent_z)
            r = self.recurrent_activation(x_r + recurrent_r)

            # reset gate applied after/before matrix multiplication
            if self.reset_after:
                recurrent_h = K.dot(h_tm1_h, self.recurrent_kernel_h)
                if self.use_bias:
                    recurrent_h = K.bias_add(recurrent_h,
                                             self.recurrent_bias_h)
                recurrent_h = r * recurrent_h
            else:
                recurrent_h = K.dot(r * h_tm1_h, self.recurrent_kernel_h)

            hh = self.activation(x_h + recurrent_h)
        else:
            if 0. < self.dropout < 1.:
                inputs *= dp_mask[0]

            # inputs projected by all gate matrices at once
            matrix_x = K.dot(inputs, self.kernel)
            if self.use_bias:
                # biases: bias_z_i, bias_r_i, bias_h_i
                matrix_x = K.bias_add(matrix_x, self.input_bias)
            x_z = matrix_x[:, :self.units]
            x_r = matrix_x[:, self.units:2 * self.units]
            x_h = matrix_x[:, 2 * self.units:]

            if 0. < self.recurrent_dropout < 1.:
                h_tm1 *= rec_dp_mask[0]

            if self.reset_after:
                # hidden state projected by all gate matrices at once
                matrix_inner = K.dot(h_tm1, self.recurrent_kernel)
                if self.use_bias:
                    matrix_inner = K.bias_add(matrix_inner,
                                              self.recurrent_bias)
            else:
                # hidden state projected separately for update/reset and new
                matrix_inner = K.dot(h_tm1,
                                     self.recurrent_kernel[:, :2 * self.units])

            recurrent_z = matrix_inner[:, :self.units]
            recurrent_r = matrix_inner[:, self.units:2 * self.units]

            z = self.recurrent_activation(x_z + recurrent_z)
            r = self.recurrent_activation(x_r + recurrent_r)

            if self.reset_after:
                recurrent_h = r * matrix_inner[:, 2 * self.units:]
            else:
                recurrent_h = K.dot(r * h_tm1,
                                    self.recurrent_kernel[:, 2 * self.units:])

            hh = self.activation(x_h + recurrent_h)

        # previous and candidate state mixed by update gate
        h = z * h_tm1 + (1 - z) * hh

        if 0 < self.dropout + self.recurrent_dropout:
            if training is None:
                h._uses_learning_phase = True

        return h, [h]
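Both implementations above reduce to the same update rule, h = z * h_tm1 + (1 - z) * hh. A minimal NumPy sketch of that algebra (the shapes, random weights, and the reset_after=True path are illustrative assumptions, not part of the cell itself):

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

batch, input_dim, units = 2, 3, 4
rng = np.random.RandomState(0)
x = rng.randn(batch, input_dim)                 # input at the current timestep
h_tm1 = rng.randn(batch, units)                 # previous hidden state
kernel = rng.randn(input_dim, 3 * units)        # concatenated [z | r | h]
recurrent_kernel = rng.randn(units, 3 * units)  # concatenated [z | r | h]

x_z, x_r, x_h = np.split(x @ kernel, 3, axis=1)
r_z, r_r, r_h = np.split(h_tm1 @ recurrent_kernel, 3, axis=1)

z = sigmoid(x_z + r_z)                  # update gate
r = sigmoid(x_r + r_r)                  # reset gate
hh = np.tanh(x_h + r * r_h)             # candidate state (reset_after=True)
h = z * h_tm1 + (1 - z) * hh            # same mixing rule as the cell above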
Exemple #47
0
def mycrossentropy(y_true, y_pred, e=0.1):
    # standard categorical cross-entropy against the true labels
    loss1 = K.categorical_crossentropy(y_true, y_pred)
    # cross-entropy against a uniform distribution over the classes
    # (nb_classes is assumed to be defined in the enclosing scope)
    loss2 = K.categorical_crossentropy(
        K.ones_like(y_pred) / nb_classes, y_pred)
    # label smoothing: mix the two terms with smoothing factor e
    return (1 - e) * loss1 + e * loss2
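A hedged usage sketch for the smoothing loss above; the tiny model and the nb_classes global are illustrative assumptions:

from keras.models import Sequential
from keras.layers import Dense

nb_classes = 10  # assumed global read by mycrossentropy
model = Sequential([Dense(nb_classes, activation='softmax', input_shape=(32,))])
# loss = (1 - e) * CE(y_true, y_pred) + e * CE(uniform, y_pred)
model.compile(optimizer='adam', loss=mycrossentropy)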
Exemple #48
0
def call(self, inputs):
    # inputs: [original input, decoded reconstruction]
    x = inputs[0]
    x_decoded_mean = inputs[1]
    loss = self.vae_loss(x, x_decoded_mean)
    self.add_loss(loss, inputs=inputs)
    # the return value is unused; the layer only registers the loss
    return K.ones_like(x)
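The call above only registers the VAE loss via add_loss and returns a throwaway tensor. A minimal sketch of the surrounding layer, with a placeholder reconstruction term standing in for the real vae_loss:

from keras.layers import Layer
import keras.backend as K

class VAELossLayer(Layer):
    # hypothetical wrapper illustrating the add_loss pattern above
    def vae_loss(self, x, x_decoded_mean):
        # placeholder reconstruction term; a real VAE adds a KL term as well
        return K.mean(K.square(x - x_decoded_mean))

    def call(self, inputs):
        x, x_decoded_mean = inputs
        self.add_loss(self.vae_loss(x, x_decoded_mean), inputs=inputs)
        return K.ones_like(x)  # output is ignored; compile with loss=None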
Exemple #49
0
def logistic_loss(y_true, y_pred):
    # log(1 + exp(-y_true * y_pred)), with labels y_true in {-1, +1}
    return K.mean(
        K.log(K.ones_like(y_true, 'float32') + K.exp(-y_true * y_pred)),
        axis=-1)
def weighted_loss_none(y_true, y_pred, is_weight=True):
    # uniform weights; custom_BCE_loss and weighted_dice_loss are assumed
    # to be defined elsewhere in the source module
    weights = K.ones_like(x=y_true)

    loss = custom_BCE_loss(y_true, y_pred, weights) + weighted_dice_loss(
        y_true, y_pred, weights)
    return loss
Exemple #51
0
def yolo_loss(args,
              anchors,
              num_classes,
              rescore_confidence=False,
              print_loss=False):
    """YOLO localization loss function.

    Parameters
    ----------
    yolo_output : tensor
        Final convolutional layer features.

    true_boxes : tensor
        Ground truth boxes tensor with shape [batch, num_true_boxes, 5]
        containing box x_center, y_center, width, height, and class.

    detectors_mask : array
        0/1 mask for detector positions where there is a matching ground truth.

    matching_true_boxes : array
        Corresponding ground truth boxes for positive detector positions.
        Already adjusted for conv height and width.

    anchors : tensor
        Anchor boxes for model.

    num_classes : int
        Number of object classes.

    rescore_confidence : bool, default=False
        If True, set the confidence target to the IOU of the best predicted
        box with the closest matching ground truth box.

    print_loss : bool, default=False
        If True then use a tf.Print() to print the loss components.

    Returns
    -------
    mean_loss : float
        mean localization loss across minibatch
    """
    (yolo_output, true_boxes, detectors_mask, matching_true_boxes) = args
    num_anchors = len(anchors)
    object_scale = 5
    no_object_scale = 1
    class_scale = 1
    coordinates_scale = 1

    yad2kOutput, edlOutput = yolo_head(yolo_output, anchors, num_classes)
    pred_xy, pred_wh, pred_confidence, pred_softmax_class_probs = yad2kOutput
    pred_class_logits, pred_box_class_evidence, pred_alpha, \
                pred_S, pred_uncertainty, pred_class_probs = edlOutput

    # Unadjusted box predictions for loss.
    # TODO: Remove extra computation shared with yolo_head.
    yolo_output_shape = K.shape(yolo_output)
    feats = K.reshape(yolo_output, [
        -1, yolo_output_shape[1], yolo_output_shape[2], num_anchors,
        num_classes + 5
    ])
    pred_boxes = K.concatenate((K.sigmoid(feats[..., 0:2]), feats[..., 2:4]),
                               axis=-1)

    # TODO: Adjust predictions by image width/height for non-square images?
    # IOUs may be off due to different aspect ratio.

    # Expand pred x,y,w,h to allow comparison with ground truth.
    # batch, conv_height, conv_width, num_anchors, num_true_boxes, box_params
    pred_xy = K.expand_dims(pred_xy, 4)
    pred_wh = K.expand_dims(pred_wh, 4)

    pred_wh_half = pred_wh / 2.
    pred_mins = pred_xy - pred_wh_half
    pred_maxes = pred_xy + pred_wh_half

    true_boxes_shape = K.shape(true_boxes)

    # batch, conv_height, conv_width, num_anchors, num_true_boxes, box_params
    true_boxes = K.reshape(true_boxes, [
        true_boxes_shape[0], 1, 1, 1, true_boxes_shape[1], true_boxes_shape[2]
    ])
    true_xy = true_boxes[..., 0:2]
    true_wh = true_boxes[..., 2:4]

    # Find IOU of each predicted box with each ground truth box.
    true_wh_half = true_wh / 2.
    true_mins = true_xy - true_wh_half
    true_maxes = true_xy + true_wh_half

    intersect_mins = K.maximum(pred_mins, true_mins)
    intersect_maxes = K.minimum(pred_maxes, true_maxes)
    intersect_wh = K.maximum(intersect_maxes - intersect_mins, 0.)
    intersect_areas = intersect_wh[..., 0] * intersect_wh[..., 1]

    pred_areas = pred_wh[..., 0] * pred_wh[..., 1]
    true_areas = true_wh[..., 0] * true_wh[..., 1]

    union_areas = pred_areas + true_areas - intersect_areas
    iou_scores = intersect_areas / union_areas

    # Best IOUs for each location.
    best_ious = K.max(iou_scores, axis=4)  # Best IOU scores.
    best_ious = K.expand_dims(best_ious)

    # A detector has found an object if IOU > thresh for some true box.
    object_detections = K.cast(best_ious > 0.6, K.dtype(best_ious))

    # TODO: Darknet region training includes extra coordinate loss for early
    # training steps to encourage predictions to match anchor priors.

    # Determine confidence weights from object and no_object weights.
    # NOTE: YOLO does not use binary cross-entropy here.
    no_object_weights = (no_object_scale * (1 - object_detections) *
                         (1 - detectors_mask))
    no_objects_loss = no_object_weights * K.square(-pred_confidence)

    if rescore_confidence:
        objects_loss = (object_scale * detectors_mask *
                        K.square(best_ious - pred_confidence))
    else:
        objects_loss = (object_scale * detectors_mask *
                        K.square(1 - pred_confidence))
    confidence_loss = objects_loss + no_objects_loss

    # Classification loss for matching detections.
    # NOTE: YOLO does not use categorical cross-entropy loss here.
    matching_classes = K.cast(matching_true_boxes[..., 4], 'int32')
    matching_classes = K.one_hot(matching_classes, num_classes)
    classification_loss = (class_scale * detectors_mask *
                           K.square(matching_classes - pred_class_probs))

    # Coordinate loss for matching detection boxes.
    matching_boxes = matching_true_boxes[..., 0:4]
    coordinates_loss = (coordinates_scale * detectors_mask *
                        K.square(matching_boxes - pred_boxes))

    ########################################################
    ########################################################
    ########                                       #########
    ######## EDL Loss and metric calculations here #########
    ########                                       #########
    ########################################################
    ########################################################

    ### EDL loss: expected value of the cross-entropy loss over
    ### the predicted Dirichlet distribution, plus a KL regularization term

    # Expected value of cross entropy loss
    A = tf.reduce_sum(matching_classes *
                      (tf.digamma(pred_S) - tf.digamma(pred_alpha)),
                      4,
                      keepdims=True)

    # KL term
    alp = pred_box_class_evidence * (1 - matching_classes) + 1
    beta = K.ones_like(alp)
    S_alpha = tf.reduce_sum(alp, axis=4, keepdims=True)
    S_beta = tf.reduce_sum(beta, axis=4, keepdims=True)
    lnB = tf.lgamma(S_alpha) - tf.reduce_sum(
        tf.lgamma(alp), axis=4, keepdims=True)
    lnB_uni = tf.reduce_sum(tf.lgamma(beta), axis=4,
                            keepdims=True) - tf.lgamma(S_beta)
    dg0 = tf.digamma(S_alpha)
    dg1 = tf.digamma(alp)
    kl = tf.reduce_sum(
        (alp - beta) * (dg1 - dg0), axis=4, keepdims=True) + lnB + lnB_uni
    # KL annealing coefficient; fixed at 1.0 here, though it is typically
    # ramped up from 0 over the early training epochs
    B = 1.0 * kl

    # Apply the detector mask and combine the two EDL loss terms
    edl_loss = detectors_mask * (A + B)

    confidence_loss_sum = K.sum(confidence_loss)
    classification_loss_sum = K.sum(classification_loss)
    coordinates_loss_sum = K.sum(coordinates_loss)
    edl_loss_sum = K.sum(edl_loss)

    # Original (non-EDL) total kept for reference:
    #total_loss = 0.5 * (
    #    confidence_loss_sum + classification_loss_sum + coordinates_loss_sum)

    total_loss = 0.5 * (confidence_loss_sum + edl_loss_sum +
                        coordinates_loss_sum)

    if print_loss:
        total_loss = tf.Print(
            total_loss, [
                total_loss, confidence_loss_sum, edl_loss_sum,
                coordinates_loss_sum
            ],
            message='yolo_loss, conf_loss, edl_loss, box_coord_loss:')

    return total_loss
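In YAD2K-style training scripts a loss with this signature is usually attached through a Lambda layer so Keras optimizes it directly; a sketch under that assumption (yolo_model, anchors, num_classes, and the 13x13 grid with 5 anchors are placeholders, not from the source):

from keras.layers import Input, Lambda
from keras.models import Model

boxes_input = Input(shape=(None, 5))
detectors_mask_input = Input(shape=(13, 13, 5, 1))
matching_boxes_input = Input(shape=(13, 13, 5, 5))

model_loss = Lambda(
    yolo_loss, output_shape=(1,), name='yolo_loss',
    arguments={'anchors': anchors, 'num_classes': num_classes})(
        [yolo_model.output, boxes_input,
         detectors_mask_input, matching_boxes_input])

model = Model([yolo_model.input, boxes_input,
               detectors_mask_input, matching_boxes_input], model_loss)
# the model's "prediction" is the loss itself
model.compile(optimizer='adam', loss=lambda y_true, y_pred: y_pred)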
Exemple #52
0
def dsc_crossentropy(y_true, y_pred, smooth=0.00000001):
    K.set_epsilon(1e-8)
    # per-pixel binary cross-entropy, scaled by the inverse foreground
    # area of each 256x256 ground-truth mask
    num = K.binary_crossentropy(y_true, y_pred)
    den = (K.sum(K.abs(y_true), axis=(1, 2, 3)) + smooth) * K.ones_like(y_true)
    return 0.5 * 256.0 * 256.0 * num / den
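A quick numeric check of the loss above on a dummy 1x256x256x1 mask (the shapes and values are illustrative only):

import numpy as np
import keras.backend as K

rng = np.random.RandomState(0)
y_t = K.constant((rng.rand(1, 256, 256, 1) > 0.5).astype('float32'))
y_p = K.constant(np.full((1, 256, 256, 1), 0.5, dtype='float32'))
loss_map = K.eval(dsc_crossentropy(y_t, y_p))  # per-pixel loss values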
Exemple #53
0
def weighted_sum(first, second, sigma, first_threshold=-np.inf, second_threshold=np.inf):
    # convex combination of the two inputs, gated elementwise by sigma
    logit_probs = first * sigma + second * (1.0 - sigma)
    # kb is the Keras backend; INFTY is a sentinel constant defined elsewhere
    infty_tensor = kb.ones_like(logit_probs) * INFTY
    # replace positions where either input falls below its threshold
    logit_probs = kb.switch(kb.greater(first, first_threshold), logit_probs, infty_tensor)
    logit_probs = kb.switch(kb.greater(second, second_threshold), logit_probs, infty_tensor)
    return logit_probs
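Hypothetical usage, assuming kb aliases keras.backend and INFTY is the module's masking constant (the value below is a stand-in):

import numpy as np
import keras.backend as kb

INFTY = 1e6  # stand-in for the module's own constant
first = kb.constant(np.array([[0.2, -5.0]], dtype='float32'))
second = kb.constant(np.array([[1.0, 1.0]], dtype='float32'))
sigma = kb.constant(np.array([[0.7, 0.7]], dtype='float32'))
blended = weighted_sum(first, second, sigma,
                       first_threshold=-1.0, second_threshold=0.5)
# positions where first <= -1.0 or second <= 0.5 are replaced by INFTY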
def yolo_eval(yolo_outputs,
              anchors,
              num_classes,
              image_shape,
              max_boxes=20,
              score_threshold=.6,
              iou_threshold=.5,
              letterbox_image=True):
    #---------------------------------------------------#
    #   Number of feature layers; 3 valid feature layers
    #---------------------------------------------------#
    num_layers = len(yolo_outputs)
    #-----------------------------------------------------------#
    #   Anchors for the 13x13 feature layer: [81,82], [135,169], [344,319]
    #   Anchors for the 26x26 feature layer: [23,27], [37,58], [81,82]
    #-----------------------------------------------------------#
    anchor_mask = [[6,7,8], [3,4,5], [0,1,2]] if num_layers==3 else [[3,4,5], [1,2,3]]
    
    #-----------------------------------------------------------#
    #   Size of the input image, typically 416x416
    #-----------------------------------------------------------#
    input_shape = K.shape(yolo_outputs[0])[1:3] * 32
    boxes = []
    box_scores = []
    #-----------------------------------------------------------#
    #   Process each feature layer
    #-----------------------------------------------------------#
    for l in range(num_layers):
        _boxes, _box_scores = yolo_boxes_and_scores(yolo_outputs[l], anchors[anchor_mask[l]], num_classes, input_shape, image_shape, letterbox_image)
        boxes.append(_boxes)
        box_scores.append(_box_scores)
    #-----------------------------------------------------------#
    #   Stack the results from every feature layer
    #-----------------------------------------------------------#
    boxes = K.concatenate(boxes, axis=0)
    box_scores = K.concatenate(box_scores, axis=0)

    #-----------------------------------------------------------#
    #   Check whether the scores exceed score_threshold
    #-----------------------------------------------------------#
    mask = box_scores >= score_threshold
    max_boxes_tensor = K.constant(max_boxes, dtype='int32')
    boxes_ = []
    scores_ = []
    classes_ = []
    for c in range(num_classes):
        #-----------------------------------------------------------#
        #   Take all boxes with box_scores >= score_threshold,
        #   together with their scores
        #-----------------------------------------------------------#
        class_boxes = tf.boolean_mask(boxes, mask[:, c])
        class_box_scores = tf.boolean_mask(box_scores[:, c], mask[:, c])

        #-----------------------------------------------------------#
        #   Non-max suppression:
        #   keep only the highest-scoring box within each region
        #-----------------------------------------------------------#
        nms_index = tf.image.non_max_suppression(
            class_boxes, class_box_scores, max_boxes_tensor, iou_threshold=iou_threshold)

        #-----------------------------------------------------------#
        #   Gather the results after non-max suppression; the three
        #   outputs are box coordinates, scores, and classes
        #-----------------------------------------------------------#
        class_boxes = K.gather(class_boxes, nms_index)
        class_box_scores = K.gather(class_box_scores, nms_index)
        classes = K.ones_like(class_box_scores, 'int32') * c
        boxes_.append(class_boxes)
        scores_.append(class_box_scores)
        classes_.append(classes)
    boxes_ = K.concatenate(boxes_, axis=0)
    scores_ = K.concatenate(scores_, axis=0)
    classes_ = K.concatenate(classes_, axis=0)

    return boxes_, scores_, classes_
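A hedged evaluation sketch; yolo_model, anchors, and num_classes are assumed to come from the surrounding training code:

import keras.backend as K

# placeholder fed with the original (height, width) of each image at run time
input_image_shape = K.placeholder(shape=(2,))
boxes, scores, classes = yolo_eval(
    yolo_model.output, anchors, num_classes, input_image_shape,
    max_boxes=20, score_threshold=0.3, iou_threshold=0.45)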
def recall(y_true, y_pred):
    # targets are forced to all-ones, so this measures the fraction of
    # samples that the model classifies as positive
    y_true = K.ones_like(y_true)
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    all_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall_acc = true_positives / (all_positives + K.epsilon())
    return recall_acc
def discriminator_loss(y_true, y_pred):
    # assumes the batch stacks BATCH_SIZE real samples (target 1)
    # followed by BATCH_SIZE fake samples (target 0)
    BATCH_SIZE = 10
    half = K.flatten(y_pred[:BATCH_SIZE, :, :, :])
    targets = K.concatenate([K.ones_like(half), K.zeros_like(half)])
    return K.mean(K.binary_crossentropy(K.flatten(y_pred), targets), axis=-1)
def precision(y_true, y_pred):
    # with all-ones targets, true positives equal predicted positives, so
    # this metric saturates near 1 whenever anything is predicted positive
    y_true = K.ones_like(y_true)
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision_acc = true_positives / (predicted_positives + K.epsilon())
    return precision_acc
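The two metrics above compose into F1 in the usual way; a short sketch reusing them exactly as defined:

def f1(y_true, y_pred):
    # harmonic mean of the precision and recall metrics defined above
    p = precision(y_true, y_pred)
    r = recall(y_true, y_pred)
    return 2 * p * r / (p + r + K.epsilon())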
Exemple #58
0
def acc(y_true, y_pred):
    ones = K.ones_like(y_pred)
    # accuracy against the complement of the rounded predictions
    # (the labels appear to be flipped: 0 denotes the positive class)
    return K.mean(K.equal(y_true, ones - K.clip(K.round(y_pred), 0, 1)),
                  axis=-1)
def new_mse_loss(y_true, y_pred):
    loss1 = mse_loss(y_true, y_pred)
    # smoothing term: MSE against a uniform distribution over nb_classes
    # (mse_loss and nb_classes are assumed to be defined elsewhere)
    loss2 = mse_loss(K.ones_like(y_pred) / nb_classes, y_pred)
    return 0.9 * loss1 + 0.1 * loss2