Example 1
def batch_matmul(X, Y):
    """Keras-only implementation of tensorflow's batch matmul behavior in tf.matmul()"""
    with K.name_scope('batch_matmul'):
        x_shape = K.shape(X)
        y_shape = K.shape(Y)

        x_leading_dims = x_shape[:-2]
        y_leading_dims = y_shape[:-2]

        x_mat_dims = x_shape[-2:]
        y_mat_dims = y_shape[-2:]

        x_flatten_dim = K.prod(x_leading_dims, keepdims=True)
        y_flatten_dim = K.prod(y_leading_dims, keepdims=True)

        x_reshape_dims = K.concatenate([x_flatten_dim, x_mat_dims])
        y_reshape_dims = K.concatenate([y_flatten_dim, y_mat_dims])

        product_dims = K.concatenate(
            [x_leading_dims, x_mat_dims[:1], y_mat_dims[1:]])

        X = K.reshape(X, x_reshape_dims)
        Y = K.reshape(Y, y_reshape_dims)

        prod = K.batch_dot(X, Y)

        return K.reshape(prod, product_dims)
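A minimal usage sketch (my addition, assuming the TF1-era `from keras import backend as K` API): multiply a stack of 3x4 matrices by a matching stack of 4x5 matrices.
import numpy as np
from keras import backend as K

X = K.constant(np.random.rand(2, 6, 3, 4))
Y = K.constant(np.random.rand(2, 6, 4, 5))
Z = batch_matmul(X, Y)
print(K.eval(Z).shape)  # (2, 6, 3, 5): leading dims kept, matrix dims multiplied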
Example 2
def _thoracic_loss(y_pred, eta, p, bbox, lambda_bbox=0.5):
    # y_pred is a PxP tensor; the input image is a 512x512 tensor
    # bbox = (xa, wa, ya, ha)  (unpacked below as x, y, w, h)
    lambda_bbox = 0.5
    # p2 = K.ones_like(y_pred) - y_pred
    p2 = 1 - y_pred
    # p_y_x = 1-tf.reduce_prod(p2)
    p_y_x = 1 - K.prod(p2)

    P = K.cast(K.shape(y_pred)[1], 'float32')
    x = bbox[0][0] * P / float(512)
    y = bbox[0][1] * P / float(512)
    w = bbox[0][2] * P / float(512)
    h = bbox[0][3] * P / float(512)

    x = K.cast(x, 'int32')
    y = K.cast(y, 'int32')
    w = K.cast(w, 'int32')
    h = K.cast(h, 'int32')

    in_box = y_pred[:, y:y + h, x:x + w]

    p1 = K.prod(in_box)
    p3 = K.prod(1 - in_box)
    p2 = K.prod(y_pred)
    p_y_x_bbox = p3 * p2 / p1

    loss = -eta * K.log(p_y_x_bbox) - (1 - eta) * p * K.log(p_y_x) - (
        1 - eta) * (1 - p) * K.log(1 - p_y_x)
    print(loss)
    return loss
Example 3
def generalizedDiceLoss(y_true, y_pred):
    # GDL, reference: https://arxiv.org/pdf/1707.03237.pdf
    # Sudre et al, 2017, ArXiv (Generalised DICE overlap as a deep learning loss function for highly unbalanced segmentations)
    # DICE Loss for multi-category classification

    _EPSILON = K.epsilon()
    y_pred = K.clip(y_pred, _EPSILON, 1.0 - _EPSILON)
    y_true = K.clip(y_true, _EPSILON, 1.0 - _EPSILON)
    # First flatten the matrices for each channel (category)
    ypredshape = y_pred.get_shape().as_list()
    ytrueshape = y_true.get_shape().as_list()

    dimp = K.prod(K.shape(y_pred)[:-1])
    dimt = K.prod(K.shape(y_true)[:-1])
    y_pred = K.reshape(y_pred, (dimp, ypredshape[-1]))
    y_true = K.reshape(y_true, (dimt, -1))

    y_int = y_pred * y_true

    # Per-class weights: square over the flattened axis; the clipping above
    # keeps the sums away from zero, preventing division by 0.
    weights = 1 / (K.square(K.sum(y_true, axis=0)))

    numerator = 2 * K.sum(weights * K.sum(y_int, axis=0), axis=-1)
    denominator = K.sum(
        weights *
        (K.sum(K.square(y_true), axis=0) + K.sum(K.square(y_pred), axis=0)),
        axis=-1)
    loss = -numerator / denominator
    return loss
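A NumPy reference for the same Generalized Dice computation (my sketch, added for checking; `eps` stands in for the clipping above, and it returns the dice value itself, of which the Keras loss above is the negative):
import numpy as np

def generalized_dice_np(y_true, y_pred, eps=1e-7):
    # Flatten everything but the channel axis, mirroring the Keras code above.
    t = y_true.reshape(-1, y_true.shape[-1])
    p = y_pred.reshape(-1, y_pred.shape[-1])
    w = 1.0 / (np.square(t.sum(axis=0)) + eps)  # inverse squared class volume
    num = 2.0 * np.sum(w * np.sum(t * p, axis=0))
    den = np.sum(w * (np.sum(t * t, axis=0) + np.sum(p * p, axis=0)))
    return num / den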
Example 4
    def loss(y_true, y_pred):

        # Gaussian normalization factor
        normalization_factor = 1 / sqrt(2 * pi * var)

        # Calculate old pdf
        old_exponent = -K.square(actions - y_true) / (2. * var)
        old_pdf = K.prod(normalization_factor * K.exp(old_exponent), axis=-1)

        # Calculate new pdf
        exponent = -K.square(actions - y_pred) / (2. * var)
        pdf = K.prod(normalization_factor * K.exp(exponent), axis=-1)

        # Calculate ratio and clipped ratio
        pdf_ratio = pdf / old_pdf
        clipped_pdf_ratio = K.clip(pdf_ratio, 1 - loss_clipping_epsilon,
                                   1 + loss_clipping_epsilon)

        # Get clipped loss
        # Notice the minus sign. Since we want to maximize the
        # objective, we need to minimize the negative objective.
        clipped_loss = -K.minimum(pdf_ratio * advantages,
                                  clipped_pdf_ratio * advantages)

        return clipped_loss
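Since `K.prod` over many small Gaussian densities can underflow to zero, an equivalent formulation (my sketch, not from the source) computes the ratio in log space, where the identical normalization factors cancel:
import numpy as np

def gaussian_pdf_ratio(actions, y_true, y_pred, var):
    # Ratio of diagonal-Gaussian pdfs via log densities; the (2*pi*var)^(-1/2)
    # factors are the same in numerator and denominator and drop out.
    log_old = np.sum(-np.square(actions - y_true) / (2.0 * var), axis=-1)
    log_new = np.sum(-np.square(actions - y_pred) / (2.0 * var), axis=-1)
    return np.exp(log_new - log_old)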
Example 5
def soft_mask_mean_absolute_error_with_total(y_true, y_pred):
    # Elementwise product of the two channels, summed over the spatial axes.
    # (The original K.prod over a Python list with axis=(1, 2), followed by
    # another K.sum over axis=(1, 2), does not type-check; this is the
    # apparent intent.)
    y_pred_tot = K.sum(y_pred[:, :, :, 0] * y_pred[:, :, :, 1], axis=(1, 2))
    y_true_tot = K.sum(y_true[:, :, :, 0] * y_true[:, :, :, 1], axis=(1, 2))
    return soft_mask_mean_absolute_error(
        y_true, y_pred) + 0.5 * K.mean(K.abs(y_pred_tot - y_true_tot))
Example 6
def sample(inputs, coords, dim, wrapped):
    """
    sample - samples from the inputs tensor using coords as indices.

    :param inputs: the tensor to sample from
        shape: (N, width, height, ..., n_chan)
    :param coords: the indices to sample with
        shape: (N, dim, width, height, ...)
    :param dim: dimensionality of the data, e.g. 2 for 2D images
    :param wrapped: whether to wrap out of bound indices or to clip them
    """

    inputs_shape = K.shape(inputs)
    coords_shape = K.shape(coords)
    outputs_shape = K.concatenate(
        [inputs_shape[0:1], coords_shape[2:], inputs_shape[-1:]])

    maxes = K.cast(inputs_shape[1:-1] - 1, "int32")
    if wrapped:
        coords = wrap(coords, maxes, dim)
    else:
        coords = clip(coords, maxes, dim)

    if K.backend() == "tensorflow":
        return sample_tf(inputs, coords, dim)

    n = inputs_shape[0]
    n_chan = inputs_shape[-1]

    flat_inputs = K.reshape(inputs, (-1, n_chan))

    flat_coords = K.flatten(coords[:, -1])
    for i in reversed(range(dim - 1)):
        flat_coords += K.prod(inputs_shape[1:i + 2]) * K.flatten(coords[:, i])

    coords_per_sample = K.prod(coords_shape[2:])

    # add the offsets for each sample in the minibatch
    # (the TensorFlow case already returned above via sample_tf, so only the
    # Theano branch below is actually reachable)
    if K.backend() == "tensorflow":
        import tensorflow as tf
        offsets = tf.range(n) * K.prod(inputs_shape[1:-1])
    else:
        import theano.tensor as T
        offsets = T.arange(n) * K.prod(inputs_shape[1:-1])

    offsets = K.reshape(offsets, (-1, 1))
    offsets = K.tile(offsets, (1, coords_per_sample))
    offsets = K.flatten(offsets)
    flat_coords += offsets

    outputs = K.gather(flat_inputs, flat_coords)
    outputs = K.reshape(outputs, outputs_shape)
    return outputs
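The accumulation loop above is mixed-radix index flattening; NumPy's `ravel_multi_index` does the same thing for concrete arrays (a 2-D illustration, my addition):
import numpy as np

shape = (5, 7)                        # spatial dims of one sample
coords = np.array([[2, 3], [4, 6]])   # one (x, y) index pair per row
flat = np.ravel_multi_index(coords.T, shape)
print(flat)                           # [17 34], i.e. x * 7 + y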
Example 7
 def calc_iou(self, target_regions):
     """
     TODO : Write description
     calc_iou
     """
     # Intersection corners: max of the top-lefts, min of the bottom-rights
     # (the original took the maximum for both, which overstates the overlap).
     pos_tl = KB.maximum(self.__regions[:, None, :2], target_regions[:, :2])
     pos_br = KB.minimum(self.__regions[:, None, 2:], target_regions[:, 2:])
     t_p = KB.prod(pos_br - pos_tl, axis=2) * KB.cast(
         KB.all(pos_br > pos_tl, axis=2), 'float32')
     g_t = KB.prod(self.__regions[:, 2:] - self.__regions[:, :2], axis=1)
     p_r = KB.prod(target_regions[:, 2:] - target_regions[:, :2], axis=1)
     return t_p / (g_t + p_r - t_p)
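A quick NumPy check of the same IoU formula on two concrete boxes stored as (x1, y1, x2, y2), my addition:
import numpy as np

a = np.array([0., 0., 2., 2.])
b = np.array([1., 1., 3., 3.])
tl = np.maximum(a[:2], b[:2])                 # intersection top-left
br = np.minimum(a[2:], b[2:])                 # intersection bottom-right
inter = np.prod(np.clip(br - tl, 0, None))
union = np.prod(a[2:] - a[:2]) + np.prod(b[2:] - b[:2]) - inter
print(inter / union)                          # 1/7 for these boxes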
Example 8
    def mi(y_true, y_pred):
        """ soft mutual info """
        y_pred = K.clip(y_pred, 0, max_clip)
        y_true = K.clip(y_true, 0, max_clip)

        if crop_background:
            # does not support variable batch size
            thresh = 0.0001
            padding_size = 20
            filt = tf.ones([padding_size, padding_size, padding_size, 1, 1])

            smooth = tf.nn.conv3d(y_true, filt, [1, 1, 1, 1, 1], "SAME")
            mask = smooth > thresh
            # mask = K.any(K.stack([y_true > thresh, y_pred > thresh], axis=0), axis=0)
            y_pred = tf.boolean_mask(y_pred, mask)
            y_true = tf.boolean_mask(y_true, mask)
            y_pred = K.expand_dims(K.expand_dims(y_pred, 0), 2)
            y_true = K.expand_dims(K.expand_dims(y_true, 0), 2)

        else:
            # reshape: flatten images into shape (batch_size, height*width*depth*chan, 1)
            y_true = K.reshape(y_true, (-1, K.prod(K.shape(y_true)[1:])))
            y_true = K.expand_dims(y_true, 2)
            y_pred = K.reshape(y_pred, (-1, K.prod(K.shape(y_pred)[1:])))
            y_pred = K.expand_dims(y_pred, 2)

        nb_voxels = tf.cast(K.shape(y_pred)[1], tf.float32)

        # reshape bin centers to be (1, 1, B)
        o = [1, 1, np.prod(vol_bin_centers.get_shape().as_list())]
        vbc = K.reshape(vol_bin_centers, o)

        # compute image terms
        I_a = K.exp(-preterm * K.square(y_true - vbc))
        I_a /= K.sum(I_a, -1, keepdims=True)

        I_b = K.exp(-preterm * K.square(y_pred - vbc))
        I_b /= K.sum(I_b, -1, keepdims=True)

        # compute probabilities
        I_a_permute = K.permute_dimensions(I_a, (0, 2, 1))
        pab = K.batch_dot(
            I_a_permute,
            I_b)  # should be the right size now, nb_labels x nb_bins
        pab /= nb_voxels
        pa = tf.reduce_mean(I_a, 1, keep_dims=True)
        pb = tf.reduce_mean(I_b, 1, keep_dims=True)

        papb = K.batch_dot(K.permute_dimensions(pa,
                                                (0, 2, 1)), pb) + K.epsilon()
        mi = K.sum(K.sum(pab * K.log(pab / papb + K.epsilon()), 1), 1)

        return mi
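The I_a/I_b terms above are soft histogram assignments; a standalone NumPy sketch of that binning step (my addition; the `preterm` value is assumed for illustration):
import numpy as np

preterm = 50.0                       # 1 / (2 * sigma^2); an assumed value
bin_centers = np.linspace(0, 1, 8)   # (B,) histogram bin centers
vox = np.random.rand(100, 1)         # (N, 1) voxel intensities
I = np.exp(-preterm * np.square(vox - bin_centers))  # (N, B) Gaussian weights
I /= I.sum(axis=1, keepdims=True)    # each voxel's weights sum to 1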
Example 9
def kernel_loss(y_true, y_pred):
    inclusion_dist = kb.max(y_pred - 1 + y_true)
    exclusion_dist = kb.max(y_pred - y_true)
    exclusion_dist2 = kb.mean(y_pred * (1 - y_true) *
                              kb.cast(y_pred > 0, dtype=kb.floatx()))

    # ex_cost = kb.log(exclusion_dist + kb.epsilon()) * (1 - kb.prod(y_true))
    # in_cost = -kb.log(inclusion_dist + kb.epsilon()) * (1 - kb.prod(1 - y_true))
    ex_cost = (exclusion_dist2 + kb.epsilon()) * (1 - kb.prod(y_true))
    in_cost = -(inclusion_dist + kb.epsilon()) * (1 - kb.prod(1 - y_true))
    # return inclusion_dist * kb.sum(y_true)
    # return - exclusion_dist * (1 - kb.prod(y_true))
    return in_cost + ex_cost
Example 10
    def thoracic_loss(self, y_pred, eta, p, bbox, lambda_bbox=0.5):
        # Input shape: a list of 4 tensors
        # y_pred:(1,P,P)
        # eta:(1,1)
        # p:(1,1)
        # bbox:(1,4) = (xa,wa,ya,ha)  (unpacked below as x, y, w, h)
        # print(y_pred.shape)
        # print(eta.shape)
        # print(p.shape)
        # print(bbox.shape)
        # print

        lambda_bbox = 0.5

        y_pred = K.minimum(1.0, y_pred)

        y_pred_tmp = 1 - y_pred
        y_pred_tmp = K.minimum(1.0, y_pred_tmp)

        y_pred = y_pred * 0.02 + 0.98  # rescale into [0.98, 1.0] so the products below stay away from 0
        y_pred_tmp = y_pred_tmp * 0.02 + 0.98  # same rescaling

        p_y_x = 1 - K.prod(y_pred_tmp)

        P = K.cast(K.shape(y_pred)[1], 'float32')

        x = bbox[0] * P
        y = bbox[1] * P
        w = bbox[2] * P
        h = bbox[3] * P

        x = K.cast(x, 'int32')
        y = K.cast(y, 'int32')
        w = K.cast(w, 'int32')
        h = K.cast(h, 'int32')

        # in_box = y_pred[:,y:y+h,x:x+w]
        in_box = y_pred[y:y + h, x:x + w]

        p1 = K.prod(in_box)
        # p3 = K.prod(1.0-in_box)
        p3 = K.prod(y_pred_tmp[y:y + h, x:x + w])
        # p2 = K.prod(1-y_pred)
        p2 = K.prod(y_pred_tmp)
        p_y_x_bbox = p1 * p2 / p3 + 1e-10

        loss = -eta * K.log(p_y_x_bbox) - (1.0 - eta) * p * K.log(p_y_x) - (
            1.0 - eta) * (1.0 - p) * K.log(1.0 - p_y_x)
        return loss
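The box term `p1 * p2 / p3` above relies on the identity that the product of (1 - p) outside the box equals the whole-image product divided by the in-box product; the same computation in NumPy (my sketch, omitting the 0.98 rescaling):
import numpy as np

def box_conditioned_prob(pred, x, y, w, h):
    in_box = pred[y:y + h, x:x + w]
    p_in = np.prod(in_box)             # product of p inside the box
    p_neg_all = np.prod(1.0 - pred)    # product of (1 - p) over the whole map
    p_neg_in = np.prod(1.0 - in_box)   # product of (1 - p) inside the box
    return p_in * p_neg_all / p_neg_in  # = prod_in(p) * prod_out(1 - p)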
Example 11
def ksparse(x, k, axis, alpha=1, absolute=False):
    if isinstance(axis, int):
        axis = (axis, )
    elif isinstance(axis, list):
        axis = tuple(axis)
    axis_complement = tuple(set(range(K.ndim(x))) - set(axis))
    shape_reduce = K.prod([K.shape(x)[j] for j in axis])
    _k = K.minimum(K.in_train_phase(k, alpha * k), shape_reduce)
    inputs_permute_dimensions = K.permute_dimensions(x, axis_complement + axis)
    inputs_permute_dimensions_reshape = K.reshape(inputs_permute_dimensions,
                                                  (-1, shape_reduce))
    if absolute is True:
        inputs_permute_dimensions_reshape = K.abs(
            inputs_permute_dimensions_reshape)
    _, indices = tf.nn.top_k(inputs_permute_dimensions_reshape, _k)
    scatter_indices = K.concatenate([
        (K.arange(K.shape(inputs_permute_dimensions_reshape)[0])[:, None] *
         K.ones((1, _k), dtype='int32'))[:, :, None], indices[:, :, None]
    ])
    scatter_updates = K.ones(
        (K.shape(inputs_permute_dimensions_reshape)[0], _k))
    mask_permute_dimensions_reshape = K.cast(
        tf.scatter_nd(scatter_indices, scatter_updates,
                      K.shape(inputs_permute_dimensions_reshape)), K.floatx())
    mask_permute_dimensions = K.reshape(mask_permute_dimensions_reshape,
                                        K.shape(inputs_permute_dimensions))
    mask = K.permute_dimensions(mask_permute_dimensions,
                                tuple(np.argsort(axis_complement + axis)))
    return mask * x
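A NumPy reference for what `ksparse` computes along a single axis (my sketch): zero everything except the k largest entries.
import numpy as np

def ksparse_np(x, k, axis=1):
    idx = np.argsort(x, axis=axis)                      # ascending order
    drop = np.take(idx, np.arange(x.shape[axis] - k), axis=axis)
    mask = np.ones_like(x)
    np.put_along_axis(mask, drop, 0.0, axis=axis)       # zero the small ones
    return x * mask

print(ksparse_np(np.array([[0.1, 0.9, 0.5, 0.3]]), k=2))  # [[0. 0.9 0.5 0.]]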
Example 12
import theano
from theano.tensor.nnet.neighbours import images2neibs, neibs2images

def get_single_deconv_out_new(input, filter):
    output_shape = (1, 1, input.shape[0]*filter.shape[0], input.shape[1]*filter.shape[1])
    # reshape inputs
    image = input.reshape((1, K.prod(input.shape)))
    kernel = filter.reshape((1, K.prod(filter.shape)))

    # neibs = images2neibs(output, neib_shape=filter.shape, neib_step=filter.shape)

    def fn(i, k):
        return i*k
    results, updates = theano.scan(fn=fn, sequences=image[0], non_sequences=kernel[0])

    # neibs = neibs*results
    img_new = neibs2images(results, filter.shape, output_shape)

    return img_new[0][0]
Example 13
def weighted_mse(y_true, y_pred):
    epsilon = 0.000001
    pos_mask = K.cast(y_true >= 0.5, 'float32')
    neg_mask = K.cast(y_true < -0.5, 'float32')
    #y_pred = K.clip(y_pred,epsilon,1-epsilon)
    ## all labels with absolute value less than 0.01 is background
    #pos_mask = K.cast(K.abs(y_true) >= 0.75, 'float32')
    #neg_mask = K.cast(K.abs(y_true) < 0.75, 'float32')
    num_pixels = K.cast(K.prod(K.shape(y_true)), 'float32')
    num_pixels = maybe_print(num_pixels, "total ", do_print=True)
    num_pos = maybe_print(K.sum(pos_mask), 'npositive ', do_print=True)
    pos_fracs = K.clip((num_pos / num_pixels), 0.05, 0.95)
    neg_fracs = K.clip((K.sum(neg_mask) / num_pixels), 0.05, 0.95)

    pos_fracs = maybe_print(pos_fracs, "positive fraction", do_print=True)

    # chosen to sum to 1 when multiplied by their fractions, assuming no ignore
    pos_weight = maybe_print(1.0 / (2 * pos_fracs),
                             "positive weight",
                             do_print=True)
    neg_weight = maybe_print(1.0 / (2 * neg_fracs),
                             "negative weight",
                             do_print=True)  #1.25

    per_pixel_weights = pos_weight * pos_mask + neg_weight * neg_mask
    per_pixel_weighted_sq_error = K.square(y_true - y_pred) * per_pixel_weights

    batch_weighted_mse = K.mean(per_pixel_weighted_sq_error) / 1.0

    return K.mean(batch_weighted_mse)
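A sanity check (my addition) of the comment above: the weights are chosen so that pos_weight * pos_frac + neg_weight * neg_frac = 1 when nothing is ignored.
pos_frac, neg_frac = 0.25, 0.75
pos_weight = 1.0 / (2 * pos_frac)   # 2.0
neg_weight = 1.0 / (2 * neg_frac)   # 0.666...
print(pos_weight * pos_frac + neg_weight * neg_frac)  # 1.0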
Example 14
def define_loss_fun(model, settings):
    dream = model.input

    # Get the symbolic outputs of each "key" layer (we gave them unique names).
    layer_dict = dict([(layer.name, layer) for layer in model.layers])

    # Define the loss.
    loss = K.variable(0.)
    for layer_name in settings['features']:
        # Add the L2 norm of the features of a layer to the loss.
        if layer_name not in layer_dict:
            raise ValueError('Layer ' + layer_name + ' not found in model.')
        coeff = settings['features'][layer_name]
        x = layer_dict[layer_name].output
        # We avoid border artifacts by only involving non-border pixels in the loss.
        scaling = K.prod(K.cast(K.shape(x), 'float32'))
        if K.image_data_format() == 'channels_first':
            loss += coeff * K.sum(K.square(x[:, :, 2:-2, 2:-2])) / scaling
        else:
            loss += coeff * K.sum(K.square(x[:, 2:-2, 2:-2, :])) / scaling

    # Compute the gradients of the loss with respect to the dream image.
    grads = K.gradients(loss, dream)[0]
    # Normalize gradients.
    grads /= K.maximum(K.mean(K.abs(grads)), K.epsilon())

    # Set up function to retrieve the value
    # of the loss and gradients given an input image.
    outputs = [loss, grads]
    fetch_loss_and_grads = K.function([dream], outputs)
    return fetch_loss_and_grads
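A hypothetical usage sketch (my addition; downloads ImageNet weights, and the layer names and coefficients are illustrative):
from keras import backend as K
from keras.applications import inception_v3

K.set_learning_phase(0)
model = inception_v3.InceptionV3(weights='imagenet', include_top=False)
settings = {'features': {'mixed2': 0.2, 'mixed3': 3.0}}
fetch_loss_and_grads = define_loss_fun(model, settings)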
Example 15
def siamese_loss(y_true, y_pred):

    y_true_ = K.argmax(y_true, axis=-1)
    y_true_even = y_true_[::2]
    y_true_even = K.expand_dims(y_true_even, axis=-1)
    y_true_odd = y_true_[1::2]
    y_true_odd = K.expand_dims(y_true_odd, axis=-1)
    y_true_ = K.concatenate([y_true_even, y_true_odd], axis=-1)
    # label = 1 if dissimilar
    labels = K.sum(
        y_true_, axis=-1,
        keepdims=False) - 2 * K.prod(y_true_, axis=-1, keepdims=False)
    labels = K.cast(labels, dtype=K.floatx())

    y_pred_even = y_pred[::2, :]
    y_pred_odd = y_pred[1::2, :]

    l2error = K.sqrt(
        K.maximum(
            K.sum(K.square(y_pred_even - y_pred_odd), axis=-1, keepdims=False),
            K.epsilon()))

    contrastive_loss = 0.5 * (
        (1 - labels) * K.square(l2error) +
        labels * K.square(K.maximum(margin - l2error, 0)))
    contrastive_loss = K.expand_dims(contrastive_loss, axis=-1)

    contrastive_loss_full = K.concatenate([contrastive_loss, contrastive_loss],
                                          axis=-1)
    contrastive_loss_full = K.reshape(contrastive_loss_full, shape=(-1, 1))
    contrastive_loss_full = K.squeeze(contrastive_loss_full, axis=-1)

    return contrastive_loss_full
Example 16
def gram_matrix(x, shift = -1):
    '''
        Batch calculation of the Gram matrix for the given input tensor x.
        Args:
            x (tensor):     A 4-tensor of (batch size, img height, img width,
                            channels)
            shift (int):    A scalar added to the activations before the Gram
                            matrix is calculated, in the style of
                            (Novak & Nikulin, 2016)

        Returns: The calculated Gram matrix.
    '''

    # Permutes tensor axis (move channels in front)
    x = K.permute_dimensions(x, (0, 3, 1, 2))

    s = K.shape(x)

    # Flatten img dimensions to a single width x height vector
    feat = K.reshape(x, (s[0], s[1], s[2] * s[3]))

    # Shift the activations before calculating the Gram matrix, in the style
    # of (Novak & Nikulin, 2016)
    feat = feat + shift

    # Calculate Gram matrix
    gram = K.batch_dot(feat,
                       K.permute_dimensions(feat, (0, 2, 1))) \
        / K.prod(K.cast(s[1:], K.floatx()))

    return gram
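A NumPy shape walk-through of the same Gram computation for one small image (my addition, with shift=0):
import numpy as np

x = np.random.rand(1, 4, 4, 3).astype('float32')        # (batch, H, W, C)
feat = x.transpose(0, 3, 1, 2).reshape(1, 3, 16)        # (batch, C, H*W)
gram = feat @ feat.transpose(0, 2, 1) / feat[0].size    # normalize by C*H*W
print(gram.shape)                                       # (1, 3, 3)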
Example 17
    def _build_loss(self):
        """
        Builds gradient ascent loss

        Returns
        -------
        loss : keras.backend.ops.Tensor
            Loss tensor
        """
        # Get the symbolic outputs of each "key" layer (we gave them unique names).
        layer_dict = dict([(layer.name, layer) for layer in self.model.layers])
        # Define the loss.
        loss = K.variable(0.)
        for layer_name in self.layer_config:
            # Add the L2 norm of the features of a layer to the loss.
            assert layer_name in layer_dict.keys(
            ), 'Layer ' + layer_name + ' not found in model.'
            # feature map weight
            w = self.layer_config[layer_name]
            x = layer_dict[layer_name].output
            # We avoid border artifacts by only involving non-border pixels in the loss.
            scaling = K.prod(K.cast(K.shape(x), 'float32'))
            if K.image_data_format() == 'channels_first':
                loss += w * K.sum(K.square(x[:, :, 2:-2, 2:-2])) / scaling
            else:
                loss += w * K.sum(K.square(x[:, 2:-2, 2:-2, :])) / scaling
        return loss
Example 18
def psnr_masked(true, pred):
    # Note: reads self.config['pad_value'], so this function must be defined
    # where `self` is in scope (e.g. inside a method or closure).
    mask = K.cast(K.not_equal(true, self.config['pad_value']), K.floatx())
    return 10. * K.log(
        K.cast(
            K.sum(1 - K.all(1 - mask, axis=[2, 3, 4]), axis=1) *
            K.prod(K.shape(true)[2:]), K.floatx()) /
        K.sum(K.square(pred - true) * mask, axis=[1, 2, 3, 4])) / K.log(10.)
Example 19
def gram_matrix(x):
    x = K.permute_dimensions(x, (0, 3, 1, 2))
    shape = K.shape(x)
    feat = K.reshape(x, (shape[0], shape[1], shape[2] * shape[3]))
    num = K.batch_dot(feat, K.permute_dimensions(feat, (0, 2, 1)))
    den = K.prod(K.cast(shape[1:], K.floatx()))
    return num / den
Example 20
def pixelwise_crossentropy_loss_mc(y_true, y_hat):
    y_hat += 1e-8  # avoid issues with log
    # Per-class frequency, assuming channels_first (N, C, H, W) labels. (The
    # original chained K.sum calls over axes 0, 2 and 3 of successively
    # reduced tensors, which cannot all exist.) Reshape w so it broadcasts
    # over the channel axis.
    n_per_class = K.cast(K.prod(K.shape(y_true)) / K.shape(y_true)[1],
                         K.floatx())
    w = K.sum(y_true, axis=(0, 2, 3)) / n_per_class
    w = K.reshape(w, (1, -1, 1, 1))
    ce = -(1 - w) * y_true * K.log(y_hat) - w * (1. - y_true) * K.log(1 - y_hat)
    return K.mean(ce)
Example 21
def construct_loss(model, layer_contributions):

    layer_dict = dict([layer.name, layer] for layer in model.layers)

    loss = K.variable(0.0)

    for layer_name in layer_contributions:
        coeff = layer_contributions[layer_name]
        activation = layer_dict[layer_name].output  # the layer's output tensor, not the layer object

        # mean of sum of squares (L2norm) of activations scaled by contribution constitutes the loss.
        # avoiding border artifacts
        scaling = K.prod(K.cast(K.shape(activation), 'float32'))
        loss += coeff * K.sum(K.square(activation[:, 2:-2, 2:-2, :])) / scaling

    dream = model.input

    # The call to `gradients` returns a list of tensors (of size 1 in this case)
    # hence we only keep the first element -- which is a tensor.
    grads = K.gradients(loss, dream)[0]
    # Normalized gradients. We floor with 1e-7 before dividing so as to avoid accidentally dividing by 0.
    # grads /= (K.sqrt(K.mean(K.square(grads))) + EPSILON)
    grads /= K.maximum(K.mean(K.abs(grads)), EPSILON)

    # going from input image to loss and gradients
    iterate = K.function([dream], [loss, grads])

    return iterate
Example 22
        def training_phase():
            mean_batch = K.mean(mean_instance, axis=0, keepdims=True)
            variance_batch = K.mean(temp, axis=0,
                                    keepdims=True) - K.square(mean_batch)

            mean_batch_reshaped = K.flatten(mean_batch)
            variance_batch_reshaped = K.flatten(variance_batch)

            if K.backend() != 'cntk':
                sample_size = K.prod(
                    [K.shape(inputs)[axis] for axis in reduction_axes])
                sample_size = K.cast(sample_size, dtype=K.dtype(inputs))

                # sample variance - unbiased estimator of population variance
                variance_batch_reshaped *= sample_size / (sample_size -
                                                          (1.0 + self.epsilon))

            self.add_update([
                K.moving_average_update(self.moving_mean, mean_batch_reshaped,
                                        self.momentum),
                K.moving_average_update(self.moving_variance,
                                        variance_batch_reshaped, self.momentum)
            ], inputs)

            return normalize_func(mean_batch, variance_batch)
Example 23
    def get_output(self, train=False):
        def format_shape(shape):
            if K._BACKEND == 'tensorflow':
                def trf(x):
                    try:
                        return int(x)
                    except TypeError:
                        return x

                return list(map(trf, shape))  # list() so the result stays sliceable on Python 3
            return shape

        X = self.get_input(train)

        in_shape = format_shape(K.shape(X))
        batch_flatten_len = K.prod(in_shape[:2])
        cast_in_shape = (batch_flatten_len, ) + tuple(in_shape[i] for i in range(2, K.ndim(X)))
        
        pre_outs = self.layer(K.reshape(X, cast_in_shape))
        
        out_shape = format_shape(K.shape(pre_outs))
        cast_out_shape = (in_shape[0], in_shape[1]) + tuple(out_shape[i] for i in range(1, K.ndim(pre_outs)))
        
        outputs = K.reshape(pre_outs, cast_out_shape)
        return outputs
Example 24
 def exponent_neg_cosine_similarity(x):
     """ Helper function for the similarity estimate of the LSTMs outputs """
     leftNorm = K.l2_normalize(x[:, :hidden_size], axis=-1)
     rightNorm = K.l2_normalize(x[:, hidden_size:], axis=-1)
     return K.sum(K.prod([leftNorm, rightNorm], axis=0),
                  axis=1,
                  keepdims=True)
Example 25
def my_acc(target, output):
    target = K.cast(target, tf.int32)
    correct_count = K.sum(K.cast(K.equal(K.cast(K.argmax(target, axis=-1), tf.int32),
                                         K.cast(K.argmax(output, axis=-1), tf.int32)), tf.int32))
    neutral_count = K.sum(K.cast(K.equal(target[:, :, :, -1], 1), tf.int32))
    total_count = K.prod(K.shape(output)[:-1]) - neutral_count
    return tf.cast(correct_count / total_count, tf.float32)
Example 27
    def __call__(self, y_true: plaidml.tile.Value,
                 y_pred: plaidml.tile.Value) -> plaidml.tile.Value:
        """ Calculate the Laplacian Pyramid Loss.

        Parameters
        ----------
        y_true: :class:`plaidml.tile.Value`
            The ground truth value
        y_pred: :class:`plaidml.tile.Value`
            The predicted value

        Returns
        -------
        :class: `plaidml.tile.Value`
            The loss value
        """
        if not self._shape:
            self._shape = K.int_shape(y_pred)
        pyramid_true = self._get_laplacian_pyramid(y_true)
        pyramid_pred = self._get_laplacian_pyramid(y_pred)

        losses = K.stack([
            K.sum(K.abs(ppred - ptrue)) /
            K.cast(K.prod(K.shape(ptrue)), "float32")
            for ptrue, ppred in zip(pyramid_true, pyramid_pred)
        ])
        loss = K.sum(losses * self._weights)
        return loss
Example 28
        def loss_func(y_true, y_pred):
            y_shape = K.shape(y_true)

            y_true_mask = tf.where(y_true > -1,
                                   x=tf.ones_like(y_true),
                                   y=tf.zeros_like(y_true))
            centers = center_of_mass(y_true_mask)[:, :2]
            y_true_sm = tf.image.extract_glimpse(y_true,
                                                 crop_size,
                                                 offsets=centers,
                                                 normalized=False,
                                                 centered=False)
            y_pred_sm = tf.image.extract_glimpse(y_pred,
                                                 crop_size,
                                                 offsets=centers,
                                                 normalized=False,
                                                 centered=False)

            scale_difference = K.prod(y_shape[1:]) / (win * win)

            error = sse(y_true, y_pred) / tf.cast(scale_difference,
                                                  dtype=tf.float32)
            error += sse(y_true_sm, y_pred_sm) * factor

            return error / (factor + 1)
Example 29
def sparsity_level(x):
    _shape = K.get_variable_shape(x)
    shape = K.shape(x)
    total = K.cast(K.prod(shape[1:]), K.floatx())
    return K.reshape(
        K.sum(K.cast(x > 0.0, K.floatx()), axis=list(range(1, len(_shape)))),
        (-1, 1)) / total
Example 30
def gram_matrix(x):
    x = K.permute_dimensions(x, (0, 3, 1, 2))
    s = K.shape(x)
    feat = K.reshape(x, (s[0], s[1], s[2] * s[3]))
    feat_T = K.permute_dimensions(feat, (0, 2, 1))
    norm_factor = K.prod(K.cast(s[1:], K.floatx()))
    return K.batch_flatten(K.batch_dot(feat, feat_T) / norm_factor)
Example 31
def dream_fxn(model, target_layer_dict):
	K.set_learning_phase(0)

	# Build the InceptionV3 network with our placeholder.
	# The model will be loaded with pre-trained ImageNet weights.

	dream = model.input
	print('Model loaded.')

	# Get the symbolic outputs of each "key" layer (we gave them unique names).
	layer_dict = dict([(layer.name, layer) for layer in model.layers])

	# Define the loss.
	loss = K.variable(0.)
	for layer_name in target_layer_dict:
		# Add the L2 norm of the features of a layer to the loss.
		assert layer_name in layer_dict.keys(), 'Layer ' + layer_name + ' not found in model.'
		coeff = target_layer_dict[layer_name]
		x = layer_dict[layer_name].output
		# We avoid border artifacts by only involving non-border pixels in the loss.
		scaling = K.prod(K.cast(K.shape(x), 'float32'))
		if K.image_data_format() == 'channels_first':
			loss += coeff * K.sum(K.square(x[:, :, 2: -2, 2: -2])) / scaling
		else:
			loss += coeff * K.sum(K.square(x[:, 2: -2, 2: -2, :])) / scaling

	# Compute the gradients of the loss with respect to the dream image.
	grads = K.gradients(loss, dream)[0]
	# Normalize gradients.
	grads /= K.maximum(K.mean(K.abs(grads)), 1e-7)

	# Set up function to retrieve the value
	# of the loss and gradients given an input image.
	outputs = [loss, grads]
	return K.function([dream], outputs)
Example 32
 def __call__(self, loss):
     output = self.layer.get_output(True)
     batch_size = K.shape(output)[0] // 2
     generated = output[:batch_size, :, :, :]
     loss += self.weight * K.mean(
         K.sum(K.square(gram_matrix(self.target) - gram_matrix(generated)), axis=(1,2))
     ) / (4.0 * K.square(K.cast(K.prod(K.shape(generated)[1:]), K.floatx())))
     return loss
Example 33
    def call(self, x, mask=None):
        N_DECISION = (2 ** (self.n_depth)) - 1  # Number of decision nodes
        N_LEAF  = 2 ** (self.n_depth + 1)  # Number of leaf nodes

        flat_decision_p_e = []
        leaf_p_e = []
        for w_d, w_l in zip(self.w_d_ensemble, self.w_l_ensemble):

            decision_p = K.sigmoid((K.dot(x, w_d)))
            leaf_p = K.softmax(w_l)

            decision_p_comp = 1 - decision_p

            decision_p_pack = K.concatenate([decision_p, decision_p_comp])

            flat_decision_p_e.append(decision_p_pack)
            leaf_p_e.append(leaf_p)

        #Construct tiling pattern for decision probability matrix
        #Could be done in TF, but I think it's better statically
        tiling_pattern = np.zeros((N_LEAF, self.n_depth), dtype=np.int32)
        comp_offset = N_DECISION
        dec_idx = 0
        for n in range(self.n_depth):
            j = 0
            for depth_idx in range(2**n):
                repeat_times = 2 ** (self.n_depth - n)
                for _ in range(repeat_times):
                    tiling_pattern[j][n] = dec_idx
                    j = j + 1

                for _ in range(repeat_times):
                    tiling_pattern[j][n] = comp_offset + dec_idx
                    j = j + 1

                dec_idx = dec_idx + 1

        flat_pattern = tiling_pattern.flatten()

        # iterate over each tree
        tree_ret = None
        for flat_decision_p, leaf_p in zip(flat_decision_p_e, leaf_p_e):
            flat_mu = tf.transpose(tf.gather(tf.transpose(flat_decision_p), flat_pattern))
            
            batch_size = tf.shape(flat_decision_p)[0]
            shape = tf.stack([batch_size, N_LEAF, self.n_depth])  # tf.pack was renamed tf.stack

            mu = K.reshape(flat_mu, shape)
            leaf_prob = K.prod(mu, [2])
            prob_label = K.dot(leaf_prob, leaf_p)

            if tree_ret is None:
                tree_ret = prob_label
            else:
                tree_ret = tree_ret + prob_label

        return tree_ret/self.n_trees
Example 34
File: memn2n.py Project: poyuwu/QA
 def get_output(self, train=False):
     X = self.get_input(train)
     if self.mode == 'ave':
         return K.mean(X, axis=self.dims)
     elif self.mode == 'sum':
         return K.sum(X, axis=self.dims)
     elif self.mode == 'mul':
         return K.prod(X, axis=self.dims)
     else:
         raise Exception('Unknown merge mode')
Example 35
 def loss(y_true, y_pred):
     from plasma.conf import conf
     fac = MaxHingeTarget.fac
     #overall_fac = np.prod(np.array(K.shape(y_pred)[1:]).astype(np.float32))
     overall_fac = K.prod(K.cast(K.shape(y_pred)[1:], K.floatx()))
     max_val = K.max(y_pred, axis=-2)  # temporal axis!
     max_val1 = K.repeat(max_val, K.shape(y_pred)[-2])
     mask = K.cast(K.equal(max_val1, y_pred), K.floatx())
     y_pred1 = mask * y_pred + (1 - mask) * y_true
     weight_mask = K.mean(y_true, axis=-1)
     weight_mask = K.cast(K.greater(weight_mask, 0.0), K.floatx())  # positive label!
     weight_mask = fac * weight_mask + (1 - weight_mask)
     #return weight_mask*squared_hinge(y_true,y_pred1)
     return conf['model']['loss_scale_factor'] * overall_fac * weight_mask * hinge(y_true, y_pred1)
Example 36
from theano.tensor.nnet.neighbours import images2neibs, neibs2images

def get_single_deconv_out(input, filter):

    # reshape inputs
    img = input.reshape((1, 1, input.shape[0], input.shape[1]))
    kernel = filter.reshape((1, K.prod(filter.shape)))

    # construct split function
    # image = T.tensor4("image")
    neibs = images2neibs(img, neib_shape=filter.shape, neib_step=filter.shape)
    # window_function = theano.function([image], neibs)
    #
    # neibs_val = window_function(img_val)

    neibs = neibs*kernel

    # construct merge function
    img_new = neibs2images(neibs, filter.shape, img.shape)

    return img_new[0][0]
Example 37
    'mixed4': 2.,
    'mixed5': 1.5,
}
# Define the loss to be maximized.
# Get the symbolic outputs of each "key" layer (we gave them unique names).
layer_dict = dict([(layer.name, layer) for layer in model.layers])

# Define the loss.
loss = K.variable(0.)
for layer_name in layer_contributions:
    # Add the L2 norm of the features of a layer to the loss.
    coeff = layer_contributions[layer_name]
    activation = layer_dict[layer_name].output  # get the layer's output

    # We avoid border artifacts by only involving non-border pixels in the loss.
    scaling = K.prod(K.cast(K.shape(activation), 'float32'))
    loss += coeff * K.sum(K.square(activation[:, 2: -2, 2: -2, :])) / scaling

# Gradient ascent process
# This holds our generated image
dream = model.input

# Compute the gradients of the loss with respect to the dream image.
grads = K.gradients(loss, dream)[0]

# Normalize gradients.
grads /= K.maximum(K.mean(K.abs(grads)), 1e-7)

# Set up function to retrieve the value
# of the loss and gradients given an input image.
outputs = [loss, grads]
Example 38
dream = model.input
print('Model loaded.')

# Get the symbolic outputs of each "key" layer (we gave them unique names).
layer_dict = dict([(layer.name, layer) for layer in model.layers])

# Define the loss.
loss = K.variable(0.)
for layer_name in settings['features']:
    # Add the L2 norm of the features of a layer to the loss.
    # assert on the expression itself; the original asserted a tuple of
    # (expression, message), which is always truthy and never fires
    assert layer_name in layer_dict.keys(), \
        'Layer ' + layer_name + ' not found in model.'
    coeff = settings['features'][layer_name]
    x = layer_dict[layer_name].output
    # We avoid border artifacts by only involving non-border pixels in the loss.
    scaling = K.prod(K.cast(K.shape(x), 'float32'))
    if K.image_data_format() == 'channels_first':
        loss += coeff * K.sum(K.square(x[:, :, 2: -2, 2: -2])) / scaling
    else:
        loss += coeff * K.sum(K.square(x[:, 2: -2, 2: -2, :])) / scaling

# Compute the gradients of the loss with respect to the dream image.
grads = K.gradients(loss, dream)[0]
# Normalize gradients.
grads /= K.maximum(K.mean(K.abs(grads)), K.epsilon())

# Set up function to retrieve the value
# of the loss and gradients given an input image.
outputs = [loss, grads]
fetch_loss_and_grads = K.function([dream], outputs)
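A hypothetical call (my addition): the returned function maps a one-element list holding an image batch to [loss, grads].
import numpy as np

img = np.random.rand(1, 299, 299, 3).astype('float32')  # channels_last assumed
loss_value, grad_values = fetch_loss_and_grads([img])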