def distributed_dot_softmax(M, H):
    """
    Obtains a matrix m of recent embeddings
    and the hidden state h of the lstm
    and calculates the attention distribution over embeddings
    p = softmax(<m, h>)

    M: embeddings tensor of shape
        (batch_size, timesteps, history_length, units) or
        (timesteps, history_length, units)
    H: hidden state tensor of shape
        (batch_size, timesteps, units) or
        (timesteps, units)
    :return:
    """
    # flatten all dimensions of M except the last two
    M_shape = kb.print_tensor(kb.shape(M))
    M_shape = tuple((M_shape[i] for i in range(kb.ndim(M))))
    new_M_shape = (-1, ) + M_shape[-2:]
    H_shape = kb.print_tensor(kb.shape(H))
    new_H_shape = (-1, H_shape[-1])
    M_ = kb.reshape(M, new_M_shape)
    # new_H_shape = kb.concatenate([np.array([-1]), kb.shape(H)[-2:]], axis=0)
    H_ = kb.reshape(H, new_H_shape)
    energies = kb.batch_dot(M_, H_, axes=[2, 1])
    # Tensor representing shape is not iterable with tensorflow backend
    answer = kb.reshape(kb.softmax(energies), M_shape[:-1])
    if not hasattr(answer, "_keras_shape") and hasattr(M, "_keras_shape"):
        answer._keras_shape = M._keras_shape[:-1]
    return answer
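# A minimal NumPy cross-check (an illustrative sketch, not part of the original code)
# of p = softmax(<m, h>) for a single (batch, timestep) slice, mirroring what
# distributed_dot_softmax computes per position before reshaping back to M.shape[:-1].
import numpy as np

m = np.random.rand(5, 4)              # (history_length, units)
h = np.random.rand(4)                 # (units,)
energies = m @ h                      # <m_i, h> for every history entry
p = np.exp(energies) / np.exp(energies).sum()
print(p.shape, p.sum())               # (5,) 1.0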
def dice_coef(y_true, y_pred):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    # reassign so the print op is part of the graph that actually gets evaluated
    intersection = K.print_tensor(intersection, message="Dice intersection:")
    return -((2. * intersection + K.epsilon()) /
             (K.sum(y_true_f) + K.sum(y_pred_f) + K.epsilon()))
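# Hand-checkable sketch of the (negative) Dice coefficient returned above, with the
# epsilon smoothing omitted: dice = 2*|A.B| / (|A| + |B|); identical masks give -1.
import numpy as np

y_true = np.array([0., 1., 1., 0.])
y_pred = np.array([0., 1., 1., 0.])
intersection = np.sum(y_true * y_pred)                    # 2.0
dice = 2. * intersection / (y_true.sum() + y_pred.sum())  # 1.0
print(-dice)                                              # -1.0, the minimum of dice_coef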
Example #3
def self_aware_loss(y_true, y_pred):

    abstain_penalty = 2.  # ea
    adversarial_penalty = 10.  # eq

    ea = tf.constant(abstain_penalty, shape=(1, ))
    eq = tf.constant(adversarial_penalty, shape=(1, ))

    label_true = tf.argmax(y_true)
    label_pred = tf.argmax(y_pred)

    xe = tf.nn.softmax(logits=y_pred)

    # compute the intermediate penalty terms
    p1 = y_pred[1]
    p11 = K.constant(1) - K.gather(y_pred, label_true[0])
    p12 = tf.multiply(p1, p11)
    p2 = tf.multiply(y_pred[0], eq)
    ai = p12 + p2
    predict_options = K.concatenate([eq, p11])

    def f1():
        return K.gather(predict_options, label_pred)

    def f2():
        return ea

    # route ai and ea through print_tensor before the cond so the prints are evaluated
    ai = K.print_tensor(ai, message='ai = ')
    ea = K.print_tensor(ea, message='ea = ')
    L = tf.cond(tf.less(ai[0], ea[0]), f1, f2)

    return L
Example #4
def binary_crossentropy_MConly_Delphes(y_true, y_pred):
    """
    """
    printAll=False
    
    if printAll:
        y_pred=K.print_tensor(y_pred,' labelpred ')
        y_true=K.print_tensor(y_true,' ytrueLabel ')
    
    # the prediction if it is data or MC is in the first index (see model)
    labelpred = y_pred
    
    # the truth if it is data or MC, 0 for data
    
    isMCtrue = y_true[:,:1]
    # labels: B, C, UDSG 
    labels_true = y_true[:,1:]
   
    if printAll:
        isMCtrue=K.print_tensor(isMCtrue,' MCtruth')
        labels_true=K.print_tensor(labels_true,' labels')
    
    weighted_xentr = isMCtrue*K.binary_crossentropy(labelpred, labels_true)
    
    if printAll:
        weighted_xentr= K.print_tensor(weighted_xentr,' weighted xent ')
    
    out=K.mean( weighted_xentr )
    #sum weight again over all samples
    return out
Example #5
def train(dataset, epochs):

    for epoch in range(epochs):
        start = time.time()

        for image_batch in dataset:
            x, p_labels = image_batch

            (gen_loss, disc_loss) = train_step(x, p_labels)

        K.print_tensor(gen_loss, message='gen loss =')
        K.print_tensor(disc_loss, message='disc loss =')

        # generate and save at each epoch
        plot_predictions(generator, seed_start, seed_end, seed)
        # plot_predictions(generator, seed)

        generator.save(model_file)

        # Save the model every 5 epochs
        if (epoch + 1) % 5 == 0:
            checkpoint.save(file_prefix=checkpoint_prefix)

        print('Time for epoch {} is {} sec'.format(epoch + 1,
                                                   time.time() - start))
Example #6
    def call(self, x):
        shape = K.int_shape(x)
        # print(shape)
        # the_probs = K.mean(x)
        # the_probs = K.print_tensor(the_probs, message="the probs now are: ")
        # the_probs = np.repeat(the_probs, x_len)

        # backup = x

        dolog = False

        if dolog:
            x = K.print_tensor(x, message="[0] the x now is: ")
        x = K.mean(x, axis=[0], keepdims=True)
        if dolog:
            x = K.print_tensor(x, message="[1] the x now is: ")
        x = K.repeat_elements(x, 1, axis=0)
        if dolog:
            x = K.print_tensor(x, message="[2] the x now is: ")
        x = K.reshape(x, shape=(-1, self.output_dim))

        # x = K.permute_dimensions(x, (1, self.output_dim))
        if dolog:
            x = K.print_tensor(x, message="[3] the x now is: ")

        # raise Exception
        # x = backup

        return x
    def dual_loss(y_true, y_pred):
        # This function creates a graph that computes the Triplet Loss, the Regression MSE and fuses both.
        # To be compatible with keras, the only parameters this function can have are <y_true> and <y_pred>.

        if verbose: y_true = K.print_tensor(y_true, message="y_true : ")
        if verbose: y_pred = K.print_tensor(y_pred, message="y_pred : ")

        tripletLoss = tripletLoss_Wrapper(
            fvSize=fvSize, margin=margin, verbose=verbose)(
                y_true,
                y_pred)  # Creating the subgraph that computes the Triplet Loss
        if verbose:
            tripletLoss = K.print_tensor(tripletLoss, message="tripletLoss : ")

        y_pred_cropped = y_pred[:, 3 * fvSize:]
        y_true_cropped = y_true[:, 3 * fvSize:]
        # regressionLoss = regressionMSE_Wrapper(fvSize=fvSize, verbose=verbose)(y_true_cropped, y_pred_cropped) # Creating the subgraph that computes the regression Loss
        regressionLoss = regressionCE_Wrapper(fvSize=fvSize)(
            y_true_cropped, y_pred_cropped
        )  # Creating the subgraph that computes the regression Loss
        if verbose:
            regressionLoss = K.print_tensor(regressionLoss,
                                            message="regressionLoss : ")

        # dualLoss = tripletLoss * regressionLoss # Fusing both losses
        dualLoss = tripletLoss * tripletToRegressionRatio + regressionLoss * (
            1 - tripletToRegressionRatio)  # Fusing both losses
        if verbose: dualLoss = K.print_tensor(dualLoss, message="dualLoss : ")

        return dualLoss  # The tensor named "dualLoss"
def huber_loss_print(x, y):
    x = b.print_tensor(x, message="x= ")
    y = b.print_tensor(y, message="y= ")
    e = x - y
    e = b.print_tensor(e, message="e= ")
    loss = b.mean(b.sqrt(1 + b.square(e)) - 1, axis=-1)
    loss = b.print_tensor(loss, message="loss= ")
    return loss
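# The loss above is the pseudo-Huber loss with delta = 1:
#   L(e) = sqrt(1 + e^2) - 1
# which behaves like e^2 / 2 for small errors and roughly like |e| for large ones.
# A quick NumPy check of both regimes (illustrative only):
import numpy as np

e = np.array([0.1, 10.0])
print(np.sqrt(1 + e**2) - 1)   # ~[0.005, 9.05]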
Example #9
    def test_geom_type_loss(self):
        tensor1 = np.array(
            [[[1, 2, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]], dtype=float)
        tensor2 = np.array(
            [[[1, 2, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]]], dtype=float)
        K.print_tensor(tensor1)
        loss = geom_gaussian_loss(tensor1, tensor2).eval()
        self.assertAlmostEqual(loss, 3.26972228)
Example #10
    def loss(y_data, y_pred):
        y_true = [y_data[:, i] for i in range(4)]
        y_pred = [y_pred[:, i] for i in range(4)]
        sample_weights = y_data[:, 4]
        class_weights = y_data[:, 5]

        def crossentropy(i):
            score = keras.losses.categorical_crossentropy(y_true[i], y_pred[i])
            return weight(score, class_weights)

        def significance(i):
            s_w = expsig / K.sum(y_true[i])
            b_w = expbg / K.sum(1 - y_true[i])
            s = s_w * K.sum(sample_weights * y_pred[i] * y_true[i])
            b = b_w * K.sum(sample_weights * y_pred[i] * (1 - y_true[i]))
            b = tf.cond(b < 0, lambda: tf.constant(0.), lambda: b)
            return (s + b) / (s * s + K.epsilon())

        def asimov(i):
            s_w = expsig / K.sum(sample_weights * y_true[i])
            b_w = expbg / K.sum(sample_weights * (1 - y_true[i]))

            s = s_w * K.sum(sample_weights * y_pred[i] * y_true[i])
            b = b_w * K.sum(sample_weights * y_pred[i] * (1 - y_true[i]))
            b = tf.maximum(tf.constant(2.), b)
            sigB = sigma * b

            ln1_top = (s + b) * (b + sigB * sigB)
            ln1_bot = b * b + (s + b) * sigB * sigB
            ln1 = K.log(ln1_top / (ln1_bot + K.epsilon()) + K.epsilon())
            ln2 = K.log(1. + sigB * sigB * s /
                        (b * (b + sigB * sigB) + K.epsilon()))
            loss = 1. / K.sqrt(2 * ((s + b) * ln1 - b * b * ln2 /
                                    (sigB * sigB + K.epsilon())) + K.epsilon())
            # loss = weight(loss, sample_weights)
            return loss

        cc = crossentropy(0) \
           + crossentropy(1) \
           + crossentropy(2) \
           + crossentropy(3)

        asi = asimov(3)

        if debug:
            cc = K.print_tensor(cc, "cc =")
            asi = K.print_tensor(asi, "asi =")

        if name == 'crossentropy':
            return cc
        elif name == 'asimov':
            return asi + cc * cc_weight
        else:
            print("\x1b[38;5;1;1mError:\x1b[0m undefined loss name, {}".format(
                name))
            raise Exception("undefined loss")
Example #11
def n0_dft(n0_scaled):
    n0_scaled = K.print_tensor(n0_scaled, "n0_scaled is: ")
    n0 = n0_scaled * gain  #*P_max
    n0 = K.print_tensor(n0, "n0 is: ")
    #note n0_scaled = n0/P_max such that n0_scaled stays betwen [0..1]
    N = width
    cos_term = K.cos(n0 * K.cast(K.arange(N), dtype='float32') * np.pi / N)
    sin_term = K.sin(-n0 * K.cast(K.arange(N), dtype='float32') * np.pi / N)
    return K.concatenate([cos_term, sin_term], axis=-1)
def mean_pred(y_true, y_pred):
    print("----------------")
    y_true = K.print_tensor(y_true, message='y_true = ')
    y_pred = K.print_tensor(y_pred, message='y_pred = ')
    y_pred_idx = K.argmax(y_pred, axis=-1)
    y_pred_idx = K.cast( y_pred_idx, K.dtype(y_true) )
    print('-------------',K.dtype(y_pred_idx))
    compare = K.equal(y_true, y_pred_idx)
    compare = K.print_tensor(compare, message='compare = ')
    # cast the boolean comparison to floats so Keras can average it into a metric value
    return K.cast(compare, K.floatx())
def _iou_metric(y_true, y_pred, epsilon=1e-5, sequence_length=5):
    """ Inspired by: http://ronny.rest/tutorials/module/localization_001/intersect_of_union/

        Given two arrays `y_true` and `y_pred`, each row contains the bounding
        boxes for a sequence of digits (5 by default). Each digit is represented by 4 numbers:
            [y1, x1, y2, x2]
        where:
            x1,y1 is the upper-left corner
            x2,y2 is the lower-right corner
        It returns the Intersection over Union score for each sequence.

    Args:
        y_true:          (numpy array) sequence_length * each row containing [y1, x1, y2, x2] coordinates
        y_pred:          (numpy array) sequence_length * each row containing [y1, x1, y2, x2] coordinates
        epsilon:    (float) Small value to prevent division by zero
        sequence_length: (int) number of digits in the sequence

    Returns:
        (float) Mean IoU over all digits and sequences in the batch
    """

    # Reshape the sequence coordinates, which come flattened from the regressor.
    y_true = K.reshape(y_true, [-1, sequence_length, 4])
    y_pred = K.reshape(y_pred, [-1, sequence_length, 4])

    y_true = K.print_tensor(y_true, "y_true = ")

    # COORDINATES OF THE INTERSECTION BOXES
    y1 = K.maximum(y_true[:, :, 0], y_pred[:, :, 0])
    x1 = K.maximum(y_true[:, :, 1], y_pred[:, :, 1])
    y2 = K.minimum(y_true[:, :, 2], y_pred[:, :, 2])
    x2 = K.minimum(y_true[:, :, 3], y_pred[:, :, 3])

    # AREAS OF OVERLAP - Area where the boxes intersect
    width = (x2 - x1)
    height = (y2 - y1)

    # handle case where there is NO overlap
    width = K.clip(width, 0, None)
    height = K.clip(height, 0, None)

    area_overlap = width * height

    # COMBINED AREAS
    area_a = (y_true[:, :, 2] - y_true[:, :, 0]) * (y_true[:, :, 3] - y_true[:, :, 1])
    area_b = (y_pred[:, :, 2] - y_pred[:, :, 0]) * (y_pred[:, :, 3] - y_pred[:, :, 1])
    area_combined = area_a + area_b - area_overlap

    # RATIO OF AREA OF OVERLAP OVER COMBINED AREA
    iou = area_overlap / (area_combined + epsilon)
    iou = K.mean(iou)  # reduce mean across all axes

    return iou
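# A hand-checkable IoU example for one pair of [y1, x1, y2, x2] boxes, mirroring the
# per-digit computation above with the epsilon term omitted (illustrative sketch only):
import numpy as np

a = np.array([0., 0., 2., 2.])                    # ground-truth box, area 4
b = np.array([1., 1., 3., 3.])                    # predicted box, area 4
y1, x1 = max(a[0], b[0]), max(a[1], b[1])
y2, x2 = min(a[2], b[2]), min(a[3], b[3])
overlap = max(x2 - x1, 0.) * max(y2 - y1, 0.)     # 1.0
print(overlap / (4. + 4. - overlap))              # 1/7 ~= 0.143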
Example #14
        def gaussian_kl_loss(z_mean, z_log_var, params, debug):
            if debug:
                # print_tensor only takes effect when its output feeds the value that
                # is evaluated, so wrap the inputs before building the KL term
                z_mean = K.print_tensor(z_mean, "\nz_mean")
                z_log_var = K.print_tensor(z_log_var, "\nz_log_var")

            kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
            kl_loss = K.sum(kl_loss, axis=-1)
            kl_loss *= -0.5

            if debug:
                kl_loss = K.print_tensor(kl_loss, "\nkl_loss")

            return kl_loss
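# Closed form implemented above: for q = N(mu, sigma^2) against the unit Gaussian,
#   KL(q || N(0, 1)) = -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
# with z_log_var = log(sigma^2). NumPy sanity check: mu = 0, sigma = 1 gives KL = 0.
import numpy as np

z_mean, z_log_var = np.zeros(3), np.zeros(3)
print(-0.5 * np.sum(1 + z_log_var - z_mean**2 - np.exp(z_log_var)))   # 0.0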
Example #15
    def loss(y_true, y_pred):
        with K.name_scope('regression_loss'):
            if not K.is_tensor(y_pred):
                y_pred = K.constant(y_pred)

            y_true /= 100. # because our data is max 100 GeV

            if DEBUG:
                y_pred = K.print_tensor(y_pred, message='pred')
                y_true = K.print_tensor(y_true, message='true')

            return K.mean(K.square(y_true - y_pred) / y_true, axis=-1)
Example #16
		def critic_optimizer():
			R = K.placeholder(shape=(None,))
			critic = model_critic.output
			critic = K.print_tensor(critic, message='critic: ')
			Lv = K.mean(K.square(R - critic))
			Lv = K.sum(Lv)
			Lv = K.print_tensor(Lv, message='Lv: ')
			optimizer = RMSprop(lr=2.5e-4, rho=0.99, epsilon=0.01)
			#optimizer = my_optimizer_critic
			updates = optimizer.get_updates(model_critic.trainable_weights, [], Lv)
			train = K.function([model_critic.input, R], [Lv], updates=updates)
			return train		
Example #17
    def call(self, input):
        query, values = input

        # query = K.print_tensor(query, 'query')
        # values = K.print_tensor(values, 'values')

        # hidden shape == (batch_size, hidden size)
        # hidden_with_time_axis shape == (batch_size, 1, hidden size)
        # we are doing this to perform addition to calculate the score
        hidden_with_time_axis = K.expand_dims(query, 1)

        # score shape == (batch_size, max_length, hidden_size)
        score = self.V(
            K.tanh(self.W1(values) + self.W2(hidden_with_time_axis)))

        # score = K.print_tensor(score, 'score')

        # attention_weights shape == (batch_size, max_length, 1) # 32?
        # we get 1 at the last axis because we are applying score to self.V
        attention_weights = K.softmax(score, axis=1)

        # context_vector shape after sum == (batch_size, hidden_size)
        context_vector = attention_weights * values
        context_vector = K.sum(context_vector, axis=1)

        attention_weights = K.print_tensor(attention_weights,
                                           'attention_weights')

        return [context_vector, attention_weights]
def CCC4Keras(y_pred, y_true):
    y_true = K.print_tensor(y_true, message='y_true = ')
    pc = PearsonCorrelation4keras(y_true, y_pred)
    devP = K.std(y_pred, axis=0)
    devT = K.std(y_true, axis=0)
    meanP = K.mean(y_pred, axis=0)
    meanT = K.mean(y_true, axis=0)
    powMeans = K.pow(meanP-meanT,2)

    varP = K.var(y_pred, axis=0)
    varT = K.var(y_true, axis=0)

    numerator = 2*pc*devP*devT
    denominator = varP+varT+powMeans
    CCC = numerator/denominator
    return K.sum(CCC)
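# The concordance correlation coefficient computed above per output dimension:
#   CCC = 2*rho*sd(P)*sd(T) / (var(P) + var(T) + (mean(P) - mean(T))^2)
# A NumPy check on perfectly concordant data, which should give CCC = 1 (sketch only):
import numpy as np

t = np.array([1., 2., 3., 4.])
p = t.copy()
rho = np.corrcoef(p, t)[0, 1]
print(2 * rho * p.std() * t.std() / (p.var() + t.var() + (p.mean() - t.mean())**2))   # 1.0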
Example #19
    def sampling(args, operation):
        z_mean, z_log_var = args
        batch = K.shape(z_mean)[0]
        dim = K.int_shape(z_mean)[1]
        epsilon = K.random_normal(shape=(batch, dim), mean=0.0, stddev=1.0)
        if debug:
            z_mean = K.print_tensor(z_mean,
                                    "sampling" + operation + " z_mean")
            z_log_var = K.print_tensor(
                z_log_var, "sampling" + operation + " z_log_var")
            epsilon = K.print_tensor(epsilon,
                                     "sampling" + operation + " epsilon")
        latent_space = z_mean + K.exp(0.5 * z_log_var) * epsilon
        if debug:
            latent_space = K.print_tensor(
                latent_space, "sampling" + operation + " latent_space")
        return latent_space
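# The sampling above is the reparameterization trick: z = mu + sigma * eps with
# eps ~ N(0, I) and sigma = exp(0.5 * z_log_var), keeping the draw differentiable
# with respect to the mean and log-variance. A plain NumPy version of the same draw:
import numpy as np

z_mean, z_log_var = np.array([1.0, -1.0]), np.zeros(2)   # sigma = 1
eps = np.random.randn(2)
print(z_mean + np.exp(0.5 * z_log_var) * eps)            # sample centred on z_mean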
Example #20
    def gaussian_kl_divergence(m, v, params, debug):
        # tf.contrib was removed in TensorFlow 2.x; tfp.distributions offers the same API
        ds = tf.contrib.distributions
        p = ds.Normal(loc=m[0], scale=v[0])
        q = ds.Normal(loc=m[1], scale=v[1])
        kl = ds.kl_divergence(p, q)
        if debug:
            kl = K.print_tensor(kl, "\nkl_divergence")
        return kl
Example #21
def addNoise(x, sigma, len_test=None):
    if len_test is None:
        w = K.random_normal(K.shape(x), mean=0.0, stddev=sigma)
        positives = K.equal(x, 3)
        positives = K.cast(positives, K.floatx())
        noisy = x + w
        noisy = noisy - noisy * positives + 3 * positives
        noisy = K.print_tensor(noisy)
        return noisy
    else:
        w = np.random.normal(0.0, sigma, x.shape)
        noisy = x + w
        for noisy_test_i in range(0, noisy.shape[0]):
            if len_test[noisy_test_i][0] < noisy.shape[1]:
                noisy[noisy_test_i][int(len_test[noisy_test_i][0]):] = [3] * (
                    noisy.shape[1] - int(len_test[noisy_test_i][0]))
        return noisy
Example #22
def custom_loss(y_true, y_pred):

    x = y_true
    y = y_pred * macd
    y = tf.Print(y, [y], 'refactored indicator', summarize=4000)

    mx = K.mean(x)
    mx = K.print_tensor(mx, message="mx = ")
    my = K.mean(y)
    my = K.print_tensor(my, message="my = ")

    xm, ym = x - mx, y - my
    r_num = K.sum(tf.multiply(xm, ym))
    r_den = K.sqrt(tf.multiply(K.sum(K.square(xm)), K.sum(K.square(ym))))
    r = r_num / (r_den + K.epsilon())  # guard against division by zero

    r = K.maximum(K.minimum(r, 1.0), -1.0)
    return 1 - K.square(r)
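# The value above is 1 - r^2, where r is the Pearson correlation between y_true and the
# rescaled prediction, so perfectly (anti-)correlated series give a loss of 0. NumPy check:
import numpy as np

x = np.array([1., 2., 3., 4.])
y = 2 * x + 1                        # perfectly correlated with x
r = np.corrcoef(x, y)[0, 1]
print(1 - r**2)                      # ~0.0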
Example #23
    def binary_crossentropy(y_true, y_pred):
        l2 = 0.001
        y_true = K.print_tensor(y_true, 'y_true: ')
        print_y_pred = K.tf.print({'y_pred': y_pred}, summarize=-1)
        # `print_op` and `layer` are assumed to be defined in the enclosing scope
        with K.tf.control_dependencies([print_op, print_y_pred]):
            loss = K.mean(K.binary_crossentropy(y_true, y_pred),
                          axis=-1) + K.sum(l2 * K.square(layer))
            loss = K.tf.Print(loss, [loss], 'loss is: ', summarize=-1)
        return loss
Example #24
        def gaussian_reconstruction_loss(inputs, outputs, output_dim, params):
            if params['reconstruction_loss'] == 'mse':
                reconstruction_loss = mse(inputs, outputs)
            elif params['reconstruction_loss'] == 'binary_crossentropy':
                reconstruction_loss = binary_crossentropy(inputs, outputs)

            if debug:
                reconstruction_loss = K.print_tensor(
                    reconstruction_loss, "\ngaussian reconstruction_loss")

            reconstruction_loss *= output_dim

            if debug:
                reconstruction_loss = K.print_tensor(
                    reconstruction_loss,
                    "\ngaussian reconstruction_loss scaled up")

            return reconstruction_loss
Example #25
def D_Loss(y_true, y_pred):
    # Split the stacked discriminator outputs: the first 512 entries are D(x) on real
    # samples, the remainder are D(y) on generated samples.
    D_x = y_pred[:512]
    D_y = y_pred[512:]

    #Calculate and print the loss
    loss = -(K.mean(K.log(1 - D_y)) + K.mean(K.log(D_x)))

    loss = K.print_tensor(loss)

    return loss
Example #26
    def asimovSigLossInvert(y_true, y_pred):
        signalWeight = expectedSignal / K.sum(y_true)
        bkgdWeight = expectedBkgd / K.sum(1 - y_true)
        # signalWeight = 1.#expectedSignal/K.sum(y_true)
        # bkgdWeight = 1.#expectedBkgd/K.sum(1-y_true)

        s = signalWeight * K.sum(y_pred * y_true)
        b = bkgdWeight * K.sum(y_pred * (1 - y_true))
        b = tf.cond(b < 2, lambda: tf.constant(2.), lambda: b)
        # b = K.print_tensor(b, "b =")
        sigB = systematic * b

        ln1_top = (s + b) * (b + sigB * sigB)
        ln1_bot = b * b + (s + b) * sigB * sigB
        ln1 = K.log(ln1_top / (ln1_bot + K.epsilon()) + K.epsilon())

        ln2 = K.log(1. + sigB * sigB * s / (b *
                                            (b + sigB * sigB) + K.epsilon()))

        if debug:
            s = K.print_tensor(s, message='s = ')
            b = K.print_tensor(b, message='b = ')
            sigB = K.print_tensor(sigB, message='sigB = ')
            ln1 = K.print_tensor(ln1, message='ln1 = ')
            ln2 = K.print_tensor(ln2, message='ln2 = ')

        loss = 1. / (2 * ((s + b) * ln1 - b * b * ln2 /
                          (sigB * sigB + K.epsilon())) + K.epsilon()
                     )  #Add the epsilon to avoid dividing by 0
        if debug:
            loss = K.print_tensor(loss, message='loss = ')
        return loss
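# The expression above is the inverse of the squared Asimov discovery significance
# (Cowan et al., 2011) with relative background uncertainty `systematic`:
#   Z_A^2 = 2*[ (s+b)*ln( (s+b)*(b+sb^2) / (b^2 + (s+b)*sb^2) )
#               - (b^2/sb^2)*ln( 1 + sb^2*s / (b*(b+sb^2)) ) ]
# so minimizing 1/Z_A^2 maximizes the expected significance. NumPy check with toy numbers:
import numpy as np

s, b, sb = 10., 100., 10.          # signal, background, 10% background uncertainty
za2 = 2 * ((s + b) * np.log((s + b) * (b + sb**2) / (b**2 + (s + b) * sb**2))
           - (b**2 / sb**2) * np.log(1 + sb**2 * s / (b * (b + sb**2))))
print(np.sqrt(za2))                # ~0.69, close to s / sqrt(b + sb^2)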
Example #27
def average_fn(x):
    # print(x.get_shape()[1])

    shape = x.get_shape()
    # try:
    #     print(K.get_value(K.mean(x)))
    # except: # Exception(x):
    #     print("get_value raised an exc")
    #     #print(x)

    # try:
    #     print(K.batch_get_value(x))
    # except: #Exception(x):
    #     print("batch_get_value raised an exc")
    #     #print(x)


    # raise Exception
    # x_length = len(x)
    # the_probs = sum(x) / x_length
    # the_probs = the_probs.reshape(1, -1)
    # the_probs = np.repeat(the_probs, x_length, axis=0)
    # return the_probs

    the_probs = K.mean(x)
    the_probs = K.print_tensor(the_probs, message="the probs now are: ")
    # the_probs = np.repeat(the_probs, x_len)

    backup = x

    x = K.print_tensor(x, message="the x now is: ")
    x = K.mean(x, axis=[1])
    # x = K.repeat_elements(x, shape[1], axis=0)
    # NOTE: `self` must be available from an enclosing scope for this reshape to work;
    # its result is discarded below anyway when x is restored from backup
    x = K.reshape(x, shape=(self.input_shape, shape[1]))
    x = K.print_tensor(x, message="the x now is: ")

    x = backup

    return x
Example #28
    def asimovSigLossInvert(y_true, y_pred):
        l = losses(y_true, y_pred)
        crossentropy = l['crossentropy']
        asimov = l['asimov']
        sample_weights = l['sample_weights']
        class_weights = l['class_weights']

        def weight(score, weights):
            score_arr = (score * weights) \
                      / K.mean(K.cast(K.not_equal(weights, 0), K.floatx()))
            return K.mean(score_arr)

        loss_bg = weight(crossentropy(0), class_weights) \
                + weight(crossentropy(1), class_weights) \
                + weight(crossentropy(2), class_weights) \
                + weight(crossentropy(3), class_weights)
        loss_sig = asimov(3)

        if debug:
            loss_bg = K.print_tensor(loss_bg, "loss_bg = ")
            loss_sig = K.print_tensor(loss_sig, "loss_sig = ")
        return loss_sig + 1e-2 * loss_bg
Example #29
def paper_regulariser(weights):
    '''Purpose is to make the topic vectors mutually orthogonal, i.e. their
    pairwise dot products should be zero'''
    normalised_weights = weights / tf.sqrt(
        tf.reduce_sum(tf.square(weights), axis=0, keepdims=True))
    dot_prod_between_topic_matrices = tf.matmul(
        tf.transpose(normalised_weights), normalised_weights)
    dot_prod_between_topic_matrices = K.print_tensor(
        dot_prod_between_topic_matrices, "dot_prod_between_topic_matrices = ")
    minus_identity_matrix = dot_prod_between_topic_matrices - tf.eye(11)
    absolute_value = tf.abs(minus_identity_matrix)
    sum_columns = tf.reduce_sum(absolute_value, axis=0)
    sum_all_elements = tf.reduce_sum(sum_columns)
    return sum_all_elements
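# Quick NumPy check of the penalty on already-orthogonal topic vectors, where it should
# vanish (the 11 mirrors the hard-coded tf.eye(11) above; illustrative sketch only):
import numpy as np

w = np.eye(11)                                         # 11 orthonormal topic vectors
wn = w / np.sqrt((w**2).sum(axis=0, keepdims=True))
gram = wn.T @ wn
print(np.abs(gram - np.eye(11)).sum())                 # 0.0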
Example #30
    def return_heatmap(self, model, org_img, normalise=True):
        """CAM implementation. An activation heatmap is produced for every test image."""
        test_img = model.output[:, 1]
        if self.model_type == 'simple':
            last_conv_layer = model.get_layer('conv2d_3')
        else:
            last_conv_layer = model.get_layer('conv2d_6')
        grads = K.gradients(test_img, last_conv_layer.output)[0]

        pooled_grads = K.mean(grads, axis=(0, 1, 2))
        message = K.print_tensor(pooled_grads, message='pool_grad = ')
        iterate = K.function([model.input],
                             [message, last_conv_layer.output[0]])
        pooled_grads_value, conv_layer_output_value = iterate([org_img.reshape(-1, self.img_rows, self.img_cols, self.c_dim)])
        heatmap = np.mean(conv_layer_output_value, axis=-1)
        if normalise:
            heatmap = np.maximum(heatmap, 0)
            heatmap /= np.max(heatmap)
        return heatmap
Example #31
from __future__ import print_function
import math
import keras
import numpy
from keras import backend as K

print("math.exp(4):", math.exp(4))
print(numpy.random.rand(3,2))
# compute the squared values first, then pass the result through print_tensor
K_square = K.square(numpy.random.rand(3, 2))
K_square = K.print_tensor(K_square, message="keras.square() is ")

#print("keras.square():", keras.square(numpy.random.rand(3,2)))