Example #1
from keras import backend as K  # assumed import; these snippets use K for the Keras backend

def stock_loss(y_true, y_pred):
    # asymmetric penalty: a heavy quadratic cost when prediction and target
    # disagree in sign, plain absolute error otherwise
    alpha = 100.
    loss = K.switch(K.less(y_true * y_pred, 0),
                    alpha * y_pred**2 - K.sign(y_true) * y_pred + K.abs(y_true),
                    K.abs(y_true - y_pred))
    return K.mean(loss, axis=-1)
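A minimal usage sketch (the model below is illustrative, not from the source): a custom loss such as stock_loss is passed to compile() the same way as a built-in loss.

from keras.models import Sequential
from keras.layers import Dense

# hypothetical regression model; only the compile call matters here
model = Sequential([Dense(32, activation='relu', input_shape=(10,)),
                    Dense(1)])
model.compile(optimizer='adam', loss=stock_loss)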
Example #2
def weighted_bce_dice_loss(y_true, y_pred):
    y_true = K.cast(y_true, 'float32')
    y_pred = K.cast(y_pred, 'float32')
    # to keep the output the same size, the kernel size must be an odd number
    averaged_mask = K.pool2d(
        y_true, pool_size=(11, 11), strides=(1, 1), padding='same', pool_mode='avg')
    # pixels near a mask border get triple weight; renormalize so the total
    # weight mass stays unchanged
    border = K.cast(K.greater(averaged_mask, 0.005), 'float32') * \
        K.cast(K.less(averaged_mask, 0.995), 'float32')
    weight = K.ones_like(averaged_mask)
    w0 = K.sum(weight)
    weight += border * 2
    w1 = K.sum(weight)
    weight *= (w0 / w1)
    return weighted_bce_loss(y_true, y_pred, weight) + \
        weighted_dice_loss(y_true, y_pred, weight)
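weighted_bce_loss and weighted_dice_loss are helpers not shown on this page; one plausible shape for the BCE helper, sketched as an assumption rather than the original code:

def weighted_bce_loss(y_true, y_pred, weight):
    # per-pixel binary cross-entropy, scaled by the weight map
    eps = K.epsilon()
    y_pred = K.clip(y_pred, eps, 1. - eps)
    bce = -(y_true * K.log(y_pred) + (1. - y_true) * K.log(1. - y_pred))
    return K.sum(bce * weight) / K.sum(weight)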
Example #3
    def output_sampling(self, output, rand_matrix):
        # Generates a sampled selection based on raw output state vector
        # Creates a cdf vector and compares against a randomly generated vector
        # Requires a pre-generated rand_matrix (i.e. generated outside step function)

        sampled_output = output / K.sum(output, axis=-1, keepdims=True)  # (batch_size, self.units)
        mod_sampled_output = sampled_output / K.exp(self.temperature)
        norm_exp_sampled_output = mod_sampled_output / K.sum(mod_sampled_output, axis=-1, keepdims=True)

        cdf_vector = K.cumsum(norm_exp_sampled_output, axis=-1)
        cdf_minus_vector = cdf_vector - norm_exp_sampled_output

        rand_matrix = K.stack([rand_matrix], axis=0)
        rand_matrix = K.stack([rand_matrix], axis=2)

        compared_greater_output = K.cast(K.greater(cdf_vector, rand_matrix), dtype='float32')
        compared_lesser_output = K.cast(K.less(cdf_minus_vector, rand_matrix), dtype='float32')

        final_output = compared_greater_output * compared_lesser_output
        return final_output
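For intuition: with normalized probabilities [0.2, 0.5, 0.3], the CDF is [0.2, 0.7, 1.0] and the shifted CDF is [0.0, 0.2, 0.7]. A uniform draw r = 0.6 makes cdf_vector > r at positions 2 and 3 and cdf_minus_vector < r at positions 1 and 2, so their elementwise product is the one-hot sample [0, 1, 0].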
Example #4
 def _loop_over_nodes_condition(node_idx, h, c):
     return K.less(node_idx, num_nodes)
Example #5
 def is_iterate_neurons_n(idx_p, idx_l, idx_m, idx_n):
     return k.less(idx_n, shape[3])
Example #6
 def is_iterate_powers(act_value, idx_p, idx_l, idx_m, idx_n):
     return k.less(idx_p, num_bits)
Example #7
def custom_accuracy(y_true, y_pred, thresh):
    # score only entries whose true value clears the threshold; count a
    # prediction as correct when it is within 0.05 of the target
    mask = y_true > thresh + 0.1
    y_true_t = tf.boolean_mask(y_true, mask)
    y_pred_t = tf.boolean_mask(y_pred, mask)
    return K.mean(K.cast(K.less(K.abs(y_true_t - y_pred_t), 0.05), K.floatx()))
Example #8
def FP(y_true, y_pred):
    # with binary one-hot labels, argmax yields 0/1 class indices, so
    # true < pred counts predictions of class 1 where the truth is class 0
    y_true = K.argmax(y_true, axis=-1)
    y_pred = K.argmax(y_pred, axis=-1)
    return K.sum(K.cast(K.less(y_true, y_pred), K.floatx()))
Example #9
def false_positive(y_true, y_pred):
    return K.sum(K.cast(K.less(y_true, y_pred), K.floatx()))
Example #10
    def max_sim_acc(y_true, y_pred):
        # 1 when the most similar centroid is the true one, else 0
        centroids = K.constant(embedding.T)
        sim = K.dot(y_pred, centroids)
        true_sim = K.sum(y_pred * y_true, axis=-1)
        return K.cast(K.less(K.abs(K.max(sim, axis=-1) - true_sim), 1e-6), K.floatx())
Example #11
def limit(x):
    # clamp runaway activations to fixed sentinel values
    y = tf.where(K.greater(x, 100000.), 1000000. * K.ones_like(x), x)
    z = tf.where(K.less(y, -100000.), -1000000. * K.ones_like(x), y)
    return z
Example #12
def PredictClass(y_pred, thresholdLow=0.5, thresholdHigh=1):
    # 1 when the prediction lies strictly inside (thresholdLow, thresholdHigh)
    in_band = K.greater(y_pred, thresholdLow) & K.less(y_pred, thresholdHigh)
    return K.tf.cast(in_band, K.tf.int8)
Example #13
 def tmp(v):
     r = K.arange(size, dtype=K.floatx())
     mask = K.less(r, trunc)
     return K.cast(mask, K.floatx())
Example #14
 def func(tensors):
     return K.cast(K.less(tensors[0], tensors[1]), dtype='float32')
Example #15
def eucAcc(y_true, y_pred):
    thresh = 0.5
    return K.mean(K.equal(y_true, tf.to_float(K.less(thresh, y_pred))), axis=-1)
Example #16
    def get_loss(self):
        def cal_df_gp():
            def cal_gp(gradients):
                gradients_sqr = K.square(gradients[0])
                gradients_sqr_sum = K.sum(gradients_sqr,
                                          axis=np.arange(
                                              1, len(gradients_sqr.shape)))
                gradient_l2_norm = K.sqrt(gradients_sqr_sum)
                gradient_penalty = K.mean(K.square(1 - gradient_l2_norm))
                return gradient_penalty

            alpha = K.random_uniform_variable(shape=(1, ), low=0, high=1)

            mix_tar = alpha * self.img_a + (1 - alpha) * self.img_a2b

            mix_outputs_a2b = self.d_model(
                [self.img_a, mix_tar, self.vec_ab_pos])
            mix_outputs_a2ab = self.d_model(
                [self.img_a, self.img_a2ab, self.vec_ab_pos])

            gradients_a2b = K.gradients([mix_outputs_a2b[0]], [mix_tar])
            gradients_a2ab = K.gradients([mix_outputs_a2ab[2]],
                                         [self.img_a2ab])

            df_gp = cal_gp(gradients_a2b) + cal_gp(gradients_a2ab)

            return df_gp

        def lsgan(xs, ts):
            real = 0
            fake = 0
            for i in range(len(xs)):
                if ts[i] == 1:
                    real += K.mean(K.square(K.ones_like(xs[i]) - xs[i]),
                                   axis=[-1])
                else:
                    fake += K.mean(K.square(K.zeros_like(xs[i]) - xs[i]),
                                   axis=[-1])

            return real + fake

        self.img_a = Input(shape=self.img_shape)
        self.img_b = Input(shape=self.img_shape)
        self.img_c = Input(shape=self.img_shape)

        self.vec_ab_pos = Input(shape=self.vec_shape)
        self.vec_ac_pos = Input(shape=self.vec_shape)
        self.vec_cb_pos = Input(shape=self.vec_shape)

        self.img_a2b, self.enc_a2b = self.g_model(
            [self.img_a, self.vec_ab_pos])
        self.img_a2a, self.enc_a2a = self.g_model(
            [self.img_a, K.zeros_like(self.vec_ab_pos)])
        self.img_a2b2a, _ = self.g_model([self.img_a2b, -self.vec_ab_pos])

        inter_seed = K.random_uniform_variable(shape=([
            self.batch,
        ]),
                                               low=0,
                                               high=1)
        inter_seed = K.reshape(inter_seed, [self.batch, 1])
        self.img_a2ab, self.enc_a2ab = self.g_model(
            [self.img_a, inter_seed * self.vec_ab_pos])

        input_real = [self.img_a, self.img_b, self.vec_ab_pos]
        input_fake = [self.img_a, self.img_a2b, self.vec_ab_pos]
        input_w_ori = [self.img_c, self.img_b, self.vec_ab_pos]
        input_w_tar = [self.img_a, self.img_c, self.vec_ab_pos]
        input_w_vec1 = [self.img_a, self.img_b, self.vec_ac_pos]
        input_w_vec2 = [self.img_a, self.img_b, self.vec_cb_pos]

        input_inter = [self.img_a, self.img_a2ab, inter_seed * self.vec_ab_pos]
        input_zero = [self.img_a, self.img_a2a, K.zeros_like(self.vec_ab_pos)]

        d_real, dc_real, _ = self.d_model(input_real)

        d_fake, dc_fake, di_fake = self.d_model(input_fake)

        d_w_ori, dc_w_ori, _ = self.d_model(input_w_ori)
        d_w_tar, dc_w_tar, _ = self.d_model(input_w_tar)
        d_w_vec1, dc_w_vec1, _ = self.d_model(input_w_vec1)
        d_w_vec2, dc_w_vec2, _ = self.d_model(input_w_vec2)

        _, _, di_inter = self.d_model(input_inter)
        _, _, di_zero = self.d_model(input_zero)

        self.df_loss = lsgan([d_real, d_fake], [1, 0])
        self.dc_loss = lsgan(
            [dc_real, dc_fake, dc_w_ori, dc_w_tar, dc_w_vec1, dc_w_vec2],
            [1, 0, 0, 0, 0, 0])
        inter_seed_rep = K.flatten(inter_seed)

        di_temp = K.switch(
            K.less(inter_seed_rep, 0.5 * K.ones_like(inter_seed_rep)), di_zero,
            di_fake)

        self.di_loss = K.square(
            K.minimum(inter_seed_rep,
                      K.ones_like(inter_seed_rep) - inter_seed_rep) *
            K.ones_like(di_inter) - di_inter) + K.square(di_temp)
        print('self.df_loss', K.int_shape(self.df_loss))
        print('self.dc_loss', K.int_shape(self.dc_loss))
        print('self.di_loss', K.int_shape(self.di_loss))

        self.df_gp = cal_df_gp()

        self.d_loss = self.df_loss + self.dc_loss + self.gp_l * self.df_gp + self.lambda5 * self.di_loss

        self.gf_loss = lsgan([d_real, d_fake], [0, 1])
        self.gc_loss = lsgan([dc_real, dc_fake], [0, 1])
        self.gi_loss = K.square(di_inter)

        dist_a2b = self.enc_a2b - self.enc_a2a
        dist_a2ab = self.enc_a2ab - self.enc_a2a

        inter_seed = K.reshape(inter_seed, [self.batch, 1, 1, 1])
        self.g_inter_loss = K.mean(K.abs(inter_seed * dist_a2b - dist_a2ab))

        g_loss_rec1 = K.mean(K.abs(self.img_a - self.img_a2b2a))
        g_loss_rec2 = K.mean(K.abs(self.img_a - self.img_a2a))

        print('self.gf_loss', K.int_shape(self.gf_loss))
        print('self.gc_loss', K.int_shape(self.gc_loss))
        print('self.gi_loss', K.int_shape(self.gi_loss))
        print('self.g_loss_rec1', K.int_shape(g_loss_rec1))
        print('self.g_loss_rec2', K.int_shape(g_loss_rec2))

        self.gr_loss = self.lambda1 * g_loss_rec1 + self.lambda2 * g_loss_rec2
        self.g_loss = self.gf_loss + self.gc_loss + self.gr_loss + self.lambda5 * self.gi_loss
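A minimal training-step sketch (an assumption, not shown in the source; Keras 2.1-style optimizer API) that turns the symbolic d_loss into a callable update function:

from keras.optimizers import Adam

d_updates = Adam(lr=1e-4).get_updates(
    self.d_loss, self.d_model.trainable_weights)
self.d_train = K.function(
    [self.img_a, self.img_b, self.img_c,
     self.vec_ab_pos, self.vec_ac_pos, self.vec_cb_pos],
    [self.d_loss], updates=d_updates)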
Example #17
    def matching_loss(y_true, y_pred):
        # # the indexes of a1, a2, b1, b2
        # args = self.regression_model.predict_on_batch(correlation)
        # a and b are the original inputs

        # inverse a or b ??

        sess = tf.InteractiveSession()
        # a, b and main_log are assumed to be defined in the enclosing scope
        main_log.debug(a.shape)

        main_log.debug(y_pred.eval()[0, 1])
        main_log.debug(y_pred.eval()[0, 1].shape)

        aa = a[:, :, y_pred.eval()[0] - 1, :]
        main_log.debug(aa)

        a1 = tf.squeeze(a[:, :, y_pred.eval()[0], :])
        a2 = tf.squeeze(a[:, :, y_pred.eval()[1], :])
        b1 = tf.squeeze(b[:, :, y_pred.eval()[2], :])
        b2 = tf.squeeze(b[:, :, y_pred.eval()[3], :])

        d = subtract([b1, a1])
        dx = d[0]
        dy = d[1]

        translated_a2 = add([a2, d])

        # cosine of the angle between the edge vectors at the matched points
        cos = tf.squeeze(
            dot([translated_a2 - b1, b2 - b1], axes=0, normalize=True))
        sin = tf.sqrt(1. - cos * cos)

        # rotation_m = np.array([
        #     [cos, -sin],
        #     [sin, cos]
        # ])
        # tf_rotation_m = tf.constant(rotation_m, dtype=tf.float32)

        # translation_m = np.array([
        #     [dx],
        #     [dy]
        # ])
        # tf_translation_m = tf.constant(translation_m, dtype=tf.float32)

        # assemble the 3x3 affine (rotation + translation) matrix from tensors
        tf_affine = tf.stack([tf.stack([cos, -sin, dx]),
                              tf.stack([sin, cos, dy]),
                              tf.constant([0., 0., 1.])])

        homo = np.ones((1, tf.shape(a).eval()[1]))
        tf_homo = tf.constant(homo, dtype=tf.float32)

        homo_a = concatenate([a, tf_homo], axis=0)
        homo_b = concatenate([b, tf_homo], axis=0)

        new_a_homo = tf.matmul(tf_affine, homo_a)
        new_a = new_a_homo[:, 0:2, :, :]

        new_a1 = tf.squeeze(new_a[:, :, y_pred[0], :])
        new_a2 = tf.squeeze(new_a[:, :, y_pred[1], :])

        pi = tf.constant(3.14159265, dtype=tf.float32)

        # convexity matching cost
        def convexity_cue(contour, i):
            # boundary??
            v1 = tf.squeeze(contour[:, :, i, :])
            v0 = tf.squeeze(contour[:, :, i - 1, :])
            v2 = tf.squeeze(contour[:, :, i + 1, :])

            v01 = subtract([v1, v0])
            v21 = subtract([v2, v1])

            zero = tf.constant([[0]], dtype=tf.float32)
            v01 = tf.reshape(tf.concat([v01, zero], axis=0), [3])
            v21 = tf.reshape(tf.concat([v21, zero], axis=0), [3])

            sign = tf.sign(tf.cross(v01, v21)[2])

            theta = tf.acos(cos)

            convexity = sign * (pi - theta)

            return convexity

        convexity_loss = tf.Variable(1.0, dtype=tf.float32)
        fc_loss = tf.Variable(0, dtype=tf.float32)
        # sess = tf.InteractiveSession()
        sess.run(tf.global_variables_initializer())

        # how to define the bijective function??
        i = tf.cast(y_pred[0], tf.int32)
        i_last = tf.cast(y_pred[1] + 1, tf.int32)
        j = tf.cast(y_pred[2], tf.int32)
        j_last = tf.cast(y_pred[3] + 1, tf.int32)

        while_condition = lambda i, i_last, j, j_last, fc_loss: K.less(
            i, i_last)

        def while_body(i, i_last, j, j_last, fc_loss):
            main_log.debug('while_body')
            alpha_a = convexity_cue(new_a, i)
            alpha_b = convexity_cue(b, j)

            prod = alpha_a * alpha_b
            fc_loss = fc_loss + tf.sign(prod) * tf.sqrt(tf.abs(prod))

            return [i + 1, i_last, j + 1, j_last, fc_loss]

        results = tf.while_loop(while_condition, while_body,
                                [i, i_last, j, j_last, fc_loss])

        fc_loss = results[4]

        convexity_loss = convexity_loss + tf.divide(
            fc_loss, tf.cast(i_last - i, tf.float32))

        sess.close()

        return convexity_loss
Example #18
 def call(self, x, mask=None):
     return K.less(K.abs(x), 1)
Example #19
def calc_accuracy(labels, predictions):
    '''accuracy function for compilation'''
    return K.mean(K.equal(labels, K.cast(K.less(predictions, 0.5), "float32")))
Example #20
    def step(self, x, states):

        ytm, stm, t = states

        # repeat the hidden state to the length of the sequence
        _stm = K.repeat(stm, self.timesteps)

        # now multiply the weight matrix with the repeated hidden state
        _Wxstm = K.dot(_stm, self.W_a)

        # calculate the attention probabilities
        # this relates how much other timesteps contributed to this one.
        et = K.dot(activations.tanh(_Wxstm + self._uxpb),
                   K.expand_dims(self.V_a))

        if self.causal and not self.use_attention_horizon:
            is_future = K.greater(self._input_t, t)
            mask = K.cast(is_future, 'float32') * -10e9
            et = et + K.expand_dims(K.expand_dims(mask, -1), 0)
        elif self.causal and self.use_attention_horizon:
            is_future = K.greater(self._input_t, t)
            is_beyond_horizon = K.less(self._input_t, t - self.attn_horizon)
            mask_future = K.cast(is_future, 'float32') * -10e9
            mask_past = K.cast(is_beyond_horizon, 'float32') * -10e9
            mask = mask_future + mask_past
            et = et + K.expand_dims(K.expand_dims(mask, -1), 0)

        at = K.softmax(et, axis=1)

        # calculate the context vector
        context = K.squeeze(K.batch_dot(at, self.x_seq, axes=1), axis=1)
        # ~~~> calculate new hidden state
        # first calculate the "r" gate:

        rt = activations.sigmoid(
            K.dot(ytm, self.W_r) + K.dot(stm, self.U_r) +
            K.dot(context, self.C_r) + self.b_r)

        # now calculate the "z" gate
        zt = activations.sigmoid(
            K.dot(ytm, self.W_z) + K.dot(stm, self.U_z) +
            K.dot(context, self.C_z) + self.b_z)

        # calculate the proposal hidden state:
        s_tp = activations.tanh(
            K.dot(ytm, self.W_p) + K.dot((rt * stm), self.U_p) +
            K.dot(context, self.C_p) + self.b_p)

        # new hidden state:
        st = (1 - zt) * stm + zt * s_tp

        yt = self.activation(
            K.dot(ytm, self.W_o) + K.dot(stm, self.U_o) +
            K.dot(context, self.C_o) + self.b_o)

        t += 1

        if self.return_probabilities:
            return at, [yt, st, t]
        else:
            return yt, [yt, st, t]
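Both causal branches rely on the standard additive masking trick: timesteps that should be invisible receive a large negative constant (-10e9) on their logits, so the softmax on the following line assigns them near-zero attention weight; with use_attention_horizon, timesteps more than attn_horizon steps in the past are masked the same way.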
Example #21
def get_lr_decay_schedule(args):
    number_of_iters_generator = 1000. * args.number_of_epochs
    number_of_iters_discriminator = 1000. * args.number_of_epochs * args.training_ratio

    if args.lr_decay_schedule is None:
        lr_decay_schedule_generator = lambda iter: 1.
        lr_decay_schedule_discriminator = lambda iter: 1.
    elif args.lr_decay_schedule == 'linear':
        lr_decay_schedule_generator = lambda iter: K.maximum(
            0., 1. - K.cast(iter, 'float32') / number_of_iters_generator)
        lr_decay_schedule_discriminator = lambda iter: K.maximum(
            0., 1. - K.cast(iter, 'float32') / number_of_iters_discriminator)
    elif args.lr_decay_schedule == 'half-linear':
        lr_decay_schedule_generator = lambda iter: ktf.where(
            K.less(iter, K.cast(number_of_iters_generator / 2, 'int64')),
            ktf.maximum(
                0., 1. -
                (K.cast(iter, 'float32') / number_of_iters_generator)), 0.5)
        lr_decay_schedule_discriminator = lambda iter: ktf.where(
            K.less(iter, K.cast(number_of_iters_discriminator / 2, 'int64')),
            ktf.maximum(
                0., 1. - (K.cast(iter, 'float32') /
                          number_of_iters_discriminator)), 0.5)
    elif args.lr_decay_schedule == 'linear-end':
        decay_at = 0.828

        number_of_iters_until_decay_generator = number_of_iters_generator * decay_at
        number_of_iters_until_decay_discriminator = number_of_iters_discriminator * decay_at

        number_of_iters_after_decay_generator = number_of_iters_generator * (
            1 - decay_at)
        number_of_iters_after_decay_discriminator = number_of_iters_discriminator * (
            1 - decay_at)

        lr_decay_schedule_generator = lambda iter: ktf.where(
            K.greater(iter,
                      K.cast(number_of_iters_until_decay_generator, 'int64')),
            ktf.maximum(
                0., 1. - (K.cast(iter, 'float32') -
                          number_of_iters_until_decay_generator) /
                number_of_iters_after_decay_generator), 1)
        lr_decay_schedule_discriminator = lambda iter: ktf.where(
            K.greater(
                iter, K.cast(number_of_iters_until_decay_discriminator, 'int64'
                             )),
            ktf.maximum(
                0., 1. - (K.cast(iter, 'float32') -
                          number_of_iters_until_decay_discriminator) /
                number_of_iters_after_decay_discriminator), 1)
    elif args.lr_decay_schedule.startswith("dropat"):
        drop_at = int(args.lr_decay_schedule.replace('dropat', ''))
        drop_at_generator = drop_at * 1000
        drop_at_discriminator = drop_at * 1000 * args.training_ratio
        print("Drop at generator %s" % drop_at_generator)
        lr_decay_schedule_generator = lambda iter: (ktf.where(
            K.less(iter, drop_at_generator), 1., 0.1) * K.maximum(
                0., 1. - K.cast(iter, 'float32') / number_of_iters_generator))
        lr_decay_schedule_discriminator = lambda iter: (ktf.where(
            K.less(iter, drop_at_discriminator), 1., 0.1) * K.maximum(
                0., 1. - K.cast(iter, 'float32') /
                number_of_iters_discriminator))
    else:
        raise ValueError('Unknown lr_decay_schedule: %s' % args.lr_decay_schedule)

    return lr_decay_schedule_generator, lr_decay_schedule_discriminator
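Illustrative use of the returned schedules (assumed, not part of the source): scale a base learning rate by the schedule value at the current iteration counter.

gen_schedule, disc_schedule = get_lr_decay_schedule(args)
iteration = K.variable(0, dtype='int64', name='generator_iter')
effective_gen_lr = 0.0002 * gen_schedule(iteration)  # base lr is hypothetical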
Example #22
 def contrastive_accuracy(y_true, y_pred):
     margin = 1.0
     return K.mean(K.equal(y_true,
                           K.cast(K.less(y_pred, margin / 2), 'float32')),
                   axis=-1)
Example #23
 def triplet_acc(self, y_true, y_pred):
     return K.mean(K.less(y_pred, 0))
Example #24
def my_critic_acc(y_true, y_pred):
    # fraction of samples where prediction and target agree in sign
    sign = K.greater(y_true * y_pred, 0.)
    return K.mean(K.cast(sign, K.floatx()))
Example #25
 def call(self, x):
     a, b = x
     return K.less(a, b)
Example #26
    def GRU_merge(self, self_act, a, b, act):
        # a and b are interval abstractions (lower/upper bounds); this bounds
        # the GRU-style combination self_act(x) * a + (1 - self_act(x)) * act(b)
        # soundly over the input interval [lower_x, upper_x]
        lower_a, upper_a = a.get_lu()
        lower_b, upper_b = b.get_lu()
        fa_lower, fa_upper = lower_a, upper_a
        fb_lower, fb_upper = act(lower_b), act(upper_b)
        lower_x, upper_x = self.get_lu()
        fx_lower, fx_upper = self_act(lower_x), self_act(upper_x)
        partial_fx_lower = tf.gradients(fx_lower, lower_x)[0]
        partial_fx_upper = tf.gradients(fx_upper, upper_x)[0]

        def lower_a_greater_zero():
            uz_x_Phi = K.minimum(partial_fx_upper * fa_upper,
                                 (fx_upper - fx_lower) * fa_upper /
                                 (upper_x - lower_x))
            ax_right_upper = fx_upper * fa_upper
            ax_left_upper = uz_x_Phi * (lower_x - upper_x) + ax_right_upper
            lz_x_Phi = K.minimum(partial_fx_lower * fa_lower,
                                 (fx_lower - fx_upper) * fa_lower /
                                 (lower_x - upper_x))
            ax_left_lower = fx_lower * fa_lower
            ax_right_lower = lz_x_Phi * (upper_x - lower_x) + ax_left_lower
            return [
                ax_left_lower, ax_left_upper, ax_right_lower, ax_right_upper
            ]

        def lower_b_greater_zero():
            uz_x_Phi = K.maximum(-partial_fx_lower * fb_upper,
                                 (-fx_upper + fx_lower) * fb_upper /
                                 (upper_x - lower_x))
            bx_left_upper = (1 - fx_lower) * fb_upper
            bx_right_upper = uz_x_Phi * (upper_x - lower_x) + bx_left_upper
            lz_x_Phi = K.maximum(-partial_fx_upper * fb_lower,
                                 (-fx_lower + fx_upper) * fb_lower /
                                 (lower_x - upper_x))
            bx_right_lower = (1 - fx_upper) * fb_lower
            bx_left_lower = lz_x_Phi * (lower_x - upper_x) + bx_right_lower
            return [
                bx_left_lower, bx_left_upper, bx_right_lower, bx_right_upper
            ]

        def upper_a_less_zero():
            uz_x_Phi = K.maximum(partial_fx_lower * fa_upper,
                                 (fx_lower - fx_upper) * fa_upper /
                                 (lower_x - upper_x))
            ax_left_upper = fx_lower * fa_upper
            ax_right_upper = uz_x_Phi * (upper_x - lower_x) + ax_left_upper
            lz_x_Phi = K.maximum(partial_fx_upper * fa_lower,
                                 (fx_upper - fx_lower) * fa_lower /
                                 (upper_x - lower_x))
            ax_right_lower = fx_upper * fa_lower
            ax_left_lower = lz_x_Phi * (lower_x - upper_x) + ax_right_lower
            return [
                ax_left_lower, ax_left_upper, ax_right_lower, ax_right_upper
            ]

        def upper_b_less_zero():
            uz_x_Phi = K.minimum(-partial_fx_upper * fb_upper,
                                 (-fx_upper + fx_lower) * fb_upper /
                                 (upper_x - lower_x))
            bx_right_upper = (1 - fx_upper) * fb_upper
            bx_left_upper = uz_x_Phi * (lower_x - upper_x) + bx_right_upper
            lz_x_Phi = K.minimum(-partial_fx_lower * fb_lower,
                                 (-fx_lower + fx_upper) * fb_lower /
                                 (lower_x - upper_x))
            bx_left_lower = (1 - fx_lower) * fb_lower
            bx_right_lower = lz_x_Phi * (upper_x - lower_x) + bx_left_lower
            return [
                bx_left_lower, bx_left_upper, bx_right_lower, bx_right_upper
            ]

        def otherwise_a():
            uz_x_Phi = K.minimum(partial_fx_upper * fa_upper,
                                 (fx_upper - fx_lower) * fa_upper /
                                 (upper_x - lower_x))
            ax_right_upper = fx_upper * fa_upper
            ax_left_upper = uz_x_Phi * (lower_x - upper_x) + ax_right_upper
            lz_x_Phi = K.maximum(partial_fx_upper * fa_lower,
                                 (fx_upper - fx_lower) * fa_lower /
                                 (upper_x - lower_x))
            ax_right_lower = fx_upper * fa_lower
            ax_left_lower = lz_x_Phi * (lower_x - upper_x) + ax_right_lower
            return [
                ax_left_lower, ax_left_upper, ax_right_lower, ax_right_upper
            ]

        def otherwise_b():
            uz_x_Phi = K.maximum(-partial_fx_lower * fb_upper,
                                 (-fx_upper + fx_lower) * fb_upper /
                                 (upper_x - lower_x))
            bx_left_upper = (1 - fx_lower) * fb_upper
            bx_right_upper = uz_x_Phi * (upper_x - lower_x) + bx_left_upper
            lz_x_Phi = K.minimum(-partial_fx_lower * fb_lower,
                                 (-fx_lower + fx_upper) * fb_lower /
                                 (lower_x - upper_x))
            bx_left_lower = (1 - fx_lower) * fb_lower
            bx_right_lower = lz_x_Phi * (upper_x - lower_x) + bx_left_lower
            return [
                bx_left_lower, bx_left_upper, bx_right_lower, bx_right_upper
            ]

        a_anchors = otherwise_a()
        anchors_lower_a_greater_zero = lower_a_greater_zero()
        anchors_upper_a_less_zero = upper_a_less_zero()
        for i in range(4):
            a_anchors[i] = K.switch(K.greater(lower_a, K.zeros_like(lower_a)),
                                    anchors_lower_a_greater_zero[i],
                                    a_anchors[i])
            a_anchors[i] = K.switch(K.less(upper_a, K.zeros_like(upper_a)),
                                    anchors_upper_a_less_zero[i], a_anchors[i])

        b_anchors = otherwise_b()
        anchors_lower_b_greater_zero = lower_b_greater_zero()
        anchors_upper_b_less_zero = upper_b_less_zero()
        for i in range(4):
            b_anchors[i] = K.switch(K.greater(lower_b, K.zeros_like(lower_b)),
                                    anchors_lower_b_greater_zero[i],
                                    b_anchors[i])
            b_anchors[i] = K.switch(K.less(upper_b, K.zeros_like(upper_b)),
                                    anchors_upper_b_less_zero[i], b_anchors[i])

        for i in range(4):
            a_anchors[i] += b_anchors[i]
        lower_z = K.minimum(a_anchors[0], a_anchors[2])
        upper_z = K.maximum(a_anchors[1], a_anchors[3])
        return AI((lower_z + upper_z) / 2, (upper_z - lower_z) / 2, None, True)
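The final line converts the merged bounds back into the interval representation used by the (externally defined) AI abstraction: a center (lower_z + upper_z) / 2 and a radius (upper_z - lower_z) / 2, which together recover exactly [lower_z, upper_z].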
Example #27
 def call(self, x):
     return K.less(x, K.constant(0))
Example #28
def Max(x):
    zeros = K.zeros_like(x)
    return K.switch(K.less(x, 0.9), zeros, x)
Example #29
 def customized_loss(y_true, y_pred):
     # asymmetric cost: cu per unit of under-prediction, co per unit of
     # over-prediction (cu and co are defined in the enclosing scope)
     self.tensor_ = y_true
     loss = K.switch(K.less(y_pred, y_true),
                     cu * (y_true - y_pred),
                     co * (y_pred - y_true))
     return K.sum(loss)
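Worked example of the asymmetry: with cu = 2, co = 1, and y_true = 5, predicting 3 costs 2 * (5 - 3) = 4, while predicting 7 costs 1 * (7 - 5) = 2, so under-prediction is penalized twice as heavily as over-prediction.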
Example #30
 def is_iterate_neurons_m(idx_p, idx_l, idx_m, idx_n):
     return k.less(idx_m, shape[2])
Example #31
def RecoProb_forVAE(x, par1, par2, par3):
    N = 0
    nll_loss = 0

    #Log-Normal distributed variables
    mu = par1[:, :Nf_lognorm]
    sigma = par2[:, :Nf_lognorm]
    fraction = par3[:, :Nf_lognorm]
    x_clipped = K.clip(x[:, :Nf_lognorm], clip_x_to0, 1e8)
    single_NLL = K.tf.where(
        K.less(x[:, :Nf_lognorm], clip_x_to0), -K.log(fraction),
        -K.log(1 - fraction) + K.log(sigma) + K.log(x_clipped) +
        0.5 * K.square(K.tf.divide(K.log(x_clipped) - mu, sigma)))
    nll_loss += K.sum(single_NLL, axis=-1)
    N += Nf_lognorm

    # Gaussian distributed variables
    mu = par1[:, N:N + Nf_gauss]
    sigma = par2[:, N:N + Nf_gauss]
    norm_x = K.tf.divide(x[:, N:N + Nf_gauss] - mu, sigma)
    single_NLL = K.log(sigma) + 0.5 * K.square(norm_x)
    nll_loss += K.sum(single_NLL, axis=-1)
    N += Nf_gauss

    # Positive Gaussian distributed variables
    mu = par1[:, N:N + Nf_Pgauss]
    sigma = par2[:, N:N + Nf_Pgauss]
    norm_x = K.tf.divide(x[:, N:N + Nf_Pgauss] - mu, sigma)

    sqrt2 = 1.4142135624
    aNorm = 1 + 0.5 * (1 + K.tf.erf(K.tf.divide(-mu, sigma) / sqrt2))

    single_NLL = K.log(sigma) + 0.5 * K.square(norm_x) - K.log(aNorm)
    nll_loss += K.sum(single_NLL, axis=-1)
    N += Nf_Pgauss

    # Positive Discrete Gaussian distributed variables
    mu = par1[:, N:N + Nf_PDgauss]
    sigma = par2[:, N:N + Nf_PDgauss]
    norm_xp = K.tf.divide(x[:, N:N + Nf_PDgauss] + 0.5 - mu, sigma)
    norm_xm = K.tf.divide(x[:, N:N + Nf_PDgauss] - 0.5 - mu, sigma)
    sqrt2 = 1.4142135624
    single_LL = 0.5 * (K.tf.erf(norm_xp / sqrt2) - K.tf.erf(norm_xm / sqrt2))

    norm_0 = K.tf.divide(-0.5 - mu, sigma)
    aNorm = 1 + 0.5 * (1 + K.tf.erf(norm_0 / sqrt2))

    single_NLL = -K.log(K.clip(single_LL, 1e-10, 1e40)) - K.log(aNorm)
    nll_loss += K.sum(single_NLL, axis=-1)
    N += Nf_PDgauss

    #Binomial distributed variables
    p = 0.5 * (1 + 0.98 * K.tanh(par1[:, N:N + Nf_binomial]))
    single_NLL = -K.tf.where(K.equal(x[:, N:N + Nf_binomial], 1), K.log(p),
                             K.log(1 - p))
    nll_loss += K.sum(single_NLL, axis=-1)
    N += Nf_binomial

    #Poisson distributed variables
    aux = par1[:, N:]
    mu = 1 + K.tf.where(K.tf.greater(aux, 0), aux,
                        K.tf.divide(aux, K.sqrt(1 + K.square(aux))))
    single_NLL = K.tf.lgamma(x[:, N:] + 1) - x[:, N:] * K.log(mu) + mu
    nll_loss += K.sum(single_NLL, axis=-1)

    return nll_loss
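One plausible way to wire this into a Keras VAE loss (an assumption; the surrounding model is not shown): close over the decoder's three parameter heads and average the per-sample NLL over the batch.

def reco_loss(x, decoder_outputs):
    # decoder_outputs is assumed to be the list [par1, par2, par3]
    par1, par2, par3 = decoder_outputs
    return K.mean(RecoProb_forVAE(x, par1, par2, par3))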
Example #32
 def is_iterate_neurons_l(idx_p, idx_l):
     return k.less(idx_l, shape[1])
Example #33
def IOU_metric(y_true, y_pred):
    """
    Compute the intersection over the union of the true and
    the predicted bounding boxes. Output in range 0-1;  
    1 being the best match of bounding boxes (perfect alignment), 
    0 being worst (no intersection at all).
    
    @param y_true - BATCH_SIZEx4 Tensor object (float), the ground 
                    truth labels for the bounding boxes around the
                    tumor in the corresponding images. Value order:
                    (x_top_left, y_top_left, x_bot_right, y_bot_right)
    @param y_pred - BATCH_SIZEx4 Tensor object (float), the model's 
                    prediction for the bounding boxes around the
                    tumor in the corresponding images. Value order:
                    (x_top_left, y_top_left, x_bot_right, y_bot_right)
    @return iou - 1x1 Tensor object (float), value being the mean
                  IOU for all image in the batch, and is within the 
                  range of 0-1 (inclusive). 
    """
    # extract points from tensors
    x_LT = tf.math.minimum(y_true[:, 0], y_true[:, 2])
    y_UT = tf.math.minimum(y_true[:, 1], y_true[:, 3])
    x_RT = tf.math.maximum(y_true[:, 0], y_true[:, 2])
    y_LT = tf.math.maximum(y_true[:, 1], y_true[:, 3])

    x_LP = tf.math.minimum(y_pred[:, 0], y_pred[:, 2])
    y_UP = tf.math.minimum(y_pred[:, 1], y_pred[:, 3])
    x_RP = tf.math.maximum(y_pred[:, 0], y_pred[:, 2])
    y_LP = tf.math.maximum(y_pred[:, 1], y_pred[:, 3])

    # to perform the IOU math correctly, the points that are left-most,
    # upper-most, right-most, and lower-most must be found
    xL_pairwise_gt = K.greater(x_LT, x_LP)
    yU_pairwise_gt = K.greater(y_UT, y_UP)

    xW1_pairwise_int = K.less(x_LT, x_RP)
    xW1 = K.cast(xW1_pairwise_int, K.floatx())

    xW2_pairwise_int = K.less(x_LP, x_RT)
    xW2 = K.cast(xW2_pairwise_int, K.floatx())

    yH1_pairwise_int = K.less(y_UT, y_LP)
    yH1 = K.cast(yH1_pairwise_int, K.floatx())

    yH2_pairwise_int = K.less(y_UP, y_LT)
    yH2 = K.cast(yH2_pairwise_int, K.floatx())

    x_bin = K.cast(xL_pairwise_gt, K.floatx())
    y_bin = K.cast(yU_pairwise_gt, K.floatx())

    # find the amount by which the bboxes intersect
    x_does_intersect = tf.math.add(
        tf.math.multiply(x_bin, xW1),
        tf.math.multiply(tf.math.subtract(1.0, x_bin), xW2))
    y_does_intersect = tf.math.add(
        tf.math.multiply(y_bin, yH1),
        tf.math.multiply(tf.math.subtract(1.0, y_bin), yH2))
    box_does_intersect = tf.math.multiply(x_does_intersect, y_does_intersect)

    a = tf.math.minimum(tf.math.subtract(x_RP, x_LT),
                        tf.math.subtract(x_RP, x_LP))
    b = tf.math.minimum(tf.math.subtract(x_RT, x_LP),
                        tf.math.subtract(x_RT, x_LT))
    c = tf.math.minimum(tf.math.subtract(y_LP, y_UT),
                        tf.math.subtract(y_LP, y_UP))
    d = tf.math.minimum(tf.math.subtract(y_LT, y_UP),
                        tf.math.subtract(y_LT, y_UT))

    # calculate intersection area
    intersection_width = tf.math.add(
        tf.math.multiply(x_bin, a),
        tf.math.multiply(tf.math.subtract(1.0, x_bin), b))
    intersection_height = tf.math.add(
        tf.math.multiply(y_bin, c),
        tf.math.multiply(tf.math.subtract(1.0, y_bin), d))

    intersection = tf.math.multiply(
        tf.math.multiply(intersection_width, intersection_height),
        box_does_intersect)
    # calculate union area
    union_double = tf.math.add(
        tf.math.multiply(tf.math.subtract(x_RP, x_LP),
                         tf.math.subtract(y_LP, y_UP)),
        tf.math.multiply(tf.math.subtract(x_RT, x_LT),
                         tf.math.subtract(y_LT, y_UT)))
    union = tf.math.subtract(union_double, intersection)

    # take the mean in order to compress BATCH_SIZEx1 Tensor
    # into a 1x1 Tensor
    iou = K.mean(tf.math.divide(intersection, union))
    return iou
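Illustrative registration of the metric at compile time (the model itself is assumed):

model.compile(optimizer='adam', loss='mse', metrics=[IOU_metric])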