def dice_loss(y_true, y_pred):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (1 - (2. * intersection + smooth) /
            (K.sum(y_true_f) + K.sum(y_pred_f) + smooth))
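# A minimal sanity check of the Dice loss above on toy masks. This is a hedged
# sketch assuming the snippet runs in one module with the Keras backend imported
# as K, and that the free variable `smooth` is the usual small constant (1.0 here);
# both are assumptions, not part of the original example.
import tensorflow as tf
from tensorflow.keras import backend as K

smooth = 1.

y_true = tf.constant([[1., 1., 0., 0.]])
y_pred = tf.constant([[0.9, 0.8, 0.1, 0.2]])
print(float(dice_loss(y_true, y_pred)))  # about 0.12; a perfect prediction gives 0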
Example #2
def neighborhood_likelihood_loss(y_true, y_pred):
    # y_true = (label, bidding_price, winning_price)
    # y_pred = (price_step)
    y_pred = ops.convert_to_tensor(y_pred)  # (None_all, price_step)
    y_true = math_ops.cast(y_true, y_pred.dtype)  # (None_all, 3)

    # arg
    price_min = -3.0
    price_max = 4.0
    price_interval = 0.1
    price_step = tf.cast(tf.shape(y_pred)[-1], tf.int32)

    # split y_true
    y_true_label_1d = K.flatten(tf.slice(y_true, [0, 0],
                                         [-1, 1]))  # (None_all,)
    # calculate the bidding price bucket index
    y_true_b = tf.slice(y_true, [0, 1], [-1, 1])  # (None_all, 1)
    y_true_b = tf.clip_by_value(y_true_b, price_min, price_max)
    y_true_b_idx_2d = tf.cast(tf.floor(
        (y_true_b - price_min) / price_interval),
                              dtype='int32')  # (None_all, 1)
    y_true_b_idx_1d = K.flatten(y_true_b_idx_2d)  # (None_all,)
    # calculate the winning price bucket index
    y_true_z = tf.slice(y_true, [0, 2], [-1, 1])  # (None_all, 1)
    y_true_z = tf.clip_by_value(y_true_z, price_min, price_max)
    y_true_z_idx_2d = tf.cast(tf.floor(
        (y_true_z - price_min) / price_interval),
                              dtype='int32')  # (None_all, 1)
    y_true_z_idx_1d = K.flatten(y_true_z_idx_2d)  # (None_all,)

    # Calculate masks
    ## on All bids
    mask_win = y_true_label_1d  # (None,)
    mask_lose = 1 - mask_win  # (None,)

    mask_z_cdf = tf.sequence_mask(y_true_z_idx_1d + 1,
                                  price_step)  # (None, price_step)
    mask_z_pdf = tf.math.logical_xor(
        mask_z_cdf, tf.sequence_mask(y_true_z_idx_1d,
                                     price_step))  # (None, price_step)

    mask_b_cdf = tf.sequence_mask(y_true_b_idx_1d + 1,
                                  price_step)  # (None, price_step)
    mask_b_pdf = tf.math.logical_xor(
        mask_b_cdf, tf.sequence_mask(y_true_b_idx_1d,
                                     price_step))  # (None, price_step)
    ## on Winning bids
    mask_win_z_cdf = tf.boolean_mask(mask_z_cdf,
                                     mask_win)  # (None_win, price_step)
    mask_win_z_pdf = tf.boolean_mask(mask_z_pdf,
                                     mask_win)  # (None_win, price_step)
    mask_win_b_cdf = tf.boolean_mask(mask_b_cdf,
                                     mask_win)  # (None_win, price_step)
    mask_win_b_pdf = tf.boolean_mask(mask_b_pdf,
                                     mask_win)  # (None_win, price_step)
    ## on Losing bids
    mask_lose_b_cdf = tf.boolean_mask(mask_b_cdf,
                                      mask_lose)  # (None_lose, price_step)
    mask_lose_b_pdf = tf.boolean_mask(mask_b_pdf,
                                      mask_lose)  # (None_lose, price_step)

    # Price Distribution
    y_pred_win = tf.boolean_mask(y_pred, mask_win)  # (None_win, price_step)
    y_pred_lose = tf.boolean_mask(y_pred, mask_lose)  # (None_lose, price_step)

    # Loss
    zeros = tf.zeros(tf.shape(y_pred), tf.float32)  # (None, price_step)
    zeros_win = tf.zeros(tf.shape(y_pred_win),
                         tf.float32)  # (None_win, price_step)
    zeros_lose = tf.zeros(tf.shape(y_pred_lose),
                          tf.float32)  # (None_lose, price_step)
    ones = tf.ones(tf.shape(y_pred), tf.float32)  # (None, price_step)
    ones_win = tf.ones(tf.shape(y_pred_win),
                       tf.float32)  # (None_win, price_step)
    ones_lose = tf.ones(tf.shape(y_pred_lose),
                        tf.float32)  # (None_lose, price_step)

    # loss_1
    loss_1 = -K.sum(
        tf.math.log(
            tf.clip_by_value(tf.boolean_mask(y_pred_win, mask_win_z_pdf),
                             K.epsilon(), 1.)))

    # loss_2_win
    left_neighborhood_offset = y_true_b_idx_1d - y_true_z_idx_1d
    left_neighborhood_idx = tf.math.maximum(
        y_true_z_idx_1d - left_neighborhood_offset, 0)
    mask_z_neighborhood_cdf = tf.math.logical_xor(
        mask_b_cdf, tf.sequence_mask(left_neighborhood_idx, price_step))
    mask_win_z_neighborhood_cdf = tf.boolean_mask(mask_z_neighborhood_cdf,
                                                  mask_win)
    loss_2_win = -K.sum(
        tf.math.log(
            tf.clip_by_value(
                K.sum(tf.where(mask_win_z_neighborhood_cdf, y_pred_win,
                               zeros_win),
                      axis=1), K.epsilon(), 1.)))

    # loss_2_lose
    right_neighborhood_offset = 40
    right_neighborhood_idx = tf.math.minimum(
        y_true_b_idx_1d + right_neighborhood_offset, price_step - 1)
    mask_b_neighborhood_cdf = tf.math.logical_xor(
        tf.math.logical_not(mask_b_cdf),
        tf.math.logical_not(
            tf.sequence_mask(right_neighborhood_idx, price_step)))
    mask_lose_b_neighborhood_cdf = tf.boolean_mask(mask_b_neighborhood_cdf,
                                                   mask_lose)
    loss_2_lose = -K.sum(
        tf.math.log(
            tf.clip_by_value(
                K.sum(tf.where(mask_lose_b_neighborhood_cdf, y_pred_lose,
                               zeros_lose),
                      axis=1), K.epsilon(), 1.)))

    # loss_2
    beta = 0.8
    loss_2 = beta * loss_2_win + (1 - beta) * loss_2_lose

    # total loss
    alpha = 0.2
    return alpha * loss_1 + (1 - alpha) * loss_2
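# For orientation, a small self-contained sketch (hypothetical prices, NumPy only)
# of the bucketing used above: prices are clipped to [price_min, price_max] and
# mapped to one of the price_interval-wide buckets that y_pred distributes mass over.
import numpy as np

price_min, price_max, price_interval = -3.0, 4.0, 0.1
price_step = int(round((price_max - price_min) / price_interval))
print(price_step)  # 70 buckets

def bucket_index(price):
    # mirrors the clip + floor logic applied to y_true_b and y_true_z above
    price = np.clip(price, price_min, price_max)
    return int(np.floor((price - price_min) / price_interval))

print(bucket_index(-3.0))  # 0: the lower edge of the grid
print(bucket_index(0.55))  # 35: the bucket covering prices in [0.5, 0.6)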
Example #3
 def loss(y_true, y_pred):
     c = K.exp(2 * kappa)
     y_pred_ = K.flatten(y_pred)
     y_true_ = K.flatten(y_true)
     score = c - K.exp(kappa * (K.cos(y_pred_ - y_true_) + 1))
     return score
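# For intuition, a self-contained NumPy check of the same cosine-based score
# (in the example above, kappa comes from the enclosing scope; the value 1.0
# used here is only an assumption for illustration):
import numpy as np

kappa = 1.0

def cosine_score(y_true, y_pred):
    return np.exp(2 * kappa) - np.exp(kappa * (np.cos(y_pred - y_true) + 1))

print(cosine_score(0.0, 0.0))    # 0.0 at zero angular error
print(cosine_score(0.0, np.pi))  # exp(2*kappa) - 1 (about 6.39) at an error of pi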
Example #4
def dice_coef(y_true, y_pred, smooth=1.):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
Example #5
def dice_coef(y_true, y_pred, smooth=1):
    """ Calculate DICE coeficient given y_true and y_pred """
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2.0 * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
Example #6
x = UpSampling2D((2, 2))(x)
x = Conv2D(32, (3, 3), activation='relu', padding='same')(x)
x = Conv2D(32, (3, 3), activation='relu', padding='same')(x)
x = UpSampling2D((2, 2))(x)
outputs = Conv2D(3, (3, 3), activation='sigmoid', padding='same')(x)

# instantiate decoder model
decoder = Model(latent_inputs, outputs, name='decoder')
decoder.summary()

# instantiate VAE model
outputs = decoder(encoder(inputs)[2])
vae = Model(inputs, outputs, name='vae_mlp')

# Compute VAE loss
reconstruction_loss = binary_crossentropy(K.flatten(inputs),
                                          K.flatten(outputs))
reconstruction_loss *= 320 * 896
kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
kl_loss = K.sum(kl_loss, axis=-1)
kl_loss *= -0.5
vae_loss = K.mean(reconstruction_loss + kl_loss)

vae.add_loss(vae_loss)
vae.compile(optimizer='adam')

# Running autoencoder
reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                              factor=0.5,
                              patience=40,
                              min_lr=0.0001)
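# A hedged training sketch (hypothetical data and settings) for a VAE built with the
# add_loss pattern above: since the loss is attached to the model, no target tensors
# are passed to fit, and reduce_lr monitors val_loss from the validation split.
# The 320 x 896 x 3 input shape is inferred from the reconstruction scaling and the
# 3-channel decoder output, and is an assumption.
import numpy as np

x_train = np.random.rand(16, 320, 896, 3).astype('float32')  # placeholder images

vae.fit(x_train,
        epochs=5,               # hypothetical value
        batch_size=4,
        validation_split=0.25,  # provides the val_loss monitored by ReduceLROnPlateau
        callbacks=[reduce_lr])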
Example #7
    def _interpolate(image, sampled_grids, output_size):

        batch_size = K.shape(image)[0]
        height = K.shape(image)[1]
        width = K.shape(image)[2]
        num_channels = K.shape(image)[3]

        x = K.cast(K.flatten(sampled_grids[:, 0:1, :]), dtype='float32')
        y = K.cast(K.flatten(sampled_grids[:, 1:2, :]), dtype='float32')

        x = .5 * (x + 1.0) * K.cast(width, dtype='float32')
        y = .5 * (y + 1.0) * K.cast(height, dtype='float32')

        x0 = K.cast(x, 'int32')
        x1 = x0 + 1
        y0 = K.cast(y, 'int32')
        y1 = y0 + 1

        max_x = int(K.int_shape(image)[2] - 1)
        max_y = int(K.int_shape(image)[1] - 1)

        x0 = K.clip(x0, 0, max_x)
        x1 = K.clip(x1, 0, max_x)
        y0 = K.clip(y0, 0, max_y)
        y1 = K.clip(y1, 0, max_y)

        pixels_batch = K.arange(0, batch_size) * (height * width)
        pixels_batch = K.expand_dims(pixels_batch, axis=-1)
        flat_output_size = output_size[0] * output_size[1]
        base = K.repeat_elements(pixels_batch, flat_output_size, axis=1)
        base = K.flatten(base)

        # base_y0 = base + (y0 * width)
        base_y0 = y0 * width
        base_y0 = base + base_y0
        # base_y1 = base + (y1 * width)
        base_y1 = y1 * width
        base_y1 = base_y1 + base

        indices_a = base_y0 + x0
        indices_b = base_y1 + x0
        indices_c = base_y0 + x1
        indices_d = base_y1 + x1

        flat_image = K.reshape(image, shape=(-1, num_channels))
        flat_image = K.cast(flat_image, dtype='float32')
        pixel_values_a = K.gather(flat_image, indices_a)
        pixel_values_b = K.gather(flat_image, indices_b)
        pixel_values_c = K.gather(flat_image, indices_c)
        pixel_values_d = K.gather(flat_image, indices_d)

        x0 = K.cast(x0, 'float32')
        x1 = K.cast(x1, 'float32')
        y0 = K.cast(y0, 'float32')
        y1 = K.cast(y1, 'float32')

        area_a = K.expand_dims(((x1 - x) * (y1 - y)), 1)
        area_b = K.expand_dims(((x1 - x) * (y - y0)), 1)
        area_c = K.expand_dims(((x - x0) * (y1 - y)), 1)
        area_d = K.expand_dims(((x - x0) * (y - y0)), 1)

        values_a = area_a * pixel_values_a
        values_b = area_b * pixel_values_b
        values_c = area_c * pixel_values_c
        values_d = area_d * pixel_values_d
        return values_a + values_b + values_c + values_d
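# For intuition, a self-contained NumPy version of the four-corner weighting used
# above, evaluated at a single query point inside one cell of a hypothetical 2x2 image:
import numpy as np

image = np.array([[0.0, 1.0],
                  [2.0, 3.0]])  # indexed as image[y, x]
x, y = 0.25, 0.5                # query point in pixel coordinates
x0, y0 = int(np.floor(x)), int(np.floor(y))
x1, y1 = x0 + 1, y0 + 1

value = ((x1 - x) * (y1 - y) * image[y0, x0] +  # area_a * pixel_values_a
         (x1 - x) * (y - y0) * image[y1, x0] +  # area_b * pixel_values_b
         (x - x0) * (y1 - y) * image[y0, x1] +  # area_c * pixel_values_c
         (x - x0) * (y - y0) * image[y1, x1])   # area_d * pixel_values_d
print(value)  # 1.25: halfway between the rows, a quarter of the way between the columns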
Example #8
def jacard_coef(y_true, y_pred):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (intersection + 1.0) / (K.sum(y_true_f) + K.sum(y_pred_f) -
                                   intersection + 1.0)
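# Quick sanity check of the Jaccard (IoU) coefficient above on toy binary masks,
# assuming the snippet runs in one module with the Keras backend imported as K:
import tensorflow as tf
from tensorflow.keras import backend as K

y_true = tf.constant([[1., 1., 0., 0.]])
y_pred = tf.constant([[1., 0., 1., 0.]])
print(float(jacard_coef(y_true, y_pred)))  # (1 + 1) / (2 + 2 - 1 + 1) = 0.5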
Example #9
def keras_batch_hard_triplet_loss(labels, y_pred):
    # As omoindrot's loss function expects the labels to have shape (batch_size,), the labels are flattened.
    # Before flattening, they have shape (batch_size,1).

    labels = K.flatten(labels)
    return batch_hard_triplet_loss(labels, y_pred, margin=0.5)
Example #10
def dice_coef(y_true, y_pred):
    y_true_f = keras.flatten(y_true)
    y_pred_f = keras.flatten(y_pred)
    intersection = keras.sum(y_true_f * y_pred_f)
    return (2. * intersection + 1) / (keras.sum(y_true_f) + keras.sum(y_pred_f) + 1)
Example #11
def mean_iou(y_true, y_pred):
    y_true_f = keras.flatten(y_true)
    y_pred_f = keras.flatten(y_pred)
    intersection = K.sum(K.abs(y_true_f * y_pred_f), axis=-1)
    union = K.sum(y_true_f + y_pred_f) - intersection
    return ((intersection + K.epsilon()) / (union + K.epsilon()))
Example #12
def logx_loss(y_true, y_pred):
    y_true_flat = K.flatten(y_true)
    y_pred_flat = K.flatten(y_pred)
    xent_loss = 28 * 28 * metrics.binary_crossentropy(y_true_flat, y_pred_flat)
    return xent_loss
Example #13
def X_normal_logpdf(x, mu, lsgms):
    lsgms = backend.flatten(lsgms)
    return backend.mean(-(0.5 * logc + 0.5 * lsgms) - 0.5 *
                        ((x - mu)**2 / backend.exp(lsgms)),
                        axis=-1)
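# For reference, a minimal NumPy sketch of the same diagonal-Gaussian log-density.
# In the example above, logc comes from the enclosing scope; log(2 * pi) is assumed
# here, which is the conventional choice.
import numpy as np

logc = np.log(2 * np.pi)

def normal_logpdf(x, mu, lsgms):
    return np.mean(-(0.5 * logc + 0.5 * lsgms) - 0.5 * ((x - mu) ** 2 / np.exp(lsgms)),
                   axis=-1)

print(normal_logpdf(np.zeros(3), np.zeros(3), np.zeros(3)))  # -0.5 * log(2*pi), about -0.919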
Example #14
def integrate_vec(vec, time_dep=False, method='ss', **kwargs):
    """
    Integrate a (stationary or time-dependent) vector field (N-D Tensor) in TensorFlow.

    Aside from directly using TensorFlow's numerical integration odeint(), this also
    implements "scaling and squaring" and quadrature. Note that the differential
    equation given to odeint is the one used in quadrature.

    Parameters:
        vec: the Tensor field to integrate. 
            If vol_size is the size of the intrinsic volume, and vol_ndim = len(vol_size),
            then the vector shape (vec_shape) should be
            [vol_size, vol_ndim] (if stationary) or
            [vol_size, vol_ndim, nb_time_steps] (if time dependent)
        time_dep: bool whether vector is time dependent
        method: 'scaling_and_squaring' or 'ss' or 'ode' or 'quadrature'
        
        if using 'scaling_and_squaring': currently only supports integrating to time point 1.
            nb_steps: int number of steps. Note that this means the vec field gets scaled
            down by 2**nb_steps. So nb_steps of 0 means integral = vec.

        if using 'ode':
            out_time_pt (optional): a time point or list of time points at which to evaluate
                Default: 1
            init (optional): if using 'ode', the initialization method.
                Currently only supporting 'zero'. Default: 'zero'
            ode_args (optional): dictionary of all other parameters for 
                tf.contrib.integrate.odeint()

    Returns:
        int_vec: integral of vector field.
        Same shape as the input if method is 'scaling_and_squaring', 'ss', 'quadrature', 
        or 'ode' with out_time_pt not a list. Will have shape [*vec_shape, len(out_time_pt)]
        if method is 'ode' with out_time_pt being a list.

    Todo:
        quadrature for more than just intrinsically out_time_pt = 1
    """

    if method not in ['ss', 'scaling_and_squaring', 'ode', 'quadrature']:
        raise ValueError("method has to be 'ss', 'scaling_and_squaring', 'ode' or 'quadrature'. found: %s" % method)

    if method in ['ss', 'scaling_and_squaring']:
        nb_steps = kwargs['nb_steps']
        assert nb_steps >= 0, 'nb_steps should be >= 0, found: %d' % nb_steps

        if time_dep:
            svec = K.permute_dimensions(vec, [-1, *range(0, vec.shape[-1] - 1)])
            assert 2**nb_steps == svec.shape[0], "2**nb_steps and vector shape don't match"

            svec = svec/(2**nb_steps)
            for _ in range(nb_steps):
                svec = svec[0::2] + tf.map_fn(transform, svec[1::2,:], svec[0::2,:])

            disp = svec[0, :]

        else:
            vec = vec/(2**nb_steps)
            for _ in range(nb_steps):
                vec += transform(vec, vec)
            disp = vec

    elif method == 'quadrature':
        # TODO: could output more than a single timepoint!
        nb_steps = kwargs['nb_steps']
        assert nb_steps >= 1, 'nb_steps should be >= 1, found: %d' % nb_steps

        vec = vec/nb_steps

        if time_dep:
            disp = vec[...,0]
            for si in range(nb_steps-1):
                disp += transform(vec[...,si+1], disp)
        else:
            disp = vec
            for _ in range(nb_steps-1):
                disp += transform(vec, disp)

    else:
        assert not time_dep, "odeint not implemented with time-dependent vector field"
        fn = lambda disp, _: transform(vec, disp)  

        # process time point.
        out_time_pt = kwargs['out_time_pt'] if 'out_time_pt' in kwargs.keys() else 1
        out_time_pt = tf.cast(K.flatten(out_time_pt), tf.float32)
        len_out_time_pt = out_time_pt.get_shape().as_list()[0]
        assert len_out_time_pt is not None, 'len_out_time_pt is None :('
        z = out_time_pt[0:1]*0.0  # initializing with something like tf.zeros(1) gives a control flow issue.
        K_out_time_pt = K.concatenate([z, out_time_pt], 0)

        # enable a new integration function than tf.contrib.integrate.odeint
        odeint_fn = tf.contrib.integrate.odeint
        if 'odeint_fn' in kwargs.keys() and kwargs['odeint_fn'] is not None:
            odeint_fn = kwargs['odeint_fn']

        # process initialization
        if 'init' not in kwargs.keys() or kwargs['init'] == 'zero':
            disp0 = vec*0  # initial displacement is 0
        else:
            raise ValueError('non-zero init for ode method not implemented')

        # compute integration with odeint
        if 'ode_args' not in kwargs.keys():
            kwargs['ode_args'] = {}
        disp = odeint_fn(fn, disp0, K_out_time_pt, **kwargs['ode_args'])
        disp = K.permute_dimensions(disp[1:len_out_time_pt+1, :], [*range(1,len(disp.shape)), 0])

        # return
        if len_out_time_pt == 1: 
            disp = disp[...,0]

    return disp
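# The 'ss' branch above is the classic scaling-and-squaring idea. As a minimal
# self-contained analogue (not the author's code), the same trick computes a matrix
# exponential: scale the generator down by 2**nb_steps, take one first-order step,
# then square nb_steps times; each squaring doubles the integration time.
import numpy as np

A = np.array([[0.0, 1.0],
              [-1.0, 0.0]])  # hypothetical stationary "velocity" (rotation generator)
nb_steps = 8
M = np.eye(2) + A / (2 ** nb_steps)  # small first-order step
for _ in range(nb_steps):
    M = M @ M
print(np.round(M, 3))  # close (within about 0.002) to expm(A): [[cos 1, sin 1], [-sin 1, cos 1]]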
Example #15
def jaccard_coef(y_true, y_pred, smooth=1):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (intersection) / (K.sum(y_true_f) + K.sum(y_pred_f) - intersection +
                             smooth)
Example #16
def get_MRI_CCVAE_3D(input_shape=(64, 64, 64, 1),
                     latent_dim=2,
                     beta=1,
                     disentangle=False,
                     gamma=1,
                     bias=True,
                     batch_size=64):

    image_size, _, _, channels = input_shape
    kernel_size = 3
    filters = 32
    intermediate_dim = 128
    epochs = 10
    nlayers = 2

    # build encoder model
    tg_inputs = Input(shape=input_shape, name='tg_inputs')
    bg_inputs = Input(shape=input_shape, name='bg_inputs')

    z_conv1 = Conv3D(filters=filters * 2,
                     kernel_size=kernel_size,
                     activation='relu',
                     strides=2,
                     use_bias=bias,
                     padding='same')

    z_conv2 = Conv3D(filters=filters * 4,
                     kernel_size=kernel_size,
                     activation='relu',
                     strides=2,
                     use_bias=bias,
                     padding='same')

    # generate latent vector Q(z|X)
    z_h_layer = Dense(intermediate_dim, activation='relu', use_bias=bias)
    z_mean_layer = Dense(latent_dim, name='z_mean', use_bias=bias)
    z_log_var_layer = Dense(latent_dim, name='z_log_var', use_bias=bias)
    z_layer = Lambda(sampling, output_shape=(latent_dim, ), name='z')

    def z_encoder_func(inputs):
        z_h = inputs
        z_h = z_conv1(z_h)
        z_h = z_conv2(z_h)
        # shape info needed to build decoder model
        shape = K.int_shape(z_h)
        z_h = Flatten()(z_h)
        z_h = z_h_layer(z_h)
        z_mean = z_mean_layer(z_h)
        z_log_var = z_log_var_layer(z_h)
        z = z_layer([z_mean, z_log_var])
        return z_mean, z_log_var, z, shape

    tg_z_mean, tg_z_log_var, tg_z, shape_z = z_encoder_func(tg_inputs)

    s_conv1 = Conv3D(filters=filters * 2,
                     kernel_size=kernel_size,
                     activation='relu',
                     strides=2,
                     use_bias=bias,
                     padding='same')

    s_conv2 = Conv3D(filters=filters * 4,
                     kernel_size=kernel_size,
                     activation='relu',
                     strides=2,
                     use_bias=bias,
                     padding='same')

    # generate latent vector Q(z|X)
    s_h_layer = Dense(intermediate_dim, activation='relu', use_bias=bias)
    s_mean_layer = Dense(latent_dim, name='s_mean', use_bias=bias)
    s_log_var_layer = Dense(latent_dim, name='s_log_var', use_bias=bias)
    s_layer = Lambda(sampling, output_shape=(latent_dim, ), name='s')

    def s_encoder_func(inputs):
        s_h = inputs
        s_h = s_conv1(s_h)
        s_h = s_conv2(s_h)
        # shape info needed to build decoder model
        shape = K.int_shape(s_h)
        s_h = Flatten()(s_h)
        s_h = s_h_layer(s_h)
        s_mean = s_mean_layer(s_h)
        s_log_var = s_log_var_layer(s_h)
        s = s_layer([s_mean, s_log_var])
        return s_mean, s_log_var, s, shape

    tg_s_mean, tg_s_log_var, tg_s, shape_s = s_encoder_func(tg_inputs)
    #bg_s_mean, bg_s_log_var, bg_s, _ = s_encoder_func(bg_inputs) # this is what they had
    bg_z_mean, bg_z_log_var, bg_z, _ = z_encoder_func(
        bg_inputs)  # Aidas and Stefano team hax

    # instantiate encoder models
    z_encoder = tf.keras.models.Model(tg_inputs,
                                      [tg_z_mean, tg_z_log_var, tg_z],
                                      name='z_encoder')
    s_encoder = tf.keras.models.Model(tg_inputs,
                                      [tg_s_mean, tg_s_log_var, tg_s],
                                      name='s_encoder')

    # build decoder model
    latent_inputs = Input(shape=(2 * latent_dim, ), name='z_sampling')

    x = Dense(intermediate_dim, activation='relu',
              use_bias=bias)(latent_inputs)
    x = Dense(shape_z[1] * shape_z[2] * shape_z[3] * shape_z[4],
              activation='relu',
              use_bias=bias)(x)
    x = Reshape((shape_z[1], shape_z[2], shape_z[3], shape_z[4]))(x)

    for i in range(nlayers):
        x = Conv3DTranspose(filters=filters,
                            kernel_size=kernel_size,
                            activation='relu',
                            strides=2,
                            use_bias=bias,
                            padding='same')(x)
        filters //= 2

    outputs = Conv3DTranspose(filters=1,
                              kernel_size=kernel_size,
                              activation='sigmoid',
                              padding='same',
                              use_bias=bias,
                              name='decoder_output')(x)

    # instantiate decoder model
    cvae_decoder = Model(latent_inputs, outputs, name='decoder')

    # decoder.summary()

    def zeros_like(x):
        return tf.zeros_like(x)

    tg_outputs = cvae_decoder(tf.keras.layers.concatenate([tg_z, tg_s], -1))
    zeros = tf.keras.layers.Lambda(zeros_like)(tg_z)

    bg_outputs = cvae_decoder(tf.keras.layers.concatenate(
        [bg_z, zeros], -1))  # Aidas look into this, is this correct

    #   fg_outputs = cvae_decoder(tf.keras.layers.concatenate([tg_z, zeros], -1))

    # instantiate VAE model
    cvae = tf.keras.models.Model(inputs=[tg_inputs, bg_inputs],
                                 outputs=[tg_outputs, bg_outputs],
                                 name='contrastive_vae')

    #     cvae_fg = tf.keras.models.Model(inputs=tg_inputs,
    #                                   outputs=fg_outputs,
    #                                   name='contrastive_vae_fg')

    if disentangle:
        discriminator = Dense(1, activation='sigmoid')

        z1 = Lambda(lambda x: x[:int(batch_size / 2), :])(tg_z)
        z2 = Lambda(lambda x: x[int(batch_size / 2):, :])(tg_z)
        s1 = Lambda(lambda x: x[:int(batch_size / 2), :])(tg_s)
        s2 = Lambda(lambda x: x[int(batch_size / 2):, :])(tg_s)

        q_bar = tf.keras.layers.concatenate([
            tf.keras.layers.concatenate([s1, z2], axis=1),
            tf.keras.layers.concatenate([s2, z1], axis=1)
        ],
                                            axis=0)

        q = tf.keras.layers.concatenate([
            tf.keras.layers.concatenate([s1, z1], axis=1),
            tf.keras.layers.concatenate([s2, z2], axis=1)
        ],
                                        axis=0)

        q_bar_score = (discriminator(q_bar) +
                       .1) * .85  # +.1 * .85 so that it's 0<x<1
        q_score = (discriminator(q) + .1) * .85
        tc_loss = K.log(q_score / (1 - q_score))
        discriminator_loss = -K.log(q_score) - K.log(1 - q_bar_score)
    else:
        tc_loss = 0
        discriminator_loss = 0

    reconstruction_loss = tf.keras.losses.mse(K.flatten(tg_inputs),
                                              K.flatten(tg_outputs))
    reconstruction_loss += tf.keras.losses.mse(K.flatten(bg_inputs),
                                               K.flatten(bg_outputs))
    reconstruction_loss *= input_shape[0] * input_shape[1] * input_shape[
        2] * input_shape[3]

    kl_loss = 1 + tg_z_log_var - tf.keras.backend.square(
        tg_z_mean) - tf.keras.backend.exp(tg_z_log_var)
    kl_loss += 1 + tg_s_log_var - tf.keras.backend.square(
        tg_s_mean) - tf.keras.backend.exp(tg_s_log_var)
    kl_loss += 1 + bg_z_log_var - tf.keras.backend.square(
        bg_z_mean) - tf.keras.backend.exp(bg_z_log_var)
    kl_loss = tf.keras.backend.sum(kl_loss, axis=-1)
    kl_loss *= -0.5

    #print(f'reconstruction loss {reconstruction_loss}')
    #print(f'kl_loss loss {kl_loss}')
    #print(f'tc_loss loss {tc_loss}')
    #print(f'discriminator_loss loss {discriminator_loss}')

    cvae_loss = tf.keras.backend.mean(reconstruction_loss + beta * kl_loss +
                                      gamma * tc_loss + discriminator_loss)
    cvae.add_loss(cvae_loss)

    opt = tf.keras.optimizers.Adam(learning_rate=0.001,
                                   beta_1=0.9,
                                   beta_2=0.999,
                                   epsilon=1e-07,
                                   amsgrad=False,
                                   name='Adam')

    #     opt = tf.keras.optimizers.SGD(
    #     learning_rate=0.01, momentum=0.0, nesterov=False, name='SGD')

    #opt = tf.keras.optimizers.RMSprop(learning_rate=0.001, rho=0.9, momentum=0.9, epsilon=1e-07, centered=False, name='RMSprop')

    #cvae.compile(optimizer='rmsprop',run_eagerly=True)
    cvae.compile(optimizer=opt, run_eagerly=True)

    #return cvae, cvae_fg, z_encoder, s_encoder, cvae_decoder
    return cvae, z_encoder, s_encoder, cvae_decoder
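# A hedged usage sketch (hypothetical shapes, placeholder data) of the builder above:
# the contrastive VAE takes paired target/background volumes and, because the loss is
# attached with add_loss, fit is called without explicit targets.
import numpy as np

cvae, z_encoder, s_encoder, cvae_decoder = get_MRI_CCVAE_3D(input_shape=(64, 64, 64, 1),
                                                            latent_dim=2)

tg = np.random.rand(8, 64, 64, 64, 1).astype('float32')  # placeholder target volumes
bg = np.random.rand(8, 64, 64, 64, 1).astype('float32')  # placeholder background volumes
cvae.fit([tg, bg], epochs=1, batch_size=4)                # hypothetical settings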
Example #17
def dice_coef(y_true, y_pred):
    y_true = K.flatten(y_true)
    y_pred = K.flatten(y_pred)
    intersection = K.sum(y_true * y_pred)
    return 2.0 * intersection / (K.sum(y_true) + K.sum(y_pred) + 1)
Example #18
def get_MRI_VAE_3D(input_shape=(64, 64, 64, 1),
                   latent_dim=2,
                   batch_size=32,
                   disentangle=False,
                   gamma=1):
    #TODO: add discriminator loss, see if there is improvement. Perhaps try on shapes dataset if it's easier...

    image_size, _, _, channels = input_shape
    kernel_size = 3
    filters = 16
    intermediate_dim = 128
    epochs = 10
    nlayers = 2

    # VAE model = encoder + decoder
    # build encoder model
    inputs = Input(shape=input_shape, name='encoder_input')
    x = inputs
    for i in range(nlayers):
        filters *= 2
        x = Conv3D(filters=filters,
                   kernel_size=kernel_size,
                   activation='relu',
                   strides=2,
                   padding='same')(x)

    # shape info needed to build decoder model
    shape = K.int_shape(x)

    # generate latent vector Q(z|X)
    x = Flatten()(x)
    x = Dense(intermediate_dim, activation='relu')(x)
    z_mean = Dense(latent_dim, name='z_mean')(x)
    z_log_var = Dense(latent_dim, name='z_log_var')(x)

    # use reparameterization trick to push the sampling out as input
    # note that "output_shape" isn't necessary with the TensorFlow backend
    z = Lambda(sampling, output_shape=(latent_dim, ),
               name='z')([z_mean, z_log_var])

    # instantiate encoder model
    encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder')

    # build decoder model
    latent_inputs = Input(shape=(latent_dim, ), name='z_sampling')
    x = Dense(intermediate_dim, activation='relu')(latent_inputs)
    x = Dense(shape[1] * shape[2] * shape[3] * shape[4], activation='relu')(x)
    x = Reshape((shape[1], shape[2], shape[3], shape[4]))(x)

    for i in range(nlayers):
        x = Conv3DTranspose(filters=filters,
                            kernel_size=kernel_size,
                            activation='relu',
                            strides=2,
                            padding='same')(x)
        filters //= 2

    outputs = Conv3DTranspose(filters=1,
                              kernel_size=kernel_size,
                              activation='sigmoid',
                              padding='same',
                              name='decoder_output')(x)

    # instantiate decoder model
    decoder = Model(latent_inputs, outputs, name='decoder')

    #     decoder.summary()

    # instantiate VAE model
    outputs = decoder(encoder(inputs)[2])
    vae = Model(inputs, outputs, name='vae')

    if disentangle:
        discriminator = Dense(1, activation='sigmoid')

        z1 = Lambda(lambda x: x[:int(batch_size / 2), :int(latent_dim / 2)])(z)
        z2 = Lambda(lambda x: x[int(batch_size / 2):, :int(latent_dim / 2)])(z)
        s1 = Lambda(lambda x: x[:int(batch_size / 2), int(latent_dim / 2):])(z)
        s2 = Lambda(lambda x: x[int(batch_size / 2):, int(latent_dim / 2):])(z)

        q_bar = tf.keras.layers.concatenate([
            tf.keras.layers.concatenate([s1, z2], axis=1),
            tf.keras.layers.concatenate([s2, z1], axis=1)
        ],
                                            axis=0)
        q = tf.keras.layers.concatenate([
            tf.keras.layers.concatenate([s1, z1], axis=1),
            tf.keras.layers.concatenate([s2, z2], axis=1)
        ],
                                        axis=0)

        #         q_bar_score = discriminator(q_bar)
        #         q_score = discriminator(q)
        #         tc_loss = K.log(q_score / (1 - q_score))

        q_bar_score = (discriminator(q_bar) +
                       .1) * .85  # +.1 * .85 so that it's 0<x<1
        q_score = (discriminator(q) + .1) * .85
        tc_loss = K.log(q_score / (1 - q_score))

        discriminator_loss = -K.log(q_score) - K.log(1 - q_bar_score)

    reconstruction_loss = mse(K.flatten(inputs), K.flatten(outputs))
    reconstruction_loss *= image_size * image_size

    kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
    kl_loss = K.sum(kl_loss, axis=-1)
    kl_loss *= -0.5
    if disentangle:
        vae_loss = K.mean(reconstruction_loss) + K.mean(
            kl_loss) + gamma * K.mean(tc_loss) + K.mean(discriminator_loss)
    else:
        vae_loss = K.mean(reconstruction_loss) + K.mean(kl_loss)

    vae.add_loss(vae_loss)
    opt = tf.keras.optimizers.Adam(learning_rate=0.001,
                                   beta_1=0.9,
                                   beta_2=0.999,
                                   epsilon=1e-07,
                                   amsgrad=False,
                                   name='Adam')

    #vae.compile(optimizer='rmsprop')
    vae.compile(optimizer=opt)

    if disentangle:
        vae.metrics_tensors = [
            reconstruction_loss, kl_loss, tc_loss, discriminator_loss
        ]
        #     vae.summary()
    return encoder, decoder, vae
Example #19
def tp_rate(y_true, y_pred):
    return K.sum(
        K.flatten(y_true) * K.flatten(K.round(y_pred))) / K.sum(y_true)
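# Quick numeric check of tp_rate above (toy tensors; this is recall, i.e. true
# positives over all actual positives), assuming the Keras backend is imported as K:
import tensorflow as tf
from tensorflow.keras import backend as K

y_true = tf.constant([1., 1., 0., 1.])
y_pred = tf.constant([0.9, 0.2, 0.8, 0.6])
print(float(tp_rate(y_true, y_pred)))  # about 0.667: two of the three positives are recovered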
Example #20
def mse(y_true, y_pred):
    y_true = K.cast(y_true, y_pred.dtype)
    y_true = K.flatten(y_true)
    y_pred = K.flatten(y_pred)
    return K.mean(K.square(y_true - y_pred), axis=-1)
Example #21
def batch_gather(reference, indices):
    ref_shape = K.shape(reference)
    batch_size = ref_shape[0]
    n_classes = ref_shape[1]
    flat_indices = K.arange(0, batch_size) * n_classes + K.flatten(indices)
    return K.gather(K.flatten(reference), flat_indices)
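# A quick check of batch_gather above (assumes the Keras backend is imported as K):
# it selects reference[i, indices[i]] for every row i of a (batch_size, n_classes) tensor.
import tensorflow as tf
from tensorflow.keras import backend as K

reference = tf.constant([[10, 11, 12],
                         [20, 21, 22]])
indices = tf.constant([2, 0])
print(batch_gather(reference, indices).numpy())  # [12 20]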
Example #22
def b_cross(y_true, y_pred):
    y_true = K.flatten(y_true)
    y_pred = K.flatten(y_pred)
    return binary_crossentropy(y_true, y_pred)
Example #23
def d_coef(y_true, y_pred):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)

    return f1(y_true_f, y_pred_f)
Example #24
 def _single_dice_loss(self, y_true, y_pred):
     y_true = K.flatten(y_true)
     y_pred = K.flatten(y_pred)
     intersection = K.sum(y_true * y_pred)
     return 1 - ((2. * intersection + self.smooth) /
                 (K.sum(y_true) + K.sum(y_pred) + self.smooth))
Example #25
def dice_coef(y_true, y_pred):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + 1.0) / (K.sum(y_true_f) + K.sum(y_pred_f) +
                                        1.0)
Example #26
        #  miou and fmiou
        miou = torch_imgseg_metrics.mean_iou().to(device)
        print('Mean IU: {0}, type: {1}'.format(np.round(miou, 2), type(miou)))
        fmiou = torch_imgseg_metrics.Frequently_mean_iou().to(device)
        print('Frequently mean IU: {0}, type: {1}'.format(np.round(fmiou, 2), type(fmiou)))
        end_time = time.time()
        print('Torch process: \n Time: {} ms'.format(timer(start_time, end_time)))

    if is_tf:
        import tensorflow as tf
        from tensorflow_loss import tensorflow_ImgSeg_Metrics
        import tensorflow.keras.backend as k

        gt_ = tf.convert_to_tensor(gt_, dtype=tf.int32)
        pred_ = tf.convert_to_tensor(pred_, dtype=tf.int32)
        gt_flatten = k.flatten(gt_)
        pred_flatten = k.flatten(pred_)
        tensorflow_imgseg_metrics = tensorflow_ImgSeg_Metrics(gt_flatten, pred_flatten)

        start_time = time.time()
        np_cm = tensorflow_imgseg_metrics.confusion_matrix()
        end_time = time.time()
        print('Numpy confusion matrix: \n {0} \n Time: {1} ms'.format(np_cm, timer(start_time, end_time)))

        start_time = time.time()
        #  pixel_accuracy, mean_pixel_acc
        pixel_acc = tensorflow_imgseg_metrics.pixel_accuracy()
        print('Pixel accuracy: {0}%, type: {1}'.format(np.round(pixel_acc * 100, 2), type(pixel_acc)))
        mean_pixel_acc = tensorflow_imgseg_metrics.mean_pixel_accuracy()
        print('Mean pixel accuracy: {0}%, type: {1}'.format(np.round(mean_pixel_acc * 100, 2), type(mean_pixel_acc)))