Example #1
def model_DIBCO(inp_shape=(256, 256, 3)):
    I = Input(inp_shape)
    #I=Input(shape=(None,None,1))

    z1 = Dilation2D(2, (8, 8), padding="same", strides=(1, 1))(I)
    z2 = Erosion2D(2, (8, 8), padding="same", strides=(1, 1))(I)
    z3 = Concatenate()([z1, z2])
    z3 = Conv2D(8, (1, 1), padding='same')(z3)

    for j in range(1):  # one extra dilation/erosion block; widen the range for a deeper stack
        z1 = Dilation2D(2, (8, 8), padding="same", strides=(1, 1))(z3)
        z2 = Erosion2D(2, (8, 8), padding="same", strides=(1, 1))(z3)
        z3 = Concatenate()([z1, z2])
        z3 = Conv2D(8, (1, 1), padding='same')(z3)

    z1 = Dilation2D(2, (8, 8), padding="same", strides=(1, 1))(z3)
    z2 = Erosion2D(2, (8, 8), padding="same", strides=(1, 1))(z3)
    z3 = Concatenate()([z1, z2])
    z3 = Conv2D(1, (1, 1), padding='same', activation='sigmoid')(z3)

    model = Model(inputs=[I], outputs=[z3])
    model.compile(loss=DSSIMObjective(kernel_size=100),
                  optimizer="adam",
                  metrics=['mse', DSSIMObjective(kernel_size=100)])

    return model
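Note that Dilation2D and Erosion2D are not stock Keras layers; they appear to come from a custom or third-party morphological-layer implementation, so the following usage sketch is hedged (the data names are placeholders):

model = model_DIBCO(inp_shape=(256, 256, 3))
model.summary()
# X_patches, Y_clean would be DIBCO document patches and their binarized targets
# model.fit(X_patches, Y_clean, batch_size=8, epochs=10)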
Example #2
def test_DSSIM_channels_first():
    prev_data = K.image_data_format()
    K.set_image_data_format('channels_first')
    for input_dim, kernel_size in zip([32, 33], [2, 3]):
        input_shape = [3, input_dim, input_dim]
        X = np.random.random_sample(4 * input_dim * input_dim * 3)
        X = X.reshape([4] + input_shape)
        y = np.random.random_sample(4 * input_dim * input_dim * 3)
        y = y.reshape([4] + input_shape)

        model = Sequential()
        model.add(Conv2D(32, (3, 3), padding='same', input_shape=input_shape,
                         activation='relu'))
        model.add(Conv2D(3, (3, 3), padding='same', input_shape=input_shape,
                         activation='relu'))
        adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
        model.compile(loss=DSSIMObjective(kernel_size=kernel_size), metrics=['mse'],
                      optimizer=adam)
        model.fit(X, y, batch_size=2, epochs=1, shuffle='batch')

        # Test same
        x1 = K.constant(X, 'float32')
        x2 = K.constant(X, 'float32')
        dssim = DSSIMObjective(kernel_size=kernel_size)
        assert_allclose(0.0, K.eval(dssim(x1, x2)), atol=1e-4)

        # Test opposite
        x1 = K.zeros([4] + input_shape)
        x2 = K.ones([4] + input_shape)
        dssim = DSSIMObjective(kernel_size=kernel_size)
        assert_allclose(0.5, K.eval(dssim(x1, x2)), atol=1e-4)

    K.set_image_data_format(prev_data)
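The 0.5 expectation in the second assertion follows from the SSIM definition: for the constant images x ≡ 0 and y ≡ 1 every window has μ_x = 0, μ_y = 1 and zero (co)variances, so

$$\mathrm{SSIM}(x,y)=\frac{(2\mu_x\mu_y+C_1)(2\sigma_{xy}+C_2)}{(\mu_x^2+\mu_y^2+C_1)(\sigma_x^2+\sigma_y^2+C_2)}=\frac{C_1}{1+C_1}\approx 0,$$

and therefore DSSIM = (1 − SSIM)/2 ≈ 0.5, within the test's tolerance of 1e-4.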
Example #3
def compile_model(model, lambda1=0.05):

    I1 = model.inputs[0]
    I2 = model.inputs[1]
    o1 = model.outputs[0]

    # approximate the backward (inverse) flow by warping the negated forward flow with itself
    o2 = image_warp(-o1, o1)

    I2_rec = image_warp(I1, o1)
    I1_rec = image_warp(I2, o2)

    ux, uy = grad_xy(o1[:, :, :, :1])
    vx, vy = grad_xy(o1[:, :, :, 1:2])
    sm_loss = lambda1 * (K.mean(
        K.abs(ux * ux) + K.abs(uy * uy) + K.abs(vx * vx) + K.abs(vy * vy)))

    re_loss1 = DSSIMObjective(kernel_size=50)(I2, I2_rec)
    re_loss2 = DSSIMObjective(kernel_size=50)(I1, I1_rec)

    total_loss = sm_loss + re_loss1 + re_loss2  # sm_loss already carries the lambda1 weight

    model = Model(inputs=[I1, I2], outputs=[o1])
    model.add_loss(total_loss)
    model.compile(optimizer=keras.optimizers.Adadelta(
        lr=1.0, rho=0.95, epsilon=None, decay=0.0))

    return model
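Because the total loss is attached via model.add_loss and compile receives only an optimizer, training needs no explicit targets. A minimal sketch, assuming frame pairs as plain numpy arrays (names hypothetical):

# I1_batch, I2_batch: arrays of shape (batch, H, W, C) -- hypothetical inputs
flow_model = compile_model(flow_model)
flow_model.fit([I1_batch, I2_batch], None, batch_size=4, epochs=1)

The next variant extends the same add_loss pattern with occlusion handling: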
def compile_model(model, lambda_smoothness=0, lambda_flow=0.0001, lambda_mse=0, occ_punishment=0):

    i1 = model.inputs[0]
    i2 = model.inputs[1]
    o1 = model.outputs[0]
    # approximate the backward (inverse) flow by warping the negated forward flow
    o2 = image_warp(-o1, o1)

    oxf, oxb = mask(i1, i2, o1, o2)
    mask_f = oxf[:, :, :, 0]
    mask_b = oxb[:, :, :, 1]

    err_f, err_b = photometric_error(i1, i2, o1, o2)
    flow_f, flow_b = flow_error(o1, o2)

    # -------- occlusion-aware reconstruction (photometric) error --------
    occ_loss1 = tf.reduce_sum(tf.boolean_mask(charbonnier(err_f), mask_f))  # /(436*1024)
    occ_loss2 = tf.reduce_sum(tf.boolean_mask(charbonnier(err_b), mask_b))  # /(436*1024)
    occ_loss = (occ_loss1 + occ_loss2) * lambda_mse

    # -------- occlusion-aware forward/backward flow consistency --------
    flow_loss1 = tf.reduce_sum(tf.boolean_mask(charbonnier(flow_f), mask_f))
    flow_loss2 = tf.reduce_sum(tf.boolean_mask(charbonnier(flow_b), mask_b))
    flow_loss = (flow_loss1 + flow_loss2) * lambda_flow

    # -------- punishment for marking pixels as occluded --------
    occ_punish1 = tf.reduce_sum(tf.cast(mask_f, tf.float32)) * occ_punishment
    occ_punish2 = tf.reduce_sum(tf.cast(mask_b, tf.float32)) * occ_punishment
    occ_punish = occ_punish1 + occ_punish2

    # -------- gradient smoothness --------
    ux, uy = grad_xy(o1[:, :, :, :1])
    vx, vy = grad_xy(o1[:, :, :, 1:2])
    sm_loss_o1 = K.mean(K.abs(ux * ux) + K.abs(uy * uy) + K.abs(vx * vx) + K.abs(vy * vy))
    ux, uy = grad_xy(o2[:, :, :, :1])
    vx, vy = grad_xy(o2[:, :, :, 1:2])
    sm_loss_o2 = K.mean(K.abs(ux * ux) + K.abs(uy * uy) + K.abs(vx * vx) + K.abs(vy * vy))
    sm_loss = (sm_loss_o1 + sm_loss_o2) * lambda_smoothness

    # -------- SSIM reconstruction loss (occlusion not considered) --------
    i2_rec = image_warp(i1, o1)
    i1_rec = image_warp(i2, o2)
    re_loss1 = DSSIMObjective(kernel_size=50)(i2, i2_rec)
    re_loss2 = DSSIMObjective(kernel_size=50)(i1, i1_rec)
    re_loss_ssim = re_loss1 + re_loss2

    total_loss = sm_loss + occ_loss + occ_punish + re_loss_ssim + flow_loss

    model = Model(inputs=[i1, i2], outputs=[o1])
    model.add_loss(total_loss)
    model.compile(optimizer=keras.optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=None, decay=0.0))

    return model
Example #5
def compile_model(model, lambda1=0.005):
    s1 = tf.get_variable("sig1",
                         trainable=True,
                         initializer=tf.constant([0.3]))
    s2 = tf.get_variable("sig2",
                         trainable=True,
                         initializer=tf.constant([0.7]))
    s1_2 = s1 * s1
    s2_2 = s2 * s2

    I1 = model.inputs[0]
    I2 = model.inputs[1]
    o1 = model.outputs[0]

    I2_rec = image_warp(I1, o1)
    I1_rec = image_warp(I2, -o1)

    ux, uy = grad_xy(o1[:, :, :, :1])
    vx, vy = grad_xy(o1[:, :, :, 1:2])
    sm_loss = (K.mean(
        K.abs(ux * ux) + K.abs(uy * uy) + K.abs(vx * vx) + K.abs(vy * vy)))

    # re_loss_mse = K.mean(K.square(I2 - input1_rec))
    re_loss1 = DSSIMObjective(kernel_size=50)(I2, I2_rec)
    re_loss2 = DSSIMObjective(kernel_size=50)(I1, I1_rec)

    ###############################################
    # input1_rec=image_warp(model.inputs[0],model.outputs[0],num_batch=b_size)
    # input0_rec=image_warp(model.inputs[1],-model.outputs[0],num_batch=b_size)
    # ux,uy=grad_xy(model.outputs[0][:,:,:,:1])
    # vx,vy=grad_xy(model.outputs[0][:,:,:,1:2])
    # sm_loss=lambda1*(K.mean(K.abs(ux*ux)+ K.abs(uy*uy)+ K.abs(vx*vx)+ K.abs(vy*vy)))
    # re_loss=DSSIMObjective(kernel_size=50)(model.inputs[1],input1_rec)
    ################################################

    # loss_mse = K.mean(K.square(model.outputs[0] - Y))

    re_loss = re_loss1 + re_loss2

    total_loss = (1 / s1_2) * re_loss + (1 / s2_2) * sm_loss + K.log(
        s1_2) + K.log(s2_2)

    model = Model(inputs=[I1, I2], outputs=[o1])
    model.add_loss(total_loss)
    model.compile(loss="mse",
                  optimizer=keras.optimizers.Adadelta(lr=1.0,
                                                      rho=0.95,
                                                      epsilon=None,
                                                      decay=0.0))
    #model.compile(optimizer='rmsprop')

    return model
Example #6
def total_loss(y_true, y_pred):
    s1 = tf.get_variable("sig1", shape=(1, ), trainable=True)
    s2 = tf.get_variable("sig2", shape=(1, ), trainable=True)
    loss = (1 / (s1 * s1)) * DSSIMObjective(kernel_size=100)(
        y_true, y_pred) + (1 / (s2 * s2)) * (PSNRLoss(y_true, y_pred)) + K.log(
            s1 * s1) + K.log(s2 * s2)
    return loss
def total_loss1(y_true, y_pred):
    s1 = tf.get_variable("sig1", shape=(1,), trainable=True)
    s2 = tf.get_variable("sig2", shape=(1,), trainable=True)
    # loss = (1/(s1*s1+K.epsilon()))*DSSIMObjective(kernel_size=100)(y_true, y_pred) + (1/(s2*s2+K.epsilon()))*K.mean(K.abs(y_true-y_pred)) + 2*K.log(s1) + 2*K.log(s2)
    loss = (1 / (s1 * s1)) * DSSIMObjective(kernel_size=100)(y_true, y_pred) + \
           (1 / (s2 * s2)) * K.mean(K.abs(y_true - y_pred)) + K.log(s1 * s1) + K.log(s2 * s2)

    return loss
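Both variants weight their two terms with learned variances, in the spirit of homoscedastic multi-task loss weighting (Kendall et al.):

$$\mathcal{L} = \frac{1}{\sigma_1^2}\,\mathcal{L}_{\mathrm{DSSIM}} + \frac{1}{\sigma_2^2}\,\mathcal{L}_{2} + \log\sigma_1^2 + \log\sigma_2^2,$$

where the log terms keep the sigmas from growing without bound. Note that tf.get_variable raises on a second call with the same name unless the enclosing variable scope permits reuse, so as written each function can only be instantiated once per graph.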
Example #8
def get_dssim_l1_loss(alpha=0.84, kernel_size=3, max_value=1.0):
    from keras_contrib.losses import DSSIMObjective
    from keras.losses import mean_absolute_error
    dssim = DSSIMObjective(kernel_size=kernel_size, max_value=max_value)
    def DSSIM_L1(y_true, y_pred):
        return alpha*dssim(y_true, y_pred) + (1.0-alpha)*mean_absolute_error(y_true, y_pred)
    return DSSIM_L1
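A hedged usage sketch (`model` stands for any Keras model; the default alpha = 0.84 appears to match the SSIM/L1 mix reported by Zhao et al. for image restoration):

loss_fn = get_dssim_l1_loss(alpha=0.84, kernel_size=3)
model.compile(optimizer='adam', loss=loss_fn)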
Example #9
def loss_new(y_true, y_pred):
    #loss=DSSIMObjective(kernel_size=20)(y_true,y_pred)+ K.mean(K.abs(y_true-y_pred))
    loss_ssim = DSSIMObjective(kernel_size=100)(
        y_true, y_pred)  #+ K.mean(K.abs(y_true-y_pred))
    loss_psnr = PSNRLoss(y_true, y_pred)
    loss = loss_ssim + 0.006 * loss_psnr
    return loss
Example #10
def DSSIM_RGB(y_true, y_pred):

    #loss1=DSSIMObjective(kernel_size=23)(y_true[:,:,:,:1],y_pred[:,:,:,:1])
    #loss2=DSSIMObjective(kernel_size=23)(y_true[:,:,:,1:2],y_pred[:,:,:,1:2])
    #loss3=DSSIMObjective(kernel_size=23)(y_true[:,:,:,2:3],y_pred[:,:,:,2:3])
    #loss=K.mean(loss1+loss2+loss3)
    #loss=(loss1+loss2+loss3)/3.0
    loss = DSSIMObjective(kernel_size=23)(y_true[:, :, :, :3],
                                          y_pred[:, :, :, :3])
    return loss
Example #11
def fcnn_loss(input_img, output, beta=1):
    # Compute error in reconstruction
    reconstruction_loss = mae(input_img, output)

    # compute the structural similarity index
    dssim = DSSIMObjective()
    structural_loss = dssim(input_img, output)

    total_loss = reconstruction_loss + (beta * structural_loss)
    return total_loss
Example #12
def mse_ssim_loss():
    dssim = DSSIMObjective()
    mse = losses.mean_squared_error

    # Create a loss function that adds the MSE loss to SSIM loss
    def loss(y_true, y_pred):
        #return K.mean(mse(y_true, y_pred), dssim(y_true,y_pred), axis=-1)
        return mse(y_true, y_pred) + dssim(y_true, y_pred)

    # Return a function
    return loss
def compile_model(model, lambda1=0.05):

    s1 = tf.get_variable("sig1",
                         trainable=True,
                         initializer=tf.constant([0.3]))
    s2 = tf.get_variable("sig2",
                         trainable=True,
                         initializer=tf.constant([0.7]))
    s1_2 = s1 * s1
    s2_2 = s2 * s2

    I1 = model.inputs[0]
    I2 = model.inputs[1]
    o1 = model.outputs[0]

    # this is to calculate the inverse_warp
    o2 = image_warp(-o1, o1)

    I2_rec = image_warp(I1, o1)
    I1_rec = image_warp(I2, o2)

    ux, uy = grad_xy(o1[:, :, :, :1])
    vx, vy = grad_xy(o1[:, :, :, 1:2])
    sm_loss = (K.mean(
        K.abs(ux * ux) + K.abs(uy * uy) + K.abs(vx * vx) + K.abs(vy * vy)))

    # re_loss_mse = K.mean(K.square(I2 - input1_rec))
    re_loss1 = DSSIMObjective(kernel_size=50)(I2, I2_rec)
    re_loss2 = DSSIMObjective(kernel_size=50)(I1, I1_rec)

    re_loss = re_loss1 + re_loss2

    total_loss = (1 / s1_2) * re_loss + (1 / s2_2) * sm_loss + K.log(
        s1_2) + K.log(s2_2)

    model = Model(inputs=[I1, I2], outputs=[o1])
    model.add_loss(total_loss)
    model.compile(optimizer=keras.optimizers.Adadelta(
        lr=1.0, rho=0.95, epsilon=None, decay=0.0))

    return model
def compile_model_new(model, b_size, lambda1=0.002):
    """
    session=tf.Session()
    session.run(tf.global_variables_initializer())
    
    var = [v for v in tf.trainable_variables() if v.name == "sig1:0"][0]
    session.run(var)
    """
    #s1 = tf.get_variable("sig1", shape=(1,), trainable=True,initializer=tf.constant([0.3]))
    #s2 = tf.get_variable("sig2", shape=(1,), trainable=True,initializer=tf.constant([0.7]))
    #s1 = tf.get_variable("sig1",  trainable=True,initializer=tf.constant([0.3]))
    #s2 = tf.get_variable("sig2",  trainable=True,initializer=tf.constant([0.7]))
    #s1_2=s1*s1
    #s2_2=s1*s1

    input1_rec = image_warp(model.inputs[0],
                            model.outputs[0],
                            num_batch=b_size)
    input0_rec = image_warp(model.inputs[1],
                            -model.outputs[0],
                            num_batch=b_size)

    ux, uy = grad_xy(model.outputs[0][:, :, :, :1])
    vx, vy = grad_xy(model.outputs[0][:, :, :, 1:2])
    sm_loss = lambda1 * (K.mean(
        K.abs(ux * ux) + K.abs(uy * uy) + K.abs(vx * vx) + K.abs(vy * vy)))

    re_loss = DSSIMObjective(kernel_size=50)(model.inputs[1], input1_rec)

    loss_mse = K.mean(K.square(model.outputs[0] - model.inputs[1]))

    #total_loss=(1/s1_2)*re_loss+(1/s2_2)*sm_loss+K.log(s1_2)+K.log(s2_2)
    total_loss = sm_loss + re_loss + loss_mse  # sm_loss is already scaled by lambda1

    model.add_loss(total_loss)

    model.compile(optimizer='rmsprop')

    return model
def hourglass_net():
    def conv(x,
             filters,
             kernel_size=1,
             strides=(1, 1),
             padding='same',
             name="conv"):
        x = Conv2D(filters,
                   kernel_size,
                   strides=strides,
                   padding=padding,
                   use_bias=False)(x)
        return x

    def max_pool2d(x, pool_size=(2, 2), strides=(2, 2), padding="valid"):
        x = MaxPooling2D(pool_size=pool_size, strides=strides,
                         padding=padding)(x)
        return x

    def conv_bn(x,
                filters,
                kernel_size=1,
                strides=(1, 1),
                padding='same',
                name="conv_bn"):
        x = conv(x, filters, kernel_size, strides, padding, name)
        x = BatchNormalization(axis=-1, scale=False)(x)
        return x

    def conv_bn_relu(x,
                     filters,
                     kernel_size=1,
                     strides=1,
                     padding="same",
                     name="conv_bn_rel"):
        x = conv(x, filters, kernel_size, strides, padding, name)
        x = BatchNormalization(axis=-1, scale=False)(x)
        x = Activation('relu')(x)
        return x

    def conv_block(x, numOut, name="conv_block"):
        x = BatchNormalization(axis=-1, scale=False)(x)
        x = Activation('relu')(x)
        x = conv(x, int(numOut))
        return x

    def skip_layer(x, numOut, name='skip_layer'):

        if x.shape[3] == numOut:  # identity skip when channel counts already match
            return x

        x = conv(x, numOut)
        return x

    def residual_block(x, numOut, name="residual_block"):

        convb = conv_block(x, numOut)
        skip_l = skip_layer(x, numOut)
        x = Add()([convb, skip_l])
        x = Activation('relu')(x)
        return x

    def up_block(x, numOut, kernel_size=(3, 3), strides=(1, 1)):
        x = Conv2DTranspose(filters=numOut,
                            kernel_size=kernel_size,
                            padding='same',
                            strides=strides,
                            kernel_initializer='he_normal')(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        return x

    def hourglass(x, numOut, name='hourglass'):

        # upper branch
        up_1 = residual_block(x, numOut, name="up_1")
        # lower branch
        low_ = max_pool2d(x)
        low_1 = residual_block(low_, numOut, name="low_1")

        up_2 = residual_block(low_1, numOut, name="up_2")

        low_2 = max_pool2d(low_1)
        low_2 = residual_block(low_2, numOut, name="low_2")

        up_3 = residual_block(low_2, numOut, name="up_3")

        low_3 = max_pool2d(low_2)
        low_3 = residual_block(low_3, numOut, name="low_3_1")
        low_3 = residual_block(low_3, numOut, name="low_3_2")
        low_3 = residual_block(low_3, numOut, name="low_3_3")
        #low_up_3 = UpSampling2D((2,2))(low_3)
        low_up_3 = up_block(low_3, numOut, strides=(2, 2))
        low_up_3 = Add()([low_up_3, up_3])
        low_up_2 = residual_block(low_up_3, numOut, name="low_up_2")
        #low_up_2 = UpSampling2D((2,2))(low_up_2)
        low_up_2 = up_block(low_up_2, numOut, strides=(2, 2))
        low_up_1 = Add()([low_up_2, up_2])
        low_up_1 = residual_block(low_up_1, numOut, name="low_up_1")
        #low_up_1 = UpSampling2D((2,2))(low_up_1)
        low_up_1 = up_block(low_up_1, numOut, strides=(2, 2))

        x = Add()([low_up_1, up_1])
        x = Activation('relu')(x)
        x = Dropout(0.2)(x)
        return x


    inputs = Input((image_rows_low, image_cols, channel_num))
    filters = 128

    # upscaling
    x0 = inputs
    for _ in range(int(np.log(upscaling_factor) / np.log(2))):
        x0 = up_block(x0, filters, strides=(2, 1))

    hg1 = hourglass(x0, filters, name='hourglass_1')
    hg2 = hourglass(hg1, filters, name='hourglass_2')
    hg3 = hourglass(hg2, filters, name='hourglass_3')

    # last_put = conv_bn_relu(hg3, 1, kernel_size = 1, strides = 1, padding="same", name="conv_bn_rel")
    last_put = Conv2D(1, (1, 1), activation='relu')(hg3)

    def PSNR(y_true, y_pred):
        # note: returns *negative* PSNR (K.log is ln, so /2.303 converts to log10)
        max_pixel = 1.0
        return -(10.0 * K.log(
            (max_pixel**2) / (K.mean(K.square(y_pred - y_true))))) / 2.303

    from keras_contrib.losses import DSSIMObjective

    loss_func = DSSIMObjective()

    model = Model(inputs=inputs, outputs=last_put)
    model.compile(optimizer=Adam(lr=0.0001, decay=0.00001),
                  loss=loss_func,
                  metrics=['accuracy', 'mse', 'mae', PSNR, loss_func])

    model.summary()

    print("Input shape: ", inputs.shape)
    print("Output length: ", last_put.shape)

    return model
train_generator = aae.generate_keras_input('train')

mse = []
ssim = []

for i in range(973):
    imgs_true, under, _ = next(train_generator)
    z = encoder.predict(under)
    imgs_rec = generator.predict(z)
    
    mse.append(np.mean((imgs_true - imgs_rec)**2))

    imgs_true = K.constant(imgs_true)
    imgs_rec = K.constant(imgs_rec)

    ssim.append(K.eval(DSSIMObjective()(imgs_true, imgs_rec)))

print(np.mean(np.asarray(mse)))
print(np.mean(np.asarray(ssim)))

val_generator = aae.generate_keras_input('val')

mse = []
ssim = []

for i in range(100):
    imgs_true, under, _ = next(val_generator)
    z = encoder.predict(under)
    imgs_rec = generator.predict(z)

    mse.append(np.mean((imgs_true - imgs_rec)**2))

    imgs_true = K.constant(imgs_true)
    imgs_rec = K.constant(imgs_rec)

    ssim.append(K.eval(DSSIMObjective()(imgs_true, imgs_rec)))

print(np.mean(np.asarray(mse)))
print(np.mean(np.asarray(ssim)))
Example #17

file_in, file_out = get_in_out_file()
X, Y_gt = read_files(file_in, file_out)
model_path1 = path1_old()
model_path2 = path2_old()
model_path12_new = model_new()
model_cnn = create_CNN_model()

model_list = [model_path1, model_path2, model_path12_new, model_cnn]
model_list = load_weights(model_list)

#our_loss=DSSIMObjective(kernel_size=23)
#our_loss=loss_all
num_epochs = 1

for i in range(len(model_list)):
    model_list[i].compile(loss=DSSIMObjective(kernel_size=23),
                          optimizer="RMSprop",
                          metrics=['mse',
                                   DSSIMObjective(kernel_size=23)])
    model_list[i].fit(X, Y_gt, epochs=num_epochs, batch_size=4)

#save_models(model_list)
"""
####   Test  ##################
datagen=gen_data(batch_size=4)
t1,t2=datagen.next()
t2_out=model.predict(t1)
"""
Example #18
import matplotlib.pyplot as plt
from tensorflow import keras
from keras_contrib.losses import DSSIMObjective
from keras.losses import mean_squared_error
smiloss = DSSIMObjective()
def rmse_loss(y_true, y_pred):
    return K.sqrt(K.mean(K.square(y_pred - y_true)))

def rmse_smi_loss(y_true,y_pred):
    return smiloss(y_true,y_pred) + K.sqrt(K.mean(K.square(y_pred - y_true)))

Input = keras.layers.Input

Dense = keras.layers.Dense
Conv2D = keras.layers.Conv2D
MaxPooling2D = keras.layers.MaxPooling2D
UpSampling2D = keras.layers.UpSampling2D
Model = keras.models.Model
K = keras.backend

input_img = Input(shape=(256, 256, 1))  # adapt this if using `channels_first` image data format
x = Conv2D(128, (3, 3), activation='relu', padding='same')(input_img)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(64, (3, 3), activation='relu', padding='same')(x)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(32, (3, 3), activation='relu', padding='same')(x)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(16, (3, 3), activation='relu', padding='same')(x)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
Example #19
x = upscale(64)(x)
x = res_block(x, 64)
x = upscale(32)(x)
#x = res_block(x, 32)
#x = upscale(16)(x)

x = Conv2D(3, kernel_size=5, padding='same', activation='sigmoid')(x)
m1 = Input(shape=(128, 128, 1))
sumModel = Model([input_warped, input_examples, m1], [x])
print(sumModel.summary())
try:
    sumModel.load_weights('weights.dat')
except IOError:
    print('Weights not found')

from keras_contrib.losses import DSSIMObjective

DSSIM = DSSIMObjective()

sumModel.compile(optimizer=optimizer, loss=['mae'])

for n in range(900000):
    imagesAGen = get_training_data(images_A, landmarks_A, batch_size)
    imagesBGen = get_training_data(images_B, landmarks_B, batch_size)
    imagesCGen = get_training_data(images_C, landmarks_C, batch_size)
    imagesDGen = get_training_data(images_D, landmarks_D, batch_size)

    xa, xae, ya, ma = next(imagesAGen)
    xb, xbe, yb, mb = next(imagesBGen)
    xc, xce, yc, mc = next(imagesCGen)
    xd, xde, yd, md = next(imagesDGen)
Example #20
def reconstruction(img, reconstructed_img):
    #return huber_loss(img, reconstructed_img)
    return DSSIMObjective()(img, reconstructed_img)
Example #21
import keras.models
import keras.losses
import scipy.misc
import numpy as np
import skimage.measure
import os
from keras_contrib.losses import DSSIMObjective

keras.losses.dssim = DSSIMObjective()
import sys

sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))

import matplotlib

matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cmocean
import cmocean.cm

# from keras.utils.generic_utils import get_custom_objects
#
# loss = DSSIMObjective()
# get_custom_objects().update({"dssim": loss})

from src.data.loader import DataLoader
from src.processing.folders import Folders
from PIL import Image
from src.visualization.ssim_plotter import SSIMPlotter
Example #22
import numpy as np
from keras_contrib.losses import DSSIMObjective
from keras import backend as K

#Shape should be (batch,x,y,channels)
imga = np.random.normal(size=(1,256,256,3))
imgb = np.random.normal(size=(1,256,256,3))

loss_func = DSSIMObjective()

resulting_loss1 = K.eval(loss_func(K.variable(imga),K.variable(imgb)))
resulting_loss2 = K.eval(loss_func(K.variable(imga),K.variable(imga)))

print ("Loss for different images: %.2f" % resulting_loss1)
print ("Loss for same image: %.2f" % resulting_loss2)
import matplotlib.image as mpimg
from skimage.transform import resize
from keras.models import Model, load_model
import math
import numpy as np
from keras.layers import Conv2D, Conv2DTranspose, Dense, Dropout, Flatten, Input, MaxPooling2D, UpSampling2D
from keras.applications.vgg16 import VGG16
from keras_contrib.losses import DSSIMObjective
import glob
import time
import sys
#--------------------------------------main code-----------------------------------------------------------------------

model = load_model(
    'model.h5',
    custom_objects={'DSSIMObjective': DSSIMObjective(kernel_size=23)})
model.load_weights('model_weights.h5')


def patchextractor(img, patchsize, stride):
    patch = []
    (a, b) = img.shape
    xlim = int(
        np.ceil(float(max(a, patchsize[0]) - patchsize[0] + stride) /
                stride)) * stride
    ylim = int(
        np.ceil(float(max(b, patchsize[1]) - patchsize[1] + stride) /
                stride)) * stride
    # print(xlim,ylim)

    for i in range(0, xlim, stride):
[model_path1, model_path2, model_path12, model_cnn, model_path12_new,
 model_morph_type2] = load_weights(model_list)
model_list = [
    model_path1,
    model_path2,
    model_path12,
    model_cnn,
    model_path12_new,
    model_morph_type2,
]

# our_loss=DSSIMObjective(kernel_size=23)
our_loss = loss_all
num_epochs = 20

for i in range(len(model_list)):
    datagen = gen_data(batch_size=4)
    model_list[i].compile(
        loss=our_loss,
        optimizer="RMSprop",
        metrics=["mse", DSSIMObjective(kernel_size=23)],
    )
    model_list[i].fit_generator(datagen, epochs=num_epochs, steps_per_epoch=20)

save_models(model_list)
"""
####   Test  ##################
datagen=gen_data(batch_size=4)
t1,t2=datagen.next()
t2_out=model.predict(t1)
"""
Example #25
def SSIM(y_true, y_pred):
    # DSSIM = (1 - SSIM) / 2 => SSIM = 1 - 2 * DSSIM
    dssim = DSSIMObjective()
    return 1 - 2 * dssim(y_true, y_pred)
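A quick hedged sanity check of the rescaling (run alongside the definition above, with DSSIMObjective importable as in the other examples; identical inputs should give SSIM of about 1 because DSSIM(x, x) == 0):

import numpy as np
from keras import backend as K

x = K.constant(np.random.rand(1, 64, 64, 3).astype('float32'))
print(K.eval(SSIM(x, x)))  # expected: ~1.0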
Example #26
def loss_new(y_true, y_pred):
    loss = DSSIMObjective(kernel_size=100)(y_true, y_pred) + K.mean(
        K.abs(y_true - y_pred))
    return loss
def LSTM():
    def conv_block(input, filters=64, kernel_size=(3, 3)):
        x = Conv2D(filters=filters,
                   kernel_size=kernel_size,
                   padding='same',
                   kernel_initializer=kernel_init)(input)
        x = BatchNormalization()(x)
        x = Activation(act_func)(x)
        x = Conv2D(filters=filters,
                   kernel_size=kernel_size,
                   padding='same',
                   kernel_initializer=kernel_init)(x)
        x = BatchNormalization()(x)
        x = Activation(act_func)(x)
        return x

    def up_block(input, filters=64, kernel_size=(3, 3), strides=(1, 1)):
        x = Conv2DTranspose(filters=filters,
                            kernel_size=kernel_size,
                            padding='same',
                            strides=strides,
                            kernel_initializer=kernel_init)(input)
        x = BatchNormalization()(x)
        x = Activation(act_func)(x)
        return x

    filters = 64
    dropout_rate = 0.25
    act_func = 'relu'
    kernel_init = 'he_normal'

    inputs = Input((image_rows_low, image_cols, channel_num))

    # upscaling
    x0 = inputs
    for _ in range(int(np.log(upscaling_factor) / np.log(2))):
        x0 = up_block(x0, filters, strides=(2, 1))

    x1 = conv_block(x0, filters)

    x2 = AveragePooling2D((2, 2))(x1)
    x2 = Dropout(dropout_rate)(x2, training=True)
    x2 = conv_block(x2, filters * 2)

    x3 = AveragePooling2D((2, 2))(x2)
    x3 = Dropout(dropout_rate)(x3, training=True)
    x3 = conv_block(x3, filters * 4)

    x4 = AveragePooling2D((2, 2))(x3)
    x4 = Dropout(dropout_rate)(x4, training=True)
    x4 = conv_block(x4, filters * 8)

    y4 = AveragePooling2D((2, 2))(x4)
    y4 = Dropout(dropout_rate)(y4, training=True)
    y4 = conv_block(y4, filters * 16)
    y4 = Dropout(dropout_rate)(y4, training=True)
    y4 = up_block(y4, filters * 8, strides=(2, 2))

    y3 = concatenate([x4, y4], axis=3)
    y3 = conv_block(y3, filters * 8)
    y3 = Dropout(dropout_rate)(y3, training=True)
    y3 = up_block(y3, filters * 4, strides=(2, 2))

    y2 = concatenate([x3, y3], axis=3)
    y2 = conv_block(y2, filters * 4)
    y2 = Dropout(dropout_rate)(y2, training=True)
    y2 = up_block(y2, filters * 2, strides=(2, 2))

    y1 = concatenate([x2, y2], axis=3)
    y1 = conv_block(y1, filters * 2)
    y1 = Dropout(dropout_rate)(y1, training=True)
    y1 = up_block(y1, filters, strides=(2, 2))

    y0 = concatenate([x1, y1], axis=3)
    y0 = conv_block(y0, filters)

    outputs = Conv2D(1, (1, 1), activation=act_func)(y0)

    def PSNR(y_true, y_pred):
        max_pixel = 1.0
        return (10.0 * K.log(
            (max_pixel**2) / (K.mean(K.square(y_pred - y_true))))) / 2.303

    from keras_contrib.losses import DSSIMObjective
    loss_func = DSSIMObjective()

    model = Model(inputs=inputs, outputs=outputs)
    # model.compile(optimizer=Adam(lr=0.0001, decay=0.00001),loss=loss_func ,metrics =['accuracy', 'mse' , 'mae', PSNR, loss_func ] )
    model.compile(optimizer=Adam(lr=0.0001, decay=0.00001),
                  loss=loss_func,
                  metrics=['accuracy', 'mse', 'mae', PSNR, loss_func])

    model.summary()

    return model
Example #28
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '4, 5'
import math
import numpy as np
from keras_contrib.losses import DSSIMObjective
import tensorflow as tf
from keras import backend as K
import cv2

'''
- contain code for reconstruct, blockify, finding nearest codevector
'''

ssim = lambda im1, im2: 1 - K.get_value(DSSIMObjective(kernel_size=3)(
    tf.cast(np.array(im1), tf.float32), tf.cast(np.array(im2), tf.float32)))
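# Hedged usage note: im1/im2 are equal-shaped blocks scaled to [0, 1]
# (e.g. block.astype('float32') / 255.0); a value near 1.0 means nearly
# identical blocks, since DSSIM(x, x) == 0:
#   similarity = ssim(block_a / 255.0, block_b / 255.0)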

def reconstruct_image(blocks, image_size):
    image = np.zeros(image_size)
    avg = np.zeros(image_size)
    bh = blocks.shape[1]
    bw = blocks.shape[2]
    bd = blocks.shape[3]
    for i in range(blocks.shape[0]):
        fitH = math.ceil(image_size[0]/bh)
        overH = image_size[0]%bh
        fitW = math.ceil(image_size[1]/bw)
        overW = image_size[1]%bw

        h0 = image_size[0]-bh if bh*(i//fitW)+bh>image_size[0] else bh*(i//fitW)
        h1 = h0+bh
        w0 = image_size[1]-bw if bw*(i%fitW)+bw>image_size[1] else bw*(i%fitW)
        w1 = w0+bw
Example #29
    # x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid')(x)

    x = Conv2D(256, (2, 2), name='conv4', activation=actFunc, padding='valid', strides=(2, 2))(x)  # 8
    # x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid')(x)

    # x = Conv2D(256, (5, 5), name='conv5', activation=actFunc, padding='valid', strides=(2, 2))(x)  # 1
    # x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid')(x)
    y4 = x

    x = Conv2DTranspose(128, (4, 4), activation=actFunc, padding='valid', strides=(2, 2))(x)
    # x = Conv2D(256, (3, 3), activation='tanh', padding='valid')(x)
    x = Conv2DTranspose(64, (2, 2), activation=actFunc, padding='valid', strides=(2, 2))(x)
    # x = Conv2D(128, (3, 3), activation='tanh', padding='valid')(x)
    x = Conv2DTranspose(64, (2, 2), activation=actFunc, padding='valid', strides=(2, 2))(x)
    # x = Conv2D(64, (3, 3), activation='tanh', padding='valid')(x)
    x = Conv2DTranspose(16, (1, 1), activation=actFunc, padding='same', strides=(2, 2))(x)
    x = Conv2DTranspose(8, (2, 2), activation=actFunc, padding='same', strides=(1, 1))(x)
    # x = Conv2D(64, (3, 3), activation='tanh', padding='valid')(x)
    x = Conv2DTranspose(1, (1, 1), activation="sigmoid", padding='same', strides=(1, 1))(x)
    # x = Conv2D(1, (3, 3), activation='sigmoid', padding='valid')(x)
    # x = Conv2DTranspose(1, (2, 2), activation='sigmoid', padding='valid', strides=(1, 1))(x)

    model = Model(inputs=[I], outputs=[x])
    model.summary()
    return model

model = build_model()
model.compile(optimizer='rmsprop', loss=DSSIMObjective(kernel_size=23))

history4 = model.fit(X, Y, batch_size=5, epochs=5)
def TCN_net():
    def conv_block(input, filters=32, kernel_size=(3, 3)):
        x = Conv2D(filters=filters,
                   kernel_size=kernel_size,
                   padding='same',
                   kernel_initializer='he_normal')(input)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = Conv2D(filters=filters,
                   kernel_size=kernel_size,
                   padding='same',
                   kernel_initializer='he_normal')(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        return x

    def BandA(input):
        x = BatchNormalization()(input)
        x = Activation('relu')(x)
        return x

    inputs = Input((seq_length, image_rows_low, image_cols, channel_num))

    # upscaling
    x0 = inputs
    # split in seq_length
    split_x = Lambda(tf.split,
                     arguments={
                         'axis': 1,
                         'num_or_size_splits': seq_length
                     })(inputs)
    # shared parameteres layer
    up0 = Conv2DTranspose(filters=32,
                          kernel_size=(3, 3),
                          padding='same',
                          strides=(2, 1),
                          kernel_initializer='he_normal')
    up1 = Conv2DTranspose(filters=32,
                          kernel_size=(3, 3),
                          padding='same',
                          strides=(2, 1),
                          kernel_initializer='he_normal')

    output1 = []  # 0,1,2,3,4,5,6...15
    for i in range(seq_length):
        slice = split_x[i]
        slice = Lambda(tf.squeeze, arguments={
            'axis': 1,
        })(slice)
        slice = up0(slice)
        slice = BatchNormalization()(slice)
        slice = Activation('relu')(slice)
        slice = up1(slice)
        slice = BatchNormalization()(slice)
        slice = Activation('relu')(slice)
        output1.append(slice)

    output2 = []  # 0,1,2,3,4,5,6,7
    conv1 = Conv2D(filters=64,
                   kernel_size=(3, 3),
                   kernel_regularizer=regularizers.l2(0.01),
                   dilation_rate=(2, 2),
                   padding='same',
                   kernel_initializer='he_normal')
    conv2 = AtrousConvolution2D(filters=64,
                                kernel_size=(3, 3),
                                kernel_regularizer=regularizers.l2(0.01),
                                dilation_rate=(2, 2),
                                strides=(2, 2),
                                padding='same',
                                kernel_initializer='he_normal')
    for i in range(int(seq_length / 2)):
        slice = concatenate([output1[2 * i], output1[2 * i + 1]], axis=3)
        slice = conv1(slice)
        slice = BatchNormalization()(slice)
        slice = Activation('relu')(slice)
        slice = conv2(slice)
        slice = BatchNormalization()(slice)
        slice = Activation('relu')(slice)
        output2.append(slice)

    output3 = []  # 0,1,2,3
    conv3 = Conv2D(filters=128,
                   kernel_regularizer=regularizers.l2(0.01),
                   kernel_size=(3, 3),
                   dilation_rate=(2, 2),
                   padding='same',
                   kernel_initializer='he_normal')
    conv4 = AtrousConvolution2D(filters=128,
                                kernel_regularizer=regularizers.l2(0.01),
                                kernel_size=(3, 3),
                                dilation_rate=(2, 2),
                                strides=(2, 2),
                                padding='same',
                                kernel_initializer='he_normal')
    for i in range(int(seq_length / 4)):
        slice = concatenate([output2[i * 2], output2[2 * i + 1]], axis=3)
        slice = conv3(slice)
        slice = BatchNormalization()(slice)
        slice = Activation('relu')(slice)
        slice = conv4(slice)
        slice = BatchNormalization()(slice)
        slice = Activation('relu')(slice)
        output3.append(slice)

    output4 = []  # 0,1
    conv5 = Conv2D(filters=256,
                   kernel_regularizer=regularizers.l2(0.01),
                   kernel_size=(3, 3),
                   dilation_rate=(2, 2),
                   padding='same',
                   kernel_initializer='he_normal')
    conv6 = AtrousConvolution2D(filters=256,
                                kernel_regularizer=regularizers.l2(0.01),
                                kernel_size=(3, 3),
                                dilation_rate=(2, 2),
                                strides=(2, 2),
                                padding='same',
                                kernel_initializer='he_normal')
    for i in range(int(seq_length / 8)):
        slice = concatenate([output3[i * 2], output3[2 * i + 1]], axis=3)
        slice = conv5(slice)
        slice = BatchNormalization()(slice)
        slice = Activation('relu')(slice)
        slice = conv6(slice)
        slice = BatchNormalization()(slice)
        slice = Activation('relu')(slice)
        output4.append(slice)

    output5 = []  # 0
    conv7 = Conv2D(filters=512,
                   kernel_regularizer=regularizers.l2(0.01),
                   kernel_size=(3, 3),
                   dilation_rate=(2, 2),
                   padding='same',
                   kernel_initializer='he_normal')
    conv8 = AtrousConvolution2D(filters=512,
                                kernel_regularizer=regularizers.l2(0.01),
                                kernel_size=(3, 3),
                                dilation_rate=(2, 2),
                                strides=(2, 2),
                                padding='same',
                                kernel_initializer='he_normal')
    for i in range(int(seq_length / 16)):
        slice = concatenate([output4[i * 2], output4[2 * i + 1]], axis=3)
        slice = conv7(slice)
        slice = BatchNormalization()(slice)
        slice = Activation('relu')(slice)
        slice = conv8(slice)
        slice = BatchNormalization()(slice)
        slice = Activation('relu')(slice)
        output5.append(slice)

    highest_map = output5[0]
    highest_map = Conv2D(filters=1024,
                         kernel_regularizer=regularizers.l2(0.01),
                         kernel_size=(3, 3),
                         padding='same',
                         kernel_initializer='he_normal')(highest_map)
    highest_map = BatchNormalization()(highest_map)
    highest_map = Activation('relu')(highest_map)
    highest_map = Conv2D(filters=1024,
                         kernel_regularizer=regularizers.l2(0.01),
                         kernel_size=(3, 3),
                         padding='same',
                         kernel_initializer='he_normal')(highest_map)
    highest_map = BatchNormalization()(highest_map)
    highest_map = Activation('relu')(highest_map)

    up1_map = Conv2DTranspose(filters=512,
                              kernel_size=(3, 3),
                              padding='same',
                              strides=(2, 2),
                              kernel_initializer='he_normal')(highest_map)
    up1_map = BandA(up1_map)
    up1_concat = concatenate([up1_map, output4[1]], axis=3)
    up1_concat = Conv2D(filters=512,
                        kernel_regularizer=regularizers.l2(0.01),
                        kernel_size=(3, 3),
                        padding='same',
                        kernel_initializer='he_normal')(up1_concat)
    up1_concat = BandA(up1_concat)
    up1_concat = Conv2D(filters=512,
                        kernel_regularizer=regularizers.l2(0.01),
                        kernel_size=(3, 3),
                        padding='same',
                        kernel_initializer='he_normal')(up1_concat)
    up1_concat = BandA(up1_concat)

    up2_map = Conv2DTranspose(filters=256,
                              kernel_size=(3, 3),
                              padding='same',
                              strides=(2, 2),
                              kernel_initializer='he_normal')(up1_concat)
    up2_map = BandA(up2_map)
    up2_concat = concatenate([up2_map, output3[3]], axis=3)
    up2_concat = Conv2D(filters=256,
                        kernel_regularizer=regularizers.l2(0.01),
                        kernel_size=(3, 3),
                        padding='same',
                        kernel_initializer='he_normal')(up2_concat)
    up2_concat = BandA(up2_concat)
    up2_concat = Conv2D(filters=256,
                        kernel_regularizer=regularizers.l2(0.01),
                        kernel_size=(3, 3),
                        padding='same',
                        kernel_initializer='he_normal')(up2_concat)
    up2_concat = BandA(up2_concat)

    up3_map = Conv2DTranspose(filters=128,
                              kernel_size=(3, 3),
                              padding='same',
                              strides=(2, 2),
                              kernel_initializer='he_normal')(up2_concat)
    up3_map = BandA(up3_map)
    up3_concat = concatenate([up3_map, output2[7]], axis=3)
    up3_concat = Conv2D(filters=128,
                        kernel_regularizer=regularizers.l2(0.01),
                        kernel_size=(3, 3),
                        padding='same',
                        kernel_initializer='he_normal')(up3_concat)
    up3_concat = BandA(up3_concat)
    up3_concat = Conv2D(filters=128,
                        kernel_regularizer=regularizers.l2(0.01),
                        kernel_size=(3, 3),
                        padding='same',
                        kernel_initializer='he_normal')(up3_concat)
    up3_concat = BandA(up3_concat)

    up4_map = Conv2DTranspose(filters=64,
                              kernel_size=(3, 3),
                              padding='same',
                              strides=(2, 2),
                              kernel_initializer='he_normal')(up3_concat)
    up4_map = BandA(up4_map)
    up4_concat = concatenate([up4_map, output1[15]], axis=3)
    up4_concat = Conv2D(filters=64,
                        kernel_regularizer=regularizers.l2(0.01),
                        kernel_size=(3, 3),
                        padding='same',
                        kernel_initializer='he_normal')(up4_concat)
    up4_concat = BandA(up4_concat)
    up4_concat = Conv2D(filters=64,
                        kernel_regularizer=regularizers.l2(0.01),
                        kernel_size=(3, 3),
                        padding='same',
                        kernel_initializer='he_normal')(up4_concat)
    up4_concat = BandA(up4_concat)

    # upsample the last decoder stage back toward the input resolution
    lowest_map = Conv2DTranspose(filters=64,
                                 kernel_size=(3, 3),
                                 padding='same',
                                 strides=(2, 2),
                                 kernel_initializer='he_normal')(up4_concat)

    print(K.int_shape(lowest_map))
    gen_image = Conv2D(1, (1, 1), activation='relu')(lowest_map)
    print(K.int_shape(gen_image))

    outputs = gen_image

    def PSNR(y_true, y_pred):
        # note: returns *negative* PSNR (K.log is ln, so /2.303 converts to log10)
        max_pixel = 1.0
        return -(10.0 * K.log(
            (max_pixel**2) / (K.mean(K.square(y_pred - y_true))))) / 2.303

    from keras_contrib.losses import DSSIMObjective
    ssim_loss = DSSIMObjective()

    model = Model(inputs=inputs, outputs=outputs)
    model.compile(optimizer=Adam(lr=0.0001, decay=0.00001),
                  loss='mse',
                  metrics=['accuracy', 'mse', 'mae', PSNR, ssim_loss])
    # model.compile(optimizer=Adam(lr=0.0001, decay=0.00001),loss=PSNR ,metrics =['accuracy', 'mse' , 'mae', PSNR, ssim_loss ] )

    model.summary()

    return model