Пример #1
0
def SDN_ver1(
        inputs
):  # NOTE: mind feature-map sizes; the stride-2 convs do the downsampling
    """Localization network v1: two stride-2 encoder stages, a 128-channel
    bottleneck, two transposed-conv decoder stages, and a tanh head that
    predicts a 3-channel displacement field.

    Returns a tuple ``(warped_image, displacement_field)`` for `inputs`.
    """
    # ---- encoder ----
    enc1 = Conv3D(32, (2, 2, 2), padding='same')(inputs)
    enc1 = Conv3D(32, (2, 2, 2), strides=2, padding='valid')(enc1)
    enc1 = BatchNormalization()(enc1)

    enc2 = Conv3D(64, (2, 2, 2), padding='same')(enc1)
    enc2 = Conv3D(64, (2, 2, 2), strides=2, padding='valid')(enc2)

    bottleneck = Conv3D(128, (2, 2, 2), padding='same')(enc2)

    # ---- decoder: transposed convs undo the two downsampling steps ----
    dec = Conv3DTranspose(64, (2, 2, 2), strides=(2, 2, 2),
                          padding='valid')(bottleneck)
    dec = Conv3DTranspose(32, (2, 2, 2), strides=(2, 2, 2),
                          padding='valid')(dec)

    # 3-channel flow, squashed into (-1, 1) by tanh
    flow = Conv3D(3, (2, 2, 2), padding='same', activation='tanh')(dec)

    locnet = Model(inputs, flow)

    # `input_shape` is a module-level global defined elsewhere in this file
    warped = SpatialDeformer3D(
        localization_net=locnet,
        output_size=(input_shape[0], input_shape[1], input_shape[2]),
        input_shape=input_shape)(inputs)

    return warped, locnet(inputs)
Пример #2
0
def SDN_ver1(
        inputs
):  # NOTE: mind feature-map sizes; the stride-2 convs do the downsampling
    """Localization network v1 (lean variant): two stride-2 encoder convs
    with PReLU activations, a transposed-conv decoder, and a tanh head that
    predicts the 3-channel displacement field.

    Returns a tuple ``(warped_image, displacement_field)`` for `inputs`.
    """
    # ---- encoder: two stride-2 'valid' convs; PReLU parameters shared
    # along axis 4 (the channel axis for channels-last data) ----
    down = Conv3D(32, (2, 2, 2), strides=2, padding='valid')(inputs)
    down = PReLU(shared_axes=[4])(down)
    down = Conv3D(64, (2, 2, 2), strides=2, padding='valid')(down)
    down = PReLU(shared_axes=[4])(down)

    # ---- decoder: transposed convs undo the two downsampling steps ----
    up = Conv3DTranspose(64, (2, 2, 2), strides=2, padding='valid')(down)
    up = Conv3D(64, (2, 2, 2), padding='same')(up)
    up = Conv3DTranspose(32, (2, 2, 2), strides=2, padding='valid')(up)
    up = Conv3D(32, (2, 2, 2), padding='same', activation='linear')(up)

    # extra padding so the 'valid' head conv lands back on the input size
    up = ZeroPadding3D((2, 1, 2))(up)
    flow = Conv3D(3, (2, 2, 2), padding='valid', activation='tanh')(up)

    locnet = Model(inputs, flow)

    # `input_shape` is a module-level global defined elsewhere in this file
    warped = SpatialDeformer3D(
        localization_net=locnet,
        output_size=(input_shape[0], input_shape[1], input_shape[2]),
        input_shape=input_shape)(inputs)

    return warped, locnet(inputs)
Пример #3
0
def SDN(inputs):
    """Localization network with one pooling level and a multiplicative skip.

    Encodes with two 'same' convs, pools, widens to 128 channels, upsamples
    back, and fuses the upsampled path with the pre-pooling features via an
    element-wise multiply before the tanh flow head.

    Bug fix: the original computed ``multiply([zz, zzz])`` but then fed the
    *unfused* tensor ``zzz`` into the final conv, immediately overwriting the
    fusion result — the skip connection was dead code. The head now consumes
    the fused tensor, which is what the surrounding comment about matching
    shapes implies was intended.

    Returns a tuple ``(warped_image, displacement_field)`` for `inputs`.
    """
    skip = Conv3D(64, (2, 2, 2), padding='same')(inputs)
    feat = Conv3D(64, (2, 2, 2), padding='same')(skip)

    feat = MaxPooling3D((2, 2, 2))(feat)
    feat = Conv3D(128, (2, 2, 2), padding='same')(feat)

    feat = UpSampling3D((2, 2, 2))(feat)
    feat = Conv3D(64, (2, 2, 2), padding='same')(feat)

    # element-wise skip fusion; shapes must match, so spatial dims need
    # enough powers of 2 (per the original author's note)
    fused = multiply([skip, feat])
    flow = Conv3D(3, (2, 2, 2),
                  padding='same',
                  kernel_initializer='he_normal',
                  bias_initializer='he_normal',
                  activation='tanh')(fused)

    locnet = Model(inputs, flow)

    warped = SpatialDeformer3D(
        localization_net=locnet,
        output_size=(input_shape[0], input_shape[1], input_shape[2]),
        input_shape=input_shape)(inputs)

    return warped, locnet(inputs)
def SDN_ver2(inputs): #should control the size carefully, larger strides to downsample
    """Localization network v2: a shared two-stage strided encoder feeding
    three identical, independently weighted decoder branches — one per
    displacement component (x, y, z) — concatenated into a 3-channel flow.

    Relies on the module-level globals `par` (kernel size/strides) and
    `input_shape`.

    Returns a tuple ``(warped_image, displacement_field)`` for `inputs`.

    Refactor: the original repeated the decoder branch verbatim three times;
    the copies were byte-identical, so they are factored into one helper.
    Each call still builds fresh layers, so the three branches keep separate
    weights exactly as before.
    """
    def _decoder_branch(features):
        # Transposed convs mirror the encoder's two downsampling steps,
        # then a 'valid' tanh conv emits one displacement component.
        d = Conv3DTranspose(64, par['kernel_size'],
                            strides=par['kernel_strides'],
                            padding='valid', activation='linear')(features)
        d = Conv3D(64, (2, 2, 2), padding='same')(d)
        d = Conv3DTranspose(32, par['kernel_size'],
                            strides=par['kernel_strides'],
                            padding='valid', activation='linear')(d)
        d = Conv3D(32, (2, 2, 2), padding='same')(d)
        d = ZeroPadding3D((2, 1, 2))(d)  # extra padding to make sizes match
        return Conv3D(1, par['kernel_size'], padding='valid',
                      activation='tanh')(d)

    # ---- shared encoder: two strided 'valid' convs; PReLU parameters
    # shared along axis 4 (channels for channels-last data) ----
    enc = Conv3D(32, par['kernel_size'], strides=par['kernel_strides'],
                 padding='valid', activation='linear')(inputs)
    enc = PReLU(shared_axes=[4])(enc)
    enc = Conv3D(64, par['kernel_size'], strides=par['kernel_strides'],
                 padding='valid', activation='linear')(enc)
    enc = PReLU(shared_axes=[4])(enc)

    # one branch per displacement component, stacked on the channel axis
    flow = concatenate([_decoder_branch(enc),
                        _decoder_branch(enc),
                        _decoder_branch(enc)], axis=-1)

    locnet = Model(inputs, flow)

    warped = SpatialDeformer3D(
        localization_net=locnet,
        output_size=(input_shape[0], input_shape[1], input_shape[2]),
        input_shape=input_shape)(inputs)

    return warped, locnet(inputs)
Пример #5
0
def SDN_deeper(inputs):  # deeper localization net for 3-D; no pooling-free path
    """Deeper localization network: two conv+pool encoder levels, a
    128-channel bottleneck, two upsample+conv decoder levels, and a tanh
    head predicting the 3-channel displacement field.

    Returns a tuple ``(warped_image, displacement_field)`` for `inputs`.
    """
    # encoder level 1
    lvl1 = Conv3D(32, (2, 2, 2), padding='same')(inputs)
    lvl1 = Conv3D(32, (2, 2, 2), padding='same')(lvl1)

    # encoder level 2
    lvl2 = MaxPooling3D((2, 2, 2))(lvl1)
    lvl2 = Conv3D(64, (2, 2, 2), padding='same')(lvl2)
    lvl2 = Conv3D(64, (2, 2, 2), padding='same')(lvl2)

    # bottleneck
    mid = MaxPooling3D((2, 2, 2))(lvl2)
    mid = Conv3D(128, (2, 2, 2), padding='same')(mid)

    # decoder: upsample twice back toward the input resolution
    dec = UpSampling3D((2, 2, 2))(mid)
    dec = Conv3D(64, (2, 2, 2), padding='same')(dec)  # may help escape local minima
    dec = UpSampling3D((2, 2, 2))(dec)
    dec = Conv3D(32, (2, 2, 2), padding='same')(dec)

    # extra padding so the 'valid' head conv restores the expected size
    dec = ZeroPadding3D((2, 1, 2))(dec)
    flow = Conv3D(3, (2, 2, 2), padding='valid', activation='tanh')(dec)

    locnet = Model(inputs, flow)

    warped = SpatialDeformer3D(
        localization_net=locnet,
        output_size=(input_shape[0], input_shape[1], input_shape[2]),
        input_shape=input_shape)(inputs)

    return warped, locnet(inputs)
Пример #6
0
# --- evaluation script: rebuild the model, load weights, prepare test pairs ---

# Volume resolution; the trailing 2 stacks the (moving, fixed) image pair.
res1, res2, res3 = 144, 180, 144

input_shape = (res1, res2, res3, 2)

inputs = Input(shape=input_shape)

# SDN returns (warped, displacement), so this Model has two outputs.
disp_M = Model(inputs, SDN(inputs))

print(disp_M.summary())
source_data = r''  # directory of test volumes -- fill in before running
datapath = r''  # fill in before running
disp_M.load_weights(r'')  # path to trained weights -- fill in before running

# NOTE(review): disp_M has two outputs here; confirm SpatialDeformer3D
# accepts a localization net of this form.
_warped = SpatialDeformer3D(localization_net=disp_M,
                            output_size=(input_shape[0], input_shape[1],
                                         input_shape[2]),
                            input_shape=input_shape)(inputs)

sdn = Model(inputs, [_warped, disp_M(inputs)])

# Held-out subjects 080..099; every ordered pair becomes one test case.
test_files = ['{:03d}.npy'.format(i) for i in range(80, 100)]
test_list = []

from itertools import combinations
for ind in combinations(range(0, len(test_files), 1), 2):
    # Each entry concatenates the two 3-char subject ids, e.g. '080081'.
    test_list.append(test_files[ind[0]][:3] + test_files[ind[1]][:3])
    test_list.append(test_files[ind[1]][:3] + test_files[ind[0]][:3])

from Utils import Get_Ja
for sample in test_list[:3]:
    # sample[:3] is the first (moving) subject id; load its volume.
    mov = np.load(source_data + sample[:3] + '.npy')
Пример #7
0
        #return -tf.log(tf.reduce_mean(cc))

        return 1 / tf.reduce_mean(cc) - 1

    return loss


# --- training graph: image and label warped by one shared displacement net ---

from architecture import SDN_ver1 as SDN
#from keras.layers import GaussianNoise
inputs = Input(shape=input_shape)  # (moving, fixed) image pair
inputs_l = Input(shape=input_shape)  # corresponding label volumes
#aug = GaussianNoise(0.05)(inputs)
disp = SDN(inputs)
disp_M = Model(inputs, disp)
# Both deformer layers share the same localization net, so image and label
# are warped by the identical displacement field.
transformer = SpatialDeformer3D(localization_net=disp_M,
                                output_size=(input_shape[0], input_shape[1],
                                             input_shape[2]),
                                input_shape=input_shape)

transformer_l = SpatialDeformer3D(localization_net=disp_M,
                                  output_size=(input_shape[0], input_shape[1],
                                               input_shape[2]),
                                  input_shape=input_shape)

warped = transformer(inputs)
warped_l = transformer_l(
    inputs_l)  # NOTE: labels should really use nearest-neighbor interpolation
#warped = transformer(inputs)

sdn = Model([inputs, inputs_l], [warped, warped_l, disp])
#warped_l = SpatialDeformer3D(localization_net=disp_M, output_size=(input_shape[0],input_shape[1], input_shape[2]), input_shape=input_shape)(inputs_l)
#sdn_pre = Model(inputs, [warped, disp])
    return par['w1']*total_variation(diff) + par['w2']*K.pow(K.sum(K.pow(diff, 2)),0.5)

# --- prediction script: rebuild SDN_ver1, load trained weights, freeze all ---
input_shape = (res1,res2,res3,2)

inputs = Input(shape = input_shape)

dispnet = Model(inputs, SDN_ver1(inputs)) #change this when using different models


# LPBA40 dataset locations (image volumes, label volumes, prediction output).
datapath = r'/home/dkuang/LPBA40_npy/image/'
labelpath = r'/home/dkuang/LPBA40_npy/label/'
predpath = r'/home/dkuang/'

warped = SpatialDeformer3D(localization_net=dispnet,
                             output_size=(input_shape[0],input_shape[1], input_shape[2]),
                             input_shape=input_shape)(inputs)

sdn_pred = Model(inputs, [warped, dispnet(inputs)])
#print(sdn_pred.summary())
dispnet.load_weights(r'result_test/SDN3d_weights_3_1.h5') # location does not seem to matter

# Inference only: freeze every layer of the prediction model.
for layer in sdn_pred.layers:
    layer.trainable = False

def vol_gen_refine(path, pred_path, file_list, batch_size):
    x = np.zeros((batch_size, res1, res2, res3, 2))
    zeros = np.zeros([batch_size, res1, res2, res3, 3])
    count = 0
    while True:
        for j in range(batch_size):
#print(K.int_shape(inputs))
#splited = Lambda(split_by_mask)(inputs) #consider using tf.split?
#ctx, rst = splited[0], splited[1]
#ctx = Lambda(get_masked)(inputs)
#rst = Lambda(get_unmasked)(inputs)
#print(K.int_shape(ctx))
#print(K.int_shape(rst))

# Two parallel streams: 'ctx' carries the context pair, 'rst' the remainder.
ctx = Input((res1, res2, res3, 2))
rst = Input((res1, res2, res3, 2))

# Displacement network + warp for the context stream.
disp_ctx = SDN_ver1(ctx)
disp_ctx_M = Model(ctx, disp_ctx)
print(disp_ctx_M.summary())
img_ctx = SpatialDeformer3D(localization_net=disp_ctx_M,
                            output_size=(input_shape[0], input_shape[1],
                                         input_shape[2]),
                            input_shape=input_shape)(ctx)
# Displacement network + warp for the remainder stream (separate weights:
# SDN_ver1 is called again, building fresh layers).
disp_rst = SDN_ver1(rst)
disp_rst_M = Model(rst, disp_rst)
print(disp_rst_M.summary())
img_rst = SpatialDeformer3D(localization_net=disp_rst_M,
                            output_size=(input_shape[0], input_shape[1],
                                         input_shape[2]),
                            input_shape=input_shape)(rst)

# Fuse the two streams by element-wise addition.
disp = add([disp_ctx, disp_rst])
img = add([img_ctx, img_rst])

print(K.int_shape(disp))
print(K.int_shape(img))
    D1 = K.sum(K.square(Dx_yTrue - Dx_yPred))
    D2 = K.sum(K.square(Dy_yTrue - Dy_yPred))
    D3 = K.sum(K.square(Dz_yTrue - Dz_yPred))

    return sse + 0.2 * (D1 + D2 + D3)


from spatial_deformer_net3d import SpatialDeformer3D
from architecture import SDN_ver3

# Build the registration model around SDN_ver3 and compile it for training.
input_shape = (res1, res2, res3, 2)
inputs = Input(shape=input_shape)
locnet = Model(inputs, SDN_ver3(inputs))
x1 = SpatialDeformer3D(localization_net=locnet,
                       output_size=(input_shape[0], input_shape[1],
                                    input_shape[2]),
                       input_shape=input_shape)(inputs)

# NOTE(review): calling SDN_ver3(inputs) again below builds a second,
# separately weighted network instead of reusing `locnet` -- confirm intended.
sdn = Model(inputs, [x1, SDN_ver3(inputs)])

#print(sdn.summary())
print(locnet.summary())
#print(sdn.layers)
from keras.optimizers import SGD
# NOTE(review): SGD is imported but Adam is used; the second loss weight is 0,
# so total_variation_loss is effectively disabled in this configuration.
sdn.compile(loss=['mse', total_variation_loss],
            loss_weights=[1.0, 0],
            optimizer=Adam(lr=5e-4, decay=1e-5))

# Training schedule.
epochs = 20
batch_size = 8
import os