Example #1
def _pad_or_crop_to_shape_3D(x, in_shape, tgt_shape):
    '''
    in_shape, tgt_shape are length-3 array-likes of spatial dimensions; only the first three entries are used
    '''
    im_diff = np.asarray(in_shape[:3]) - np.asarray(tgt_shape[:3])

    if im_diff[0] < 0:
        pad_amt = (int(np.ceil(abs(im_diff[0]) / 2.0)), int(np.floor(abs(im_diff[0]) / 2.0)))
        x = ZeroPadding3D((
            pad_amt,
            (0, 0),
            (0, 0)
        ))(x)
    if im_diff[1] < 0:
        pad_amt = (int(np.ceil(abs(im_diff[1]) / 2.0)), int(np.floor(abs(im_diff[1]) / 2.0)))
        x = ZeroPadding3D(((0, 0), pad_amt, (0, 0)))(x)
    if im_diff[2] < 0:
        pad_amt = (int(np.ceil(abs(im_diff[2]) / 2.0)), int(np.floor(abs(im_diff[2]) / 2.0)))
        x = ZeroPadding3D(((0, 0), (0, 0), pad_amt))(x)

    if im_diff[0] > 0:
        crop_amt = (int(np.ceil(im_diff[0] / 2.0)), int(np.floor(im_diff[0] / 2.0)))
        x = Cropping3D((crop_amt, (0, 0), (0, 0)))(x)
    if im_diff[1] > 0:
        crop_amt = (int(np.ceil(im_diff[1] / 2.0)), int(np.floor(im_diff[1] / 2.0)))
        x = Cropping3D(((0, 0), crop_amt, (0, 0)))(x)
    if im_diff[2] > 0:
        crop_amt = (int(np.ceil(im_diff[2] / 2.0)), int(np.floor(im_diff[2] / 2.0)))
        x = Cropping3D(((0, 0), (0, 0), crop_amt))(x)
    return x
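A minimal usage sketch of the helper above (illustrative shapes; assumes Keras with a TensorFlow backend, channels_last data, and the numpy/layer imports used by the function already in scope):

from keras.layers import Input
from keras.models import Model

inp = Input(shape=(30, 34, 38, 1))  # spatial shape 30x34x38, one channel
out = _pad_or_crop_to_shape_3D(inp, (30, 34, 38), (32, 32, 32))
# dim 0 is padded by (1, 1); dims 1 and 2 are cropped by (1, 1) and (3, 3)
print(Model(inputs=inp, outputs=out).output_shape)  # (None, 32, 32, 32, 1)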
Example #2
def ClassNet_MultiScale():
    inputs = Input((1, 48, 48, 48))
    #noise=GaussianNoise(stddev=0.01,input_shape=(1,48,48,48))(inputs)
    ch1 = inputs  #ch1=add([inputs,noise])
    ch2 = Cropping3D(((8, 8), (8, 8), (8, 8)))(inputs)
    ch3 = Cropping3D(((16, 16), (16, 16), (16, 16)))(inputs)

    #ch2=UpSampling3D(size=(2,2,2))(ch2)
    #ch3=UpSampling3D(size=(4,4,4))(ch3)

    ch1 = ConvNet48(ch1)
    ch2 = ConvNet32(ch2)
    ch3 = ConvNet16(ch3)
    #ch3=ConvNet12(ch3)

    #fusion=add([ch1,ch2,ch3])
    fusion = concatenate([ch1, ch2, ch3], axis=1)
    fusion = Dense(2)(fusion)  #Conv3D(2,(1,1,1), padding='same',activation='relu')(fusion)
    fusion = core.Reshape((2, 1))(fusion)
    #a=core.Reshape((6,1))(fusion)
    a = core.Permute((2, 1))(fusion)
    act = Activation('softmax')(a)
    model = Model(inputs=inputs, outputs=act)
    model.compile(optimizer=Adam(lr=config["initial_learning_rate"]),
                  loss='mean_squared_error',
                  metrics=['accuracy'])
    return model
Example #3
def resnet_like(in_sz=None):
    """ returns a model that uses residual components
    """
    in_sz = fplutils.to3d(in_sz)
    in_sz = in_sz + (1, )

    inputs = Input(shape=in_sz)

    conv1 = Conv3D(32, (3, 3, 3), use_bias=False)(inputs)  # 16x16x16
    conv1 = _bn_relu(conv1)
    pool1 = MaxPooling3D(pool_size=(2, 2, 2))(conv1)  # 8x8x8

    conv2 = Conv3D(32, (3, 3, 3), use_bias=False)(pool1)  # 6x6x6
    conv2 = _bn_relu(conv2)
    conv2 = Conv3D(32, (1, 1, 1), use_bias=False)(conv2)  # 6x6x6
    conv2 = BatchNormalization()(conv2)
    crop_pool1 = Cropping3D(cropping=((1, 1), (1, 1), (1, 1)))(pool1)
    conv2 = add([crop_pool1, conv2])
    conv2 = Activation("relu")(conv2)

    pool2 = MaxPooling3D(pool_size=(2, 2, 2))(conv2)  # 3x3x3

    conv3 = Conv3D(64, (3, 3, 3), use_bias=False)(pool2)  # 1x1x1
    conv3 = _bn_relu(conv3)
    conv3 = Conv3D(64, (1, 1, 1), use_bias=False)(conv3)  # 1x1x1
    pool2_shortcut = Conv3D(64, (1, 1, 1), use_bias=False)(pool2)
    crop_pool2 = Cropping3D(cropping=((1, 1), (1, 1), (1, 1)))(pool2_shortcut)
    conv3 = BatchNormalization()(conv3)
    conv3 = add([crop_pool2, conv3])
    conv3 = Activation("relu")(conv3)

    predictions = Conv3D(1, (1, 1, 1), activation='sigmoid')(conv3)

    model = Model(inputs=inputs, outputs=predictions)
    return model, (18, 7, 4), 102, None
Example #4
    def manipulate_input_stack(self, inputlayer):
        pad_crop = ((0, 0), (0, 0), hp.calculate_pad_crop_value(self.e_v))
        if self.manipulation == MANIPULATION.SPATIAL_UP:
            output = ZeroPadding3D(padding=pad_crop,
                                   data_format="channels_last")(inputlayer)
        elif self.manipulation == MANIPULATION.SPATIAL_DOWN:
            output = Cropping3D(cropping=pad_crop,
                                data_format="channels_last")(inputlayer)
        elif self.manipulation == MANIPULATION.SPATIAL_MIN:
            x = hp.calculate_stack_resize(self.vol_depth, 'min')[0]
            if 2**x < self.vol_depth:
                output = Cropping3D(cropping=pad_crop,
                                    data_format="channels_last")(inputlayer)
            else:
                output = ZeroPadding3D(padding=pad_crop,
                                       data_format="channels_last")(inputlayer)
        elif self.manipulation == MANIPULATION.FREQUENCY_UP:
            print('MANIPULATION.FREQUENCY_UP not yet implemented')
            self.fourier_transform(inputlayer, pad_crop)
            output = inputlayer  # pass-through so `output` is always bound
        elif self.manipulation == MANIPULATION.FREQUENCY_DOWN:
            print('MANIPULATION.FREQUENCY_DOWN not yet implemented')
            output = inputlayer  # pass-through so `output` is always bound
        elif self.manipulation == MANIPULATION.FREQUENCY_MIN:
            print('MANIPULATION.FREQUENCY_MIN not yet implemented')
            output = inputlayer  # pass-through so `output` is always bound
        return output
Example #5
def get_unet(lr=1e-6):  # , l2_constant=0.002
    # model_id = "unet3d_same"
    image_depth = 44  # 38
    image_rows = 120 #80 #200  # 140
    image_columns = 132 #80 #220  # 140

    inputs = Input((1, image_depth, image_rows, image_columns))
    conv1 = Convolution3D(32, 3, 3, 3, activation='relu', border_mode='valid')(inputs)  # ,W_regularizer=l2(l2_constant)
    conv1 = Convolution3D(32, 3, 3, 3, activation='relu', border_mode='valid')(conv1)  # ,,W_regularizer=l2(l2_constant)
    pool1 = MaxPooling3D(pool_size=(1, 2, 2), border_mode='valid')(conv1)  # strides=(2, 2, 2),

    conv2 = Convolution3D(64, 3, 3, 3, activation='relu', border_mode='valid')(pool1)  # W_regularizer=l2(l2_constant),
    conv2 = Convolution3D(64, 3, 3, 3, activation='relu', border_mode='valid')(conv2)
    pool2 = MaxPooling3D(pool_size=(1, 2, 2), border_mode='valid')(conv2)  # strides=(2, 2, 2),

    conv3 = Convolution3D(128, 3, 3, 3, activation='relu', border_mode='valid')(pool2)  # W_regularizer=l2(l2_constant),
    conv3 = Convolution3D(128, 3, 3, 3, activation='relu', border_mode='valid')(conv3)  # W_regularizer=l2(l2_constant),

    conv2_cropped = Cropping3D(((2, 2), (4, 4), (4, 4)))(conv2)
    up4 = merge([UpSampling3D(size=(1, 2, 2))(conv3), conv2_cropped], mode='concat', concat_axis=1)
    conv4 = Convolution3D(64, 3, 3, 3, activation='relu', border_mode='valid')(up4)  # W_regularizer=l2(l2_constant),
    conv4 = Convolution3D(64, 3, 3, 3, activation='relu', border_mode='valid')(conv4)  # W_regularizer=l2(l2_constant),

    conv1_cropped = Cropping3D(((6, 6), (16, 16), (16, 16)))(conv1)
    up5 = merge([UpSampling3D(size=(1, 2, 2))(conv4), conv1_cropped], mode='concat', concat_axis=1)
    conv5 = Convolution3D(32, 3, 3, 3, activation='relu', border_mode='valid')(up5)  # W_regularizer=l2(l2_constant),
    conv5 = Convolution3D(32, 3, 3, 3, activation='relu', border_mode='valid')(conv5)  # W_regularizer=l2(l2_constant),

    conv6 = Convolution3D(1, 1, 1, 1, activation='sigmoid')(conv5)

    model = Model(input=inputs, output=conv6)
    model.compile(optimizer=Adam(lr=lr), loss=dice_coef_loss, metrics=[dice_coef])

    return model  # , model_id
Example #6
def make_flood_fill_network(input_fov_shape, output_fov_shape, network_config):
    """Construct a stacked convolution module flood filling network.
    """
    if network_config.convolution_padding != 'same':
        raise ValueError('ResNet implementation only supports same padding.')

    image_input = Input(shape=tuple(input_fov_shape) + (1, ),
                        dtype='float32',
                        name='image_input')
    if network_config.rescale_image:
        ffn = Lambda(lambda x: (x - 0.5) * 2.0)(image_input)
    else:
        ffn = image_input
    mask_input = Input(shape=tuple(input_fov_shape) + (1, ),
                       dtype='float32',
                       name='mask_input')
    ffn = concatenate([ffn, mask_input])

    # Convolve and activate before beginning the skip connection modules,
    # as discussed in the Appendix of He et al 2016.
    ffn = Conv3D(network_config.convolution_filters,
                 tuple(network_config.convolution_dim),
                 kernel_initializer=network_config.initialization,
                 activation=network_config.convolution_activation,
                 padding='same')(ffn)
    if network_config.batch_normalization:
        ffn = BatchNormalization()(ffn)

    contraction = (input_fov_shape - output_fov_shape) // 2
    if np.any(np.less(contraction, 0)):
        raise ValueError(
            'Output FOV shape can not be larger than input FOV shape.')
    contraction_cumu = np.zeros(3, dtype=np.int32)
    contraction_step = np.divide(contraction,
                                 float(network_config.num_modules))

    for i in range(0, network_config.num_modules):
        ffn = add_convolution_module(ffn, network_config)
        contraction_dims = np.floor(i * contraction_step -
                                    contraction_cumu).astype(np.int32)
        if np.count_nonzero(contraction_dims):
            # list(...) is required on Python 3, where zip returns a lazy iterator
            ffn = Cropping3D(list(zip(contraction_dims.tolist(),
                                      contraction_dims.tolist())))(ffn)
            contraction_cumu += contraction_dims

    if np.any(np.less(contraction_cumu, contraction)):
        remainder = contraction - contraction_cumu
        ffn = Cropping3D(list(zip(remainder.tolist(), remainder.tolist())))(ffn)

    mask_output = Conv3D(1,
                         tuple(network_config.convolution_dim),
                         kernel_initializer=network_config.initialization,
                         padding='same',
                         name='mask_output',
                         activation=network_config.output_activation)(ffn)
    ffn = Model(inputs=[image_input, mask_input], outputs=[mask_output])

    return ffn
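A numeric sketch of the cropping schedule above, with illustrative FOV sizes (plain numpy, no Keras needed): an input FOV of 33^3 with an output FOV of 17^3 and 8 modules must contract by (33 - 17) // 2 = 8 voxels per side, spread across the modules with the leftover handled by the final remainder crop:

import numpy as np

contraction = (np.array([33] * 3) - np.array([17] * 3)) // 2  # 8 per side
step = contraction / 8.0                                      # 8 modules
cumu = np.zeros(3, dtype=np.int32)
for i in range(8):
    dims = np.floor(i * step - cumu).astype(np.int32)  # this module's crop
    cumu += dims
print(cumu, contraction - cumu)  # [7 7 7] [1 1 1]: one voxel per side is
                                 # left for the remainder crop after the loop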
Example #7
def dense_block(num_fms, x, filter_size, kernel_initializer,
                kernel_regularizer, dropout):
    """
	4-layer dense block 
	Each layer is connected to the layer before it 
	In the DenseNet paper, they specify the following order:
		BN --> ReLU --> Conv on the concatenation
	"""
    # Layer 1
    d1 = BatchNormalization()(x)
    d1 = Activation("relu")(d1)
    d1 = Conv3D(num_fms,
                filter_size,
                padding="valid",
                kernel_initializer=kernel_initializer,
                kernel_regularizer=kernel_regularizer)(d1)
    if dropout is not None:
        d1 = Dropout(dropout)(d1)
    # Layer 2
    d2 = BatchNormalization()(d1)
    d2 = Activation("relu")(d2)
    d2 = Conv3D(num_fms,
                filter_size,
                padding="valid",
                kernel_initializer=kernel_initializer,
                kernel_regularizer=kernel_regularizer)(d2)
    if dropout is not None:
        d2 = Dropout(dropout)(d2)
    # Layer 3
    # Concatenate layers 1, 2, output
    r1 = Cropping3D(cropping=((1, 1), (1, 1), (1, 1)))(d1)
    d3 = Concatenate()([r1, d2])
    d3 = BatchNormalization()(d3)
    d3 = Activation("relu")(d3)
    d3 = Conv3D(num_fms,
                filter_size,
                padding="valid",
                kernel_initializer=kernel_initializer,
                kernel_regularizer=kernel_regularizer)(d3)
    if dropout is not None:
        d3 = Dropout(dropout)(d3)
    # Layer 4
    # Concatenate layers 1, 2, 3 output
    r1 = Cropping3D(cropping=((2, 2), (2, 2), (2, 2)))(d1)
    r2 = Cropping3D(cropping=((1, 1), (1, 1), (1, 1)))(d2)
    d4 = Concatenate(axis=4)([r1, r2, d3])
    d4 = BatchNormalization()(d4)
    d4 = Activation("relu")(d4)
    d4 = Conv3D(num_fms,
                filter_size,
                padding="valid",
                kernel_initializer=kernel_initializer,
                kernel_regularizer=kernel_regularizer)(d4)
    if dropout is not None:
        d4 = Dropout(dropout)(d4)
    return d4
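A shape check for the block above (a sketch; assumes channels_last and 3x3x3 filters, for which each 'valid' convolution trims one voxel per side, which is exactly what the (1, 1) and (2, 2) crops compensate for):

from keras.layers import Input
from keras.models import Model

inp = Input(shape=(16, 16, 16, 1))
out = dense_block(8, inp, (3, 3, 3), 'he_normal', None, None)
print(Model(inp, out).output_shape)  # (None, 8, 8, 8, 8): four convs, each
                                     # shrinking every spatial axis by 2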
Example #8
def unet_like4(in_sz=40):
    '''
    construct a u-net style network
    '''
    in_sz = fplutils.to3d(in_sz)
    in_sz = in_sz + (1, )

    inputs = Input(shape=in_sz)  # 40^2

    # down-sample
    conv1 = Conv3D(32, (3, 3, 3), use_bias=False)(inputs)  # 38
    conv1 = _bn_relu(conv1)
    conv1 = Conv3D(32, (3, 3, 3), use_bias=False)(conv1)  # 36
    conv1 = _bn_relu(conv1)
    pool1 = MaxPooling3D(pool_size=(2, 2, 2))(conv1)  # 18

    conv2 = Conv3D(64, (3, 3, 3), use_bias=False)(pool1)  # 16
    conv2 = _bn_relu(conv2)
    conv2 = Conv3D(64, (3, 3, 3), use_bias=False)(conv2)  # 14
    conv2 = _bn_relu(conv2)
    pool2 = MaxPooling3D(pool_size=(2, 2, 2))(conv2)  # 7

    conv3 = Conv3D(128, (3, 3, 3), use_bias=False)(pool2)  # 5
    conv3 = _bn_relu(conv3)
    conv3 = Conv3D(128, (3, 3, 3), use_bias=False)(conv3)  # 3
    conv3 = _bn_relu(conv3)

    # up-sample
    crop_conv2 = Cropping3D(cropping=((4, 4), (4, 4), (4, 4)))(conv2)
    up4 = concatenate([UpSampling3D(size=(2, 2, 2))(conv3), crop_conv2])  # 6
    conv4 = Conv3D(64, (3, 3, 3), use_bias=False)(up4)  # 4
    conv4 = _bn_relu(conv4)
    conv4 = Conv3D(64, (1, 1, 1), use_bias=False)(conv4)  # 4
    conv4 = _bn_relu(conv4)

    crop_conv1 = Cropping3D(cropping=((14, 14), (14, 14), (14, 14)))(conv1)
    up5 = concatenate([UpSampling3D(size=(2, 2, 2))(conv4),
                       crop_conv1])  # 8x8x8
    conv5 = Conv3D(32, (3, 3, 3), use_bias=False)(up5)  # 6
    conv5 = _bn_relu(conv5)
    conv5 = Conv3D(32, (1, 1, 1), use_bias=False)(conv5)  # 6
    conv5 = _bn_relu(conv5)

    predictions = Conv3D(1, (1, 1, 1), activation='sigmoid',
                         use_bias=False)(conv5)  # 6

    model = Model(inputs=inputs, outputs=predictions)
    compile_args = {
        'loss': masked_focal_loss,  #masked_binary_crossentropy,
        'optimizer': 'adam',
        'metrics': [masked_accuracy, lb0l1err, lb1l1err]
    }
    return model, (40, 17, 1), 100, compile_args
Example #9
def CroppingTest():
    inputs = Input((1, 48, 48, 48))
    #noise=GaussianNoise(stddev=0.01,input_shape=(1,48,48,48))(inputs)
    ch1 = inputs
    ch2 = Cropping3D(((12, 12), (12, 12), (12, 12)))(inputs)
    ch3 = Cropping3D(((18, 18), (18, 18), (18, 18)))(inputs)

    ch2 = UpSampling3D(size=(2, 2, 2))(ch2)
    ch3 = UpSampling3D(size=(4, 4, 4))(ch3)
    fusion = concatenate([ch1, ch2, ch3], axis=1)

    model = Model(inputs=inputs, outputs=fusion)
    return model
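The crops and upsamplings above re-align the three scales (a sketch; assumes keras is configured with image_data_format='channels_first', which the (1, 48, 48, 48) input and the axis=1 concatenation imply):

# ch1: 48^3 (full field of view)
# ch2: 48 - 12 - 12 = 24^3, upsampled x2 -> 48^3
# ch3: 48 - 18 - 18 = 12^3, upsampled x4 -> 48^3
model = CroppingTest()
print(model.output_shape)  # (None, 3, 48, 48, 48)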
    
    
Example #10
def residual_block(num_fms, x, filter_size, kernel_initializer,
                   kernel_regularizer, dropout):
    x = Conv3D(num_fms,
               filter_size,
               activation="relu",
               padding="valid",
               kernel_initializer=kernel_initializer,
               kernel_regularizer=kernel_regularizer)(x)
    x = BatchNormalization()(x)
    if dropout is not None:
        x = Dropout(dropout)(x)
    y = Conv3D(num_fms,
               filter_size,
               activation="relu",
               padding="valid",
               kernel_initializer=kernel_initializer,
               kernel_regularizer=kernel_regularizer)(x)
    if dropout is not None:
        y = Dropout(dropout)(y)  # dropout on the residual branch, not the shortcut
    y = BatchNormalization()(y)
    y = Conv3D(num_fms,
               filter_size,
               padding="valid",
               kernel_initializer=kernel_initializer,
               kernel_regularizer=kernel_regularizer)(y)
    if dropout is not None:
        y = Dropout(dropout)(y)  # dropout on the residual branch, not the shortcut
    y = BatchNormalization()(y)
    r = Cropping3D(cropping=((2, 2), (2, 2), (2, 2)))(x)
    y = Add()([y, r])
    y = Activation("relu")(y)
    return y
Example #11
def crop3D_layer(inp,
                 cropping=((1, 1), (1, 1), (1, 1)),
                 data_format=None,
                 time_dist=False):
    assert type(cropping) in (int, tuple)

    input_shape = get_data_shape(inp)

    cropping = get_cropping_tuple(cropping=cropping, patch_topology='2D')

    if time_dist:
        crop_op = TimeDistributed(Cropping3D(cropping=cropping))(inp)
    else:
        crop_op = Cropping3D(cropping=cropping)(inp)

    return crop_op
Example #12
def new_instance(input_shape, learning_rate):

    x, y, time, spectral = input_shape

    inputLayer = Input(shape=input_shape)

    cnn_model = Conv3D(256, kernel_size=(3, 3, 5), padding='same')(inputLayer)
    cnn_model = Conv3D(256, kernel_size=(3, 3, 1), padding='valid')(cnn_model)
    cnn_model = BatchNormalization()(cnn_model)
    cnn_model = Activation('relu')(cnn_model)
    cnn_model = Flatten()(cnn_model)

    lstm_model = Cropping3D(cropping=(1, 1, 0))(inputLayer)
    lstm_model = Reshape(target_shape=(time, spectral))(lstm_model)
    lstm_model = BatchNormalization()(lstm_model)
    lstm_model = Bidirectional(CuDNNLSTM(256,
                                         return_sequences=True))(lstm_model)
    lstm_model = Flatten()(lstm_model)

    conc_model = concatenate([lstm_model, cnn_model])
    conc_model = Dense(256, activation='relu')(conc_model)
    conc_model = Dropout(0.3)(conc_model)
    conc_model = Dense(64, activation='relu')(conc_model)
    conc_model = Dense(2, activation='sigmoid')(conc_model)

    conc_model = Model(inputLayer, conc_model)
    optimizer = optimizers.Nadam(lr=learning_rate)
    conc_model.compile(loss='binary_crossentropy',
                       optimizer=optimizer,
                       metrics=['accuracy'])

    return conc_model
Example #13
    def build_discriminator(self):
        def d_layer(layer_input,
                    filters,
                    f_size=3,
                    bn=True,
                    scale=True,
                    name=''):  #change the bn to False
            """Discriminator layer"""
            d = Conv3D(filters,
                       kernel_size=f_size,
                       strides=1,
                       padding='same',
                       name=name + '_conv3d')(layer_input)
            if bn:
                d = BatchNormalization(momentum=0.8,
                                       name=name + '_bn',
                                       scale=scale)(d)
            d = LeakyReLU(alpha=0.2, name=name + '_leakyrelu')(d)
            return d

        img_A = Input(shape=self.input_shape_d,
                      name='input_img_A')  # 24x24x24 warped_img or reference
        img_T = Input(shape=self.input_shape_g,
                      name='input_img_T')  # 64x64x64 template

        img_T_cropped = Cropping3D(cropping=20)(img_T)  # 24x24x24

        # Concatenate image and conditioning image by channels to produce input
        #combined_imgs = Concatenate(axis=-1, name='combine_imgs_d')([img_A, img_T_cropped])
        combined_imgs = Add(name='combine_imgs_d')([img_A, img_T_cropped])

        d1 = d_layer(combined_imgs, self.df, bn=False, name='d1')  # 24x24x24
        d2 = d_layer(d1, self.df * 2, name='d2')  # 24x24x24
        pool = MaxPooling3D(pool_size=(2, 2, 2),
                            name='d2_pool')(d2)  # 12x12x12

        d3 = d_layer(pool, self.df * 4, name='d3')  # 12x12x12
        d4 = d_layer(d3, self.df * 8, name='d4')  # 12x12x12
        pool = MaxPooling3D(pool_size=(2, 2, 2), name='d4_pool')(d4)  # 6x6x6

        d5 = d_layer(pool, self.df * 8, name='d5')  # 6x6x6

        # ToDo: Use FC layer at the end like specified in the paper
        validity = Conv3D(1,
                          kernel_size=4,
                          strides=1,
                          padding='same',
                          activation='sigmoid',
                          name='validity')(d5)  #6x6x6
        #d6 = Conv3D(1, kernel_size=4, strides=1, padding='same', name='validity')(d5)  # 6x6x6

        #validity = Flatten(data_format='channels_last')(d6)
        #x = Reshape((6*6*6*512,))(d5) # hack to avoid flatten bug
        #validity = Dense(1, activation='sigmoid')(x)

        # Use FC layer
        #d6 = Flatten(input_shape=(self.batch_sz,) + (6,6,6,512))(d5)
        #validity = Dense(1, activation='sigmoid')(d5)

        return Model([img_A, img_T], validity, name='discriminator_model')
Example #14
    def build_discriminator(self):

        def d_layer(layer_input, filters, f_size=4, bn=True):
            """Discriminator layer"""
            d = Conv3D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
            if bn:
                d = BatchNormalization(momentum=0.8)(d)
            d = LeakyReLU(alpha=0.2)(d)
            return d

        img_S = Input(shape=self.input_shape_d) #128 S
        img_T = Input(shape=self.img_shape) #256 T

        img_T_cropped = Cropping3D(cropping=64)(img_T)  # 128

        combined_imgs = Concatenate(axis=-1)([img_S, img_T_cropped])
        #combined_imgs = Add()([img_S, img_T])

        d1 = d_layer(combined_imgs, self.df, bn=False)
        d2 = d_layer(d1, self.df*2)
        d3 = d_layer(d2, self.df*4)
        d4 = d_layer(d3, self.df*8)

        validity = Conv3D(1, kernel_size=4, strides=1, padding='same', name='disc_sig')(d4) #original is linear activation no sigmoid

        return Model([img_S, img_T], validity, name='discriminator_model')
Example #15
File: unet3d.py  Project: shihuai/TCAI-2017
    def unet3d_generator(self):
        inputs = Input((self.color_type, self.depth, self.height, self.width))

        conv1 = Convolution3D(128, 3, 3, 3, activation='relu')(inputs)
        conv2 = Convolution3D(128, 3, 3, 3, activation='relu')(conv1)
        conv3 = Convolution3D(128, 3, 3, 3, activation='relu')(conv2)
        pool1 = MaxPooling3D(pool_size=(2, 2, 2))(conv3)

        conv4 = Convolution3D(128, 3, 3, 3, activation='relu')(pool1)
        conv5 = Convolution3D(128, 3, 3, 3, activation='relu')(conv4)
        conv6 = Convolution3D(128, 3, 3, 3, activation='relu')(conv5)
        conv7 = Convolution3D(128, 3, 3, 3, activation='relu')(conv6)

        up1 = merge([
            UpSampling3D(size=(2, 2, 2))(conv7),
            Cropping3D(cropping=((8, 8), (8, 8), (8, 8)))(conv3)
        ],
                    mode='concat',
                    concat_axis=1)
        conv8 = Convolution3D(256, 3, 3, 3, activation='relu')(up1)
        conv9 = Convolution3D(128, 3, 3, 3, activation='relu')(conv8)
        conv10 = Convolution3D(128, 3, 3, 3, activation='relu')(conv9)
        conv11 = Convolution3D(128, 3, 3, 3, activation='relu')(conv10)
        conv12 = Convolution3D(128, 3, 3, 3, activation='relu')(conv11)

        model = Model(input=inputs, output=conv12)

        model.compile(optimizer=Adam(lr=1e-5),
                      loss=dice_coef_loss,
                      metrics=[dice_coef])

        return model
Example #16
def model_flat(input_shape, learning_rate, training=True):
    input_layer = Input(shape=input_shape, name='input_layer')
    width = input_shape[1]
    height = input_shape[2]
    # For the flat model occupancy is index 0
    if training:
        x = keras.layers.Lambda(whitten)(input_layer)
    else:
        x = input_layer
    mask = keras.layers.Reshape(
        (10, width, height, 1))(input_layer[:, 10:, :, :,
                                            1])  # last 10 visibility grids
    x = TimeDistributed(
        Conv2D(8, kernel_size=(7, 7), activation=leaky_relu,
               padding='same'))(x)
    x = ConvLSTM2D(16,
                   kernel_size=(5, 5),
                   padding='same',
                   activation=leaky_relu,
                   return_sequences=True)(x)
    x = Cropping3D(((10, 0), (0, 0), (0, 0)))(x)
    x = TimeDistributed(Conv2D(1, (7, 7), padding='same'))(x)
    out = keras.layers.Activation(activation='sigmoid')(x)
    model = Model(inputs=[input_layer], outputs=[out])
    if not training:
        for layer in model.layers:
            layer.trainable = False
    model.compile(loss=loss_fun_flat(mask, width, height),
                  optimizer=keras.optimizers.Adam(lr=learning_rate),
                  metrics=['mse'])
    return model
Example #17
def _vshifted_conv(x, num_filters, name):
    """ 
    Vertically shifted 3-d convolution
    """
    filter_size = [3, 3, 3]
    # Assumes the height is the second dimension
    k = filter_size[1] // 2

    ### 2d code ###
    #     x = ZeroPadding2D([[k,0],[0,0]])(x)
    #     x = Conv2D(filters=num_filters, kernel_size=filter_size, padding='same', kernel_initializer='he_normal', name=name)(x)
    #     x = LeakyReLU(0.1)(x)
    #     x = Cropping2D([[0,k],[0,0]])(x)

    ### 3d adaptation ###

    # assumes first tuple is frame number, second is height, 3rd is width
    # padding on height
    x = ZeroPadding3D([[0, 0], [k, 0], [0, 0]])(x)
    x = Conv3D(filters=num_filters,
               kernel_size=filter_size,
               padding='same',
               kernel_initializer='he_normal',
               name=name)(x)
    x = LeakyReLU(0.1)(x)
    x = Cropping3D([[0, 0], [0, k], [0, 0]])(x)

    return x
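A shape check for the shifted convolution above (a sketch; assumes a channels_last input of shape (frames, height, width, channels) and the layer imports already in scope). Padding k rows at the top and cropping k rows from the bottom keeps the height fixed while shifting the receptive field upward, so each output row only sees input rows at or above its own index:

from keras.layers import Input
from keras.models import Model

inp = Input(shape=(8, 32, 32, 1))
out = _vshifted_conv(inp, num_filters=16, name='vshift_demo')
print(Model(inp, out).output_shape)  # (None, 8, 32, 32, 16)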
Example #18
def make_model(num_classes,
               conv_dropout_p=0.75,
               dense_dropout_p=0.5,
               name='conv_2_layer_pass_through',
               **kwargs):

    name = name + '_' + str(conv_dropout_p) + '_' + str(dense_dropout_p)
    input_shape = (25, 25, 25)
    k_input_shape = (input_shape[0], input_shape[1], input_shape[2], 1)

    inputs = Input(shape=(input_shape[0], input_shape[1], input_shape[2], 1))

    processed = all_preprocessing(inputs, 'all', functional_api=True, **kwargs)

    conv_1 = Conv3D(48, (5, 5, 5), padding='valid')(processed)
    conv_1 = Activation('relu')(conv_1)

    pool_1 = MaxPooling3D(pool_size=(3, 3, 3))(conv_1)
    drop_1a = Dropout(conv_dropout_p)(pool_1)

    crop_1 = Cropping3D((7, 7, 7))(conv_1)
    drop_1b = Dropout(conv_dropout_p)(crop_1)

    conc_1 = Concatenate(axis=4)([drop_1a, drop_1b])

    conv_2 = Conv3D(96, (3, 3, 3), padding='valid')(conc_1)
    conv_2 = Activation('relu')(conv_2)

    pool_2 = MaxPooling3D(pool_size=(3, 3, 3))(conv_2)
    drop_2a = Dropout(conv_dropout_p)(pool_2)

    crop_2 = Cropping3D((2, 2, 2))(conv_2)
    drop_2b = Dropout(conv_dropout_p)(crop_2)

    conc_2 = Concatenate(axis=4)([drop_2a, drop_2b])

    flat = Flatten()(conc_2)
    fc_1 = Dense(150)(flat)
    fc_1 = Activation('relu')(fc_1)
    drop_3 = Dropout(dense_dropout_p)(fc_1)

    fc_2 = Dense(num_classes)(drop_3)
    predictions = Activation('softmax')(fc_2)

    model = Model(inputs=inputs, outputs=predictions)

    return model, name, input_shape
Example #19
def model_col(input_shape, learning_rate):
    input_layer_remote = Input(shape=input_shape, name='remote_input')
    x = TimeDistributed(Conv2D(8,
                               kernel_size=(7, 7),
                               activation='relu',
                               padding='same'),
                        name='remote_1')(input_layer_remote)
    x = TimeDistributed(Conv2D(16,
                               kernel_size=(7, 7),
                               activation='relu',
                               padding='same'),
                        name='remote_2')(x)
    x = TimeDistributed(Conv2D(1,
                               kernel_size=(7, 7),
                               activation='sigmoid',
                               padding='same'),
                        name='remote_3')(x)
    # x = keras.layers.Activation(activation = 'tanh', name = 'enforce_range')(x)
    # x = keras.layers.Activation(activation = 'relu', name = 'positive_filters')(x)
    out_remote = keras.layers.Lambda(
        filter_input,
        arguments={'input_layer_remote': input_layer_remote},
        name='filter_input_remote')(x)

    input_layer_local = Input(shape=input_shape, name='input_layer_local')
    new_input = keras.layers.Add(name='concat_inputs')(
        [input_layer_local, out_remote])
    new_input = keras.layers.Lambda(range_inputs,
                                    name='normalize_inputs')(new_input)

    mask = keras.layers.Reshape((10, 64, 64, 1))(new_input[:, 10:, :, :, 1])

    x2 = TimeDistributed(Conv2D(8,
                                kernel_size=(7, 7),
                                activation=leaky_relu,
                                padding='same'),
                         trainable=False,
                         name='local_1')(new_input)
    x2 = ConvLSTM2D(16,
                    kernel_size=(5, 5),
                    padding='same',
                    activation=leaky_relu,
                    return_sequences=True,
                    trainable=False,
                    name='local_2')(x2)
    x2 = Cropping3D(((10, 0), (0, 0), (0, 0)))(x2)
    x2 = TimeDistributed(Conv2D(1, (7, 7), padding='same'),
                         trainable=False,
                         name='local_3')(x2)
    out = keras.layers.Activation(activation='sigmoid')(x2)

    model = Model(inputs=[input_layer_local, input_layer_remote],
                  outputs=[out])

    model.compile(loss=loss_fun_flat_collab(mask),
                  optimizer=keras.optimizers.Adam(lr=learning_rate),
                  metrics=['mse'])
    return model
Example #20
def build_model(input_shape, layers):
    print("Build model ...")
    model = Sequential()
    # design model : CONV Layers
    for c in layers:
        if c[0] == 'crop3D':
            model.add(
                Cropping3D(cropping=((0, 0), (c[1], 0), (0, 0)),
                           input_shape=input_shape))
        if c[0] == 'crop2D':
            model.add(
                Cropping2D(cropping=((c[1], 0), (0, 0)),
                           input_shape=input_shape))
        if c[0] == 'norm':
            model.add(Lambda(lambda x: x / 255.0 - 0.5))
        if c[0] == 'conv2D':
            model.add(
                Conv2D(filters=c[1],
                       kernel_size=c[2],
                       strides=c[3],
                       padding='valid'))
            model.add(BatchNormalization())
            model.add(Activation(c[4]))
        if c[0] == 'conv3D':
            model.add(
                Conv3D(filters=c[1],
                       kernel_size=c[2],
                       strides=c[3],
                       padding='valid'))
            model.add(BatchNormalization())
            model.add(Activation(c[4]))
        if c[0] == 'maxpooling2D':
            model.add(MaxPooling2D(pool_size=c[1], strides=c[2]))
        if c[0] == 'avgpooling2D':
            model.add(AveragePooling2D(pool_size=c[1], strides=c[2]))
        if c[0] == 'maxpooling3D':
            model.add(MaxPooling3D(pool_size=c[1], strides=c[2]))
        if c[0] == 'avgpooling3D':
            model.add(AveragePooling3D(pool_size=c[1], strides=c[2]))
        if c[0] == 'dropout':
            model.add(Dropout(c[1]))
        if c[0] == 'batchnorm':
            model.add(BatchNormalization())
        if c[0] == 'flatten':
            model.add(Flatten())
        if c[0] == 'fc':
            model.add(Dense(c[1], activity_regularizer=l2(c[3])))
            model.add(BatchNormalization())
            model.add(Activation(c[2]))
        if c[0] == 'fc_wo_bn':
            model.add(Dense(c[1], activity_regularizer=l2(c[3])))
            model.add(Activation(c[2]))
    print("Done.")
    # summarize model.
    model.summary()
    # out
    return model
Example #21
def dolz_1(size_x, size_y, size_z, num_classes):
    init_input = Input((size_x, size_y, size_z, 1))

    x = Conv3D(25, kernel_size=(3, 3, 3))(init_input)
    x = PReLU()(x)
    x = Conv3D(25, kernel_size=(3, 3, 3))(x)
    x = PReLU()(x)
    x = Conv3D(25, kernel_size=(3, 3, 3))(x)
    x = PReLU()(x)

    y = Conv3D(50, kernel_size=(3, 3, 3))(x)
    y = PReLU()(y)
    y = Conv3D(50, kernel_size=(3, 3, 3))(y)
    y = PReLU()(y)
    y = Conv3D(50, kernel_size=(3, 3, 3))(y)
    y = PReLU()(y)

    z = Conv3D(75, kernel_size=(3, 3, 3))(y)
    z = PReLU()(z)
    z = Conv3D(75, kernel_size=(3, 3, 3))(z)
    z = PReLU()(z)

    x_crop = Cropping3D(cropping=((5, 5), (5, 5), (5, 5)))(x)
    y_crop = Cropping3D(cropping=((2, 2), (2, 2), (2, 2)))(y)

    concat = concatenate([x_crop, y_crop, z], axis=4)

    fc = Conv3D(400, kernel_size=(1, 1, 1))(concat)
    fc = PReLU()(fc)
    fc = Conv3D(200, kernel_size=(1, 1, 1))(fc)
    fc = PReLU()(fc)
    fc = Conv3D(150, kernel_size=(1, 1, 1))(fc)
    fc = PReLU()(fc)

    pred = Conv3D(num_classes, kernel_size=(1, 1, 1))(fc)
    pred = PReLU()(pred)
    pred = Reshape(
        (num_classes, (size_x - 16) * (size_z - 16) * (size_y - 16)))(pred)
    pred = Permute((2, 1))(pred)
    pred = Activation('softmax')(pred)

    model = Model(inputs=init_input, outputs=pred)

    return model
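The two crop sizes above exist to align receptive fields (a sketch of the arithmetic; every 'valid' 3x3x3 convolution trims one voxel per side): x has passed 3 convs, so cropping 5 more per side gives 8 in total; y has passed 6 convs, so cropping 2 more gives 8; z has passed 8 convs. All three tensors are therefore (size - 16) per axis and concatenate cleanly:

model = dolz_1(27, 27, 27, num_classes=4)
print(model.output_shape)  # (None, 1331, 4): (27 - 16)^3 = 11^3 voxels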
Example #22
    def build_transformation(self):

        img_S = Input(shape=self.img_shape, name='input_img_S_transform')      # 256
        phi = Input(shape=self.output_shape_g, name='input_phi_transform')     # 128

        img_S_cropped = Cropping3D(cropping=64)(img_S)  # 128

        warped_S = Lambda(dense_image_warp_3D, output_shape=(128, 128, 128, 1))([img_S_cropped, phi])

        return Model([img_S, phi], warped_S,  name='transformation_layer')
Example #23
def HyperDenseNet(kernelshapes,
                  numkernelsperlayer,
                  input_shape,
                  activation_name="sigmoid",
                  dropout_rate=0.3,
                  n_labels=2,
                  optimizer=Adam,
                  initial_learning_rate=5e-4,
                  loss_function="categorical_crossentropy"):
    n_conv_layer = 0
    for kernel in kernelshapes:
        if len(kernel) == 3:
            n_conv_layer += 1
    layers = []

    inputs = Input(input_shape)
    current_layer = inputs
    layers.append(current_layer)

    for i in range(n_conv_layer):
        current_layer = Conv3D(numkernelsperlayer[i],
                               kernelshapes[i],
                               strides=(1, 1, 1),
                               padding='valid',
                               activation=activation_name,
                               data_format='channels_first')(current_layer)
        layers.append(current_layer)
        cropped_layers = []
        n_layers = len(layers)
        for count, layer in enumerate(layers):
            cropped_layer = Cropping3D(cropping=(n_layers - 1 - count),
                                       data_format="channels_first")(layer)
            cropped_layers.append(cropped_layer)
        current_layer = Concatenate(axis=1)(cropped_layers)

    for i in range(n_conv_layer, len(kernelshapes)):
        current_layer = Conv3D(numkernelsperlayer[i], [1, 1, 1],
                               strides=(1, 1, 1),
                               padding='valid',
                               activation=activation_name,
                               data_format='channels_first')(current_layer)
        current_layer = SpatialDropout3D(
            rate=dropout_rate, data_format='channels_first')(current_layer)

    current_layer = Conv3D(n_labels, [1, 1, 1],
                           strides=(1, 1, 1),
                           padding="valid",
                           activation=None,
                           data_format='channels_first')(current_layer)
    current_layer = Softmax(axis=1)(current_layer)

    model = Model(inputs=inputs, outputs=current_layer)
    model.compile(optimizer=optimizer(lr=initial_learning_rate),
                  loss=loss_function)
    return model
Example #24
    def concat(self, x1, x2):

        dx = (x1._keras_shape[_row_axis] - x2._keras_shape[_row_axis]) / 2
        dy = (x1._keras_shape[_col_axis] - x2._keras_shape[_col_axis]) / 2
        dz = (x1._keras_shape[_depth_axis] - x2._keras_shape[_depth_axis]) / 2

        crop_size = ((floor(dx), ceil(dx)), (floor(dy), ceil(dy)), (floor(dz), ceil(dz)))

        x12 = Concatenate(axis=_channel_axis)([Cropping3D(crop_size)(x1), x2])

        return x12
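A numeric sketch of the (floor, ceil) split used above, with illustrative sizes 20 and 13; an odd size difference is divided unevenly between the two sides:

from math import floor, ceil

d = (20 - 13) / 2           # 3.5
print((floor(d), ceil(d)))  # (3, 4): crop 3 voxels from one side, 4 from the other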
Example #25
    def build_transformation(self):
        img_S = Input(shape=self.input_shape_g,
                      name='input_img_S_transform')  # 64x64x64
        phi = Input(shape=self.output_shape_g,
                    name='input_phi_transform')  # 24x24x24

        img_S_cropped = Cropping3D(cropping=20)(img_S)  # 24x24x24
        warped_S = Lambda(dense_image_warp_3D,
                          output_shape=(24, 24, 24, 1))([img_S_cropped, phi])

        return Model([img_S, phi], warped_S, name='transformation_layer')
Example #26
def get_autoencoder_model():
    from keras.layers import Convolution3D, MaxPooling3D, UpSampling3D, GaussianNoise, Cropping3D, Input
    from keras.regularizers import l2
    from keras.models import Model

    img_channels = 1
    noise_factor = 0.02

    # ---------------------------------------- START MODEL ------------------------------------------------------------
    input_img = Input(shape=(img_channels, ) + image_dimension)
    x = GaussianNoise(noise_factor)(input_img)
    x = Convolution3D(64, (7, 7, 7), padding='same', activation='relu')(x)
    x = MaxPooling3D(strides=(2, 2, 2), padding='same')(x)

    x = Convolution3D(32, (5, 5, 5),
                      padding='same',
                      activation='relu',
                      kernel_regularizer=l2(0.0001))(x)
    x = MaxPooling3D(strides=(2, 2, 2), padding='same')(x)

    x = Convolution3D(32, (5, 5, 5),
                      padding='same',
                      activation='relu',
                      kernel_regularizer=l2(0.0001))(x)
    encoded = MaxPooling3D(strides=(2, 2, 2), padding='same')(x)

    # Bottleneck

    x = Convolution3D(32, (5, 5, 5),
                      padding='same',
                      activation='relu',
                      kernel_regularizer=l2(0.0001))(encoded)
    x = UpSampling3D(size=(2, 2, 2))(x)

    x = Convolution3D(32, (5, 5, 5),
                      padding='same',
                      activation='relu',
                      kernel_regularizer=l2(0.0001))(x)
    x = UpSampling3D(size=(2, 2, 2))(x)

    x = Convolution3D(64, (5, 5, 5),
                      padding='same',
                      activation='relu',
                      kernel_regularizer=l2(0.0001))(x)
    x = UpSampling3D(size=(2, 2, 2))(x)

    decoded = Convolution3D(1, (7, 7, 7), padding='same')(x)
    decoded_cropped = Cropping3D(((1, 1), (3, 3), (3, 3)))(decoded)

    encoder = Model(input_img, encoded)
    autoencoder = Model(input_img, decoded_cropped)

    # ------------------------------------------- END MODEL ------------------------------------------------------------
    return encoder, autoencoder
Example #27
def cae_decoder():
    # 3D Convolutional Auto-Decoder
    inputs = Input(shape=(cae_output_count, 1))

    x = Reshape(cae_output_shape)(inputs)

    x = Conv3DTranspose(cae_filter_count * 2,
                        conv_size,
                        padding='same',
                        activation=activation_function)(x)

    x = UpSampling3D(pool_size)(x)
    x = Conv3DTranspose(cae_filter_count * 16,
                        conv_size,
                        padding='same',
                        activation=activation_function)(x)

    x = UpSampling3D(pool_size)(x)
    x = Conv3DTranspose(cae_filter_count * 8,
                        conv_size,
                        padding='same',
                        activation=activation_function)(x)

    x = UpSampling3D(pool_size)(x)
    x = Conv3DTranspose(cae_filter_count * 4,
                        conv_size,
                        padding='same',
                        activation=activation_function)(x)

    x = Cropping3D()(x)

    x = UpSampling3D(pool_size)(x)
    x = Conv3DTranspose(cae_filter_count * 2,
                        conv_size,
                        padding='same',
                        activation=activation_function)(x)

    x = UpSampling3D(pool_size)(x)
    x = Conv3DTranspose(cae_filter_count,
                        conv_size,
                        padding='same',
                        activation=activation_function)(x)

    decoder = Conv3DTranspose(1,
                              conv_size,
                              padding='same',
                              activation='sigmoid',
                              name='decoded')(x)

    model = Model(inputs=inputs, outputs=decoder)

    model.summary()

    return model
Example #28
def unet_like2(in_sz=24):
    '''
    construct a u-net style network
    '''
    in_sz = fplutils.to3d(in_sz)
    in_sz = in_sz + (1, )

    inputs = Input(shape=in_sz)  # 24x24x24

    # down-sample
    conv1 = Conv3D(32, (3, 3, 3), use_bias=False)(inputs)  # 22x22x22
    conv1 = _bn_relu(conv1)
    conv1 = Conv3D(32, (3, 3, 3), use_bias=False)(conv1)  # 20x20x20
    conv1 = _bn_relu(conv1)
    pool1 = MaxPooling3D(pool_size=(2, 2, 2))(conv1)  # 10x10x10

    conv2 = Conv3D(64, (3, 3, 3), use_bias=False)(pool1)  # 8x8x8
    conv2 = _bn_relu(conv2)
    conv2 = Conv3D(64, (3, 3, 3), use_bias=False)(conv2)  # 6x6x6
    conv2 = _bn_relu(conv2)
    pool2 = MaxPooling3D(pool_size=(2, 2, 2))(conv2)  # 3x3x3

    conv3 = Conv3D(128, (1, 1, 1), use_bias=False)(pool2)  # 3x3x3
    conv3 = _bn_relu(conv3)

    # up-sample
    up4 = concatenate([UpSampling3D(size=(2, 2, 2))(conv3), conv2])  # 6x6x6
    conv4 = Conv3D(64, (3, 3, 3), use_bias=False)(up4)  # 4x4x4
    conv4 = _bn_relu(conv4)
    conv4 = Conv3D(64, (1, 1, 1), use_bias=False)(conv4)  # 4x4x4
    conv4 = _bn_relu(conv4)

    crop_conv1 = Cropping3D(cropping=((6, 6), (6, 6), (6, 6)))(conv1)
    up5 = concatenate([UpSampling3D(size=(2, 2, 2))(conv4),
                       crop_conv1])  # 8x8x8
    conv5 = Conv3D(32, (3, 3, 3), use_bias=False)(up5)  # 6x6x6
    conv5 = _bn_relu(conv5)
    conv5 = Conv3D(32, (1, 1, 1), use_bias=False)(conv5)  # 6x6x6
    conv5 = _bn_relu(conv5)

    predictions = Conv3D(1, (1, 1, 1), activation='sigmoid',
                         use_bias=False)(conv5)  # 6x6x6

    model = Model(inputs=inputs, outputs=predictions)
    compile_args = {
        'loss': masked_binary_crossentropy,
        'optimizer': 'adam',
        'metrics': [masked_accuracy, lb0l1err, lb1l1err]
    }
    return model, (24, 10, 1), 100, compile_args
Example #29
def convolutional_blocks(s, f_list, k_size_list, d=None, conv_list=None):
    for l, (filters, kernel_size) in enumerate(zip(f_list, k_size_list)):
        conv = Conv3D(filters,
                      kernel_size=kernel_size,
                      activation='relu',
                      data_format='channels_first')
        s = BatchNormalization(axis=1)(conv(s))
        if d is not None:
            d = BatchNormalization(axis=1)(conv(d))
            # crop earlier outputs so every collected tensor matches the final size
            d_crop = Cropping3D(cropping=len(f_list) - l - 1,
                                data_format='channels_first')(d)
            if conv_list is not None:
                conv_list.append(d_crop)
    return s
Example #30
def Combine(gen, disc, input_shape, sequence_crop, new_sequence):
    input = Input(shape=input_shape)
    generated_image = gen(input)

    cropped = Cropping3D(cropping=(sequence_crop, 0, 0))(input)
    reshaped = Reshape(new_sequence)(generated_image)
    
    DCGAN_output = disc([cropped, reshaped])

    DCGAN = Model(inputs=[input],
                  outputs=[generated_image, DCGAN_output],
                  name="Combined")

    return DCGAN