Example 1
def generator(latent_size=1024, return_intermediate=False):

    loc = Sequential([
        Dense(64 * 7 * 7, input_dim=latent_size),
        Reshape((7, 7, 8, 8)),

        Conv3D(64, 6, 6, 8, border_mode='same', init='he_uniform'),
        LeakyReLU(),
        BatchNormalization(),
        UpSampling3D(size=(2, 2, 2)),

        ZeroPadding3D((2, 2, 0)),
        Conv3D(6, 6, 5, 8, init='he_uniform'),
        LeakyReLU(),
        BatchNormalization(),
        UpSampling3D(size=(2, 2, 3)),

        ZeroPadding3D((1,0,3)),
        Conv3D(6, 3, 3, 8, init='he_uniform'),
        LeakyReLU(),
        Conv3D(1, 2, 2, 2, bias=False, init='glorot_normal'),
        Activation('relu')
    ])
   
    latent = Input(shape=(latent_size, ))
     
    fake_image = loc(latent)

    Model(input=[latent], output=fake_image).summary()
    return Model(input=[latent], output=fake_image)
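The snippet above uses the legacy Keras 1 layer signatures (positional kernel dimensions, border_mode, init). A minimal sketch of the same opening block written with Keras 2 keyword arguments, assuming a tensorflow.keras backend:

# Hedged sketch (not from the original source): the first block of the generator
# expressed with Keras 2 keyword arguments.
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (Dense, Reshape, Conv3D, LeakyReLU,
                                     BatchNormalization, UpSampling3D)

loc_v2 = Sequential([
    Dense(64 * 7 * 7, input_shape=(1024,)),
    Reshape((7, 7, 8, 8)),
    Conv3D(64, (6, 6, 8), padding='same', kernel_initializer='he_uniform'),
    LeakyReLU(),
    BatchNormalization(),
    UpSampling3D(size=(2, 2, 2)),
])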
Example 2
def build_generator():
    g = Sequential()

    g.add(Dense(2 * 1 * 2 * 128, activation="relu", input_dim=NOISE_DIM))
    g.add(Reshape((2, 1, 2, 128)))

    g.add(UpSampling3D())
    g.add(Conv3D(128, kernel_size=3, padding="same"))
    g.add(BatchNormalization(momentum=BATCH_MOMENTUM))
    g.add(Activation("relu"))

    g.add(UpSampling3D())
    g.add(Conv3D(64, kernel_size=3, padding="same"))
    g.add(BatchNormalization(momentum=BATCH_MOMENTUM))
    g.add(Activation("relu"))

    g.add(UpSampling3D())
    g.add(Conv3D(1, kernel_size=(9, 3, 9), padding="valid"))
    g.add(Activation("sigmoid"))

    print("-" * 15, "GENERATOR SUMMARY", "-" * 15)
    g.summary()
    noise = Input(shape=(NOISE_DIM, ))
    img = g(noise)

    return Model(noise, img)
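A hedged usage sketch for this generator; NOISE_DIM and BATCH_MOMENTUM are module constants in the original script, so illustrative values are assumed here:

# Hedged usage sketch (constants below are assumptions, not from the original source).
import numpy as np

NOISE_DIM = 100        # assumed value
BATCH_MOMENTUM = 0.8   # assumed value

g = build_generator()
noise = np.random.normal(0.0, 1.0, size=(8, NOISE_DIM))
volumes = g.predict(noise)  # (8, 8, 6, 8, 1): three x2 upsamplings, then a valid (9, 3, 9) conv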
Example 3
def generator(latent_size=200, gflag=0, gf=8, gx=5, gy=5, gz=5):

    latent = Input(shape=(latent_size, ))

    x = Dense(64 * 7 * 7)(latent)
    x = Reshape((7, 7, 8, 8))(x)
    x = Conv3D(64, 6, 6, 8, border_mode='same', init='he_uniform')(x)
    x = LeakyReLU()(x)
    x = BatchNormalization()(x)
    x = UpSampling3D(size=(2, 2, 2))(x)

    x = ZeroPadding3D((2, 2, 0))(x)
    x = Conv3D(6, 6, 5, 8, init='he_uniform')(x)
    x = LeakyReLU()(x)
    x = BatchNormalization()(x)
    x = UpSampling3D(size=(2, 2, 3))(x)

    if gflag == 1:
        x = Conv3D(gf, gx, gy, gz, init='he_uniform', border_mode='same')(x)
        x = LeakyReLU()(x)
        x = BatchNormalization()(x)

    x = ZeroPadding3D((1, 0, 3))(x)
    x = Conv3D(6, 3, 3, 8, init='he_uniform')(x)
    x = LeakyReLU()(x)
    x = Conv3D(1, 2, 2, 2, bias=False, init='glorot_normal')(x)
    x = Activation('relu')(x)

    loc = Model(latent, x)
    loc.summary()
    fake_image = loc(latent)
    Model(input=[latent], output=fake_image)
    return Model(input=[latent], output=fake_image)
Example 4
    def wunet(self,input_pl,batch,is_training_pl,_bn_decay):
        with tf.device('/gpu:0'):
                conv5x5_1 = Convolution3DRelu(input_pl,1,5,5,5,4,init='glorot_uniform',name='c1',batchnorm=True,istraining=is_training_pl,bn_decay=_bn_decay)
                conv5x5_1_s = Convolution3DRelu(conv5x5_1,4,5,5,5,4,init='glorot_uniform',name='c2',batchnorm=True,istraining=is_training_pl,bn_decay=_bn_decay)
                conv5x5_1_p = MaxPooling3D(conv5x5_1_s,2,2,2)
        with tf.device('/gpu:0'):
                conv5x5_6 = Convolution3DRelu(conv5x5_1_p,4,5,5,5,8,init='glorot_uniform',name='c11',batchnorm=True,istraining=is_training_pl,bn_decay=_bn_decay)
                conv5x5_6_s = Convolution3DRelu(conv5x5_6,8,5,5,5,8,init='glorot_uniform',name='c12',batchnorm=True,istraining=is_training_pl,bn_decay=_bn_decay)
                conv5x5_6_u = tf.concat([UpSampling3D((2,2,2),data_format='channels_last')(conv5x5_6_s),conv5x5_1_s],4)

        with tf.device('/gpu:0'):
                conv5x5_7 = Convolution3DRelu(conv5x5_6_u,12,5,5,5,4,init='glorot_uniform',name='c13',batchnorm=True,istraining=is_training_pl,bn_decay=_bn_decay)
                conv5x5_7_s = Convolution3DRelu(conv5x5_7,4,5,5,5,4,init='glorot_uniform',name='c14',batchnorm=True,istraining=is_training_pl,bn_decay=_bn_decay)
                conv5x5_7_p = tf.concat([MaxPooling3D(conv5x5_7_s,2,2,2),conv5x5_6_s],4)
        with tf.device('/gpu:0'):
                conv5x5_9 = Convolution3DRelu(conv5x5_7_p,12,5,5,5,8,init='glorot_uniform',name='c17',batchnorm=True,istraining=is_training_pl,bn_decay=_bn_decay)
                conv5x5_9_s = Convolution3DRelu(conv5x5_9,8,5,5,5,8,init='glorot_uniform',name='c18',batchnorm=True,istraining=is_training_pl,bn_decay=_bn_decay)
                conv5x5_9_u = tf.concat([UpSampling3D((2,2,2),data_format='channels_last')(conv5x5_9_s),conv5x5_7_s],4)

        with tf.device('/gpu:0'):
                conv5x5_10 = Convolution3DRelu(conv5x5_9_u,12,5,5,5,4,init='glorot_uniform',name='c19',batchnorm=True,istraining=is_training_pl,bn_decay=_bn_decay)
                conv5x5_10_s = Convolution3DRelu(conv5x5_10,4,5,5,5,4,init='glorot_uniform',name='c20',batchnorm=True,istraining=is_training_pl,bn_decay=_bn_decay)
                conv5x5_10_p = tf.concat([MaxPooling3D(conv5x5_10_s,2,2,2),conv5x5_9_s],4)
        with tf.device('/gpu:0'):
                conv5x5_11 = Convolution3DRelu(conv5x5_10_p,12,5,5,5,8,init='glorot_uniform',name='c21',batchnorm=True,istraining=is_training_pl,bn_decay=_bn_decay)
                conv5x5_11_s = Convolution3DRelu(conv5x5_11,8,5,5,5,8,init='glorot_uniform',name='c22',batchnorm=True,istraining=is_training_pl,bn_decay=_bn_decay)
                conv5x5_11_u = tf.concat([UpSampling3D((2,2,2),data_format='channels_last')(conv5x5_11_s),conv5x5_10_s],4)

        return conv5x5_11_u, 12
Example 5
    def build(self, img1, img2):
        '''
            img1, img2, flow : tensor of shape [batch, X, Y, Z, C]
        '''
        concatImgs = tf.concat([img1, img2], 4, 'concatImgs')

        conv1 = convolveLeakyReLU('conv1', concatImgs, self.encoders[0], 3,
                                  2)  # 64 * 64 * 64
        conv2 = convolveLeakyReLU('conv2', conv1, self.encoders[1], 3,
                                  2)  # 32 * 32 * 32
        conv3 = convolveLeakyReLU('conv3', conv2, self.encoders[2], 3,
                                  2)  # 16 * 16 * 16
        conv4 = convolveLeakyReLU('conv4', conv3, self.encoders[3], 3,
                                  2)  # 8 * 8 * 8

        net = convolveLeakyReLU('decode4', conv4, self.decoders[0], 3, 1)
        net = tf.concat([UpSampling3D()(net), conv3], axis=-1)
        net = convolveLeakyReLU('decode3', net, self.decoders[1], 3, 1)
        net = tf.concat([UpSampling3D()(net), conv2], axis=-1)
        net = convolveLeakyReLU('decode2', net, self.decoders[2], 3, 1)
        net = tf.concat([UpSampling3D()(net), conv1], axis=-1)
        net = convolveLeakyReLU('decode1', net, self.decoders[3], 3, 1)
        net = convolveLeakyReLU('decode1_1', net, self.decoders[4], 3, 1)
        net = tf.concat([UpSampling3D()(net), concatImgs], axis=-1)
        net = convolveLeakyReLU('decode0', net, self.decoders[5], 3, 1)
        if len(self.decoders) == 8:
            net = convolveLeakyReLU('decode0_1', net, self.decoders[6], 3, 1)
        net = convolve('flow',
                       net,
                       self.decoders[-1],
                       3,
                       1,
                       weights_init=normal(stddev=1e-5))
        return {'flow': net * self.flow_multiplier}
Example 6
def generator_model(width, height):
    model = Sequential()
    model.add(
        Dense(128, input_dim=100, bias_initializer='he_uniform'))
    model.add(Activation('tanh'))
    model.add(Dense(width * height))
    model.add(Activation('tanh'))
    model.add(Dense(width * height * DIM))
    model.add(Activation('tanh'))
    model.add(Dense(2 * width * height * DIM))
    model.add(Activation('tanh'))
    model.add(Dense(4 * width * height * DIM))
    model.add(Activation('tanh'))
    model.add(
        Reshape((height, width, DIM, 4),
                input_shape=(4 * width * height * DIM, )))
    model.add(Dropout(0.5))
    model.add(UpSampling3D(size=(2, 2, 1)))
    model.add(Conv3D(4, (2, 2, 1), padding='same'))
    model.add(BatchNormalization())
    model.add(Activation('tanh'))
    model.add(UpSampling3D(size=(2, 2, 1)))
    model.add(Conv3D(2, (2, 2, 1), padding='same'))
    model.add(BatchNormalization())
    model.add(Activation('tanh'))
    model.add(Conv3D(1, (2, 2, 1), padding='same'))
    model.add(Activation('tanh'))
    return model
Example 7
def generator(latent_size=200, return_intermediate=False):

    loc = Sequential([
        Dense(64 * 7 * 7, input_dim=latent_size),
        Reshape((7, 7, 8, 8)),
        Conv3D(64, 6, 6, 8, border_mode='same', init='he_uniform'),
        LeakyReLU(),
        BatchNormalization(),
        UpSampling3D(size=(2, 2, 2)),
        ZeroPadding3D((2, 2, 0)),
        Conv3D(6, 6, 5, 8, init='he_uniform'),
        LeakyReLU(),
        BatchNormalization(),
        UpSampling3D(size=(2, 2, 3)),
        ZeroPadding3D((1, 0, 3)),
        Conv3D(6, 3, 3, 8, init='he_uniform'),
        LeakyReLU(),
        Conv3D(1, 2, 2, 2, bias=False, init='glorot_normal'),
        Activation('relu')
    ])

    latent = Input(shape=(latent_size, ))

    image_class = Input(shape=(1, ), dtype='float32')
    emb = Flatten()(Embedding(500,
                              latent_size,
                              input_length=1,
                              init='glorot_normal')(image_class))

    h = merge([latent, emb], mode='mul')

    fake_image = loc(h)

    Model(input=[latent, image_class], output=fake_image).summary()
    return Model(input=[latent, image_class], output=fake_image)
Example 8
def train_model():
    input_img = Input(shape=(128, 128, 128, 1))
    x = Convolution3D(16, (5, 5, 5), activation='relu', padding='same')(input_img)
    x = MaxPooling3D((2, 2, 2), padding='same')(x)
    x = Convolution3D(16, (5, 5, 5), activation='relu', padding='same')(x)
    x = MaxPooling3D((2, 2, 2), padding='same')(x)
    x = Convolution3D(16, (5, 5, 5), activation='relu', padding='same')(x)
    encoded = MaxPooling3D((2, 2, 2), padding='same', name='encoder')(x)

    print("shape of encoded: ")
    print(K.int_shape(encoded))

    x = Convolution3D(16, (5, 5, 5), activation='relu', padding='same')(encoded)
    x = UpSampling3D((2, 2, 2))(x)
    x = Convolution3D(16, (5, 5, 5), activation='relu', padding='same')(x)
    x = UpSampling3D((2, 2, 2))(x)
    x = Convolution3D(16, (5, 5, 5), activation='relu', padding='same')(x)
    x = UpSampling3D((2, 2, 2))(x)
    decoded = Convolution3D(1, (5, 5, 5), activation='sigmoid', padding='same')(x)
    print("shape of decoded: ")
    print(K.int_shape(decoded))

    autoencoder = Model(input_img, decoded)
    autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')

    autoencoder.fit(train_data, train_data,
              epochs=10,
              batch_size=14,
              validation_data=(val_data, val_data),
              callbacks=[TensorBoard(log_dir='/tmp/autoencoder')])

    autoencoder.save('autoencoder.h5')
Example 9
    def build_generator(self):
        """Construct the graph for the generator"""
        model = Sequential()

        dim1 = int(self.input_shape[0] // 4)
        dim2 = int(self.input_shape[1] // 4)
        dim3 = int(self.input_shape[2] // 4)

        model.add(
            Dense(128 * dim1 * dim2 * dim3,
                  activation="relu",
                  input_dim=self.latent_dim))
        model.add(Reshape((dim1, dim2, dim3, 128)))
        model.add(UpSampling3D())
        model.add(Conv3D(128, kernel_size=3, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))
        model.add(UpSampling3D())
        model.add(Conv3D(64, kernel_size=3, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))
        model.add(Conv3D(1, kernel_size=3, padding="same"))
        model.add(Activation("sigmoid"))

        model.summary()

        noise = Input(shape=(self.latent_dim, ))
        terrain = model(noise)

        return Model(noise, terrain)
Example 10
    def construct_model(self):
        # th input (channels, height, width)
        # tf input (height, width, channels)
        keras.backend.set_image_dim_ordering('tf')
        model = Sequential()

        #Encode:
        model.add(Convolution3D(16,(2,3,3), activation='relu', input_shape=(config.frames_per_input, 501, 501, 3),border_mode='same'))        
        model.add(MaxPooling3D(pool_size=(2,2,2)))
        model.add(Convolution3D(16,(2,3,3), activation='relu', border_mode='same'))
        model.add(Convolution3D(16,(2,3,3), activation='relu', border_mode='same'))
        model.add(MaxPooling3D(pool_size=(2,2,2)))
        model.add(Convolution3D(8,(1,3,3), activation='relu', border_mode='same'))
        #model.add(Convolution3D(8,(1,3,3), activation='relu', border_mode='same'))
        #model.add(MaxPooling3D(pool_size=(1,2,2)))        
        #model.add(Convolution3D(8,(1,3,3), activation='relu', border_mode='same'))
        #model.add(Convolution3D(8,(1,3,3), activation='relu', border_mode='same'))
        #Decode:
        model.add(Convolution3D(16,(2,3,3), activation='relu', border_mode='same'))
        model.add(UpSampling3D((1,2,2)))
        model.add(Convolution3D(16,(2,3,3), activation='relu', border_mode='same'))
        model.add(UpSampling3D((1,2,2)))
        model.add(Convolution3D(3,(2,3,3), activation='relu', border_mode='same'))
        # model.add(Convolution3D(16,(2,3,3), activation='relu', border_mode='same'))
        # model.add(UpSampling3D((1,1,1)))
        # model.add(Convolution3D(3,(2,3,3), activation='relu', border_mode='same'))
        # model.add(UpSampling3D((1,1,1)))
        # model.add(Flatten())

        model.compile(loss='mean_squared_error', optimizer='adam')
        model.compile(loss=y_std, optimizer='adam')
        model.compile(loss='binary_crossentropy', optimizer='adam') # doesn't reset weights
        
        model.summary()        
        self.model = model
Example 11
def decoder_model():
    inputs = Input(shape=(10, 16, 16, 64))

    # 10x16x16
    convlstm_1 = ConvLSTM2D(filters=64,
                            kernel_size=(3, 3),
                            strides=(1, 1),
                            padding='same',
                            return_sequences=True,
                            recurrent_dropout=0.5)(inputs)
    x = TimeDistributed(BatchNormalization())(convlstm_1)
    x = TimeDistributed(LeakyReLU(alpha=0.2))(x)
    out_1 = TimeDistributed(Dropout(0.5))(x)

    convlstm_2 = ConvLSTM2D(filters=64,
                            kernel_size=(3, 3),
                            strides=(1, 1),
                            padding='same',
                            return_sequences=True,
                            recurrent_dropout=0.5)(out_1)
    x = TimeDistributed(BatchNormalization())(convlstm_2)
    h_2 = TimeDistributed(LeakyReLU(alpha=0.2))(x)
    out_2 = UpSampling3D(size=(1, 2, 2))(h_2)

    # 10x32x32
    convlstm_3 = ConvLSTM2D(filters=128,
                            kernel_size=(3, 3),
                            strides=(1, 1),
                            padding='same',
                            return_sequences=True,
                            recurrent_dropout=0.5)(out_2)
    x = TimeDistributed(BatchNormalization())(convlstm_3)
    h_3 = TimeDistributed(LeakyReLU(alpha=0.2))(x)
    out_3 = UpSampling3D(size=(1, 2, 2))(h_3)

    # 10x64x64
    convlstm_4 = ConvLSTM2D(filters=32,
                            kernel_size=(3, 3),
                            strides=(1, 1),
                            padding='same',
                            return_sequences=True,
                            recurrent_dropout=0.5)(out_3)
    x = TimeDistributed(BatchNormalization())(convlstm_4)
    h_4 = TimeDistributed(LeakyReLU(alpha=0.2))(x)
    out_4 = UpSampling3D(size=(1, 2, 2))(h_4)

    # 10x128x128
    convlstm_5 = ConvLSTM2D(filters=3,
                            kernel_size=(3, 3),
                            strides=(1, 1),
                            padding='same',
                            return_sequences=True,
                            recurrent_dropout=0.5)(out_4)
    predictions = TimeDistributed(Activation('tanh'))(convlstm_5)

    model = Model(inputs=inputs, outputs=predictions)

    return model
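A hedged usage sketch: the decoder maps 10 encoded 16x16x64 feature maps per clip back to 10 frames of 128x128 RGB.

# Hedged usage sketch (random inputs for illustration only).
import numpy as np

dec = decoder_model()
codes = np.random.rand(2, 10, 16, 16, 64).astype('float32')
frames = dec.predict(codes)  # -> (2, 10, 128, 128, 3), values in [-1, 1] from the tanh output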
Example 12
    def build(input_shape):
        """Create a 3D Convolutional Autoencoder model.

        Parameters:
        - input_shape: Tuple of input shape in the format
            (conv_dim1, conv_dim2, conv_dim3, channels)

        Returns:
        - A 3D CAD model that takes a 5D tensor (volumetric images
        in batch) as input and returns a 5D tensor (prediction) as output.
        """

        if len(input_shape) != 4:
            raise ValueError("Input shape should be a tuple "
                             "(conv_dim1, conv_dim2, conv_dim3, channels)")

        input_img = Input(shape=input_shape, name="cad_input")

        # Encoding
        x = Conv3D(8, (3, 3, 3), activation='relu', padding='same',
                   name="cad_enc_1")(input_img)
        x = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
                         padding="same", name="cad_pool_1")(x)
        x = Conv3D(8, (3, 3, 3), activation='relu', padding='same',
                   name="cad_enc_2")(x)
        x = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
                         padding="same", name="cad_pool_2")(x)
        x = Conv3D(8, (3, 3, 3), activation='relu', padding='same',
                   name="cad_enc_3")(x)
        x = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
                         padding="same", name="cad_pool_3")(x)

        # Decoding
        x = Conv3D(8, (3, 3, 3), activation='relu', padding='same',
                   name="cad_dec_1")(x)
        x = UpSampling3D(size=(2, 2, 2), name="cad_unpool_1")(x)
        x = Conv3D(8, (3, 3, 3), activation='relu', padding='same',
                   name="cad_dec_2")(x)
        x = UpSampling3D(size=(2, 2, 2), name="cad_unpool_2")(x)
        x = Conv3D(8, (3, 3, 3), activation='relu', padding='same',
                   name="cad_dec_3")(x)
        x = UpSampling3D(size=(2, 2, 2), name="cad_unpool_3")(x)

        output_img = Conv3D(1, (3, 3, 3), activation='sigmoid',
                            padding='same', name="cad_sigmoid")(x)
        model = Model(inputs=input_img, outputs=output_img)
        return model
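A hedged usage sketch, assuming the builder is exposed as a plain function or staticmethod; with three 2x pooling/unpooling stages, input dimensions divisible by 8 (e.g. 32) make the output shape match the input:

# Hedged usage sketch (input size and training setup are assumptions).
cad = build((32, 32, 32, 1))
cad.compile(optimizer='adam', loss='binary_crossentropy')
cad.summary()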
Example 13
def decoder_model():
    inputs = Input(shape=(int(VIDEO_LENGTH / 2), 16, 26, 64))

    # 10x16x16
    convlstm_1 = ConvLSTM2D(filters=64,
                            kernel_size=(3, 3),
                            strides=(1, 1),
                            padding='same',
                            return_sequences=True,
                            recurrent_dropout=0.2)(inputs)
    x = TimeDistributed(BatchNormalization())(convlstm_1)
    out_1 = TimeDistributed(Activation('tanh'))(x)

    res_1 = UpSampling3D(size=(1, 2, 2))(out_1)

    # 10x32x32
    convlstm_3a = ConvLSTM2D(filters=64,
                             kernel_size=(3, 3),
                             strides=(1, 1),
                             padding='same',
                             return_sequences=True,
                             recurrent_dropout=0.2)(res_1)
    x = TimeDistributed(BatchNormalization())(convlstm_3a)
    out_3a = TimeDistributed(Activation('tanh'))(x)

    res_2 = UpSampling3D(size=(1, 2, 2))(out_3a)

    # 10x64x64
    convlstm_4a = ConvLSTM2D(filters=32,
                             kernel_size=(3, 3),
                             strides=(1, 1),
                             padding='same',
                             return_sequences=True,
                             recurrent_dropout=0.2)(res_2)
    x = TimeDistributed(BatchNormalization())(convlstm_4a)
    out_4a = TimeDistributed(Activation('tanh'))(x)

    res_3 = UpSampling3D(size=(1, 2, 2))(out_4a)

    # 10x128x128
    convlstm_5 = ConvLSTM2D(filters=3,
                            kernel_size=(3, 3),
                            strides=(1, 1),
                            padding='same',
                            return_sequences=True,
                            recurrent_dropout=0.2)(res_3)
    predictions = TimeDistributed(Activation('tanh'))(convlstm_5)

    model = Model(inputs=inputs, outputs=predictions)

    return model
Example 14
def unet_model():
    
    inputs = Input(shape=(1, max_slices, img_size, img_size))
    conv1 = Convolution3D(width, 3, 3, 3, activation = 'relu', border_mode='same')(inputs)
    conv1 = BatchNormalization(axis = 1)(conv1)
    conv1 = Convolution3D(width*2, 3, 3, 3, activation = 'relu', border_mode='same')(conv1)
    conv1 = BatchNormalization(axis = 1)(conv1)
    pool1 = MaxPooling3D(pool_size=(2, 2, 2), strides = (2, 2, 2), border_mode='same')(conv1)
    
    conv2 = Convolution3D(width*2, 3, 3, 3, activation = 'relu', border_mode='same')(pool1)
    conv2 = BatchNormalization(axis = 1)(conv2)
    conv2 = Convolution3D(width*4, 3, 3, 3, activation = 'relu', border_mode='same')(conv2)
    conv2 = BatchNormalization(axis = 1)(conv2)
    pool2 = MaxPooling3D(pool_size=(2, 2, 2), strides = (2, 2, 2), border_mode='same')(conv2)

    conv3 = Convolution3D(width*4, 3, 3, 3, activation = 'relu', border_mode='same')(pool2)
    conv3 = BatchNormalization(axis = 1)(conv3)
    conv3 = Convolution3D(width*8, 3, 3, 3, activation = 'relu', border_mode='same')(conv3)
    conv3 = BatchNormalization(axis = 1)(conv3)
    pool3 = MaxPooling3D(pool_size=(2, 2, 2), strides = (2, 2, 2), border_mode='same')(conv3)
    
    conv4 = Convolution3D(width*8, 3, 3, 3, activation = 'relu', border_mode='same')(pool3)
    conv4 = BatchNormalization(axis = 1)(conv4)
    conv4 = Convolution3D(width*8, 3, 3, 3, activation = 'relu', border_mode='same')(conv4)
    conv4 = BatchNormalization(axis = 1)(conv4)
    conv4 = Convolution3D(width*16, 3, 3, 3, activation = 'relu', border_mode='same')(conv4)
    conv4 = BatchNormalization(axis = 1)(conv4)

    up5 = merge([UpSampling3D(size=(2, 2, 2))(conv4), conv3], mode='concat', concat_axis=1)
    conv5 = SpatialDropout3D(dropout_rate)(up5)
    conv5 = Convolution3D(width*8, 3, 3, 3, activation = 'relu', border_mode='same')(conv5)
    conv5 = Convolution3D(width*8, 3, 3, 3, activation = 'relu', border_mode='same')(conv5)
    
    up6 = merge([UpSampling3D(size=(2, 2, 2))(conv5), conv2], mode='concat', concat_axis=1)
    conv6 = SpatialDropout3D(dropout_rate)(up6)
    conv6 = Convolution3D(width*4, 3, 3, 3, activation = 'relu', border_mode='same')(conv6)
    conv6 = Convolution3D(width*4, 3, 3, 3, activation = 'relu', border_mode='same')(conv6)

    up7 = merge([UpSampling3D(size=(2, 2, 2))(conv6), conv1], mode='concat', concat_axis=1)
    conv7 = SpatialDropout3D(dropout_rate)(up7)
    conv7 = Convolution3D(width*2, 3, 3, 3, activation = 'relu', border_mode='same')(conv7)
    conv7 = Convolution3D(width*2, 3, 3, 3, activation = 'relu', border_mode='same')(conv7)
    conv8 = Convolution3D(1, 1, 1, 1, activation='sigmoid')(conv7)

    model = Model(input=inputs, output=conv8)
    model.compile(optimizer=Adam(lr=1e-5), 
                  loss=dice_coef_loss, metrics=[dice_coef])

    return model
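The skip connections above use the Keras 1 merge(..., mode='concat') helper, which was removed in Keras 2; a hedged sketch of the same channels-first join with the functional concatenate:

# Hedged sketch (Keras 2 equivalent of the legacy merge call above).
from keras.layers import concatenate
up5 = concatenate([UpSampling3D(size=(2, 2, 2))(conv4), conv3], axis=1)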
Example 15
def generator(latent_size=1024, return_intermediate=False):

    loc = Sequential([
        Dense(64 * 64, input_dim=latent_size),
        Reshape((8, 8, 8, 8)),

        Conv3D(128, 3, 3, 3, border_mode='valid', init='he_uniform'),
        LeakyReLU(),
        BatchNormalization(),
        UpSampling3D(size=(2, 2, 2)),

        #ZeroPadding3D((2, 2, 0)),
        Conv3D(64, 4, 4, 4, init='he_uniform'),
        LeakyReLU(),
        BatchNormalization(),
        UpSampling3D(size=(2, 2, 2)),

        #ZeroPadding3D((2, 2, 3)),   #added
        Conv3D(32, 5, 5, 5, init='he_uniform'),
        LeakyReLU(),
        BatchNormalization(),
        UpSampling3D(size=(2, 2, 2)),

        #ZeroPadding3D((1,0,3)),
        Conv3D(16, 4, 4, 4, init='he_uniform'),
        LeakyReLU(),
        BatchNormalization(),
        #UpSampling3D(size=(2, 2, 2)),

        #ZeroPadding3D((1,0,3)),   
        Conv3D(8, 8, 8, 8, init='he_uniform'),
        LeakyReLU(),
        BatchNormalization(),
        UpSampling3D(size=(2, 2, 2)),

        #ZeroPadding3D((1,0,3)),           
        Conv3D(1, 12, 12, 12, init='he_uniform'),
        LeakyReLU(),
        #BatchNormalization(),
    ])
   
    latent = Input(shape=(latent_size, ))
    loc.summary() 
    plot_model(loc, to_file='loc.pdf', show_shapes=1)
    fake_image = loc(latent)

    Model(input=[latent], output=fake_image)
    return Model(input=[latent], output=fake_image)
Example 16
    def build_generator(self):

        model = Sequential()

        # Encoder
        model.add(
            Conv3D(32,
                   kernel_size=5,
                   strides=2,
                   input_shape=self.vol_shape,
                   padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv3D(64, kernel_size=5, strides=2, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv3D(128, kernel_size=5, strides=2, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))

        model.add(Conv3D(512, kernel_size=1, strides=2, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.5))

        # Decoder
        model.add(UpSampling3D())
        model.add(Deconv3D(256, kernel_size=5, padding="same"))
        model.add(Activation('relu'))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Deconv3D(128, kernel_size=5, padding="same"))
        model.add(Activation('relu'))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling3D())
        model.add(Deconv3D(64, kernel_size=5, padding="same"))
        model.add(Activation('relu'))
        model.add(BatchNormalization(momentum=0.8))
        #
        model.add(UpSampling3D())
        #
        model.add(Deconv3D(self.channels, kernel_size=5, padding="same"))
        model.add(Activation('tanh'))

        model.summary()

        masked_vol = Input(shape=self.vol_shape)
        gen_missing = model(masked_vol)

        return Model(masked_vol, gen_missing)
Example 17
def generator(latent_size=200, return_intermediate=False, with_bn=True):
    latent = Input(shape=(latent_size, ))

    bnm = 0

    x = _Dense(64 * 8 * 8, init='glorot_normal', name='gen_dense1')(latent)
    x = Reshape((8, 8, 8, 8))(x)
    x = _Conv3D(64,
                6,
                6,
                8,
                border_mode='same',
                init='he_uniform',
                name='gen_c1')(x)
    x = LeakyReLU()(x)
    if with_bn:
        x = _BatchNormalization(name='gen_bn1', mode=bnm)(x)
    x = UpSampling3D(size=(2, 2, 2))(x)
    x = ZeroPadding3D((0, 0, 2))(x)
    x = _Conv3D(6,
                1,
                1,
                10,
                border_mode='valid',
                init='he_uniform',
                name='gen_c2')(x)
    x = LeakyReLU()(x)
    if with_bn:
        x = _BatchNormalization(name='gen_bn2', mode=bnm)(x)

    x = UpSampling3D(size=(1, 1, 5))(x)
    x = ZeroPadding3D((1, 1, 0))(x)

    x = _Conv3D(1,
                3,
                3,
                1,
                bias=False,
                border_mode='valid',
                init='glorot_normal',
                name='gen_c3')(x)

    x = Activation('relu')(x)

    loc = _Model(input=latent, output=x)
    fake_image = loc(latent)
    _Model(input=[latent], output=fake_image)
    return _Model(input=[latent], output=fake_image, name='generator_model')
Example 18
        def deconv3d(layer_input,
                     skip_input,
                     filters,
                     kernel_size=(4, 4, 2),
                     strides=(2, 2, 2),
                     dropout_rate=0,
                     bn=True):
            """Layers used during upsampling"""
            if self.resizeconv:
                u = My3dResize(strides)(layer_input)
            else:
                u = UpSampling3D(size=strides)(layer_input)
            init = RandomNormal(stddev=0.02)
            u = Conv3D(filters,
                       kernel_size=kernel_size,
                       strides=1,
                       padding='same',
                       kernel_initializer=init,
                       activation='relu')(u)
            if dropout_rate:
                u = Dropout(dropout_rate)(u)
            if bn:
                #u = BatchNormalization(momentum=0.8)(u)
                u = InstanceNormalization()(u)
            u = Concatenate()([u, skip_input])
            u = Activation('relu')(u)
            return u
Example 19
def UpSampling(ndim=2, *args, **kwargs):
    if ndim == 2:
        return UpSampling2D(*args, **kwargs)
    elif ndim == 3:
        return UpSampling3D(*args, **kwargs)
    else:
        raise ValueError("ndim must be 2 or 3")
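A hedged usage sketch of the dispatcher; any UpSampling2D/UpSampling3D keyword arguments pass straight through:

# Hedged usage sketch.
up2d = UpSampling(ndim=2, size=(2, 2))       # keras.layers.UpSampling2D
up3d = UpSampling(ndim=3, size=(2, 2, 2))    # keras.layers.UpSampling3D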
Example 20
def up_conv_block_seunet(x, x2, f, dropout=False):

    x = UpSampling3D(size=(2, 2, 2))(x)

    channels_nb = K.int_shape(x2)[-1]

    if channels_nb==16:
        channels_nb_bottleneck = channels_nb // 16
    else:
        channels_nb_bottleneck = channels_nb // 32

    x3=GlobalMaxPooling3D()(x2)
    x3 = Dense(channels_nb_bottleneck, activation='relu')(x3)
    x3 = Dense(channels_nb, activation='sigmoid')(x3)

    y = Lambda(lambda x: attetion(x))([x2, x3])

    x = Concatenate(axis=-1)([x, y])

    f_new = f + channels_nb

    x = Conv3D(f_new, (3, 3, 3), padding="same")(x)
    x = Conv3D(f_new, (3, 3, 3), padding="same")(x)

    x = BatchNormalization(axis=-1)(x)
    if dropout:
        x = Dropout(0.5)(x)

    x = Activation("relu")(x)

    return x
Example 21
def __transition_up_block(ip,
                          nb_filters,
                          type='deconv',
                          weight_decay=1E-4,
                          block_prefix=None):
    '''Adds an upsampling block. The upsampling operation depends on the type parameter.
    # Arguments
        ip: input keras tensor
        nb_filters: integer, the dimensionality of the output space
            (i.e. the number output of filters in the convolution)
        type: can be 'upsampling', 'subpixel', 'deconv'. Determines
            type of upsampling performed
        weight_decay: weight decay factor
        block_prefix: str, for block unique naming
    # Input shape
        5D tensor with shape:
        `(samples, channels, dim1, dim2, dim3)` if data_format='channels_first'
        or 5D tensor with shape:
        `(samples, dim1, dim2, dim3, channels)` if data_format='channels_last'.
    # Output shape
        5D tensor with shape:
        `(samples, nb_filter, dim1 * 2, dim2 * 2, dim3 * 2)` if data_format='channels_first'
        or 5D tensor with shape:
        `(samples, dim1 * 2, dim2 * 2, dim3 * 2, nb_filter)` if data_format='channels_last'.
    # Returns
        a keras tensor
    '''
    with K.name_scope('TransitionUp'):

        if type == 'upsampling':
            x = UpSampling3D(
                name=name_or_none(block_prefix, '_upsampling'))(ip)
        elif type == 'subpixel':
            x = Conv3D(nb_filters, (3, 3, 3),
                       activation='relu',
                       padding='same',
                       kernel_regularizer=l2(weight_decay),
                       use_bias=False,
                       kernel_initializer='he_normal',
                       name=name_or_none(block_prefix, '_Conv3D'))(ip)
            x = SubPixelUpscaling(scale_factor=2,
                                  name=name_or_none(block_prefix,
                                                    '_subpixel'))(x)
            x = Conv3D(nb_filters, (3, 3, 3),
                       activation='relu',
                       padding='same',
                       kernel_regularizer=l2(weight_decay),
                       use_bias=False,
                       kernel_initializer='he_normal',
                       name=name_or_none(block_prefix, '_Conv3D_2'))(x)
        else:
            x = Conv3DTranspose(nb_filters, (3, 3, 3),
                                activation='relu',
                                padding='same',
                                strides=(2, 2, 2),
                                kernel_initializer='he_normal',
                                kernel_regularizer=l2(weight_decay),
                                name=name_or_none(block_prefix,
                                                  '_Conv3DT'))(ip)
        return x
Example 22
    def create_model(self, patch_size):
        inp = Input(shape=(patch_size[0], patch_size[0], patch_size[0], 1))
        e = Convolution3D(16, (5, 5, 5), activation='elu', padding='same')(inp)
        e = BatchNormalization()(e)
        e = MaxPooling3D((2, 2, 2), padding='same')(e)
        e = Flatten()(e)
        e = Dropout(0.5)(e)
        encoded = Dense(512, activation="elu")(e)
        d = Dropout(0.5)(encoded)
        d = Dense(int(patch_size[0] / 2) * int(patch_size[1] / 2) *
                  int(patch_size[2] / 2) * 16,
                  activation="elu")(d)
        d = Dropout(0.5)(d)
        d = Reshape((int(patch_size[0] / 2), int(patch_size[1] / 2),
                     int(patch_size[2] / 2), 16))(d)
        d = UpSampling3D((2, 2, 2))(d)
        decoded = Convolution3D(1, (5, 5, 5),
                                activation='sigmoid',
                                padding='same')(d)

        autoencoder = Model(inp, decoded)
        autoencoder.summary()
        autoencoder.compile(optimizer='adam', loss='mse')

        return autoencoder
Example 23
        def deconv3d(input_tensor,
                     n_filters,
                     kernel_size=(3, 3, 3),
                     batch_normalization=True,
                     scale=True,
                     padding='valid',
                     use_bias=False,
                     name=''):
            """
            3D deconvolutional layer (+ batch normalization) followed by ReLu activation
            """
            layer = UpSampling3D(size=2)(input_tensor)
            layer = Conv3D(filters=n_filters,
                           kernel_size=kernel_size,
                           padding=padding,
                           use_bias=use_bias,
                           name=name + '_conv3d')(layer)
            # if batch_normalization:
            #     layer = BatchNormalization(name=name+'_bn')(layer)
            #layer = Activation('relu', name=name+'_actrelu')(layer)

            # BN before activation
            if batch_normalization:
                layer = BatchNormalization(momentum=0.8,
                                           name=name + '_bn',
                                           scale=scale)(layer)
            layer = LeakyReLU(alpha=0.2, name=name + '_actleakyrelu')(layer)
            return layer
Example 24
def myUpsample(n_dims, size=2, prefix=None, suffix=None):
    if n_dims == 2:
        if not isinstance(size, tuple):
            size = (size, size)

        return UpSampling2D(
            size=size,
            name='_'.join([
                str(part)
                for part in [prefix, 'upsamp2D', suffix
                             ]  # include prefix and suffix if they exist
                if part is not None and len(str(part)) > 0
            ]))
    elif n_dims == 3:
        if not isinstance(size, tuple):
            size = (size, size, size)

        return UpSampling3D(
            size=size,
            name='_'.join([
                str(part)
                for part in [prefix, 'upsamp3D', suffix
                             ]  # include prefix and suffix if they exist
                if part is not None and len(str(part)) > 0
            ]))
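A hedged usage sketch of the helper; the prefix and suffix only affect the generated layer name:

# Hedged usage sketch (the tensor x is assumed to be 5D: batch, d1, d2, d3, channels).
up = myUpsample(n_dims=3, size=2, prefix='dec', suffix='1')  # UpSampling3D(name='dec_upsamp3D_1')
# y = up(x)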
Example 25
    def build_generator(self):
        """U-Net Generator"""
        def conv2d(layer_input, filters, f_size=4, bn=True):
            """Layers used during downsampling"""
            d = Conv3D(filters, kernel_size=f_size, strides=2,
                       padding='same')(layer_input)
            if bn:
                d = BatchNormalization(momentum=0.8)(d)
            d = LeakyReLU(alpha=0.2)(d)
            return d

        def deconv2d(layer_input,
                     skip_input,
                     filters,
                     f_size=4,
                     dropout_rate=0
                     ):  # dropout default changed from 0.5 in the reference implementation
            """Layers used during upsampling"""
            u = UpSampling3D(size=2)(layer_input)

            u = Conv3D(filters, kernel_size=f_size,
                       padding='same')(u)  # remove the strides
            if dropout_rate:
                u = Dropout(dropout_rate)(u)
            u = BatchNormalization(momentum=0.8)(u)
            u = Activation('relu')(u)
            u = Concatenate()([u, skip_input])
            return u

        img_S = Input(shape=self.img_shape, name='input_img_S')
        img_T = Input(shape=self.img_shape, name='input_img_T')

        d0 = Concatenate(axis=-1, name='combine_imgs_g')([img_S, img_T])
        #d0= Add(name='combine_imgs_g')([img_S, img_T])  #256

        # Downsampling
        d1 = conv2d(d0, self.gf, bn=False)  #128
        d2 = conv2d(d1, self.gf * 2)  #64
        d3 = conv2d(d2, self.gf * 4)  #32
        d4 = conv2d(d3, self.gf * 8)  #16
        d5 = conv2d(d4, self.gf * 8)  #8
        d6 = conv2d(d5, self.gf * 8)  #4
        d7 = conv2d(d6, self.gf * 8)  #2

        # Upsampling
        u1 = deconv2d(d7, d6, self.gf * 8)  #4
        u2 = deconv2d(u1, d5, self.gf * 8)  #8
        u3 = deconv2d(u2, d4, self.gf * 8)  #16
        u4 = deconv2d(u3, d3, self.gf * 4)  #32
        u5 = deconv2d(u4, d2, self.gf * 2)  #64
        u6 = deconv2d(u5, d1, self.gf)  #128

        u7 = UpSampling3D(size=2)(
            u6
        )  #256 #the original architecture from the paper is a bit different
        phi = Conv3D(filters=3, kernel_size=1, strides=1,
                     padding='same')(u7)  #256

        return Model([img_S, img_T], outputs=phi, name='generator_model')
Example 26
def make_generator_ae(input_layer, num_output_filters):
    """
    Creates the generator according to the specs in the paper below.
    [https://arxiv.org/pdf/1611.07004v1.pdf][5. Appendix]
    :param input_layer: input keras tensor fed to the encoder
    :param num_output_filters: number of channels in the generated output
    :return: the generator output tensor
    """
    # -------------------------------
    # ENCODER
    # C64-C128-C256-C512-C512-C512-C512-C512
    # 1 layer block = Conv - BN - LeakyRelu
    # -------------------------------
    stride = 2
    filter_sizes = [32, 64, 128, 256, 256, 256, 256, 256]
    # filter_sizes = [64, 128, 256, 512, 512, 512, 512, 512]

    encoder = input_layer
    for filter_size in filter_sizes:
        encoder = Conv3D(filters=filter_size,
                         kernel_size=(4, 4, 4),
                         padding='same',
                         strides=(stride, stride, stride))(encoder)
        # paper skips batch norm for first layer
        if filter_size != 32:
            # if filter_size != 64:
            encoder = BatchNormalization()(encoder)
        encoder = Activation(LeakyReLU(alpha=0.2))(encoder)

    # -------------------------------
    # DECODER
    # CD512-CD512-CD512-C512-C512-C256-C128-C64
    # 1 layer block = Conv - Upsample - BN - DO - Relu
    # -------------------------------
    stride = 2
    # filter_sizes = [512, 512, 512, 512, 512, 256, 128, 64]
    # filter_sizes = [256, 256, 256, 256, 256, 128, 64, 32]
    filter_sizes = [  # integer filter counts (plain / would give floats under Python 3)
        256 // 4, 256 // 4, 256 // 4, 256 // 4, 256 // 4, 128 // 4, 64 // 4, 32 // 4
    ]

    decoder = encoder
    for filter_size in filter_sizes:
        decoder = UpSampling3D(size=(2, 2, 2))(decoder)
        decoder = Conv3D(filters=filter_size,
                         kernel_size=(4, 4, 4),
                         padding='same')(decoder)
        decoder = BatchNormalization()(decoder)
        decoder = Dropout(rate=0.5)(decoder)
        decoder = Activation('relu')(decoder)

    # After the last layer in the decoder, a convolution is applied
    # to map to the number of output channels (3 in general,
    # except in colorization, where it is 2), followed by a Tanh
    # function.
    decoder = Conv3D(filters=num_output_filters,
                     kernel_size=(4, 4, 4),
                     padding='same')(decoder)
    generator = Activation('tanh')(decoder)
    return generator
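A hedged usage sketch wiring the encoder-decoder into a Model; the 256-cube single-channel input shape is an assumption chosen so that eight stride-2 encoder stages and eight 2x decoder stages round-trip cleanly:

# Hedged usage sketch (input shape is an assumption, not from the original source).
from keras.layers import Input
from keras.models import Model

inp = Input(shape=(256, 256, 256, 1))
out = make_generator_ae(inp, num_output_filters=3)
generator = Model(inp, out)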
Example 27
    def build(input_shape,
              filters=(64, 128, 256),
              filter_size=(3, 3, 3),
              pool_size=(2, 2, 2)):
        """Create a 3D Convolutional Autoencoder model.

        Parameters:
        - input_shape: Tuple of input shape in the format
            (conv_dim1, conv_dim2, conv_dim3, channels)
        - filters: Tuple of filter counts, one per encoding layer;
            the decoding path mirrors it in reverse.
        - filter_size: Convolution kernel size used in every layer.
        - pool_size: Pooling / upsampling factor for every stage.

        Returns:
        - A 3D CAD model that takes a 5D tensor (volumetric images
        in batch) as input and returns a 5D tensor (prediction) as output.
        """

        if len(input_shape) != 4:
            raise ValueError("Input shape should be a tuple "
                             "(conv_dim1, conv_dim2, conv_dim3, channels)")

        if len(filters) < 1:
            raise ValueError("At least one filter layer is required")

        input_img = Input(shape=input_shape, name="cad_input")
        x = input_img

        num_encoding_layers = len(filters)

        for i in range(num_encoding_layers):
            x = Conv3D(filters[i],
                       filter_size,
                       activation='relu',
                       padding='same',
                       name="cad_enc_{}".format(i))(x)
            x = MaxPooling3D(pool_size=pool_size,
                             strides=(2, 2, 2),
                             padding="same",
                             name="cad_pool_{}".format(i))(x)

        for i in range(num_encoding_layers)[::-1]:
            x = Conv3D(filters[i],
                       filter_size,
                       activation='relu',
                       padding='same',
                       name="cad_dec_{}".format(i))(x)
            x = UpSampling3D(size=pool_size,
                             data_format=None,
                             name="cad_unpool_{}".format(i))(x)

        x = Conv3D(1, (3, 3, 3),
                   activation='sigmoid',
                   padding='same',
                   name="cad_sigmoid")(x)
        model = Model(inputs=input_img, outputs=x)
        return model
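A hedged usage sketch for the configurable builder, again assuming it is callable as a plain function; with three pooling stages the spatial dimensions should be divisible by 8:

# Hedged usage sketch (shapes and optimizer are assumptions).
cad = build((64, 64, 64, 1), filters=(16, 32, 64))
cad.compile(optimizer='adam', loss='mse')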
Example 28
def UpSampling(*args, ndim=2, **kwargs):
    if ndim == 2:
        return UpSampling2D(*args, **kwargs)
    elif ndim == 3:
        kwargs['size'] = (1, kwargs['size'], kwargs['size'])
        return UpSampling3D(*args, **kwargs)
    else:
        raise ValueError("ndim must be 2 or 3")
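A hedged usage sketch: note that the 3D branch rewrites size as (1, size, size), so only the two in-plane dimensions are scaled and size must be passed as a keyword:

# Hedged usage sketch.
up_hw_only = UpSampling(ndim=3, size=2)  # becomes UpSampling3D(size=(1, 2, 2))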
Example 29
def buildDecoder3d(model, filters, filtersize=3):
    model.add(
        Conv3D(filters, (filtersize, filtersize, filtersize), padding='same'))
    model.add(BN())
    model.add(Dropout(0.2))
    model.add(Activation('relu'))
    model.add(UpSampling3D(size=(2, 2, 2)))
    return model
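A hedged usage sketch stacking two of these decoder blocks onto an existing Sequential model whose current output is a 5D feature map:

# Hedged usage sketch (the incoming `model` is assumed to end in a 5D feature map).
model = buildDecoder3d(model, filters=64)
model = buildDecoder3d(model, filters=32, filtersize=5)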
Example 30
def fUpSample(up_in, factor, method='repeat'):
    factor = int(np.round(1 / factor))
    if method == 'repeat':
        up_out = UpSampling3D(size=(factor, factor, factor),
                              data_format='channels_first')(up_in)
    else:
        # interpolation-based upsampling is not implemented here
        #up_out = scaling.fscalingLayer3D(up_in, factor, [up_in._keras_shape[2],up_in._keras_shape[3],up_in._keras_shape[4]])
        raise NotImplementedError("fUpSample only supports method='repeat'")
    return up_out
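A hedged usage sketch: a factor of 0.5 rounds to an integer repeat factor of 2 along every axis of a channels-first feature map:

# Hedged usage sketch (feature_map is an assumed channels-first 5D Keras tensor).
up = fUpSample(feature_map, factor=0.5)  # UpSampling3D(size=(2, 2, 2), data_format='channels_first')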