Example #1
def build_rim_parallel3_single(params):

    nc = params['nc']

    cell1 = ConvLSTM3DCell(params['cell_size1'], kernel_size=params['cell_kernel_size1'], padding='SAME')
    cell2 = ConvLSTM3DCell(params['cell_size2'], kernel_size=params['cell_kernel_size2'], padding='SAME')
    cell3 = ConvLSTM3DCell(params['cell_size3'], kernel_size=params['cell_kernel_size3'], padding='SAME')
    cells = [cell1, cell2, cell3]

    input_layer1 = Conv3D(params['input_size1'], kernel_size=params['input_kernel_size1'], 
                         trainable=True, padding='SAME', activation=params['input_activation'])
    input_layer2 = Conv3D(params['input_size2'], kernel_size=params['input_kernel_size2'], strides= [params['strides']]*3,
                         trainable=True, padding='SAME', activation=params['input_activation'])
    input_layer3 = Conv3D(params['input_size3'], kernel_size=params['input_kernel_size3'], strides= [2*params['strides']]*3,
                         trainable=True, padding='SAME', activation=params['input_activation'])
    input_layers = [input_layer1, input_layer2, input_layer3]

    output_layer1 = Conv3D(params['output_size1'], kernel_size=params['output_kernel_size1'], trainable=True, padding='SAME', 
                          activation=params['output_activation'])   
    output_layer2 = Conv3DTranspose(params['output_size2'], kernel_size=params['output_kernel_size2'], trainable=True, padding='SAME', 
                          strides= [params['strides']]*3, activation=params['output_activation'])
    output_layer3 = Conv3DTranspose(params['output_size3'], kernel_size=params['output_kernel_size3'], trainable=True, padding='SAME', 
                          strides= [params['strides']]*3, activation=params['output_activation'])
    output_layers = [output_layer1, output_layer2, output_layer3]
   
    rim = RIM3D_parallel3_single(cells, input_layers, output_layers, strides=params['strides'], niter=params['rim_iter'])

    return rim
Example #2
def space_attention_block(input, filters, kernel_size):
    output_trunk = input

    x = Conv3D(filters=filters, kernel_size=kernel_size, padding='same', use_bias=False,
               kernel_initializer='he_normal', kernel_regularizer=l2(5e-4))(input)
    x = BatchNormalization(axis=-1)(x)
    x = Activation('relu')(x)

    x_1 = Conv3D(filters, kernel_size=kernel_size, strides=(2, 2, 1), padding='same')(x)
    x_1 = Activation('relu')(x_1)

    x_2 = Conv3D(filters * 2, kernel_size=kernel_size, strides=(2, 2, 1), padding='same')(x_1)
    x_2 = Activation('relu')(x_2)

    x_3 = Conv3DTranspose(filters=filters, kernel_size=kernel_size, strides=(2, 2, 1), padding='same')(x_2)
    x_3 = Activation('relu')(x_3)

    x_4 = Conv3DTranspose(filters=filters, kernel_size=kernel_size, strides=(2, 2, 1), padding='same')(x_3)
    x_4 = Activation('sigmoid')(x_4)
    # x_4 = Activation('relu')(x_4)

    output = Multiply()([x_4, x])

    # output = add([output_trunk, x_4])

    # output = Lambda(lambda x: x + 1)(x_4)
    # output = Multiply()([output, output_trunk])

    x_add = add([output, output_trunk])

    return x_add
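
A minimal usage sketch for space_attention_block above, assuming the function and the Keras layers/regularizers it uses (Conv3D, Conv3DTranspose, BatchNormalization, Activation, Multiply, add, l2) are already imported in the same module; the input shape and filter count are illustrative only. The final add requires the input channel count to equal filters, and the first two spatial dimensions must be divisible by 4 because of the two stride-(2, 2, 1) stages.

from tensorflow.keras import Input, Model

inp = Input((32, 32, 8, 16))   # hypothetical volume: channel count must equal `filters`
out = space_attention_block(inp, filters=16, kernel_size=(3, 3, 3))
model = Model(inp, out)
model.summary()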
Example #3
def upsample(inp,
             factor,
             nchannels,
             bn=None,
             activation=None,
             bias=False,
             dilation_rate=1,
             prefix='unet_3d',
             idx=0,
             upsampling='copy',
             residual=False):

    if residual:
        resized = UpSampling3D(size=(1, factor, factor))(inp)
        resized = Conv3D(nchannels, (1, 1, 1), strides=1,
                         padding='same')(resized)

        resized2 = Conv3DTranspose(nchannels, (1, factor, factor),
                                   strides=(1, factor, factor),
                                   name=prefix + "_deconv3d_" + str(idx),
                                   kernel_initializer='he_normal',
                                   use_bias=bias,
                                   dilation_rate=dilation_rate)(inp)
    else:
        if upsampling == 'copy':
            resized = UpSampling3D(size=(1, factor, factor))(inp)
            resized = Conv3D(nchannels, (1, 1, 1), strides=1,
                             padding='same')(resized)
        else:
            resized = Conv3DTranspose(nchannels, (1, factor, factor),
                                      strides=(1, factor, factor),
                                      name=prefix + "_deconv3d_" + str(idx),
                                      kernel_initializer='he_normal',
                                      use_bias=bias,
                                      dilation_rate=dilation_rate)(inp)

    if bn == 'before':
        resized = BatchNormalization(axis=4,
                                     name=prefix + "_batchnorm_" +
                                     str(idx))(resized)

    resized = activation(resized)

    if bn == 'after':
        resized = BatchNormalization(axis=4,
                                     name=prefix + "_batchnorm_" +
                                     str(idx))(resized)

    if residual:
        # Combine the copy-upsampling and transposed-convolution paths
        resized = resized + resized2

    return resized
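
A minimal call sketch for upsample above, assuming the function and its Keras layers are in scope; the shapes, factor, and channel count are illustrative. Because the function applies activation as a callable, an Activation layer instance is passed rather than a string.

from tensorflow.keras import Input, Model
from tensorflow.keras.layers import Activation

inp = Input((8, 32, 32, 16))   # hypothetical input: 8 frames of 32x32 with 16 channels
out = upsample(inp, factor=2, nchannels=16, bn='after',
               activation=Activation('relu'), upsampling='copy', idx=0)
model = Model(inp, out)        # output shape: (None, 8, 64, 64, 16)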
Example #4
 def add(self,
         filters,
         kernel_size,
         strides=(1, 1, 1),
         padding='valid',
         output_padding=None,
         data_format=None,
         activation=None,
         use_bias=True,
         kernel_initializer='glorot_uniform',
         bias_initializer='zeros',
         kernel_regularizer=None,
         bias_regularizer=None,
         activity_regularizer=None,
         kernel_constraint=None,
         bias_constraint=None,
         **kwargs):
     return self._add_layer(
         Conv3DTranspose(filters,
                         kernel_size,
                         strides=strides,
                         padding=padding,
                         output_padding=output_padding,
                         data_format=data_format,
                         activation=activation,
                         use_bias=use_bias,
                         kernel_initializer=kernel_initializer,
                         bias_initializer=bias_initializer,
                         kernel_regularizer=kernel_regularizer,
                         bias_regularizer=bias_regularizer,
                         activity_regularizer=activity_regularizer,
                         kernel_constraint=kernel_constraint,
                         bias_constraint=bias_constraint,
                         **kwargs))
Example #5
def build_rim_parallel(params):

    nc = params['nc']
    input_layer = Conv3D(params['input_size'], kernel_size=params['input_kernel_size'], 
                         trainable=True, padding='SAME', 
                         input_shape=(None, nc, nc, nc, 2), activation=params['input_activation'])

    input_layer_sub = Conv3D(params['input_size'], kernel_size=params['input_kernel_size'], 
                             trainable=True, padding='SAME', strides= [params['strides']]*3,
                             input_shape=(None, nc, nc, nc, 2), activation=params['input_activation'])

    cell1 = ConvLSTM3DCell(params['cell_size'], kernel_size=params['cell_kernel_size'], padding='SAME')

    output_layer_up = Conv3DTranspose(params['cell_size'], kernel_size=params['middle_kernel_size'], 
                         trainable=True, padding='SAME', strides=[params['strides']]*3, 
                         activation=params['output_activation'])

    cell2 = ConvLSTM3DCell(params['cell_size'], kernel_size=params['cell_kernel_size'], padding='SAME')

    output_layer = Conv3D(1, kernel_size=params['output_kernel_size'], trainable=True, padding='SAME', 
                          input_shape=(None, nc, nc, nc, params['cell_size']*2), activation=params['output_activation'])
   
    rim = RIM3D_parallel(cell1, cell2, input_layer, input_layer_sub, output_layer_up, output_layer, strides=params['strides'],
                       niter=params['rim_iter'])

    return rim
Example #6
    def define_generator(self):
        """Makes a generator that takes a CT image as input to generate a dose distribution of the same dimensions"""

        # Define inputs
        ct_image = Input(self.ct_shape)
        roi_masks = Input(self.roi_masks_shape)

        # Build Model starting with Conv3D layers
        x = concatenate([ct_image, roi_masks])
        x1 = self.generator_convolution(x, self.initial_number_of_filters)
        x2 = self.generator_convolution(x1, 2 * self.initial_number_of_filters)
        x3 = self.generator_convolution(x2, 4 * self.initial_number_of_filters)
        x4 = self.generator_convolution(x3, 8 * self.initial_number_of_filters)
        x5 = self.generator_convolution(x4, 8 * self.initial_number_of_filters)
        x6 = self.generator_convolution(x5, 8 * self.initial_number_of_filters, use_batch_norm=False)

        # Build model back up from bottleneck
        x5b = self.generator_convolution_transpose(x6, 8 * self.initial_number_of_filters, use_dropout=False)
        x4b = self.generator_convolution_transpose(x5b, 8 * self.initial_number_of_filters, skip_x=x5)
        x3b = self.generator_convolution_transpose(x4b, 4 * self.initial_number_of_filters, use_dropout=False, skip_x=x4)
        x2b = self.generator_convolution_transpose(x3b, 2 * self.initial_number_of_filters, skip_x=x3)
        x1b = self.generator_convolution_transpose(x2b, self.initial_number_of_filters, use_dropout=False, skip_x=x2)

        # Final layer
        x0b = concatenate([x1b, x1])
        x0b = Conv3DTranspose(1, self.filter_size, strides=self.stride_size, padding="same")(x0b)
        x_final = AveragePooling3D((3, 3, 3), strides=(1, 1, 1), padding="same")(x0b)
        final_dose = Activation("relu")(x_final)

        # Compile model for use
        self.generator = Model(inputs=[ct_image, roi_masks], outputs=final_dose, name="generator")
        self.generator.compile(loss="mean_absolute_error", optimizer=self.gen_optimizer)
        self.generator.summary()
Example #7
	def sendec_block(input_tensor1, input_tensor2):		 	
		x = Conv3DTranspose(filters=16, kernel_size=(2, 3, 3), strides=(1, 2, 2), 
			padding='same', data_format='channels_last')(input_tensor1) 
		x = concatenate([input_tensor2, x], axis=-1) 
		x = BatchNormalization()(x)	
		x = Conv3D(filters=16, kernel_size=(1, 3, 3), strides=(1, 1, 1), 
			activation='relu', padding='same', data_format='channels_last')(x)

		return x
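
A minimal call sketch, assuming sendec_block above and the layers it uses (Conv3DTranspose, concatenate, BatchNormalization, Conv3D) are in scope; it is invoked as a plain function here and the shapes are illustrative. input_tensor1 is upsampled by (1, 2, 2), so input_tensor2 must already have twice the height and width.

from tensorflow.keras import Input, Model

t1 = Input((4, 60, 80, 32))     # low-resolution feature map (hypothetical)
t2 = Input((4, 120, 160, 16))   # skip connection at the target resolution
out = sendec_block(t1, t2)      # -> (None, 4, 120, 160, 16)
model = Model([t1, t2], out)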
Example #8
    def _build(self):

        c = self.channels
        ksize = self.kernel_size
        self.l10, self.l11, self.l1 = Conv3D(c, kernel_size=ksize, padding='SAME', activation='linear'), \
                     Conv3D(c, kernel_size=ksize, padding='SAME', activation='linear'), \
                     Conv3D(c, kernel_size=1, padding='SAME', activation='linear')
        self.l12, self.l13, self.l1new = Conv3D(c, kernel_size=ksize, padding='SAME', activation='linear'), \
                     Conv3D(c, kernel_size=ksize, padding='SAME', activation='linear'), \
                     Conv3D(c, kernel_size=1, padding='SAME', activation='linear')
        #self.b10, self.b11, self.b1 = [BatchNormalization()]*3
        self.b10, self.b11, self.b1 = [linear] * 3

        self.l20, self.l21, self.l2 = Conv3D(2*c, kernel_size=ksize, padding='SAME', activation='linear'), \
                     Conv3D(2*c, kernel_size=ksize, padding='SAME', activation='linear'), \
                     Conv3D(2*c, kernel_size=1, padding='SAME', activation='linear')
        self.l22, self.l23, self.l2new = Conv3D(2*c, kernel_size=ksize, padding='SAME', activation='linear'), \
                     Conv3D(2*c, kernel_size=ksize, padding='SAME', activation='linear'), \
                     Conv3D(2*c, kernel_size=1, padding='SAME', activation='linear')
        #self.b20, self.b21, self.b2 = [BatchNormalization()]*3
        self.b20, self.b21, self.b2 = [linear] * 3

        self.l3in = Conv3D(c,
                           kernel_size=ksize,
                           padding='SAME',
                           activation='linear')
        self.l30, self.l31, self.l3 = Conv3D(c, kernel_size=ksize, padding='SAME', activation='linear'), \
                     Conv3D(c, kernel_size=ksize, padding='SAME', activation='linear'), \
                     Conv3D(c, kernel_size=1, padding='SAME', activation='linear')
        self.l32, self.l33, self.l3new = Conv3D(c, kernel_size=ksize, padding='SAME', activation='linear'), \
                     Conv3D(c, kernel_size=ksize, padding='SAME', activation='linear'), \
                     Conv3D(c, kernel_size=1, padding='SAME', activation='linear')
        #self.b30, self.b31, self.b3 = [BatchNormalization()]*3
        self.b30, self.b31, self.b3 = [linear] * 3

        self.sub = Conv3D(2 * c,
                          kernel_size=ksize,
                          padding='SAME',
                          activation='linear',
                          strides=[self.strides] * 3)
        self.sup = Conv3DTranspose(c,
                                   kernel_size=ksize,
                                   padding='SAME',
                                   activation='linear',
                                   strides=[self.strides] * 3)
        self.out0 = Conv3D(c,
                           kernel_size=ksize,
                           padding='SAME',
                           activation='linear')
        #self.outb = BatchNormalization()
        self.outb = linear
        self.out1 = Conv3D(c,
                           kernel_size=ksize,
                           padding='SAME',
                           activation='linear')
Example #9
    def generator_convolution_transpose(self, x, nodes, use_dropout=True, skip_x=None):
        """Convolution transpose block used for generator"""

        if skip_x is not None:
            x = concatenate([x, skip_x])
        x = Conv3DTranspose(nodes, self.filter_size, strides=self.stride_size, padding="same", use_bias=False)(x)
        x = BatchNormalization(momentum=0.99, epsilon=1e-3)(x)
        if use_dropout:
            x = SpatialDropout3D(0.2)(x)
        x = LeakyReLU(alpha=0)(x)  # Use LeakyReLU(alpha = 0) instead of ReLU because ReLU is buggy when saved

        return x
Example #10
def main(
    batch_size=16,
    episode_length=16,
    filters=16,
    width=64,
    height=64,
    memory_size=32,
):
    # Prevent TensorFlow from allocating all available GPU memory
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    tf.keras.backend.set_session(tf.Session(config=config))

    input_layer = Input([episode_length, width, height, 1])
    layer = input_layer

    layer = Conv3D(filters=filters, kernel_size=3, strides=(1, 2, 2), padding="same")(layer)
    layer = Conv3D(filters=filters, kernel_size=3, strides=(1, 2, 2), padding="same")(layer)
    layer = Conv3D(filters=filters, kernel_size=3, strides=(1, 2, 2), padding="same")(layer)
    layer = Conv3D(filters=filters, kernel_size=3, strides=(1, 2, 2), padding="same")(layer)
    layer = Conv3D(filters=filters, kernel_size=3, strides=(1, 2, 2), padding="same")(layer)

    tmp_shape = layer.shape.as_list()[1:]
    code_size = tmp_shape[1] * tmp_shape[2] * tmp_shape[3]

    layer = Reshape([episode_length, code_size])(layer)
    layer = Memory(code_size=code_size, memory_size=memory_size)(layer)
    layer = Reshape(tmp_shape)(layer)

    layer = Conv3DTranspose(filters=filters, kernel_size=3, strides=(1, 2, 2), padding="same")(layer)
    layer = Conv3DTranspose(filters=filters, kernel_size=3, strides=(1, 2, 2), padding="same")(layer)
    layer = Conv3DTranspose(filters=filters, kernel_size=3, strides=(1, 2, 2), padding="same")(layer)
    layer = Conv3DTranspose(filters=filters, kernel_size=3, strides=(1, 2, 2), padding="same")(layer)
    layer = Conv3DTranspose(filters=filters, kernel_size=3, strides=(1, 2, 2), padding="same")(layer)
    layer = Conv3DTranspose(filters=1, kernel_size=1, strides=1, padding="same", activation="sigmoid")(layer)

    output_layer = layer

    model = Model(inputs=input_layer, outputs=output_layer)

    model.compile("adam", loss="mse", metrics=["mse"])
    model.summary()

    for var in model.variables:
        print(var, end=' ')
        print(var.trainable)

    dataset_input_tensor = tf.random.normal(shape=[episode_length, width, height, 1])
    dataset_input_tensor = tf.clip_by_value(dataset_input_tensor, 0.0, 1.0)
    dataset = tf.data.Dataset.from_tensors(dataset_input_tensor)
    dataset = dataset.repeat(-1)
    dataset = dataset.map(lambda x: (x, x))
    dataset = dataset.batch(batch_size)

    log_dir = "../logs/KanervaMachine/log_{}".format(int(time()))
    os.makedirs(log_dir)
    tensorboard = TensorBoard(log_dir=log_dir, update_freq="batch")

    model.fit(dataset, callbacks=[tensorboard], steps_per_epoch=500, epochs=100)
Example #11
def deconv3D_block(x,
                   nchannels,
                   window,
                   strides=(1, 1, 1),
                   nblocks=1,
                   dropout=0,
                   prefix='unet_3d_deconv',
                   bias=False,
                   bn=None,
                   activation=Activation('relu'),
                   dilation_rate=1,
                   residual=False):

    if isinstance(activation, str):
        act = Activation(activation)
    else:
        act = activation

    for i in range(nblocks):
        if residual:
            pad = 'same'
        else:
            pad = 'valid'

        inp = x
        x = Conv3DTranspose(filters=nchannels,
                            kernel_size=window,
                            strides=strides,
                            name=prefix + "_deconv3d_" + str(i),
                            kernel_initializer='he_normal',
                            use_bias=bias,
                            dilation_rate=dilation_rate,
                            padding=pad)(x)

        if bn == 'before':
            x = BatchNormalization(axis=4,
                                   name=prefix + "_batchnorm_" + str(i))(x)

        x = act(x)

        if bn == 'after':
            x = BatchNormalization(axis=4,
                                   name=prefix + "_batchnorm_" + str(i))(x)

        if dropout > 0:
            x = SpatialDropout3D(rate=dropout, data_format='channels_last')(x)

        if inp.get_shape().as_list()[-1] == nchannels and residual:
            x = inp + x

    return x
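
A minimal call sketch for deconv3D_block above, assuming the function and its Keras dependencies are in scope; the shapes are illustrative. With residual=True the padding is 'same', so unit strides preserve the spatial shape and the residual addition lines up whenever the input channel count equals nchannels.

from tensorflow.keras import Input, Model

inp = Input((16, 16, 16, 32))   # hypothetical 16^3 volume with 32 channels
out = deconv3D_block(inp, nchannels=32, window=(3, 3, 3),
                     nblocks=2, dropout=0.1, bn='before', residual=True)
model = Model(inp, out)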
Example #12
def decoder_block_guided(x,
                         cross_over_connection,
                         nr_of_convolutions,
                         iteration,
                         attention_layer,
                         use_bn=False,
                         spatial_dropout=None):
    x = Conv3DTranspose(nr_of_convolutions,
                        kernel_size=3,
                        padding='same',
                        strides=2)(x)
    upsampling_factor = int(math.pow(2, iteration))
    attention_layer_up = Conv3DTranspose(
        nr_of_convolutions,
        kernel_size=3,
        padding='same',
        strides=upsampling_factor)(attention_layer)
    x = Concatenate()([attention_layer_up, cross_over_connection, x])
    if use_bn:
        x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = convolution_block(x, nr_of_convolutions, use_bn, spatial_dropout)

    return x
Example #13
def decoder_block(x,
                  cross_over_connection,
                  nr_of_convolutions,
                  use_bn=False,
                  spatial_dropout=None):
    x = Conv3DTranspose(nr_of_convolutions,
                        kernel_size=3,
                        padding='same',
                        strides=2)(x)
    x = Concatenate()([cross_over_connection, x])
    if use_bn:
        x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = convolution_block(x, nr_of_convolutions, use_bn, spatial_dropout)

    return x
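
decoder_block above relies on a project-specific convolution_block; the sketch below substitutes a hypothetical Conv3D + ReLU stand-in purely so the call is runnable, assumes decoder_block and its Keras layer dependencies are in scope, and uses illustrative shapes. The transposed convolution doubles each spatial dimension, so cross_over_connection must come from the encoder level at twice the resolution.

from tensorflow.keras import Input, Model
from tensorflow.keras.layers import Conv3D, Activation

def convolution_block(x, nr_of_convolutions, use_bn=False, spatial_dropout=None):
    # Hypothetical stand-in for the project's convolution_block
    x = Conv3D(nr_of_convolutions, kernel_size=3, padding='same')(x)
    return Activation('relu')(x)

low_res = Input((8, 8, 8, 64))    # decoder input (hypothetical)
skip = Input((16, 16, 16, 32))    # encoder skip connection at 2x resolution
out = decoder_block(low_res, skip, nr_of_convolutions=32, use_bn=True)
model = Model([low_res, skip], out)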
Example #14
def dconv_3d(x,
             filter_num,
             ks,
             strides=None,
             padding=None,
             activation=None,
             name=None):
    if strides is None:
        strides = (1, 1, 1)
    if padding is None:
        padding = 'valid'  # fall back to the Keras default

    x = Conv3DTranspose(filter_num,
                        ks,
                        strides=strides,
                        padding=padding,
                        kernel_initializer=k_init,
                        name=name)(x)

    if activation is not None:
        if activation == "relu":
            x = Activation('relu')(x)
    return x
Example #15
def get_model(weights=None, verbose=True, **kwargs):
    for k, v in kwargs.items():
        assert k in PARAMS
        PARAMS[k] = v
    if verbose:
        print("Model hyper-parameters:", PARAMS)

    dhw = PARAMS['dhw']
    first_scale = PARAMS['first_scale']
    first_layer = PARAMS['first_layer']
    kernel_initializer = PARAMS['kernel_initializer']
    weight_decay = PARAMS['weight_decay']
    down_structure = PARAMS['down_structure']
    output_size = PARAMS['output_size']

    shape = dhw + [1]

    inputs = Input(shape=shape)

    if first_scale is not None:
        scaled = Lambda(first_scale)(inputs)
    else:
        scaled = inputs
    conv = Conv3D(first_layer, kernel_size=(3, 3, 3), padding='same', use_bias=True,
                  kernel_initializer=kernel_initializer,
                  kernel_regularizer=l2_penalty(weight_decay))(scaled)

    downsample_times = len(down_structure)
    top_down = []
    for l, n in enumerate(down_structure):
        db = _dense_block(conv, n)
        top_down.append(db)
        conv = _transmit_block(db, l == downsample_times - 1)

    feat = top_down[-1]
    for top_feat in reversed(top_down[:-1]):
        *_, f = top_feat.get_shape().as_list()
        deconv = Conv3DTranspose(filters=f, kernel_size=2, strides=2, use_bias=True,
                                 kernel_initializer=kernel_initializer,
                                 kernel_regularizer=l2_penalty(weight_decay))(feat)
        feat = add([top_feat, deconv])
    seg_head = Conv3D(1, kernel_size=(1, 1, 1), padding='same',
                      activation='sigmoid', use_bias=True,
                      kernel_initializer=kernel_initializer,
                      kernel_regularizer=l2_penalty(weight_decay),
                      name='seg')(feat)

    if output_size == 1:
        last_activation = 'sigmoid'
    else:
        last_activation = 'softmax'

    clf_head = Dense(output_size, activation=last_activation,
                     kernel_regularizer=l2_penalty(weight_decay),
                     kernel_initializer=kernel_initializer,
                     name='clf')(conv)

    model = Model(inputs, [clf_head, seg_head])
    if verbose:
        model.summary()

    if weights is not None:
        model.load_weights(weights)
    return model
Example #16
	def _sEnDec_cnn_lstm(input_dim, dp):

		print('[INFO] Creating sEnDec_cnn_lstm Model...\n')
		input_layer = Input(shape=input_dim)	
		seq0 = Conv3D(filters=16, kernel_size=(1, 3, 3), strides=(1, 1, 1),
		               activation='relu',
		               padding='same', data_format='channels_last')(input_layer)	

		# - SEnDec block 1
		seq1, seq12 = Models.sendec_block1(seq0)

		seq13 = Conv3D(filters=32, kernel_size=(1, 3, 3), strides=(1, 2, 2),
		               activation='relu',
		               padding='same', data_format='channels_last')(seq12)  
		
		# - SEnDec block 2
		seq2, seq22 = Models.sendec_block1(seq13)				

		seq22 = Conv3D(filters=32, kernel_size=(1, 3, 3), strides=(1, 2, 2),
		               activation='relu',
		               padding='same', data_format='channels_last')(seq22)
		
		# - SEnDec block 3
		seq30, seq32 = Models.sendec_block1(seq22)

		seq3 = Conv3D(filters=32, kernel_size=(1, 3, 3), strides=(1, 2, 2),
		               activation='relu',
		               padding='same', data_format='channels_last')(seq32) 
		seq4 = ConvLSTM2D(filters=16, kernel_size=(3, 3), strides=(2, 2),
		        activation='relu', padding='same', return_sequences=True)(seq3) 
				
		#-~~~~~~~~~~~~~~~~~~ Upsampling ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
		      	               
		seq6 = Conv3DTranspose(filters=16, kernel_size=(2, 3, 3), 
				strides=(1, 2, 2), padding='same', data_format='channels_last')(seq4) 	
		seq6 = concatenate([seq6, seq3], axis=-1) 
		
		seq6 = Conv3D(filters=32, kernel_size=(1, 3, 3), strides=(1, 1, 1), padding='same', data_format='channels_last')(seq6)
		seq6 = BatchNormalization()(seq6)
		seq6 = Activation('relu')(seq6)
		seq6 = concatenate([seq6, seq30], axis=-1)       
		
		seq7 = Conv3DTranspose(filters=16, kernel_size=(2, 3, 3), 
				strides=(1, 2, 2), padding='same', data_format='channels_last')(seq6) 
		seq7 = concatenate([seq7, seq22], axis=-1) 
		              
		seq7 = Conv3D(filters=32, kernel_size=(1, 3, 3), strides=(1, 1, 1), padding='same', data_format='channels_last')(seq7)
		seq7 = BatchNormalization()(seq7)
		seq7 = Activation('relu')(seq7)
		seq7 = concatenate([seq7, seq2], axis=-1) 
		
		seq8 = Conv3DTranspose(filters=16, kernel_size=(2, 3, 3), 
				strides=(1, 2, 2), padding='same', data_format='channels_last')(seq7)  
		seq8 = concatenate([seq8, seq13], axis=-1) 
		seq8 = Conv3D(filters=32, kernel_size=(1, 3, 3), strides=(1, 1, 1), padding='same', data_format='channels_last')(seq8)
		
		seq8 = BatchNormalization()(seq8)
		seq8 = Activation('relu')(seq8)
		seq8 = concatenate([seq8, seq1], axis=-1) 
		
		seq9 = Conv3DTranspose(filters=16, kernel_size=(2, 3, 3), 
				strides=(1, 2, 2), padding='same', data_format='channels_last')(seq8) 
		seq9 = concatenate([seq9, seq0], axis=-1) 
		seq9 = Conv3D(filters=32, kernel_size=(1, 3, 3), strides=(1, 1, 1), padding='same', data_format='channels_last')(seq9)
		
		seq9 = BatchNormalization()(seq9)
		seq9 = Activation('relu')(seq9)
		

		seq91 = Dropout(dp)(seq9)

		output_layer = Conv3D(filters=1, kernel_size=(2, 3, 3), strides=(1, 1, 1),
		               activation='sigmoid',
		               padding='same', data_format='channels_last')(seq91) #240 x 320

	

		print('[INFO] Model Creation is Completed\n')

		return Model(input_layer, output_layer)
Example #17
def main():
    batch_size = 16
    episode_length = 64
    width = 64
    height = 64
    memory_size = 32

    input_layer = Input([episode_length, width, height, 1])
    layer = input_layer

    layer = Conv3D(filters=32,
                   kernel_size=3,
                   strides=(1, 2, 2),
                   padding="same")(layer)
    layer = Conv3D(filters=32,
                   kernel_size=3,
                   strides=(1, 2, 2),
                   padding="same")(layer)
    layer = Conv3D(filters=32,
                   kernel_size=3,
                   strides=(1, 2, 2),
                   padding="same")(layer)
    layer = Conv3D(filters=32,
                   kernel_size=3,
                   strides=(1, 2, 2),
                   padding="same")(layer)
    layer = Conv3D(filters=32,
                   kernel_size=3,
                   strides=(1, 2, 2),
                   padding="same")(layer)

    tmp_shape = layer.shape.as_list()[1:]
    code_size = tmp_shape[1] * tmp_shape[2] * tmp_shape[3]
    layer = Reshape([episode_length, code_size])(layer)

    memory = Memory(code_size=code_size, memory_size=memory_size)
    layer = memory(layer)

    layer = Reshape(tmp_shape)(layer)

    layer = Conv3DTranspose(filters=32,
                            kernel_size=3,
                            strides=(1, 2, 2),
                            padding="same")(layer)
    layer = Conv3DTranspose(filters=32,
                            kernel_size=3,
                            strides=(1, 2, 2),
                            padding="same")(layer)
    layer = Conv3DTranspose(filters=32,
                            kernel_size=3,
                            strides=(1, 2, 2),
                            padding="same")(layer)
    layer = Conv3DTranspose(filters=32,
                            kernel_size=3,
                            strides=(1, 2, 2),
                            padding="same")(layer)
    layer = Conv3DTranspose(filters=32,
                            kernel_size=3,
                            strides=(1, 2, 2),
                            padding="same")(layer)
    layer = Conv3DTranspose(filters=1,
                            kernel_size=1,
                            strides=1,
                            padding="same",
                            activation="sigmoid")(layer)

    output_layer = layer

    model = Model(inputs=input_layer, outputs=output_layer)

    model.compile("adam", loss="mse", metrics=["mse"])
    model.summary()

    dataset_input_tensor = tf.random.normal(
        shape=[episode_length, width, height, 1])
    dataset_input_tensor = tf.clip_by_value(dataset_input_tensor, 0.0, 1.0)
    dataset = tf.data.Dataset.from_tensors(dataset_input_tensor)
    dataset = dataset.repeat(-1)
    dataset = dataset.map(lambda x: (x, x))
    dataset = dataset.batch(batch_size)

    log_dir = "../logs/KanervaMachine/log_{}".format(int(time()))
    os.makedirs(log_dir)
    tensorboard = TensorBoard(log_dir=log_dir, update_freq="batch")

    model.fit(dataset,
              callbacks=[tensorboard],
              steps_per_epoch=500,
              epochs=100)
Example #18
File: model.py  Project: VolkerH/TRAILMAP
def get_net():
    # Level 1
    input = Input((input_dim, input_dim, input_dim, 1))
    conv1 = Conv3D(32, (3, 3, 3), activation="relu", padding="same")(input)
    batch1 = BatchNormalization()(conv1)
    conv1 = Conv3D(64, (3, 3, 3), activation="relu", padding="same")(batch1)
    batch1 = BatchNormalization()(conv1)

    # Level 2
    pool2 = MaxPooling3D((2, 2, 2))(batch1)
    conv2 = Conv3D(64, (3, 3, 3), activation="relu", padding="same")(pool2)
    batch2 = BatchNormalization()(conv2)
    conv2 = Conv3D(128, (3, 3, 3), activation="relu", padding="same")(batch2)
    batch2 = BatchNormalization()(conv2)

    # Level 3
    pool3 = MaxPooling3D((2, 2, 2))(batch2)
    conv3 = Conv3D(128, (3, 3, 3), activation="relu", padding="same")(pool3)
    batch3 = BatchNormalization()(conv3)
    conv3 = Conv3D(256, (3, 3, 3), activation="relu", padding="same")(batch3)
    batch3 = BatchNormalization()(conv3)

    # Level 4
    pool4 = MaxPooling3D((2, 2, 2))(batch3)
    conv4 = Conv3D(256, (3, 3, 3), activation="relu", padding="same")(pool4)
    batch4 = BatchNormalization()(conv4)
    conv4 = Conv3D(512, (3, 3, 3), activation="relu", padding="same")(batch4)
    batch4 = BatchNormalization()(conv4)

    # Level 3
    up5 = Conv3DTranspose(512, (2, 2, 2),
                          strides=(2, 2, 2),
                          padding="same",
                          activation="relu")(batch4)
    merge5 = concatenate([up5, batch3])
    conv5 = Conv3D(256, (3, 3, 3), activation="relu")(merge5)
    batch5 = BatchNormalization()(conv5)
    conv5 = Conv3D(256, (3, 3, 3), activation="relu")(batch5)
    batch5 = BatchNormalization()(conv5)

    # Level 2
    up6 = Conv3DTranspose(256, (2, 2, 2), strides=(2, 2, 2),
                          activation="relu")(batch5)
    merge6 = concatenate(
        [up6, Cropping3D(cropping=((4, 4), (4, 4), (4, 4)))(batch2)])
    conv6 = Conv3D(128, (3, 3, 3), activation="relu")(merge6)
    batch6 = BatchNormalization()(conv6)
    conv6 = Conv3D(128, (3, 3, 3), activation="relu")(batch6)
    batch6 = BatchNormalization()(conv6)

    # Level 1
    up7 = Conv3DTranspose(128, (2, 2, 2),
                          strides=(2, 2, 2),
                          padding="same",
                          activation="relu")(batch6)
    merge7 = concatenate(
        [up7, Cropping3D(cropping=((12, 12), (12, 12), (12, 12)))(batch1)])
    conv7 = Conv3D(64, (3, 3, 3), activation="relu")(merge7)
    batch7 = BatchNormalization()(conv7)
    conv7 = Conv3D(64, (3, 3, 3), activation="relu")(batch7)
    batch7 = BatchNormalization()(conv7)

    # Output dim is (36, 36, 36)
    preds = Conv3D(1, (1, 1, 1), activation="sigmoid")(batch7)
    model = Model(inputs=input, outputs=preds)

    model.compile(optimizer=Adam(lr=0.001, decay=0.00),
                  loss=weighted_binary_crossentropy,
                  metrics=[
                      axon_precision, axon_recall, f1_score,
                      artifact_precision, edge_axon_precision,
                      adjusted_accuracy
                  ])

    return model
Example #19
    def __init__(self, dtype):
        super().__init__()
        self.deconv1 = Conv3DTranspose(256,
                                       kernel_size=(4, 4, 4),
                                       padding='VALID',
                                       dtype=dtype,
                                       activation=None,
                                       name='deconv1')
        self.bn1 = BatchNormalization(momentum=0.999,
                                      epsilon=1e-5,
                                      dtype=dtype,
                                      name='bn1')
        self.lrelu1 = LeakyReLU(alpha=0.02, name='lrelu1')

        self.deconv2 = Conv3DTranspose(128,
                                       kernel_size=(4, 4, 4),
                                       strides=(2, 2, 2),
                                       padding='SAME',
                                       dtype=dtype,
                                       activation=None,
                                       name='deconv2')
        self.bn2 = BatchNormalization(momentum=0.999,
                                      epsilon=1e-5,
                                      dtype=dtype,
                                      name='bn2')
        self.lrelu2 = LeakyReLU(alpha=0.02, name='lrelu2')

        self.deconv3 = Conv3DTranspose(64,
                                       kernel_size=(4, 4, 4),
                                       strides=(2, 2, 2),
                                       padding='SAME',
                                       dtype=dtype,
                                       activation=None,
                                       name='deconv3')
        self.bn3 = BatchNormalization(momentum=0.999,
                                      epsilon=1e-5,
                                      dtype=dtype,
                                      name='bn3')
        self.lrelu3 = LeakyReLU(alpha=0.02, name='lrelu3')

        self.deconv4 = Conv3DTranspose(32,
                                       kernel_size=(4, 4, 4),
                                       strides=(2, 2, 2),
                                       padding='SAME',
                                       dtype=dtype,
                                       activation=None,
                                       name='deconv4')
        self.bn4 = BatchNormalization(momentum=0.999,
                                      epsilon=1e-5,
                                      dtype=dtype,
                                      name='bn4')
        self.lrelu4 = LeakyReLU(alpha=0.02, name='lrelu4')

        self.deconv5 = Conv3DTranspose(1,
                                       kernel_size=(4, 4, 4),
                                       strides=(2, 2, 2),
                                       padding='SAME',
                                       dtype=dtype,
                                       activation=None,
                                       name='deconv5')

        self.out = Activation(activation=tf.keras.activations.sigmoid,
                              name='sigmoid')
Example #20
def conv3d_transpose_relu_dropout(input_, filters_, kernel_size_, name):
    output_ = Conv3DTranspose(filters=filters_, kernel_size=kernel_size_, padding='same', strides=(1, 2, 2),
                              name=name + '/Conv3DTranspose')(input_)
    output_ = ReLU(name=name + '/Activation')(output_)
    return output_
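
A minimal usage sketch for the helper above, assuming it and Conv3DTranspose/ReLU are imported; the shapes are illustrative. With strides (1, 2, 2) the helper doubles height and width while leaving the first (depth/time) dimension unchanged.

from tensorflow.keras import Input, Model

inp = Input((4, 32, 32, 64))                                          # hypothetical feature map
out = conv3d_transpose_relu_dropout(inp, 32, (3, 3, 3), name='up1')   # -> (None, 4, 64, 64, 32)
model = Model(inp, out)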