def foreground():
    """Build the foreground generator: 100-dim noise -> 64x64x32 volume, 3 channels.

    Upsampling uses Conv3DTranspose with kernel 4 and stride 2 per stage.
    Cleanup: removed the unused `in_shape` local and dead commented-out layers.
    """
    fg = tf.keras.Sequential()
    # Project the 100-dim noise vector and reshape to a 4x4x2 seed volume.
    fg.add(layers.Dense(4 * 4 * 2, use_bias=False, input_shape=(100,)))
    fg.add(layers.BatchNormalization())
    fg.add(layers.Reshape((4, 4, 2, 1)))
    # First layer uses a (2, 4, 4) kernel at stride 1: 512 channels, size unchanged.
    fg.add(layers.Conv3DTranspose(512, (2, 4, 4), strides=1, use_bias=False, padding='same'))
    # 8x8x4 with 256 channels
    fg.add(layers.Conv3DTranspose(256, 4, strides=2, use_bias=False, padding='same'))
    # 16x16x8 with 128 channels
    fg.add(layers.Conv3DTranspose(128, 4, strides=2, use_bias=False, padding='same'))
    # 32x32x16 with 128 channels
    fg.add(layers.Conv3DTranspose(128, 4, strides=2, use_bias=False, padding='same'))
    # Foreground output: 64x64x32 with 3 channels in [-1, 1] (tanh).
    fg.add(layers.Conv3DTranspose(3, 4, strides=2, use_bias=False, padding='same', activation='tanh'))
    return fg
def __init__(self, out_shape, strides=1, ksize = 3, shortcut = False):
    """Residual generator block: one 2x-upsampling transpose conv followed by
    three stride-1 Conv3D layers, each paired with BatchNorm and LeakyReLU.

    Args:
        out_shape: number of output channels for every conv in the block.
        strides: accepted but not used here — upsampling is hard-coded to
            stride 2.  TODO confirm this is intentional.
        ksize: kernel size shared by all convolutions.
        shortcut: if True, also build a 1x1x1 stride-2 transpose conv so the
            skip path matches the upsampled main path.
    """
    super(ResBlock_generator, self).__init__()
    self.shortcut = shortcut
    # self.upSample = layers.UpSampling3D()
    # Main path: 2x upsampling, then three refining stride-1 convs.
    self.conv_0 = layers.Conv3DTranspose(out_shape, kernel_size = ksize, strides=2, padding='same', name = 'rg_conv1', use_bias=False)
    self.bn_0 = layers.BatchNormalization()
    self.PRelu0 = layers.LeakyReLU(name='G_LeakyReLU1')
    self.conv_1 = layers.Conv3D(out_shape, kernel_size = ksize, strides=1, padding='same', name = 'rg_conv2', use_bias=False)
    self.bn_1 = layers.BatchNormalization()
    self.PRelu1 = layers.LeakyReLU(name='G_LeakyReLU2')
    self.conv_2 = layers.Conv3D(out_shape, kernel_size = ksize, strides=1, padding='same', name = 'rg_conv3', use_bias=False)
    self.bn_2 = layers.BatchNormalization()
    self.PRelu2 = layers.LeakyReLU(name='G_LeakyReLU3')
    self.conv_3 = layers.Conv3D(out_shape, kernel_size = ksize, strides=1, padding='same', name = 'rg_conv4', use_bias=False)
    self.bn_3 = layers.BatchNormalization()
    if shortcut:
        # self.upSample_shortcut = layers.UpSampling3D()
        # Shortcut path: 1x1x1 transpose conv at stride 2 to match the main path's shape.
        self.conv_shortcut = layers.Conv3DTranspose(out_shape, kernel_size=1, strides=2, padding='same', use_bias=False)
    # NOTE(review): assumed to apply after the residual addition regardless of
    # `shortcut`, hence defined outside the if — confirm against call().
    self.PRelu3 = layers.LeakyReLU(name='G_LeakyReLU4')
def make_generator_model():
    """3D DCGAN generator: 100-dim latent vector -> (30, 30, 30, 1) voxel grid
    with sigmoid output in [0, 1]."""
    const = ClipConstraint(0.01)
    gen = tf.keras.Sequential()

    # Project and reshape the latent vector to a 5x5x5x256 seed block.
    gen.add(layers.Dense(5 * 5 * 5 * 256, use_bias=False, input_shape=(100,)))
    gen.add(layers.BatchNormalization())
    gen.add(layers.LeakyReLU())
    gen.add(layers.Reshape((5, 5, 5, 256)))
    assert gen.output_shape == (None, 5, 5, 5, 256)  # None is the batch size

    # Stride-1 refinement stage.
    gen.add(layers.Conv3DTranspose(128, (5, 5, 5), strides=(1, 1, 1), padding='same', use_bias=False))
    assert gen.output_shape == (None, 5, 5, 5, 128)
    gen.add(layers.BatchNormalization())
    gen.add(layers.LeakyReLU())

    # 3x upsampling stage: 5 -> 15.
    gen.add(layers.Conv3DTranspose(64, (5, 5, 5), strides=(3, 3, 3), padding='same', use_bias=False))
    assert gen.output_shape == (None, 15, 15, 15, 64)
    gen.add(layers.BatchNormalization())
    gen.add(layers.LeakyReLU())

    # Final 2x upsampling stage: 15 -> 30, sigmoid output.
    gen.add(layers.Conv3DTranspose(1, (5, 5, 5), strides=(2, 2, 2), padding='same', use_bias=False, activation='sigmoid'))
    assert gen.output_shape == (None, 30, 30, 30, 1)
    return gen
def build_generator():
    """Build a 3D DCGAN generator: 100-dim noise -> single-channel volume in [-1, 1].

    Bug fix: the Dense projection produced 4*3*256 = 3072 units, which cannot
    be reshaped to (4, 4, 4, 256) = 16384 elements (Reshape would raise at
    build time).  It now projects to 4*4*4*256.
    """
    model = tf.keras.Sequential()
    model.add(layers.Dense(4 * 4 * 4 * 256, use_bias=False, input_shape=(100, )))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    model.add(layers.Reshape((4, 4, 4, 256)))
    # Note: None is the batch size.
    assert model.output_shape == (None, 4, 4, 4, 256)
    # NOTE(review): KERNEL_SIZE / STRIDES are module-level constants; the
    # shape asserts below assume the strides they encode — confirm they match.
    model.add(
        layers.Conv3DTranspose(128, KERNEL_SIZE, strides=STRIDES, padding='same', use_bias=False))
    assert model.output_shape == (None, 4, 4, 4, 128)
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    model.add(
        layers.Conv3DTranspose(64, KERNEL_SIZE, strides=STRIDES, padding='same', use_bias=False))
    assert model.output_shape == (None, 8, 8, 8, 64)
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    # Final stage: tanh output in [-1, 1].
    model.add(
        layers.Conv3DTranspose(1, KERNEL_SIZE, strides=STRIDES, padding='same', use_bias=False,
                               activation='tanh'))
    assert model.output_shape == (None, 16, 16, 16, 1)
    return model
def setup_model(self):
    """Assemble the autoencoder layer stack and register each layer via
    self._add_layer."""
    nl = self.hparams['num_latent_layers']

    stack = []
    # Encoder: four conv -> relu -> 2x maxpool stages, 64 to 512 channels.
    for idx, filters in zip((4, 3, 2, 1), (64, 128, 256, 512)):
        stack.append(tfl.Conv3D(filters, (2, 2, 2), padding="same", name='conv_%d_conv' % idx))
        stack.append(tfl.Activation(tf.nn.relu, name='conv_%d_activation' % idx))
        stack.append(tfl.MaxPool3D((2, 2, 2), name='conv_%d_maxpool' % idx))

    # Bottleneck: flatten to the latent vector, then expand back to 4x4x4x512.
    stack.append(tfl.Flatten(name='flatten'))
    stack.append(tfl.Dense(nl, activation='relu', name='latent'))
    stack.append(tfl.Dense(32768, activation='relu', name='expand'))
    stack.append(tfl.Reshape((4, 4, 4, 512), name='reshape'))

    # Decoder: 2x-upsampling transpose convs; the final one has no activation.
    for idx, filters in zip((1, 2, 3), (256, 128, 64)):
        stack.append(tfl.Conv3DTranspose(filters, (2, 2, 2), strides=2, name='deconv_%d_deconv' % idx))
        stack.append(tfl.Activation(tf.nn.relu, name='deconv_%d_activation' % idx))
    stack.append(tfl.Conv3DTranspose(1, (2, 2, 2), strides=2, name='deconv_4_deconv'))

    for layer in stack:
        self._add_layer(layer)
def get_model(input_shape, num_classes):
    """Xception-style 3D segmentation network: residual downsampling encoder,
    residual upsampling decoder, per-voxel softmax head."""
    inputs = tf.keras.Input(input_shape)

    ### [First half of the network: downsampling inputs] ###

    # Entry block.
    x = layers.Conv3D(32, 3, strides=2, padding="same")(inputs)
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)
    residual_src = x  # set aside residual

    # Downsampling blocks; identical apart from the feature depth.
    for depth in [64, 128, 256]:
        x = layers.Activation("relu")(x)
        x = SeparableConv3D(depth, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)
        x = layers.Activation("relu")(x)
        x = SeparableConv3D(depth, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)
        x = layers.MaxPooling3D(3, strides=2, padding="same")(x)
        # Project and add back the residual.
        shortcut = layers.Conv3D(depth, 1, strides=2, padding="same")(residual_src)
        x = layers.add([x, shortcut])
        residual_src = x

    ### [Second half of the network: upsampling inputs] ###

    for depth in [256, 128, 64, 32]:
        x = layers.Activation("relu")(x)
        x = layers.Conv3DTranspose(depth, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)
        x = layers.Activation("relu")(x)
        x = layers.Conv3DTranspose(depth, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)
        x = layers.UpSampling3D(2)(x)
        # Project and add back the residual.
        shortcut = layers.UpSampling3D(2)(residual_src)
        shortcut = layers.Conv3D(depth, 1, padding="same")(shortcut)
        x = layers.add([x, shortcut])
        residual_src = x

    # Per-voxel classification layer.
    outputs = layers.Conv3D(num_classes, 3, activation="softmax", padding="same")(x)
    return tf.keras.Model(inputs, outputs)
def generator():
    """3D DCGAN generator: 100-dim noise -> (DIMENSN, DIMENSN, DIMENSN, 1)
    volume in [-1, 1] (tanh output)."""
    kernel = (4, 4, 4)
    stride = (2, 2, 2)
    # Spatial seed size: four 2x upsamplings bring it back to DIMENSN.
    seed = DIMENSN // (2 ** 4)

    model = tf.keras.Sequential()

    # Project and reshape the latent vector to a (seed, seed, seed, 512) block.
    model.add(layers.Dense(seed * seed * seed * 512, use_bias=False, input_shape=(100, )))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    model.add(layers.Reshape((seed, seed, seed, 512)))

    # Stride-1 refinement, then three 2x-upsampling stages.
    model.add(layers.Conv3DTranspose(256, kernel, strides=(1, 1, 1), padding='same', use_bias=False))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    for filters in (128, 64, 32):
        model.add(layers.Conv3DTranspose(filters, kernel, strides=stride, padding='same', use_bias=False))
        model.add(layers.BatchNormalization())
        model.add(layers.LeakyReLU())

    # Final 2x upsampling to full resolution.
    model.add(layers.Conv3DTranspose(1, kernel, strides=stride, padding='same', use_bias=False,
                                     activation='tanh'))
    return model
def setup_model(self):
    """Assemble the autoencoder layers, optionally appending the final U-Net
    combination head, and register them via self._add_layer."""
    nl = self.params['num_latent_layers']
    autoencoder_layers = [
        # Encoder: conv -> relu -> 2x maxpool, four times (64..512 channels).
        tfl.Conv3D(64, (2,2,2), padding="same", name='conv_4_conv'),
        tfl.Activation(tf.nn.relu, name='conv_4_activation'),
        tfl.MaxPool3D((2,2,2), name='conv_4_maxpool'),
        tfl.Conv3D(128, (2,2,2), padding="same", name='conv_3_conv'),
        tfl.Activation(tf.nn.relu, name='conv_3_activation'),
        tfl.MaxPool3D((2,2,2), name='conv_3_maxpool'),
        tfl.Conv3D(256, (2,2,2), padding="same", name='conv_2_conv'),
        tfl.Activation(tf.nn.relu, name='conv_2_activation'),
        tfl.MaxPool3D((2,2,2), name='conv_2_maxpool'),
        tfl.Conv3D(512, (2,2,2), padding="same", name='conv_1_conv'),
        tfl.Activation(tf.nn.relu, name='conv_1_activation'),
        tfl.MaxPool3D((2,2,2), name='conv_1_maxpool'),
        # Bottleneck: latent vector of size nl, expanded back to 4x4x4x512.
        tfl.Flatten( name='flatten'),
        tfl.Dense(nl, activation='relu', name='latent'),
        tfl.Dense(32768, activation='relu', name='expand'),
        tfl.Reshape((4,4,4,512), name='reshape'),
        # Decoder: 2x-upsampling transpose convs; the last has no activation.
        tfl.Conv3DTranspose(256, (2,2,2,), strides=2, name='deconv_1_deconv'),
        tfl.Activation(tf.nn.relu, name='deconv_1_activation'),
        tfl.Conv3DTranspose(128, (2,2,2,), strides=2, name='deconv_2_deconv'),
        tfl.Activation(tf.nn.relu, name='deconv_2_activation'),
        tfl.Conv3DTranspose(64, (2,2,2,), strides=2, name='deconv_3_deconv'),
        tfl.Activation(tf.nn.relu, name='deconv_3_activation'),
        tfl.Conv3DTranspose(1, (2,2,2,), strides=2, name='deconv_4_deconv'),
        # tfl.Activation(tf.nn.relu, name='deconv_4_activation'),
        # tfl.Conv3DTranspose(1, (2,2,2,), strides=1, name='deconv_5_deconv', padding="same"),
    ]
    if self.params['is_u_connected'] and self.params['use_final_unet_layer']:
        extra_unet_layers = [
            # 1x1x1 conv that combines the U-connected channels into 2.
            tfl.Conv3D(2, (1,1,1,), use_bias=False, name='unet_combine'),
            # tfl.Activation(tf.nn.relu, name='unet_final_activation'),
        ]
        # Optional output squashing selected by the 'final_activation' param.
        if self.params['final_activation'] == 'sigmoid':
            extra_unet_layers.append(tfl.Activation(tf.math.sigmoid, name='unet_final_activation'))
        if self.params['final_activation'] == 'relu':
            extra_unet_layers.append(tfl.Activation(tf.nn.relu, name='unet_final_activation'))
        # NOTE(review): this concat must stay inside the if — extra_unet_layers
        # is undefined when the U-Net head is disabled.
        autoencoder_layers = autoencoder_layers + extra_unet_layers
    for l in autoencoder_layers:
        self._add_layer(l)
def VAE_decoder(latent_dim):
    """VAE decoder: latent vector -> 3-channel volume with sigmoid output.

    The latent vector is projected to a (75, 75, 10, 6) block and upsampled
    2x twice by transpose convolutions.
    """
    latent_inputs = keras.Input(shape=(latent_dim,))
    h = layers.Dense(75 * 75 * 10 * 6, activation="relu")(latent_inputs)
    h = layers.Reshape((75, 75, 10, 6))(h)
    # Two 2x-upsampling stages, then a stride-1 projection to 3 channels.
    h = layers.Conv3DTranspose(64, (3, 3, 3), activation="relu", strides=2, padding="same")(h)
    h = layers.Conv3DTranspose(32, (3, 3, 3), activation="relu", strides=2, padding="same")(h)
    decoder_outputs = layers.Conv3DTranspose(3, (3, 3, 3), activation="sigmoid", padding="same")(h)
    decoder = keras.Model(latent_inputs, decoder_outputs, name="decoder")
    decoder.summary()
    return decoder
def background():
    """Build the background generator: 100-dim noise -> 3-channel volume.

    Bug fix: Dense(4*4) was given input_shape=(4, 4, 1), so it produced a
    (4, 4, 16) tensor — 256 elements, which cannot be reshaped to
    (4, 4, 1, 1) = 16.  The projection now takes the 100-dim noise vector,
    matching the sibling background()/foreground() builders in this file.
    """
    bg = tf.keras.Sequential()
    bg.add(layers.Dense(4 * 4, use_bias=False, input_shape=(100,)))
    bg.add(layers.BatchNormalization())
    bg.add(layers.Reshape((4, 4, 1, 1)))
    # (2, 4, 4) kernel, stride (1, 1, 2) on the depth axis; 512 channels.
    bg.add(layers.Conv3DTranspose(512, (2, 4, 4), strides=(1, 1, 2),
                                  use_bias=False, padding='same'))
    # Three in-plane 2x upsamplings (depth kept at stride 1): 256 -> 128 -> 128.
    bg.add(layers.Conv3DTranspose(256, 4, strides=(2, 2, 1), use_bias=False, padding='same'))
    bg.add(layers.Conv3DTranspose(128, 4, strides=(2, 2, 1), use_bias=False, padding='same'))
    bg.add(layers.Conv3DTranspose(128, 4, strides=(2, 2, 1), use_bias=False, padding='same'))
    # Output: 3-channel volume in [-1, 1] (tanh).
    bg.add(layers.Conv3DTranspose(3, 4, strides=(2, 2, 1), use_bias=False,
                                  padding='same', activation='tanh'))
    return bg
def generator_model(noise_len):
    """3D DCGAN generator: noise_len-dim vector -> (16, 16, 16, 1) in [-1, 1].

    Bug fix: the original appended `layers.tanh()`, which does not exist in
    tf.keras.layers (AttributeError at model-build time) and would have been
    redundant anyway — the last transpose conv already applies tanh.  That
    call is removed.
    """
    model = tf.keras.Sequential()
    # Project the noise vector and reshape to the seed volume given by `dim`.
    model.add(layers.Dense(2048, use_bias=False, input_shape=(noise_len, )))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    model.add(layers.Reshape((dim[1], dim[2], dim[3])))
    assert model.output_shape == (None, dim[1], dim[2], dim[3])
    # Three hidden 2x-upsampling stages: 256 -> 128 -> 64 channels.
    for filters in (256, 128, 64):
        model.add(layers.Conv3DTranspose(filters, (4, 4, 4), strides=2,
                                         padding='same', use_bias=False))
        model.add(layers.BatchNormalization())
        model.add(layers.LeakyReLU())
    # Final 2x upsampling with tanh output.
    model.add(layers.Conv3DTranspose(1, (4, 4, 4), strides=2, padding='same',
                                     use_bias=False, activation='tanh'))
    assert model.output_shape == (None, 16, 16, 16, 1)
    return model
def create_block_components(names=None, dims=2):
    """Create reusable conv/norm/activation lambdas for building model blocks.

    Args:
        names: iterable of component names to return, in order; defaults to
            all seven.  Each name is looked up in this function's locals(),
            so the lambda variable names below are part of the contract and
            must not be renamed.
        dims: 2 selects (1, 3, 3) kernels (2D convs applied over a z-stack);
            any other value selects full (3, 3, 3) kernels.

    Returns:
        List of the requested component callables (None for unknown names).
    """
    # --- padding == same, z-size == 1 (or full 3D when dims != 2)
    kwargs_z1 = {
        'kernel_size': (1, 3, 3) if dims == 2 else (3, 3, 3),
        'padding': 'same',
        'kernel_initializer': 'he_normal'}
    # --- padding = valid, z-size == 2
    kwargs_z2 = {
        'kernel_size': (2, 1, 1),
        'padding': 'valid',
        'kernel_initializer': 'he_normal'}
    # --- padding = valid, z-size == 3
    kwargs_z3 = {
        'kernel_size': (3, 1, 1),
        'padding': 'valid',
        'kernel_initializer': 'he_normal'}
    # --- Define block components
    conv_z1 = lambda x, filters, strides : layers.Conv3D(filters=filters, strides=strides, **kwargs_z1)(x)
    conv_z2 = lambda x, filters, strides : layers.Conv3D(filters=filters, strides=strides, **kwargs_z2)(x)
    conv_z3 = lambda x, filters, strides : layers.Conv3D(filters=filters, strides=strides, **kwargs_z3)(x)
    tran_z1 = lambda x, filters, strides : layers.Conv3DTranspose(filters=filters, strides=strides, **kwargs_z1)(x)
    # NOTE(review): conv_fc is currently the identity — presumably a
    # placeholder for a fully-connected conv; confirm before relying on it.
    conv_fc = lambda x, filters : (x)
    norm = lambda x : layers.BatchNormalization()(x)
    relu = lambda x : layers.LeakyReLU()(x)
    # --- Return local vars, selected by name via locals().
    names = names or ('conv_z1', 'conv_z2', 'conv_z3', 'tran_z1', 'conv_fc', 'norm', 'relu')
    lvars = locals()
    return [lvars.get(n) for n in names]
def get_model(input_shape, num_classes):
    """Dense-U-Net-style 3D segmentation model: every level concatenates its
    input with its conv output before pooling; the decoder merges transpose-
    conv upsampling with the stored skip tensors."""
    inputs = layers.Input(input_shape)
    down = inputs
    skips = []

    # Encoder: two conv blocks per level, dense concat, then 2x pooling.
    for n_filter in [32, 64, 128, 256]:
        c = conv3d_block(down, n_filter)
        c = conv3d_block(c, n_filter)
        skips.append(c)
        merged = layers.concatenate([down, c], axis=4)
        down = layers.MaxPooling3D(pool_size=(2, 2, 2))(merged)

    # Bridge at the bottom of the U.
    c = conv3d_block(down, 512)
    c = conv3d_block(c, 512)
    down = layers.concatenate([down, c], axis=4)

    # Decoder: transpose-conv upsampling, skip concat, two conv blocks.
    for n_filter in [256, 128, 64, 32]:
        up = layers.concatenate(
            [layers.Conv3DTranspose(n_filter, 2, strides=2, padding='same')(down),
             skips.pop()],
            axis=-1)
        c = conv3d_block(up, n_filter)
        c = conv3d_block(c, n_filter)
        down = layers.concatenate([up, c], axis=-1)

    # Per-voxel class probabilities.
    out = layers.Conv3D(num_classes, 1, activation='softmax')(down)
    return tf.keras.models.Model(inputs=[inputs], outputs=[out])
def __init__(self, num_channels, num_conv, use_2d=True, kernel_size=3,
             name="upsampling_conv_res_block", **kwargs):
    """Upsampling residual block: one same-size conv plus one 2x transpose
    conv, built from the 2D or 3D layer family depending on `use_2d`."""
    super(Up_ResBlock, self).__init__(name=name)
    self.num_conv = num_conv
    # Both branches share kernel size and 'same' padding; only the
    # dimensionality (and thus the stride tuple) differs.
    if use_2d:
        self.conv = tfkl.Conv2D(num_channels, kernel_size=kernel_size, padding='same')
        self.up_conv = tfkl.Conv2DTranspose(num_channels, kernel_size=kernel_size,
                                            strides=(2, 2), padding='same')
    else:
        self.conv = tfkl.Conv3D(num_channels, kernel_size=kernel_size, padding='same')
        self.up_conv = tfkl.Conv3DTranspose(num_channels, kernel_size=kernel_size,
                                            strides=(2, 2, 2), padding='same')
def up_conv_block(self, m, prev, filters_a, filters_b):
    """3D up-convolution block: upsample `m` in-plane (stride (2, 2, 1)),
    merge with the skip tensor `prev`, then refine with two conv+BN stages."""
    up = layers.Conv3DTranspose(
        filters_a,
        self.transpose_kernel_size,
        strides=(2, 2, 1),
        padding="same",
        activation=self.activation,
    )(m)
    up = layers.BatchNormalization()(up)
    merged = layers.Concatenate()([up, prev])
    # Two refinement convolutions at filters_b channels.
    out = layers.Conv3D(filters_b, self.kernel_size, padding="same",
                        activation=self.activation)(merged)
    out = layers.BatchNormalization()(out)
    out = layers.Conv3D(filters_b, self.kernel_size, padding="same",
                        activation=self.activation)(out)
    return layers.BatchNormalization()(out)
def generator_model(self, out_size, start_size=8, start_filters=512):
    """Progressive-GAN-style video generator.

    Starts from a (start_size/2, start_size, start_size) volume and doubles
    the resolution log2(out_size / start_size) times.  When fading is enabled,
    the previous (lower) resolution output is upsampled, converted to RGB with
    the shared head, and blended with the final stage using `fade` as alpha.

    Fixes vs. the original:
      * the Dense unit count used `start_size/2` (a float) — now integer;
      * `if self.fade and lower_res` evaluated a symbolic Keras tensor as a
        bool — now an explicit `is not None` check;
      * the blending Lambda declared three positional args but was called
        with a single list — it now unpacks the list itself.
    """

    # Blend upsampled low-res frames with full-res frames: alpha ramps
    # 0 -> 1 as the new resolution stage fades in.
    def blend_resolutions(upper, lower, alpha):
        upper = tf.multiply(upper, alpha)
        lower = tf.multiply(lower, tf.subtract(1, alpha))
        return kl.Add()([upper, lower])

    # Number of 2x upsampling stages, e.g. 2x4x4 -> 4x8x8 -> 8x16x16 ...
    conv_loop = int(np.log2(out_size / start_size))

    z = kl.Input(shape=(self.z_dim,))
    fade = kl.Input(shape=(1,))

    # First resolution (start_size/2 x start_size x start_size).
    units = start_filters * start_size ** 2 * (start_size // 2)
    x = kl.Dense(units,
                 kernel_initializer=tf.keras.initializers.random_normal(stddev=0.01),
                 name='dense')(z)
    x = kl.Reshape((start_size // 2, start_size, start_size, start_filters))(x)
    x = kl.BatchNormalization()(x)
    x = kl.ReLU()(x)

    lower_res = None
    for resolution in range(conv_loop):
        filters = max(start_filters // 2 ** (resolution + 1), 4)
        x = kl.Conv3DTranspose(filters=filters, kernel_size=4, strides=2,
                               padding='same', kernel_initializer=self.conv_init,
                               use_bias=True,
                               name='conv_' + str(2 ** (resolution + 1)))(x)
        x = kl.BatchNormalization()(x)
        x = kl.ReLU()(x)
        # NOTE(review): this captures the *final* stage's activations as the
        # "lower" resolution; a true lower-res branch would more likely use
        # resolution == conv_loop - 2 — confirm intent before changing.
        if resolution == conv_loop - 1 and conv_loop > 1:
            lower_res = x

    # Conversion to 3-channel color; defined once so the upsampled
    # lower-resolution frames reuse the same weights.
    convert_to_image = kl.Conv3DTranspose(
        filters=3, kernel_size=1, strides=1, padding='same',
        kernel_initializer=self.conv_init, use_bias=True, activation='tanh',
        name='conv_to_img_' + str(x.get_shape().as_list()[-1]))
    x = convert_to_image(x)

    # Fade the previous resolution stage's output into the final stage.
    if self.fade and lower_res is not None:
        lower_upsampled = kl.UpSampling3D()(lower_res)
        lower_upsampled = convert_to_image(lower_upsampled)
        x = kl.Lambda(lambda t: blend_resolutions(t[0], t[1], t[2]))(
            [x, lower_upsampled, fade])

    return tf.keras.models.Model(inputs=[z, fade], outputs=x, name='generator')
def make_generator(params):
    """Basic VAE decoder: latent vector of size params['num_latent_layers']
    -> single-channel volume of raw values (no final activation)."""
    n_features = params['num_latent_layers']
    gen = tf.keras.Sequential()
    gen.add(tfl.InputLayer(input_shape=(n_features, )))
    # Project to a 4x4x4x512 seed block.
    gen.add(tfl.Dense(4 * 4 * 4 * 512))
    gen.add(tfl.Activation(tf.nn.relu))
    gen.add(tfl.Reshape(target_shape=(4, 4, 4, 512)))
    # Four 2x-upsampling transpose convs with relu.
    for filters in (256, 128, 64, 32):
        gen.add(tfl.Conv3DTranspose(filters, (2, 2, 2), strides=(2, 2, 2)))
        gen.add(tfl.Activation(tf.nn.relu))
    # Final stride-1 projection to one channel (no activation).
    gen.add(tfl.Conv3DTranspose(1, (2, 2, 2), strides=(1, 1, 1), padding="same"))
    return gen
def make_generator_model():
    """3D generator: noise_dim-dim vector -> (30, 30, 30, CHANNELS) volume.

    NOTE(review): the final Conv3DTranspose applies tanh ([-1, 1]) and is then
    followed by a ReLU, which zeroes the negative half so the effective output
    range is [0, 1) — confirm this stacking is intentional.
    """
    model = tf.keras.Sequential()
    # Project the noise vector and reshape to a 5x5x5 block with CHANNELS*4 channels.
    model.add(
        layers.Dense(5 * 5 * 5 * CHANNELS * 4,
                     use_bias=False,
                     input_shape=(noise_dim, )))
    #model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    model.add(layers.Reshape((5, 5, 5, CHANNELS * 4)))
    # The asserts check the expected output shape; None is the batch size.
    assert model.output_shape == (None, 5, 5, 5, CHANNELS * 4)
    # Stride-1 refinement stage.
    model.add(
        layers.Conv3DTranspose(CHANNELS * 2, (5, 5, 5),
                               strides=(1, 1, 1),
                               padding='same',
                               use_bias=False))
    assert model.output_shape == (None, 5, 5, 5, CHANNELS * 2)
    #model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    # 3x upsampling stage: 5 -> 15.
    model.add(
        layers.Conv3DTranspose(CHANNELS, (5, 5, 5),
                               strides=(3, 3, 3),
                               padding='same',
                               use_bias=False))
    assert model.output_shape == (None, 15, 15, 15, CHANNELS)
    #model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    # Final 2x upsampling stage: 15 -> 30, tanh output.
    model.add(
        layers.Conv3DTranspose(CHANNELS, (5, 5, 5),
                               strides=(2, 2, 2),
                               padding='same',
                               use_bias=False,
                               activation='tanh'))
    assert model.output_shape == (
        None, 30, 30, 30, CHANNELS
    )  #What is the desired output size for the whole generator?
    # See the NOTE in the docstring about ReLU-after-tanh.
    model.add(layers.ReLU())
    return model
def fg_mask(fg):
    """Clone the foreground generator `fg` and append a sigmoid head that
    produces a single-channel mask volume in (0, 1)."""
    mask = tf.keras.models.clone_model(fg)
    head = layers.Conv3DTranspose(1, 4, strides=1, use_bias=False,
                                  padding='same', activation='sigmoid')
    mask.add(head)
    return mask
def trans_conv3d_block(conv, conv_merge, n_filter):
    """Merge block: stride-1 transpose conv on `conv`, concatenation with the
    skip tensor `conv_merge`, then two conv + BN + relu refinement stages."""
    up = layers.Conv3DTranspose(512 * n_filter, 2, strides=1, padding='same')(conv)
    merged = layers.concatenate([conv_merge, up], axis=-1)
    out = merged
    # Two identical refinement stages at 256*n_filter channels.
    for _ in range(2):
        out = layers.Conv3D(256 * n_filter, 3, strides=1, padding='same',
                            kernel_initializer='he_normal')(out)
        out = layers.BatchNormalization()(out)
        out = layers.Activation("relu")(out)
    return out
def make_generator_model(BASE):
    """3D DCGAN generator parameterized by seed size BASE:
    100-dim noise -> (4*BASE, 4*BASE, 4*BASE, 1) volume in [-1, 1]."""
    B2 = BASE * 2
    B3 = BASE * 4
    gen = tf.keras.Sequential()

    # Project and reshape the latent vector to a BASE^3 x 256 seed block.
    gen.add(layers.Dense(BASE * BASE * BASE * 256, use_bias=False, input_shape=(100, )))
    gen.add(layers.BatchNormalization())
    gen.add(layers.LeakyReLU())
    gen.add(layers.Reshape((BASE, BASE, BASE, 256)))
    assert gen.output_shape == (None, BASE, BASE, BASE, 256)  # None is the batch size

    # Stride-1 refinement stage.
    gen.add(layers.Conv3DTranspose(128, (5, 5, 5), strides=(1, 1, 1), padding='same', use_bias=False))
    assert gen.output_shape == (None, BASE, BASE, BASE, 128)
    gen.add(layers.BatchNormalization())
    gen.add(layers.LeakyReLU())

    # First 2x upsampling stage.
    gen.add(layers.Conv3DTranspose(64, (5, 5, 5), strides=(2, 2, 2), padding='same', use_bias=False))
    assert gen.output_shape == (None, B2, B2, B2, 64)
    gen.add(layers.BatchNormalization())
    gen.add(layers.LeakyReLU())

    # Second 2x upsampling stage with tanh output.
    gen.add(layers.Conv3DTranspose(1, (5, 5, 5), strides=(2, 2, 2), padding='same', use_bias=False,
                                   activation='tanh'))
    assert gen.output_shape == (None, B3, B3, B3, 1)
    return gen
def upres(x, filters, upsize, strides, ksize1, ksize2, cat):
    """Upsample `x` by transpose conv, concat the skip tensor `cat`, then
    apply a conv followed by a residual conv pair (BN + ReLU after each add)."""
    up = layers.Conv3DTranspose(filters, upsize, strides, padding='same')(x)
    merged = layers.Concatenate()([cat, up])
    main = layers.Conv3D(filters, ksize1, padding='same')(merged)
    main = BatchNormalization()(main)
    main = ReLU()(main)
    # Residual branch: re-convolve the activated tensor and add it back.
    branch = layers.Conv3D(filters, ksize2, padding='same')(main)
    out = Add()([main, branch])
    out = BatchNormalization()(out)
    return ReLU()(out)
def deconv3d(layer_input, skip_input, filters, axis=-1):
    """2x-upsampling decoder step: transpose conv + BN + relu, concat with the
    skip tensor, then one conv + BN + relu refinement."""
    up = layers.Conv3DTranspose(filters, (3, 3, 3), strides=(2, 2, 2),
                                padding='same')(layer_input)
    up = layers.BatchNormalization(axis=axis)(up)
    up = layers.Activation('relu')(up)
    merged = layers.concatenate([up, skip_input], axis=axis)
    out = layers.Conv3D(filters, (3, 3, 3), padding='same')(merged)
    out = layers.BatchNormalization(axis=axis)(out)
    return layers.Activation('relu')(out)
def _transition_up_3d(skip_connection, block_to_upsample, filters_to_keep):
    """3D version of _transition_up: upsample `block_to_upsample` by a factor
    of 2 and concatenate the result with `skip_connection` on the channel axis."""
    upsampled = layers.Conv3DTranspose(
        filters_to_keep,
        kernel_size=3,
        strides=2,
        padding='same',
        kernel_initializer='he_uniform')(block_to_upsample)
    return layers.concatenate([upsampled, skip_connection], axis=-1)
def unet(inputs, num_layers=6, num_classes=2, _3d=False):
    """Build a U-Net over inputs['dat'] with `num_layers` contracting levels.

    Kernels are in-plane (1, 3, 3) unless _3d is True, in which case they are
    (3, 3, 3); down/upsampling strides are always in-plane (1, 2, 2).  Skip
    connections are merged by tensor ADDITION, not concatenation.  Returns a
    Model with a single logits head named 'zones'.
    """
    # --- Define kwargs dictionary
    if _3d:
        kwargs = {'kernel_size': (3, 3, 3), 'padding': 'same'}
    else:
        kwargs = {'kernel_size': (1, 3, 3), 'padding': 'same'}
    # --- Define lambda functions
    conv = lambda x, filters, strides: layers.Conv3D(
        filters=filters, strides=strides, **kwargs)(x)
    norm = lambda x: layers.BatchNormalization()(x)
    relu = lambda x: layers.LeakyReLU()(x)
    tran = lambda x, filters, strides: layers.Conv3DTranspose(
        filters=filters, strides=strides, **kwargs)(x)
    # --- Define stride-1, stride-2 blocks (conv/tran -> BN -> LeakyReLU)
    conv1 = lambda filters, x: relu(norm(conv(x, filters, strides=1)))
    conv2 = lambda filters, x: relu(norm(conv(x, filters, strides=(1, 2, 2))))
    tran2 = lambda filters, x: relu(norm(tran(x, filters, strides=(1, 2, 2))))
    # --- Define simple layers: c_layer downsamples then refines;
    #     e_layer refines then upsamples.
    c_layer = lambda filters, x: conv1(filters, conv2(filters, x))
    e_layer = lambda filters1, filters2, x: tran2(filters1, conv1(filters2, x))

    # Contracting path: level 0 is stride-1 only; deeper levels downsample.
    contracting_layers = []
    for i in range(num_layers):  # 0,1,2,3,4,5
        if i == 0:
            contracting_layers.append(conv1(8, inputs['dat']))
        else:
            contracting_layers.append(
                c_layer(16 * i, contracting_layers[i - 1]))

    # Expanding path: each step adds (not concatenates) the matching
    # contracting-level tensor before upsampling.
    expanding_layers = []
    for j in reversed(range(num_layers - 1)):  # 4,3,2,1,0
        if j == num_layers - 2:
            expanding_layers.append(tran2(16 * j, contracting_layers[j + 1]))
        else:
            expanding_layers.append(
                e_layer(16 * j if j != 0 else 8, 16 * (j + 1),
                        expanding_layers[-1] + contracting_layers[j + 1]))
    # Two final stride-1 blocks over the top-level merge.
    last_layer = conv1(8, conv1(8, expanding_layers[-1] + contracting_layers[0]))

    # --- Create logits
    logits = {}
    logits['zones'] = layers.Conv3D(filters=num_classes,
                                    name='zones', **kwargs)(last_layer)

    # --- Create model
    model = Model(inputs=inputs, outputs=logits)

    return model
def make_decoder(inp_shape, batch_size, params):
    """Decoder of the autoencoder: (4, 4, 4, 512) features -> one-channel logits.

    `inp_shape` and `params` are accepted for signature compatibility with the
    other builders but are not used here.
    """
    inputs = tf.keras.Input(batch_size=batch_size, shape=(4, 4, 4, 512))
    h = inputs
    # Four 2x-upsampling transpose convs: 256 -> 128 -> 64 -> 12 channels.
    for filters in (256, 128, 64, 12):
        h = tfl.Conv3DTranspose(filters, (2, 2, 2), use_bias=True, strides=2)(h)
        h = tfl.Activation(tf.nn.relu)(h)
    # 1x1x1 conv down to a single channel of raw logits.
    ae_output_logits = tfl.Conv3D(1, (1, 1, 1), use_bias=True)(h)
    return tf.keras.Model(inputs=inputs, outputs=ae_output_logits)
def background():
    """Build the background generator: 100-dim noise -> 3-channel volume.

    Only the in-plane axes are upsampled (strides (2, 2, 1)); the depth axis
    stays at its seed size.  Cleanup: removed the unused `in_shape` local.
    """
    bg = tf.keras.Sequential()
    # Project the 100-dim noise and reshape to a 4x4x1 seed volume.
    bg.add(layers.Dense(4 * 4, use_bias=False, input_shape=(100,)))
    bg.add(layers.BatchNormalization())
    bg.add(layers.LeakyReLU())
    bg.add(layers.Reshape((4, 4, 1, 1)))
    # (2, 4, 4) kernel at stride 1; 512 channels, spatial size unchanged.
    bg.add(layers.Conv3DTranspose(512, (2, 4, 4), strides=1, use_bias=False, padding='same'))
    # Three in-plane 2x upsamplings: 256 -> 128 -> 128 channels.
    bg.add(layers.Conv3DTranspose(256, 4, strides=(2, 2, 1), use_bias=False, padding='same'))
    bg.add(layers.Conv3DTranspose(128, 4, strides=(2, 2, 1), use_bias=False, padding='same'))
    bg.add(layers.Conv3DTranspose(128, 4, strides=(2, 2, 1), use_bias=False, padding='same'))
    # Output: 3-channel volume in [-1, 1] (tanh).
    bg.add(layers.Conv3DTranspose(3, 4, strides=(2, 2, 1), use_bias=False,
                                  padding='same', activation='tanh'))
    return bg
def up_conv_block(self, m_in, prev, filters, length=1, strides=(2, 2, 2)):
    """3D up-convolution block with a residual connection: upsample `m_in`,
    PReLU + BN, concat the skip tensor `prev`, run the side conv stack, then
    add the raw upsampled tensor back onto the result."""
    upsampled = layers.Conv3DTranspose(
        filters,
        self.transpose_kernel_size,
        strides=strides,
        padding="same",
    )(m_in)
    act = layers.PReLU()(upsampled)
    act = layers.BatchNormalization()(act)
    merged = layers.Concatenate()([act, prev])
    refined = self.side_conv_block(merged, filters, length, add=False)
    # Residual: the pre-activation upsampled tensor is added back.
    return refined + upsampled
def __init__(self, channels, n_convs, norm=False, drop=False, training=False):
    """V-Net decoder block: 2x transpose-conv upsampling (halving channels)
    plus 5x5x5 refinement convs, with optional batch renorm and dropout."""
    super(VNetUpBlock, self).__init__()
    self.channels = channels
    self.n_convs = n_convs
    self.training = training
    self.norm = norm
    self.drop = drop
    self.add = layers.Add()
    self.concatenate = layers.Concatenate()
    # Halve the channel count while doubling the spatial resolution.
    self.upsample = layers.Conv3DTranspose(
        filters=self.channels // 2, kernel_size=(2, 2, 2), strides=2,
        padding='valid', kernel_initializer='he_normal', activation=None)
    self.convolution = layers.Conv3D(
        filters=self.channels, kernel_size=(5, 5, 5), strides=1,
        padding='same', kernel_initializer='he_normal', activation=None)
    # Batch renormalization; trainable follows the block's `training` flag.
    self.batch_norm = layers.BatchNormalization(scale=False, renorm=True,
                                                trainable=self.training)
    self.activation = layers.Activation('relu')
    self.dropout = layers.Dropout(0.1)
def make_stack_net_v3(inp_shape, batch_size, params):
    """Just an Autoencoder.  Used to verify against v4.

    Encodes the concatenated (known_occ, known_free) voxel grids, bottlenecks
    through a latent vector of size params['num_latent_layers'], decodes back
    to one channel, and returns a model producing `predicted_occ` (sigmoid)
    and its complement `predicted_free`.

    Cleanup: the encoder now iterates the declared `n_filters` list and uses
    `filter_size` (both were previously dead locals whose values were
    re-hardcoded inline).
    """
    filter_size = [2, 2, 2]
    n_filters = [64, 128, 256, 512]

    inputs = {
        'conditioned_occ': tf.keras.Input(batch_size=batch_size, shape=inp_shape),
        'known_occ': tf.keras.Input(batch_size=batch_size, shape=inp_shape),
        'known_free': tf.keras.Input(batch_size=batch_size, shape=inp_shape),
    }

    # Encoder: concat occupancy/free grids, then conv + relu + 2x maxpool per stage.
    x = tfl.concatenate([inputs['known_occ'], inputs['known_free']], axis=4)
    for n_filter in n_filters:
        x = tfl.Conv3D(n_filter, filter_size, use_bias=True, padding="same")(x)
        x = tfl.Activation(tf.nn.relu)(x)
        x = tfl.MaxPool3D((2, 2, 2))(x)

    # Bottleneck: latent vector, then expand back to a 4x4x4x512 block.
    x = tfl.Flatten()(x)
    x = tfl.Dense(params['num_latent_layers'], activation='relu')(x)
    x = tfl.Dense(32768, activation='relu')(x)
    x = tfl.Reshape((4, 4, 4, 512))(x)

    # Decoder: four 2x-upsampling transpose convs, then 1x1x1 conv + sigmoid.
    for n_filter in [256, 128, 64, 12]:
        x = tfl.Conv3DTranspose(n_filter, filter_size, use_bias=True, strides=2)(x)
        x = tfl.Activation(tf.nn.relu)(x)
    x = tfl.Conv3D(1, (1, 1, 1), use_bias=True)(x)
    x = tfl.Activation(tf.nn.sigmoid)(x)

    output = {"predicted_occ": x, "predicted_free": 1 - x}
    return tf.keras.Model(inputs=inputs, outputs=output)