class UNet(object):
    """
    Simple interface to create a U-Net network with custom parameters.

    Args:
        input_size: input size for the network, spatial dims + channels
            (3 entries -> 2D U-Net, 4 entries -> 3D U-Net).
        kernel_size: kernel size for the convolutional layers.
        strides: stride shape for the convolutional layers.
        deconv_kernel_size: kernel size for the deconvolutional layers.
        deconv_strides: stride shape for the deconvolutional layers.
        pool_size: pool size for the MaxPooling layers.
        pool_strides: strides for the MaxPooling layers.
        depth: depth (number of down/up-sampling levels) of the U-Net model.
        activation: activation function used in the U-Net layers.
        padding: padding used for the input data in all the U-Net layers.
        n_initial_filters: number of feature maps in the first layer.
        add_batch_normalization: whether batch normalization is applied
            after the convolutional layers of the contracting path.
        kernel_regularizer: kernel regularizer for the convolutional layers.
        bias_regularizer: bias regularizer for the convolutional layers.
        n_classes: number of classes in the labels.
    """

    def __init__(self,
                 input_size=(128, 128, 128, 1),
                 kernel_size=(3, 3, 3),
                 strides=(1, 1, 1),
                 deconv_kernel_size=(2, 2, 2),
                 deconv_strides=(2, 2, 2),
                 pool_size=(2, 2, 2),
                 pool_strides=(2, 2, 2),
                 depth=5,
                 activation='relu',
                 padding='same',
                 n_initial_filters=8,
                 add_batch_normalization=True,
                 kernel_regularizer=regularizers.l2(0.001),
                 bias_regularizer=regularizers.l2(0.001),
                 n_classes=3):
        self.input_size = input_size
        # Rank of the input data: 3 -> (H, W, C) images, 4 -> (D, H, W, C) volumes.
        self.n_dim = len(input_size)
        self.kernel_size = kernel_size
        self.strides = strides
        self.deconv_kernel_size = deconv_kernel_size
        self.deconv_strides = deconv_strides
        self.depth = depth
        self.activation = activation
        self.padding = padding
        self.n_initial_filters = n_initial_filters
        self.kernel_regularizer = kernel_regularizer
        self.bias_regularizer = bias_regularizer
        self.add_batch_normalization = add_batch_normalization
        self.pool_size = pool_size
        self.pool_strides = pool_strides
        self.n_classes = n_classes

    def create_model(self):
        """
        Create the U-Net network based on the configuration.

        The resulting Keras model is stored in ``self.model``; nothing is
        returned. If the input rank is not 3 or 4, an error is printed and
        the method returns without building a model.
        """
        # Choose 2D or 3D layer implementations from the input rank.
        if self.n_dim == 3:
            conv_layer = layers.Conv2D
            max_pool_layer = layers.MaxPooling2D
            conv_transpose_layer = layers.Conv2DTranspose
            softmax_kernel_size = (1, 1)
        elif self.n_dim == 4:
            conv_layer = layers.Conv3D
            max_pool_layer = layers.MaxPooling3D
            conv_transpose_layer = layers.Conv3DTranspose
            softmax_kernel_size = (1, 1, 1)
        else:
            print("Could not handle input dimensions.")
            return

        # Input layer.
        temp_layer = layers.Input(shape=self.input_size)
        input_tensor = temp_layer

        # Skip connections saved from the contracting path, one per level,
        # to be concatenated into the expanding path.
        downsampling_layers = []

        # Down-sampling (contracting) branch: two conv(+BN)+activation per
        # level, feature maps doubling with depth, then max-pooling.
        for i in range(self.depth):
            for j in range(2):
                temp_layer = conv_layer(
                    self.n_initial_filters * pow(2, i),
                    kernel_size=self.kernel_size,
                    strides=self.strides,
                    padding=self.padding,
                    activation='linear',
                    kernel_regularizer=self.kernel_regularizer,
                    bias_regularizer=self.bias_regularizer)(temp_layer)
                if self.add_batch_normalization:
                    # Channels-last data, so normalize over the last axis.
                    temp_layer = layers.BatchNormalization(axis=-1)(temp_layer)
                temp_layer = layers.Activation(self.activation)(temp_layer)
            downsampling_layers.append(temp_layer)
            temp_layer = max_pool_layer(pool_size=self.pool_size,
                                        strides=self.pool_strides,
                                        padding=self.padding)(temp_layer)

        # Bottleneck: two conv(+BN)+activation at the deepest level.
        for j in range(2):
            temp_layer = conv_layer(
                self.n_initial_filters * pow(2, self.depth),
                kernel_size=self.kernel_size,
                strides=self.strides,
                padding=self.padding,
                activation='linear',
                kernel_regularizer=self.kernel_regularizer,
                bias_regularizer=self.bias_regularizer)(temp_layer)
            if self.add_batch_normalization:
                temp_layer = layers.BatchNormalization(axis=-1)(temp_layer)
            temp_layer = layers.Activation(self.activation)(temp_layer)

        # Up-sampling (expanding) branch: transposed conv, concatenation with
        # the matching skip connection, then two conv+activation.
        # NOTE(review): batch normalization is not applied in this branch even
        # when add_batch_normalization is True — confirm this asymmetry with
        # the contracting path is intended.
        for i in range(self.depth):
            temp_layer = conv_transpose_layer(
                self.n_initial_filters * pow(2, (self.depth - 1) - i),
                kernel_size=self.deconv_kernel_size,
                strides=self.deconv_strides,
                activation='linear',
                padding=self.padding,
                kernel_regularizer=self.kernel_regularizer,
                bias_regularizer=self.bias_regularizer)(temp_layer)
            temp_layer = layers.Activation(self.activation)(temp_layer)
            # Concatenate on the channel axis (= self.n_dim for channels-last
            # batched tensors of rank n_dim + 1).
            temp_layer = layers.Concatenate(axis=self.n_dim)(
                [downsampling_layers[(self.depth - 1) - i], temp_layer])
            for j in range(2):
                temp_layer = conv_layer(
                    self.n_initial_filters * pow(2, (self.depth - 1) - i),
                    kernel_size=self.kernel_size,
                    strides=self.strides,
                    padding=self.padding,
                    activation='linear',
                    kernel_regularizer=self.kernel_regularizer,
                    bias_regularizer=self.bias_regularizer)(temp_layer)
                temp_layer = layers.Activation(self.activation)(temp_layer)

        # 1x1(x1) convolution to map the feature maps to per-class scores,
        # followed by a softmax over the channel axis.
        temp_layer = conv_layer(
            self.n_classes,
            kernel_size=softmax_kernel_size,
            strides=self.strides,
            padding='same',
            activation='linear',
            kernel_regularizer=self.kernel_regularizer,
            bias_regularizer=self.bias_regularizer)(temp_layer)
        output_tensor = layers.Softmax(axis=-1)(temp_layer)

        self.model = Model(inputs=[input_tensor], outputs=[output_tensor])

    def set_initial_weights(self, weights):
        """
        Set the initial weights of the U-Net, in case training was stopped
        and then resumed.

        Any exception raised by Keras (e.g. when the configured model has
        different properties than the one whose weights were stored)
        propagates to the caller.
        """
        # The previous try/except that only re-raised was a no-op; let
        # load_weights raise directly.
        self.model.load_weights(weights)

    def get_n_parameters(self):
        """Get the total number of parameters of the model."""
        return self.model.count_params()

    def summary(self):
        """Print out a summary of the model."""
        print(self.model.summary())
# Tail of a functional-API CNN (AlexNet-like head, presumably — `x` and
# `inputs` are defined earlier, outside this chunk): conv block, then the
# fully-connected classifier.
x = Conv2D(filters=384, kernel_size=3, strides=1, activation="relu")(x)
x = ZeroPadding2D(padding=1)(x)
x = Conv2D(filters=256, kernel_size=3, strides=1, activation="relu")(x)
x = MaxPooling2D(pool_size=3, strides=2)(x)
x = Flatten()(x)
x = Dense(4096)(x)
x = Dense(4096)(x)
outputs = Dense(1000)(x)
# At this point, you can create a Model by specifying its inputs and
# outputs in the graph of layers
model_functional = Model(inputs=inputs, outputs=outputs, name="CNNmodel")
model_functional.summary()
# Sanity check: the functional build matches the earlier model (defined
# outside this chunk) parameter-for-parameter.
assert model_functional.count_params() == model.count_params()
"""
3. SUBCLASS API
Where you implement everything from scratch on your own.
Use this if you have complex, out-of-the-box research use cases.
"""


# NOTE(review): this class definition is truncated at the chunk boundary
# (the Conv2D call below is cut mid-argument-list), so it is documented
# but not rewritten here.
class triBlockArchitecture(tf.keras.layers.Layer):
    # NOTE(review): `block=[True, True, True]` is a mutable default
    # argument — every instance created without an explicit `block` shares
    # the same list. Consider `block=None` + an in-body default.
    def __init__(self, block=[True, True, True], f=1, k=1, p=1, s=1):
        self.block = block
        super(triBlockArchitecture, self).__init__()
        self.pad = ZeroPadding2D(p)
        self.conv = Conv2D(filters=f, kernel_size=k, strides=s,
# Finish the accuracy subplot (started outside this chunk), then draw the
# loss curves for the MLP training history side by side.
plt.ylabel('Accuracy')
plt.title('Accuracy on train and validation set')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(mlp_hist.epoch, mlp_hist.history['loss'], 'b--', label='Train')
plt.plot(mlp_hist.epoch, mlp_hist.history['val_loss'], 'b', label='Validation')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.title('Loss on train and validation set')
plt.legend()
plt.show()
print(mlp.name, ": Test loss:", mlp_score[0], "Test accuracy:", mlp_score[1])
# Record this model's results in the running comparison tables
# (res_* lists are defined outside this chunk).
res_model.append("MLP")
res_param_string = "1 hidden layer," + str(
    mlp.count_params()) + " parameters \n" + str(mlp_hist.epoch[-1] +
                                                 1) + " epochs"
res_param.append(res_param_string)
res_train_acc.append(mlp_hist.history['accuracy'][-1])
res_valid_acc.append(mlp_hist.history['val_accuracy'][-1])
res_test_acc.append(mlp_score[1])

## MLP: overfitting ##
# Epoch budgets for the experiments below; epoch[0] is not to be used.
epochs = [0, 50, 10, 50]
# Small MLP: one 32-unit hidden layer over the flattened input
# (d2_X_train is defined outside this chunk), softmax over 10 classes.
inputs = keras.Input(shape=(d2_X_train.shape[1], ))
x = Dense(32, activation='relu')(inputs)
outputs = Dense(10, activation='softmax')(x)
mlp1 = Model(inputs, outputs, name="MLP1")
def model(self, left_input, right_input, input_shape):
    """
    Build and compile a Siamese similarity network.

    Args:
        left_input: Keras input tensor for the first image of the pair.
        right_input: Keras input tensor for the second image of the pair.
        input_shape: shape of a single input image (passed to VGG16).

    Returns:
        The compiled Keras siamese model producing a similarity score
        in [0, 1] for an image pair.
    """

    def cosine_distance(vecs, normalize=False):
        # Element-wise product of the (optionally L2-normalized) vectors.
        # NOTE: unused in the graph below; kept for experimentation.
        x, y = vecs
        if normalize:
            x = K.l2_normalize(x, axis=0)
            # Fixed: previously normalized x a second time and left y
            # untouched (y = K.l2_normalize(x, axis=0)).
            y = K.l2_normalize(y, axis=0)
        return K.prod(K.stack([x, y], axis=1), axis=1)

    def cosine_distance_output_shape(shapes):
        return shapes[0]

    # Base encoder: VGG16 pretrained on ImageNet (no classifier head),
    # topped with a small pooled, sigmoid-activated embedding.
    basemodel = VGG16(weights='imagenet',
                      include_top=False,
                      input_tensor=Input(shape=input_shape))
    headmodel = basemodel.output
    headmodel = MaxPooling2D()(headmodel)
    headmodel = Flatten()(headmodel)
    headmodel = Dense(16, activation='sigmoid')(headmodel)
    model = Model(inputs=basemodel.input, outputs=headmodel)

    # Shared encoder: generate the encodings (feature vectors) for the
    # two images with the same weights.
    encoded_l = model(left_input)
    encoded_r = model(right_input)

    # Customized layer computing the absolute (L1) difference between
    # the two encodings.
    L1_layer = Lambda(lambda tensors: tf.abs(tensors[0] - tensors[1]))
    L1_distance = L1_layer([encoded_l, encoded_r])

    # Dense layer with a sigmoid unit to generate the similarity score.
    prediction = Dense(1, activation='sigmoid')(L1_distance)

    # Connect the inputs with the outputs.
    siamese_net = Model(inputs=[left_input, right_input], outputs=prediction)
    siamese_net.count_params()

    optimizer = Adam(self.lr)
    # TODO: get layerwise learning rates and momentum annealing scheme
    # described in the paper working.
    siamese_net.compile(loss=self.loss,
                        optimizer=optimizer,
                        metrics=['accuracy'])
    return siamese_net