class ConfigurableTNNCell(TNNCell):
    """TNN cell whose conductance and power-loss sub-networks are built from
    a layer configuration dict.

    layer_cfg is expected to hold two lists of Dense-layer kwargs under the
    keys 'g' (conductance net) and 'p' (power-loss net).  When
    p_is_branchful is True, the power-loss network gets one parallel branch
    per target parameter; otherwise it is a single Sequential stack.
    """

    def __init__(self, x_cols, p_is_branchful, layer_cfg, drop_g=None, *args, **kwargs):
        super().__init__(x_cols, drop_g, *args, **kwargs)
        self.g_layer_cfg = layer_cfg['g']
        self.p_layer_cfg = layer_cfg['p']
        self.p_is_branchful = p_is_branchful
        # conductances: Dense stack per config, then abs() to keep
        # conductance values non-negative
        self.conductance_net = tf.keras.Sequential([
            Dense(**l_d) for l_d in self.g_layer_cfg
        ])
        self.conductance_net.add(layers.Lambda(lambda x: tf.abs(x)))

    def build(self, inputs):
        # NOTE(review): `inputs` is indexed like a shape tuple
        # (inputs[0]=batch, inputs[1]=feature dim) — confirm against TNNCell.
        super().build(inputs)
        # power loss
        if self.p_is_branchful:
            # Branch input: features concatenated with the cell's own outputs.
            x = Input(batch_input_shape=(inputs[0], inputs[1] + self.output_size))
            out_l = []
            # One hidden branch per target parameter; the final Dense layer
            # (p_layer_cfg[-1]) is shared across branches after concatenation.
            for t in cfg.data_cfg['Target_param_names']:
                # deepcopy so renaming layers per-branch does not mutate the
                # shared p_layer_cfg dicts
                interim_cfg = [deepcopy(p) for p in self.p_layer_cfg[:-1]]
                for l in interim_cfg:
                    # strip the unit suffix so the branch name is a valid
                    # layer-name component (idempotent inside this loop)
                    t = t.replace('_[°C]', '')
                    l['name'] = f'{l["name"]}_{t}'
                out_l.append(tf.keras.Sequential([
                    Dense(**l_d) for l_d in interim_cfg
                ])(x))
            if len(out_l) > 1:
                y = Concatenate()(out_l)
            else:
                y = out_l[0]
            # shared head + abs() to keep power loss non-negative
            y = Dense(**self.p_layer_cfg[-1])(y)
            y = layers.Lambda(lambda z: tf.abs(z))(y)
            self.ploss_out_gen = Model(inputs=x, outputs=y)
        else:
            # Single unbranched stack, same non-negativity constraint.
            self.ploss_out_gen = tf.keras.Sequential([
                Dense(**l_d) for l_d in self.p_layer_cfg
            ])
            self.ploss_out_gen.add(layers.Lambda(lambda x: tf.abs(x)))

    def get_config(self):
        """Return the serialization config, including this subclass's args."""
        config = super().get_config()
        config.update({
            'p_is_branchful': self.p_is_branchful,
            'layer_cfg': {'g': self.g_layer_cfg, 'p': self.p_layer_cfg},
        })
        return config
# Echo the result of the previous notebook cell.
print(y)
#%%
# multilayer perceptron
from tensorflow.keras import models, layers
import numpy as np

# XOR truth table: four input pairs and their targets.
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
T = np.array([0, 1, 1, 0])

# Two 4-unit sigmoid hidden layers feeding one sigmoid output unit.
model = models.Sequential()
for dense in (
    layers.Dense(units=4, activation='sigmoid', input_shape=(2,)),
    layers.Dense(units=4, activation='sigmoid'),
    layers.Dense(units=1, activation='sigmoid'),
):
    model.add(dense)

model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
model.fit(X, T, epochs=10000, batch_size=4)

# test: evaluate on the training set and print accuracy plus raw predictions
Xtest_loss, Xtest_acc = model.evaluate(X, T)
print(Xtest_acc)
Ttest = model.predict(X)
print(Ttest)
n = DenseNet121(include_top=False, weights='imagenet', input_shape=(montage_image_size, montage_image_size, 3), pooling='avg') # do not train first layers, I want to only train # the 4 last layers (my own choice, up to you) for layer in n.layers[:-2]: layer.trainable = False model = Sequential() model.add( TimeDistributed(n, input_shape=(num_unique_time_days, montage_image_size, montage_image_size, 3))) model.add(TimeDistributed(Flatten())) model.add(LSTM(256, activation='relu', return_sequences=False)) # flatten the volume, then FC => RELU => BN => DROPOUT model.add(Flatten()) model.add(Dense(16)) model.add(Activation("relu")) model.add(BatchNormalization()) model.add(Dropout(0.5)) # apply another FC layer, this one to match the number of nodes # coming out of the MLP model.add(Dense(4))
def build(self, hp):
    """Build and compile the text classifier for a keras-tuner search.

    Hyperparameters fixed on the instance (initial_learning_rate, dropout)
    take precedence; otherwise they are sampled from `hp`.  The architecture
    depends on `self.embedding_matrix`:
      * the string 'bert'  -> frozen TF-Hub BERT encoder + dense head
      * a weight matrix    -> frozen GloVe Embedding + dense head
      * None               -> trainable Embedding + dense head

    Returns the compiled tf.keras Model.
    """
    pad_sequences_maxlen = self.pad_sequences_maxlen
    max_words = self.max_words
    number_of_classes = self.number_of_classes
    output_dim = self.output_dim
    embedding_matrix = self.embedding_matrix
    initial_learning_rate = self.initial_learning_rate if self.initial_learning_rate else hp.Float(
        'initial_learning_rate', min_value=0.01, max_value=0.1, default=0.01, step=0.04)
    dropout = self.dropout if self.dropout else hp.Float(
        'dropout', min_value=0.2, max_value=0.4, default=0.3, step=0.1)
    recurrent_dropout = 0  # tuning of recurrent_dropout currently disabled

    # BUGFIX: embedding_matrix may be a numpy weight matrix; comparing an
    # array to a string with == performs an elementwise comparison.  Gate on
    # the type before comparing.
    if isinstance(embedding_matrix, str) and embedding_matrix == 'bert':
        input_word_ids = tf.keras.Input(shape=(pad_sequences_maxlen, ), dtype=tf.int32)
        input_mask = tf.keras.Input(shape=(pad_sequences_maxlen, ), dtype=tf.int32)
        segment_ids = tf.keras.Input(shape=(pad_sequences_maxlen, ), dtype=tf.int32)
        bert_layer = hub.KerasLayer(
            "https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/1",
            trainable=False)
        _, sequence_output = bert_layer(
            inputs=[input_word_ids, input_mask, segment_ids])
        # [CLS] token embedding: a fixed-size sentence representation.
        clf_output = sequence_output[:, 0, :]
        # BUGFIX: the head previously consumed the full 3-D sequence_output
        # (leaving clf_output dead), which yields a per-token softmax that is
        # incompatible with categorical_crossentropy on per-example labels.
        out = Dense(128, activation='relu')(clf_output)
        out = Dense(number_of_classes + 1, activation='softmax')(out)
        model = Model(inputs=[input_word_ids, input_mask, segment_ids],
                      outputs=out)
    else:
        model = Sequential()
        if embedding_matrix is not None:
            # Frozen pre-trained (GloVe) embeddings.
            print("Including glove")
            model.add(
                Embedding(max_words,
                          output_dim=output_dim,
                          input_length=pad_sequences_maxlen,
                          weights=[embedding_matrix],
                          trainable=False))
        else:
            # Trainable embeddings learned from scratch.
            model.add(
                Embedding(max_words,
                          output_dim=output_dim,
                          input_length=pad_sequences_maxlen))
        model.add(Flatten())
        model.add(Dropout(dropout))
        model.add(Dense(128, activation='relu'))
        model.add(Dense(number_of_classes + 1, activation='softmax'))

    lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
        initial_learning_rate=initial_learning_rate, decay_steps=40, decay_rate=0.9)
    # NOTE(review): both a decaying schedule and an optimizer-level `decay`
    # are applied — confirm the double decay is intentional.
    opt = Adam(learning_rate=lr_schedule, decay=initial_learning_rate / 20)
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy', f1_m])
    print(model.summary())
    return model
from keras.layers.convolutional import MaxPooling2D
from keras.layers.core import Activation, Flatten, Dropout, Dense
from keras import backend as K
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import Adam
from keras.preprocessing import image
from keras.preprocessing.image import img_to_array
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt

# Channels-last input, so BatchNormalization acts on the final axis.
chanDim = -1

# CONV => RELU => BN stages with pooling/dropout between stages.
model = Sequential()
for piece in (
    # stage 1: 32 filters, aggressive 3x3 pooling
    Conv2D(32, (3, 3), padding="same", input_shape=(256, 256, 3)),
    Activation("relu"),
    BatchNormalization(axis=chanDim),
    MaxPooling2D(pool_size=(3, 3)),
    Dropout(0.25),
    # stage 2: two 64-filter conv blocks, then 2x2 pooling
    Conv2D(64, (3, 3), padding="same"),
    Activation("relu"),
    BatchNormalization(axis=chanDim),
    Conv2D(64, (3, 3), padding="same"),
    Activation("relu"),
    BatchNormalization(axis=chanDim),
    MaxPooling2D(pool_size=(2, 2)),
    Dropout(0.25),
    # stage 3 (first block visible in this chunk): 128 filters
    Conv2D(128, (3, 3), padding="same"),
    Activation("relu"),
    BatchNormalization(axis=chanDim),
):
    model.add(piece)
# Second decoder head of the composite LSTM autoencoder (first head and
# `visible`/`decoder1` are defined earlier in the file).
decoder2 = LSTM(100, activation='relu', return_sequences=True)(decoder2)
decoder2 = TimeDistributed(Dense(1))(decoder2)
# tie it together: one encoder, two decoder outputs
# (reconstruction seq_in and prediction seq_out)
model = Model(inputs=visible, outputs=[decoder1, decoder2])
model.compile(optimizer='adam', loss='mse')
plot_model(model, show_shapes=True, to_file='composite_lstm_autoencoder.png')
# fit model
model.fit(seq_in, [seq_in, seq_out], epochs=300, verbose=0)
# demonstrate prediction
yhat = model.predict(seq_in, verbose=0)
print(yhat)
# Prediction LSTM Autoencoder
# NOTE(review): the triple-quoted block below comments out an alternative
# prediction-only autoencoder; it is not closed within this chunk.
'''
# define input sequence
seq_in = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
# reshape input into [samples, timesteps, features]
n_in = len(seq_in)
seq_in = seq_in.reshape((1, n_in, 1))
# prepare output sequence
seq_out = seq_in[:, 1:, :]
n_out = n_in - 1
# define model
model = Sequential()
model.add(LSTM(100, activation='relu', input_shape=(n_in,1)))
model.add(RepeatVector(n_out))
def autoencoder(type, start_filters=8, kernel=(3,3), input_size=(128,128,1)):
    """
    Generates a range of Autoencoder models for 2D image denoising.

    type : which architecture to build (str):
        "Unet_v3"     - 5-level U-Net built from the DownSampleLayer2D /
                        UpSampleLayer2D helpers
        "Unet"        - hand-rolled 4-level U-Net with skip connections
        "upsc_v2"     - MaxPooling encoder / UpSampling decoder
        "upsc_v2_avg" - as upsc_v2 but with AveragePooling
        "upsc_v3"     - deeper pooling stack with a light Dropout bottleneck
        "Tran"/"UpSc" - described in the original docs but not implemented here
        "ANN"         - fully connected autoencoder over 256-element vectors
    start_filters : Number of filters in the first layer of the model; each
        encoder level doubles the filter count toward the centre (int)
    kernel : Shape of convolutional kernel to be used in the model (tuple)
    input_size : input tensor shape for the convolutional variants (tuple)

    Returns the (uncompiled) keras Model.
    Raises ValueError for an unknown `type`.
    NOTE: the parameter name `type` shadows the builtin but is kept for
    backward compatibility with keyword callers.
    """
    input_layer = Input(shape=(input_size))
    if type == "Unet_v3":
        sf = start_filters
        # Encoder: 5 downsampling levels, filters x1..x16.
        d1, c1 = DownSampleLayer2D(input_layer, num_filt=1, start_filters=sf, kernel=kernel)
        d2, c2 = DownSampleLayer2D(d1, num_filt=2, start_filters=sf, kernel=kernel)
        d3, c3 = DownSampleLayer2D(d2, num_filt=4, start_filters=sf, kernel=kernel)
        d4, c4 = DownSampleLayer2D(d3, num_filt=8, start_filters=sf, kernel=kernel)
        d5, c5 = DownSampleLayer2D(d4, num_filt=16, start_filters=sf, kernel=kernel)
        # Bottleneck.
        convm = Conv2D(sf * 32, kernel, activation="relu", padding="same")(d5)
        convm = Conv2D(sf * 32, kernel, activation="relu", padding="same")(convm)
        # Decoder: mirror the encoder, consuming the skip tensors c5..c1.
        u5 = UpSampleLayer2D(convm, c5, start_filters=sf, num_filt=16, kernel=kernel)
        u4 = UpSampleLayer2D(u5, c4, start_filters=sf, num_filt=8, kernel=kernel)
        u3 = UpSampleLayer2D(u4, c3, start_filters=sf, num_filt=4, kernel=kernel)
        u2 = UpSampleLayer2D(u3, c2, start_filters=sf, num_filt=2, kernel=kernel)
        u1 = UpSampleLayer2D(u2, c1, start_filters=sf, num_filt=1, kernel=kernel)
        output_layer = Conv2D(1, (1,1), activation='linear', padding="same")(u1)
        model = Model(input_layer, output_layer)
    elif type == "Unet":
        input_shape = input_size
        input_layer = Input(shape=(input_shape))
        # LHS of UNET
        conv1 = Conv2D(start_filters * 1, kernel, activation="relu", padding="same")(input_layer)  # 128x128
        conv1 = Conv2D(start_filters * 1, kernel, activation="relu", padding="same")(conv1)
        pool1 = MaxPooling2D((2, 2))(conv1)  # 64x64
        pool1 = Dropout(0.5)(pool1)
        # NOTE(review): conv2 and conv3 both use start_filters*4, breaking the
        # usual x1/x2/x4 doubling — left as authored.
        conv2 = Conv2D(start_filters * 4, kernel, activation="relu", padding="same")(pool1)
        conv2 = Conv2D(start_filters * 4, kernel, activation="relu", padding="same")(conv2)
        pool2 = MaxPooling2D((2, 2))(conv2)  # 32x32
        pool2 = Dropout(0.5)(pool2)
        conv3 = Conv2D(start_filters * 4, kernel, activation="relu", padding="same")(pool2)
        conv3 = Conv2D(start_filters * 4, kernel, activation="relu", padding="same")(conv3)
        pool3 = MaxPooling2D((2, 2))(conv3)  # 16x16
        pool3 = Dropout(0.5)(pool3)
        # BUGFIX: conv4 previously consumed pool2, skipping level 3 entirely
        # and producing a shape mismatch at the deconv3/conv3 concatenate.
        conv4 = Conv2D(start_filters * 8, kernel, activation="relu", padding="same")(pool3)
        conv4 = Conv2D(start_filters * 8, kernel, activation="relu", padding="same")(conv4)
        pool4 = MaxPooling2D((2, 2))(conv4)  # 8x8
        pool4 = Dropout(0.5)(pool4)
        # Middle
        convm = Conv2D(start_filters * 16, kernel, activation="relu", padding="same")(pool4)
        convm = Conv2D(start_filters * 16, kernel, activation="relu", padding="same")(convm)
        # RHS of UNET
        deconv4 = Conv2DTranspose(start_filters * 8, kernel, strides=(2,2), padding="same")(convm)
        uconv4 = concatenate([deconv4, conv4])
        uconv4 = Dropout(0.5)(uconv4)
        uconv4 = Conv2D(start_filters * 8, kernel, activation="relu", padding="same")(uconv4)
        uconv4 = Conv2D(start_filters * 8, kernel, activation="relu", padding="same")(uconv4)
        deconv3 = Conv2DTranspose(start_filters * 4, kernel, strides=(2,2), padding="same")(uconv4)
        uconv3 = concatenate([deconv3, conv3])
        uconv3 = Dropout(0.5)(uconv3)
        uconv3 = Conv2D(start_filters * 4, kernel, activation="relu", padding="same")(uconv3)
        uconv3 = Conv2D(start_filters * 4, kernel, activation="relu", padding="same")(uconv3)
        # BUGFIX: deconv2 previously consumed uconv4, leaving the whole
        # uconv3 branch dead.
        deconv2 = Conv2DTranspose(start_filters * 4, kernel, strides=(2, 2), padding="same")(uconv3)
        uconv2 = concatenate([deconv2, conv2])
        uconv2 = Dropout(0.5)(uconv2)
        uconv2 = Conv2D(start_filters * 4, kernel, activation="relu", padding="same")(uconv2)
        uconv2 = Conv2D(start_filters * 4, kernel, activation="relu", padding="same")(uconv2)
        deconv1 = Conv2DTranspose(start_filters * 1, kernel, strides=(2, 2), padding="same")(uconv2)
        uconv1 = concatenate([deconv1, conv1])
        uconv1 = Dropout(0.5)(uconv1)
        uconv1 = Conv2D(start_filters * 1, kernel, activation="relu", padding="same")(uconv1)
        uconv1 = Conv2D(start_filters * 1, kernel, activation="relu", padding="same")(uconv1)
        output_layer = Conv2D(1, (1,1), activation='linear', padding="same")(uconv1)
        model = Model(input_layer, output_layer)
    elif type == 'upsc_v2':
        # 3x MaxPool encoder, 3x UpSampling decoder.
        model = Sequential()
        model.add(Conv2D(filters=start_filters*2, kernel_size=kernel, padding='Same',
                         activation='relu', input_shape=input_size))
        model.add(MaxPooling2D(pool_size=(2,2), padding='same'))
        model.add(Conv2D(filters=start_filters*4, kernel_size=kernel, padding='Same',
                         activation='relu'))
        model.add(MaxPooling2D(pool_size=(2,2), padding='same'))
        model.add(Conv2D(filters=start_filters*8, kernel_size=kernel, padding='Same',
                         activation='relu'))
        model.add(MaxPooling2D(pool_size=(2,2), padding='same'))
        model.add(Conv2D(filters=start_filters*16, kernel_size=kernel, padding='Same',
                         activation='relu'))
        model.add(Conv2D(filters=start_filters*16, kernel_size=kernel, padding='Same',
                         activation='relu'))
        model.add(UpSampling2D((2,2)))
        model.add(Conv2D(filters=start_filters*8, kernel_size=kernel, padding='Same',
                         activation='relu'))
        model.add(UpSampling2D((2,2)))
        model.add(Conv2D(filters=start_filters*4, kernel_size=kernel, padding='Same',
                         activation='relu'))
        model.add(UpSampling2D((2,2)))
        model.add(Conv2D(filters=start_filters*2, kernel_size=kernel, padding='Same',
                         activation='relu'))
        model.add(Conv2D(1, (3,3), activation='linear', padding='same'))
    elif type == 'upsc_v2_avg':
        # Same topology as upsc_v2 but with average pooling in the encoder.
        model = Sequential()
        model.add(Conv2D(filters=start_filters*2, kernel_size=kernel, padding='Same',
                         activation='relu', input_shape=input_size))
        model.add(AveragePooling2D(pool_size=(2,2), padding='same'))
        model.add(Conv2D(filters=start_filters*4, kernel_size=kernel, padding='Same',
                         activation='relu'))
        model.add(AveragePooling2D(pool_size=(2,2), padding='same'))
        model.add(Conv2D(filters=start_filters*8, kernel_size=kernel, padding='Same',
                         activation='relu'))
        model.add(AveragePooling2D(pool_size=(2,2), padding='same'))
        model.add(Conv2D(filters=start_filters*16, kernel_size=kernel, padding='Same',
                         activation='relu'))
        model.add(Conv2D(filters=start_filters*16, kernel_size=kernel, padding='Same',
                         activation='relu'))
        model.add(UpSampling2D((2,2)))
        model.add(Conv2D(filters=start_filters*8, kernel_size=kernel, padding='Same',
                         activation='relu'))
        model.add(UpSampling2D((2,2)))
        model.add(Conv2D(filters=start_filters*4, kernel_size=kernel, padding='Same',
                         activation='relu'))
        model.add(UpSampling2D((2,2)))
        model.add(Conv2D(filters=start_filters*2, kernel_size=kernel, padding='Same',
                         activation='relu'))
        model.add(Conv2D(1, (3,3), activation='linear', padding='same'))
    elif type == 'upsc_v3':
        # Deeper variant: 4 pooling levels plus a Dropout bottleneck.
        model = Sequential()
        model.add(Conv2D(filters=start_filters*2, kernel_size=kernel, padding='Same',
                         activation='relu', input_shape=input_size))
        model.add(MaxPooling2D(pool_size=(2,2), padding='same'))
        model.add(Conv2D(filters=start_filters*4, kernel_size=kernel, padding='Same',
                         activation='relu'))
        model.add(MaxPooling2D(pool_size=(2,2), padding='same'))
        model.add(Conv2D(filters=start_filters*8, kernel_size=kernel, padding='Same',
                         activation='relu'))
        model.add(MaxPooling2D(pool_size=(2,2), padding='same'))
        model.add(Conv2D(filters=start_filters*16, kernel_size=kernel, padding='Same',
                         activation='relu'))
        model.add(MaxPooling2D(pool_size=(2,2), padding='same'))
        model.add(Dropout(0.1))
        model.add(Conv2D(filters=start_filters*16, kernel_size=kernel, padding='Same',
                         activation='relu'))
        model.add(UpSampling2D((2,2)))
        model.add(Conv2D(filters=start_filters*8, kernel_size=kernel, padding='Same',
                         activation='relu'))
        model.add(UpSampling2D((2,2)))
        model.add(Conv2D(filters=start_filters*4, kernel_size=kernel, padding='Same',
                         activation='relu'))
        model.add(UpSampling2D((2,2)))
        model.add(Conv2D(filters=start_filters*2, kernel_size=kernel, padding='Same',
                         activation='relu'))
        model.add(UpSampling2D((2,2)))
        model.add(Conv2D(1, (3,3), activation='linear', padding='same'))
    elif type == 'ANN':
        # Fully connected autoencoder over flat 256-element vectors.
        input_shape = (256,)
        input_layer = Input(shape=(input_shape))
        encoded1 = Dense(512, activation='relu')(input_layer)
        encoded2 = Dense(256, activation='relu')(encoded1)
        encoded3 = Dense(128, activation='relu')(encoded2)
        encoded4 = Dense(64, activation='relu')(encoded3)
        encoded5 = Dense(32, activation='relu')(encoded4)
        encoded6 = Dense(16, activation='relu')(encoded5)
        encoded6 = Dropout(0.2)(encoded6)
        decoded1 = Dense(16, activation='relu')(encoded6)
        decoded2 = Dense(32, activation='relu')(decoded1)
        decoded3 = Dense(64, activation='relu')(decoded2)
        # BUGFIX: decoded4 previously consumed encoded3, bypassing the 16/32/64
        # bottleneck and leaving decoded1-3 dead.
        decoded4 = Dense(128, activation='relu')(decoded3)
        decoded5 = Dense(256, activation='relu')(decoded4)
        decoded6 = Dense(512, activation='relu')(decoded5)
        decoded = Dense(256, activation='linear')(decoded6)
        model = Model(input_layer, decoded)
    else:
        # Previously an unknown type fell through to an UnboundLocalError.
        raise ValueError(f"Unknown autoencoder type: {type!r}")
    return model
# Classification head on top of `hidden1` (built earlier in the file):
# 10 raw logits trained with from_logits sparse cross-entropy.
output = layers.Dense(10)(hidden1)
model = Model(inputs=visible, outputs=output)
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
# Load previously captured weights into this fresh graph, then score it;
# `weights`, `test_images`, `test_labels`, `fintest_trial`, `parameters`
# and `name` all come from earlier context.
model.set_weights(weights)
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=0)
fintest_trial.append(test_acc)
parameters[name] = fintest_trial

# NoisetoClear: snapshot the noise model's weights, then rebuild the
# "bio" CNN from scratch.
weights0 = noiseModel.get_weights()
model = None
print('starting biomodel')
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), input_shape=(32, 32, 3)))
model.add(layers.Activation('relu'))
# stddev=0 makes GaussianNoise an identity here — presumably a placeholder
# for noise-level experiments; confirm.
model.add(layers.GaussianNoise(0))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3)))
model.add(layers.Activation('relu'))
model.add(layers.GaussianNoise(0))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3)))
model.add(layers.Activation('relu'))
model.add(layers.GaussianNoise(0))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10))
def __create_model(self):
    '''
    Utility function to create and train model.

    Builds either the multi-input 'complete' model (fingerprint, coupling and
    machine-variable branches merged into four regression heads) or the
    single-input 'minimal' Conv1D model, compiles it with RMSprop + MSE, fits
    it on self._nnX/self._nnY, and stores the trained model on self.model.
    Layer sizes come from self.model_params; 2**param values are exponents.
    '''
    if self.__model_type == 'complete':
        # One keras Input per feature group; shapes taken from the first
        # sample of each group in self._nnX.
        fp_pre_chain = keras.Input(
            shape=self._nnX['fp_pre_chain'][0].shape, name='fp_pre_chain')
        fp_amino_acid = keras.Input(
            shape=self._nnX['fp_amino_acid'][0].shape, name='fp_amino_acid')
        coupling_agent = keras.Input(
            shape=self._nnX['coupling_agent'][0].shape, name='coupling_agent')
        coupling_strokes = keras.Input(
            shape=self._nnX['coupling_strokes'][0].shape, name='coupling_strokes')
        temp_coupling = keras.Input(
            shape=self._nnX['temp_coupling'][0].shape, name='temp_coupling')
        deprotection_strokes = keras.Input(
            shape=self._nnX['deprotection_strokes'][0].shape, name='deprotection_strokes')
        flow_rate = keras.Input(
            shape=self._nnX['flow_rate'][0].shape, name='flow_rate')
        machine = keras.Input(
            shape=self._nnX['machine'][0].shape, name='machine')
        temp_reactor_1 = keras.Input(
            shape=self._nnX['temp_reactor_1'][0].shape, name='temp_reactor_1')
        # Pre-chain fingerprint branch: two Conv1D blocks with dropout,
        # flattened into a dense embedding.
        x_pre_chain = Conv1D(2**self.model_params['pre_chain_conv1_filter'],
                             2**self.model_params['pre_chain_conv1_kernel'])(fp_pre_chain)
        x_pre_chain = Dense(2**self.model_params['pre_chain_dense1'])(x_pre_chain)
        x_pre_chain = Dropout(self.model_params['pre_chain_dropout1'])(x_pre_chain)
        x_pre_chain = Conv1D(2**self.model_params['pre_chain_conv2_filter'],
                             2**self.model_params['pre_chain_conv2_kernel'])(x_pre_chain)
        x_pre_chain = Dropout(self.model_params['pre_chain_dropout2'])(x_pre_chain)
        x_pre_chain = Activation(self.model_params['pre_chain_activation1'])(x_pre_chain)
        x_pre_chain = Flatten()(x_pre_chain)
        x_pre_chain = Dense(2**self.model_params['pre_chain_amino_acid_dense_final'],
                            activation=self.model_params['pre_chain_activation2'])(x_pre_chain)
        # Amino-acid fingerprint branch: dense stack ending in the same
        # embedding width as the pre-chain branch.
        x_amino_acid = Dense(2**self.model_params['amino_acid_dense1'])(fp_amino_acid)
        x_amino_acid = Dense(2**self.model_params['amino_acid_dense2'],
                             activation=self.model_params['amino_acid_activation1'])(x_amino_acid)
        x_amino_acid = Dropout(self.model_params['amino_acid_dropout1'])(x_amino_acid)
        x_amino_acid = Dense(2**self.model_params['pre_chain_amino_acid_dense_final'],
                             activation=self.model_params['amino_acid_activation2'])(x_amino_acid)
        # Chemistry = pre-chain + amino acid embeddings merged.
        x_chemistry = concatenate([x_pre_chain, x_amino_acid])
        x_chemistry = Dense(2**self.model_params['chemistry_dense1'])(x_chemistry)
        x_chemistry = Dense(2**self.model_params['chemistry_dense2'])(x_chemistry)
        # Coupling-condition branch: sigmoid-squashed raw inputs.
        x_coupling_agent = Activation('sigmoid')(coupling_agent)
        x_coupling_strokes = Activation('sigmoid')(coupling_strokes)
        x_temp_coupling = Activation('sigmoid')(temp_coupling)
        x_deprotection_strokes = Activation('sigmoid')(deprotection_strokes)
        x_deprotection_strokes = Dense(4, activation='relu')(x_deprotection_strokes)
        x_coupling = concatenate(
            [x_coupling_agent, x_coupling_strokes, x_temp_coupling,
             x_deprotection_strokes])
        # NOTE(review): coupling/machine dense sizes are used directly (no
        # 2** exponent) unlike the other branches — confirm intentional.
        x_coupling = Dense(self.model_params['coupling_dense1'])(x_coupling)
        x_coupling = Dense(self.model_params['coupling_dense2'])(x_coupling)
        # Machine-variable branch.
        x_flow_rate = Activation('sigmoid')(flow_rate)
        x_machine = Activation('sigmoid')(machine)
        x_machine = Dense(3, activation='relu')(x_machine)
        x_temp_reactor_1 = Activation('sigmoid')(temp_reactor_1)
        x_machine_variables = concatenate([x_flow_rate, x_machine, x_temp_reactor_1])
        x_machine_variables = Dense(self.model_params['machine_dense1'])(x_machine_variables)
        x_machine_variables = Dense(self.model_params['machine_dense2'])(x_machine_variables)
        # Merge all three branches into the shared trunk.
        x = concatenate([x_chemistry, x_coupling, x_machine_variables])
        x = Dense(2**self.model_params['concat_dense1'])(x)
        x = Dense(2**self.model_params['concat_dense2'],
                  activation=self.model_params['concat_activation2'])(x)
        x = Dropout(self.model_params['concat_dropout1'])(x)
        x = Dense(2**self.model_params['concat_dense3'],
                  activation=self.model_params['concat_activation3'])(x)
        # Four scalar regression heads.
        first_area = Dense(1, activation='linear', name='first_area')(x)
        first_height = Dense(1, activation='linear', name='first_height')(x)
        first_width = Dense(1, activation='linear', name='first_width')(x)
        first_diff = Dense(1, activation='linear', name='first_diff')(x)
        model = Model(
            inputs=[fp_pre_chain, fp_amino_acid, coupling_agent,
                    coupling_strokes, temp_coupling, deprotection_strokes,
                    flow_rate, machine, temp_reactor_1],
            outputs=[first_area, first_height, first_width, first_diff]
        )
    elif self.__model_type == 'minimal':
        # Single-input Conv1D variant mirroring the pre-chain branch plus the
        # shared trunk, with one linear output.
        model = Sequential()
        model.add(Conv1D(
            2**self.model_params['pre_chain_conv1_filter'],
            2**self.model_params['pre_chain_conv1_kernel'],
            input_shape=(self._nnX[0].shape[0], self._nnX[0].shape[1])))
        model.add(Dense(2**self.model_params['pre_chain_dense1']))
        model.add(Dropout(self.model_params['pre_chain_dropout1']))
        model.add(Conv1D(
            2**self.model_params['pre_chain_conv2_filter'],
            2**self.model_params['pre_chain_conv2_kernel']))
        model.add(Dropout(self.model_params['pre_chain_dropout2']))
        # model.add(Activation(self.model_params['pre_chain_activation1']))
        model.add(Flatten())
        model.add(Dense(
            2**self.model_params['pre_chain_amino_acid_dense_final'],
            activation=self.model_params['pre_chain_activation2']))
        model.add(Dense(
            2**self.model_params['concat_dense1']))
        model.add(Dense(
            2**self.model_params['concat_dense2']))
        model.add(Dropout(
            self.model_params['concat_dropout1']))
        model.add(Dense(
            2**self.model_params['concat_dense3']))
        model.add(Dense(
            1, activation='linear'))
    # Compile and train; optionally checkpoint on best validation loss.
    model.compile(
        optimizer = RMSprop(lr=self.model_params['opt_lr']),
        loss=mse)
    callbacks_list = []
    if self.model_params['save_checkpoint'] == True:
        checkpoint = ModelCheckpoint(
            self.model_params['checkpoint_filepath'] +
            "predictor-epoch{epoch:02d}-loss{loss:.4f}-val_loss{val_loss:.4f}.hdf5",
            monitor='val_loss', save_best_only=True, mode='min')
        callbacks_list = [checkpoint]
    model.fit(self._nnX, self._nnY,
              epochs=self.model_params['epochs'],
              batch_size=self.model_params['batch_size'],
              validation_split=self.model_params['val_split'],
              callbacks=callbacks_list,
              verbose=False
              )
    self.model = model
class PolypDetectionModel(ObjectDetectionModel):
    """YOLO-style single-class (polyp) detector.

    Predicts, per grid cell and anchor box, 5 values
    (objectness, cx, cy, w, h); at inference the raw grid output is decoded
    into normalized corner boxes and filtered with non-max suppression.
    """

    def __init__(self, **kwargs):
        # BUGFIX: was `if len(kwargs) is 0:` — identity comparison with an
        # int literal (SyntaxWarning, implementation-dependent behavior).
        if not kwargs:
            kwargs = config.configuration
        ObjectDetectionModel.__init__(self, **kwargs)
        self.train_dataset, self.valid_dataset = None, None
        self.model = None

    def polyp_image_input_layer(self, name=None):
        """Image input layer sized from the model configuration."""
        return Input([self.size[0], self.size[1], self.n_channels], name=name)

    def polyp_image_output_layer(self, inputs):
        # 5 values per box: objectness, cx, cy, w, h.
        output = Conv2D(filters=self.n_boxes * 5, kernel_size=1, strides=1,
                        padding="same")(inputs)
        return output

    def reshape_output(self, outputs):
        """Decode grid-relative offsets into normalized x1y1x2y2 boxes.

        Returns (boxes, objectness); w/h are squared, so the raw values are
        treated as square roots of the box dimensions.
        """
        grid = PolypDetectionModel.default_grid(self.n_grid)
        objectness_net_out, pred_box_offset_coord = tf.split(
            outputs, (1, 4), axis=-1)
        # Cell offset + cell index, scaled to [0, 1].
        pred_box_normalized_coord_CxCy = (
            pred_box_offset_coord[:, :, :, :, 0:2] + grid) / self.n_grid
        pred_box_normalized_coord_wh = tf.square(
            pred_box_offset_coord[:, :, :, :, 2:])
        box_x1y1 = pred_box_normalized_coord_CxCy - pred_box_normalized_coord_wh / 2
        box_x2y2 = pred_box_normalized_coord_CxCy + pred_box_normalized_coord_wh / 2
        box_x1y1x2y2_withLUAs0_scale01 = tf.concat([box_x1y1, box_x2y2], axis=-1)
        return box_x1y1x2y2_withLUAs0_scale01, objectness_net_out

    def reshape_output_for_prediction(self, outputs):
        """Flatten per-grid boxes and apply combined NMS.

        `outputs` is the (boxes, objectness) pair from reshape_output.
        Returns (boxes, scores, classes, valid_detections).
        """
        # (removed unused locals b/c and the redundant `scores` alias)
        num_tot_boxes = self.n_grid * self.n_grid * self.n_boxes
        bbox = tf.reshape(outputs[0], (1, num_tot_boxes, 1, 4))
        confidence = tf.reshape(outputs[1], (1, num_tot_boxes, 1))
        boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
            boxes=bbox,
            scores=confidence,
            max_output_size_per_class=100,
            max_total_size=100,
            iou_threshold=0.5,
            score_threshold=0.5)
        return boxes, scores, classes, valid_detections

    def get_dataset(self, train_dataset_path=None, val_dataset_path=None,
                    classes_path=None, batch_size=1):
        """Load train/validation datasets (fake data when a path is missing),
        shuffle, batch and transform them; the train set is also prefetched.

        Returns [train_dataset, valid_dataset] and caches both on self.
        """
        train_data_val_data = []
        for path in [train_dataset_path, val_dataset_path]:
            input_dataset = dataset.load_fake_dataset()
            if path:
                input_dataset = dataset.load_tfrecord_dataset(
                    path, classes_path)
            input_dataset = input_dataset.shuffle(buffer_size=512)
            input_dataset = input_dataset.batch(batch_size)
            input_dataset = input_dataset.map(
                lambda x, y: (polyp_dataset.transform_images(x),
                              polyp_dataset.transform_targets(y)))
            train_data_val_data.append(input_dataset)
        train_data_val_data[0] = train_data_val_data[0].prefetch(
            buffer_size=tf.data.experimental.AUTOTUNE)
        self.train_dataset, self.valid_dataset = train_data_val_data
        return train_data_val_data

    def squeeze_net_tiny(self, inputs=None, name=None, batch_norm_flag=False):
        """Tiny SqueezeNet-like backbone: a stem conv followed by six
        stride-2 maxpool + half_fire stages and a final half_fire block."""
        if inputs is None:
            inputs = self.polyp_image_input_layer()
        x = inputs
        x = Conv2D(filters=16, kernel_size=3, strides=1, padding='same')(x)
        if batch_norm_flag:
            x = BatchNormalization()(x)
        x = ReLU()(x)
        for _ in range(6):
            x = MaxPool2D(pool_size=3, strides=2, padding='same')(x)
            x = half_fire(x)  # fire1..fire6
        x = half_fire(x)  # fire7
        return tf.keras.Model(inputs, x, name=name)

    def build_model(self, training=False):
        """Assemble self.model.

        training=True  -> raw grid output reshaped to
                          (n_grid, n_grid, n_boxes, 5) for the loss.
        training=False -> decoded boxes passed through NMS for inference.
        """
        self.model = Sequential()
        tmp = inputs = self.polyp_image_input_layer(name='Input')
        if self.model_name == "squeeze_tiny":
            tmp = self.squeeze_net_tiny(name=self.model_name)(tmp)
        else:
            # NOTE(review): an unknown name is only logged; the head is then
            # applied directly to the raw input — confirm this fallback.
            logging.info(
                "There is no model with name {a}. Please choose one of squeeze_tiny and squeeze"
                .format(a=self.model_name))
        outputs = self.polyp_image_output_layer(tmp)
        if training:
            self.model.add(Model(inputs, outputs, name="MyModel"))
            self.model.add(Reshape(
                (self.n_grid, self.n_grid, self.n_boxes, 5)))
        else:
            boxes_0 = Lambda(lambda x: self.reshape_output(x),
                             name='yolo_boxes_0')(outputs)
            outputs = Lambda(lambda x: self.reshape_output_for_prediction(x),
                             name='yolo_nms')(boxes_0)
            self.model = Model(inputs, outputs, name='MyModel')

    def get_model(self):
        """Return the model, building an inference model on first use."""
        if self.model is None:
            self.build_model()
            logging.info(
                "Model was not built yet so build a new model with training=False"
            )
        return self.model

    def load_weights(self, weight_file_path):
        """Load weights into the (lazily built) model."""
        if self.model is None:
            self.build_model()
        self.model.load_weights(weight_file_path).expect_partial()
        logging.info('weights loaded')