def setupModel(self, modelLayers=None, modelParameters=None):
    """Set up and compile the model."""
    # Avoid mutable default arguments
    if modelLayers is None:
        modelLayers = []
    if modelParameters is None:
        modelParameters = {}
    # Merge user-supplied parameters with the defaults
    self.parameters = updateDictionary(modelParameters, DEFAULT_MODEL_PARAMETERS)
    # Fall back to the default convolutional architecture when no layers are given
    if len(modelLayers) == 0:
        modelLayers = ConvNetArchitecture(self.parameters)
    self.model = keras.Sequential(modelLayers)
    self.model.compile(optimizer=self.parameters['optimizer'],
                       loss=self.parameters['loss'],
                       metrics=self.parameters['metrics'])
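# NOTE: updateDictionary is used throughout this class but is not shown in this
# section. A minimal sketch of the assumed behavior -- keys supplied by the user
# override the defaults, and neither input dictionary is mutated -- is given
# below. This is an assumption, not the project's actual implementation:
def updateDictionary(userDict, defaultDict):
    """Return a copy of defaultDict with any keys present in userDict overridden."""
    merged = dict(defaultDict)  # Copy so the defaults are never mutated
    merged.update(userDict)     # User-supplied values take precedence
    return merged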
def setup(self, options={}): """Initialize a data augmentation generator through Keras""" # Update options dictionary with input from user, else use DEFAULT_TRAINING_OPTIONS self.options = updateDictionary(self.options, DEFAULT_GENERATOR_OPTIONS) # Initialize generator self.extraProcessing = preProcessFunction( self.options ) # Adds spatial filtering and gaussian noise, but these image operations should not be performed on the mask self.dataGenerator = ImageDataGenerator( rescale=self.options['augment_rescaling'], rotation_range=self.options['augment_angleRange'], width_shift_range=self.options['augment_shiftRange'], height_shift_range=self.options['augment_shiftRange'], shear_range=self.options['augment_shearRange'], zoom_range=self.options['augment_zoomRange'], brightness_range=self.options['augment_brightRange'], horizontal_flip=self.options['augment_flipHoriz'], vertical_flip=self.options['augment_flipVert'], fill_mode=self.options['augment_fillMode'], cval=self.options['augment_fillVal'])
def setup(self, options={}): """Specify the preprocessing options to use with the Image Augmentation class""" self.options = updateDictionary(self.options, DEFAULT_GENERATOR_OPTIONS)
def trainModel(self, train_data, train_labels, test_data, test_labels,
               options=DEFAULT_TRAINING_OPTIONS):
    """Train the model on imaging data."""
    # Ensure that the data shape is rank 4: (Images)x(X)x(Y)x(NChannels)
    if len(train_data.shape) == 3:
        # Data is single-channel (rank 3), so add a trailing channel dimension
        train_data = train_data.reshape(train_data.shape + (1,))
        test_data = test_data.reshape(test_data.shape + (1,))
    # Merge user-supplied options with DEFAULT_TRAINING_OPTIONS
    options = updateDictionary(options, DEFAULT_TRAINING_OPTIONS)
    self.trainingOptions = options
    # Set up training parameters
    nTrainSamples = train_labels.shape[0]
    nTestSamples = test_labels.shape[0]
    np.random.seed(options['seed'])
    # Data augmentation of imaging data (optional)
    if options['augmentData']:
        useSequence = True  # Use the Sequence-based generator rather than the plain generator
        if useSequence:
            dataGenerator_Train = ImageAugmentationSequence(options)
            dataGenerator_Test = ImageAugmentationSequence(options)
        else:
            dataGenerator_Train = ImageAugmentationGenerator(options)
            dataGenerator_Test = ImageAugmentationGenerator(options)
    # Visualize sample training data during each training epoch (optional)
    if options['showTrainingData']:
        fig, axes = plt.subplots(nrows=options['subplotDims'][0],
                                 ncols=options['subplotDims'][1],
                                 figsize=(15, 15))
        axes = axes.flatten() if isinstance(axes, np.ndarray) else [axes]
        for ax in axes:
            ax.set_xticks([])
            ax.set_yticks([])
            ax.grid(False)
    self.logger.write("******************")
    self.logger.write("Training model: {}".format(type(self)))
    self.logger.write("Options: {}".format(options))
    self.logger.write("******************")
    t0 = time.time()
    for block in range(options['blocks']):
        self.logger.write("Training block: {}/{}".format(block + 1, options['blocks']))
        # Train the model in minibatches of size options['batchSize']
        if options['augmentData']:
            if useSequence:
                dataGenerator_Train.generate(train_data, train_labels,
                                             options['batchSize'], seed=block)
                dataGenerator_Test.generate(test_data, test_labels,
                                            options['batchSize'], seed=block)
                train_generator = dataGenerator_Train
                test_generator = dataGenerator_Test
            else:
                train_generator = dataGenerator_Train.get(train_data, train_labels,
                                                          options['batchSize'], seed=block)
                # Bug fix: the test generator must draw from the test data, not the training data
                test_generator = dataGenerator_Test.get(test_data, test_labels,
                                                        options['batchSize'], seed=block)
            self.model.fit_generator(train_generator,
                                     validation_data=test_generator,
                                     steps_per_epoch=int(np.ceil(nTrainSamples / options['batchSize'])),
                                     validation_steps=int(np.ceil(nTestSamples / options['batchSize'])),
                                     verbose=options['verboseMode'],
                                     epochs=options['epochs'],
                                     callbacks=options['callbacks'],
                                     shuffle=True)
        else:
            self.model.fit(x=train_data, y=train_labels,
                           validation_data=(test_data, test_labels),
                           batch_size=options['batchSize'],
                           steps_per_epoch=int(np.ceil(nTrainSamples / options['batchSize'])),
                           validation_steps=int(np.ceil(nTestSamples / options['batchSize'])),
                           verbose=options['verboseMode'],
                           epochs=options['epochs'],
                           callbacks=options['callbacks'],
                           shuffle=True)
        # Evaluate model accuracy: map this block's epochs into the full history
        indices = block * options['epochs'] + np.arange(options['epochs'])
        if block == STARTING_BLOCK:
            # Initialize the model history during the first block
            metrics = [metric for metric in self.model.history.history.keys()
                       if not metric.startswith('val_')]
            self.modelHistory = setupMetricDict(metrics,
                                                epochs=options['blocks'] * options['epochs'])
        # Record this block's training and validation metrics
        for metric in metrics:
            train_metric = self.model.history.history[metric]
            test_metric = self.model.history.history['val_' + metric]
            self.modelHistory[metric][:, indices] = [train_metric, test_metric]
            self.logger.write("Model {}: {} (training) / {} (test)\n".format(
                metric, np.median(train_metric), np.median(test_metric)))
        # Checkpoint the model (including history and parameters) after each block
        self.saveModel(r'D:\Code\ROI Segmentation\Code\Dave\Checkpoint_Model_Block{}.h5'.format(block))
        self.saveModelParameters()
        self.saveModelHistory()
    elapsedTime = time.time() - t0
    self.logger.write('Total elapsed training time: {:4.2f}s (or {:4.2f}s per block)'.format(
        elapsedTime, elapsedTime / options['blocks']))
    return self.modelHistory
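# NOTE: setupMetricDict is not defined in this section. From its usage above
# (self.modelHistory[metric][:, indices] = [train_metric, test_metric]), each
# metric appears to map to a 2 x (blocks*epochs) array holding the training
# curve in row 0 and the validation curve in row 1. A minimal sketch under that
# assumption, not the project's actual implementation:
import numpy as np

def setupMetricDict(metrics, epochs):
    """Allocate a 2 x epochs array (train/test rows) for each metric name."""
    return {metric: np.zeros((2, epochs)) for metric in metrics}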
def updateParameters(self, modelParameters=DEFAULT_MODEL_PARAMETERS):
    """Update the model parameters; when called with no arguments, reinitialize them to the default values."""
    self.parameters = updateDictionary(modelParameters, self.parameters)
def __init__(self, modelLayers=None, modelParameters=None):
    if modelLayers is None:
        modelLayers = []
    if modelParameters is None:
        modelParameters = {}
    self.logger = logger()  # Create the logger first so setup steps can report problems
    # Pass the user parameters through so setupModel does not silently discard
    # them; setupModel also merges them with the defaults and compiles the
    # model, so no separate compile step is needed here
    self.setupModel(modelLayers, modelParameters)
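# Usage sketch. The class name ConvNetModel is hypothetical (the class this
# __init__ belongs to is not named in this section), and train_data /
# train_labels / test_data / test_labels are assumed to be NumPy arrays already
# loaded by the caller:
model = ConvNetModel(modelParameters={'optimizer': 'adam'})
history = model.trainModel(train_data, train_labels, test_data, test_labels,
                           options={'blocks': 2, 'epochs': 5, 'batchSize': 32})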