def get_seg_model(seg_model_name, input_dims):
    # Return segmentation model based on name
    if seg_model_name.lower() == 'block2d':
        full_model = BlockModel2D(input_dims, filt_num=16, numBlocks=4)
    elif seg_model_name.lower() == 'resunet':
        full_model = res_unet(input_dims)
    else:
        raise ValueError('Unknown segmentation model: {}'.format(seg_model_name))
    return full_model
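# Example usage (illustrative sketch, not in the original source): build the
# block model for single-channel 256x256 inputs and print a summary. The
# input shape and model name here are assumptions only.
seg_model = get_seg_model('block2d', (256, 256, 1))
seg_model.summary()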
def GetBlockModelMasks(weights_path, test_imgs, batch_size):
    # Create model
    tqdm.write('Loading segmentation model...')
    model = BlockModel2D(input_shape=im_dims+(n_channels,),
                         filt_num=16, numBlocks=4)
    # Load weights
    model.load_weights(weights_path)
    # Convert to linear output layer for better ensembling
    model = ConvertModelOutputToLinear(model)
    # Get predicted masks
    tqdm.write('Getting predicted masks...')
    masks = model.predict(test_imgs, batch_size=batch_size, verbose=0)
    del model
    return masks
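# Illustrative call (not in the original source): test_imgs is assumed to be a
# float array of shape (N,) + im_dims + (n_channels,) matching the model input;
# the weights filename and batch size below are placeholders.
pred_masks = GetBlockModelMasks('block_model_weights.h5', test_imgs,
                                batch_size=8)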
trainX, valX, trainY, valY = train_test_split(img_files,
                                              mask_files,
                                              test_size=val_split,
                                              random_state=rng,
                                              shuffle=True)
train_dict = dict(zip(trainX, trainY))
val_dict = dict(zip(valX, valY))

# Setup datagens
train_gen = PngDataGenerator(trainX, train_dict, **train_params)
val_gen = PngDataGenerator(valX, val_dict, **val_params)

# Create model
model = BlockModel2D(input_shape=im_dims + (n_channels,),
                     filt_num=16, numBlocks=4)

# Load pretrained weights, if provided
if pretrain_weights_filepath is not None:
    model.load_weights(pretrain_weights_filepath)

# Compile model
model.compile(Adam(), loss=dice_coef_loss)

# Create callbacks
cb_check = ModelCheckpoint(weight_filepath, monitor='val_loss',
                           verbose=1, save_best_only=True,
                           save_weights_only=True, mode='auto',
def _get_model(self, filt_num=16, numBlocks=4):
    return BlockModel2D(input_shape=self.dims + (self.n_channels,),
                        filt_num=filt_num, numBlocks=numBlocks)
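# Minimal sketch (assumed, not from the original repo) of how _get_model is
# typically used by its owning class; the class name, attribute values, and
# compile settings below are illustrative only.
class SegTrainerSketch:
    def __init__(self, dims=(256, 256), n_channels=1):
        self.dims = dims
        self.n_channels = n_channels

    def _get_model(self, filt_num=16, numBlocks=4):
        return BlockModel2D(input_shape=self.dims + (self.n_channels,),
                            filt_num=filt_num, numBlocks=numBlocks)

    def compile_model(self):
        # Build the network from the stored dims/channels and compile it
        # with the Dice loss used elsewhere in this repo
        model = self._get_model()
        model.compile(Adam(), loss=dice_coef_loss)
        return model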
history2 = model.fit_generator(generator=train_gen,
                               epochs=epochs[1],
                               verbose=1,
                               callbacks=[cb_check, cb_plateau],
                               validation_data=val_gen)

# %% ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~ Full Size Training ~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

print('Setting up 1024 training')

# Make full-size model
full_model = BlockModel2D((1024, 1024, n_channels), filt_num=16, numBlocks=4)
full_model.load_weights(best_weight_path)

# Compile model
full_model.compile(Adam(lr=learnRate), loss=dice_coef_loss)

# Set weight paths
best_weight_path = best_weight_filepath.format('1024train')

# Setup full size datagens with only large masks
train_gen, val_gen = get_seg_datagen(
    pos_img_filt_path, pos_mask_filt_path,
    full_train_params, full_val_params, val_split)

# Create callbacks
cb_check = ModelCheckpoint(best_weight_path, monitor='val_loss',
                           verbose=1, save_best_only=True,