def evaluate(self, x_val, y_val):
    x_val = DataSet.preprocess(x_val, "image")
    y_val = DataSet.preprocess(y_val, "mask")
    fit_loss = sigmoid_dice_loss
    fit_metrics = [binary_acc_ch0]
    self.model.compile(loss=fit_loss, optimizer="Adam", metrics=fit_metrics)
    # Score the trained model.
    scores = self.model.evaluate(x_val, y_val, batch_size=5, verbose=1)
    print('Test loss:', scores[0])
    print('Test accuracy:', scores[1])
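# Usage sketch for evaluate() (illustrative only; the h5 path and fold number are
# hypothetical, and model_obj is assumed to be a ModelDeployment instance as
# constructed elsewhere in this module):
#   dataset = DataSet("/path/to/data.h5", 0)
#   x_val = dataset.get_images(is_train=False)
#   y_val = dataset.get_masks(is_train=False, mask_nb=0)
#   model_obj.evaluate(x_val, y_val)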
def predict_and_save_stage1_masks(self, h5_data_path, h5_result_saved_path, fold_k=0, batch_size=4):
    """
    Read images from the h5 data file, predict their masks, and save the predicted masks into the result h5 file.

    Args:
        h5_data_path: str, path of the h5 file holding the training data.
        h5_result_saved_path: str, path of the h5 file the predicted masks are written to.
        fold_k: int, validation fold number; used in the name of the h5 group.
        batch_size: int, batch size.

    Returns:
        None.
    """
    f_result = h5py.File(h5_result_saved_path, "a")
    try:
        stage1_predict_masks_grp = f_result.create_group("stage1_fold{}_predict_masks".format(fold_k))
    except ValueError:
        # The group already exists, reuse it.
        stage1_predict_masks_grp = f_result["stage1_fold{}_predict_masks".format(fold_k)]
    dataset = DataSet(h5_data_path, fold_k)
    images_train = dataset.get_images(is_train=True)
    images_val = dataset.get_images(is_train=False)
    keys_train = dataset.get_keys(is_train=True)
    keys_val = dataset.get_keys(is_train=False)
    images = np.concatenate([images_train, images_val], axis=0)
    keys = np.concatenate([keys_train, keys_val], axis=0)
    print("predicting ...")
    # self.predict() preprocesses the images internally.
    y_pred = self.predict(images, batch_size, use_channels=1)
    print(y_pred.shape)
    print("Saving predicted masks ...")
    for i, key in enumerate(keys):
        stage1_predict_masks_grp.create_dataset(key, dtype=np.float32, data=y_pred[i])
    print("Done.")
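# Reading back the stage-1 masks written above (sketch; the group/dataset layout
# follows the create_group/create_dataset calls in predict_and_save_stage1_masks):
#   with h5py.File(h5_result_saved_path, "r") as f:
#       grp = f["stage1_fold0_predict_masks"]
#       mask = grp[key][()]   # float32 array, one dataset per image key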
def predict_and_show(self, image, show_output_channels):
    """
    Predict the mask of a single image and show it with matplotlib.

    Args:
        image: str (image path) or numpy array of shape (b=1, h=576, w=576, c=1).
        show_output_channels: 1 or 2, number of output channels to show.

    Returns:
        None.
    """
    if isinstance(image, str):
        images_src = self.read_images([image])
    else:
        images_src = image
    # self.predict() preprocesses the images internally.
    predict_mask = self.predict(images_src, 1, use_channels=show_output_channels)
    predict_mask = np.squeeze(predict_mask, axis=0)
    predict_mask = self.postprocess(predict_mask)
    predict_mask = DataSet.de_preprocess(predict_mask, mode="mask")
    if show_output_channels == 2:
        mask0 = predict_mask[..., 0]
        mask1 = predict_mask[..., 1]
        image_c3 = np.concatenate([np.squeeze(images_src, axis=0) for i in range(3)], axis=-1)
        image_mask0 = apply_mask(image_c3, mask0, color=[255, 106, 106], alpha=0.5)
        # result = np.concatenate((np.squeeze(images_src, axis=(0, -1)), mask0, mask1, image_mask0), axis=1)
        plt.imshow(image_mask0)
    else:
        result = np.concatenate((np.squeeze(images_src, axis=(0, -1)), predict_mask), axis=1)
        plt.imshow(result, cmap="gray")
    plt.show()
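# Usage sketch for predict_and_show() (illustrative; the .tif path is a hypothetical placeholder):
#   model_obj.predict_and_show("/path/to/CBCT_image.tif", show_output_channels=2)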
def predict(self, images, batch_size, use_channels=2):
    """
    Predict masks for images that have NOT been preprocessed yet.

    Args:
        images: 4-d numpy array, raw (un-preprocessed) images of shape (b, h, w, c=1).
        batch_size: int, batch size.
        use_channels: int, default 2. If the model has two output channels, this controls how many
            channels are returned; with use_channels=1 only the first channel's predictions are kept.

    Returns:
        4-d numpy array.
    """
    images = DataSet.preprocess(images, mode="image")
    outputs = self.model.predict(images, batch_size)
    if use_channels == 1:
        outputs = outputs[..., 0]
        outputs = np.expand_dims(outputs, -1)
    return outputs
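# Usage sketch for predict() (illustrative; paths are hypothetical). The inputs are raw
# grayscale images, since predict() runs DataSet.preprocess itself:
#   imgs = model_obj.read_images(["/path/to/a.tif", "/path/to/b.tif"])   # (b, h, w, 1)
#   masks = model_obj.predict(imgs, batch_size=4, use_channels=1)        # (b, h, w, 1)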
def do_predict_custom():
    model = get_dilated_unet(
        input_shape=(None, None, 1),
        mode='cascade',
        filters=32,
        n_class=1
    )
    model_weights = "/home/topsky/helloworld/study/njai_challenge/cbct/func/others_try/model_weights.hdf5"
    img_path = "/media/topsky/HHH/jzhang_root/data/njai/cbct/CBCT_testingset/CBCT_testingset/04+246ori.tif"
    img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
    img = np.expand_dims(img, axis=-1)
    img = np.expand_dims(img, axis=0)
    img = DataSet.preprocess(img, mode="image")
    # print(img.shape)
    # exit()
    model.load_weights(model_weights)
    pred = model.predict(img, batch_size=1)
    pred_img = np.squeeze(pred[0], -1)
    pred_img = DataSet.de_preprocess(pred_img, mode="image")
    plt.imshow(pred_img, "gray")
    plt.show()
def tta_test():
    img = cv2.imread(
        "/media/topsky/HHH/jzhang_root/data/njai/cbct/CBCT_testingset/CBCT_testingset/04+246ori.tif",
        cv2.IMREAD_GRAYSCALE)
    img = np.expand_dims(img, axis=-1)
    img = DataSet.preprocess(img, mode="image")
    img = np.expand_dims(img, axis=0)
    model = get_densenet121_unet_sigmoid_gn(input_shape=(None, None, 1), output_channels=2, weights=None)
    model.load_weights(
        "/home/topsky/helloworld/study/njai_challenge/cbct/model_weights/20180731_0/best_val_acc_se_densenet_gn_fold0_random_0_1i_2o_20180801.h5"
    )
    pred = tta_predict(model, img, batch_size=1)
    # print(pred)
    pred = np.squeeze(pred, 0)
    print(pred.shape)
    pred = np.where(pred > 0.5, 255, 0)
    cv2.imwrite("/home/topsky/Desktop/mask_04+246ori_f1_random.tif", pred[..., 0])
def predict_from_files_old(self, image_path_lst, batch_size=5, use_channels=2, mask_file_lst=None, tta=False,
                           is_save_npy=False, is_save_mask0=False, is_save_mask1=False, result_save_dir=""):
    """
    Given a list of image paths, return the raw (unprocessed) predictions; if a result directory
    is given, also save the (post-processed) predictions there.
    If mask_file_lst is given, its order must match image_path_lst;
    if it is not given, the output file names are taken from the input file names.

    Args:
        image_path_lst: list of image paths.
        batch_size: int, batch size.
        use_channels: int, number of output channels to keep.
        mask_file_lst: list, file names under which the predictions are saved.
        tta: bool, whether to apply test-time augmentation when predicting.
        is_save_npy: bool, whether to save the raw predictions as npy files.
        is_save_mask0: bool, whether to save channel 0 as mask images.
        is_save_mask1: bool, whether to save channel 1 as mask images.
        result_save_dir: str, directory the results are saved to.

    Returns:
        4-d numpy array, predicted result.
    """
    imgs = self.read_images(image_path_lst)
    imgs = DataSet.preprocess(imgs, mode="image")
    if tta:
        pred = tta_predict(self.model, imgs, batch_size=batch_size)
    else:
        pred = self.predict_old(imgs, batch_size=batch_size, use_channels=use_channels)
    if mask_file_lst is None:
        mask_file_lst = [os.path.basename(x) for x in image_path_lst]
    if is_save_npy:
        # Save the raw predictions as npy files.
        npy_dir = os.path.join(result_save_dir, "npy")
        self.save_npy(pred, mask_file_lst, npy_dir)
    if is_save_mask0:
        mask_nb = 0
        mask_save_dir = os.path.join(result_save_dir, "mask{}".format(mask_nb))
        self.save_mask(pred, mask_file_lst, mask_nb=mask_nb, result_save_dir=mask_save_dir)
    if is_save_mask1:
        mask_nb = 1
        mask_save_dir = os.path.join(result_save_dir, "mask{}".format(mask_nb))
        self.save_mask(pred, mask_file_lst, mask_nb=mask_nb, result_save_dir=mask_save_dir)
    return pred
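# Usage sketch for predict_from_files_old() (illustrative; directory paths are hypothetical):
#   image_path_lst = get_file_path_list("/path/to/test_images", ext=".tif")
#   pred = model_obj.predict_from_files_old(image_path_lst, batch_size=5, use_channels=2,
#                                           tta=True, is_save_npy=True, is_save_mask0=True,
#                                           result_save_dir="/path/to/results")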
def inference_2stages_from_files(model_def_stage1, model_weights_stage1, model_def_stage2, model_weights_stage2,
                                 file_dir, pred_save_dir):
    if not os.path.isdir(pred_save_dir):
        os.makedirs(pred_save_dir)
    model_obj = ModelDeployment(model_def_stage1, model_weights_stage1)
    file_path_lst = get_file_path_list(file_dir, ext=".tif")
    dst_file_path_lst = [os.path.join(pred_save_dir, os.path.basename(x)) for x in file_path_lst]
    imgs_src = model_obj.read_images(file_path_lst)
    # predict() preprocesses internally and keeps a trailing channel axis, so the
    # stage-1 predictions can be concatenated with the source images directly.
    pred_stage1 = model_obj.predict(imgs_src, batch_size=5, use_channels=1)
    input_stage2 = np.concatenate([imgs_src, pred_stage1], axis=-1)
    del model_obj
    print(pred_stage1.shape)
    print(input_stage2.shape)
    model_obj = ModelDeployment(model_def_stage2, model_weights_stage2)
    pred = model_obj.predict(input_stage2, batch_size=5, use_channels=1)
    pred = model_obj.postprocess(pred)
    pred = DataSet.de_preprocess(pred, mode="mask")
    for i in range(len(pred)):
        cv2.imwrite(dst_file_path_lst[i], pred[i])
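# Usage sketch for the two-stage pipeline (illustrative; weight paths and directories are
# hypothetical, and how model_def_stage1/model_def_stage2 are built depends on
# ModelDeployment's constructor). Note the stage-2 model takes the source image
# concatenated with the stage-1 mask as a 2-channel input:
#   inference_2stages_from_files(model_def_stage1, "/path/to/stage1_weights.h5",
#                                model_def_stage2, "/path/to/stage2_weights.h5",
#                                file_dir="/path/to/test_images",
#                                pred_save_dir="/path/to/predictions")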
def predict_from_h5data_old(self, h5_data_path, val_fold_nb, is_train=False, save_dir=None, color_lst=None):
    """
    Predict masks for the images stored in an h5 data file. If save_dir is given, save
    side-by-side visualizations (source | ground-truth overlay | prediction overlay) there;
    otherwise return the predictions.

    Args:
        h5_data_path: str, path of the h5 data file.
        val_fold_nb: validation fold number.
        is_train: bool, whether to use the training split or the validation split.
        save_dir: str or None, directory the visualizations are written to.
        color_lst: list of two colors, [ground-truth color, prediction color].

    Returns:
        4-d numpy array of predictions if save_dir is not given, otherwise None.
    """
    dataset = DataSet(h5_data_path, val_fold_nb)
    images = dataset.get_images(is_train=is_train)
    imgs_src = np.concatenate([images for i in range(3)], axis=-1)
    masks = dataset.get_masks(is_train=is_train, mask_nb=0)
    masks = np.squeeze(masks, axis=-1)
    print("predicting ...")
    # self.predict() preprocesses the images internally.
    y_pred = self.predict(images, batch_size=4, use_channels=1)
    y_pred = self.postprocess(y_pred)
    y_pred = DataSet.de_preprocess(y_pred, mode="mask")
    print(y_pred.shape)
    if save_dir:
        keys = dataset.get_keys(is_train)
        if color_lst is None:
            color_gt = [255, 106, 106]
            color_pred = [0, 191, 255]
            # color_pred = [255, 255, 0]
        else:
            color_gt = color_lst[0]
            color_pred = color_lst[1]
        # BGR to RGB
        imgs_src = imgs_src[..., ::-1]
        image_masks = [apply_mask(image, mask, color_gt, alpha=0.5) for image, mask in zip(imgs_src, masks)]
        image_preds = [apply_mask(image, mask, color_pred, alpha=0.5) for image, mask in zip(imgs_src, y_pred)]
        dst_image_path_lst = [os.path.join(save_dir, "{:03}.tif".format(int(key))) for key in keys]
        if not os.path.isdir(save_dir):
            os.makedirs(save_dir)
        image_mask_preds = np.concatenate([imgs_src, image_masks, image_preds], axis=2)
        for i in range(len(image_masks)):
            cv2.imwrite(dst_image_path_lst[i], image_mask_preds[i])
        print("Done.")
    else:
        return y_pred
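# Usage sketch for predict_from_h5data_old() (illustrative; the h5 path, fold number and
# output directory are hypothetical placeholders):
#   model_obj.predict_from_h5data_old("/path/to/data.h5", val_fold_nb=0, is_train=False,
#                                     save_dir="/path/to/visualizations")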