def predict_poly(model, threashold1, threashold2, result, first_class):
    """Append polygon predictions for two mask channels of the current image.

    Four-way test-time augmentation: the image is predicted as-is and after
    a vertical flip, a horizontal flip, and a 90-degree rotation; each map is
    un-transformed and the four are merged by geometric mean.

    NOTE(review): reads module-level globals (``image``, ``image_id``, ``H``,
    ``W``, ``x_max``, ``y_min``, ``num_mask_channels``, ``num_channels``) —
    confirm they are assigned before this is called.
    """
    # Prediction on the original orientation.
    pred = extra_functions.make_prediction_cropped(
        model, image,
        initial_size=(112, 112),
        final_size=(112 - 32, 112 - 32),
        num_masks=num_mask_channels,
        num_channels=num_channels)

    # Vertical-flip augmentation.
    flipped_ud = np.flipud(image)
    pred_ud = extra_functions.make_prediction_cropped(
        model, flipped_ud,
        initial_size=(112, 112),
        final_size=(112 - 32, 112 - 32),
        num_masks=2,
        num_channels=num_channels)

    # Horizontal-flip augmentation.
    flipped_lr = np.fliplr(image)
    pred_lr = extra_functions.make_prediction_cropped(
        model, flipped_lr,
        initial_size=(112, 112),
        final_size=(112 - 32, 112 - 32),
        num_masks=2,
        num_channels=num_channels)

    # 90-degree-rotation augmentation.
    rotated = np.rot90(image)
    pred_rot = extra_functions.make_prediction_cropped(
        model, rotated,
        initial_size=(112, 112),
        final_size=(112 - 32, 112 - 32),
        num_masks=2,
        num_channels=num_channels)

    # Undo each transform (rot90 by 3 inverts one rot90) and take the
    # geometric mean of the four probability maps.
    new_mask = np.power(
        pred
        * np.flipud(pred_ud)
        * np.fliplr(pred_lr)
        * np.rot90(pred_rot, 3),
        0.25)

    x_scaler, y_scaler = extra_functions.get_scalers(H, W, x_max, y_min)

    # One polygon record per channel; the stored channel id is 1-based.
    for offset, thr in enumerate((threashold1, threashold2)):
        result += [(image_id, first_class + offset + 1,
                    mask2poly(new_mask[:, :, offset], thr,
                              x_scaler, y_scaler))]
    return result
# For each test image: predict a mask, colorize it, blend with the RGB
# three-band image, and write mask / image / overlay PNGs to ../test_mask.
for image_id in tqdm(test_ids):
    image = extra_functions.read_image_16(image_id)

    file_name = '{}.tif'.format(image_id)
    rgb = tiff.imread(os.path.join(three_band_path, file_name))
    rgb = np.transpose(rgb, (1, 2, 0))
    # Rescale 11-bit TIFF values (max 2047) to the 8-bit display range.
    rgb = rgb / 2047 * 255
    image_3 = np.array(rgb, dtype=np.uint8)

    H = image.shape[1]
    W = image.shape[2]
    # NOTE(review): x_max / y_min are computed here but never used below.
    x_max, y_min = extra_functions._get_xmax_ymin(image_id)

    predicted_mask = extra_functions.make_prediction_cropped(
        model, image,
        initial_size=(128, 128),
        final_size=(128 - 32, 128 - 32),
        num_masks=num_mask_channels,
        num_channels=num_channels)

    # Paint every pixel that clears the threshold with its channel color.
    overlay = np.zeros((H, W, 3), np.uint8)
    for channel in range(num_mask_channels):
        overlay[predicted_mask[channel] >= threshold] = color[channel + 1]

    # Blend 60% image + 40% colored mask for a visual sanity check.
    blended = cv2.addWeighted(image_3, 0.6, overlay, 0.4, 0)

    imwrite('../test_mask/{}_mask.png'.format(image_id), overlay)
    imwrite('../test_mask/{}image.png'.format(image_id), image_3)
    imwrite('../test_mask/{}_mask_image.png'.format(image_id), blended)
#vivian added test_ids = test_ids[200:220] #vivian added for image_id in tqdm(test_ids): image = extra_functions.read_image_16(image_id) H = image.shape[1] W = image.shape[2] x_max, y_min = extra_functions._get_xmax_ymin(image_id) predicted_mask = extra_functions.make_prediction_cropped( model, image, initial_size=(112, 112), final_size=(112 - 32, 112 - 32), num_masks=num_mask_channels, num_channels=num_channels) image_v = flip_axis(image, 1) predicted_mask_v = extra_functions.make_prediction_cropped( model, image_v, initial_size=(112, 112), final_size=(112 - 32, 112 - 32), num_masks=1, num_channels=num_channels) image_h = flip_axis(image, 2) predicted_mask_h = extra_functions.make_prediction_cropped(
continue
# print(img_3.max())
# print(img_3.min())
#
# Read a custom training image (alternative loaders kept for reference):
# image = np.transpose(plt.imread("../data/image_tiles{}.tif".format(image_id)), (2, 0, 1)) / 2047.0
# image=image.astype(np.float16)
# image=np.transpose(cv2.imread("../data/image_file_test/{}".format(file_name)), (2, 0, 1)) / 2047.0
# image=image.astype(np.float16)
H = image.shape[1]
W = image.shape[2]
# Predict the image on its original orientation (tile size image_row x
# image_col with a 16-px border cropped on each side).
predicted_mask = extra_functions.make_prediction_cropped(
    model, image,
    initial_size=(image_row, image_col),
    final_size=(image_row - 32, image_col - 32),
    num_masks=num_mask_channels,
    num_channels=num_channels)
# Flip the image horizontally (axis 1), then predict.
image_v = flip_axis(image, 1)
predicted_mask_v = extra_functions.make_prediction_cropped(
    model, image_v,
    initial_size=(image_row, image_col),
    final_size=(image_row - 32, image_col - 32),
    num_masks=1,
    num_channels=num_channels)
# Flip the image vertically (axis 2), then predict.
# NOTE(review): this call is truncated in the visible chunk — it continues
# past the end of this view.
image_h = flip_axis(image, 2)
predicted_mask_h = extra_functions.make_prediction_cropped(
test_ids.append(i)

print("Number of images: ", len(test_ids))

result = []


def flip_axis(x, axis):
    """Return *x* reversed along *axis* (a view; no data is copied)."""
    arr = np.asarray(x)
    index = [slice(None)] * arr.ndim
    index[axis] = slice(None, None, -1)
    return arr[tuple(index)]


# For each test image: 4-way test-time augmentation (identity, row flip,
# column flip, transpose), geometric mean of the de-augmented maps, then
# binarize at the global threshold.
for image_id in test_ids:
    print("Predicting: ", image_id)
    image = extra_functions.read_image_test(image_id)

    predicted_mask = extra_functions.make_prediction_cropped(
        model, image, batch_size, size=(288, 288))

    predicted_mask_v = extra_functions.make_prediction_cropped(
        model, flip_axis(image, 0), batch_size, size=(288, 288))

    predicted_mask_h = extra_functions.make_prediction_cropped(
        model, flip_axis(image, 1), batch_size, size=(288, 288))

    predicted_mask_s = extra_functions.make_prediction_cropped(
        model, image.swapaxes(0, 1), batch_size, size=(288, 288))

    # Undo each transform, then take the geometric mean of the four maps.
    new_mask = np.power(
        predicted_mask
        * flip_axis(predicted_mask_v, 0)
        * flip_axis(predicted_mask_h, 1)
        * predicted_mask_s.swapaxes(0, 1),
        0.25)

    # In-place binarization (order matters: 1s first, then 0s).
    new_mask[new_mask >= threshold] = 1
    new_mask[new_mask < threshold] = 0
# Progress banner for the image currently being predicted.
print(
    "<><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>"
)
print("predict image id: ", image_id)
print(
    "<><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>"
)
# Load the 16-bit image for this id, scaled by the collection's
# normalization value — presumably indexed in step with the id loop;
# TODO confirm against the caller.
image = extra_functions.read_image_16(test_path, collect_name, image_id,
                                      normalization_value[index])
# Predict on the original orientation (patch_width x patch_height tiles
# with a 16-px border cropped on each side).
predicted_mask = extra_functions.make_prediction_cropped(
    model, image,
    initial_size=(patch_width, patch_height),
    final_size=(patch_width - 32, patch_height - 32),
    num_masks=1,
    num_channels=channels)
# Test-time augmentation: flip along axis 1 and re-predict.
image_v = flip_axis(image, 1)
predicted_mask_v = extra_functions.make_prediction_cropped(
    model, image_v,
    initial_size=(patch_width, patch_height),
    final_size=(patch_width - 32, patch_height - 32),
    num_masks=1,
    num_channels=channels)
# Flip along axis 2 and re-predict.
# NOTE(review): this call is truncated in the visible chunk — it continues
# past the end of this view.
image_h = flip_axis(image, 2)
predicted_mask_h = extra_functions.make_prediction_cropped(