import cv2
import numpy as np
from phasepack import phasecong as pc


def fsim(org_img: np.ndarray, pred_img: np.ndarray):
    """
    Feature-based similarity index, based on phase congruency (PC) and image gradient magnitude (GM).

    There are different ways to implement PC; the authors of the original FSIM paper use the method
    defined by Kovesi (1999). The Python phasepack project fortunately provides an implementation
    of that approach.

    There are also alternatives for implementing GM; the FSIM authors suggest using the Scharr
    operator, which is implemented in OpenCV.
    """
    _assert_image_shapes_equal(org_img, pred_img, "FSIM")

    T1 = 0.85  # a constant based on the dynamic range of PC values
    T2 = 160  # a constant based on the dynamic range of GM values
    alpha = beta = 1  # parameters used to adjust the relative importance of PC and GM features
    fsim_list = []
    for i in range(org_img.shape[2]):
        # Calculate the PC for original and predicted images
        pc1_2dim = pc(org_img[:, :, i],
                      nscale=4,
                      minWaveLength=6,
                      mult=2,
                      sigmaOnf=0.5978)
        pc2_2dim = pc(pred_img[:, :, i],
                      nscale=4,
                      minWaveLength=6,
                      mult=2,
                      sigmaOnf=0.5978)

        # pc1_2dim and pc2_2dim are 7-element tuples; we only need the element at index 4, which is
        # the PC. The PC itself is a list of 6 arrays (one per orientation), so we sum over all
        # 6 orientations to obtain a single PC map per band.
        pc1_2dim_sum = np.zeros((org_img.shape[0], org_img.shape[1]),
                                dtype=np.float64)
        pc2_2dim_sum = np.zeros((pred_img.shape[0], pred_img.shape[1]),
                                dtype=np.float64)
        for orientation in range(6):
            pc1_2dim_sum += pc1_2dim[4][orientation]
            pc2_2dim_sum += pc2_2dim[4][orientation]

        # Calculate GM for original and predicted images based on the Scharr operator
        gm1 = _gradient_magnitude(org_img[:, :, i], cv2.CV_16U)
        gm2 = _gradient_magnitude(pred_img[:, :, i], cv2.CV_16U)

        # Calculate similarity measure for PC1 and PC2
        S_pc = _similarity_measure(pc1_2dim_sum, pc2_2dim_sum, T1)
        # Calculate similarity measure for GM1 and GM2
        S_g = _similarity_measure(gm1, gm2, T2)

        S_l = (S_pc**alpha) * (S_g**beta)

        numerator = np.sum(S_l * np.maximum(pc1_2dim_sum, pc2_2dim_sum))
        denominator = np.sum(np.maximum(pc1_2dim_sum, pc2_2dim_sum))
        fsim_list.append(numerator / denominator)

    return np.mean(fsim_list)
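

# The three helpers used above and below (_assert_image_shapes_equal, _gradient_magnitude and
# _similarity_measure) are not shown in this section. The definitions that follow are a minimal
# illustrative sketch of what they are assumed to look like, based on how they are called here:
# a shape guard, a Scharr-based gradient magnitude, and the FSIM similarity measure
# S(x, y) = (2xy + T) / (x^2 + y^2 + T).


def _assert_image_shapes_equal(org_img: np.ndarray, pred_img: np.ndarray, metric: str):
    # Guard against comparing images of different shapes.
    assert org_img.shape == pred_img.shape, (
        f"Cannot calculate {metric}: image shapes {org_img.shape} and {pred_img.shape} differ"
    )


def _gradient_magnitude(img: np.ndarray, img_depth) -> np.ndarray:
    # Scharr derivatives in x and y, combined into a single gradient magnitude map. The cast to
    # float avoids overflow when the derivatives come back as an integer type.
    scharrx = cv2.Scharr(img, img_depth, 1, 0).astype(np.float64)
    scharry = cv2.Scharr(img, img_depth, 0, 1).astype(np.float64)
    return np.sqrt(scharrx ** 2 + scharry ** 2)


def _similarity_measure(x: np.ndarray, y: np.ndarray, constant) -> np.ndarray:
    # Similarity measure used for both the PC and GM components of FSIM.
    numerator = 2 * x * y + constant
    denominator = x ** 2 + y ** 2 + constant
    return numerator / denominator
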
def fsim_metric(org_img: np.ndarray, pred_img: np.ndarray, T1=0.85, T2=160) -> float:
    """
    Feature-based similarity index, based on phase congruency (PC) and image gradient magnitude (GM).

    There are different ways to implement PC; the authors of the original FSIM paper use the method
    defined by Kovesi (1999). The Python phasepack project fortunately provides an implementation
    of that approach.

    There are also alternatives for implementing GM; the FSIM authors suggest using the Scharr
    operator, which is implemented in OpenCV.

    Note that FSIM is defined in the original paper for grayscale as well as for RGB images. Our use
    cases are mostly multi-band images, e.g. RGB + NIR. To accommodate this, we compute FSIM for
    each individual band and then take the average.

    Note also that T1 and T2 are constants depending on the dynamic range of the PC/GM values. In
    theory these parameters would benefit from fine-tuning based on the data at hand; we use the
    values found in the original paper as defaults.

    Args:
        org_img -- numpy array containing the original image
        pred_img -- predicted image
        T1 -- constant based on the dynamic range of PC values
        T2 -- constant based on the dynamic range of GM values
    """
    _assert_image_shapes_equal(org_img, pred_img, "FSIM")

    alpha = beta = 1  # parameters used to adjust the relative importance of PC and GM features
    fsim_list = []
    for i in range(org_img.shape[2]):
        # Calculate the PC for original and predicted images
        pc1_2dim = pc(org_img[:, :, i], nscale=4, minWaveLength=6, mult=2, sigmaOnf=0.5978)
        pc2_2dim = pc(pred_img[:, :, i], nscale=4, minWaveLength=6, mult=2, sigmaOnf=0.5978)

        # pc1_2dim and pc2_2dim are 7-element tuples; we only need the element at index 4, which is
        # the PC. The PC itself is a list of 6 arrays (one per orientation), so we sum over all
        # 6 orientations to obtain a single PC map per band.
        pc1_2dim_sum = np.zeros((org_img.shape[0], org_img.shape[1]), dtype=np.float64)
        pc2_2dim_sum = np.zeros((pred_img.shape[0], pred_img.shape[1]), dtype=np.float64)
        for orientation in range(6):
            pc1_2dim_sum += pc1_2dim[4][orientation]
            pc2_2dim_sum += pc2_2dim[4][orientation]

        # Calculate GM for original and predicted images based on the Scharr operator
        gm1 = _gradient_magnitude(org_img[:, :, i], cv2.CV_16U)
        gm2 = _gradient_magnitude(pred_img[:, :, i], cv2.CV_16U)

        # Calculate similarity measure for PC1 and PC2
        S_pc = _similarity_measure(pc1_2dim_sum, pc2_2dim_sum, T1)
        # Calculate similarity measure for GM1 and GM2
        S_g = _similarity_measure(gm1, gm2, T2)

        S_l = (S_pc ** alpha) * (S_g ** beta)

        numerator = np.sum(S_l * np.maximum(pc1_2dim_sum, pc2_2dim_sum))
        denominator = np.sum(np.maximum(pc1_2dim_sum, pc2_2dim_sum))
        fsim_list.append(numerator / denominator)

    return np.mean(fsim_list)
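

# Illustrative usage only, with random arrays standing in for real multi-band imagery; identical
# inputs yield an FSIM of 1.0. The shapes, seed and band count below are arbitrary examples.
if __name__ == "__main__":
    rng = np.random.default_rng(seed=0)
    # Two random 4-band "images" (e.g. RGB + NIR) with identical (height, width, bands) shapes.
    original = rng.integers(0, 256, size=(64, 64, 4), dtype=np.uint8)
    predicted = rng.integers(0, 256, size=(64, 64, 4), dtype=np.uint8)

    print("FSIM with hard-coded constants:", fsim(original, predicted))
    print("FSIM with tunable constants:   ", fsim_metric(original, predicted, T1=0.85, T2=160))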